@@ -472,6 +472,49 @@ static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
 	set_bit_le(i, map);
 }
 
+static inline u64 sanitize_msr(u64 msr)
+{
+	msr &= ~MSR_HV;
+	msr |= MSR_ME;
+	return msr;
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.cr = vcpu->arch.cr_tm;
+	vcpu->arch.xer = vcpu->arch.xer_tm;
+	vcpu->arch.lr = vcpu->arch.lr_tm;
+	vcpu->arch.ctr = vcpu->arch.ctr_tm;
+	vcpu->arch.amr = vcpu->arch.amr_tm;
+	vcpu->arch.ppr = vcpu->arch.ppr_tm;
+	vcpu->arch.dscr = vcpu->arch.dscr_tm;
+	vcpu->arch.tar = vcpu->arch.tar_tm;
+	memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm,
+	       sizeof(vcpu->arch.gpr));
+	vcpu->arch.fp = vcpu->arch.fp_tm;
+	vcpu->arch.vr = vcpu->arch.vr_tm;
+	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
+}
+
+static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.cr_tm = vcpu->arch.cr;
+	vcpu->arch.xer_tm = vcpu->arch.xer;
+	vcpu->arch.lr_tm = vcpu->arch.lr;
+	vcpu->arch.ctr_tm = vcpu->arch.ctr;
+	vcpu->arch.amr_tm = vcpu->arch.amr;
+	vcpu->arch.ppr_tm = vcpu->arch.ppr;
+	vcpu->arch.dscr_tm = vcpu->arch.dscr;
+	vcpu->arch.tar_tm = vcpu->arch.tar;
+	memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr,
+	       sizeof(vcpu->arch.gpr));
+	vcpu->arch.fp_tm = vcpu->arch.fp;
+	vcpu->arch.vr_tm = vcpu->arch.vr;
+	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
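Note on the first hunk: sanitize_msr() forces any MSR value a guest will run with out of hypervisor state (clears MSR_HV) and keeps machine checks deliverable (sets MSR_ME). Below is a minimal standalone sketch of that invariant, assuming the usual powerpc bit positions MSR_HV = bit 60 and MSR_ME = bit 12 from arch/powerpc/include/asm/reg.h; the toy program is not kernel code.

/*
 * Toy demonstration of the sanitize_msr() invariant: whatever MSR the
 * guest supplies, HV is stripped and ME is forced on.  The mask values
 * mirror arch/powerpc/include/asm/reg.h (an assumption of this sketch).
 */
#include <assert.h>
#include <stdint.h>

#define MSR_HV (1ULL << 60)	/* hypervisor state */
#define MSR_ME (1ULL << 12)	/* machine check enable */

static inline uint64_t sanitize_msr(uint64_t msr)
{
	msr &= ~MSR_HV;		/* guest must never run in HV state */
	msr |= MSR_ME;		/* machine checks must stay enabled */
	return msr;
}

int main(void)
{
	/* A guest-supplied value with HV set and ME clear gets fixed up. */
	uint64_t fixed = sanitize_msr(MSR_HV);

	assert(!(fixed & MSR_HV));
	assert(fixed & MSR_ME);
	return 0;
}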
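The two TM helpers move the full checkpointed register image (CR, XER, LR, CTR, AMR, PPR, DSCR, TAR, the GPRs, and the FP/VR/VRSAVE state) between the live vcpu fields and their _tm twins. The self-contained sketch below shows the direction each helper copies when a hypervisor emulates the transactional-memory instructions: treclaim rolls the checkpointed values back into the working registers, trechkpt makes the working values the new checkpoint. struct toy_vcpu and the main() driver are invented for illustration and model only the GPRs.

/*
 * Toy model of the copy direction of copy_from_checkpoint() and
 * copy_to_checkpoint().  struct toy_vcpu is a drastically reduced
 * stand-in for struct kvm_vcpu; only the memcpy directions match.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_vcpu {
	uint64_t gpr[32];	/* live (working) registers */
	uint64_t gpr_tm[32];	/* checkpointed copies */
};

/* treclaim emulation: checkpointed values replace the working values */
static void copy_from_checkpoint(struct toy_vcpu *v)
{
	memcpy(v->gpr, v->gpr_tm, sizeof(v->gpr));
}

/* trechkpt emulation: working values become the new checkpoint */
static void copy_to_checkpoint(struct toy_vcpu *v)
{
	memcpy(v->gpr_tm, v->gpr, sizeof(v->gpr));
}

int main(void)
{
	struct toy_vcpu v = { { 0 }, { 0 } };

	v.gpr[3] = 42;
	copy_to_checkpoint(&v);		/* as if emulating trechkpt */
	v.gpr[3] = 7;			/* transactional modification */
	copy_from_checkpoint(&v);	/* as if emulating treclaim */
	printf("r3 = %llu\n", (unsigned long long)v.gpr[3]);	/* 42 */
	return 0;
}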