powerpc/kvm/book3s_hv: Preserve guest CFAR register value

The CFAR (Come-From Address Register) is a useful debugging aid that
exists on POWER7 processors.  Currently HV KVM doesn't save or restore
the CFAR register for guest vcpus, making the CFAR of limited use in
guests.

This adds the necessary code to capture the CFAR value saved in the
early exception entry code (it has to be saved before any branch is
executed), save it in the vcpu.arch struct, and restore it on entry
to the guest.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Author:    Paul Mackerras, 2013-02-04 18:10:51 +0000
Committer: Benjamin Herrenschmidt
Commit:    0acb91112a (parent 1707dd1613)
5 files changed, 24 insertions(+), 2 deletions(-)
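For context, CFAR latches the address of the most recently taken branch, which is why it is valuable when diagnosing exceptions.  A guest kernel on a CFAR-capable CPU can inspect it with the usual SPR accessors; the following is a minimal sketch only, assuming guest kernel context and the standard mfspr()/SPRN_CFAR/cpu_has_feature() helpers, and is not part of this patch:

#include <linux/printk.h>
#include <asm/cputable.h>
#include <asm/reg.h>

/*
 * Illustrative only: print the current CFAR from guest kernel context.
 * Because CFAR tracks the last taken branch, the value seen here reflects
 * the branch into (or just before) this function, not a particular fault.
 */
static void show_cfar(void)
{
	if (cpu_has_feature(CPU_FTR_CFAR))
		pr_info("CFAR = 0x%lx\n", mfspr(SPRN_CFAR));
}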

@@ -199,10 +199,14 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define __KVM_HANDLER(area, h, n) \
 do_kvm_##n: \
+	BEGIN_FTR_SECTION_NESTED(947) \
+	ld r10,area+EX_CFAR(r13); \
+	std r10,HSTATE_CFAR(r13); \
+	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
 	ld r10,area+EX_R10(r13); \
 	stw r9,HSTATE_SCRATCH1(r13); \
 	ld r9,area+EX_R9(r13); \
 	std r12,HSTATE_SCRATCH0(r13); \
 	li r12,n; \
 	b kvmppc_interrupt
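The nested feature section is patched out on CPUs without CPU_FTR_CFAR; where it is active, it copies the CFAR snapshot that the first-level exception prolog already parked in the paca save area (the EX_CFAR slot) into the KVM host state, before any further branch can clobber the register.  A standalone C rendering of that data movement, using hypothetical stand-in types (the real code is the assembly above):

/* Hypothetical stand-ins for the paca exception save area and for the
 * kvmppc_host_state structure that HSTATE_CFAR points into. */
struct exc_save_area { unsigned long r9, r10, r12, cfar; };
struct kvm_host_state { unsigned long cfar; };

static int cpu_has_cfar;	/* stands in for the CPU_FTR_CFAR feature bit */

/* What the BEGIN/END_FTR_SECTION_NESTED(947) block above amounts to. */
static void kvm_handler_save_cfar(const struct exc_save_area *area,
				  struct kvm_host_state *hstate)
{
	if (cpu_has_cfar)
		hstate->cfar = area->cfar;	/* ld r10,EX_CFAR; std r10,HSTATE_CFAR */
}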

@@ -93,6 +93,9 @@ struct kvmppc_host_state {
 	u64 host_dscr;
 	u64 dec_expires;
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	u64 cfar;
+#endif
 };
 
 struct kvmppc_book3s_shadow_vcpu {
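The new field goes into kvmppc_host_state because that structure is embedded in each CPU's paca, which real-mode KVM code reaches through r13; HSTATE_CFAR (generated below) is simply its byte offset.  A heavily simplified, hypothetical layout sketch:

/* Hypothetical and much reduced: the real paca_struct has many more fields.
 * The point is only that kvm_hstate (and so its cfar slot) sits at a fixed
 * offset from the paca pointer held in r13. */
struct demo_host_state { unsigned long cfar; };	/* like kvmppc_host_state */
struct demo_paca {
	unsigned long lock_token;	/* ... other per-cpu state ... */
	struct demo_host_state kvm_hstate;
};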

@@ -440,6 +440,7 @@ struct kvm_vcpu_arch {
 	ulong uamor;
 	u32 ctrl;
 	ulong dabr;
+	ulong cfar;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;

@@ -479,6 +479,7 @@ int main(void)
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
@@ -558,6 +559,10 @@ int main(void)
 	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	HSTATE_FIELD(HSTATE_CFAR, cfar);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 #else /* CONFIG_PPC_BOOK3S */
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
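asm-offsets.c is never executed; it is compiled to assembly and the build turns the emitted marker lines into #define constants in a generated header, which is how names like VCPU_CFAR and HSTATE_CFAR become plain byte offsets usable from hand-written .S code.  A standalone illustration of the trick with made-up names (compile with -S and inspect the output; this is the same idea as the kernel's DEFINE(), not its exact build plumbing):

#include <stddef.h>

/* Made-up struct standing in for kvm_vcpu.arch. */
struct demo_vcpu_arch {
	unsigned long pc;
	unsigned long msr;
	unsigned long cfar;
};

/* Ask the compiler to print the constant into the generated assembly,
 * where a build script can pick it up and turn it into a #define. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(DEMO_VCPU_CFAR, offsetof(struct demo_vcpu_arch, cfar));
	return 0;
}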

@@ -539,6 +539,11 @@ fast_guest_return:
 	/* Enter guest */
 
+BEGIN_FTR_SECTION
+	ld r5, VCPU_CFAR(r4)
+	mtspr SPRN_CFAR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+
 	ld r5, VCPU_LR(r4)
 	lwz r6, VCPU_CR(r4)
 	mtlr r5
@@ -604,6 +609,10 @@ kvmppc_interrupt:
 	lwz r4, HSTATE_SCRATCH1(r13)
 	std r3, VCPU_GPR(R12)(r9)
 	stw r4, VCPU_CR(r9)
+BEGIN_FTR_SECTION
+	ld r3, HSTATE_CFAR(r13)
+	std r3, VCPU_CFAR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 
 	/* Restore R1/R2 so we can handle faults */
 	ld r1, HSTATE_HOST_R1(r13)
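Taken together with the __KVM_HANDLER change, these two hunks complete the round trip: fast_guest_return reloads the SPR from the vcpu's saved value just before entering the guest, and kvmppc_interrupt copies the value captured at exception entry from the host state into the vcpu.  A standalone C sketch of that round trip, using hypothetical stand-in names (the real code is the real-mode assembly above, and spr_cfar merely models the CFAR register):

/* Hypothetical stand-ins for the structures involved. */
struct demo_vcpu { unsigned long cfar; };	/* like kvm_vcpu.arch.cfar */
struct demo_hstate { unsigned long cfar; };	/* like kvmppc_host_state  */

static unsigned long spr_cfar;			/* models the CFAR SPR     */
static int cpu_has_cfar;			/* models CPU_FTR_CFAR     */

/* fast_guest_return: restore the guest's CFAR just before entry. */
static void guest_entry(const struct demo_vcpu *vcpu)
{
	if (cpu_has_cfar)
		spr_cfar = vcpu->cfar;		/* mtspr SPRN_CFAR, r5 */
}

/* kvmppc_interrupt: save the value snapshotted at exception entry. */
static void guest_exit(struct demo_vcpu *vcpu, const struct demo_hstate *hstate)
{
	if (cpu_has_cfar)
		vcpu->cfar = hstate->cfar;	/* HSTATE_CFAR -> VCPU_CFAR */
}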