KVM: PPC: Book3S: correct width in XER handling
In 64 bit kernels, the Fixed Point Exception Register (XER) is a 64 bit
field (e.g. in kvm_regs and kvm_vcpu_arch) and in most places it is
accessed as such.

This patch corrects places where it is accessed as a 32 bit field by a
64 bit kernel. In some cases this is via a 32 bit load or store
instruction which, depending on endianness, will cause either the lower
or upper 32 bits to be missed. In another case it is cast as a u32,
causing the upper 32 bits to be cleared.

This patch corrects those places by extending the access methods to
64 bits.

Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 563a1e93af
commit c63517c2e3
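Aside (illustration only, not part of the patch): a minimal user-space C sketch of the truncating-accessor problem the commit message describes. The struct and function names below are invented for the example; the kernel equivalents are the kvmppc_get_xer()/kvmppc_set_xer() accessors over vcpu->arch.xer changed in the first and third hunks.

/* Illustration only: a u32-typed accessor silently drops XER's upper word. */
#include <stdint.h>
#include <stdio.h>

struct fake_vcpu_arch {
	uint64_t xer;			/* 64-bit field, like vcpu->arch.xer */
};

/* Old-style accessor: the return type narrows the value to 32 bits. */
static uint32_t get_xer_narrow(struct fake_vcpu_arch *a)
{
	return a->xer;			/* implicit truncation */
}

/* Full-width accessor, analogous to the ulong version in the patch. */
static uint64_t get_xer_wide(struct fake_vcpu_arch *a)
{
	return a->xer;
}

int main(void)
{
	struct fake_vcpu_arch a = { .xer = 0x123456789abcdef0ULL };

	printf("narrow: 0x%08x\n", get_xer_narrow(&a));
	printf("wide:   0x%016llx\n", (unsigned long long)get_xer_wide(&a));
	return 0;
}

With the value above, the narrow accessor returns 0x9abcdef0 and the upper word 0x12345678 is silently lost; widening the accessors to ulong, as the patch does, avoids that.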
@@ -226,12 +226,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr;
 }
 
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
 	vcpu->arch.xer = val;
 }
 
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.xer;
 }
@@ -132,7 +132,7 @@ struct kvmppc_book3s_shadow_vcpu {
 	bool in_use;
 	ulong gpr[14];
 	u32 cr;
-	u32 xer;
+	ulong xer;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
@@ -54,12 +54,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr;
 }
 
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
 	vcpu->arch.xer = val;
 }
 
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.xer;
 }
@@ -944,7 +944,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	blt	hdec_soon
 
 	ld	r6, VCPU_CTR(r4)
-	lwz	r7, VCPU_XER(r4)
+	ld	r7, VCPU_XER(r4)
 
 	mtctr	r6
 	mtxer	r7
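Aside (illustration only, not part of the patch): the lwz -> ld change above, and the stw -> std / PPC_STL changes in later hunks, matter because a 32-bit access at the address of a 64-bit field only touches the word at the lowest address, and host endianness decides whether that is the upper or the lower half of XER. A user-space C sketch of the same effect, with invented names:

/* Illustration only: a 32-bit load from a 64-bit slot is endian-dependent. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t xer = 0x123456789abcdef0ULL;	/* stands in for VCPU_XER */
	uint32_t word;

	/* The C analogue of using lwz at the field's address. */
	memcpy(&word, &xer, sizeof(word));

	/* Little-endian hosts print 0x9abcdef0, big-endian hosts 0x12345678. */
	printf("32-bit load at &xer: 0x%08x\n", word);
	return 0;
}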
@@ -1181,7 +1181,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	mfctr	r3
 	mfxer	r4
 	std	r3, VCPU_CTR(r9)
-	stw	r4, VCPU_XER(r9)
+	std	r4, VCPU_XER(r9)
 
 	/* If this is a page table miss then see if it's theirs or ours */
 	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
@@ -1763,7 +1763,7 @@ kvmppc_hdsi:
 	bl	kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
-	lwz	r8, VCPU_XER(r9)
+	ld	r8, VCPU_XER(r9)
 	mtctr	r7
 	mtxer	r8
 	mr	r4, r9
@@ -123,7 +123,7 @@ no_dcbz32_on:
 	PPC_LL	r8, SVCPU_CTR(r3)
 	PPC_LL	r9, SVCPU_LR(r3)
 	lwz	r10, SVCPU_CR(r3)
-	lwz	r11, SVCPU_XER(r3)
+	PPC_LL	r11, SVCPU_XER(r3)
 
 	mtctr	r8
 	mtlr	r9
@@ -237,7 +237,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	mfctr	r8
 	mflr	r9
 
-	stw	r5, SVCPU_XER(r13)
+	PPC_STL	r5, SVCPU_XER(r13)
 	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
 	stw	r7, SVCPU_FAULT_DSISR(r13)
 	PPC_STL	r8, SVCPU_CTR(r13)