KVM: PPC: BOOK3S: PR: Enable Little Endian PR guest

This patch makes sure we inherit the LE bit correctly in the different cases
so that we can run a Little Endian distro in PR mode.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
commit e5ee5422f8
parent 8f20a3ab27
Aneesh Kumar K.V, 2014-05-05 08:39:44 +05:30; committed by Alexander Graf
4 changed files with 25 additions and 4 deletions
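The knob user space drives for this is the KVM_REG_PPC_LPCR one-reg: setting the LPCR_ILE bit makes the vcpu take interrupts with MSR_LE set, which is what a little-endian distro expects. As a rough illustration (not part of the patch), the sketch below shows how a VMM might flip that bit on a vcpu file descriptor. The helper name and the SKETCH_LPCR_ILE constant are assumptions of the sketch, since the kernel's LPCR_ILE define is not exported to user space, and it assumes KVM_REG_PPC_LPCR is the 32-bit one-reg of this era.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumed mirror of the kernel-internal LPCR_ILE bit (bit 38, i.e. 1 << (63 - 38)). */
#define SKETCH_LPCR_ILE 0x02000000u

/* Illustrative helper: make the vcpu take interrupts little-endian (or not). */
static int set_guest_interrupt_le(int vcpu_fd, int enable)
{
	/* KVM_REG_PPC_LPCR is assumed here to be the 32-bit one-reg of this era. */
	uint32_t lpcr = enable ? SKETCH_LPCR_ILE : 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,
		.addr = (uint64_t)(uintptr_t)&lpcr,
	};

	/* Lands in kvmppc_set_one_reg_pr() -> kvmppc_set_lpcr_pr() in the book3s_pr.c hunks below. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

A VMM such as QEMU would typically drive this path when the guest asks for little-endian exception delivery (for example via H_SET_MODE).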

arch/powerpc/include/asm/kvm_host.h

@@ -562,6 +562,7 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_PPC_BOOK3S
 	ulong fault_dar;
 	u32 fault_dsisr;
+	unsigned long intr_msr;
 #endif
 #ifdef CONFIG_BOOKE
@@ -654,7 +655,6 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
-	unsigned long intr_msr;
 #endif
 };

arch/powerpc/kernel/asm-offsets.c

@@ -493,7 +493,6 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
-	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -528,6 +527,7 @@ int main(void)
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
 	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
 	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));

arch/powerpc/kvm/book3s_64_mmu.c

@@ -38,7 +38,7 @@
 static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(

arch/powerpc/kvm/book3s_pr.c

@@ -249,7 +249,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 	ulong smsr = vcpu->arch.shared->msr;
 	/* Guest MSR values */
-	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
+	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
 	/* Process MSR values */
 	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
@@ -1110,6 +1110,15 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_HIOR:
 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
 		break;
+	case KVM_REG_PPC_LPCR:
+		/*
+		 * We are only interested in the LPCR_ILE bit
+		 */
+		if (vcpu->arch.intr_msr & MSR_LE)
+			*val = get_reg_val(id, LPCR_ILE);
+		else
+			*val = get_reg_val(id, 0);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -1118,6 +1127,14 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
+static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+	if (new_lpcr & LPCR_ILE)
+		vcpu->arch.intr_msr |= MSR_LE;
+	else
+		vcpu->arch.intr_msr &= ~MSR_LE;
+}
+
 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 				 union kvmppc_one_reg *val)
 {
@@ -1128,6 +1145,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 		to_book3s(vcpu)->hior = set_reg_val(id, *val);
 		to_book3s(vcpu)->hior_explicit = true;
 		break;
+	case KVM_REG_PPC_LPCR:
+		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -1180,6 +1200,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 	vcpu->arch.pvr = 0x3C0301;
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		vcpu->arch.pvr = mfspr(SPRN_PVR);
+	vcpu->arch.intr_msr = MSR_SF;
 #else
 	/* default to book3s_32 (750) */
 	vcpu->arch.pvr = 0x84202;
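For completeness, the get side above only ever reports the ILE bit back, so a matching read-back helper can treat the register as a boolean. This is an illustrative companion to the earlier sketch, not part of the patch: SKETCH_LPCR_ILE and the helper name are assumptions, and KVM_GET_ONE_REG is expected to fail with EINVAL on kernels where PR KVM does not yet handle KVM_REG_PPC_LPCR.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define SKETCH_LPCR_ILE 0x02000000u	/* assumed mirror of the kernel's LPCR_ILE */

/* Illustrative helper: does this vcpu deliver interrupts little-endian? */
static int guest_interrupts_are_le(int vcpu_fd)
{
	uint32_t lpcr = 0;	/* KVM_REG_PPC_LPCR assumed 32 bits wide here */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,
		.addr = (uint64_t)(uintptr_t)&lpcr,
	};

	/* Kernels without this patch reject the id with -EINVAL. */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	return (lpcr & SKETCH_LPCR_ILE) != 0;
}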