KVM: PPC: booke: Check for MSR[WE] in prepare_to_enter
This prevents us from inappropriately blocking in a KVM_SET_REGS ioctl -- the MSR[WE] will take effect when the guest is next entered.

It also causes SRR1[WE] to be set when we enter the guest's interrupt handler, which is what e500 hardware is documented to do.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 25051b5a5a
commit c59a6a3e4e
arch/powerpc/kvm/booke.c

@@ -124,12 +124,6 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 	vcpu->arch.shared->msr = new_msr;
 
 	kvmppc_mmu_msr_notify(vcpu, old_msr);
-
-	if (vcpu->arch.shared->msr & MSR_WE) {
-		kvm_vcpu_block(vcpu);
-		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-	};
-
 	kvmppc_vcpu_sync_spe(vcpu);
 }
 
@@ -288,15 +282,12 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	return allowed;
 }
 
-/* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned long old_pending = vcpu->arch.pending_exceptions;
 	unsigned int priority;
 
-	WARN_ON_ONCE(!irqs_disabled());
-
 	priority = __ffs(*pending);
 	while (priority <= BOOKE_IRQPRIO_MAX) {
 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
 
@@ -314,6 +305,23 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 		vcpu->arch.shared->int_pending = 0;
 }
 
+/* Check pending exceptions and deliver one, if possible. */
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+	WARN_ON_ONCE(!irqs_disabled());
+
+	kvmppc_core_check_exceptions(vcpu);
+
+	if (vcpu->arch.shared->msr & MSR_WE) {
+		local_irq_enable();
+		kvm_vcpu_block(vcpu);
+		local_irq_disable();
+
+		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+		kvmppc_core_check_exceptions(vcpu);
+	};
+}
+
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
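For readability, this is roughly how the new entry path reads once the hunks above are applied. It is assembled from the added lines of the last hunk; the comments are added here for explanation and are not part of the patch.

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	/* Callers are expected to have hard interrupts disabled here. */
	WARN_ON_ONCE(!irqs_disabled());

	/* Deliver whatever is already pending before entering the guest. */
	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		/* The guest is waiting; block with host interrupts enabled
		 * so wakeup events can arrive, then disable them again. */
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		/* Re-check: an interrupt may have become pending while we
		 * were blocked, and MSR[WE] is still set at this point, so
		 * SRR1[WE] is set on entry to the guest's handler. */
		kvmppc_core_check_exceptions(vcpu);
	};
}

Blocking moves out of kvmppc_set_msr() and into this pre-entry hook, so a KVM_SET_REGS ioctl that sets MSR[WE] no longer blocks; the wait takes effect on the next guest entry.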