mirror of https://gitee.com/openkylin/linux.git
x86/pv: Rework arch_local_irq_restore() to not use popf
POPF is a rather expensive operation, so don't use it for restoring irq flags.
Instead, test whether interrupts are enabled in the flags parameter and enable
interrupts via STI in that case.

This results in the restore_fl paravirt op no longer being needed.

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210120135555.32594-7-jgross@suse.com
parent afd30525a6
commit ab234a260b
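Before the diff itself, here is the idea in plain C. This is only a minimal illustrative sketch — the names restore_fl_popf() and restore_fl_sti() are hypothetical, not the kernel helpers, and STI/POPF are privileged instructions, so the snippet is meant to be read rather than executed in user space — but it mirrors the logic the patch switches to:

#define X86_EFLAGS_IF 0x00000200UL  /* EFLAGS bit 9: interrupts enabled */

/* Old scheme: write back the whole flags word with PUSH + POPF. */
static inline void restore_fl_popf(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     : "g" (flags)
                     : "memory", "cc");
}

/*
 * New scheme: only X86_EFLAGS_IF is relevant.  irq_restore undoes a
 * prior irq_save, which left interrupts disabled, so the only action
 * ever needed is to re-enable them via STI when IF was set in flags;
 * interrupts are never disabled here.
 */
static inline void restore_fl_sti(unsigned long flags)
{
        if (flags & X86_EFLAGS_IF)
                asm volatile("sti" : : : "memory");
}

In the actual patch the new helper is arch_local_irq_restore(), written in terms of arch_irqs_disabled_flags() and arch_local_irq_enable(), so a paravirtualized kernel simply reuses the existing irq_enable op; that is why the restore_fl paravirt op, its patching entry and the xen_restore_fl*() implementations can all be dropped in the hunks below.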
@@ -35,15 +35,6 @@ extern __always_inline unsigned long native_save_fl(void)
 	return flags;
 }
 
-extern inline void native_restore_fl(unsigned long flags);
-extern inline void native_restore_fl(unsigned long flags)
-{
-	asm volatile("push %0 ; popf"
-		     : /* no output */
-		     :"g" (flags)
-		     :"memory", "cc");
-}
-
 static __always_inline void native_irq_disable(void)
 {
 	asm volatile("cli": : :"memory");
@@ -79,11 +70,6 @@ static __always_inline unsigned long arch_local_save_flags(void)
 	return native_save_fl();
 }
 
-static __always_inline void arch_local_irq_restore(unsigned long flags)
-{
-	native_restore_fl(flags);
-}
-
 static __always_inline void arch_local_irq_disable(void)
 {
 	native_irq_disable();
@@ -152,6 +138,12 @@ static __always_inline int arch_irqs_disabled(void)
 
 	return arch_irqs_disabled_flags(flags);
 }
+
+static __always_inline void arch_local_irq_restore(unsigned long flags)
+{
+	if (!arch_irqs_disabled_flags(flags))
+		arch_local_irq_enable();
+}
 #else
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_XEN_PV
@@ -648,11 +648,6 @@ static inline notrace unsigned long arch_local_save_flags(void)
 	return PVOP_CALLEE0(unsigned long, irq.save_fl);
 }
 
-static inline notrace void arch_local_irq_restore(unsigned long f)
-{
-	PVOP_VCALLEE1(irq.restore_fl, f);
-}
-
 static inline notrace void arch_local_irq_disable(void)
 {
 	PVOP_VCALLEE0(irq.irq_disable);
@@ -168,16 +168,13 @@ struct pv_cpu_ops {
 struct pv_irq_ops {
 #ifdef CONFIG_PARAVIRT_XXL
 	/*
-	 * Get/set interrupt state. save_fl and restore_fl are only
-	 * expected to use X86_EFLAGS_IF; all other bits
-	 * returned from save_fl are undefined, and may be ignored by
-	 * restore_fl.
+	 * Get/set interrupt state. save_fl is expected to use X86_EFLAGS_IF;
+	 * all other bits returned from save_fl are undefined.
 	 *
 	 * NOTE: These functions callers expect the callee to preserve
 	 * more registers than the standard C calling convention.
 	 */
 	struct paravirt_callee_save save_fl;
-	struct paravirt_callee_save restore_fl;
 	struct paravirt_callee_save irq_disable;
 	struct paravirt_callee_save irq_enable;
 
@@ -13,14 +13,3 @@ SYM_FUNC_START(native_save_fl)
 	ret
 SYM_FUNC_END(native_save_fl)
 EXPORT_SYMBOL(native_save_fl)
-
-/*
- * void native_restore_fl(unsigned long flags)
- * %eax/%rdi: flags
- */
-SYM_FUNC_START(native_restore_fl)
-	push %_ASM_ARG1
-	popf
-	ret
-SYM_FUNC_END(native_restore_fl)
-EXPORT_SYMBOL(native_restore_fl)
@@ -320,7 +320,6 @@ struct paravirt_patch_template pv_ops = {
 
 	/* Irq ops. */
 	.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
-	.irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
 	.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
 	.irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
 	.irq.safe_halt = native_safe_halt,
@@ -25,7 +25,6 @@ struct patch_xxl {
 	const unsigned char mmu_read_cr2[3];
 	const unsigned char mmu_read_cr3[3];
 	const unsigned char mmu_write_cr3[3];
-	const unsigned char irq_restore_fl[2];
 	const unsigned char cpu_wbinvd[2];
 	const unsigned char mov64[3];
 };
@@ -37,7 +36,6 @@ static const struct patch_xxl patch_data_xxl = {
 	.mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax
 	.mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax
 	.mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
-	.irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq
 	.cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
 	.mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
 };
@@ -71,7 +69,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
 	switch (type) {
 
 #ifdef CONFIG_PARAVIRT_XXL
-	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
 	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
 	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
 	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);
@@ -1035,8 +1035,6 @@ void __init xen_setup_vcpu_info_placement(void)
 	 */
 	if (xen_have_vcpu_info_placement) {
 		pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
-		pv_ops.irq.restore_fl =
-			__PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
 		pv_ops.irq.irq_disable =
 			__PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
 		pv_ops.irq.irq_enable =
@@ -42,28 +42,6 @@ asmlinkage __visible unsigned long xen_save_fl(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
-__visible void xen_restore_fl(unsigned long flags)
-{
-	struct vcpu_info *vcpu;
-
-	/* convert from IF type flag */
-	flags = !(flags & X86_EFLAGS_IF);
-
-	/* See xen_irq_enable() for why preemption must be disabled. */
-	preempt_disable();
-	vcpu = this_cpu_read(xen_vcpu);
-	vcpu->evtchn_upcall_mask = flags;
-
-	if (flags == 0) {
-		barrier(); /* unmask then check (avoid races) */
-		if (unlikely(vcpu->evtchn_upcall_pending))
-			xen_force_evtchn_callback();
-		preempt_enable();
-	} else
-		preempt_enable_no_resched();
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
-
 asmlinkage __visible void xen_irq_disable(void)
 {
 	/* There's a one instruction preempt window here. We need to
@@ -118,7 +96,6 @@ static void xen_halt(void)
 
 static const struct pv_irq_ops xen_irq_ops __initconst = {
 	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
-	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
 	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
 	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
 
@@ -72,34 +72,6 @@ SYM_FUNC_START(xen_save_fl_direct)
 	ret
 SYM_FUNC_END(xen_save_fl_direct)
 
-
-/*
- * In principle the caller should be passing us a value return from
- * xen_save_fl_direct, but for robustness sake we test only the
- * X86_EFLAGS_IF flag rather than the whole byte. After setting the
- * interrupt mask state, it checks for unmasked pending events and
- * enters the hypervisor to get them delivered if so.
- */
-SYM_FUNC_START(xen_restore_fl_direct)
-	FRAME_BEGIN
-	testw $X86_EFLAGS_IF, %di
-	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-	/*
-	 * Preempt here doesn't matter because that will deal with any
-	 * pending interrupts. The pending check may end up being run
-	 * on the wrong CPU, but that doesn't hurt.
-	 */
-
-	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jnz 1f
-	call check_events
-1:
-	FRAME_END
-	ret
-SYM_FUNC_END(xen_restore_fl_direct)
-
-
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
@@ -131,7 +131,6 @@ static inline void __init xen_efi_init(struct boot_params *boot_params)
 __visible void xen_irq_enable_direct(void);
 __visible void xen_irq_disable_direct(void);
 __visible unsigned long xen_save_fl_direct(void);
-__visible void xen_restore_fl_direct(unsigned long);
 
 __visible unsigned long xen_read_cr2(void);
 __visible unsigned long xen_read_cr2_direct(void);