mirror of https://gitee.com/openkylin/linux.git
x86/paravirt: Add _safe to the read_msr() and write_msr() PV callbacks
These callbacks match the _safe variants, so name them accordingly.
This will make room for unsafe PV callbacks.

Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: KVM list <kvm@vger.kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel <Xen-devel@lists.xen.org>
Link: http://lkml.kernel.org/r/9ee3fb6a196a514c93325bdfa15594beecf04876.1459605520.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent ae7ef45e12
commit c2ee03b2a9
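The rename itself is mechanical, but it helps to see the call path it touches. Below is a minimal caller-side sketch, not part of the patch: rdmsrl_safe() expands to the paravirt_read_msr_safe() wrapper shown in the diff and reports a failed access through its return value instead of faulting. The probe_efer() helper and the choice of MSR_EFER are illustrative assumptions only.

#include <linux/printk.h>
#include <linux/types.h>
#include <asm/msr.h>

/* Hypothetical example: probe a possibly-unreadable MSR without crashing. */
static int probe_efer(u64 *efer)
{
	/* On a paravirtualized kernel this routes to paravirt_read_msr_safe(). */
	int err = rdmsrl_safe(MSR_EFER, efer);

	if (err)
		pr_warn("MSR_EFER not readable (err=%d)\n", err);

	return err;
}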
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -130,34 +130,35 @@ static inline void wbinvd(void)
 
 #define get_kernel_rpl()  (pv_info.kernel_rpl)
 
-static inline u64 paravirt_read_msr(unsigned msr, int *err)
+static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
 {
-	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
 }
 
-static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
+static inline int paravirt_write_msr_safe(unsigned msr,
+					  unsigned low, unsigned high)
 {
-	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
+	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
 }
 
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2)			\
 do {						\
 	int _err;				\
-	u64 _l = paravirt_read_msr(msr, &_err);	\
+	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
 	val1 = (u32)_l;				\
 	val2 = _l >> 32;			\
 } while (0)
 
 #define wrmsr(msr, val1, val2)			\
 do {						\
-	paravirt_write_msr(msr, val1, val2);	\
+	paravirt_write_msr_safe(msr, val1, val2);	\
 } while (0)
 
 #define rdmsrl(msr, val)			\
 do {						\
 	int _err;				\
-	val = paravirt_read_msr(msr, &_err);	\
+	val = paravirt_read_msr_safe(msr, &_err);	\
 } while (0)
 
 static inline void wrmsrl(unsigned msr, u64 val)
@@ -165,23 +166,23 @@ static inline void wrmsrl(unsigned msr, u64 val)
 	wrmsr(msr, (u32)val, (u32)(val>>32));
 }
 
-#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)
+#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr, a, b)			\
-({						\
-	int _err;				\
-	u64 _l = paravirt_read_msr(msr, &_err);	\
-	(*a) = (u32)_l;				\
-	(*b) = _l >> 32;			\
-	_err;					\
+#define rdmsr_safe(msr, a, b)				\
+({							\
+	int _err;					\
+	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
+	(*a) = (u32)_l;					\
+	(*b) = _l >> 32;				\
+	_err;						\
 })
 
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
 
-	*p = paravirt_read_msr(msr, &err);
+	*p = paravirt_read_msr_safe(msr, &err);
 	return err;
 }
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -155,10 +155,10 @@ struct pv_cpu_ops {
 	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
 		      unsigned int *ecx, unsigned int *edx);
 
-	/* MSR, PMC and TSR operations.
-	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
-	u64 (*read_msr)(unsigned int msr, int *err);
-	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+	/* MSR operations.
+	   err = 0/-EIO.  wrmsr returns 0/-EIO. */
+	u64 (*read_msr_safe)(unsigned int msr, int *err);
+	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);
 
 	u64 (*read_pmc)(int counter);
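The comment change above also pins down the error contract of the renamed hooks: *err (and the wrmsr return value) is 0 on success and -EIO on a faulting access. As a rough illustration of that contract only, here is a hypothetical backend, not from the kernel, that answers a small table of MSRs and reports -EIO for everything else; demo_read_msr_safe() and demo_msr_store[] are invented names.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct demo_msr {
	unsigned int index;
	u64 value;
};

/* One known MSR; 0xc0000080 is the architectural index of MSR_EFER. */
static struct demo_msr demo_msr_store[] = {
	{ .index = 0xc0000080, .value = 0 },
};

/* Same shape as the read_msr_safe hook: value returned, status via *err. */
static u64 demo_read_msr_safe(unsigned int msr, int *err)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_msr_store); i++) {
		if (demo_msr_store[i].index == msr) {
			*err = 0;
			return demo_msr_store[i].value;
		}
	}

	*err = -EIO;	/* unknown MSR: report an error instead of faulting */
	return 0;
}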
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -339,8 +339,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 	.write_cr8 = native_write_cr8,
 #endif
 	.wbinvd = native_wbinvd,
-	.read_msr = native_read_msr_safe,
-	.write_msr = native_write_msr_safe,
+	.read_msr_safe = native_read_msr_safe,
+	.write_msr_safe = native_write_msr_safe,
 	.read_pmc = native_read_pmc,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1222,8 +1222,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
 	.wbinvd = native_wbinvd,
 
-	.read_msr = xen_read_msr_safe,
-	.write_msr = xen_write_msr_safe,
+	.read_msr_safe = xen_read_msr_safe,
+	.write_msr_safe = xen_write_msr_safe,
 
 	.read_pmc = xen_read_pmc,