x86/split_lock: Avoid runtime reads of the TEST_CTRL MSR

In a context switch from a task that is detecting split locks to one that
is not (or vice versa), we need to update the TEST_CTRL MSR. Currently this
is done with the common sequence:

	read the MSR
	flip the bit
	write the MSR

in order to avoid changing the value of any reserved bits in the MSR.
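
For reference, that runtime read-modify-write looks roughly like this (a
sketch of the pre-patch sld_update_msr(), reconstructed from the lines
removed by the patch below):

	static void sld_update_msr(bool on)
	{
		u64 test_ctrl_val;

		/* MSR read on every transition - this is the expensive part */
		rdmsrl(MSR_TEST_CTRL, test_ctrl_val);

		if (on)
			test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
		else
			test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

		wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
	}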

Cache the unused and reserved bits of the TEST_CTRL MSR, with the
SPLIT_LOCK_DETECT bit cleared, during initialization, so we can avoid an
expensive RDMSR instruction during context switch.
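
With the MSR value cached at initialization, and the SPLIT_LOCK_DETECT bit
known to be clear in the cached copy, the update path no longer needs an
RDMSR and the "off" case needs no masking. A condensed sketch of the
resulting flow, matching the patch below:

	/* split_lock_setup(): read MSR_TEST_CTRL once and cache it */
	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	/* sld_update_msr(): start from the cached value, in which the
	 * SPLIT_LOCK_DETECT bit is already clear, and write it back.
	 */
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);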

Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Originally-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200325030924.132881-3-xiaoyao.li@intel.com

@@ -45,6 +45,7 @@ enum split_lock_detect_state {
  * split lock detect, unless there is a command line override.
  */
 static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
+static u64 msr_test_ctrl_cache __ro_after_init;
 
 /*
  * Processors which have self-snooping capability can handle conflicting
@@ -1034,6 +1035,8 @@ static void __init split_lock_setup(void)
 		break;
 	}
 
+	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+
 	if (!split_lock_verify_msr(true)) {
 		pr_info("MSR access failed: Disabled\n");
 		return;
@@ -1050,14 +1053,10 @@ static void __init split_lock_setup(void)
  */
 static void sld_update_msr(bool on)
 {
-	u64 test_ctrl_val;
-
-	rdmsrl(MSR_TEST_CTRL, test_ctrl_val);
+	u64 test_ctrl_val = msr_test_ctrl_cache;
 
 	if (on)
 		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
-	else
-		test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
 
 	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
 }