x86/hyper-v: Globalize vp_index

To support implementing remote TLB flushing on Hyper-V with a hypercall
we need to make vp_index available outside of the vmbus module. Rename it
and globalize it.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Reviewed-by: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Jork Loeser <Jork.Loeser@microsoft.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Simon Xiao <sixiao@microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: devel@linuxdriverproject.org
Link: http://lkml.kernel.org/r/20170802160921.21791-7-vkuznets@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Vitaly Kuznetsov, 2017-08-02 18:09:18 +02:00 (committed by Ingo Molnar)
parent 806c89273b
commit 7415aea607
9 changed files with 65 additions and 95 deletions

arch/x86/hyperv/hv_init.c

@@ -26,6 +26,8 @@
 #include <linux/mm.h>
 #include <linux/clockchips.h>
 #include <linux/hyperv.h>
+#include <linux/slab.h>
+#include <linux/cpuhotplug.h>

 #ifdef CONFIG_HYPERV_TSCPAGE
@@ -80,6 +82,20 @@ EXPORT_SYMBOL_GPL(hv_hypercall_pg);
 struct clocksource *hyperv_cs;
 EXPORT_SYMBOL_GPL(hyperv_cs);

+u32 *hv_vp_index;
+EXPORT_SYMBOL_GPL(hv_vp_index);
+
+static int hv_cpu_init(unsigned int cpu)
+{
+        u64 msr_vp_index;
+
+        hv_get_vp_index(msr_vp_index);
+
+        hv_vp_index[smp_processor_id()] = msr_vp_index;
+
+        return 0;
+}
+
 /*
  * This function is to be invoked early in the boot sequence after the
  * hypervisor has been detected.
@@ -95,6 +111,16 @@ void hyperv_init(void)
         if (x86_hyper != &x86_hyper_ms_hyperv)
                 return;

+        /* Allocate percpu VP index */
+        hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
+                                    GFP_KERNEL);
+        if (!hv_vp_index)
+                return;
+
+        if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
+                              hv_cpu_init, NULL) < 0)
+                goto free_vp_index;
+
         /*
          * Setup the hypercall page and enable hypercalls.
          * 1. Register the guest ID
@@ -106,7 +132,7 @@ void hyperv_init(void)
         hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
         if (hv_hypercall_pg == NULL) {
                 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
-                return;
+                goto free_vp_index;
         }

         rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -149,6 +175,12 @@ void hyperv_init(void)
         hyperv_cs = &hyperv_cs_msr;
         if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
                 clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+
+        return;
+
+free_vp_index:
+        kfree(hv_vp_index);
+        hv_vp_index = NULL;
 }

 /*
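
A note on the hotplug wiring above: hv_cpu_init() is registered through the dynamic CPU-hotplug state machine, so the VP index is recorded for every CPU, including CPUs onlined after boot. Below is a minimal sketch of that registration pattern; the example_* names are hypothetical and not part of this patch.

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Hypothetical per-CPU callback: the core runs it on every CPU that is
 * already online at registration time and on each CPU onlined later. */
static int example_cpu_online(unsigned int cpu)
{
        /* per-CPU setup goes here, e.g. reading a per-CPU MSR */
        return 0;       /* non-zero aborts onlining of this CPU */
}

static int __init example_init(void)
{
        int ret;

        /* CPUHP_AP_ONLINE_DYN asks the core to pick a free dynamic state
         * slot; the NULL teardown means nothing runs on CPU offline. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                example_cpu_online, NULL);
        return ret < 0 ? ret : 0;
}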

arch/x86/include/asm/mshyperv.h

@@ -282,6 +282,30 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
         return status;
 }

+/*
+ * Hypervisor's notion of virtual processor ID is different from
+ * Linux' notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Setup a map for easy access
+ * to this information.
+ */
+extern u32 *hv_vp_index;
+
+/**
+ * hv_cpu_number_to_vp_number() - Map CPU to VP.
+ * @cpu_number: CPU number in Linux terms
+ *
+ * This function returns the mapping between the Linux processor
+ * number and the hypervisor's virtual processor number, useful
+ * in making hypercalls and such that talk about specific
+ * processors.
+ *
+ * Return: Virtual processor number in Hyper-V terms
+ */
+static inline int hv_cpu_number_to_vp_number(int cpu_number)
+{
+        return hv_vp_index[cpu_number];
+}
+
 void hyperv_init(void);
 void hyperv_report_panic(struct pt_regs *regs);
 bool hv_is_hypercall_page_setup(void);
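
To illustrate how a hypercall path (such as the remote TLB flush mentioned in the changelog) might consume this mapping, here is a hedged sketch; the helper name and the assumption that every VP number fits into a single 64-bit mask are mine, not part of this patch. The pci-hyperv hunks further down do essentially the same thing when building int_target.vp_mask.

#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/mshyperv.h>

/*
 * Hypothetical helper: translate a Linux cpumask into the flat VP-mask
 * representation that some Hyper-V hypercalls take. Assumes all VP
 * numbers on the system are below 64.
 */
static u64 example_cpumask_to_vpmask(const struct cpumask *cpus)
{
        u64 vp_mask = 0;
        int cpu;

        for_each_cpu(cpu, cpus)
                vp_mask |= 1ULL << hv_cpu_number_to_vp_number(cpu);

        return vp_mask;
}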

drivers/hv/channel_mgmt.c

@@ -599,7 +599,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
                 */
                channel->numa_node = 0;
                channel->target_cpu = 0;
-                channel->target_vp = hv_context.vp_index[0];
+                channel->target_vp = hv_cpu_number_to_vp_number(0);
                return;
        }
@@ -683,7 +683,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
        }

        channel->target_cpu = cur_cpu;
-        channel->target_vp = hv_context.vp_index[cur_cpu];
+        channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
 }

 static void vmbus_wait_for_unload(void)
@@ -1219,8 +1219,7 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
                return outgoing_channel;
        }

-        cur_cpu = hv_context.vp_index[get_cpu()];
-        put_cpu();
+        cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());

        list_for_each_safe(cur, tmp, &primary->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                if (cur_channel->state != CHANNEL_OPENED_STATE)

drivers/hv/connection.c

@@ -96,7 +96,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
         * the CPU attempting to connect may not be CPU 0.
         */
        if (version >= VERSION_WIN8_1) {
-                msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+                msg->target_vcpu =
+                        hv_cpu_number_to_vp_number(smp_processor_id());
                vmbus_connection.connect_cpu = smp_processor_id();
        } else {
                msg->target_vcpu = 0;

drivers/hv/hv.c

@@ -234,7 +234,6 @@ int hv_synic_init(unsigned int cpu)
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;
-        u64 vp_index;

        /* Setup the Synic's message page */
        hv_get_simp(simp.as_uint64);
@@ -275,14 +274,6 @@ int hv_synic_init(unsigned int cpu)
        hv_context.synic_initialized = true;

-        /*
-         * Setup the mapping between Hyper-V's notion
-         * of cpuid and Linux' notion of cpuid.
-         * This array will be indexed using Linux cpuid.
-         */
-        hv_get_vp_index(vp_index);
-        hv_context.vp_index[cpu] = (u32)vp_index;
-
        /*
         * Register the per-cpu clockevent source.
         */

drivers/hv/hyperv_vmbus.h

@@ -228,17 +228,6 @@ struct hv_context {

        struct hv_per_cpu_context __percpu *cpu_context;

-        /*
-         * Hypervisor's notion of virtual processor ID is different from
-         * Linux' notion of CPU ID. This information can only be retrieved
-         * in the context of the calling CPU. Setup a map for easy access
-         * to this information:
-         *
-         * vp_index[a] is the Hyper-V's processor ID corresponding to
-         * Linux cpuid 'a'.
-         */
-        u32 vp_index[NR_CPUS];
-
        /*
         * To manage allocations in a NUMA node.
         * Array indexed by numa node ID.

drivers/hv/vmbus_drv.c

@@ -1451,23 +1451,6 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
 }
 EXPORT_SYMBOL_GPL(vmbus_free_mmio);

-/**
- * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
- * @cpu_number: CPU number in Linux terms
- *
- * This function returns the mapping between the Linux processor
- * number and the hypervisor's virtual processor number, useful
- * in making hypercalls and such that talk about specific
- * processors.
- *
- * Return: Virtual processor number in Hyper-V terms
- */
-int vmbus_cpu_number_to_vp_number(int cpu_number)
-{
-        return hv_context.vp_index[cpu_number];
-}
-EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);
-
 static int vmbus_acpi_add(struct acpi_device *device)
 {
        acpi_status result;

drivers/pci/host/pci-hyperv.c

@@ -562,52 +562,6 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);

-/*
- * Temporary CPU to vCPU mapping to address transitioning
- * vmbus_cpu_number_to_vp_number() being migrated to
- * hv_cpu_number_to_vp_number() in a separate patch. Once that patch
- * has been picked up in the main line, remove this code here and use
- * the official code.
- */
-static struct hv_tmpcpumap
-{
-        bool initialized;
-        u32 vp_index[NR_CPUS];
-} hv_tmpcpumap;
-
-static void hv_tmpcpumap_init_cpu(void *_unused)
-{
-        int cpu = smp_processor_id();
-        u64 vp_index;
-
-        hv_get_vp_index(vp_index);
-
-        hv_tmpcpumap.vp_index[cpu] = vp_index;
-}
-
-static void hv_tmpcpumap_init(void)
-{
-        if (hv_tmpcpumap.initialized)
-                return;
-
-        memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index));
-        on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true);
-        hv_tmpcpumap.initialized = true;
-}
-
-/**
- * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr
- *
- * Remove once vmbus_cpu_number_to_vp_number() has been converted to
- * hv_cpu_number_to_vp_number() and replace callers appropriately.
- */
-static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu)
-{
-        return hv_tmpcpumap.vp_index[cpu];
-}
-
 /**
  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
  * @devfn: The Linux representation of PCI slot
@@ -971,7 +925,7 @@ static void hv_irq_unmask(struct irq_data *data)
                var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;

                for_each_cpu_and(cpu, dest, cpu_online_mask) {
-                        cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
+                        cpu_vmbus = hv_cpu_number_to_vp_number(cpu);

                        if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
                                dev_err(&hbus->hdev->device,
@@ -986,7 +940,7 @@ static void hv_irq_unmask(struct irq_data *data)
        } else {
                for_each_cpu_and(cpu, dest, cpu_online_mask) {
                        params->int_target.vp_mask |=
-                                (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+                                (1ULL << hv_cpu_number_to_vp_number(cpu));
                }
        }
@@ -1063,7 +1017,7 @@ static u32 hv_compose_msi_req_v2(
         */
        cpu = cpumask_first_and(affinity, cpu_online_mask);
        int_pkt->int_desc.processor_array[0] =
-                hv_tmp_cpu_nr_to_vp_nr(cpu);
+                hv_cpu_number_to_vp_number(cpu);
        int_pkt->int_desc.processor_count = 1;

        return sizeof(*int_pkt);
@@ -2490,8 +2444,6 @@ static int hv_pci_probe(struct hv_device *hdev,
                return -ENOMEM;
        hbus->state = hv_pcibus_init;

-        hv_tmpcpumap_init();
-
        /*
         * The PCI bus "domain" is what is called "segment" in ACPI and
         * other specs. Pull it from the instance ID, to get something

include/linux/hyperv.h

@@ -1173,7 +1173,6 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                        resource_size_t size, resource_size_t align,
                        bool fb_overlap_ok);
 void vmbus_free_mmio(resource_size_t start, resource_size_t size);
-int vmbus_cpu_number_to_vp_number(int cpu_number);

 /*
  * GUID definitions of various offer types - services offered to the guest.