mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Work around compilation warning in arch/x86/kernel/apm_32.c
  x86, UV: Complete IRQ interrupt migration in arch_enable_uv_irq()
  x86, 32-bit: Fix double accounting in reserve_top_address()
  x86: Don't use current_cpu_data in x2apic phys_pkg_id
  x86, UV: Fix UV apic mode
  x86, UV: Fix macros for accessing large node numbers
  x86, UV: Delete mapping of MMR rangs mapped by BIOS
  x86, UV: Handle missing blade-local memory correctly
  x86: fix assembly constraints in native_save_fl()
  x86, msr: execute on the correct CPU subset
  x86: Fix assert syntax in vmlinux.lds.S
  x86: Make 64-bit efi_ioremap use ioremap on MMIO regions
  x86: Add quirk to make Apple MacBook5,2 use reboot=pci
  x86: Fix CPA memtype reserving in the set_pages_array*() cases
  x86, pat: Fix set_memory_wc related corruption
  x86: fix section mismatch for i386 init code
commit 067e18133f
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
         efi_call_virt(f, a1, a2, a3, a4, a5, a6)

-#define efi_ioremap(addr, size)         ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type)   ioremap_cache(addr, size)

 #else /* !CONFIG_X86_32 */

@@ -84,7 +84,8 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
         efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                   (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))

-extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+                                 u32 type);

 #endif /* CONFIG_X86_32 */

@@ -12,9 +12,15 @@ static inline unsigned long native_save_fl(void)
 {
         unsigned long flags;

+        /*
+         * Note: this needs to be "=r" not "=rm", because we have the
+         * stack offset from what gcc expects at the time the "pop" is
+         * executed, and so a memory reference with respect to the stack
+         * would end up using the wrong address.
+         */
         asm volatile("# __raw_save_flags\n\t"
                      "pushf ; pop %0"
-                     : "=g" (flags)
+                     : "=r" (flags)
                      : /* no input */
                      : "memory");

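Editor's note: the constraint change above is easiest to see outside the kernel. Below is a minimal user-space sketch (x86-64 GCC/Clang inline asm, not the kernel's irqflags.h) of the same pushf/pop sequence with the register-only "=r" output the fix mandates: with "=g"/"=rm" the compiler may pick a stack-relative memory operand whose address is computed against a stack pointer that the pushf has just moved, exactly as the new comment explains.

/* build: gcc -O2 flags.c && ./a.out   (x86-64 only; illustration, not kernel code) */
#include <stdio.h>

static inline unsigned long save_flags(void)
{
        unsigned long flags;

        asm volatile("pushfq ; popq %0"
                     : "=r" (flags)     /* register-only output, as in the fix */
                     : /* no input */
                     : "memory");
        return flags;
}

int main(void)
{
        printf("RFLAGS = %#lx\n", save_flags());
        return 0;
}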
@@ -175,7 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define UV_GLOBAL_MMR32_PNODE_BITS(p)   ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

 #define UV_GLOBAL_MMR64_PNODE_BITS(p) \
-        ((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+        (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

 #define UV_APIC_PNODE_SHIFT 6

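Editor's note: for readers new to the UV MMR macros, a toy sketch of what UV_GLOBAL_MMR64_PNODE_BITS() computes: the pnode is widened to unsigned long and shifted into the upper bits of the 64-bit MMR offset. The shift value below is an assumption for illustration only, not taken from the UV headers, and this is not kernel code.

/* toy only: the shift constant is assumed, not the real UV value */
#include <stdio.h>

#define MMR64_PNODE_SHIFT 26

static unsigned long pnode_bits(int pnode)
{
        return ((unsigned long)pnode) << MMR64_PNODE_SHIFT;
}

int main(void)          /* assumes an LP64 machine (64-bit unsigned long) */
{
        printf("pnode 5    -> offset bits %#lx\n", pnode_bits(5));
        printf("pnode 1000 -> offset bits %#lx\n", pnode_bits(1000));
        return 0;
}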
@@ -327,6 +327,7 @@ struct uv_blade_info {
         unsigned short  nr_possible_cpus;
         unsigned short  nr_online_cpus;
         unsigned short  pnode;
+        short           memory_nid;
 };
 extern struct uv_blade_info *uv_blade_info;
 extern short *uv_node_to_blade;
@@ -363,6 +364,12 @@ static inline int uv_blade_to_pnode(int bid)
         return uv_blade_info[bid].pnode;
 }

+/* Nid of memory node on blade. -1 if no blade-local memory */
+static inline int uv_blade_to_memory_nid(int bid)
+{
+        return uv_blade_info[bid].memory_nid;
+}
+
 /* Determine the number of possible cpus on a blade */
 static inline int uv_blade_nr_possible_cpus(int bid)
 {
@@ -3793,6 +3793,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
         mmr_pnode = uv_blade_to_pnode(mmr_blade);
         uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

+        if (cfg->move_in_progress)
+                send_cleanup_vector(cfg);
+
         return irq;
 }

@@ -170,7 +170,7 @@ static unsigned long set_apic_id(unsigned int id)

 static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
 {
-        return current_cpu_data.initial_apicid >> index_msb;
+        return initial_apicid >> index_msb;
 }

 static void x2apic_send_IPI_self(int vector)
@@ -162,7 +162,7 @@ static unsigned long set_apic_id(unsigned int id)

 static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
 {
-        return current_cpu_data.initial_apicid >> index_msb;
+        return initial_apicid >> index_msb;
 }

 static void x2apic_send_IPI_self(int vector)
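Editor's note: both hunks make phys_pkg_id() derive the package from the initial_apicid argument it is handed instead of reading it back out of per-CPU state. A toy, user-space illustration of the shift (the index_msb value and APIC IDs below are made up):

#include <stdio.h>

static int phys_pkg_id(int initial_apicid, int index_msb)
{
        return initial_apicid >> index_msb;
}

int main(void)
{
        int apicid;

        /* with index_msb = 4, APIC IDs 0x20..0x2f all land in package 2 */
        for (apicid = 0x20; apicid <= 0x23; apicid++)
                printf("apicid %#x -> package %d\n",
                       apicid, phys_pkg_id(apicid, 4));
        return 0;
}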
@@ -261,7 +261,7 @@ struct apic apic_x2apic_uv_x = {
         .apic_id_registered             = uv_apic_id_registered,

         .irq_delivery_mode              = dest_Fixed,
-        .irq_dest_mode                  = 1, /* logical */
+        .irq_dest_mode                  = 0, /* physical */

         .target_cpus                    = uv_target_cpus,
         .disable_esr                    = 0,
@@ -362,12 +362,6 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
                 BUG();
 }

-static __init void map_low_mmrs(void)
-{
-        init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
-        init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
-}
-
 enum map_type {map_wb, map_uc};

 static __init void map_high(char *id, unsigned long base, int shift,
@@ -395,26 +389,6 @@ static __init void map_gru_high(int max_pnode)
                 map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
 }

-static __init void map_config_high(int max_pnode)
-{
-        union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
-        int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-        cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
-        if (cfg.s.enable)
-                map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
-}
-
-static __init void map_mmr_high(int max_pnode)
-{
-        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
-        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
-        if (mmr.s.enable)
-                map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
-}
-
 static __init void map_mmioh_high(int max_pnode)
 {
         union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
@@ -566,8 +540,6 @@ void __init uv_system_init(void)
         unsigned long mmr_base, present, paddr;
         unsigned short pnode_mask;

-        map_low_mmrs();
-
         m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
         m_val = m_n_config.s.m_skt;
         n_val = m_n_config.s.n_skt;
@@ -591,6 +563,8 @@ void __init uv_system_init(void)
         bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
         uv_blade_info = kmalloc(bytes, GFP_KERNEL);
         BUG_ON(!uv_blade_info);
+        for (blade = 0; blade < uv_num_possible_blades(); blade++)
+                uv_blade_info[blade].memory_nid = -1;

         get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

@@ -629,6 +603,9 @@ void __init uv_system_init(void)
                 lcpu = uv_blade_info[blade].nr_possible_cpus;
                 uv_blade_info[blade].nr_possible_cpus++;

+                /* Any node on the blade, else will contain -1. */
+                uv_blade_info[blade].memory_nid = nid;
+
                 uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                 uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                 uv_cpu_hub_info(cpu)->m_val = m_val;
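Editor's note: together with the new memory_nid field in uv_hub.h, the two uv_system_init() hunks above implement a simple sentinel scheme: every blade starts at -1, and a node id is recorded only when the topology walk visits a node/cpu on that blade, so uv_blade_to_memory_nid() can report missing blade-local memory as -1. A toy model in plain C (array and values are illustrative, not the kernel's):

#include <stdio.h>

#define NBLADES 4

int main(void)
{
        short memory_nid[NBLADES];
        int blade;

        for (blade = 0; blade < NBLADES; blade++)
                memory_nid[blade] = -1;         /* sentinel: no local memory */

        /* pretend the walk found memory nodes on blades 0 and 2 */
        memory_nid[0] = 0;
        memory_nid[2] = 5;

        for (blade = 0; blade < NBLADES; blade++) {
                if (memory_nid[blade] == -1)
                        printf("blade %d: no blade-local memory\n", blade);
                else
                        printf("blade %d: nid %d\n", blade, memory_nid[blade]);
        }
        return 0;
}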
@@ -662,11 +639,10 @@ void __init uv_system_init(void)
                 pnode = (paddr >> m_val) & pnode_mask;
                 blade = boot_pnode_to_blade(pnode);
                 uv_node_to_blade[nid] = blade;
+                max_pnode = max(pnode, max_pnode);
         }

         map_gru_high(max_pnode);
-        map_mmr_high(max_pnode);
-        map_config_high(max_pnode);
         map_mmioh_high(max_pnode);

         uv_cpu_init();
@@ -811,7 +811,7 @@ static int apm_do_idle(void)
         u8 ret = 0;
         int idled = 0;
         int polling;
-        int err;
+        int err = 0;

         polling = !!(current_thread_info()->status & TS_POLLING);
         if (polling) {
@@ -512,7 +512,7 @@ void __init efi_enter_virtual_mode(void)
                     && end_pfn <= max_pfn_mapped))
                         va = __va(md->phys_addr);
                 else
-                        va = efi_ioremap(md->phys_addr, size);
+                        va = efi_ioremap(md->phys_addr, size, md->type);

                 md->virt_addr = (u64) (unsigned long) va;

@@ -98,10 +98,14 @@ void __init efi_call_phys_epilog(void)
         early_runtime_code_mapping_set_exec(0);
 }

-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+                                 u32 type)
 {
         unsigned long last_map_pfn;

+        if (type == EFI_MEMORY_MAPPED_IO)
+                return ioremap(phys_addr, size);
+
         last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
         if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
                 return NULL;
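Editor's note: with the new type argument, the 64-bit efi_ioremap() routes EFI_MEMORY_MAPPED_IO ranges through ioremap() instead of extending the kernel's direct mapping over them. A toy dispatcher showing the shape of that decision; the enum and strings are made up, not the EFI memory types or kernel APIs.

#include <stdio.h>

enum region_type { REGION_RAM, REGION_MMIO };   /* illustrative only */

static const char *map_region(enum region_type type)
{
        if (type == REGION_MMIO)
                return "ioremap-style uncached MMIO mapping";
        return "direct mapping (cacheable RAM)";
}

int main(void)
{
        printf("RAM  -> %s\n", map_region(REGION_RAM));
        printf("MMIO -> %s\n", map_region(REGION_MMIO));
        return 0;
}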
@@ -602,7 +602,11 @@ ignore_int:
 #endif
         iret

-.section .cpuinit.data,"wa"
+#ifndef CONFIG_HOTPLUG_CPU
+        __CPUINITDATA
+#else
+        __REFDATA
+#endif
 .align 4
 ENTRY(initial_code)
         .long i386_start_kernel
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/efi.h>
+#include <linux/dmi.h>
 #include <acpi/reboot.h>
 #include <asm/io.h>
 #include <asm/apic.h>
@@ -17,7 +18,6 @@
 #include <asm/cpu.h>

 #ifdef CONFIG_X86_32
-# include <linux/dmi.h>
 # include <linux/ctype.h>
 # include <linux/mc146818rtc.h>
 #else
@@ -404,6 +404,38 @@ EXPORT_SYMBOL(machine_real_restart);

 #endif /* CONFIG_X86_32 */

+/*
+ * Apple MacBook5,2 (2009 MacBook) needs reboot=p
+ */
+static int __init set_pci_reboot(const struct dmi_system_id *d)
+{
+        if (reboot_type != BOOT_CF9) {
+                reboot_type = BOOT_CF9;
+                printk(KERN_INFO "%s series board detected. "
+                        "Selecting PCI-method for reboots.\n", d->ident);
+        }
+        return 0;
+}
+
+static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+        {       /* Handle problems with rebooting on Apple MacBook5,2 */
+                .callback = set_pci_reboot,
+                .ident = "Apple MacBook",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+                },
+        },
+        { }
+};
+
+static int __init pci_reboot_init(void)
+{
+        dmi_check_system(pci_reboot_dmi_table);
+        return 0;
+}
+core_initcall(pci_reboot_init);
+
 static inline void kb_wait(void)
 {
         int i;
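Editor's note: the quirk above is purely table-driven; a DMI match flips the reboot method to BOOT_CF9 (the PCI port 0xCF9 method, the same thing reboot=pci/reboot=p selects on the command line). Below is a toy re-implementation of the table-plus-callback pattern in plain C; struct quirk and its fields are illustrative stand-ins, not the kernel's dmi_system_id.

#include <stdio.h>
#include <string.h>

struct quirk {
        const char *vendor;
        const char *product;
        void (*apply)(const char *ident);
        const char *ident;
};

static void use_pci_reboot(const char *ident)
{
        printf("%s: selecting PCI (CF9) reboot method\n", ident);
}

static const struct quirk quirks[] = {
        { "Apple Inc.", "MacBook5,2", use_pci_reboot, "Apple MacBook" },
        { NULL, NULL, NULL, NULL },
};

int main(void)
{
        /* pretend these strings were read from the firmware's DMI tables */
        const char *sys_vendor = "Apple Inc.", *product = "MacBook5,2";
        const struct quirk *q;

        for (q = quirks; q->vendor; q++)
                if (!strcmp(q->vendor, sys_vendor) && !strcmp(q->product, product))
                        q->apply(q->ident);
        return 0;
}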
@@ -393,8 +393,8 @@ SECTIONS


 #ifdef CONFIG_X86_32
-ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
-        "kernel image bigger than KERNEL_IMAGE_SIZE")
+. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+           "kernel image bigger than KERNEL_IMAGE_SIZE");
 #else
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
@@ -407,12 +407,12 @@ INIT_PER_CPU(irq_stack_union);
 /*
  * Build-time check on the image size:
  */
-ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-        "kernel image bigger than KERNEL_IMAGE_SIZE")
+. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
+           "kernel image bigger than KERNEL_IMAGE_SIZE");

 #ifdef CONFIG_SMP
-ASSERT((per_cpu__irq_stack_union == 0),
+. = ASSERT((per_cpu__irq_stack_union == 0),
         "irq_stack_union is not at start of per-cpu area");
 #endif

 #endif /* CONFIG_X86_32 */
@@ -420,7 +420,7 @@ ASSERT((per_cpu__irq_stack_union == 0),
 #ifdef CONFIG_KEXEC
 #include <asm/kexec.h>

-ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
-        "kexec control code size is too big")
+. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
+           "kexec control code size is too big");
 #endif

@@ -89,16 +89,13 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
         rv.msrs   = msrs;
         rv.msr_no = msr_no;

-        preempt_disable();
-        /*
-         * FIXME: handle the CPU we're executing on separately for now until
-         * smp_call_function_many has been fixed to not skip it.
-         */
-        this_cpu = raw_smp_processor_id();
-        smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
+        this_cpu = get_cpu();
+
+        if (cpumask_test_cpu(this_cpu, mask))
+                __rdmsr_on_cpu(&rv);

         smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
-        preempt_enable();
+        put_cpu();
 }
 EXPORT_SYMBOL(rdmsr_on_cpus);

@@ -121,16 +118,13 @@ void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
         rv.msrs   = msrs;
         rv.msr_no = msr_no;

-        preempt_disable();
-        /*
-         * FIXME: handle the CPU we're executing on separately for now until
-         * smp_call_function_many has been fixed to not skip it.
-         */
-        this_cpu = raw_smp_processor_id();
-        smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
+        this_cpu = get_cpu();
+
+        if (cpumask_test_cpu(this_cpu, mask))
+                __wrmsr_on_cpu(&rv);

         smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
-        preempt_enable();
+        put_cpu();
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);

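Editor's note: both msr hunks rest on the property the deleted FIXME spelled out: the cross-call helper does not run the function on the calling CPU, so the caller must invoke it locally, but only when it is itself part of the requested mask. A toy model of that control flow in plain C (no kernel APIs, made-up mask and CPU numbers):

#include <stdio.h>

static void handler(int cpu)
{
        printf("handler ran on cpu %d\n", cpu);
}

/* stand-in for the cross-call: runs on every masked CPU except 'self' */
static void run_on_others(unsigned long mask, int self)
{
        int cpu;

        for (cpu = 0; cpu < 8; cpu++)
                if ((mask & (1UL << cpu)) && cpu != self)
                        handler(cpu);
}

int main(void)
{
        unsigned long mask = 0x0b;      /* cpus 0, 1, 3 */
        int this_cpu = 1;               /* pretend we are cpu 1 */

        if (mask & (1UL << this_cpu))   /* like cpumask_test_cpu() */
                handler(this_cpu);
        run_on_others(mask, this_cpu);
        return 0;
}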
@@ -591,9 +591,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
         unsigned int level;
         pte_t *kpte, old_pte;

-        if (cpa->flags & CPA_PAGES_ARRAY)
-                address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-        else if (cpa->flags & CPA_ARRAY)
+        if (cpa->flags & CPA_PAGES_ARRAY) {
+                struct page *page = cpa->pages[cpa->curpage];
+                if (unlikely(PageHighMem(page)))
+                        return 0;
+                address = (unsigned long)page_address(page);
+        } else if (cpa->flags & CPA_ARRAY)
                 address = cpa->vaddr[cpa->curpage];
         else
                 address = *cpa->vaddr;
@@ -697,9 +700,12 @@ static int cpa_process_alias(struct cpa_data *cpa)
          * No need to redo, when the primary call touched the direct
          * mapping already:
          */
-        if (cpa->flags & CPA_PAGES_ARRAY)
-                vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
-        else if (cpa->flags & CPA_ARRAY)
+        if (cpa->flags & CPA_PAGES_ARRAY) {
+                struct page *page = cpa->pages[cpa->curpage];
+                if (unlikely(PageHighMem(page)))
+                        return 0;
+                vaddr = (unsigned long)page_address(page);
+        } else if (cpa->flags & CPA_ARRAY)
                 vaddr = cpa->vaddr[cpa->curpage];
         else
                 vaddr = *cpa->vaddr;
@@ -997,12 +1003,15 @@ EXPORT_SYMBOL(set_memory_array_uc);
 int _set_memory_wc(unsigned long addr, int numpages)
 {
         int ret;
+        unsigned long addr_copy = addr;
+
         ret = change_page_attr_set(&addr, numpages,
                                     __pgprot(_PAGE_CACHE_UC_MINUS), 0);
-
         if (!ret) {
-                ret = change_page_attr_set(&addr, numpages,
-                                    __pgprot(_PAGE_CACHE_WC), 0);
+                ret = change_page_attr_set_clr(&addr_copy, numpages,
+                                               __pgprot(_PAGE_CACHE_WC),
+                                               __pgprot(_PAGE_CACHE_MASK),
+                                               0, 0, NULL);
         }
         return ret;
 }
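Editor's note: the _set_memory_wc() change switches the second pass to change_page_attr_set_clr() so the previously set cache-attribute bits are cleared before the WC bits go in. The sketch below shows why set-without-clear is dangerous for a multi-bit field; the bit values are made up for illustration and are not the real _PAGE_CACHE_* encodings.

#include <stdio.h>

/* toy encodings only; the real ones live in the x86 PAT setup */
#define CACHE_MASK      0x18UL
#define CACHE_UC_MINUS  0x10UL
#define CACHE_WC        0x08UL

int main(void)
{
        unsigned long pte = CACHE_UC_MINUS;     /* first pass set UC- */

        unsigned long wrong = pte | CACHE_WC;                   /* set only */
        unsigned long right = (pte & ~CACHE_MASK) | CACHE_WC;   /* clear+set */

        printf("set only  : %#lx (a third, unintended type)\n", wrong);
        printf("clear+set : %#lx (WC, as intended)\n", right);
        return 0;
}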
@@ -1119,7 +1128,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
         int free_idx;

         for (i = 0; i < addrinarray; i++) {
-                start = (unsigned long)page_address(pages[i]);
+                if (PageHighMem(pages[i]))
+                        continue;
+                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                 end = start + PAGE_SIZE;
                 if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
                         goto err_out;
@@ -1132,7 +1143,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
 err_out:
         free_idx = i;
         for (i = 0; i < free_idx; i++) {
-                start = (unsigned long)page_address(pages[i]);
+                if (PageHighMem(pages[i]))
+                        continue;
+                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                 end = start + PAGE_SIZE;
                 free_memtype(start, end);
         }
@@ -1161,7 +1174,9 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
                 return retval;

         for (i = 0; i < addrinarray; i++) {
-                start = (unsigned long)page_address(pages[i]);
+                if (PageHighMem(pages[i]))
+                        continue;
+                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                 end = start + PAGE_SIZE;
                 free_memtype(start, end);
         }
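Editor's note: in the three loops above, the start/end handed to reserve_memtype()/free_memtype() are now physical addresses built from the pfn, and highmem pages are skipped because they need not have a permanent kernel mapping for page_address() to return. A trivial sketch of the pfn-to-physical-range arithmetic (PAGE_SHIFT assumed to be 12, i.e. 4 KiB pages; the pfn is made up):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long pfn = 0x12345;
        unsigned long long start = (unsigned long long)pfn << PAGE_SHIFT;
        unsigned long long end = start + (1ULL << PAGE_SHIFT);

        printf("pfn %#lx -> phys range [%#llx, %#llx)\n", pfn, start, end);
        return 0;
}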
@@ -329,7 +329,6 @@ void __init reserve_top_address(unsigned long reserve)
         printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
                (int)-reserve);
         __FIXADDR_TOP = -reserve - PAGE_SIZE;
-        __VMALLOC_RESERVE += reserve;
 #endif
 }

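Editor's note: the deleted line is the double accounting named in the shortlog. Lowering __FIXADDR_TOP by 'reserve' already charges the reservation against the directly mapped area, so also growing __VMALLOC_RESERVE charged it a second time. Made-up numbers below, just to show the double charge:

#include <stdio.h>

int main(void)
{
        unsigned long lowmem_budget = 896UL << 20;      /* illustrative */
        unsigned long reserve = 16UL << 20;             /* reserved at the top */

        unsigned long intended = lowmem_budget - reserve;       /* charged once */
        unsigned long double_charged = lowmem_budget - 2 * reserve;

        printf("intended lowmem  : %lu MB\n", intended >> 20);
        printf("double-accounted : %lu MB\n", double_charged >> 20);
        return 0;
}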