Merge branches 'x86/paravirt', 'x86/pat', 'x86/setup-v2', 'x86/subarch', 'x86/uaccess' and 'x86/urgent' into x86/core
commit 7032e86967
--- a/arch/x86/boot/a20.c
+++ b/arch/x86/boot/a20.c
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright 2007-2008 rPath, Inc. - All Rights Reserved
+ * Copyright 2009 Intel Corporation
  *
  * This file is part of the Linux kernel, and is made available under
  * the terms of the GNU General Public License version 2.
@@ -15,16 +16,23 @@
 #include "boot.h"
 
 #define MAX_8042_LOOPS	100000
+#define MAX_8042_FF	32
 
 static int empty_8042(void)
 {
 	u8 status;
 	int loops = MAX_8042_LOOPS;
+	int ffs = MAX_8042_FF;
 
 	while (loops--) {
 		io_delay();
 
 		status = inb(0x64);
+		if (status == 0xff) {
+			/* FF is a plausible, but very unlikely status */
+			if (!--ffs)
+				return -1; /* Assume no KBC present */
+		}
 		if (status & 1) {
 			/* Read and discard input data */
 			io_delay();
@@ -118,18 +126,14 @@ static void enable_a20_fast(void)
 
 int enable_a20(void)
 {
-#if defined(CONFIG_X86_ELAN)
-	/* Elan croaks if we try to touch the KBC */
-	enable_a20_fast();
-	while (!a20_test_long())
-		;
-	return 0;
-#elif defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_VOYAGER
 	/* On Voyager, a20_test() is unsafe? */
 	enable_a20_kbc();
 	return 0;
 #else
 	int loops = A20_ENABLE_LOOPS;
+	int kbc_err;
 
 	while (loops--) {
 		/* First, check to see if A20 is already enabled
 		   (legacy free, etc.) */
@@ -142,13 +146,16 @@ int enable_a20(void)
 			return 0;
 
 		/* Try enabling A20 through the keyboard controller */
-		empty_8042();
+		kbc_err = empty_8042();
 
 		if (a20_test_short())
 			return 0; /* BIOS worked, but with delayed reaction */
 
-		enable_a20_kbc();
-		if (a20_test_long())
-			return 0;
+		if (!kbc_err) {
+			enable_a20_kbc();
+			if (a20_test_long())
+				return 0;
+		}
 
 		/* Finally, try enabling the "fast A20 gate" */
 		enable_a20_fast();
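The empty_8042() change above bounds how long the loop will chase a floating status port: on a machine with no keyboard controller, reads of port 0x64 return 0xff forever, and the old loop only gave up after MAX_8042_LOOPS iterations. A minimal user-space sketch of the bailout logic, with the inb() read simulated and the helper names invented; only MAX_8042_LOOPS, MAX_8042_FF and the 0xff convention come from the patch:

#include <stdio.h>

#define MAX_8042_LOOPS	100000
#define MAX_8042_FF	32

/* Simulated inb(0x64): a machine without a keyboard controller
 * leaves the bus floating, so every status read returns 0xff. */
static unsigned char read_status(void)
{
	return 0xff;
}

/* Simplified mirror of the patched loop: 0xff is a legal status,
 * but MAX_8042_FF of them in a row is taken as "no KBC present". */
static int empty_8042(void)
{
	int loops = MAX_8042_LOOPS;
	int ffs = MAX_8042_FF;

	while (loops--) {
		unsigned char status = read_status();

		if (status == 0xff && !--ffs)
			return -1;	/* assume no KBC present */
		if (!(status & 3))
			return 0;	/* input and output buffers empty */
	}
	return -1;
}

int main(void)
{
	printf("empty_8042() = %d\n", empty_8042());	/* prints -1 */
	return 0;
}

Treating 32 consecutive 0xff reads as "no KBC" keeps the probe robust: 0xff is a legal transient status, so a single such read is not proof of a missing controller.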
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
 typedef struct { pgprotval_t pgprot; } pgprot_t;
 
 extern int page_is_ram(unsigned long pagenr);
-extern int pagerange_is_ram(unsigned long start, unsigned long end);
 extern int devmem_is_allowed(unsigned long pagenr);
 extern void map_devmem(unsigned long pfn, unsigned long size,
 		       pgprot_t vma_prot);
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1431,14 +1431,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_cpu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-}
-
+void arch_flush_lazy_cpu_mode(void);
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
@@ -1451,13 +1444,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_mmu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-}
+void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 				unsigned long phys, pgprot_t flags)
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -269,6 +269,8 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		now = hpet_readl(HPET_COUNTER);
 		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
+		/* Make sure we use edge triggered interrupts */
+		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
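The two added lines are a read-modify-write on the timer's config register: clear the level-trigger bit before setting the enable/periodic bits, so a value left behind by firmware cannot leave the timer level-triggered. A standalone sketch of the same masking; the bit values mimic the kernel's HPET_TN_* flags and the MMIO access is replaced by a plain variable:

#include <stdio.h>

/* Bit layout mimics the HPET timer config flags used above. */
#define HPET_TN_LEVEL		0x0002
#define HPET_TN_ENABLE		0x0004
#define HPET_TN_PERIODIC	0x0008
#define HPET_TN_SETVAL		0x0040
#define HPET_TN_32BIT		0x0100

int main(void)
{
	/* Pretend firmware left the timer level-triggered. */
	unsigned long cfg = HPET_TN_LEVEL;

	cfg &= ~HPET_TN_LEVEL;			/* force edge triggered */
	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
	       HPET_TN_SETVAL | HPET_TN_32BIT;

	printf("cfg = %#lx (level bit %s)\n", cfg,
	       (cfg & HPET_TN_LEVEL) ? "set" : "clear");
	return 0;
}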
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -286,6 +286,32 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
+void arch_flush_lazy_mmu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
+void arch_flush_lazy_cpu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_cpu_mode();
+		arch_enter_lazy_cpu_mode();
+	}
+
+	preempt_enable();
+}
+
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
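The out-of-line flush helpers pair a leave with an immediate re-enter, under preempt_disable(), so that batched hypervisor updates are forced out while the CPU stays in lazy mode. A toy user-space model of that life cycle; every name here is an invented analogue, and a counter stands in for queued PTE writes:

#include <stdio.h>

/* Toy model of lazy-mode batching: writes are queued while lazy
 * mode is on, and flush = leave + re-enter, as in the patch. */
static int lazy, queued, committed;

static void enter_lazy(void) { lazy = 1; }
static void leave_lazy(void) { committed += queued; queued = 0; lazy = 0; }

static void set_pte(int n)
{
	if (lazy)
		queued += n;	/* batched, not yet visible */
	else
		committed += n;
}

/* Mirrors arch_flush_lazy_mmu_mode(): drain the queue, stay lazy. */
static void flush_lazy(void)
{
	if (lazy) {
		leave_lazy();
		enter_lazy();
	}
}

int main(void)
{
	enter_lazy();
	set_pte(3);
	printf("before flush: committed=%d queued=%d\n", committed, queued);
	flush_lazy();
	printf("after flush:  committed=%d queued=%d\n", committed, queued);
	leave_lazy();
	return 0;
}

This is the pattern change_page_attr_set_clr() relies on further down: flush before examining PTEs so the in-memory state is not stale.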
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -805,12 +805,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
 
 static void ptrace_bts_detach(struct task_struct *child)
 {
-	if (unlikely(child->bts)) {
-		ds_release_bts(child->bts);
-		child->bts = NULL;
-
-		ptrace_bts_free_buffer(child);
-	}
+	/*
+	 * Ptrace_detach() races with ptrace_untrace() in case
+	 * the child dies and is reaped by another thread.
+	 *
+	 * We only do the memory accounting at this point and
+	 * leave the buffer deallocation and the bts tracer
+	 * release to ptrace_bts_untrace() which will be called
+	 * later on with tasklist_lock held.
+	 */
+	release_locked_buffer(child->bts_buffer, child->bts_size);
 }
 #else
 static inline void ptrace_bts_fork(struct task_struct *tsk) {}
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
-int pagerange_is_ram(unsigned long start, unsigned long end)
-{
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
-
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
-		if (page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
-
-		if (ram_page == not_rampage)
-			return -1;
-	}
-
-	return ram_page;
-}
-
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -575,7 +575,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
-
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
@@ -812,6 +811,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
+	/*
+	 * If we're called with lazy mmu updates enabled, the
+	 * in-memory pte state may be stale. Flush pending updates to
+	 * bring them up to date.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -854,6 +860,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
+	/*
+	 * If we've been called with lazy mmu updates enabled, then
+	 * make sure that everything gets flushed out before we
+	 * return.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 out:
 	return ret;
 }
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -220,6 +220,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 static struct memtype *cached_entry;
 static u64 cached_start;
 
+static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+{
+	int ram_page = 0, not_rampage = 0;
+	unsigned long page_nr;
+
+	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+	     ++page_nr) {
+		/*
+		 * For legacy reasons, physical address range in the legacy ISA
+		 * region is tracked as non-RAM. This will allow users of
+		 * /dev/mem to map portions of legacy ISA region, even when
+		 * some of those portions are listed(or not even listed) with
+		 * different e820 types(RAM/reserved/..)
+		 */
+		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
+		    page_is_ram(page_nr))
+			ram_page = 1;
+		else
+			not_rampage = 1;
+
+		if (ram_page == not_rampage)
+			return -1;
+	}
+
+	return ram_page;
+}
+
 /*
  * For RAM pages, mark the pages as non WB memory type using
  * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -345,20 +372,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_type)
 		*new_type = actual_type;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return reserve_ram_pages_type(start, end, req_type,
-						      new_type);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return reserve_ram_pages_type(start, end, req_type,
+					      new_type);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -455,19 +474,11 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return free_ram_pages_type(start, end);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return free_ram_pages_type(start, end);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
@@ -635,17 +646,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
-	if (is_ram != 0) {
-		/*
-		 * For mapping RAM pages, drivers need to call
-		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
-		 * setting up the PTE.
-		 */
-		WARN_ON_ONCE(1);
-		return 0;
-	}
+	/*
+	 * reserve_pfn_range() doesn't support RAM pages.
+	 */
+	if (is_ram != 0)
+		return -EINVAL;
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
@@ -702,7 +709,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 {
 	int is_ram;
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 	if (is_ram == 0)
 		free_memtype(paddr, paddr + size);
 }
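pat_pagerange_is_ram() deliberately returns a tri-state: 1 when every page in the range is RAM, 0 when none is, and -1 for a mixed range, which both callers above reject with -EINVAL. A standalone sketch of the same classification over a toy page map; the bitmap, the helper names and the 8-page layout are invented for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Toy e820: pages 0..7, 1 = RAM. Pages 4-7 are RAM here. */
static const int toy_is_ram[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Same tri-state contract as pat_pagerange_is_ram():
 *  1 = every page is RAM, 0 = none is, -1 = mixed. */
static int range_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = start >> PAGE_SHIFT; page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (toy_is_ram[page_nr])
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;	/* mixed: callers return -EINVAL */
	}
	return ram_page;
}

int main(void)
{
	printf("%d\n", range_is_ram(0x0000, 0x4000));	/* 0: non-RAM */
	printf("%d\n", range_is_ram(0x4000, 0x8000));	/* 1: all RAM */
	printf("%d\n", range_is_ram(0x0000, 0x8000));	/* -1: mixed  */
	return 0;
}

In the callers, 1 means "delegate to the PageNonWB page-flag tracking", 0 means "track the range in the linear memtype_list", and -1 is refused outright.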
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1304,5 +1304,6 @@ void vmemmap_populate_print_last(void);
 
 extern void *alloc_locked_buffer(size_t size);
 extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
 	return buffer;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
 	current->mm->locked_vm -= pgsz;
 
 	up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	release_locked_buffer(buffer, size);
 
 	kfree(buffer);
 }
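Splitting free_locked_buffer() lets ptrace_bts_detach() undo the locked_vm accounting immediately while deferring the actual kfree() to ptrace_bts_untrace(). A toy user-space model of the split; a plain counter stands in for mm->locked_vm, and malloc()/free() stand in for the kernel allocator:

#include <stdio.h>
#include <stdlib.h>

static long locked_vm;	/* stands in for current->mm->locked_vm */

static void *alloc_locked_buffer(size_t size)
{
	locked_vm += size;
	return malloc(size);
}

/* Undo the accounting only; the memory stays allocated. */
static void release_locked_buffer(void *buffer, size_t size)
{
	(void)buffer;
	locked_vm -= size;
}

/* Accounting plus actual deallocation, as before the patch. */
static void free_locked_buffer(void *buffer, size_t size)
{
	release_locked_buffer(buffer, size);
	free(buffer);
}

int main(void)
{
	void *buf = alloc_locked_buffer(64);

	release_locked_buffer(buf, 64);	/* detach: account now...    */
	printf("locked_vm = %ld\n", locked_vm);
	free(buf);			/* ...free later, in untrace */

	buf = alloc_locked_buffer(32);
	free_locked_buffer(buf, 32);	/* combined path             */
	printf("locked_vm = %ld\n", locked_vm);
	return 0;
}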