x86: use the same pgd_list for PAE and 64-bit
Use a standard list threaded through page->lru for maintaining the pgd list on PAE. This is the same as 64-bit, and seems saner than using a non-standard list via page->index.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent fa28ba21ce
commit e3ed910db2
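What "threading a list through page->lru" buys: struct list_head is the kernel's intrusive doubly-linked list, where the links live inside the object itself and container_of() recovers the object from its link. The sketch below re-creates the pattern in self-contained userspace C to show the moving parts; mock_page, its fields, and the simplified macros are illustrative stand-ins, not the real <linux/list.h>:

	#include <stddef.h>
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	/* Insert 'new' right after 'head'. */
	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	/* Unlink 'entry'; O(1), no need to know which list it is on. */
	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Stand-in for struct page: the link is embedded in the object. */
	struct mock_page {
		unsigned long pfn;
		struct list_head lru;	/* plays the role of page->lru */
	};

	static struct list_head pgd_list = LIST_HEAD_INIT(pgd_list);

	int main(void)
	{
		struct mock_page a = { .pfn = 1 }, b = { .pfn = 2 };
		struct list_head *pos;

		list_add(&a.lru, &pgd_list);
		list_add(&b.lru, &pgd_list);

		/* list_for_each_entry(), expanded by hand */
		for (pos = pgd_list.next; pos != &pgd_list; pos = pos->next) {
			struct mock_page *p = container_of(pos, struct mock_page, lru);
			printf("pfn %lu\n", p->pfn);
		}

		list_del(&a.lru);
		return 0;
	}

Because the links are doubly linked, deletion needs no head pointer and no traversal, which is exactly what lets the hand-rolled bookkeeping below be deleted.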
arch/x86/mm/fault.c
@@ -907,10 +907,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
-#ifdef CONFIG_X86_64
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
-#endif
 
 void vmalloc_sync_all(void)
 {
@@ -935,13 +933,11 @@ void vmalloc_sync_all(void)
 			struct page *page;
 
 			spin_lock_irqsave(&pgd_lock, flags);
-			for (page = pgd_list; page; page =
-			     (struct page *)page->index)
+			list_for_each_entry(page, &pgd_list, lru) {
 				if (!vmalloc_sync_one(page_address(page),
-						      address)) {
-					BUG_ON(page != pgd_list);
+						      address))
 					break;
-				}
+			}
 			spin_unlock_irqrestore(&pgd_lock, flags);
 			if (!page)
 				set_bit(pgd_index(address), insync);
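For reference, list_for_each_entry() expands to roughly the following (simplified; the <linux/list.h> of this era also slipped a prefetch() hint into the loop condition):

	#define list_for_each_entry(pos, head, member)				\
		for (pos = list_entry((head)->next, typeof(*pos), member);	\
		     &pos->member != (head);					\
		     pos = list_entry(pos->member.next, typeof(*pos), member))

One subtlety visible in the hunk above: the open-coded loop left page NULL after a complete walk, which the trailing if (!page) relied on; list_for_each_entry() instead leaves pos pointing at the list_entry() computed from the head itself, which never compares equal to NULL.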
arch/x86/mm/pageattr.c
@@ -175,7 +175,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	if (!SHARED_KERNEL_PMD) {
 		struct page *page;
 
-		for (page = pgd_list; page; page = (struct page *)page->index) {
+		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			pud_t *pud;
 			pmd_t *pmd;
arch/x86/mm/pgtable_32.c
@@ -205,27 +205,18 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * vmalloc faults work because attached pagetables are never freed.
  * -- wli
  */
-DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
-
 static inline void pgd_list_add(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);
 
-	page->index = (unsigned long)pgd_list;
-	if (pgd_list)
-		set_page_private(pgd_list, (unsigned long)&page->index);
-	pgd_list = page;
-	set_page_private(page, (unsigned long)&pgd_list);
+	list_add(&page->lru, &pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-	struct page *next, **pprev, *page = virt_to_page(pgd);
-	next = (struct page *)page->index;
-	pprev = (struct page **)page_private(page);
-	*pprev = next;
-	if (next)
-		set_page_private(next, (unsigned long)pprev);
+	struct page *page = virt_to_page(pgd);
+
+	list_del(&page->lru);
 }
 
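The code deleted here is a hand-rolled version of the kernel's hlist idiom: page->index served as the "next" pointer and page_private() as a back-pointer to the previous node's next field, which is what made *pprev = next work without special-casing the list head. A compilable sketch of that pprev pattern (the names are illustrative), for comparison with the two-line list_add()/list_del() replacing it:

	/* Singly-linked list with a back-pointer to the previous "next"
	 * field -- the idiom the removed pgd_list_add()/pgd_list_del()
	 * built out of page->index and page_private(). */
	struct node {
		struct node *next;	/* was (struct page *)page->index */
		struct node **pprev;	/* was (struct node **)page_private(page) */
	};

	static struct node *head;

	static void node_add(struct node *n)
	{
		n->next = head;
		if (head)
			head->pprev = &n->next;
		head = n;
		n->pprev = &head;
	}

	static void node_del(struct node *n)
	{
		*n->pprev = n->next;	/* head and interior nodes alike */
		if (n->next)
			n->next->pprev = n->pprev;
	}

With a standard list_head the same O(1) unlink falls out of the doubly-linked prev pointer, and struct page no longer has two unrelated fields (index and private) overloaded to carry list state.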
include/asm-x86/pgtable.h
@@ -131,6 +131,8 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
+extern spinlock_t pgd_lock;
+extern struct list_head pgd_list;
 
 /*
  * The following only work if pte_present() is true.
include/asm-x86/pgtable_32.h
@@ -27,8 +27,6 @@ struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
 extern struct kmem_cache *pmd_cache;
-extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
 void check_pgt_cache(void);
 
 static inline void pgtable_cache_init(void) {}
include/asm-x86/pgtable_64.h
@@ -240,9 +240,6 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { .pte = (x).val })
 
-extern spinlock_t pgd_lock;
-extern struct list_head pgd_list;
-
 extern int kern_addr_valid(unsigned long addr);
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\