linux_old1/arch/arm/mm/pgd.c

/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

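	/* order-2 allocation: four contiguous pages, i.e. the 16KB needed
	 * for a full ARM first-level page table */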
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

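	/* the hardware table walker fetches page-table entries from memory,
	 * not from the data cache, so write the new table back out */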
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

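		/* copy the vector page mapping from init_mm so that
		 * virtual address 0 stays valid in the new page tables */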
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

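	/* release the pmd/pte that get_pgd_slow() installed for the
	 * vector page before freeing the first-level table itself */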
	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}