x86: use the pfn from the page when changing its attributes

When changing the attributes of a pte, we should use the PFN from the
existing PTE rather than going through hoops calculating what we think
it might have been; this is both fragile and totally unneeded. It also
makes it more hairy to call any of these functions on non-direct maps
for no good reason whatsoever.

With this change, __change_page_attr() no longer takes a pfn as argument,
which simplifies all the callers.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@tglx.de>
This commit is contained in:
Arjan van de Ven 2008-02-04 16:48:05 +01:00 committed by Ingo Molnar
parent cc0f21bbc1
commit 626c2c9d06
1 changed file with 17 additions and 14 deletions

View File

@ -277,17 +277,12 @@ static int split_large_page(pte_t *kpte, unsigned long address)
} }
static int static int
__change_page_attr(unsigned long address, unsigned long pfn, __change_page_attr(unsigned long address, pgprot_t mask_set, pgprot_t mask_clr)
pgprot_t mask_set, pgprot_t mask_clr)
{ {
struct page *kpte_page; struct page *kpte_page;
int level, err = 0; int level, err = 0;
pte_t *kpte; pte_t *kpte;
#ifdef CONFIG_X86_32
BUG_ON(pfn > max_low_pfn);
#endif
repeat: repeat:
kpte = lookup_address(address, &level); kpte = lookup_address(address, &level);
if (!kpte) if (!kpte)
@ -298,17 +293,25 @@ __change_page_attr(unsigned long address, unsigned long pfn,
BUG_ON(PageCompound(kpte_page)); BUG_ON(PageCompound(kpte_page));
if (level == PG_LEVEL_4K) { if (level == PG_LEVEL_4K) {
pgprot_t new_prot = pte_pgprot(*kpte);
pte_t new_pte, old_pte = *kpte; pte_t new_pte, old_pte = *kpte;
pgprot_t new_prot = pte_pgprot(old_pte);
if(!pte_val(old_pte)) {
WARN_ON_ONCE(1);
return -EINVAL;
}
pgprot_val(new_prot) &= ~pgprot_val(mask_clr); pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
pgprot_val(new_prot) |= pgprot_val(mask_set); pgprot_val(new_prot) |= pgprot_val(mask_set);
new_prot = static_protections(new_prot, address); new_prot = static_protections(new_prot, address);
new_pte = pfn_pte(pfn, canon_pgprot(new_prot)); /*
BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte)); * We need to keep the pfn from the existing PTE,
* after all we're only going to change it's attributes
* not the memory it points to
*/
new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
set_pte_atomic(kpte, new_pte); set_pte_atomic(kpte, new_pte);
} else { } else {
err = split_large_page(kpte, address); err = split_large_page(kpte, address);
@ -337,11 +340,11 @@ static int
change_page_attr_addr(unsigned long address, pgprot_t mask_set, change_page_attr_addr(unsigned long address, pgprot_t mask_set,
pgprot_t mask_clr) pgprot_t mask_clr)
{ {
unsigned long phys_addr = __pa(address);
unsigned long pfn = phys_addr >> PAGE_SHIFT;
int err; int err;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
unsigned long phys_addr = __pa(address);
/* /*
* If we are inside the high mapped kernel range, then we * If we are inside the high mapped kernel range, then we
* fixup the low mapping first. __va() returns the virtual * fixup the low mapping first. __va() returns the virtual
@ -351,7 +354,7 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set,
address = (unsigned long) __va(phys_addr); address = (unsigned long) __va(phys_addr);
#endif #endif
err = __change_page_attr(address, pfn, mask_set, mask_clr); err = __change_page_attr(address, mask_set, mask_clr);
if (err) if (err)
return err; return err;
@ -375,7 +378,7 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set,
* everything between 0 and KERNEL_TEXT_SIZE, so do * everything between 0 and KERNEL_TEXT_SIZE, so do
* not propagate lookup failures back to users: * not propagate lookup failures back to users:
*/ */
__change_page_attr(address, pfn, mask_set, mask_clr); __change_page_attr(address, mask_set, mask_clr);
} }
#endif #endif
return err; return err;