From 7fc7e2eeecb599ba719c4c4503100fc8cd6a6920 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sun, 11 Dec 2005 19:57:52 -0800
Subject: [PATCH] Remove (at least temporarily) the "incomplete PFN mapping" support

With the previous commit, we can handle arbitrary shared re-mappings
even without this complexity, and since the only known private mappings
are for strange users of /dev/mem (which never create an incomplete one),
there seems to be no reason to support it.

Signed-off-by: Linus Torvalds
---
 include/linux/mm.h |  1 -
 mm/memory.c        | 46 +---------------------------------------------
 2 files changed, 1 insertion(+), 46 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 29f02d8513f6..e5677f456742 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -163,7 +163,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#define VM_INCOMPLETE	0x02000000	/* Strange partial PFN mapping marker */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/mm/memory.c b/mm/memory.c
index e65f8fc8ea67..430a72ed08d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1227,50 +1227,6 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-/*
- * Somebody does a pfn remapping that doesn't actually work as a vma.
- *
- * Do it as individual pages instead, and warn about it. It's bad form,
- * and very inefficient.
- */
-static int incomplete_pfn_remap(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end,
-		unsigned long pfn, pgprot_t prot)
-{
-	static int warn = 10;
-	struct page *page;
-	int retval;
-
-	if (!(vma->vm_flags & VM_INCOMPLETE)) {
-		if (warn) {
-			warn--;
-			printk("%s does an incomplete pfn remapping", current->comm);
-			dump_stack();
-		}
-	}
-	vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
-
-	if (start < vma->vm_start || end > vma->vm_end)
-		return -EINVAL;
-
-	if (!pfn_valid(pfn))
-		return -EINVAL;
-
-	page = pfn_to_page(pfn);
-	if (!PageReserved(page))
-		return -EINVAL;
-
-	retval = 0;
-	while (start < end) {
-		retval = insert_page(vma->vm_mm, start, page, prot);
-		if (retval < 0)
-			break;
-		start += PAGE_SIZE;
-		page++;
-	}
-	return retval;
-}
-
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1365,7 +1321,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 */
 	if (!(vma->vm_flags & VM_SHARED)) {
 		if (addr != vma->vm_start || end != vma->vm_end)
-			return incomplete_pfn_remap(vma, addr, end, pfn, prot);
+			return -EINVAL;
 		vma->vm_pgoff = pfn;
 	}
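
[ Not part of the patch, just an illustrative sketch of what callers must
  do after this change: remap_pfn_range() on a private (non-VM_SHARED)
  mapping now returns -EINVAL unless the call covers the entire VMA,
  instead of falling back to the page-by-page incomplete_pfn_remap()
  path.  The mydev_mmap() name and MYDEV_PHYS_BASE constant below are
  hypothetical; only remap_pfn_range() and the vma fields are real. ]

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * Cover the whole VMA, vm_start..vm_end, in a single call;
	 * a partial remap of a private mapping now fails with -EINVAL.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}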