mm: Add vm_insert_pfn_prot()
The x86 vvar vma contains pages with differing cacheability flags. x86
currently implements this by manually inserting all the ptes using
(io_)remap_pfn_range when the vma is set up.

x86 wants to move to using .fault with VM_FAULT_NOPAGE to set up the
mappings as needed. The correct API to use to insert a pfn in .fault is
vm_insert_pfn(), but vm_insert_pfn() can't override the vma's cache mode,
and the HPET page in particular needs to be uncached despite the fact
that the rest of the VMA is cached.

Add vm_insert_pfn_prot() to support varying cacheability within the same
non-COW VMA in a more sane manner.

x86 could alternatively use multiple VMAs, but that's messy, would break
CRIU, and would create unnecessary VMAs that would waste memory.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/d2938d1eb37be7a5e4f86182db646551f11e45aa.1451446564.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
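[Editor's note] A minimal sketch of the pattern this enables, assuming a
hypothetical fault handler for a vvar-style mapping. The names
my_vvar_fault, MY_HPET_PGOFF, my_hpet_phys and my_vvar_pfn() are
placeholders (not from this patch), and the .fault signature is the
4.5-era one that receives the vma explicitly:

#include <linux/mm.h>

/* Placeholders for this sketch; not part of the patch. */
#define MY_HPET_PGOFF	1UL
static phys_addr_t my_hpet_phys;
static unsigned long my_vvar_pfn(pgoff_t pgoff);

static int my_vvar_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	pgprot_t prot = vma->vm_page_prot;	/* cached by default */
	unsigned long pfn;
	int ret;

	if (vmf->pgoff == MY_HPET_PGOFF) {
		/* The HPET page must bypass the cache. */
		pfn = my_hpet_phys >> PAGE_SHIFT;
		prot = pgprot_noncached(prot);
	} else {
		/* Ordinary cached page in the same VMA. */
		pfn = my_vvar_pfn(vmf->pgoff);
	}

	ret = vm_insert_pfn_prot(vma, (unsigned long)vmf->virtual_address,
				 pfn, prot);

	/* -EBUSY means a concurrent fault already installed the pte. */
	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;	/* pte installed; no struct page */
	return VM_FAULT_SIGBUS;
}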
commit 1745cbc5d0
parent f872f5400c
include/linux/mm.h | 2 ++

@@ -2080,6 +2080,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
mm/memory.c | 25 +++++++++++++++++++++++--
@@ -1563,9 +1563,30 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
+{
+	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers to
+ * override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings. In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot)
 {
 	int ret;
-	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1587,7 +1608,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	return ret;
 }
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
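[Editor's note] For completeness, a sketch of the mmap side that would
pair with a fault handler like the one above (names again hypothetical).
vm_insert_pfn_prot(), like vm_insert_pfn(), expects a non-COW vma marked
VM_PFNMAP (or VM_MIXEDMAP), so the mapping is flagged accordingly when it
is created:

static const struct vm_operations_struct my_vvar_vm_ops = {
	.fault = my_vvar_fault,		/* installs ptes lazily, per page */
};

static int my_vvar_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Raw pfn mapping, never expanded or dumped; must not be COW. */
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &my_vvar_vm_ops;
	return 0;
}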