x86/mm: Use proper encryption attributes with /dev/mem
When accessing memory using /dev/mem (or /dev/kmem), use the proper
encryption attributes when mapping the memory.

To ensure the proper attributes are applied when reading or writing
/dev/mem, update the xlate_dev_mem_ptr() function to use memremap(),
which essentially performs the same steps of applying __va() for RAM
or using ioremap() if not RAM.

To ensure the proper attributes are applied when mmap()ing /dev/mem,
update phys_mem_access_prot() to call phys_mem_access_encrypted(), a
new function which checks whether the memory should be mapped encrypted
or not. If it is not to be mapped encrypted, the VMA protection value
is updated to remove the encryption bit.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Toshimitsu Kani <toshi.kani@hpe.com>
Cc: kasan-dev@googlegroups.com
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-doc@vger.kernel.org
Cc: linux-efi@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/c917f403ab9f61cbfd455ad6425ed8429a5e7b54.1500319216.git.thomas.lendacky@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 8458bf94b0
parent f2f931c681
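For context, both kernel paths this patch touches are reachable from
ordinary userspace accesses to /dev/mem: read()/write() go through
xlate_dev_mem_ptr(), while mmap() goes through phys_mem_access_prot().
A minimal sketch of such an access follows; the 0x90000 physical
address is purely illustrative, the program needs root, and
CONFIG_STRICT_DEVMEM may deny the range entirely:

/* Sketch: exercise both /dev/mem paths changed by this patch.
 * pread() ends up in xlate_dev_mem_ptr(); mmap() ends up in
 * phys_mem_access_prot(). The physical address is illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const off_t phys = 0x90000;	/* illustrative, page-aligned */
	const long page = sysconf(_SC_PAGESIZE);
	unsigned char byte;

	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* read() path: the kernel temporarily maps the page via
	 * xlate_dev_mem_ptr(), now implemented with memremap(). */
	if (pread(fd, &byte, 1, phys) == 1)
		printf("read %#lx: %#x\n", (unsigned long)phys, byte);

	/* mmap() path: phys_mem_access_prot() now strips the
	 * encryption bit when the range must be mapped decrypted. */
	void *va = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, phys);
	if (va != MAP_FAILED) {
		printf("mmap %#lx: %#x\n", (unsigned long)phys,
		       *(unsigned char *)va);
		munmap(va, page);
	}

	close(fd);
	return 0;
}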
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -386,4 +386,7 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset,
 					unsigned long flags);
 #define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 
+extern bool phys_mem_access_encrypted(unsigned long phys_addr,
+				      unsigned long size);
+
 #endif /* _ASM_X86_IO_H */
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -400,12 +400,10 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 	unsigned long offset = phys & ~PAGE_MASK;
 	void *vaddr;
 
-	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
-	if (page_is_ram(start >> PAGE_SHIFT))
-		return __va(phys);
+	/* memremap() maps if RAM, otherwise falls back to ioremap() */
+	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
 
-	vaddr = ioremap_cache(start, PAGE_SIZE);
-	/* Only add the offset on success and return NULL if the ioremap() failed: */
+	/* Only add the offset on success and return NULL if memremap() failed */
 	if (vaddr)
 		vaddr += offset;
 
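Why this is equivalent to the old __va()/ioremap_cache() split: for
MEMREMAP_WB, memremap() first tries to reuse the kernel linear mapping
and only falls back to ioremap when that is not possible or not
allowed. A simplified sketch of that path, paraphrased (not verbatim)
from kernel/memremap.c of this era, with error handling and the other
MEMREMAP_* flags omitted:

/* Simplified sketch of memremap()'s MEMREMAP_WB handling; the real
 * code in kernel/memremap.c also handles partial region overlap and
 * the MEMREMAP_WT/MEMREMAP_WC flags. */
void *memremap_wb_sketch(resource_size_t offset, size_t size)
{
	/* RAM case: reuse the linear mapping, but only if the
	 * architecture agrees. The SME hook added earlier in this
	 * series makes arch_memremap_can_ram_remap() return false
	 * for ranges that must be mapped decrypted ... */
	if (region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
			      IORES_DESC_NONE) == REGION_INTERSECTS &&
	    pfn_valid(PHYS_PFN(offset)) &&
	    arch_memremap_can_ram_remap(offset, size, 0))
		return __va(offset);

	/* ... in which case we fall through to ioremap, which applies
	 * the proper (decrypted) page attributes on x86. */
	return arch_memremap_wb(offset, size);
}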
@@ -414,10 +412,7 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 {
-	if (page_is_ram(phys >> PAGE_SHIFT))
-		return;
-
-	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+	memunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 
 /*
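The single memunmap() call is safe for both cases because memunmap()
only unmaps addresses that came from ioremap(); for linear-map (__va)
addresses it is a no-op. Its definition in kernel/memremap.c of this
era is essentially:

/* kernel/memremap.c: only ioremap()ed (vmalloc-range) addresses need
 * unmapping; linear-map addresses returned by __va() are left alone. */
void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}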
@@ -626,6 +621,11 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 	return prot;
 }
 
+bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
+{
+	return arch_memremap_can_ram_remap(phys_addr, size, 0);
+}
+
 #ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -744,6 +744,9 @@ EXPORT_SYMBOL(arch_io_free_memtype_wc);
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
+	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
+		vma_prot = pgprot_decrypted(vma_prot);
+
 	return vma_prot;
 }
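For reference, pgprot_decrypted() is one of the SME helpers introduced
earlier in this series; it clears the memory-encryption mask (the
C-bit on AMD SME) from the protection value. Quoted from memory, the
definitions are roughly:

/* From the SME series (include/linux/mem_encrypt.h and
 * arch/x86/include/asm/pgtable.h); sme_me_mask is the C-bit mask,
 * zero when SME is inactive: */
#define __sme_clr(x)		((x) & ~sme_me_mask)

#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))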