arm64: hibernate: add trans_pgd public functions
trans_pgd_create_copy() and trans_pgd_map_page() are going to be the basis
for new shared code that handles page tables for cases that operate between
kernels: kexec and hibernate.

Note: eventually, get_safe_page() will be moved into a function pointer
passed via argument, but for now keep it as is.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: James Morse <james.morse@arm.com>
[will: Keep these functions static until kexec needs them]
Signed-off-by: Will Deacon <will@kernel.org>
commit a2c2e67923
parent 7ea4088938
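The commit message notes that get_safe_page() will eventually be replaced by an allocator passed in via a function pointer. As a rough illustration of that direction only (not part of this patch; struct and function names below are hypothetical), the page-table helpers could take an allocation callback instead of calling get_safe_page() directly:

/*
 * Hypothetical sketch: an allocator callback in place of direct
 * get_safe_page() calls. Neither trans_pgd_info nor trans_alloc_page()
 * is defined by this patch.
 */
struct trans_pgd_info {
        /* Page allocator used while building the temporary tables. */
        void *(*trans_alloc_page)(void *arg);
        void *trans_alloc_arg;
};

static void *trans_alloc(struct trans_pgd_info *info)
{
        return info->trans_alloc_page(info->trans_alloc_arg);
}

/* Hibernate would then pass something like this as the callback: */
static void *hibernate_page_alloc(void *arg)
{
        return (void *)get_safe_page((gfp_t)(unsigned long)arg);
}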
@@ -182,39 +182,15 @@ int arch_hibernation_header_restore(void *addr)
 }
 EXPORT_SYMBOL(arch_hibernation_header_restore);
 
-/*
- * Copies length bytes, starting at src_start into an new page,
- * perform cache maintentance, then maps it at the specified address low
- * address as executable.
- *
- * This is used by hibernate to copy the code it needs to execute when
- * overwriting the kernel text. This function generates a new set of page
- * tables, which it loads into ttbr0.
- *
- * Length is provided as we probably only want 4K of data, even on a 64K
- * page system.
- */
-static int create_safe_exec_page(void *src_start, size_t length,
-                                 unsigned long dst_addr,
-                                 phys_addr_t *phys_dst_addr)
+static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
+                              unsigned long dst_addr,
+                              pgprot_t pgprot)
 {
-        void *page = (void *)get_safe_page(GFP_ATOMIC);
-        pgd_t *trans_pgd;
         pgd_t *pgdp;
         pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep;
 
-        if (!page)
-                return -ENOMEM;
-
-        memcpy(page, src_start, length);
-        __flush_icache_range((unsigned long)page, (unsigned long)page + length);
-
-        trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
-        if (!trans_pgd)
-                return -ENOMEM;
-
         pgdp = pgd_offset_raw(trans_pgd, dst_addr);
         if (pgd_none(READ_ONCE(*pgdp))) {
                 pudp = (void *)get_safe_page(GFP_ATOMIC);
@@ -242,6 +218,44 @@ static int create_safe_exec_page(void *src_start, size_t length,
         ptep = pte_offset_kernel(pmdp, dst_addr);
         set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
 
+        return 0;
+}
+
+/*
+ * Copies length bytes, starting at src_start into an new page,
+ * perform cache maintenance, then maps it at the specified address low
+ * address as executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+                                 unsigned long dst_addr,
+                                 phys_addr_t *phys_dst_addr)
+{
+        void *page = (void *)get_safe_page(GFP_ATOMIC);
+        pgd_t *trans_pgd;
+        int rc;
+
+        if (!page)
+                return -ENOMEM;
+
+        memcpy(page, src_start, length);
+        __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+
+        trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
+        if (!trans_pgd)
+                return -ENOMEM;
+
+        rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
+                                PAGE_KERNEL_EXEC);
+        if (rc)
+                return rc;
+
         /*
          * Load our new page tables. A strict BBM approach requires that we
          * ensure that TLBs are free of any entries that may overlap with the
@@ -462,6 +476,24 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
         return 0;
 }
 
+static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
+                                 unsigned long end)
+{
+        int rc;
+        pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+
+        if (!trans_pgd) {
+                pr_err("Failed to allocate memory for temporary page tables.\n");
+                return -ENOMEM;
+        }
+
+        rc = copy_page_tables(trans_pgd, start, end);
+        if (!rc)
+                *dst_pgdp = trans_pgd;
+
+        return rc;
+}
+
 /*
  * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
  *
@@ -483,12 +515,7 @@ int swsusp_arch_resume(void)
          * Create a second copy of just the linear map, and use this when
          * restoring.
          */
-        tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-        if (!tmp_pg_dir) {
-                pr_err("Failed to allocate memory for temporary page tables.\n");
-                return -ENOMEM;
-        }
-        rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
+        rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
         if (rc)
                 return rc;
 
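Per Will's note, both helpers stay static in hibernate.c for now. Once kexec needs them, they would presumably be exposed through a shared header; a minimal sketch of such declarations follows (the header location is an assumption, only the signatures come from this patch):

/*
 * Assumed future header, e.g. arch/arm64/include/asm/trans_pgd.h (not added
 * by this patch); the prototypes mirror the functions introduced above.
 */
int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
                          unsigned long end);
int trans_pgd_map_page(pgd_t *trans_pgd, void *page, unsigned long dst_addr,
                       pgprot_t pgprot);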