kexec: allow architectures to override boot mapping
kexec physical addresses are the boot-time view of the system. For certain ARM systems (such as Keystone 2), the boot view of the system does not match the kernel's view of the system: the boot view uses a special alias in the lower 4GB of the physical address space.

To cater for these kinds of setups, we need to translate between the boot view physical addresses and the normal kernel view physical addresses. This patch extracts the current translation points into linux/kexec.h, and allows an architecture to override the functions.

Due to the translations required, we unfortunately end up with six translation functions, which are reduced down to four that the architecture can override.

[akpm@linux-foundation.org: kexec.h needs asm/io.h for phys_to_virt()]
Link: http://lkml.kernel.org/r/E1b8koP-0004HZ-Vf@rmk-PC.armlinux.org.uk
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Keerthy <j-keerthy@ti.com>
Cc: Pratyush Anand <panand@redhat.com>
Cc: Vitaly Andrianov <vitalya@ti.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Simon Horman <horms@verge.net.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 43546d8669
parent dae28018f5
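For illustration only, not part of this patch: a minimal sketch of what an architecture override of the four hooks could look like in its asm/kexec.h, assuming a hypothetical fixed offset KEXEC_BOOT_PHYS_OFFSET between the kernel view and the boot (alias) view of physical memory. The name, the value and the placement are assumptions; the real Keystone 2 support in the ARM tree may differ.

/* Hypothetical <asm/kexec.h> fragment -- a sketch under the assumptions above. */
#define KEXEC_BOOT_PHYS_OFFSET	0x780000000ULL	/* made-up, page-aligned alias offset */

#define phys_to_boot_phys phys_to_boot_phys
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
	/* kernel view -> boot view (result fits in the lower 4GB) */
	return phys - KEXEC_BOOT_PHYS_OFFSET;
}

#define boot_phys_to_phys boot_phys_to_phys
static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
{
	/* boot view -> kernel view */
	return (phys_addr_t)boot_phys + KEXEC_BOOT_PHYS_OFFSET;
}

#define page_to_boot_pfn page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
{
	/* pfn of the page as seen in the boot view */
	return page_to_pfn(page) - (KEXEC_BOOT_PHYS_OFFSET >> PAGE_SHIFT);
}

#define boot_pfn_to_page boot_pfn_to_page
static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
{
	/* back from a boot-view pfn to the kernel's struct page */
	return pfn_to_page(boot_pfn + (KEXEC_BOOT_PHYS_OFFSET >> PAGE_SHIFT));
}

Because linux/kexec.h pulls in asm/kexec.h before the #ifndef-guarded defaults added below, definitions like these take precedence, and virt_to_boot_phys()/boot_phys_to_virt() are built on top of them automatically.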
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -14,6 +14,8 @@
 
 #if !defined(__ASSEMBLY__)
 
+#include <asm/io.h>
+
 #include <uapi/linux/kexec.h>
 
 #ifdef CONFIG_KEXEC_CORE
@@ -318,6 +320,44 @@ int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 void arch_kexec_protect_crashkres(void);
 void arch_kexec_unprotect_crashkres(void);
 
+#ifndef page_to_boot_pfn
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+	return page_to_pfn(page);
+}
+#endif
+
+#ifndef boot_pfn_to_page
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+	return pfn_to_page(boot_pfn);
+}
+#endif
+
+#ifndef phys_to_boot_phys
+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+	return phys;
+}
+#endif
+
+#ifndef boot_phys_to_phys
+static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
+{
+	return boot_phys;
+}
+#endif
+
+static inline unsigned long virt_to_boot_phys(void *addr)
+{
+	return phys_to_boot_phys(__pa((unsigned long)addr));
+}
+
+static inline void *boot_phys_to_virt(unsigned long entry)
+{
+	return phys_to_virt(boot_phys_to_phys(entry));
+}
+
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -48,7 +48,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
 
 	if (kexec_on_panic) {
 		/* Verify we have a valid entry point */
-		if ((entry < crashk_res.start) || (entry > crashk_res.end))
+		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
+		    (entry > phys_to_boot_phys(crashk_res.end)))
 			return -EADDRNOTAVAIL;
 	}
 
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -225,8 +225,8 @@ int sanity_check_segment_list(struct kimage *image)
 			mstart = image->segment[i].mem;
 			mend = mstart + image->segment[i].memsz - 1;
 			/* Ensure we are within the crash kernel limits */
-			if ((mstart < crashk_res.start) ||
-			    (mend > crashk_res.end))
+			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
+			    (mend > phys_to_boot_phys(crashk_res.end)))
 				return -EADDRNOTAVAIL;
 		}
 	}
@@ -350,7 +350,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 		if (!pages)
 			break;
-		pfn   = page_to_pfn(pages);
+		pfn   = page_to_boot_pfn(pages);
 		epfn  = pfn + count;
 		addr  = pfn << PAGE_SHIFT;
 		eaddr = epfn << PAGE_SHIFT;
@@ -476,7 +476,7 @@ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 			return -ENOMEM;
 
 		ind_page = page_address(page);
-		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
+		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
 		image->entry = ind_page;
 		image->last_entry = ind_page +
 				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
@@ -531,13 +531,13 @@ void kimage_terminate(struct kimage *image)
 #define for_each_kimage_entry(image, ptr, entry) \
 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 		ptr = (entry & IND_INDIRECTION) ? \
-			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
+			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
 
 static void kimage_free_entry(kimage_entry_t entry)
 {
 	struct page *page;
 
-	page = pfn_to_page(entry >> PAGE_SHIFT);
+	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
 	kimage_free_pages(page);
 }
 
@@ -631,7 +631,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
 	 * have a match.
 	 */
 	list_for_each_entry(page, &image->dest_pages, lru) {
-		addr = page_to_pfn(page) << PAGE_SHIFT;
+		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 		if (addr == destination) {
 			list_del(&page->lru);
 			return page;
@@ -646,12 +646,12 @@ static struct page *kimage_alloc_page(struct kimage *image,
 		if (!page)
 			return NULL;
 		/* If the page cannot be used file it away */
-		if (page_to_pfn(page) >
+		if (page_to_boot_pfn(page) >
 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 			list_add(&page->lru, &image->unusable_pages);
 			continue;
 		}
-		addr = page_to_pfn(page) << PAGE_SHIFT;
+		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 
 		/* If it is the destination page we want use it */
 		if (addr == destination)
@@ -674,7 +674,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
 			struct page *old_page;
 
 			old_addr = *old & PAGE_MASK;
-			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
 			copy_highpage(page, old_page);
 			*old = addr | (*old & ~PAGE_MASK);
 
@@ -730,7 +730,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 			result = -ENOMEM;
 			goto out;
 		}
-		result = kimage_add_page(image, page_to_pfn(page)
+		result = kimage_add_page(image, page_to_boot_pfn(page)
 								<< PAGE_SHIFT);
 		if (result < 0)
 			goto out;
@@ -791,7 +791,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 		char *ptr;
 		size_t uchunk, mchunk;
 
-		page = pfn_to_page(maddr >> PAGE_SHIFT);
+		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
 		if (!page) {
 			result = -ENOMEM;
 			goto out;
@@ -919,7 +919,7 @@ void __weak crash_free_reserved_phys_range(unsigned long begin,
 	unsigned long addr;
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE)
-		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
+		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
 }
 
 int crash_shrink_memory(unsigned long new_size)