commit 77fb622de1

Merge tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "Six hotfixes. The page_table_check one from Miaohe Lin is considered
  a minor thing so it isn't marked for -stable. The remainder address
  pre-5.19 issues and are cc:stable"

* tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/page_table_check: fix accessing unmapped ptep
  kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
  mm/page_alloc: always attempt to allocate at least one page during bulk allocation
  hugetlb: fix huge_pmd_unshare address update
  zsmalloc: fix races between asynchronous zspage free and page migration
  Revert "mm/cma.c: remove redundant cma_mutex lock"
arch/s390/include/asm/kexec.h

@@ -9,6 +9,8 @@
 #ifndef _S390_KEXEC_H
 #define _S390_KEXEC_H
 
+#include <linux/module.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/setup.h>
@@ -83,4 +85,12 @@ struct kimage_arch {
 extern const struct kexec_file_ops s390_kexec_image_ops;
 extern const struct kexec_file_ops s390_kexec_elf_ops;
 
+#ifdef CONFIG_KEXEC_FILE
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+				     Elf_Shdr *section,
+				     const Elf_Shdr *relsec,
+				     const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
 #endif /*_S390_KEXEC_H */
arch/x86/include/asm/kexec.h

@@ -186,6 +186,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
 extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
 #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
 
+#ifdef CONFIG_KEXEC_FILE
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+				     Elf_Shdr *section,
+				     const Elf_Shdr *relsec,
+				     const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+#endif
 #endif
 
 typedef void crash_vmclear_fn(void);
include/linux/kexec.h

@@ -193,14 +193,6 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
 				  unsigned long buf_len);
 void *arch_kexec_kernel_image_load(struct kimage *image);
-int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
-				     Elf_Shdr *section,
-				     const Elf_Shdr *relsec,
-				     const Elf_Shdr *symtab);
-int arch_kexec_apply_relocations(struct purgatory_info *pi,
-				 Elf_Shdr *section,
-				 const Elf_Shdr *relsec,
-				 const Elf_Shdr *symtab);
 int arch_kimage_file_post_load_cleanup(struct kimage *image);
 #ifdef CONFIG_KEXEC_SIG
 int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
@@ -229,6 +221,44 @@ extern int crash_exclude_mem_range(struct crash_mem *mem,
 				   unsigned long long mend);
 extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
 				       void **addr, unsigned long *sz);
+
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+	pr_err("RELA relocation unsupported.\n");
+	return -ENOEXEC;
+}
+#endif
+
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+	pr_err("REL relocation unsupported.\n");
+	return -ENOEXEC;
+}
+#endif
 #endif /* CONFIG_KEXEC_FILE */
 
 #ifdef CONFIG_KEXEC_ELF
kernel/kexec_file.c

@@ -108,40 +108,6 @@ int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
 }
 #endif
 
-/*
- * arch_kexec_apply_relocations_add - apply relocations of type RELA
- * @pi: Purgatory to be relocated.
- * @section: Section relocations applying to.
- * @relsec: Section containing RELAs.
- * @symtab: Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
-				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
-	pr_err("RELA relocation unsupported.\n");
-	return -ENOEXEC;
-}
-
-/*
- * arch_kexec_apply_relocations - apply relocations of type REL
- * @pi: Purgatory to be relocated.
- * @section: Section relocations applying to.
- * @relsec: Section containing RELs.
- * @symtab: Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
-			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
-	pr_err("REL relocation unsupported.\n");
-	return -ENOEXEC;
-}
-
 /*
  * Free up memory used by kernel, initrd, and command line. This is temporary
  * memory allocation which is not needed any more after these buffers have
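Note on the kexec_file change above: the link-time __weak fallbacks are replaced with the kernel's compile-time override convention. An architecture that implements arch_kexec_apply_relocations_add() declares it in its asm/kexec.h and defines a macro of the same name; the generic header then supplies a static inline stub only when that macro is absent. A minimal sketch of the convention, using a hypothetical do_fixup() rather than any real kernel API:

/* arch header (sketch): announce a strong implementation. */
int do_fixup(int x);		/* defined somewhere in arch code */
#define do_fixup do_fixup	/* lets generic code detect the override */

/* generic header (sketch): provide a fallback only if no override. */
#ifndef do_fixup
static inline int do_fixup(int x)
{
	return -1;		/* "unsupported" fallback, like -ENOEXEC above */
}
#endif

Compared with __weak, the #ifndef test resolves during preprocessing, so which definition wins never depends on linker behaviour.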
mm/cma.c

@@ -37,6 +37,7 @@
 
 struct cma cma_areas[MAX_CMA_AREAS];
 unsigned cma_area_count;
+static DEFINE_MUTEX(cma_mutex);
 
 phys_addr_t cma_get_base(const struct cma *cma)
 {
@@ -468,9 +469,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;
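Note on the revert above: cma_mutex serializes alloc_contig_range() calls again. The likely failure mode being addressed (hedging here, since the rationale lives in the revert's changelog rather than in this hunk) is that two concurrent cma_alloc() calls can cover ranges that share a pageblock, and alloc_contig_range() isolates whole pageblocks, so concurrent allocations can trip over each other's isolation and fail spuriously; the mutex makes the isolate/migrate/claim sequence atomic with respect to other CMA allocations.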
mm/hugetlb.c

@@ -6755,7 +6755,14 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_clear(pud);
 	put_page(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
-	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+	/*
+	 * This update of passed address optimizes loops sequentially
+	 * processing addresses in increments of huge page size (PMD_SIZE
+	 * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
+	 * Update address to the 'last page' in the cleared area so that
+	 * calling loop can move to first page past this area.
+	 */
+	*addr |= PUD_SIZE - PMD_SIZE;
 	return 1;
 }
 
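A worked example of the address update, assuming x86-64 geometry (these constants are assumptions for illustration: PMD_SIZE = HPAGE_SIZE = 2 MiB, PUD_SIZE = 1 GiB, PTRS_PER_PTE = 512, so HPAGE_SIZE * PTRS_PER_PTE equals PUD_SIZE). ALIGN() rounds up, so when *addr is already PUD-aligned the old formula moved it backwards:

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE	(2ULL << 20)	/* assumed: 2 MiB */
#define PUD_SIZE	(1ULL << 30)	/* assumed: 1 GiB */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t addr = 0x40000000ULL;	/* already PUD-aligned */

	/* Old: ALIGN() is a no-op here, so the result lands BEFORE addr. */
	uint64_t old = ALIGN(addr, PUD_SIZE) - PMD_SIZE;	/* 0x3fe00000 */

	/* New: last PMD-sized page inside the PUD area containing addr. */
	uint64_t new = addr | (PUD_SIZE - PMD_SIZE);		/* 0x7fe00000 */

	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)old, (unsigned long long)new);
	return 0;
}

From old, a caller stepping forward by PMD_SIZE lands back on 0x40000000 and can spin forever; from new it advances to 0x80000000, the first page past the cleared PUD_SIZE area, which is exactly what the new comment in the hunk describes.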
mm/page_alloc.c

@@ -5324,8 +5324,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
 								pcp, pcp_list);
 		if (unlikely(!page)) {
-			/* Try and get at least one page */
-			if (!nr_populated)
+			/* Try and allocate at least one page */
+			if (!nr_account)
 				goto failed_irq;
 			break;
 		}
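Note on the check above: nr_populated counts array slots holding pages, including slots the caller pre-populated before invoking __alloc_pages_bulk(), while nr_account counts only pages allocated by this call. With a partially filled array the old !nr_populated test could be false even though nothing had been allocated yet, so the function could return without a single new page. A minimal control-flow sketch (plain C, not kernel code; the names mirror the hunk but the "allocation" is simulated):

#include <stdio.h>

#define NR_PAGES 8

int main(void)
{
	int filled = 1;			/* stand-in for a pre-filled page */
	void *array[NR_PAGES] = { &filled };	/* caller pre-filled slot 0 */
	int nr_populated = 0, nr_account = 0;

	for (int i = 0; i < NR_PAGES; i++) {
		if (array[i]) {		/* skip already-populated slots */
			nr_populated++;
			continue;
		}
		void *page = NULL;	/* simulate an empty pcp list */
		if (!page) {
			/* old: !nr_populated is false (slot 0 was counted),
			 * so we break with zero pages allocated.
			 * new: !nr_account is true, so the real code jumps
			 * to the single-page fallback path instead. */
			if (!nr_account)
				printf("fall back to single-page alloc\n");
			break;
		}
		nr_account++;
		nr_populated++;
	}
	return 0;
}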
mm/page_table_check.c

@@ -234,11 +234,11 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm,
 		pte_t *ptep = pte_offset_map(&pmd, addr);
 		unsigned long i;
 
-		pte_unmap(ptep);
 		for (i = 0; i < PTRS_PER_PTE; i++) {
 			__page_table_check_pte_clear(mm, addr, *ptep);
 			addr += PAGE_SIZE;
 			ptep++;
 		}
+		pte_unmap(ptep - PTRS_PER_PTE);
 	}
 }
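Note on the fix above: pte_offset_map() can map the PTE page through a temporary kernel mapping (kmap-style, on highmem configurations), and the old code dropped that mapping with pte_unmap() before the loop dereferenced *ptep PTRS_PER_PTE times. The fix keeps the mapping alive across the loop and, because ptep has been advanced past the last entry by then, unmaps the original pointer as ptep - PTRS_PER_PTE.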
mm/zsmalloc.c

@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
  */
 static void lock_zspage(struct zspage *zspage)
 {
-	struct page *page = get_first_page(zspage);
+	struct page *curr_page, *page;
 
-	do {
-		lock_page(page);
-	} while ((page = get_next_page(page)) != NULL);
+	/*
+	 * Pages we haven't locked yet can be migrated off the list while we're
+	 * trying to lock them, so we need to be careful and only attempt to
+	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
+	 * may no longer belong to the zspage. This means that we may wait for
+	 * the wrong page to unlock, so we must take a reference to the page
+	 * prior to waiting for it to unlock outside migrate_read_lock().
+	 */
+	while (1) {
+		migrate_read_lock(zspage);
+		page = get_first_page(zspage);
+		if (trylock_page(page))
+			break;
+		get_page(page);
+		migrate_read_unlock(zspage);
+		wait_on_page_locked(page);
+		put_page(page);
+	}
+
+	curr_page = page;
+	while ((page = get_next_page(curr_page))) {
+		if (trylock_page(page)) {
+			curr_page = page;
+		} else {
+			get_page(page);
+			migrate_read_unlock(zspage);
+			wait_on_page_locked(page);
+			put_page(page);
+			migrate_read_lock(zspage);
+		}
+	}
+	migrate_read_unlock(zspage);
 }
 
 static int zs_init_fs_context(struct fs_context *fc)
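Note on the locking protocol above: lock_zspage() now only calls trylock_page() while holding migrate_read_lock(), which pins the zspage's page list. When a trylock fails it takes a page reference, drops the migration lock, sleeps in wait_on_page_locked(), then drops the reference and retries; the get_page()/put_page() pair keeps the struct page alive while sleeping, and restarting from get_first_page() (or re-taking the migration lock before the next get_next_page()) copes with pages having migrated in the meantime. Once a page is locked it cannot migrate, so curr_page remains a stable cursor for walking the rest of the chain.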