mirror of https://gitee.com/openkylin/linux.git
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "26 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (26 commits)
  userfaultfd: remove wrong comment from userfaultfd_ctx_get()
  fat: fix using uninitialized fields of fat_inode/fsinfo_inode
  sh: cayman: IDE support fix
  kasan: fix races in quarantine_remove_cache()
  kasan: resched in quarantine_remove_cache()
  mm: do not call mem_cgroup_free() from within mem_cgroup_alloc()
  thp: fix another corner case of munlock() vs. THPs
  rmap: fix NULL-pointer dereference on THP munlocking
  mm/memblock.c: fix memblock_next_valid_pfn()
  userfaultfd: selftest: vm: allow to build in vm/ directory
  userfaultfd: non-cooperative: userfaultfd_remove revalidate vma in MADV_DONTNEED
  userfaultfd: non-cooperative: fix fork fctx->new memleak
  mm/cgroup: avoid panic when init with low memory
  drivers/md/bcache/util.h: remove duplicate inclusion of blkdev.h
  mm/vmstats: add thp_split_pud event for clarity
  include/linux/fs.h: fix unsigned enum warning with gcc-4.2
  userfaultfd: non-cooperative: release all ctx in dup_userfaultfd_complete
  userfaultfd: non-cooperative: robustness check
  userfaultfd: non-cooperative: rollback userfaultfd_exit
  x86, mm: unify exit paths in gup_pte_range()
  ...
commit 8fe3ccaed0
@@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
 to collect more or less stable coverage that is function of syscall inputs.
 To achieve this goal it does not collect coverage in soft/hard interrupts
 and instrumentation of some inherently non-deterministic parts of kernel is
-disbled (e.g. scheduler, locking).
+disabled (e.g. scheduler, locking).

 Usage
 -----
@@ -45,7 +45,7 @@ Required Properties:
 Optional Properties:
 - reg-names: In addition to the required properties, the following are optional
 - "efuse-address" - Contains efuse base address used to pick up ABB info.
-- "ldo-address" - Contains address of ABB LDO overide register address.
+- "ldo-address" - Contains address of ABB LDO override register.
 "efuse-address" is required for this.
 - ti,ldovbb-vset-mask - Required if ldo-address is set, mask for LDO override
 register to provide override vset value.
@@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
 manager has to explicitly enable these events by setting appropriate
 bits in uffdio_api.features passed to UFFDIO_API ioctl:

-UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
-non-cooperative process. When the monitored process exits, the uffd
-manager will get UFFD_EVENT_EXIT.
-
 UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
 this feature is enabled, the userfaultfd context of the parent process
 is duplicated into the newly created process. The manager receives
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
 dma_in_cfg.en = regk_dma_no;
 REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);

-/* Disble the cryptocop. */
+/* Disable the cryptocop. */
 rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
 rw_cfg.en = 0;
 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
@@ -347,23 +347,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 __r; \
 })

+static inline int __pte_write(pte_t pte)
+{
+return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+/*
+ * Saved write ptes are prot none ptes that doesn't have
+ * privileged bit sit. We mark prot none as one which has
+ * present and pviliged bit set and RWX cleared. To mark
+ * protnone which used to have _PAGE_WRITE set we clear
+ * the privileged bit.
+ */
+return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+return __pte_write(pte) || pte_savedwrite(pte);
+}
+
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 pte_t *ptep)
 {
-if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
-return;
-
-pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+if (__pte_write(*ptep))
+pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+else if (unlikely(pte_savedwrite(*ptep)))
+pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
 }

 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 unsigned long addr, pte_t *ptep)
 {
-if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
-return;
-
-pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+/*
+ * We should not find protnone for hugetlb, but this complete the
+ * interface.
+ */
+if (__pte_write(*ptep))
+pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+else if (unlikely(pte_savedwrite(*ptep)))
+pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
 }

 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -397,11 +432,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 pte_update(mm, addr, ptep, ~0UL, 0, 0);
 }

-static inline int pte_write(pte_t pte)
-{
-return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
-}
-
 static inline int pte_dirty(pte_t pte)
 {
 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@@ -465,19 +495,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
 VM_BUG_ON(!pte_protnone(pte));
 return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
 }
-
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
 {
-/*
- * Saved write ptes are prot none ptes that doesn't have
- * privileged bit sit. We mark prot none as one which has
- * present and pviliged bit set and RWX cleared. To mark
- * protnone which used to have _PAGE_WRITE set we clear
- * the privileged bit.
- */
-VM_BUG_ON(!pte_protnone(pte));
-return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+VM_WARN_ON(1);
+return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }
 #endif /* CONFIG_NUMA_BALANCING */

@@ -506,6 +529,8 @@ static inline unsigned long pte_pfn(pte_t pte)
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
+if (unlikely(pte_savedwrite(pte)))
+return pte_clear_savedwrite(pte);
 return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }

@@ -926,6 +951,7 @@ static inline int pmd_protnone(pmd_t pmd)

 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
+#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -982,11 +1008,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
 static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 pmd_t *pmdp)
 {
-
-if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
-return;
-
-pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+if (__pmd_write((*pmdp)))
+pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+else if (unlikely(pmd_savedwrite(*pmdp)))
+pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
 }

 static inline int pmd_trans_huge(pmd_t pmd)
@@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 hva, NULL, NULL);
 if (ptep) {
 pte = kvmppc_read_update_linux_pte(ptep, 1);
-if (pte_write(pte))
+if (__pte_write(pte))
 write_ok = 1;
 }
 local_irq_restore(flags);
@@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 }
 pte = kvmppc_read_update_linux_pte(ptep, writing);
 if (pte_present(pte) && !pte_protnone(pte)) {
-if (writing && !pte_write(pte))
+if (writing && !__pte_write(pte))
 /* make the actual HPTE be read-only */
 ptel = hpte_make_readonly(ptel);
 is_ci = pte_ci(pte);
@@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void)
 SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
 SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);

-#ifdef CONFIG_IDE
 /*
 * Only IDE1 exists on the Cayman
 */
@@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void)
 SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
 SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
 SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
-#endif

 /* Exit the configuration state */
 outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
@@ -535,7 +535,7 @@ static void run_sync(void)
 {
 int enable_irqs = irqs_disabled();

-/* We may be called with interrupts disbled (on bootup). */
+/* We may be called with interrupts disabled (on bootup). */
 if (enable_irqs)
 local_irq_enable();
 on_each_cpu(do_sync_core, NULL, 1);
@@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 unsigned long end, int write, struct page **pages, int *nr)
 {
 struct dev_pagemap *pgmap = NULL;
-int nr_start = *nr;
-pte_t *ptep;
+int nr_start = *nr, ret = 0;
+pte_t *ptep, *ptem;

-ptep = pte_offset_map(&pmd, addr);
+/*
+ * Keep the original mapped PTE value (ptem) around since we
+ * might increment ptep off the end of the page when finishing
+ * our loop iteration.
+ */
+ptem = ptep = pte_offset_map(&pmd, addr);
 do {
 pte_t pte = gup_get_pte(ptep);
 struct page *page;

 /* Similar to the PMD case, NUMA hinting must take slow path */
-if (pte_protnone(pte)) {
-pte_unmap(ptep);
-return 0;
-}
+if (pte_protnone(pte))
+break;
+
+if (!pte_allows_gup(pte_val(pte), write))
+break;

 if (pte_devmap(pte)) {
 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
 if (unlikely(!pgmap)) {
 undo_dev_pagemap(nr, nr_start, pages);
-pte_unmap(ptep);
-return 0;
+break;
 }
-} else if (!pte_allows_gup(pte_val(pte), write) ||
-pte_special(pte)) {
-pte_unmap(ptep);
-return 0;
-}
+} else if (pte_special(pte))
+break;

 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 page = pte_page(pte);
 get_page(page);
@@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 (*nr)++;

 } while (ptep++, addr += PAGE_SIZE, addr != end);
-pte_unmap(ptep - 1);
+if (addr == end)
+ret = 1;
+pte_unmap(ptem);

-return 1;
+return ret;
 }

 static inline void get_head_page_multiple(struct page *page, int nr)
@@ -50,7 +50,7 @@
 the slower the port i/o. In some cases, setting
 this to zero will speed up the device. (default -1)

-major You may use this parameter to overide the
+major You may use this parameter to override the
 default major number (46) that this driver
 will use. Be sure to change the device
 name as well.
@@ -61,7 +61,7 @@
 first drive found.


-major You may use this parameter to overide the
+major You may use this parameter to override the
 default major number (45) that this driver
 will use. Be sure to change the device
 name as well.
@@ -59,7 +59,7 @@
 the slower the port i/o. In some cases, setting
 this to zero will speed up the device. (default -1)

-major You may use this parameter to overide the
+major You may use this parameter to override the
 default major number (47) that this driver
 will use. Be sure to change the device
 name as well.
@@ -84,7 +84,7 @@
 the slower the port i/o. In some cases, setting
 this to zero will speed up the device. (default -1)

-major You may use this parameter to overide the
+major You may use this parameter to override the
 default major number (97) that this driver
 will use. Be sure to change the device
 name as well.
@@ -61,7 +61,7 @@
 the slower the port i/o. In some cases, setting
 this to zero will speed up the device. (default -1)

-major You may use this parameter to overide the
+major You may use this parameter to override the
 default major number (96) that this driver
 will use. Be sure to change the device
 name as well.
@@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data,
 void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
 {
 /*
- * We always need to disble the hardware before trying to flush the
+ * We always need to disable the hardware before trying to flush the
 * FIFO. This is something that isn't written in the design
 * specification, but we have been informed by the hardware designers
 * that this must be done.
@@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
 }
 }

-/* disble sdma engine before programing it */
+/* disable sdma engine before programing it */
 sdma_v3_0_ctx_switch_enable(adev, false);
 sdma_v3_0_enable(adev, false);

@@ -543,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 /*
 * In case a device driver's probe() fails (e.g.,
 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
- * rescinded later (e.g., we dynamically disble an Integrated Service
+ * rescinded later (e.g., we dynamically disable an Integrated Service
 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
 * here we should skip most of the below cleanup work.
 */
@@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
 }
 }
 } else {
-// Disble B channel interrupts
+// Disable B channel interrupts
 st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);

 // Disable B channel FIFOs
@@ -4,7 +4,6 @@

 #include <linux/blkdev.h>
 #include <linux/errno.h>
-#include <linux/blkdev.h>
 #include <linux/kernel.h>
 #include <linux/sched/clock.h>
 #include <linux/llist.h>
@@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * The actual DAP implementation may be restricted to only one of the modes.
 * A compiler warning or error will be generated if the DAP implementation
- * overides or cannot handle the mode defined below.
- *
+ * overrides or cannot handle the mode defined below.
 */
 #ifndef DRXDAP_SINGLE_MASTER
 #define DRXDAP_SINGLE_MASTER 1
@@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * This maximum size may be restricted by the actual DAP implementation.
 * A compiler warning or error will be generated if the DAP implementation
- * overides or cannot handle the chunksize defined below.
+ * overrides or cannot handle the chunksize defined below.
 *
 * Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data
 * buffer. Do not undefine or choose too large, unless your system is able to
@@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
 *
 * This maximum size may be restricted by the actual DAP implementation.
 * A compiler warning or error will be generated if the DAP implementation
- * overides or cannot handle the chunksize defined below.
- *
+ * overrides or cannot handle the chunksize defined below.
 */
 #ifndef DRXDAP_MAX_RCHUNKSIZE
 #define DRXDAP_MAX_RCHUNKSIZE 60
@@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor)
 }

 /*
- * Send write disble instruction to the chip.
+ * Send write disable instruction to the chip.
 */
 static inline int write_disable(struct spi_nor *nor)
 {
@@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp {
 struct ib_mac_iocb_rsp {
 u8 opcode; /* 0x20 */
 u8 flags1;
-#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */
-#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */
+#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
 #define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
 #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
 #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
 * does not disable its parity logic prior to
 * the start of the reset. This may cause a
 * parity error to be detected and thus a
- * spurious SERR or PERR assertion. Disble
+ * spurious SERR or PERR assertion. Disable
 * PERR and SERR responses during the CHIPRST.
 */
 mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
@@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *);

 /* /dev/gadget/$CHIP represents ep0 and the whole device */
 enum ep0_state {
-/* DISBLED is the initial state.
-*/
+/* DISABLED is the initial state. */
 STATE_DEV_DISABLED = 0,

 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
@@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)

 spin_lock_irqsave(&xhci->lock, flags);

-/* disble usb3 ports Wake bits*/
+/* disable usb3 ports Wake bits */
 port_index = xhci->num_usb3_ports;
 port_array = xhci->usb3_ports;
 while (port_index--) {
@@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
 writel(t2, port_array[port_index]);
 }

-/* disble usb2 ports Wake bits*/
+/* disable usb2 ports Wake bits */
 port_index = xhci->num_usb2_ports;
 port_array = xhci->usb2_ports;
 while (port_index--) {
@@ -1359,6 +1359,16 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
 return 0;
 }

+static void fat_dummy_inode_init(struct inode *inode)
+{
+/* Initialize this dummy inode to work as no-op. */
+MSDOS_I(inode)->mmu_private = 0;
+MSDOS_I(inode)->i_start = 0;
+MSDOS_I(inode)->i_logstart = 0;
+MSDOS_I(inode)->i_attrs = 0;
+MSDOS_I(inode)->i_pos = 0;
+}
+
 static int fat_read_root(struct inode *inode)
 {
 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
 fat_inode = new_inode(sb);
 if (!fat_inode)
 goto out_fail;
-MSDOS_I(fat_inode)->i_pos = 0;
+fat_dummy_inode_init(fat_inode);
 sbi->fat_inode = fat_inode;

 fsinfo_inode = new_inode(sb);
 if (!fsinfo_inode)
 goto out_fail;
+fat_dummy_inode_init(fsinfo_inode);
 fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
 sbi->fsinfo_inode = fsinfo_inode;
 insert_inode_hash(fsinfo_inode);
@@ -138,8 +138,6 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
- *
- * Returns: In case of success, returns not zero.
 */
 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
 {
@@ -490,7 +488,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 * in such case.
 */
 down_read(&mm->mmap_sem);
-ret = 0;
+ret = VM_FAULT_NOPAGE;
 }
 }

@@ -527,10 +525,11 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 return ret;
 }

-static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
-struct userfaultfd_wait_queue *ewq)
+static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+struct userfaultfd_wait_queue *ewq)
 {
-int ret = 0;
+if (WARN_ON_ONCE(current->flags & PF_EXITING))
+goto out;

 ewq->ctx = ctx;
 init_waitqueue_entry(&ewq->wq, current);
@@ -547,8 +546,16 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 break;
 if (ACCESS_ONCE(ctx->released) ||
 fatal_signal_pending(current)) {
-ret = -1;
 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
+if (ewq->msg.event == UFFD_EVENT_FORK) {
+struct userfaultfd_ctx *new;
+
+new = (struct userfaultfd_ctx *)
+(unsigned long)
+ewq->msg.arg.reserved.reserved1;
+
+userfaultfd_ctx_put(new);
+}
 break;
 }

@@ -566,9 +573,8 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 * ctx may go away after this if the userfault pseudo fd is
 * already released.
 */
-
+out:
 userfaultfd_ctx_put(ctx);
-return ret;
 }

 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
@@ -626,7 +632,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 return 0;
 }

-static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
+static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
 {
 struct userfaultfd_ctx *ctx = fctx->orig;
 struct userfaultfd_wait_queue ewq;
@@ -636,17 +642,15 @@ static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
 ewq.msg.event = UFFD_EVENT_FORK;
 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

-return userfaultfd_event_wait_completion(ctx, &ewq);
+userfaultfd_event_wait_completion(ctx, &ewq);
 }

 void dup_userfaultfd_complete(struct list_head *fcs)
 {
-int ret = 0;
 struct userfaultfd_fork_ctx *fctx, *n;

 list_for_each_entry_safe(fctx, n, fcs, list) {
-if (!ret)
-ret = dup_fctx(fctx);
+dup_fctx(fctx);
 list_del(&fctx->list);
 kfree(fctx);
 }
@@ -689,8 +693,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
 userfaultfd_event_wait_completion(ctx, &ewq);
 }

-void userfaultfd_remove(struct vm_area_struct *vma,
-struct vm_area_struct **prev,
+bool userfaultfd_remove(struct vm_area_struct *vma,
 unsigned long start, unsigned long end)
 {
 struct mm_struct *mm = vma->vm_mm;
@@ -699,13 +702,11 @@ void userfaultfd_remove(struct vm_area_struct *vma,

 ctx = vma->vm_userfaultfd_ctx.ctx;
 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
-return;
+return true;

 userfaultfd_ctx_get(ctx);
 up_read(&mm->mmap_sem);

-*prev = NULL; /* We wait for ACK w/o the mmap semaphore */
-
 msg_init(&ewq.msg);

 ewq.msg.event = UFFD_EVENT_REMOVE;
@@ -714,7 +715,7 @@ void userfaultfd_remove(struct vm_area_struct *vma,

 userfaultfd_event_wait_completion(ctx, &ewq);

-down_read(&mm->mmap_sem);
+return false;
 }

 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
@@ -775,34 +776,6 @@ void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
 }
 }

-void userfaultfd_exit(struct mm_struct *mm)
-{
-struct vm_area_struct *vma = mm->mmap;
-
-/*
- * We can do the vma walk without locking because the caller
- * (exit_mm) knows it now has exclusive access
- */
-while (vma) {
-struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
-
-if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) {
-struct userfaultfd_wait_queue ewq;
-
-userfaultfd_ctx_get(ctx);
-
-msg_init(&ewq.msg);
-ewq.msg.event = UFFD_EVENT_EXIT;
-
-userfaultfd_event_wait_completion(ctx, &ewq);
-
-ctx->features &= ~UFFD_FEATURE_EVENT_EXIT;
-}
-
-vma = vma->vm_next;
-}
-}
-
 static int userfaultfd_release(struct inode *inode, struct file *file)
 {
 struct userfaultfd_ctx *ctx = file->private_data;
@@ -20,7 +20,7 @@
 #define CS42L42_HPOUT_LOAD_1NF 0
 #define CS42L42_HPOUT_LOAD_10NF 1

-/* HPOUT Clamp to GND Overide */
+/* HPOUT Clamp to GND Override */
 #define CS42L42_HPOUT_CLAMP_EN 0
 #define CS42L42_HPOUT_CLAMP_DIS 1

@@ -2678,7 +2678,7 @@ static const char * const kernel_read_file_str[] = {

 static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
 {
-if (id < 0 || id >= READING_MAX_ID)
+if ((unsigned)id >= READING_MAX_ID)
 return kernel_read_file_str[READING_UNKNOWN];

 return kernel_read_file_str[id];
@@ -65,7 +65,7 @@ struct regulator_state {
 int uV; /* suspend voltage */
 unsigned int mode; /* suspend regulator operating mode */
 int enabled; /* is regulator enabled in this suspend state */
-int disabled; /* is the regulator disbled in this suspend state */
+int disabled; /* is the regulator disabled in this suspend state */
 };

 /**
@@ -61,8 +61,7 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
 unsigned long from, unsigned long to,
 unsigned long len);

-extern void userfaultfd_remove(struct vm_area_struct *vma,
-struct vm_area_struct **prev,
+extern bool userfaultfd_remove(struct vm_area_struct *vma,
 unsigned long start,
 unsigned long end);

@@ -72,8 +71,6 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 extern void userfaultfd_unmap_complete(struct mm_struct *mm,
 struct list_head *uf);

-extern void userfaultfd_exit(struct mm_struct *mm);
-
 #else /* CONFIG_USERFAULTFD */

 /* mm helpers */
@@ -120,11 +117,11 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
 {
 }

-static inline void userfaultfd_remove(struct vm_area_struct *vma,
-struct vm_area_struct **prev,
+static inline bool userfaultfd_remove(struct vm_area_struct *vma,
 unsigned long start,
 unsigned long end)
 {
+return true;
 }

 static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
@@ -139,10 +136,6 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
 {
 }

-static inline void userfaultfd_exit(struct mm_struct *mm)
-{
-}
-
 #endif /* CONFIG_USERFAULTFD */

 #endif /* _LINUX_USERFAULTFD_K_H */
@@ -79,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 THP_SPLIT_PAGE_FAILED,
 THP_DEFERRED_SPLIT_PAGE,
 THP_SPLIT_PMD,
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+THP_SPLIT_PUD,
+#endif
 THP_ZERO_PAGE_ALLOC,
 THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
@@ -59,7 +59,7 @@ struct lap_cb;
 * Slot timer must never exceed 85 ms, and must always be at least 25 ms,
 * suggested to 75-85 msec by IrDA lite. This doesn't work with a lot of
 * devices, and other stackes uses a lot more, so it's best we do it as well
- * (Note : this is the default value and sysctl overides it - Jean II)
+ * (Note : this is the default value and sysctl overrides it - Jean II)
 */
 #define SLOT_TIMEOUT (90*HZ/1000)

@@ -18,8 +18,7 @@
 * means the userland is reading).
 */
 #define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \
-UFFD_FEATURE_EVENT_FORK | \
+#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \
 UFFD_FEATURE_EVENT_REMAP | \
 UFFD_FEATURE_EVENT_REMOVE | \
 UFFD_FEATURE_EVENT_UNMAP | \
@@ -113,7 +112,6 @@ struct uffd_msg {
 #define UFFD_EVENT_REMAP 0x14
 #define UFFD_EVENT_REMOVE 0x15
 #define UFFD_EVENT_UNMAP 0x16
-#define UFFD_EVENT_EXIT 0x17

 /* flags for UFFD_EVENT_PAGEFAULT */
 #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
@@ -163,7 +161,6 @@ struct uffdio_api {
 #define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4)
 #define UFFD_FEATURE_MISSING_SHMEM (1<<5)
 #define UFFD_FEATURE_EVENT_UNMAP (1<<6)
-#define UFFD_FEATURE_EVENT_EXIT (1<<7)
 __u64 features;

 __u64 ioctls;
@@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css)
 *
 * Returns 0 on success, -errno on failure. On failure, csses which have
 * been processed already aren't cleaned up. The caller is responsible for
- * cleaning up with cgroup_apply_control_disble().
+ * cleaning up with cgroup_apply_control_disable().
 */
 static int cgroup_apply_control_enable(struct cgroup *cgrp)
 {
@@ -998,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event,
 */
 #define PERF_CPU_HRTIMER (1000 / HZ)
 /*
- * function must be called with interrupts disbled
+ * function must be called with interrupts disabled
 */
 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
@@ -554,7 +554,6 @@ static void exit_mm(void)
 enter_lazy_tlb(mm, current);
 task_unlock(current);
 mm_update_next_owner(mm);
-userfaultfd_exit(mm);
 mmput(mm);
 if (test_thread_flag(TIF_MEMDIE))
 exit_oom_victim();
@@ -65,7 +65,7 @@ void stack_trace_print(void)
 }

 /*
- * When arch-specific code overides this function, the following
+ * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 * stack_trace_index[]
@@ -1828,7 +1828,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

-count_vm_event(THP_SPLIT_PMD);
+count_vm_event(THP_SPLIT_PUD);

 pudp_huge_clear_flush_notify(vma, haddr, pud);
 }
@@ -25,6 +25,7 @@
 #include <linux/printk.h>
 #include <linux/shrinker.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/string.h>
 #include <linux/types.h>

@@ -103,6 +104,7 @@ static int quarantine_tail;
 /* Total size of all objects in global_quarantine across all batches. */
 static unsigned long quarantine_size;
 static DEFINE_SPINLOCK(quarantine_lock);
+DEFINE_STATIC_SRCU(remove_cache_srcu);

 /* Maximum size of the global queue. */
 static unsigned long quarantine_max_size;
@@ -173,17 +175,22 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 struct qlist_head *q;
 struct qlist_head temp = QLIST_INIT;

+/*
+ * Note: irq must be disabled until after we move the batch to the
+ * global quarantine. Otherwise quarantine_remove_cache() can miss
+ * some objects belonging to the cache if they are in our local temp
+ * list. quarantine_remove_cache() executes on_each_cpu() at the
+ * beginning which ensures that it either sees the objects in per-cpu
+ * lists or in the global quarantine.
+ */
 local_irq_save(flags);

 q = this_cpu_ptr(&cpu_quarantine);
 qlist_put(q, &info->quarantine_link, cache->size);
-if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
+if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
 qlist_move_all(q, &temp);

-local_irq_restore(flags);
-
-if (unlikely(!qlist_empty(&temp))) {
-spin_lock_irqsave(&quarantine_lock, flags);
+spin_lock(&quarantine_lock);
 WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
 qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
 if (global_quarantine[quarantine_tail].bytes >=
@@ -196,20 +203,33 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 if (new_tail != quarantine_head)
 quarantine_tail = new_tail;
 }
-spin_unlock_irqrestore(&quarantine_lock, flags);
+spin_unlock(&quarantine_lock);
 }
+
+local_irq_restore(flags);
 }

 void quarantine_reduce(void)
 {
 size_t total_size, new_quarantine_size, percpu_quarantines;
 unsigned long flags;
+int srcu_idx;
 struct qlist_head to_free = QLIST_INIT;

 if (likely(READ_ONCE(quarantine_size) <=
 READ_ONCE(quarantine_max_size)))
 return;

+/*
+ * srcu critical section ensures that quarantine_remove_cache()
+ * will not miss objects belonging to the cache while they are in our
+ * local to_free list. srcu is chosen because (1) it gives us private
+ * grace period domain that does not interfere with anything else,
+ * and (2) it allows synchronize_srcu() to return without waiting
+ * if there are no pending read critical sections (which is the
+ * expected case).
+ */
+srcu_idx = srcu_read_lock(&remove_cache_srcu);
 spin_lock_irqsave(&quarantine_lock, flags);

 /*
@@ -237,6 +257,7 @@ void quarantine_reduce(void)
 spin_unlock_irqrestore(&quarantine_lock, flags);

 qlist_free_all(&to_free, NULL);
+srcu_read_unlock(&remove_cache_srcu, srcu_idx);
 }

 static void qlist_move_cache(struct qlist_head *from,
@@ -280,12 +301,28 @@ void quarantine_remove_cache(struct kmem_cache *cache)
 unsigned long flags, i;
 struct qlist_head to_free = QLIST_INIT;

+/*
+ * Must be careful to not miss any objects that are being moved from
+ * per-cpu list to the global quarantine in quarantine_put(),
+ * nor objects being freed in quarantine_reduce(). on_each_cpu()
+ * achieves the first goal, while synchronize_srcu() achieves the
+ * second.
+ */
 on_each_cpu(per_cpu_remove_cache, cache, 1);

 spin_lock_irqsave(&quarantine_lock, flags);
-for (i = 0; i < QUARANTINE_BATCHES; i++)
+for (i = 0; i < QUARANTINE_BATCHES; i++) {
+if (qlist_empty(&global_quarantine[i]))
+continue;
 qlist_move_cache(&global_quarantine[i], &to_free, cache);
+/* Scanning whole quarantine can take a while. */
+spin_unlock_irqrestore(&quarantine_lock, flags);
+cond_resched();
+spin_lock_irqsave(&quarantine_lock, flags);
+}
 spin_unlock_irqrestore(&quarantine_lock, flags);

 qlist_free_all(&to_free, cache);
+
+synchronize_srcu(&remove_cache_srcu);
 }
mm/madvise.c

@@ -513,7 +513,43 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 if (!can_madv_dontneed_vma(vma))
 return -EINVAL;

-userfaultfd_remove(vma, prev, start, end);
+if (!userfaultfd_remove(vma, start, end)) {
+*prev = NULL; /* mmap_sem has been dropped, prev is stale */
+
+down_read(&current->mm->mmap_sem);
+vma = find_vma(current->mm, start);
+if (!vma)
+return -ENOMEM;
+if (start < vma->vm_start) {
+/*
+ * This "vma" under revalidation is the one
+ * with the lowest vma->vm_start where start
+ * is also < vma->vm_end. If start <
+ * vma->vm_start it means an hole materialized
+ * in the user address space within the
+ * virtual range passed to MADV_DONTNEED.
+ */
+return -ENOMEM;
+}
+if (!can_madv_dontneed_vma(vma))
+return -EINVAL;
+if (end > vma->vm_end) {
+/*
+ * Don't fail if end > vma->vm_end. If the old
+ * vma was splitted while the mmap_sem was
+ * released the effect of the concurrent
+ * operation may not cause MADV_DONTNEED to
+ * have an undefined result. There may be an
+ * adjacent next vma that we'll walk
+ * next. userfaultfd_remove() will generate an
+ * UFFD_EVENT_REMOVE repetition on the
+ * end-vma->vm_end range, but the manager can
+ * handle a repetition fine.
+ */
+end = vma->vm_end;
+}
+VM_WARN_ON(start >= end);
+}
 zap_page_range(vma, start, end - start);
 return 0;
 }

@@ -554,8 +590,10 @@ static long madvise_remove(struct vm_area_struct *vma,
 * mmap_sem.
 */
 get_file(f);
-userfaultfd_remove(vma, prev, start, end);
-up_read(&current->mm->mmap_sem);
+if (userfaultfd_remove(vma, start, end)) {
+/* mmap_sem was not released by userfaultfd_remove() */
+up_read(&current->mm->mmap_sem);
+}
 error = vfs_fallocate(f,
 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 offset, end - start);
@@ -1118,7 +1118,10 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 }
 } while (left < right);

-return min(PHYS_PFN(type->regions[right].base), max_pfn);
+if (right == type->cnt)
+return max_pfn;
+else
+return min(PHYS_PFN(type->regions[right].base), max_pfn);
 }

 /**
@@ -466,6 +466,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 struct mem_cgroup_tree_per_node *mctz;

 mctz = soft_limit_tree_from_page(page);
+if (!mctz)
+return;
 /*
 * Necessary to update all ancestors when hierarchy is used.
 * because their event counter is not touched.
@@ -503,7 +505,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 for_each_node(nid) {
 mz = mem_cgroup_nodeinfo(memcg, nid);
 mctz = soft_limit_tree_node(nid);
-mem_cgroup_remove_exceeded(mz, mctz);
+if (mctz)
+mem_cgroup_remove_exceeded(mz, mctz);
 }
 }

@@ -2558,7 +2561,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 * is empty. Do it lockless to prevent lock bouncing. Races
 * are acceptable as soft limit is best effort anyway.
 */
-if (RB_EMPTY_ROOT(&mctz->rb_root))
+if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
 return 0;

 /*
@@ -4135,17 +4138,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
 kfree(memcg->nodeinfo[node]);
 }

-static void mem_cgroup_free(struct mem_cgroup *memcg)
+static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
 int node;

-memcg_wb_domain_exit(memcg);
 for_each_node(node)
 free_mem_cgroup_per_node_info(memcg, node);
 free_percpu(memcg->stat);
 kfree(memcg);
 }

+static void mem_cgroup_free(struct mem_cgroup *memcg)
+{
+memcg_wb_domain_exit(memcg);
+__mem_cgroup_free(memcg);
+}
+
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 struct mem_cgroup *memcg;
@@ -4196,7 +4204,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 fail:
 if (memcg->id.id > 0)
 idr_remove(&mem_cgroup_idr, memcg->id.id);
-mem_cgroup_free(memcg);
+__mem_cgroup_free(memcg);
 return NULL;
 }

@@ -442,7 +442,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,

 while (start < end) {
 struct page *page;
-unsigned int page_mask;
+unsigned int page_mask = 0;
 unsigned long page_increm;
 struct pagevec pvec;
 struct zone *zone;
@@ -456,8 +456,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 * suits munlock very well (and if somehow an abnormal page
 * has sneaked into the range, we won't oops here: great).
 */
-page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-&page_mask);
+page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

 if (page && !IS_ERR(page)) {
 if (PageTransTail(page)) {
@@ -468,8 +467,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 /*
 * Any THP page found by follow_page_mask() may
 * have gotten split before reaching
- * munlock_vma_page(), so we need to recompute
- * the page_mask here.
+ * munlock_vma_page(), so we need to compute
+ * the page_mask here instead.
 */
 page_mask = munlock_vma_page(page);
 unlock_page(page);
mm/rmap.c

@@ -1316,12 +1316,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 }

 while (page_vma_mapped_walk(&pvmw)) {
-subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-address = pvmw.address;
-
-/* Unexpected PMD-mapped THP? */
-VM_BUG_ON_PAGE(!pvmw.pte, page);
-
 /*
 * If the page is mlock()d, we cannot swap it out.
 * If it's recently referenced (perhaps page_referenced

@@ -1345,6 +1339,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 continue;
 }

+/* Unexpected PMD-mapped THP? */
+VM_BUG_ON_PAGE(!pvmw.pte, page);
+
+subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+address = pvmw.address;
+
+
 if (!(flags & TTU_IGNORE_ACCESS)) {
 if (ptep_clear_flush_young_notify(vma, address,
 pvmw.pte)) {
@@ -1065,6 +1065,9 @@ const char * const vmstat_text[] = {
 "thp_split_page_failed",
 "thp_deferred_split_page",
 "thp_split_pmd",
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+"thp_split_pud",
+#endif
 "thp_zero_page_alloc",
 "thp_zero_page_alloc_failed",
 #endif
@@ -372,6 +372,8 @@ disassocation||disassociation
 disapear||disappear
 disapeared||disappeared
 disappared||disappeared
+disble||disable
+disbled||disabled
 disconnet||disconnect
 discontinous||discontinuous
 dispertion||dispersion
@@ -732,6 +734,7 @@ oustanding||outstanding
 overaall||overall
 overhread||overhead
 overlaping||overlapping
+overide||override
 overrided||overridden
 overriden||overridden
 overun||overrun
@@ -89,7 +89,7 @@ static void acp_reg_write(u32 val, void __iomem *acp_mmio, u32 reg)
 writel(val, acp_mmio + (reg * 4));
 }

-/* Configure a given dma channel parameters - enable/disble,
+/* Configure a given dma channel parameters - enable/disable,
 * number of descriptors, priority
 */
 static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num,
@@ -1387,7 +1387,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
 /* Allow writing to any other BAR, or expansion ROM */
 iowrite(portoff, val, mask, &d->config_words[reg]);
 return true;
-/* We let them overide latency timer and cacheline size */
+/* We let them override latency timer and cacheline size */
 } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
 /* Only let them change the first two fields. */
 if (mask == 0xFFFFFFFF)
@@ -132,7 +132,7 @@ else
 Q = @
 endif

-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
@@ -135,7 +135,7 @@ else
 Q = @
 endif

-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
@@ -140,7 +140,7 @@ struct pevent_plugin_option {
 * struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
 * {
 * .name = "option-name",
- * .plugin_alias = "overide-file-name", (optional)
+ * .plugin_alias = "override-file-name", (optional)
 * .description = "description of option to show users",
 * },
 * {
@@ -1,5 +1,9 @@
 # Makefile for vm selftests

+ifndef OUTPUT
+OUTPUT := $(shell pwd)
+endif
+
 CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
 LDLIBS = -lrt
 TEST_GEN_FILES = compaction_test