Merge branch 'akpm' (patches from Andrew)
Merge first patchbomb from Andrew Morton:

 - arch/sh updates

 - ocfs2 updates

 - kernel/watchdog feature

 - about half of mm/

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (122 commits)
  Documentation: update arch list in the 'memtest' entry
  Kconfig: memtest: update number of test patterns up to 17
  arm: add support for memtest
  arm64: add support for memtest
  memtest: use phys_addr_t for physical addresses
  mm: move memtest under mm
  mm, hugetlb: abort __get_user_pages if current has been oom killed
  mm, mempool: do not allow atomic resizing
  memcg: print cgroup information when system panics due to panic_on_oom
  mm: numa: remove migrate_ratelimited
  mm: fold arch_randomize_brk into ARCH_HAS_ELF_RANDOMIZE
  mm: split ET_DYN ASLR from mmap ASLR
  s390: redefine randomize_et_dyn for ELF_ET_DYN_BASE
  mm: expose arch_mmap_rnd when available
  s390: standardize mmap_rnd() usage
  powerpc: standardize mmap_rnd() usage
  mips: extract logic for mmap_rnd()
  arm64: standardize mmap_rnd() usage
  x86: standardize mmap_rnd() usage
  arm: factor out mmap ASLR into mmap_rnd
  ...
commit 1dcf58d6e6
@@ -0,0 +1,21 @@
+The CMA debugfs interface is useful to retrieve basic information out of the
+different CMA areas and to test allocation/release in each of the areas.
+
+Each CMA zone represents a directory under <debugfs>/cma/, indexed by the
+kernel's CMA index. So the first CMA zone would be:
+
+	<debugfs>/cma/cma-0
+
+The structure of the files created under that directory is as follows:
+
+ - [RO] base_pfn: The base PFN (Page Frame Number) of the zone.
+ - [RO] count: Amount of memory in the CMA area.
+ - [RO] order_per_bit: Order of pages represented by one bit.
+ - [RO] bitmap: The bitmap of page states in the zone.
+ - [WO] alloc: Allocate N pages from that CMA area. For example:
+
+	echo 5 > <debugfs>/cma/cma-2/alloc
+
+would try to allocate 5 pages from the cma-2 area.
+
+ - [WO] free: Free N pages from that CMA area, similar to the above.
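For completeness, the interface documented in this new file can be driven with ordinary file I/O. A minimal userspace sketch (not part of this commit); the debugfs mount point /sys/kernel/debug is an assumption, and most error handling is elided:

#include <stdio.h>

int main(void)
{
	unsigned long count;
	FILE *f;

	/* read back a [RO] attribute of the first CMA zone */
	f = fopen("/sys/kernel/debug/cma/cma-0/count", "r");
	if (f && fscanf(f, "%lu", &count) == 1)
		printf("cma-0 count: %lu\n", count);
	if (f)
		fclose(f);

	/* ask the kernel to allocate, then release, 5 pages */
	f = fopen("/sys/kernel/debug/cma/cma-0/alloc", "w");
	if (f) {
		fprintf(f, "5");
		fclose(f);
	}
	f = fopen("/sys/kernel/debug/cma/cma-0/free", "w");
	if (f) {
		fprintf(f, "5");
		fclose(f);
	}
	return 0;
}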
@@ -1989,7 +1989,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			seconds. Use this parameter to check at some
 			other rate. 0 disables periodic checking.
 
-	memtest=	[KNL,X86] Enable memtest
+	memtest=	[KNL,X86,ARM] Enable memtest
 			Format: <integer>
 			default : 0 <disable>
 			Specifies the number of memtest passes to be
@@ -2236,8 +2236,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	nmi_watchdog=	[KNL,BUGS=X86] Debugging features for SMP kernels
 			Format: [panic,][nopanic,][num]
-			Valid num: 0
+			Valid num: 0 or 1
 			0 - turn nmi_watchdog off
+			1 - turn nmi_watchdog on
 			When panic is specified, panic when an NMI watchdog
 			timeout occurs (or 'nopanic' to override the opposite
 			default).
@@ -2322,6 +2323,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			register save and restore. The kernel will only save
 			legacy floating-point registers on task switch.
 
+	nohugeiomap	[KNL,x86] Disable kernel huge I/O mappings.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -2464,7 +2467,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	nousb		[USB] Disable the USB subsystem
 
-	nowatchdog	[KNL] Disable the lockup detector (NMI watchdog).
+	nowatchdog	[KNL] Disable both lockup detectors, i.e.
+			soft-lockup and NMI watchdog (hard-lockup).
 
 	nowb		[ARM]
 
@@ -77,12 +77,14 @@ show up in /proc/sys/kernel:
 - shmmax                      [ sysv ipc ]
 - shmmni
 - softlockup_all_cpu_backtrace
+- soft_watchdog
 - stop-a                      [ SPARC only ]
 - sysrq                       ==> Documentation/sysrq.txt
 - sysctl_writes_strict
 - tainted
 - threads-max
 - unknown_nmi_panic
 - watchdog
+- watchdog_thresh
 - version
 
@@ -417,16 +419,23 @@ successful IPC object allocation.
 
 nmi_watchdog:
 
-Enables/Disables the NMI watchdog on x86 systems. When the value is
-non-zero the NMI watchdog is enabled and will continuously test all
-online cpus to determine whether or not they are still functioning
-properly. Currently, passing "nmi_watchdog=" parameter at boot time is
-required for this function to work.
+This parameter can be used to control the NMI watchdog
+(i.e. the hard lockup detector) on x86 systems.
 
-If LAPIC NMI watchdog method is in use (nmi_watchdog=2 kernel
-parameter), the NMI watchdog shares registers with oprofile. By
-disabling the NMI watchdog, oprofile may have more registers to
-utilize.
+   0 - disable the hard lockup detector
+   1 - enable the hard lockup detector
+
+The hard lockup detector monitors each CPU for its ability to respond to
+timer interrupts. The mechanism utilizes CPU performance counter registers
+that are programmed to generate Non-Maskable Interrupts (NMIs) periodically
+while a CPU is busy. Hence, the alternative name 'NMI watchdog'.
+
+The NMI watchdog is disabled by default if the kernel is running as a guest
+in a KVM virtual machine. This default can be overridden by adding
+
+   nmi_watchdog=1
+
+to the guest kernel command line (see Documentation/kernel-parameters.txt).
 
 ==============================================================
 
@@ -816,6 +825,22 @@ NMI.
 
 ==============================================================
 
+soft_watchdog
+
+This parameter can be used to control the soft lockup detector.
+
+   0 - disable the soft lockup detector
+   1 - enable the soft lockup detector
+
+The soft lockup detector monitors CPUs for threads that are hogging the CPUs
+without rescheduling voluntarily, and thus prevent the 'watchdog/N' threads
+from running. The mechanism depends on the CPUs ability to respond to timer
+interrupts which are needed for the 'watchdog/N' threads to be woken up by
+the watchdog timer function, otherwise the NMI watchdog - if enabled - can
+detect a hard lockup condition.
+
+==============================================================
+
 tainted:
 
 Non-zero if the kernel has been tainted. Numeric values, which
@@ -858,6 +883,25 @@ example. If a system hangs up, try pressing the NMI switch.
 
 ==============================================================
 
+watchdog:
+
+This parameter can be used to disable or enable the soft lockup detector
+_and_ the NMI watchdog (i.e. the hard lockup detector) at the same time.
+
+   0 - disable both lockup detectors
+   1 - enable both lockup detectors
+
+The soft lockup detector and the NMI watchdog can also be disabled or
+enabled individually, using the soft_watchdog and nmi_watchdog parameters.
+If the watchdog parameter is read, for example by executing
+
+   cat /proc/sys/kernel/watchdog
+
+the output of this command (0 or 1) shows the logical OR of soft_watchdog
+and nmi_watchdog.
+
+==============================================================
+
 watchdog_thresh:
 
 This value can be used to control the frequency of hrtimer and NMI
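The logical-OR semantics described above are easy to observe from userspace. A small C sketch (not part of this commit), assuming the standard /proc/sys/kernel paths:

#include <stdio.h>

static int read_knob(const char *path)
{
	int v = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		fscanf(f, "%d", &v);
		fclose(f);
	}
	return v;
}

int main(void)
{
	int soft = read_knob("/proc/sys/kernel/soft_watchdog");
	int nmi  = read_knob("/proc/sys/kernel/nmi_watchdog");
	int both = read_knob("/proc/sys/kernel/watchdog");

	/* 'watchdog' reads as soft_watchdog | nmi_watchdog */
	printf("%d | %d == %d\n", soft, nmi, both);
	return 0;
}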
@@ -28,9 +28,7 @@ IMPLEMENTATION OVERVIEW
 A cleancache "backend" that provides transcendent memory registers itself
 to the kernel's cleancache "frontend" by calling cleancache_register_ops,
 passing a pointer to a cleancache_ops structure with funcs set appropriately.
-Note that cleancache_register_ops returns the previous settings so that
-chaining can be performed if desired. The functions provided must conform to
-certain semantics as follows:
+The functions provided must conform to certain semantics as follows:
 
 Most important, cleancache is "ephemeral". Pages which are copied into
 cleancache have an indefinite lifetime which is completely unknowable
|
|||
below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
|
||||
off a subset of the VMA if the range does not cover the entire VMA. Once the
|
||||
VMA has been merged or split or neither, mlock_fixup() will call
|
||||
__mlock_vma_pages_range() to fault in the pages via get_user_pages() and to
|
||||
populate_vma_page_range() to fault in the pages via get_user_pages() and to
|
||||
mark the pages as mlocked via mlock_vma_page().
|
||||
|
||||
Note that the VMA being mlocked might be mapped with PROT_NONE. In this case,
|
||||
|
@@ -327,7 +327,7 @@ fault path or in vmscan.
 
 Also note that a page returned by get_user_pages() could be truncated or
 migrated out from under us, while we're trying to mlock it. To detect this,
-__mlock_vma_pages_range() checks page_mapping() after acquiring the page lock.
+populate_vma_page_range() checks page_mapping() after acquiring the page lock.
 If the page is still associated with its mapping, we'll go ahead and call
 mlock_vma_page(). If the mapping is gone, we just unlock the page and move on.
 In the worst case, this will result in a page mapped in a VM_LOCKED VMA
@@ -392,7 +392,7 @@ ignored for munlock.
 
 If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
 specified range. The range is then munlocked via the function
-__mlock_vma_pages_range() - the same function used to mlock a VMA range -
+populate_vma_page_range() - the same function used to mlock a VMA range -
 passing a flag to indicate that munlock() is being performed.
 
 Because the VMA access protections could have been changed to PROT_NONE after
@@ -402,7 +402,7 @@ get_user_pages() was enhanced to accept a flag to ignore the permissions when
 fetching the pages - all of which should be resident as a result of previous
 mlocking.
 
-For munlock(), __mlock_vma_pages_range() unlocks individual pages by calling
+For munlock(), populate_vma_page_range() unlocks individual pages by calling
 munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked
 flag using TestClearPageMlocked(). As with mlock_vma_page(),
 munlock_vma_page() use the Test*PageMlocked() function to handle the case where
@@ -463,21 +463,11 @@ populate the page table.
 
 To mlock a range of memory under the unevictable/mlock infrastructure, the
 mmap() handler and task address space expansion functions call
-mlock_vma_pages_range() specifying the vma and the address range to mlock.
-mlock_vma_pages_range() filters VMAs like mlock_fixup(), as described above in
-"Filtering Special VMAs". It will clear the VM_LOCKED flag, which will have
-already been set by the caller, in filtered VMAs. Thus these VMA's need not be
-visited for munlock when the region is unmapped.
-
-For "normal" VMAs, mlock_vma_pages_range() calls __mlock_vma_pages_range() to
-fault/allocate the pages and mlock them. Again, like mlock_fixup(),
-mlock_vma_pages_range() downgrades the mmap semaphore to read mode before
-attempting to fault/allocate and mlock the pages and "upgrades" the semaphore
-back to write mode before returning.
+populate_vma_page_range() specifying the vma and the address range to mlock.
 
-The callers of mlock_vma_pages_range() will have already added the memory range
+The callers of populate_vma_page_range() will have already added the memory range
 to be mlocked to the task's "locked_vm". To account for filtered VMAs,
-mlock_vma_pages_range() returns the number of pages NOT mlocked. All of the
+populate_vma_page_range() returns the number of pages NOT mlocked. All of the
 callers then subtract a non-negative return value from the task's locked_vm. A
 negative return value represent an error - for example, from get_user_pages()
 attempting to fault in a VMA with PROT_NONE access. In this case, we leave the
arch/Kconfig (15 lines changed)

@@ -446,6 +446,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	bool
 
+config HAVE_ARCH_HUGE_VMAP
+	bool
+
 config HAVE_ARCH_SOFT_DIRTY
 	bool
 
@@ -484,6 +487,18 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
 	  This spares a stack switch and improves cache usage on softirq
 	  processing.
 
+config PGTABLE_LEVELS
+	int
+	default 2
+
+config ARCH_HAS_ELF_RANDOMIZE
+	bool
+	help
+	  An architecture supports choosing randomized locations for
+	  stack, mmap, brk, and ET_DYN. Defined functions:
+	  - arch_mmap_rnd()
+	  - arch_randomize_brk()
+
 #
 # ABI hall of shame
 #
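The help text above names the two hooks an ARCH_HAS_ELF_RANDOMIZE architecture must supply. A minimal sketch of their expected shape; the bodies are illustrative placeholders (8 random bits and a 32MB brk window are assumptions), not any particular architecture's implementation:

/* Sketch only: every ARCH_HAS_ELF_RANDOMIZE architecture provides these. */
unsigned long arch_mmap_rnd(void)
{
	/* e.g. 8 random bits scaled to page units (illustrative width) */
	return ((unsigned long)get_random_int() % (1 << 8)) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* pick a randomized brk above the current one; 32MB window assumed */
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}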
@@ -76,6 +76,10 @@ config GENERIC_ISA_DMA
 	bool
 	default y
 
+config PGTABLE_LEVELS
+	int
+	default 3
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -1,8 +1,8 @@
 config ARM
 	bool
 	default y
-	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_HAS_GCOV_PROFILE_ALL

@@ -286,6 +286,11 @@ config GENERIC_BUG
 	def_bool y
 	depends on BUG
 
+config PGTABLE_LEVELS
+	int
+	default 3 if ARM_LPAE
+	default 2
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -125,10 +125,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex)	elf_set_personality(&(ex))
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
@@ -335,6 +335,9 @@ void __init bootmem_init(void)
 
 	find_limits(&min, &max_low, &max_high);
 
+	early_memtest((phys_addr_t)min << PAGE_SHIFT,
+		      (phys_addr_t)max_low << PAGE_SHIFT);
+
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(),
 	 * so must be done after the fixed reservations
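The early_memtest() call above hands the range to the generic memtest code that this series moves under mm/. Its core is a fill-and-verify pass per pattern; a simplified sketch, where the function name and the per-word reservation granularity are illustrative (the real mm/memtest.c coalesces adjacent bad words into regions before reserving them):

static void __init memtest_pattern(u64 pattern, phys_addr_t start,
				   phys_addr_t end)
{
	u64 *p, *s, *e;

	s = (u64 *)__va(start);
	e = (u64 *)__va(end);

	for (p = s; p < e; p++)		/* write the pattern ... */
		*p = pattern;
	for (p = s; p < e; p++)		/* ... and verify it survived */
		if (*p != pattern)
			memblock_reserve(__pa(p), sizeof(*p)); /* bad RAM */
}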
@@ -169,14 +169,22 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	return addr;
 }
 
+unsigned long arch_mmap_rnd(void)
+{
+	unsigned long rnd;
+
+	/* 8 bits of randomness in 20 address space bits */
+	rnd = (unsigned long)get_random_int() % (1 << 8);
+
+	return rnd << PAGE_SHIFT;
+}
+
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	unsigned long random_factor = 0UL;
 
-	/* 8 bits of randomness in 20 address space bits */
-	if ((current->flags & PF_RANDOMIZE) &&
-	    !(current->personality & ADDR_NO_RANDOMIZE))
-		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = arch_mmap_rnd();
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
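A quick worked example of the "8 bits of randomness in 20 address space bits" comment above: with 4K pages PAGE_SHIFT is 12, so the 8 random bits land in address bits 12-19, i.e. the mmap base moves within a 1 MiB (2^20 byte) window:

#include <stdio.h>

int main(void)
{
	unsigned long bits = 0xa5;	   /* pretend get_random_int() % 256 */
	unsigned long offset = bits << 12; /* PAGE_SHIFT == 12 for 4K pages */

	printf("%#lx\n", offset);	   /* 0xa5000, inside a 1 MiB window */
	return 0;
}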
@@ -1,7 +1,7 @@
 config ARM64
 	def_bool y
-	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -143,6 +143,13 @@ config KERNEL_MODE_NEON
 config FIX_EARLYCON_MEM
 	def_bool y
 
+config PGTABLE_LEVELS
+	int
+	default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
+	default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
+	default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
+	default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"

@@ -413,13 +420,6 @@ config ARM64_VA_BITS
 	default 42 if ARM64_VA_BITS_42
 	default 48 if ARM64_VA_BITS_48
 
-config ARM64_PGTABLE_LEVELS
-	int
-	default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
-	default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
-	default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
-	default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
-
 config CPU_BIG_ENDIAN
 	bool "Build big-endian kernel"
 	help
@@ -125,7 +125,6 @@ typedef struct user_fpsimd_state elf_fpregset_t;
  * the loader. We need to make sure that it is out of the way of the program
  * that it will "exec", and that there is sufficient room for the brk.
  */
-extern unsigned long randomize_et_dyn(unsigned long base);
 #define ELF_ET_DYN_BASE	(2 * TASK_SIZE_64 / 3)
 
 /*

@@ -157,10 +156,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define STACK_RND_MASK			(0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_COMPAT
 
 #ifdef __AARCH64EB__
@@ -163,12 +163,12 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 /*
  * If we are concatenating first level stage-2 page tables, we would have less
  * than or equal to 16 pointers in the fake PGD, because that's what the
- * architecture allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS)
+ * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
  * represents the first level for the host, and we add 1 to go to the next
  * level (which uses contatenation) for the stage-2 tables.
  */
 #if PTRS_PER_S2_PGD <= 16
-#define KVM_PREALLOC_LEVEL	(4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
 #else
 #define KVM_PREALLOC_LEVEL	(0)
 #endif
@@ -36,9 +36,9 @@
  * for more information).
  */
 #ifdef CONFIG_ARM64_64K_PAGES
-#define SWAPPER_PGTABLE_LEVELS	(CONFIG_ARM64_PGTABLE_LEVELS)
+#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
 #else
-#define SWAPPER_PGTABLE_LEVELS	(CONFIG_ARM64_PGTABLE_LEVELS - 1)
+#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
 #endif
 
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
@@ -28,7 +28,7 @@
 
 #define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {

@@ -46,9 +46,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }
 
-#endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {

@@ -66,7 +66,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 	set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
 }
 
-#endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -21,7 +21,7 @@
 /*
  * PMD_SHIFT determines the size a level 2 page table entry can map.
  */
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 #define PMD_SHIFT		((PAGE_SHIFT - 3) * 2 + 3)
 #define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
 #define PMD_MASK		(~(PMD_SIZE-1))

@@ -31,7 +31,7 @@
 /*
  * PUD_SHIFT determines the size a level 1 page table entry can map.
  */
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 #define PUD_SHIFT		((PAGE_SHIFT - 3) * 3 + 3)
 #define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
 #define PUD_MASK		(~(PUD_SIZE-1))

@@ -42,7 +42,7 @@
  * PGDIR_SHIFT determines the size a top-level page table entry can map
  * (depending on the configuration, this level can be 0, 1 or 2).
  */
-#define PGDIR_SHIFT		((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3)
+#define PGDIR_SHIFT		((PAGE_SHIFT - 3) * CONFIG_PGTABLE_LEVELS + 3)
 #define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK		(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))
@@ -38,13 +38,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define pte_val(x)	((x).pte)
 #define __pte(x)	((pte_t) { (x) } )
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 typedef struct { pmdval_t pmd; } pmd_t;
 #define pmd_val(x)	((x).pmd)
 #define __pmd(x)	((pmd_t) { (x) } )
 #endif
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 typedef struct { pudval_t pud; } pud_t;
 #define pud_val(x)	((x).pud)
 #define __pud(x)	((pud_t) { (x) } )

@@ -64,13 +64,13 @@ typedef pteval_t pte_t;
 #define pte_val(x)	(x)
 #define __pte(x)	(x)
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 typedef pmdval_t pmd_t;
 #define pmd_val(x)	(x)
 #define __pmd(x)	(x)
 #endif
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 typedef pudval_t pud_t;
 #define pud_val(x)	(x)
 #define __pud(x)	(x)

@@ -86,9 +86,9 @@ typedef pteval_t pgprot_t;
 
 #endif /* STRICT_MM_TYPECHECKS */
 
-#if CONFIG_ARM64_PGTABLE_LEVELS == 2
+#if CONFIG_PGTABLE_LEVELS == 2
 #include <asm-generic/pgtable-nopmd.h>
-#elif CONFIG_ARM64_PGTABLE_LEVELS == 3
+#elif CONFIG_PGTABLE_LEVELS == 3
 #include <asm-generic/pgtable-nopud.h>
 #endif
 
@@ -374,7 +374,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
  */
 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 
 #define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
 

@@ -409,9 +409,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
-#endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 
 #define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))
 

@@ -445,7 +445,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 
 #define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
 
-#endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
 
@@ -53,7 +53,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	tlb_remove_entry(tlb, pte);
 }
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {

@@ -62,7 +62,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif
 
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
@@ -190,6 +190,8 @@ void __init bootmem_init(void)
 	min = PFN_UP(memblock_start_of_DRAM());
 	max = PFN_DOWN(memblock_end_of_DRAM());
 
+	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
+
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
 	 * done after the fixed reservations.
@@ -47,17 +47,16 @@ static int mmap_is_legacy(void)
 	return sysctl_legacy_va_layout;
 }
 
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
 {
-	unsigned long rnd = 0;
+	unsigned long rnd;
 
-	if (current->flags & PF_RANDOMIZE)
-		rnd = (long)get_random_int() & STACK_RND_MASK;
+	rnd = (unsigned long)get_random_int() & STACK_RND_MASK;
 
 	return rnd << PAGE_SHIFT;
 }
 
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
 

@@ -66,7 +65,7 @@ static unsigned long mmap_base(void)
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
+	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 }
 
 /*

@@ -75,15 +74,20 @@ static unsigned long mmap_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
+	unsigned long random_factor = 0UL;
+
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = arch_mmap_rnd();
+
 	/*
 	 * Fall back to the standard layout if the personality bit is set, or
 	 * if the expected stack growth is unlimited:
 	 */
 	if (mmap_is_legacy()) {
-		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
-		mm->mmap_base = mmap_base();
+		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
@@ -550,10 +550,10 @@ void vmemmap_free(unsigned long start, unsigned long end)
 #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
 
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
 #endif
 
@@ -1,3 +1,8 @@
+config PGTABLE_LEVELS
+	int "Page Table Levels" if !IA64_PAGE_SIZE_64KB
+	range 3 4 if !IA64_PAGE_SIZE_64KB
+	default 3
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"

@@ -286,19 +291,6 @@ config IA64_PAGE_SIZE_64KB
 
 endchoice
 
-choice
-	prompt "Page Table Levels"
-	default PGTABLE_3
-
-config PGTABLE_3
-	bool "3 Levels"
-
-config PGTABLE_4
-	depends on !IA64_PAGE_SIZE_64KB
-	bool "4 Levels"
-
-endchoice
-
 if IA64_HP_SIM
 config HZ
 	default 32
@@ -173,7 +173,7 @@ get_order (unsigned long size)
  */
   typedef struct { unsigned long pte; } pte_t;
   typedef struct { unsigned long pmd; } pmd_t;
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
   typedef struct { unsigned long pud; } pud_t;
 #endif
   typedef struct { unsigned long pgd; } pgd_t;

@@ -182,7 +182,7 @@ get_order (unsigned long size)
 
 # define pte_val(x)	((x).pte)
 # define pmd_val(x)	((x).pmd)
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 # define pud_val(x)	((x).pud)
 #endif
 # define pgd_val(x)	((x).pgd)
@@ -32,7 +32,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	quicklist_free(0, NULL, pgd);
 }
 
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 static inline void
 pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
 {

@@ -49,7 +49,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 	quicklist_free(0, NULL, pud);
 }
 #define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
-#endif /* CONFIG_PGTABLE_4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
 
 static inline void
 pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
@@ -99,7 +99,7 @@
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))
 
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 /*
  * Definitions for second level:
  *

@@ -117,7 +117,7 @@
  *
  * PGDIR_SHIFT determines what a first-level page table entry can map.
  */
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 #define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
 #else
 #define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))

@@ -180,7 +180,7 @@
 #define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
 
 #define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 #define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
 #endif
 #define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))

@@ -281,7 +281,7 @@ extern unsigned long VMALLOC_END;
 #define pud_page_vaddr(pud)		((unsigned long) __va(pud_val(pud) & _PFN_MASK))
 #define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))
 
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 #define pgd_none(pgd)			(!pgd_val(pgd))
 #define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
 #define pgd_present(pgd)		(pgd_val(pgd) != 0UL)

@@ -384,7 +384,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
    here.  */
 #define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)
 
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 /* Find an entry in the second-level page table.. */
 #define pud_offset(dir,addr) \
 	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))

@@ -586,7 +586,7 @@ extern struct page *zero_page_memmap_ptr;
 #define __HAVE_ARCH_PGD_OFFSET_GATE
 
 
-#ifndef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 3
 #include <asm-generic/pgtable-nopud.h>
 #endif
 #include <asm-generic/pgtable.h>
@@ -146,7 +146,7 @@ ENTRY(vhpt_miss)
 (p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
 (p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
 #else
 	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position

@@ -155,7 +155,7 @@ ENTRY(vhpt_miss)
 	ld8 r17=[r17]				// get *pgd (may be 0)
 	;;
 (p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
 	;;
 	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position

@@ -222,13 +222,13 @@ ENTRY(vhpt_miss)
 	 */
 	ld8 r25=[r21]				// read *pte again
 	ld8 r26=[r17]				// read *pmd again
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 	ld8 r19=[r28]				// read *pud again
 #endif
 	cmp.ne p6,p7=r0,r0
 	;;
 	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
 #endif
 	mov r27=PAGE_SHIFT<<2

@@ -476,7 +476,7 @@ ENTRY(nested_dtlb_miss)
 (p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
 (p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
 #else
 	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position

@@ -487,7 +487,7 @@ ENTRY(nested_dtlb_miss)
 (p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
 	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
 	;;
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
 (p7)	ld8 r17=[r17]				// get *pud (may be 0)
 	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
 	;;
@@ -156,9 +156,9 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_OFFSET(node_memblk_s, start_paddr);
 	VMCOREINFO_OFFSET(node_memblk_s, size);
 #endif
-#ifdef CONFIG_PGTABLE_3
+#if CONFIG_PGTABLE_LEVELS == 3
 	VMCOREINFO_CONFIG(PGTABLE_3);
-#elif defined(CONFIG_PGTABLE_4)
+#elif CONFIG_PGTABLE_LEVELS == 4
 	VMCOREINFO_CONFIG(PGTABLE_4);
 #endif
 }
@@ -67,6 +67,10 @@ config HZ
 	default 1000 if CLEOPATRA
 	default 100
 
+config PGTABLE_LEVELS
+	default 2 if SUN3 || COLDFIRE
+	default 3
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -23,7 +23,7 @@ config MIPS
 	select HAVE_KRETPROBES
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_SYSCALL_TRACEPOINTS
-	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+	select ARCH_HAS_ELF_RANDOMIZE
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
 	select RTC_LIB if !MACH_LOONGSON
 	select GENERIC_ATOMIC64 if !64BIT

@@ -2600,6 +2600,11 @@ config STACKTRACE_SUPPORT
 	bool
 	default y
 
+config PGTABLE_LEVELS
+	int
+	default 3 if 64BIT && !PAGE_SIZE_64KB
+	default 2
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -410,10 +410,6 @@ struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 struct arch_elf_state {
 	int fp_abi;
 	int interp_fp_abi;
@@ -142,18 +142,26 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
 			addr0, len, pgoff, flags, DOWN);
 }
 
+unsigned long arch_mmap_rnd(void)
+{
+	unsigned long rnd;
+
+	rnd = (unsigned long)get_random_int();
+	rnd <<= PAGE_SHIFT;
+	if (TASK_IS_32BIT_ADDR)
+		rnd &= 0xfffffful;
+	else
+		rnd &= 0xffffffful;
+
+	return rnd;
+}
+
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	unsigned long random_factor = 0UL;
 
-	if (current->flags & PF_RANDOMIZE) {
-		random_factor = get_random_int();
-		random_factor = random_factor << PAGE_SHIFT;
-		if (TASK_IS_32BIT_ADDR)
-			random_factor &= 0xfffffful;
-		else
-			random_factor &= 0xffffffful;
-	}
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = arch_mmap_rnd();
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -103,6 +103,11 @@ config ARCH_MAY_HAVE_PC_FDC
 	depends on BROKEN
 	default y
 
+config PGTABLE_LEVELS
+	int
+	default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
+	default 2
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -51,7 +51,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
 }
 
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 
 /* Three Level Page Table Support for pmd's */
 
@@ -68,13 +68,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 #define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-#define PT_NLEVELS	3
+#if CONFIG_PGTABLE_LEVELS == 3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
 #define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
 #else
-#define PT_NLEVELS	2
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PGD_ALLOC_ORDER	PGD_ORDER
 #endif

@@ -93,7 +91,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 #define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 #define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
 #define __PAGETABLE_PMD_FOLDED

@@ -277,7 +275,7 @@ extern unsigned long *empty_zero_page;
 #define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 /* The first entry of the permanent pmd is not there if it contains
  * the gateway marker */
 #define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)

@@ -287,7 +285,7 @@ extern unsigned long *empty_zero_page;
 #define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
 #define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pmd_clear(pmd_t *pmd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
 		/* This is the entry pointing to the permanent pmd
 		 * attached to the pgd; cannot clear it */

@@ -299,7 +297,7 @@ static inline void pmd_clear(pmd_t *pmd) {
 
 
 
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 #define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
 #define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))
 

@@ -309,7 +307,7 @@ static inline void pmd_clear(pmd_t *pmd) {
 #define pgd_bad(x)	(!(pgd_flag(x) & PxD_FLAG_VALID))
 #define pgd_present(x)	(pgd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pgd_clear(pgd_t *pgd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
 		/* This is the permanent pmd attached to the pgd; cannot
 		 * free it */

@@ -393,7 +391,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 /* Find an entry in the second-level page table.. */
 
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 #define pmd_offset(dir,address) \
 	((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
 #else
@@ -398,7 +398,7 @@
 	 * can address up to 1TB
 	 */
 	.macro		L2_ptep	pmd,pte,index,va,fault
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
 # if defined(CONFIG_64BIT)

@@ -436,7 +436,7 @@
 	 * all ILP32 processes and all the kernel for machines with
 	 * under 4GB of memory) */
 	.macro		L3_ptep pgd,pte,index,va,fault
-#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
 	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 	copy		%r0,\pte
 	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
@@ -74,7 +74,7 @@ $bss_loop:
 	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 	mtctl		%r4,%cr25	/* Initialize user root pointer */
 
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 	/* Set pmd in pgd */
 	load32		PA(pmd0),%r5
 	shrd		%r5,PxD_VALUE_SHIFT,%r3

@@ -97,7 +97,7 @@ $bss_loop:
 	stw		%r3,0(%r4)
 	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
 	addib,>		-1,%r1,1b
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 	ldo		ASM_PMD_ENTRY_SIZE(%r4),%r4
 #else
 	ldo		ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -34,7 +34,7 @@
 extern int  data_start;
 extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
 
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
  * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
  * guarantee that global objects will be laid out in memory in the same order
@@ -88,7 +88,7 @@ config PPC
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select BINFMT_ELF
-	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+	select ARCH_HAS_ELF_RANDOMIZE
 	select OF
 	select OF_EARLY_FLATTREE
 	select OF_RESERVED_MEM

@@ -297,6 +297,12 @@ config ZONE_DMA32
 	bool
 	default y if PPC64
 
+config PGTABLE_LEVELS
+	int
+	default 2 if !PPC64
+	default 3 if PPC_64K_PAGES
+	default 4
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -128,10 +128,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 	(0x7ff >> (PAGE_SHIFT - 12)) : \
 	(0x3ffff >> (PAGE_SHIFT - 12)))
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
-
 #ifdef CONFIG_SPU_BASE
 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
 #define NT_SPU		1
@@ -53,21 +53,20 @@ static inline int mmap_is_legacy(void)
 	return sysctl_legacy_va_layout;
 }
 
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
 {
-	unsigned long rnd = 0;
+	unsigned long rnd;
+
+	/* 8MB for 32bit, 1GB for 64bit */
+	if (is_32bit_task())
+		rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
+	else
+		rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
 
-	if (current->flags & PF_RANDOMIZE) {
-		/* 8MB for 32bit, 1GB for 64bit */
-		if (is_32bit_task())
-			rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
-		else
-			rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
-	}
 	return rnd << PAGE_SHIFT;
 }
 
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(unsigned long rnd)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
 

@@ -76,7 +75,7 @@ static inline unsigned long mmap_base(void)
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
 /*

@@ -85,6 +84,11 @@ static inline unsigned long mmap_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
+	unsigned long random_factor = 0UL;
+
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = arch_mmap_rnd();
+
 	/*
 	 * Fall back to the standard layout if the personality
 	 * bit is set, or if the expected stack growth is unlimited:

@@ -93,7 +97,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
-		mm->mmap_base = mmap_base();
+		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
@@ -65,6 +65,7 @@ config S390
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG

@@ -156,6 +157,11 @@ config S390
 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
 
+config PGTABLE_LEVELS
+	int
+	default 4 if 64BIT
+	default 2
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -161,10 +161,11 @@ extern unsigned int vdso_enabled;
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
    the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-extern unsigned long randomize_et_dyn(void);
-#define ELF_ET_DYN_BASE		randomize_et_dyn()
+   that it will "exec", and that there is sufficient room for the brk. 64-bit
+   tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? \
+				(STACK_TOP / 3 * 2) : \
+				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */

@@ -225,9 +226,6 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 int arch_setup_additional_pages(struct linux_binprm *, int);
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
 
 #endif
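A worked example of the new ELF_ET_DYN_BASE arithmetic, using a hypothetical 64-bit STACK_TOP of 1 TiB purely for illustration (the real s390 value depends on the task's address space limits):

#include <stdio.h>

int main(void)
{
	unsigned long stack_top = 1UL << 40;		   /* hypothetical */
	unsigned long base = stack_top / 3 * 2;		   /* 0xaaaaaaaaaa */
	unsigned long aligned = base & ~((1UL << 32) - 1); /* 0xaa00000000 */

	/* 64-bit tasks: the base drops to the next 4GB boundary below it */
	printf("base %#lx -> 4GB-aligned %#lx\n", base, aligned);
	return 0;
}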
@@ -60,22 +60,20 @@ static inline int mmap_is_legacy(void)
 	return sysctl_legacy_va_layout;
 }
 
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
 {
-	if (!(current->flags & PF_RANDOMIZE))
-		return 0;
 	if (is_32bit_task())
 		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
 	else
 		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
 }
 
-static unsigned long mmap_base_legacy(void)
+static unsigned long mmap_base_legacy(unsigned long rnd)
 {
-	return TASK_UNMAPPED_BASE + mmap_rnd();
+	return TASK_UNMAPPED_BASE + rnd;
 }
 
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(unsigned long rnd)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
 

@@ -84,7 +82,7 @@ static inline unsigned long mmap_base(void)
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 	gap &= PAGE_MASK;
-	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
+	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
 }
 
 unsigned long

@@ -179,17 +177,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	return addr;
 }
 
-unsigned long randomize_et_dyn(void)
-{
-	unsigned long base;
-
-	base = STACK_TOP / 3 * 2;
-	if (!is_32bit_task())
-		/* Align to 4GB */
-		base &= ~((1UL << 32) - 1);
-	return base + mmap_rnd();
-}
-
 #ifndef CONFIG_64BIT
 
 /*

@@ -198,15 +185,20 @@ unsigned long randomize_et_dyn(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
+	unsigned long random_factor = 0UL;
+
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = arch_mmap_rnd();
+
 	/*
 	 * Fall back to the standard layout if the personality
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (mmap_is_legacy()) {
-		mm->mmap_base = mmap_base_legacy();
+		mm->mmap_base = mmap_base_legacy(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
-		mm->mmap_base = mmap_base();
+		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }

@@ -273,15 +265,20 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
   */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
+	unsigned long random_factor = 0UL;
+
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = arch_mmap_rnd();
+
 	/*
 	 * Fall back to the standard layout if the personality
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (mmap_is_legacy()) {
-		mm->mmap_base = mmap_base_legacy();
+		mm->mmap_base = mmap_base_legacy(random_factor);
 		mm->get_unmapped_area = s390_get_unmapped_area;
 	} else {
-		mm->mmap_base = mmap_base();
+		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
 	}
 }
@@ -162,6 +162,10 @@ config NEED_DMA_MAP_STATE
 config NEED_SG_DMA_LENGTH
 	def_bool y
 
+config PGTABLE_LEVELS
+	default 3 if X2TLB
+	default 2
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -993,7 +993,7 @@ static struct unwinder dwarf_unwinder = {
 	.rating = 150,
 };
 
-static void dwarf_unwinder_cleanup(void)
+static void __init dwarf_unwinder_cleanup(void)
 {
 	struct dwarf_fde *fde, *next_fde;
 	struct dwarf_cie *cie, *next_cie;

@@ -1009,6 +1009,10 @@ static void dwarf_unwinder_cleanup(void)
 	rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
 		kfree(cie);
 
+	if (dwarf_reg_pool)
+		mempool_destroy(dwarf_reg_pool);
+	if (dwarf_frame_pool)
+		mempool_destroy(dwarf_frame_pool);
 	kmem_cache_destroy(dwarf_reg_cachep);
 	kmem_cache_destroy(dwarf_frame_cachep);
 }

@@ -1176,17 +1180,13 @@ static int __init dwarf_unwinder_init(void)
 			sizeof(struct dwarf_reg), 0,
 			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
 
-	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-					  mempool_alloc_slab,
-					  mempool_free_slab,
-					  dwarf_frame_cachep);
+	dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
+						    dwarf_frame_cachep);
 	if (!dwarf_frame_pool)
 		goto out;
 
-	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					dwarf_reg_cachep);
+	dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ,
						  dwarf_reg_cachep);
 	if (!dwarf_reg_pool)
 		goto out;
 
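The mempool conversion above is mechanical: mempool_create_slab_pool() is shorthand for mempool_create() with the slab allocator callbacks. A sketch of the equivalence, where the cache argument stands in for any kmem_cache:

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *make_pool(struct kmem_cache *cachep, int min_nr)
{
	/* long-hand form: spell out the slab alloc/free callbacks ... */
	mempool_t *a = mempool_create(min_nr, mempool_alloc_slab,
				      mempool_free_slab, cachep);
	if (a)
		mempool_destroy(a);

	/* ... short-hand form introduced for exactly this pairing */
	return mempool_create_slab_pool(min_nr, cachep);
}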
@@ -146,6 +146,10 @@ config GENERIC_ISA_DMA
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y if SPARC64
 
+config PGTABLE_LEVELS
+	default 4 if 64BIT
+	default 3
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -130,26 +130,26 @@ static struct mdesc_mem_ops memblock_mdesc_ops = {
 static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
 {
 	unsigned int handle_size;
+	struct mdesc_handle *hp;
+	unsigned long addr;
 	void *base;
 
 	handle_size = (sizeof(struct mdesc_handle) -
 		       sizeof(struct mdesc_hdr) +
 		       mdesc_size);
 
+	/*
+	 * Allocation has to succeed because mdesc update would be missed
+	 * and such events are not retransmitted.
+	 */
 	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
-	if (base) {
-		struct mdesc_handle *hp;
-		unsigned long addr;
-
-		addr = (unsigned long)base;
-		addr = (addr + 15UL) & ~15UL;
-		hp = (struct mdesc_handle *) addr;
+	addr = (unsigned long)base;
+	addr = (addr + 15UL) & ~15UL;
+	hp = (struct mdesc_handle *) addr;
 
-		mdesc_handle_init(hp, handle_size, base);
-		return hp;
-	}
+	mdesc_handle_init(hp, handle_size, base);
 
-	return NULL;
+	return hp;
 }
 
 static void mdesc_kfree(struct mdesc_handle *hp)
@@ -147,6 +147,11 @@ config ARCH_DEFCONFIG
 	default "arch/tile/configs/tilepro_defconfig" if !TILEGX
 	default "arch/tile/configs/tilegx_defconfig" if TILEGX
 
+config PGTABLE_LEVELS
+	int
+	default 3 if 64BIT
+	default 2
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -155,3 +155,8 @@ config MMAPPER
 
 config NO_DMA
 	def_bool y
+
+config PGTABLE_LEVELS
+	int
+	default 3 if 3_LEVEL_PGTABLES
+	default 2
@@ -87,7 +87,7 @@ config X86
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
 	select HAVE_USER_RETURN_NOTIFIER
-	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+	select ARCH_HAS_ELF_RANDOMIZE
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select SPARSE_IRQ

@@ -99,6 +99,7 @@ config X86
 	select IRQ_FORCED_THREADING
 	select HAVE_BPF_JIT if X86_64
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_HUGE_VMAP if X86_64 || (X86_32 && X86_PAE)
 	select ARCH_HAS_SG_CHAIN
 	select CLKEVT_I8253
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG

@@ -277,6 +278,12 @@ config ARCH_SUPPORTS_UPROBES
 config FIX_EARLYCON_MEM
 	def_bool y
 
+config PGTABLE_LEVELS
+	int
+	default 4 if X86_64
+	default 3 if X86_PAE
+	default 2
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"

@@ -714,17 +721,6 @@ endif #HYPERVISOR_GUEST
 config NO_BOOTMEM
 	def_bool y
 
-config MEMTEST
-	bool "Memtest"
-	---help---
-	  This option adds a kernel parameter 'memtest', which allows memtest
-	  to be set.
-	        memtest=0, mean disabled; -- default
-	        memtest=1, mean do 1 test pattern;
-	        ...
-	        memtest=4, mean do 4 test patterns.
-	  If you are unsure how to answer this question, answer N.
-
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
@@ -40,14 +40,6 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif
 
-#ifdef CONFIG_MEMTEST
-extern void early_memtest(unsigned long start, unsigned long end);
-#else
-static inline void early_memtest(unsigned long start, unsigned long end)
-{
-}
-#endif
-
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
 extern u64 early_reserve_e820(u64 sizet, u64 align);
@@ -339,9 +339,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
 					      int uses_interp);
 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 /*
  * True on X86_32 or when emulating IA32 on X86_64
  */
@@ -40,8 +40,10 @@
 
 #ifdef CONFIG_X86_64
 #include <asm/page_64_types.h>
+#define IOREMAP_MAX_ORDER    (PUD_SHIFT)
 #else
 #include <asm/page_32_types.h>
+#define IOREMAP_MAX_ORDER    (PMD_SHIFT)
 #endif /* CONFIG_X86_64 */
 
 #ifndef __ASSEMBLY__
@@ -545,7 +545,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
         PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
 }
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
     pmdval_t ret;
@@ -585,7 +585,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
         PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                 val);
 }
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 static inline pud_t __pud(pudval_t val)
 {
     pudval_t ret;
@@ -636,9 +636,9 @@ static inline void pud_clear(pud_t *pudp)
     set_pud(pudp, __pud(0));
 }
 
-#endif    /* PAGETABLE_LEVELS == 4 */
+#endif    /* CONFIG_PGTABLE_LEVELS == 4 */
 
-#endif    /* PAGETABLE_LEVELS >= 3 */
+#endif    /* CONFIG_PGTABLE_LEVELS >= 3 */
 
 #ifdef CONFIG_X86_PAE
 /* Special-case pte-setting operations for PAE, which can't update a
@@ -294,7 +294,7 @@ struct pv_mmu_ops {
     struct paravirt_callee_save pgd_val;
     struct paravirt_callee_save make_pgd;
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
     void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
     void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
@@ -308,13 +308,13 @@ struct pv_mmu_ops {
     struct paravirt_callee_save pmd_val;
     struct paravirt_callee_save make_pmd;
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
     struct paravirt_callee_save pud_val;
     struct paravirt_callee_save make_pud;
 
     void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif    /* PAGETABLE_LEVELS == 4 */
-#endif    /* PAGETABLE_LEVELS >= 3 */
+#endif    /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif    /* CONFIG_PGTABLE_LEVELS >= 3 */
 
     struct pv_lazy_ops lazy_mode;
@@ -77,7 +77,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
     struct page *page;
@@ -116,7 +116,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 }
 #endif    /* CONFIG_X86_PAE */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
     paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
@@ -142,7 +142,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
     ___pud_free_tlb(tlb, pud);
 }
 
-#endif    /* PAGETABLE_LEVELS > 3 */
-#endif    /* PAGETABLE_LEVELS > 2 */
+#endif    /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif    /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #endif /* _ASM_X86_PGALLOC_H */
@@ -17,7 +17,6 @@ typedef union {
 #endif    /* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD    0
-#define PAGETABLE_LEVELS    2
 
 /*
  * traditional i386 two-level paging structure:
@@ -24,8 +24,6 @@ typedef union {
 #define SHARED_KERNEL_PMD    1
 #endif
 
-#define PAGETABLE_LEVELS    3
-
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
@@ -551,7 +551,7 @@ static inline unsigned long pages_to_mb(unsigned long npg)
     return npg >> (20 - PAGE_SHIFT);
 }
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
     return native_pud_val(pud) == 0;
@@ -594,9 +594,9 @@ static inline int pud_large(pud_t pud)
 {
     return 0;
 }
-#endif    /* PAGETABLE_LEVELS > 2 */
+#endif    /* CONFIG_PGTABLE_LEVELS > 2 */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline int pgd_present(pgd_t pgd)
 {
     return pgd_flags(pgd) & _PAGE_PRESENT;
@@ -633,7 +633,7 @@ static inline int pgd_none(pgd_t pgd)
 {
     return !native_pgd_val(pgd);
 }
-#endif    /* PAGETABLE_LEVELS > 3 */
+#endif    /* CONFIG_PGTABLE_LEVELS > 3 */
 
 #endif    /* __ASSEMBLY__ */
@@ -20,7 +20,6 @@ typedef struct { pteval_t pte; } pte_t;
 #endif    /* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD    0
-#define PAGETABLE_LEVELS    4
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
@@ -234,7 +234,7 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
     return native_pgd_val(pgd) & PTE_FLAGS_MASK;
 }
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 typedef struct { pudval_t pud; } pud_t;
 
 static inline pud_t native_make_pud(pmdval_t val)
@@ -255,7 +255,7 @@ static inline pudval_t native_pud_val(pud_t pud)
 }
 #endif
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 typedef struct { pmdval_t pmd; } pmd_t;
 
 static inline pmd_t native_make_pmd(pmdval_t val)
@@ -513,7 +513,7 @@ void __init kvm_guest_init(void)
      * can get false positives too easily, for example if the host is
      * overcommitted.
      */
-    watchdog_enable_hardlockup_detector(false);
+    hardlockup_detector_disable();
 }
 
 static noinline uint32_t __kvm_cpuid_base(void)
@@ -443,7 +443,7 @@ struct pv_mmu_ops pv_mmu_ops = {
     .ptep_modify_prot_start = __ptep_modify_prot_start,
     .ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
     .set_pte_atomic = native_set_pte_atomic,
     .pte_clear = native_pte_clear,
@@ -454,13 +454,13 @@ struct pv_mmu_ops pv_mmu_ops = {
     .pmd_val = PTE_IDENT,
     .make_pmd = PTE_IDENT,
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
     .pud_val = PTE_IDENT,
     .make_pud = PTE_IDENT,
 
     .set_pgd = native_set_pgd,
 #endif
-#endif    /* PAGETABLE_LEVELS >= 3 */
+#endif    /* CONFIG_PGTABLE_LEVELS >= 3 */
 
     .pte_val = PTE_IDENT,
     .pgd_val = PTE_IDENT,
@@ -32,6 +32,4 @@ obj-$(CONFIG_AMD_NUMA)		+= amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)		+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
-obj-$(CONFIG_MEMTEST)		+= memtest.o
-
 obj-$(CONFIG_X86_INTEL_MPX)	+= mpx.o
@@ -67,8 +67,13 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * address space. It transparently creates kernel huge I/O mapping when
+ * the physical address is aligned by a huge page size (1GB or 2MB) and
+ * the requested size is at least the huge page size.
+ *
+ * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
+ * Therefore, the mapping code falls back to use a smaller page toward 4KB
+ * when a mapping range is covered by non-WB type of MTRRs.
  *
  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
  * have to convert them into an offset in a page-aligned mapping, but the
@@ -326,6 +331,20 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+int arch_ioremap_pud_supported(void)
+{
+#ifdef CONFIG_X86_64
+    return cpu_has_gbpages;
+#else
+    return 0;
+#endif
+}
+
+int arch_ioremap_pmd_supported(void)
+{
+    return cpu_has_pse;
+}
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -65,24 +65,23 @@ static int mmap_is_legacy(void)
     return sysctl_legacy_va_layout;
 }
 
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
 {
-    unsigned long rnd = 0;
+    unsigned long rnd;
 
     /*
-     *  8 bits of randomness in 32bit mmaps, 20 address space bits
-     * 28 bits of randomness in 64bit mmaps, 40 address space bits
-     */
-    if (current->flags & PF_RANDOMIZE) {
-        if (mmap_is_ia32())
-            rnd = get_random_int() % (1<<8);
-        else
-            rnd = get_random_int() % (1<<28);
-    }
+     *  8 bits of randomness in 32bit mmaps, 20 address space bits
+     * 28 bits of randomness in 64bit mmaps, 40 address space bits
+     */
+    if (mmap_is_ia32())
+        rnd = (unsigned long)get_random_int() % (1<<8);
+    else
+        rnd = (unsigned long)get_random_int() % (1<<28);
 
     return rnd << PAGE_SHIFT;
 }
 
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
 {
     unsigned long gap = rlimit(RLIMIT_STACK);
 
@@ -91,19 +90,19 @@ static unsigned long mmap_base(void)
     else if (gap > MAX_GAP)
         gap = MAX_GAP;
 
-    return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+    return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
 /*
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(unsigned long rnd)
 {
     if (mmap_is_ia32())
         return TASK_UNMAPPED_BASE;
     else
-        return TASK_UNMAPPED_BASE + mmap_rnd();
+        return TASK_UNMAPPED_BASE + rnd;
 }
 
 /*
@@ -112,13 +111,18 @@ static unsigned long mmap_legacy_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-    mm->mmap_legacy_base = mmap_legacy_base();
-    mm->mmap_base = mmap_base();
+    unsigned long random_factor = 0UL;
+
+    if (current->flags & PF_RANDOMIZE)
+        random_factor = arch_mmap_rnd();
+
+    mm->mmap_legacy_base = mmap_legacy_base(random_factor);
 
     if (mmap_is_legacy()) {
         mm->mmap_base = mm->mmap_legacy_base;
         mm->get_unmapped_area = arch_get_unmapped_area;
     } else {
+        mm->mmap_base = mmap_base(random_factor);
         mm->get_unmapped_area = arch_get_unmapped_area_topdown;
     }
 }
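Note: the refactor above separates the decision to randomize (PF_RANDOMIZE, now checked once in arch_pick_mmap_layout()) from the entropy source (arch_mmap_rnd()). The following is a minimal standalone sketch of the arithmetic only, not kernel code: the 8/28 bit widths and the PAGE_SHIFT scaling mirror the diff, while the function name and the rand() source are illustrative assumptions.

/* Userspace sketch of the mmap ASLR offset computation above. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12                   /* 4 KiB pages, as on x86 */

static unsigned long mmap_rnd_sketch(int is_32bit)
{
    unsigned long rnd;

    /* 8 bits of entropy for 32-bit tasks, 28 bits for 64-bit tasks */
    if (is_32bit)
        rnd = (unsigned long)rand() % (1UL << 8);
    else
        rnd = (unsigned long)rand() % (1UL << 28);

    /* scale the page count to a page-aligned byte offset */
    return rnd << PAGE_SHIFT;
}

int main(void)
{
    printf("32-bit offset: %#lx\n", mmap_rnd_sketch(1));
    printf("64-bit offset: %#lx\n", mmap_rnd_sketch(0));
    return 0;
}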
@@ -4,6 +4,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/mtrr.h>
 
 #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
 
@@ -58,7 +59,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
     tlb_remove_page(tlb, pte);
 }
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
     struct page *page = virt_to_page(pmd);
@@ -74,14 +75,14 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
     tlb_remove_page(tlb, page);
 }
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 {
     paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
     tlb_remove_page(tlb, virt_to_page(pud));
 }
-#endif    /* PAGETABLE_LEVELS > 3 */
-#endif    /* PAGETABLE_LEVELS > 2 */
+#endif    /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif    /* CONFIG_PGTABLE_LEVELS > 2 */
 
 static inline void pgd_list_add(pgd_t *pgd)
 {
@@ -117,9 +118,9 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
     /* If the pgd points to a shared pagetable level (either the
        ptes in non-PAE, or shared PMD in PAE), then just copy the
        references from swapper_pg_dir. */
-    if (PAGETABLE_LEVELS == 2 ||
-        (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
-        PAGETABLE_LEVELS == 4) {
+    if (CONFIG_PGTABLE_LEVELS == 2 ||
+        (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+        CONFIG_PGTABLE_LEVELS == 4) {
         clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                 KERNEL_PGD_PTRS);
@@ -560,3 +561,67 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
 {
     __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
 }
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+    u8 mtrr;
+
+    /*
+     * Do not use a huge page when the range is covered by non-WB type
+     * of MTRRs.
+     */
+    mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
+    if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+        return 0;
+
+    prot = pgprot_4k_2_large(prot);
+
+    set_pte((pte_t *)pud, pfn_pte(
+        (u64)addr >> PAGE_SHIFT,
+        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+    return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+    u8 mtrr;
+
+    /*
+     * Do not use a huge page when the range is covered by non-WB type
+     * of MTRRs.
+     */
+    mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
+    if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+        return 0;
+
+    prot = pgprot_4k_2_large(prot);
+
+    set_pte((pte_t *)pmd, pfn_pte(
+        (u64)addr >> PAGE_SHIFT,
+        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+    return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+    if (pud_large(*pud)) {
+        pud_clear(pud);
+        return 1;
+    }
+
+    return 0;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+    if (pmd_large(*pmd)) {
+        pmd_clear(pmd);
+        return 1;
+    }
+
+    return 0;
+}
+#endif    /* CONFIG_HAVE_ARCH_HUGE_VMAP */
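Note: the pud_set_huge()/pmd_set_huge() helpers above, combined with the arch_ioremap_{pud,pmd}_supported() probes in the ioremap diff, let an I/O mapping use a 1GB or 2MB entry only when hardware and alignment allow it. A standalone sketch of that eligibility test, not kernel code; the constants are x86 defaults and the function name is made up:

/* Pick the largest page size whose alignment and size constraints hold. */
#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (2ULL * 1024 * 1024)           /* 2 MiB */
#define PUD_SIZE (1ULL * 1024 * 1024 * 1024)    /* 1 GiB */

static const char *pick_mapping_level(uint64_t phys, uint64_t size,
                                      int gbpages_ok, int pse_ok)
{
    if (gbpages_ok && !(phys & (PUD_SIZE - 1)) && size >= PUD_SIZE)
        return "1GB (pud_set_huge)";
    if (pse_ok && !(phys & (PMD_SIZE - 1)) && size >= PMD_SIZE)
        return "2MB (pmd_set_huge)";
    return "4KB (regular ptes)";
}

int main(void)
{
    printf("%s\n", pick_mapping_level(0x40000000ULL, 1ULL << 30, 1, 1));
    printf("%s\n", pick_mapping_level(0x00200000ULL, 4ULL << 20, 1, 1));
    printf("%s\n", pick_mapping_level(0x00001000ULL, 8192, 1, 1));
    return 0;
}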
@@ -502,7 +502,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 __visible pudval_t xen_pud_val(pud_t pud)
 {
     return pte_mfn_to_pfn(pud.pud);
@@ -589,7 +589,7 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 
     xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif    /* PAGETABLE_LEVELS == 4 */
+#endif    /* CONFIG_PGTABLE_LEVELS == 4 */
 
 /*
  * (Yet another) pagetable walker.  This one is intended for pinning a
@@ -1628,7 +1628,7 @@ static void xen_release_pmd(unsigned long pfn)
     xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
     xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2046,7 +2046,7 @@ static void __init xen_post_allocator_init(void)
     pv_mmu_ops.set_pte = xen_set_pte;
     pv_mmu_ops.set_pmd = xen_set_pmd;
     pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
     pv_mmu_ops.set_pgd = xen_set_pgd;
 #endif
 
@@ -2056,7 +2056,7 @@ static void __init xen_post_allocator_init(void)
     pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
     pv_mmu_ops.release_pte = xen_release_pte;
     pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
     pv_mmu_ops.alloc_pud = xen_alloc_pud;
     pv_mmu_ops.release_pud = xen_release_pud;
 #endif
@@ -2122,14 +2122,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
     .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
     .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
     .pud_val = PV_CALLEE_SAVE(xen_pud_val),
     .make_pud = PV_CALLEE_SAVE(xen_make_pud),
     .set_pgd = xen_set_pgd_hyper,
 
     .alloc_pud = xen_alloc_pmd_init,
     .release_pud = xen_release_pmd_init,
-#endif    /* PAGETABLE_LEVELS == 4 */
+#endif    /* CONFIG_PGTABLE_LEVELS == 4 */
 
     .activate_mm = xen_activate_mm,
     .dup_mmap = xen_dup_mmap,
@@ -219,6 +219,7 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
 /*
  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
  * OK to have direct references to sparsemem variables in here.
+ * Must already be protected by mem_hotplug_begin().
  */
 static int
 memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
@@ -228,7 +229,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
     struct page *first_page;
     int ret;
 
-    start_pfn = phys_index << PFN_SECTION_SHIFT;
+    start_pfn = section_nr_to_pfn(phys_index);
     first_page = pfn_to_page(start_pfn);
 
     switch (action) {
@@ -286,6 +287,7 @@ static int memory_subsys_online(struct device *dev)
     if (mem->online_type < 0)
         mem->online_type = MMOP_ONLINE_KEEP;
 
+    /* Already under protection of mem_hotplug_begin() */
     ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
 
     /* clear online_type */
@@ -328,17 +330,19 @@ store_mem_state(struct device *dev,
         goto err;
     }
 
+    /*
+     * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
+     * the correct memory block to online before doing device_online(dev),
+     * which will take dev->mutex.  Take the lock early to prevent an
+     * inversion, memory_subsys_online() callbacks will be implemented by
+     * assuming it's already protected.
+     */
+    mem_hotplug_begin();
+
     switch (online_type) {
     case MMOP_ONLINE_KERNEL:
     case MMOP_ONLINE_MOVABLE:
     case MMOP_ONLINE_KEEP:
-        /*
-         * mem->online_type is not protected so there can be a
-         * race here.  However, when racing online, the first
-         * will succeed and the second will just return as the
-         * block will already be online.  The online type
-         * could be either one, but that is expected.
-         */
         mem->online_type = online_type;
         ret = device_online(&mem->dev);
         break;
@@ -349,6 +353,7 @@ store_mem_state(struct device *dev,
         ret = -EINVAL; /* should never happen */
     }
 
+    mem_hotplug_done();
 err:
     unlock_device_hotplug();
 
@@ -738,11 +738,11 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
         return ZFCP_ERP_FAILED;
 
     if (mempool_resize(act->adapter->pool.sr_data,
-               act->adapter->stat_read_buf_num, GFP_KERNEL))
+               act->adapter->stat_read_buf_num))
         return ZFCP_ERP_FAILED;
 
     if (mempool_resize(act->adapter->pool.status_read_req,
-               act->adapter->stat_read_buf_num, GFP_KERNEL))
+               act->adapter->stat_read_buf_num))
         return ZFCP_ERP_FAILED;
 
     atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
@@ -55,7 +55,9 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
     if (PagePrivate(page))
         page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
 
-    cancel_dirty_page(page, PAGE_SIZE);
+    if (TestClearPageDirty(page))
+        account_page_cleaned(page, mapping);
+
     ClearPageMappedToDisk(page);
     ll_delete_from_page_cache(page);
 }
@@ -397,13 +397,15 @@ static int __init xen_tmem_init(void)
 #ifdef CONFIG_CLEANCACHE
     BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
     if (tmem_enabled && cleancache) {
-        char *s = "";
-        struct cleancache_ops *old_ops =
-            cleancache_register_ops(&tmem_cleancache_ops);
-        if (old_ops)
-            s = " (WARNING: cleancache_ops overridden)";
-        pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory%s\n",
-            s);
+        int err;
+
+        err = cleancache_register_ops(&tmem_cleancache_ops);
+        if (err)
+            pr_warn("xen-tmem: failed to enable cleancache: %d\n",
+                err);
+        else
+            pr_info("cleancache enabled, RAM provided by "
+                "Xen Transcendent Memory\n");
     }
 #endif
 #ifdef CONFIG_XEN_SELFBALLOONING
@@ -27,9 +27,6 @@ config COMPAT_BINFMT_ELF
     bool
     depends on COMPAT && BINFMT_ELF
 
-config ARCH_BINFMT_ELF_RANDOMIZE_PIE
-    bool
-
 config ARCH_BINFMT_ELF_STATE
     bool
 
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/random.h>
 #include <linux/elf.h>
+#include <linux/elf-randomize.h>
 #include <linux/utsname.h>
 #include <linux/coredump.h>
 #include <linux/sched.h>
@@ -862,6 +863,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
         i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
         int elf_prot = 0, elf_flags;
         unsigned long k, vaddr;
+        unsigned long total_size = 0;
 
         if (elf_ppnt->p_type != PT_LOAD)
             continue;
@@ -909,25 +911,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
              * default mmap base, as well as whatever program they
              * might try to exec.  This is because the brk will
              * follow the loader, and is not movable.  */
-#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
-            /* Memory randomization might have been switched off
-             * in runtime via sysctl or explicit setting of
-             * personality flags.
-             * If that is the case, retain the original non-zero
-             * load_bias value in order to establish proper
-             * non-randomized mappings.
-             */
+            load_bias = ELF_ET_DYN_BASE - vaddr;
             if (current->flags & PF_RANDOMIZE)
-                load_bias = 0;
-            else
-                load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
-#else
-            load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
-#endif
+                load_bias += arch_mmap_rnd();
+            load_bias = ELF_PAGESTART(load_bias);
+            total_size = total_mapping_size(elf_phdata,
+                            loc->elf_ex.e_phnum);
+            if (!total_size) {
+                error = -EINVAL;
+                goto out_free_dentry;
+            }
         }
 
         error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-                elf_prot, elf_flags, 0);
+                elf_prot, elf_flags, total_size);
         if (BAD_ADDR(error)) {
             retval = IS_ERR((void *)error) ?
                 PTR_ERR((void*)error) : -EINVAL;
@@ -1053,15 +1050,13 @@ static int load_elf_binary(struct linux_binprm *bprm)
     current->mm->end_data = end_data;
     current->mm->start_stack = bprm->p;
 
-#ifdef arch_randomize_brk
     if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
         current->mm->brk = current->mm->start_brk =
             arch_randomize_brk(current->mm);
-#ifdef CONFIG_COMPAT_BRK
+#ifdef compat_brk_randomized
         current->brk_randomized = 1;
 #endif
     }
-#endif
 
     if (current->personality & MMAP_PAGE_ZERO) {
         /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
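Note: the ET_DYN hunk above now derives the load bias from a fixed base plus the shared mmap entropy source, then truncates to a page boundary. A standalone sketch of just that arithmetic, not kernel code; the base constant and the fake rnd() are illustrative assumptions:

/* Userspace sketch of the new ET_DYN load_bias calculation. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_MASK        (~0xfffUL)
#define ELF_PAGESTART(v) ((v) & PAGE_MASK)
#define ET_DYN_BASE      0x555555554000UL   /* placeholder base address */

static unsigned long fake_arch_mmap_rnd(void)
{
    /* 28 bits of page-granular entropy, as in the 64-bit x86 case */
    return ((unsigned long)rand() % (1UL << 28)) << 12;
}

static unsigned long load_bias_for(unsigned long vaddr, int randomize)
{
    unsigned long load_bias = ET_DYN_BASE - vaddr;

    if (randomize)
        load_bias += fake_arch_mmap_rnd();
    return ELF_PAGESTART(load_bias);
}

int main(void)
{
    printf("fixed:      %#lx\n", load_bias_for(0, 0));
    printf("randomized: %#lx\n", load_bias_for(0, 1));
    return 0;
}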
@@ -3243,8 +3243,8 @@ int try_to_free_buffers(struct page *page)
      * to synchronise against __set_page_dirty_buffers and prevent the
      * dirty bit from being lost.
      */
-    if (ret)
-        cancel_dirty_page(page, PAGE_CACHE_SIZE);
+    if (ret && TestClearPageDirty(page))
+        account_page_cleaned(page, mapping);
     spin_unlock(&mapping->private_lock);
 out:
     if (buffers_to_free) {
@@ -773,8 +773,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 
     length = atomic_dec_return(&tcpSesAllocCount);
     if (length > 0)
-        mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
-                GFP_KERNEL);
+        mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
 }
 
 static int
@@ -848,8 +847,7 @@ cifs_demultiplex_thread(void *p)
 
     length = atomic_inc_return(&tcpSesAllocCount);
     if (length > 1)
-        mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
-                GFP_KERNEL);
+        mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
 
     set_freezable();
     while (server->tcpStatus != CifsExiting) {
@@ -319,7 +319,7 @@ static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
 
 static void truncate_huge_page(struct page *page)
 {
-    cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
+    ClearPageDirty(page);
     ClearPageUptodate(page);
     delete_from_page_cache(page);
 }
@@ -1876,11 +1876,6 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
          * request from the inode / page_private pointer and
          * release it */
         nfs_inode_remove_request(req);
-        /*
-         * In case nfs_inode_remove_request has marked the
-         * page as being dirty
-         */
-        cancel_dirty_page(page, PAGE_CACHE_SIZE);
         nfs_unlock_and_release_request(req);
     }
 
@@ -3370,7 +3370,7 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
         ret = ocfs2_get_right_path(et, left_path, &right_path);
         if (ret) {
             mlog_errno(ret);
-            goto out;
+            return ret;
         }
 
         right_el = path_leaf_el(right_path);
@@ -3453,8 +3453,7 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
                         subtree_index);
     }
 out:
-    if (right_path)
-        ocfs2_free_path(right_path);
+    ocfs2_free_path(right_path);
     return ret;
 }
 
@@ -3536,7 +3535,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
         ret = ocfs2_get_left_path(et, right_path, &left_path);
         if (ret) {
             mlog_errno(ret);
-            goto out;
+            return ret;
         }
 
         left_el = path_leaf_el(left_path);
@@ -3647,8 +3646,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
                     right_path, subtree_index);
     }
 out:
-    if (left_path)
-        ocfs2_free_path(left_path);
+    ocfs2_free_path(left_path);
     return ret;
 }
 
@@ -4334,17 +4332,17 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
     } else if (path->p_tree_depth > 0) {
         status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
         if (status)
-            goto out;
+            goto exit;
 
         if (left_cpos != 0) {
             left_path = ocfs2_new_path_from_path(path);
             if (!left_path)
-                goto out;
+                goto exit;
 
             status = ocfs2_find_path(et->et_ci, left_path,
                          left_cpos);
             if (status)
-                goto out;
+                goto free_left_path;
 
             new_el = path_leaf_el(left_path);
 
@@ -4361,7 +4359,7 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
                     le16_to_cpu(new_el->l_next_free_rec),
                     le16_to_cpu(new_el->l_count));
                 status = -EINVAL;
-                goto out;
+                goto free_left_path;
             }
             rec = &new_el->l_recs[
                 le16_to_cpu(new_el->l_next_free_rec) - 1];
@@ -4388,18 +4386,18 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
         path->p_tree_depth > 0) {
         status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
         if (status)
-            goto out;
+            goto free_left_path;
 
         if (right_cpos == 0)
-            goto out;
+            goto free_left_path;
 
         right_path = ocfs2_new_path_from_path(path);
         if (!right_path)
-            goto out;
+            goto free_left_path;
 
         status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
         if (status)
-            goto out;
+            goto free_right_path;
 
         new_el = path_leaf_el(right_path);
         rec = &new_el->l_recs[0];
@@ -4413,7 +4411,7 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
                 (unsigned long long)le64_to_cpu(eb->h_blkno),
                 le16_to_cpu(new_el->l_next_free_rec));
             status = -EINVAL;
-            goto out;
+            goto free_right_path;
         }
         rec = &new_el->l_recs[1];
     }
@@ -4430,12 +4428,11 @@ ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
         ret = contig_type;
     }
 
-out:
-    if (left_path)
-        ocfs2_free_path(left_path);
-    if (right_path)
-        ocfs2_free_path(right_path);
-
+free_right_path:
+    ocfs2_free_path(right_path);
+free_left_path:
+    ocfs2_free_path(left_path);
+exit:
     return ret;
 }
 
@@ -6858,13 +6855,13 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
         if (pages == NULL) {
             ret = -ENOMEM;
             mlog_errno(ret);
-            goto out;
+            return ret;
         }
 
         ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
         if (ret) {
             mlog_errno(ret);
-            goto out;
+            goto free_pages;
         }
     }
 
@@ -6996,9 +6993,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
 out:
     if (data_ac)
         ocfs2_free_alloc_context(data_ac);
-    if (pages)
-        kfree(pages);
-
+free_pages:
+    kfree(pages);
     return ret;
 }
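Note: the ocfs2_figure_merge_contig_type() rework above replaces one catch-all out: label guarded by NULL checks with a ladder of labels that unwind exactly what has been set up so far. A generic standalone sketch of that pattern with hypothetical resource names, not the ocfs2 code; free() tolerates NULL just as ocfs2_free_path() does:

/* Unwind-label ladder: each error path jumps to the label matching
 * how far setup got; labels fall through to release earlier resources. */
#include <stdlib.h>

/* hypothetical unit of work; always succeeds in this sketch */
static int process(const char *a, const char *b)
{
    (void)a;
    (void)b;
    return 0;
}

int do_work(void)
{
    char *left, *right;
    int ret;

    left = malloc(64);
    if (!left) {
        ret = -1;
        goto exit;
    }

    right = malloc(64);
    if (!right) {
        ret = -1;
        goto free_left;
    }

    ret = process(left, right);
    if (ret < 0)
        goto free_right;

free_right:
    free(right);
free_left:
    free(left);
exit:
    return ret;
}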
fs/ocfs2/aops.c (155 lines changed)
@@ -664,6 +664,117 @@ static int ocfs2_is_overwrite(struct ocfs2_super *osb,
     return 0;
 }
 
+static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
+        struct inode *inode, loff_t offset,
+        u64 zero_len, int cluster_align)
+{
+    u32 p_cpos = 0;
+    u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
+    unsigned int num_clusters = 0;
+    unsigned int ext_flags = 0;
+    int ret = 0;
+
+    if (offset <= i_size_read(inode) || cluster_align)
+        return 0;
+
+    ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
+            &ext_flags);
+    if (ret < 0) {
+        mlog_errno(ret);
+        return ret;
+    }
+
+    if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+        u64 s = i_size_read(inode);
+        sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+            (do_div(s, osb->s_clustersize) >> 9);
+
+        ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
+                zero_len >> 9, GFP_NOFS, false);
+        if (ret < 0)
+            mlog_errno(ret);
+    }
+
+    return ret;
+}
+
+static int ocfs2_direct_IO_extend_no_holes(struct ocfs2_super *osb,
+        struct inode *inode, loff_t offset)
+{
+    u64 zero_start, zero_len, total_zero_len;
+    u32 p_cpos = 0, clusters_to_add;
+    u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
+    unsigned int num_clusters = 0;
+    unsigned int ext_flags = 0;
+    u32 size_div, offset_div;
+    int ret = 0;
+
+    {
+        u64 o = offset;
+        u64 s = i_size_read(inode);
+
+        offset_div = do_div(o, osb->s_clustersize);
+        size_div = do_div(s, osb->s_clustersize);
+    }
+
+    if (offset <= i_size_read(inode))
+        return 0;
+
+    clusters_to_add = ocfs2_bytes_to_clusters(inode->i_sb, offset) -
+        ocfs2_bytes_to_clusters(inode->i_sb, i_size_read(inode));
+    total_zero_len = offset - i_size_read(inode);
+    if (clusters_to_add)
+        total_zero_len -= offset_div;
+
+    /* Allocate clusters to fill out holes, and this is only needed
+     * when we add more than one clusters. Otherwise the cluster will
+     * be allocated during direct IO */
+    if (clusters_to_add > 1) {
+        ret = ocfs2_extend_allocation(inode,
+                OCFS2_I(inode)->ip_clusters,
+                clusters_to_add - 1, 0);
+        if (ret) {
+            mlog_errno(ret);
+            goto out;
+        }
+    }
+
+    while (total_zero_len) {
+        ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
+                &ext_flags);
+        if (ret < 0) {
+            mlog_errno(ret);
+            goto out;
+        }
+
+        zero_start = ocfs2_clusters_to_bytes(osb->sb, p_cpos) +
+            size_div;
+        zero_len = ocfs2_clusters_to_bytes(osb->sb, num_clusters) -
+            size_div;
+        zero_len = min(total_zero_len, zero_len);
+
+        if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+            ret = blkdev_issue_zeroout(osb->sb->s_bdev,
+                    zero_start >> 9, zero_len >> 9,
+                    GFP_NOFS, false);
+            if (ret < 0) {
+                mlog_errno(ret);
+                goto out;
+            }
+        }
+
+        total_zero_len -= zero_len;
+        v_cpos += ocfs2_bytes_to_clusters(osb->sb, zero_len + size_div);
+
+        /* Only at first iteration can be cluster not aligned.
+         * So set size_div to 0 for the rest */
+        size_div = 0;
+    }
+
+out:
+    return ret;
+}
+
 static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
         struct iov_iter *iter,
         loff_t offset)
@@ -678,8 +789,8 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
     struct buffer_head *di_bh = NULL;
     size_t count = iter->count;
     journal_t *journal = osb->journal->j_journal;
-    u32 zero_len;
-    int cluster_align;
+    u64 zero_len_head, zero_len_tail;
+    int cluster_align_head, cluster_align_tail;
     loff_t final_size = offset + count;
     int append_write = offset >= i_size_read(inode) ? 1 : 0;
     unsigned int num_clusters = 0;
@@ -687,9 +798,16 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
 
     {
         u64 o = offset;
+        u64 s = i_size_read(inode);
 
-        zero_len = do_div(o, 1 << osb->s_clustersize_bits);
-        cluster_align = !zero_len;
+        zero_len_head = do_div(o, 1 << osb->s_clustersize_bits);
+        cluster_align_head = !zero_len_head;
+
+        zero_len_tail = osb->s_clustersize -
+            do_div(s, osb->s_clustersize);
+        if ((offset - i_size_read(inode)) < zero_len_tail)
+            zero_len_tail = offset - i_size_read(inode);
+        cluster_align_tail = !zero_len_tail;
     }
 
     /*
@@ -707,21 +825,23 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
     }
 
     if (append_write) {
-        ret = ocfs2_inode_lock(inode, &di_bh, 1);
+        ret = ocfs2_inode_lock(inode, NULL, 1);
         if (ret < 0) {
             mlog_errno(ret);
             goto clean_orphan;
         }
 
+        /* zeroing out the previously allocated cluster tail
+         * that but not zeroed */
         if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
-            ret = ocfs2_zero_extend(inode, di_bh, offset);
+            ret = ocfs2_direct_IO_zero_extend(osb, inode, offset,
+                    zero_len_tail, cluster_align_tail);
         else
-            ret = ocfs2_extend_no_holes(inode, di_bh, offset,
+            ret = ocfs2_direct_IO_extend_no_holes(osb, inode,
                     offset);
         if (ret < 0) {
             mlog_errno(ret);
             ocfs2_inode_unlock(inode, 1);
-            brelse(di_bh);
             goto clean_orphan;
         }
 
@@ -729,13 +849,10 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
         if (is_overwrite < 0) {
             mlog_errno(is_overwrite);
             ocfs2_inode_unlock(inode, 1);
-            brelse(di_bh);
             goto clean_orphan;
         }
 
         ocfs2_inode_unlock(inode, 1);
-        brelse(di_bh);
-        di_bh = NULL;
     }
 
     written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
@@ -772,15 +889,23 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
             if (ret < 0)
                 mlog_errno(ret);
         }
-    } else if (written < 0 && append_write && !is_overwrite &&
-            !cluster_align) {
+    } else if (written > 0 && append_write && !is_overwrite &&
+            !cluster_align_head) {
         /* zeroing out the allocated cluster head */
         u32 p_cpos = 0;
         u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);
 
+        ret = ocfs2_inode_lock(inode, NULL, 0);
+        if (ret < 0) {
+            mlog_errno(ret);
+            goto clean_orphan;
+        }
+
         ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
                 &num_clusters, &ext_flags);
         if (ret < 0) {
             mlog_errno(ret);
+            ocfs2_inode_unlock(inode, 0);
             goto clean_orphan;
         }
 
@@ -788,9 +913,11 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
 
         ret = blkdev_issue_zeroout(osb->sb->s_bdev,
                 p_cpos << (osb->s_clustersize_bits - 9),
-                zero_len >> 9, GFP_KERNEL, false);
+                zero_len_head >> 9, GFP_NOFS, false);
         if (ret < 0)
             mlog_errno(ret);
+
+        ocfs2_inode_unlock(inode, 0);
     }
 
 clean_orphan:
@@ -1312,7 +1312,9 @@ static int o2hb_debug_init(void)
     int ret = -ENOMEM;
 
     o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL);
-    if (!o2hb_debug_dir) {
+    if (IS_ERR_OR_NULL(o2hb_debug_dir)) {
+        ret = o2hb_debug_dir ?
+            PTR_ERR(o2hb_debug_dir) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -1325,7 +1327,9 @@ static int o2hb_debug_init(void)
                     sizeof(o2hb_live_node_bitmap),
                     O2NM_MAX_NODES,
                     o2hb_live_node_bitmap);
-    if (!o2hb_debug_livenodes) {
+    if (IS_ERR_OR_NULL(o2hb_debug_livenodes)) {
+        ret = o2hb_debug_livenodes ?
+            PTR_ERR(o2hb_debug_livenodes) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -1338,7 +1342,9 @@ static int o2hb_debug_init(void)
                     sizeof(o2hb_live_region_bitmap),
                     O2NM_MAX_REGIONS,
                     o2hb_live_region_bitmap);
-    if (!o2hb_debug_liveregions) {
+    if (IS_ERR_OR_NULL(o2hb_debug_liveregions)) {
+        ret = o2hb_debug_liveregions ?
+            PTR_ERR(o2hb_debug_liveregions) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -1352,7 +1358,9 @@ static int o2hb_debug_init(void)
                     sizeof(o2hb_quorum_region_bitmap),
                     O2NM_MAX_REGIONS,
                     o2hb_quorum_region_bitmap);
-    if (!o2hb_debug_quorumregions) {
+    if (IS_ERR_OR_NULL(o2hb_debug_quorumregions)) {
+        ret = o2hb_debug_quorumregions ?
+            PTR_ERR(o2hb_debug_quorumregions) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -1366,7 +1374,9 @@ static int o2hb_debug_init(void)
                     sizeof(o2hb_failed_region_bitmap),
                     O2NM_MAX_REGIONS,
                     o2hb_failed_region_bitmap);
-    if (!o2hb_debug_failedregions) {
+    if (IS_ERR_OR_NULL(o2hb_debug_failedregions)) {
+        ret = o2hb_debug_failedregions ?
+            PTR_ERR(o2hb_debug_failedregions) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -2000,7 +2010,8 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
 
     reg->hr_debug_dir =
         debugfs_create_dir(config_item_name(&reg->hr_item), dir);
-    if (!reg->hr_debug_dir) {
+    if (IS_ERR_OR_NULL(reg->hr_debug_dir)) {
+        ret = reg->hr_debug_dir ? PTR_ERR(reg->hr_debug_dir) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -2013,7 +2024,9 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
                     O2HB_DB_TYPE_REGION_LIVENODES,
                     sizeof(reg->hr_live_node_bitmap),
                     O2NM_MAX_NODES, reg);
-    if (!reg->hr_debug_livenodes) {
+    if (IS_ERR_OR_NULL(reg->hr_debug_livenodes)) {
+        ret = reg->hr_debug_livenodes ?
+            PTR_ERR(reg->hr_debug_livenodes) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -2025,7 +2038,9 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
                     sizeof(*(reg->hr_db_regnum)),
                     O2HB_DB_TYPE_REGION_NUMBER,
                     0, O2NM_MAX_NODES, reg);
-    if (!reg->hr_debug_regnum) {
+    if (IS_ERR_OR_NULL(reg->hr_debug_regnum)) {
+        ret = reg->hr_debug_regnum ?
+            PTR_ERR(reg->hr_debug_regnum) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -2037,7 +2052,9 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
                     sizeof(*(reg->hr_db_elapsed_time)),
                     O2HB_DB_TYPE_REGION_ELAPSED_TIME,
                     0, 0, reg);
-    if (!reg->hr_debug_elapsed_time) {
+    if (IS_ERR_OR_NULL(reg->hr_debug_elapsed_time)) {
+        ret = reg->hr_debug_elapsed_time ?
+            PTR_ERR(reg->hr_debug_elapsed_time) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
@@ -2049,13 +2066,16 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
                     sizeof(*(reg->hr_db_pinned)),
                     O2HB_DB_TYPE_REGION_PINNED,
                     0, 0, reg);
-    if (!reg->hr_debug_pinned) {
+    if (IS_ERR_OR_NULL(reg->hr_debug_pinned)) {
+        ret = reg->hr_debug_pinned ?
+            PTR_ERR(reg->hr_debug_pinned) : -ENOMEM;
         mlog_errno(ret);
         goto bail;
     }
 
-    ret = 0;
+    return 0;
 bail:
+    debugfs_remove_recursive(reg->hr_debug_dir);
     return ret;
 }
@@ -196,13 +196,14 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
     }                                \
 } while (0)
 
-#define mlog_errno(st) do {                        \
+#define mlog_errno(st) ({                        \
     int _st = (st);                            \
     if (_st != -ERESTARTSYS && _st != -EINTR &&            \
         _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC &&        \
         _st != -EDQUOT)                        \
         mlog(ML_ERROR, "status = %lld\n", (long long)_st);    \
-} while (0)
+    _st;                                \
+})
 
 #define mlog_bug_on_msg(cond, fmt, args...) do {            \
     if (cond) {                            \
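Note: the mlog_errno() change above turns a do/while(0) statement macro into a GNU C statement expression, so the macro now yields its argument's value and callers can log and propagate an error in one step. A compact standalone illustration of the same construct (a GCC/Clang extension); log_errno() here is a made-up stand-in, not the ocfs2 macro:

/* Statement-expression macro: the ({ ... }) block is an expression
 * whose value is its last statement, so logging composes with return. */
#include <stdio.h>

#define log_errno(st) ({                                        \
    int _st = (st);                                             \
    if (_st < 0)                                                \
        fprintf(stderr, "status = %d\n", _st);                  \
    _st;                                                        \
})

static int open_thing(int fd)
{
    if (fd < 0)
        return log_errno(-22);  /* log and propagate in one step */
    return 0;
}

int main(void)
{
    return open_thing(-1) ? 1 : 0;
}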
@@ -18,7 +18,7 @@
  *
  *  linux/fs/minix/dir.c
  *
- *  Copyright (C) 1991, 1992 Linux Torvalds
+ *  Copyright (C) 1991, 1992 Linus Torvalds
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -2047,22 +2047,19 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
                   const char *name,
                   int namelen)
 {
-    int ret;
+    int ret = 0;
     struct ocfs2_dir_lookup_result lookup = { NULL, };
 
     trace_ocfs2_check_dir_for_entry(
         (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
 
-    ret = -EEXIST;
-    if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0)
-        goto bail;
+    if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
+        ret = -EEXIST;
+        mlog_errno(ret);
+    }
 
-    ret = 0;
-bail:
     ocfs2_free_dir_lookup_result(&lookup);
 
-    if (ret)
-        mlog_errno(ret);
     return ret;
 }
@@ -1391,6 +1391,11 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
     int noqueue_attempted = 0;
     int dlm_locked = 0;
 
+    if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
+        mlog_errno(-EINVAL);
+        return -EINVAL;
+    }
+
     ocfs2_init_mask_waiter(&mw);
 
     if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
@@ -2954,7 +2959,7 @@ static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
                          osb->osb_debug_root,
                          osb,
                          &ocfs2_dlm_debug_fops);
-    if (!dlm_debug->d_locking_state) {
+    if (IS_ERR_OR_NULL(dlm_debug->d_locking_state)) {
         ret = -EINVAL;
         mlog(ML_ERROR,
              "Unable to create locking state debugfs file.\n");
@@ -82,7 +82,6 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
     }
 
     status = ocfs2_test_inode_bit(osb, blkno, &set);
-    trace_ocfs2_get_dentry_test_bit(status, set);
     if (status < 0) {
         if (status == -EINVAL) {
             /*
@@ -96,6 +95,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
         goto unlock_nfs_sync;
     }
 
+    trace_ocfs2_get_dentry_test_bit(status, set);
     /* If the inode allocator bit is clear, this inode must be stale */
     if (!set) {
         status = -ESTALE;
@@ -624,7 +624,7 @@ static int ocfs2_remove_inode(struct inode *inode,
         ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
                         le16_to_cpu(di->i_suballoc_slot));
     if (!inode_alloc_inode) {
-        status = -EEXIST;
+        status = -ENOENT;
         mlog_errno(status);
         goto bail;
     }
@@ -742,7 +742,7 @@ static int ocfs2_wipe_inode(struct inode *inode,
                            ORPHAN_DIR_SYSTEM_INODE,
                            orphaned_slot);
     if (!orphan_dir_inode) {
-        status = -EEXIST;
+        status = -ENOENT;
         mlog_errno(status);
         goto bail;
     }
@@ -666,7 +666,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
         if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
             ocfs2_local_alloc_count_bits(alloc)) {
             ocfs2_error(osb->sb, "local alloc inode %llu says it has "
-                    "%u free bits, but a count shows %u",
+                    "%u used bits, but a count shows %u",
                     (unsigned long long)le64_to_cpu(alloc->i_blkno),
                     le32_to_cpu(alloc->id1.bitmap1.i_used),
                     ocfs2_local_alloc_count_bits(alloc));
@@ -839,7 +839,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
                      u32 *numbits,
                      struct ocfs2_alloc_reservation *resv)
 {
-    int numfound, bitoff, left, startoff, lastzero;
+    int numfound = 0, bitoff, left, startoff, lastzero;
     int local_resv = 0;
     struct ocfs2_alloc_reservation r;
     void *bitmap = NULL;
@@ -2322,10 +2322,10 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
 
     trace_ocfs2_orphan_del(
          (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
-         name, namelen);
+         name, strlen(name));
 
     /* find it's spot in the orphan directory */
-    status = ocfs2_find_entry(name, namelen, orphan_dir_inode,
+    status = ocfs2_find_entry(name, strlen(name), orphan_dir_inode,
                   &lookup);
     if (status) {
         mlog_errno(status);
@@ -2808,7 +2808,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
                            ORPHAN_DIR_SYSTEM_INODE,
                            osb->slot_num);
     if (!orphan_dir_inode) {
-        status = -EEXIST;
+        status = -ENOENT;
         mlog_errno(status);
         goto leave;
     }
@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
     error = posix_acl_create(dir, &mode, &default_acl, &acl);
     if (error) {
         mlog_errno(error);
-        goto out;
+        return error;
     }
 
     error = ocfs2_create_inode_in_orphan(dir, mode,
@@ -427,7 +427,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
     if (!si) {
         status = -ENOMEM;
         mlog_errno(status);
-        goto bail;
+        return status;
     }
 
     si->si_extended = ocfs2_uses_extended_slot_map(osb);
@@ -452,7 +452,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
 
     osb->slot_info = (struct ocfs2_slot_info *)si;
 bail:
-    if (status < 0 && si)
+    if (status < 0)
         __ocfs2_free_slot_info(si);
 
     return status;
@@ -295,7 +295,7 @@ static int o2cb_cluster_check(void)
             set_bit(node_num, netmap);
         if (!memcmp(hbmap, netmap, sizeof(hbmap)))
             return 0;
-        if (i < O2CB_MAP_STABILIZE_COUNT)
+        if (i < O2CB_MAP_STABILIZE_COUNT - 1)
             msleep(1000);
     }
 
@@ -1004,10 +1004,8 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
     BUG_ON(conn == NULL);
 
     lc = kzalloc(sizeof(struct ocfs2_live_connection), GFP_KERNEL);
-    if (!lc) {
-        rc = -ENOMEM;
-        goto out;
-    }
+    if (!lc)
+        return -ENOMEM;
 
     init_waitqueue_head(&lc->oc_wait);
     init_completion(&lc->oc_sync_wait);
@@ -1063,7 +1061,7 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
     }
 
 out:
-    if (rc && lc)
+    if (rc)
         kfree(lc);
     return rc;
 }
@@ -2499,6 +2499,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
                      alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE);
     if (status < 0) {
         mlog_errno(status);
+        ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh,
+                start_bit, count);
         goto bail;
     }
 
@@ -1112,7 +1112,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 
     osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
                          ocfs2_debugfs_root);
-    if (!osb->osb_debug_root) {
+    if (IS_ERR_OR_NULL(osb->osb_debug_root)) {
         status = -EINVAL;
         mlog(ML_ERROR, "Unable to create per-mount debugfs root.\n");
         goto read_super_error;
@@ -1122,7 +1122,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
                         osb->osb_debug_root,
                         osb,
                         &ocfs2_osb_debug_fops);
-    if (!osb->osb_ctxt) {
+    if (IS_ERR_OR_NULL(osb->osb_ctxt)) {
         status = -EINVAL;
         mlog_errno(status);
         goto read_super_error;
@@ -1606,8 +1606,9 @@ static int __init ocfs2_init(void)
     }
 
     ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL);
-    if (!ocfs2_debugfs_root) {
-        status = -ENOMEM;
+    if (IS_ERR_OR_NULL(ocfs2_debugfs_root)) {
+        status = ocfs2_debugfs_root ?
+            PTR_ERR(ocfs2_debugfs_root) : -ENOMEM;
         mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n");
         goto out4;
     }
@@ -2069,6 +2070,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
     cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
     bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
     sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
+    memcpy(sb->s_uuid, di->id2.i_super.s_uuid,
+           sizeof(di->id2.i_super.s_uuid));
 
     osb->osb_dx_mask = (1 << (cbits - bbits)) - 1;
 
@@ -2333,7 +2336,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
         mlog_errno(status);
         goto bail;
     }
-    cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
+    cleancache_init_shared_fs(sb);
 
 bail:
     return status;
@@ -2563,22 +2566,22 @@ static void ocfs2_handle_error(struct super_block *sb)
         ocfs2_set_ro_flag(osb, 0);
 }
 
-static char error_buf[1024];
-
-void __ocfs2_error(struct super_block *sb,
-           const char *function,
-           const char *fmt, ...)
+void __ocfs2_error(struct super_block *sb, const char *function,
+          const char *fmt, ...)
 {
+    struct va_format vaf;
     va_list args;
 
     va_start(args, fmt);
-    vsnprintf(error_buf, sizeof(error_buf), fmt, args);
-    va_end(args);
+    vaf.fmt = fmt;
+    vaf.va = &args;
 
     /* Not using mlog here because we want to show the actual
      * function the error came from. */
-    printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %s\n",
-           sb->s_id, function, error_buf);
+    printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV\n",
+           sb->s_id, function, &vaf);
+
+    va_end(args);
 
     ocfs2_handle_error(sb);
 }
@@ -2586,18 +2589,21 @@ void __ocfs2_error(struct super_block *sb,
 /* Handle critical errors. This is intentionally more drastic than
  * ocfs2_handle_error, so we only use for things like journal errors,
  * etc. */
-void __ocfs2_abort(struct super_block* sb,
-           const char *function,
+void __ocfs2_abort(struct super_block *sb, const char *function,
            const char *fmt, ...)
 {
+    struct va_format vaf;
     va_list args;
 
     va_start(args, fmt);
-    vsnprintf(error_buf, sizeof(error_buf), fmt, args);
-    va_end(args);
 
-    printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n",
-           sb->s_id, function, error_buf);
+    vaf.fmt = fmt;
+    vaf.va = &args;
+
+    printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV\n",
+           sb->s_id, function, &vaf);
+
+    va_end(args);
 
     /* We don't have the cluster support yet to go straight to
      * hard readonly in here.  Until then, we want to keep
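Note: the __ocfs2_error()/__ocfs2_abort() hunks above drop the shared static error_buf (a race between concurrent callers) by handing the format and va_list to printk via the kernel's %pV / struct va_format, so the message is rendered exactly once at the final print. A userspace analogue of the same idea, not the kernel API; the function name is made up:

/* Forward the caller's va_list to one final vfprintf instead of
 * pre-rendering into a shared buffer that concurrent callers could
 * trample. */
#include <stdarg.h>
#include <stdio.h>

static void report_error(const char *device, const char *function,
                         const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    fprintf(stderr, "ERROR (device %s): %s: ", device, function);
    vfprintf(stderr, fmt, args);    /* format exactly once, here */
    fputc('\n', stderr);
    va_end(args);
}

int main(void)
{
    report_error("sda1", "demo", "status = %d", -5);
    return 0;
}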
@@ -1238,6 +1238,10 @@ static int ocfs2_xattr_block_get(struct inode *inode,
                         i,
                         &block_off,
                         &name_offset);
+        if (ret) {
+            mlog_errno(ret);
+            goto cleanup;
+        }
         xs->base = bucket_block(xs->bucket, block_off);
     }
     if (ocfs2_xattr_is_local(xs->here)) {
@@ -5665,6 +5669,10 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
 
         ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
                               i, &xv, NULL);
+        if (ret) {
+            mlog_errno(ret);
+            break;
+        }
 
         ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
                              args->ref_ci,
@@ -224,7 +224,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
     s->s_maxbytes = MAX_NON_LFS;
     s->s_op = &default_op;
     s->s_time_gran = 1000000000;
-    s->cleancache_poolid = -1;
+    s->cleancache_poolid = CLEANCACHE_NO_POOL;
 
     s->s_shrink.seeks = DEFAULT_SEEKS;
     s->s_shrink.scan_objects = super_cache_scan;
@@ -6,6 +6,12 @@
 
 #include <linux/mm_types.h>
 #include <linux/bug.h>
+#include <linux/errno.h>
+
+#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
+    CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
+#endif
 
 /*
  * On almost all architectures and configurations, 0 can be used as the
@@ -691,6 +697,30 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #endif /* CONFIG_MMU */
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+#else    /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+    return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+    return 0;
+}
+static inline int pud_clear_huge(pud_t *pud)
+{
+    return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+    return 0;
+}
+#endif    /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
@@ -5,6 +5,10 @@
 #include <linux/exportfs.h>
 #include <linux/mm.h>
 
+#define CLEANCACHE_NO_POOL        -1
+#define CLEANCACHE_NO_BACKEND        -2
+#define CLEANCACHE_NO_BACKEND_SHARED    -3
+
 #define CLEANCACHE_KEY_MAX 6
 
 /*
@@ -33,10 +37,9 @@ struct cleancache_ops {
     void (*invalidate_fs)(int);
 };
 
-extern struct cleancache_ops *
-    cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(struct cleancache_ops *ops);
 extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(char *, struct super_block *);
+extern void __cleancache_init_shared_fs(struct super_block *);
 extern int __cleancache_get_page(struct page *);
 extern void __cleancache_put_page(struct page *);
 extern void __cleancache_invalidate_page(struct address_space *, struct page *);
@@ -78,10 +81,10 @@ static inline void cleancache_init_fs(struct super_block *sb)
         __cleancache_init_fs(sb);
 }
 
-static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
+static inline void cleancache_init_shared_fs(struct super_block *sb)
 {
     if (cleancache_enabled)
-        __cleancache_init_shared_fs(uuid, sb);
+        __cleancache_init_shared_fs(sb);
 }
 
 static inline int cleancache_get_page(struct page *page)
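Note: with cleancache_register_ops() now returning an int instead of handing back the previously registered ops, a backend registers the way the xen-tmem hunk above does: call once, treat a nonzero return as failure. A standalone mock of that contract for illustration only (not the kernel implementation; the -EBUSY semantics here are an assumption of this sketch):

/* Mock of an int-returning, register-once backend API. */
#include <stdio.h>

struct cleancache_ops_mock {
    void (*put_page)(void);
};

static struct cleancache_ops_mock *registered;

static int cleancache_register_ops_mock(struct cleancache_ops_mock *ops)
{
    if (registered)
        return -16;    /* assumed -EBUSY: one backend at a time */
    registered = ops;
    return 0;
}

static void put_page_stub(void) { }

int main(void)
{
    struct cleancache_ops_mock ops = { .put_page = put_page_stub };
    int err = cleancache_register_ops_mock(&ops);

    if (err)
        fprintf(stderr, "failed to enable cleancache: %d\n", err);
    else
        printf("cleancache backend registered\n");
    return 0;
}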
(Some files were not shown because too many files have changed in this diff.)