Merge 4.7-rc6 into usb-next

We want the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit c318a821b9
Greg Kroah-Hartman, 2016-07-04 08:19:21 -07:00
504 changed files with 4323 additions and 2572 deletions


@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
@@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>


@@ -263,19 +263,23 @@ scmd->allowed.
 3. scmd recovered
    ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
-       - shost->host_failed--
        - clear scmd->eh_eflags
        - scsi_setup_cmd_retry()
        - move from local eh_work_q to local eh_done_q
    LOCKING: none
+   CONCURRENCY: at most one thread per separate eh_work_q to
+                keep queue manipulation lockless
 
 4. EH completes
    ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
-           layer of failure.
+           layer of failure. May be called concurrently but must have
+           no more than one thread per separate eh_work_q to
+           manipulate the queue locklessly
        - scmd is removed from eh_done_q and scmd->eh_entry is cleared
        - if retry is necessary, scmd is requeued using
          scsi_queue_insert()
        - otherwise, scsi_finish_command() is invoked for scmd
+       - zero shost->host_failed
    LOCKING: queue or finish function performs appropriate locking
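Note: the locking rules above can be modeled outside the kernel. This standalone sketch is illustrative only: the struct and helpers are hypothetical stand-ins, not the kernel's scsi_cmnd or list API. It shows a single EH thread draining its private eh_work_q into eh_done_q and then flushing eh_done_q, the step 3/step 4 flow described here.

#include <stdio.h>
#include <stdbool.h>

struct scmd {                   /* stand-in for struct scsi_cmnd */
    int id;
    bool needs_retry;
    struct scmd *eh_entry;      /* singly linked, like scmd->eh_entry */
};

/* Remove the queue head; eh_entry is cleared on removal. */
static struct scmd *pop(struct scmd **q)
{
    struct scmd *s = *q;
    if (s) {
        *q = s->eh_entry;
        s->eh_entry = NULL;
    }
    return s;
}

/* Push onto the queue head. */
static void push(struct scmd **q, struct scmd *s)
{
    s->eh_entry = *q;
    *q = s;
}

int main(void)
{
    struct scmd a = { 1, true, NULL }, b = { 2, false, NULL };
    struct scmd *eh_work_q = NULL, *eh_done_q = NULL, *s;

    push(&eh_work_q, &a);
    push(&eh_work_q, &b);

    /* Step 3: EH-finish recovered scmds, eh_work_q -> eh_done_q.
     * No lock is needed because this thread owns both queues. */
    while ((s = pop(&eh_work_q)))
        push(&eh_done_q, s);

    /* Step 4: flush eh_done_q, retrying or finishing each scmd. */
    while ((s = pop(&eh_done_q)))
        printf("scmd %d: %s\n", s->id,
               s->needs_retry ? "requeue via scsi_queue_insert()"
                              : "finish via scsi_finish_command()");
    return 0;
}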


@@ -595,6 +595,10 @@ S: Odd Fixes
 L: linux-alpha@vger.kernel.org
 F: arch/alpha/
 
+ALPS PS/2 TOUCHPAD DRIVER
+R: Pali Rohár <pali.rohar@gmail.com>
+F: drivers/input/mouse/alps.*
+
 ALTERA MAILBOX DRIVER
 M: Ley Foon Tan <lftan@altera.com>
 L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
@@ -2776,9 +2780,9 @@ F: include/net/caif/
 F: net/caif/
 
 CALGARY x86-64 IOMMU
-M: Muli Ben-Yehuda <muli@il.ibm.com>
-M: "Jon D. Mason" <jdmason@kudzu.us>
-L: discuss@x86-64.org
+M: Muli Ben-Yehuda <mulix@mulix.org>
+M: Jon Mason <jdmason@kudzu.us>
+L: iommu@lists.linux-foundation.org
 S: Maintained
 F: arch/x86/kernel/pci-calgary_64.c
 F: arch/x86/kernel/tce_64.c
@@ -7420,7 +7424,7 @@ F: drivers/scsi/megaraid.*
 F: drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M: Eugenia Emantayev <eugenia@mellanox.com>
+M: Tariq Toukan <tariqt@mellanox.com>
 L: netdev@vger.kernel.org
 S: Supported
 W: http://www.mellanox.com
@@ -8959,6 +8963,7 @@ L: linux-gpio@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S: Maintained
 F: Documentation/devicetree/bindings/pinctrl/
+F: Documentation/pinctrl.txt
 F: drivers/pinctrl/
 F: include/linux/pinctrl/


@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -363,11 +363,13 @@ CHECK = sparse
 CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
               -Wbitwise -Wno-return-void $(CF)
+NOSTDINC_FLAGS  =
 CFLAGS_MODULE   =
 AFLAGS_MODULE   =
 LDFLAGS_MODULE  =
 CFLAGS_KERNEL   =
 AFLAGS_KERNEL   =
+LDFLAGS_vmlinux =
 CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV = -fsanitize-coverage=trace-pc


@@ -226,8 +226,8 @@ config ARCH_INIT_TASK
 config ARCH_TASK_STRUCT_ALLOCATOR
         bool
 
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
         bool
 
 # Select if arch wants to size task_struct dynamically via arch_task_struct_size:


@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+        pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         return ret;
 }
@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         return pte;
 }


@@ -66,8 +66,6 @@ endif
 endif
 
-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
-
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
 ifeq ($(atleast_gcc48),y)
 cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2


@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
         pte_t *pte;
 
-        pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+        pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                          __get_order_pte());
 
         return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
         pgtable_t pte_pg;
         struct page *page;
 
-        pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+        pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
         if (!pte_pg)
                 return 0;
         memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));


@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
          * prelogue is setup (callee regs saved and then fp set and not other
          * way around
          */
-        pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+        pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
         return 0;
 
 #endif


@@ -29,7 +29,7 @@
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+        return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)


@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
         kvm_timer_vcpu_terminate(vcpu);
         kvm_vgic_vcpu_destroy(vcpu);
         kvm_pmu_vcpu_destroy(vcpu);
+        kvm_vcpu_uninit(vcpu);
         kmem_cache_free(kvm_vcpu_cache, vcpu);
 }


@@ -95,7 +95,7 @@ boot := arch/arm64/boot
 Image: vmlinux
         $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
-Image.%: vmlinux
+Image.%: Image
         $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install:


@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()               do { } while (0)
 
-#define PGALLOC_GFP     (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP     (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #define PGD_SIZE        (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2


@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
         cpu_park_loop();
 }
 
+/*
+ * If a secondary CPU enters the kernel but fails to come online
+ * (e.g. due to mismatched features) and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */


@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 #include <asm/virt.h>
 
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
         unsigned long flags;
         struct sleep_stack_data state;
 
+        if (cpus_are_stuck_in_kernel()) {
+                pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+                return -EBUSY;
+        }
+
         local_dbg_save(flags);
 
         if (__cpu_suspend_enter(&state)) {


@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
 {
         return -EINVAL;
 }
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+        int any_cpu = raw_smp_processor_id();
+
+        if (cpu_ops[any_cpu]->cpu_die)
+                return true;
+#endif
+        return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+        bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+        return !!cpus_stuck_in_kernel || smp_spin_tables;
+}


@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                                          &asid_generation);
         flush_context(cpu);
 
-        /* We have at least 1 ASID per CPU, so this will always succeed */
+        /* We have more ASIDs than CPUs, so this will always succeed */
         asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 
 set_asid:
@@ -227,8 +227,11 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 static int asids_init(void)
 {
         asid_bits = get_cpu_asid_bits();
-        /* If we end up with more CPUs than ASIDs, expect things to crash */
-        WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+        /*
+         * Expect allocation after rollover to fail if we don't have at least
+         * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+         */
+        WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
         atomic64_set(&asid_generation, ASID_FIRST_VERSION);
         asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
                            GFP_KERNEL);
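Note: the corrected guard can be checked with concrete numbers. A standalone sketch with illustrative values (8 ASID bits, 128 possible CPUs), not arm64's actual configuration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned int asid_bits = 8;                      /* illustrative */
    unsigned long num_user_asids = 1UL << asid_bits; /* 256 */
    unsigned int num_possible_cpus = 128;            /* illustrative */

    /*
     * ASID #0 is reserved for init_mm and each CPU can hold one live
     * ASID across a rollover, so allocation after rollover is only
     * guaranteed to succeed with at least one spare ASID.
     */
    assert(num_user_asids - 1 > num_possible_cpus);
    printf("%lu allocatable ASIDs for %u CPUs: ok\n",
           num_user_asids - 1, num_possible_cpus);
    return 0;
}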


@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
         struct page *page = pte_page(pte);
 
-        /* no flushing needed for anonymous pages */
-        if (!page_mapping(page))
-                return;
-
         if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                 sync_icache_aliases(page_address(page),
                                     PAGE_SIZE << compound_order(page));


@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+        return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
-        return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+        return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
         struct page *page;
         void *pg;
 
-        pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+        pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
         if (!pg)
                 return NULL;


@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         return pte;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         struct page *pte;
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
         if (!pte)
                 return NULL;
         if (!pgtable_page_ctor(pte)) {


@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
         if (pte)
                 clear_page(pte);
         return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
         struct page *page;
 
 #ifdef CONFIG_HIGHPTE
-        page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+        page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-        page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        page = alloc_pages(GFP_KERNEL, 0);
 #endif
         if (!page)
                 return NULL;


@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
         struct page *pte;
 
-        pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+        pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (!pte)
                 return NULL;
         if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
-        gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+        gfp_t flags = GFP_KERNEL | __GFP_ZERO;
         return (pte_t *) __get_free_page(flags);
 }


@@ -45,7 +45,7 @@ config IA64
         select GENERIC_SMP_IDLE_THREAD
         select ARCH_INIT_TASK
         select ARCH_TASK_STRUCT_ALLOCATOR
-        select ARCH_THREAD_INFO_ALLOCATOR
+        select ARCH_THREAD_STACK_ALLOCATOR
         select ARCH_CLOCKSOURCE_DATA
         select GENERIC_TIME_VSYSCALL_OLD
         select SYSCTL_ARCH_UNALIGN_NO_WARN


@@ -48,15 +48,15 @@ struct thread_info {
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()   ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node)       \
-                ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node)      \
+                ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)   ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()   ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node)       ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node)      ((unsigned long *) 0)
 #define task_thread_info(tsk)   ((struct thread_info *) 0)
 #endif
-#define free_thread_info(ti)    /* nothing */
+#define free_thread_stack(ti)   /* nothing */
 #define task_stack_page(tsk)    ((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS


@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info        init_task_mem.s.thread_info
+#define init_stack              init_task_mem.stack
 
 union {
         struct {


@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
         unsigned long address)
 {
-        unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+        unsigned long page = __get_free_page(GFP_DMA);
 
         if (!page)
                 return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
         unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+        struct page *page = alloc_pages(GFP_DMA, 0);
         pte_t *pte;
 
         if (!page)


@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
         pte_t *pte;
 
-        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         if (pte) {
                 __flush_page_to_ram(pte);
                 flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
         struct page *page;
         pte_t *pte;
 
-        page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+        page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
         if(!page)
                 return NULL;
         if (!pgtable_page_ctor(page)) {


@@ -37,7 +37,7 @@ do {                                                    \
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
-        unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        unsigned long page = __get_free_page(GFP_KERNEL);
 
         if (!page)
                 return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                         unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        struct page *page = alloc_pages(GFP_KERNEL, 0);
 
         if (page == NULL)
                 return NULL;


@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
-                                              __GFP_ZERO);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
         return pte;
 }
 
@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                       unsigned long address)
 {
         struct page *pte;
-        pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+        pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
         if (!pte)
                 return NULL;
         if (!pgtable_page_ctor(pte)) {


@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
         struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-        int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+        int flags = GFP_KERNEL | __GFP_HIGHMEM;
 #else
-        int flags = GFP_KERNEL | __GFP_REPEAT;
+        int flags = GFP_KERNEL;
 #endif
 
         ptepage = alloc_pages(flags, 0);


@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
         pte_t *pte;
         if (mem_init_done) {
-                pte = (pte_t *)__get_free_page(GFP_KERNEL |
-                                        __GFP_REPEAT | __GFP_ZERO);
+                pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
         } else {
                 pte = (pte_t *)early_get_page();
                 if (pte)


@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
         pte_t *pte;
 
-        pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+        pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
         return pte;
 }
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
         struct page *pte;
 
-        pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+        pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
         if (!pte)
                 return NULL;
         clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         pmd_t *pmd;
 
-        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
         if (pmd)
                 pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
         return pmd;


@@ -24,7 +24,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
-                                 _CACHE_CACHABLE_NONCOHERENT)
+                                 _page_cachable_default)
 #define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
                                  _page_cachable_default)
 #define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
         pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
         pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
         pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
-        pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+        pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
         return pte;
 }
 #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+        return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+                     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
 }
 #endif
 
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-        pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+        pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) |
+                       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
         return pmd;
 }


@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
 #endif
 #define get_thread_info(ti)     get_task_struct((ti)->task)
 #define put_thread_info(ti)     put_task_struct((ti)->task)


@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
  * single-step state is cleared. At this point the breakpoints should have
  * been removed by __switch_to().
  */
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
 {
+        struct thread_info *ti = (void *)stack;
         if (kgdb_sstep_thread == ti) {
                 kgdb_sstep_thread = NULL;


@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
         if (pte)
                 clear_page(pte);
         return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
         struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        pte = alloc_pages(GFP_KERNEL, 0);
 #endif
         if (!pte)
                 return NULL;


@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
         pte_t *pte;
 
-        pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-                                         PTE_ORDER);
+        pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
         return pte;
 }
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
         struct page *pte;
 
-        pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+        pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
         if (pte) {
                 if (!pgtable_page_ctor(pte)) {
                         __free_page(pte);


@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                          unsigned long address)
 {
         struct page *pte;
-        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        pte = alloc_pages(GFP_KERNEL, 0);
         if (!pte)
                 return NULL;
         clear_page(page_address(pte));


@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
         pte_t *pte;
 
         if (likely(mem_init_done)) {
-                pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+                pte = (pte_t *) __get_free_page(GFP_KERNEL);
         } else {
                 pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 #if 0


@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-                                               PMD_ORDER);
+        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
         if (pmd)
                 memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
         return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+        struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
         if (!page)
                 return NULL;
         if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         return pte;
 }


@@ -128,7 +128,7 @@ config PPC
         select IRQ_FORCED_THREADING
         select HAVE_RCU_TABLE_FREE if SMP
         select HAVE_SYSCALL_TRACEPOINTS
-        select HAVE_CBPF_JIT
+        select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
         select HAVE_ARCH_JUMP_LABEL
         select ARCH_HAVE_NMI_SAFE_CMPXCHG
         select ARCH_HAS_GCOV_PROFILE_ALL


@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                   unsigned long address)
 {
-        tlb_flush_pgtable(tlb, address);
         pgtable_page_dtor(table);
         pgtable_free_tlb(tlb, page_address(table), 0);
 }


@@ -88,6 +88,7 @@
 #define HPTE_R_RPN_SHIFT        12
 #define HPTE_R_RPN              ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP               ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP              ASM_CONST(0x8000000000000003)
 #define HPTE_R_N                ASM_CONST(0x0000000000000004)
 #define HPTE_R_G                ASM_CONST(0x0000000000000008)
 #define HPTE_R_M                ASM_CONST(0x0000000000000010)
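Note: 0x8000000000000003 is the two-bit PP field plus a third protection bit at the top of the doubleword, so HPTE_R_PPP lets the update paths later in this diff mask all three at once. A quick standalone check; the name and placement of the high bit are assumptions here, only the two constants in the hunk come from the source:

#include <assert.h>

int main(void)
{
    unsigned long hpte_r_pp  = 0x0000000000000003UL; /* from the hunk */
    unsigned long hpte_r_ppp = 0x8000000000000003UL; /* from the hunk */
    unsigned long pp0        = 0x8000000000000000UL; /* assumed bit   */

    assert((hpte_r_pp | pp0) == hpte_r_ppp);
    return 0;
}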


@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
                         pgtable_cache[(shift) - 1];     \
                 })
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
         return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
         struct page *page;
-        page = alloc_pages(PGALLOC_GFP, 4);
+        page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
         if (!page)
                 return NULL;
         return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                                GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long address)
 {
+        /*
+         * By now all the pud entries should be none entries. So go
+         * ahead and flush the page walk cache
+         */
+        flush_tlb_pgtable(tlb, address);
         pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                                GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long address)
 {
+        /*
+         * By now all the pud entries should be none entries. So go
+         * ahead and flush the page walk cache
+         */
+        flush_tlb_pgtable(tlb, address);
         return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
-        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                   unsigned long address)
 {
-        tlb_flush_pgtable(tlb, address);
+        /*
+         * By now all the pud entries should be none entries. So go
+         * ahead and flush the page walk cache
+         */
+        flush_tlb_pgtable(tlb, address);
         pgtable_free_tlb(tlb, table, 0);
 }


@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
 #define KERN_VIRT_SIZE  __kernel_virt_size
 extern struct page *vmemmap;
 extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
 #endif /* __ASSEMBLY__ */
 
 #include <asm/book3s/64/hash.h>


@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                                  pgprot_t flags, unsigned int psz);
 
+static inline unsigned long radix__get_tree_size(void)
+{
+        unsigned long rts_field;
+        /*
+         * we support 52 bits, hence 52-31 = 21, 0b10101
+         * RTS encoding details
+         * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+         * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+         */
+        rts_field = (0x5UL << 5); /* 6 - 8 bits */
+        rts_field |= (0x2UL << 61);
+
+        return rts_field;
+}
 #endif /* __ASSEMBLY__ */
 #endif
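Note: the encoding in radix__get_tree_size() can be checked by reassembling the field. A standalone sketch; the bit positions follow the shifts in the code (the in-code comment counts the same fields as 6-8 and 62-63 under a different numbering convention):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* 52-bit address space: RTS = 52 - 31 = 21 = 0b10101. */
    unsigned long rts = 21;

    /* As in radix__get_tree_size(): low three RTS bits shifted to
     * bit 5, top two shifted to bit 61. */
    unsigned long rts_field = (0x5UL << 5) | (0x2UL << 61);

    /* Reassemble and confirm the split round-trips to 21. */
    unsigned long low  = (rts_field >> 5) & 0x7;
    unsigned long high = (rts_field >> 61) & 0x3;
    assert(((high << 3) | low) == rts);
    printf("rts_field %#lx encodes RTS %lu\n", rts_field, rts);
    return 0;
}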


@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                                             unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                                    unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)         radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)     radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 
 #endif


@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)       local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+        /*
+         * Flush the page table walk cache on freeing a page table. We already
+         * have marked the upper/higher level page table entry none by now.
+         * So it is safe to flush PWC here.
+         */
+        if (!radix_enabled())
+                return;
+        radix__flush_tlb_pwc(tlb, address);
+}
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */


@@ -4,11 +4,6 @@
 #include <linux/mm.h>
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
-                                     unsigned long address)
-{
-
-}
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/pgalloc.h>


@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                                GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
-        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                                GFP_KERNEL|__GFP_REPEAT);
+        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)


@@ -642,13 +642,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                 if (pe->type & EEH_PE_VF) {
                         eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
                 } else {
-                        eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                         pci_lock_rescan_remove();
                         pci_hp_remove_devices(bus);
                         pci_unlock_rescan_remove();
                 }
         } else if (frozen_bus) {
-                eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+                eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
         }
 
         /*
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                  */
                 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
                 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
-                if (pe->type & EEH_PE_VF)
+                if (pe->type & EEH_PE_VF) {
                         eeh_add_virt_device(edev, NULL);
-                else
+                } else {
+                        eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                         pci_hp_add_devices(bus);
+                }
         } else if (frozen_bus && rmv_data->removed) {
                 pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
                 ssleep(5);


@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
         lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
 
         mtlr    r10
-BEGIN_MMU_FTR_SECTION
-        b       2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
         andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
         beq-    2f
+FTR_SECTION_ELSE
+        b       2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 .machine push
 .machine "power4"


@@ -47,7 +47,6 @@ static int __init pcibios_init(void)
 
         printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
-        pci_io_base = ISA_IO_BASE;
         /* For now, override phys_mem_access_prot. If we need it,g
          * later, we may move that initialization to each ppc_md
          */


@@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
                 current->thread.regs = regs - 1;
         }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+        /*
+         * Clear any transactional state, we're exec()ing. The cause is
+         * not important as there will never be a recheckpoint so it's not
+         * user visible.
+         */
+        if (MSR_TM_SUSPENDED(mfmsr()))
+                tm_reclaim_current(0);
+#endif
+
         memset(regs->gpr, 0, sizeof(regs->gpr));
         regs->ctr = 0;
         regs->link = 0;


@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
         std     r3, STK_PARAM(R3)(r1)
         SAVE_NVGPRS(r1)
 
-        /* We need to setup MSR for VSX register save instructions.  Here we
-         * also clear the MSR RI since when we do the treclaim, we won't have a
-         * valid kernel pointer for a while. We clear RI here as it avoids
-         * adding another mtmsr closer to the treclaim. This makes the region
-         * maked as non-recoverable wider than it needs to be but it saves on
-         * inserting another mtmsrd later.
-         */
+        /* We need to setup MSR for VSX register save instructions. */
         mfmsr   r14
         mr      r15, r14
         ori     r15, r15, MSR_FP
-        li      r16, MSR_RI
+        li      r16, 0
         ori     r16, r16, MSR_EE        /* IRQs hard off */
         andc    r15, r15, r16
         oris    r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
 1:      tdeqi   r6, 0
         EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
 
-        /* The moment we treclaim, ALL of our GPRs will switch
+        /* Clear MSR RI since we are about to change r1, EE is already off. */
+        li      r4, 0
+        mtmsrd  r4, 1
+
+        /*
+         * BE CAREFUL HERE:
+         * At this point we can't take an SLB miss since we have MSR_RI
+         * off. Load only to/from the stack/paca which are in SLB bolted regions
+         * until we turn MSR RI back on.
+         *
+         * The moment we treclaim, ALL of our GPRs will switch
          * to user register state.  (FPRs, CCR etc. also!)
          * Use an sprg and a tm_scratch in the PACA to shuffle.
          */
@@ -197,6 +201,11 @@ dont_backup_fp:
         /* Store the PPR in r11 and reset to decent value */
         std     r11, GPR11(r1)                  /* Temporary stash */
 
+        /* Reset MSR RI so we can take SLB faults again */
+        li      r11, MSR_RI
+        mtmsrd  r11, 1
+
         mfspr   r11, SPRN_PPR
         HMT_MEDIUM
@@ -397,11 +406,6 @@ restore_gprs:
         ld      r5, THREAD_TM_DSCR(r3)
         ld      r6, THREAD_TM_PPR(r3)
 
-        /* Clear the MSR RI since we are about to change R1.  EE is already off
-         */
-        li      r4, 0
-        mtmsrd  r4, 1
-
         REST_GPR(0, r7)                         /* GPR0 */
         REST_2GPRS(2, r7)                       /* GPR2-3 */
         REST_GPR(4, r7)                         /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
         ld      r6, _CCR(r7)
         mtcr    r6
 
-        REST_GPR(1, r7)                         /* GPR1 */
-        REST_GPR(5, r7)                         /* GPR5-7 */
         REST_GPR(6, r7)
-        ld      r7, GPR7(r7)
+
+        /*
+         * Store r1 and r5 on the stack so that we can access them
+         * after we clear MSR RI.
+         */
+
+        REST_GPR(5, r7)
+        std     r5, -8(r1)
+        ld      r5, GPR1(r7)
+        std     r5, -16(r1)
+
+        REST_GPR(7, r7)
+
+        /* Clear MSR RI since we are about to change r1. EE is already off */
+        li      r5, 0
+        mtmsrd  r5, 1
+
+        /*
+         * BE CAREFUL HERE:
+         * At this point we can't take an SLB miss since we have MSR_RI
+         * off. Load only to/from the stack/paca which are in SLB bolted regions
+         * until we turn MSR RI back on.
+         */
+
+        ld      r5, -8(r1)
+        ld      r1, -16(r1)
 
         /* Commit register state as checkpointed state: */
         TRECHKPT

@@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                 DBG_LOW(" -> hit\n");
                 /* Update the HPTE */
                 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                                        ~(HPTE_R_PP | HPTE_R_N)) |
-                                       (newpp & (HPTE_R_PP | HPTE_R_N |
+                                        ~(HPTE_R_PPP | HPTE_R_N)) |
+                                       (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                  HPTE_R_C)));
         }
         native_unlock_hpte(hptep);
@@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 
         /* Update the HPTE */
         hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                                ~(HPTE_R_PP | HPTE_R_N)) |
-                               (newpp & (HPTE_R_PP | HPTE_R_N)));
+                                ~(HPTE_R_PPP | HPTE_R_N)) |
+                               (newpp & (HPTE_R_PPP | HPTE_R_N)));
         /*
          * Ensure it is out of the tlb too. Bolted entries base and
          * actual page size will be same.


@@ -201,9 +201,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
         /*
          * We can't allow hardware to update hpte bits. Hence always
          * set 'R' bit and set 'C' if it is a write fault
-         * Memory coherence is always enabled
          */
-        rflags |=  HPTE_R_R | HPTE_R_M;
+        rflags |=  HPTE_R_R;
 
         if (pteflags & _PAGE_DIRTY)
                 rflags |= HPTE_R_C;
@@ -213,10 +212,15 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 
         if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
                 rflags |= HPTE_R_I;
-        if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
                 rflags |= (HPTE_R_I | HPTE_R_G);
-        if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
-                rflags |= (HPTE_R_I | HPTE_R_W);
+        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+                rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+        else
+                /*
+                 * Add memory coherence if cache inhibited is not set
+                 */
+                rflags |= HPTE_R_M;
 
         return rflags;
 }
@@ -918,6 +922,10 @@ void __init hash__early_init_mmu(void)
         vmemmap = (struct page *)H_VMEMMAP_BASE;
         ioremap_bot = IOREMAP_BASE;
 
+#ifdef CONFIG_PCI
+        pci_io_base = ISA_IO_BASE;
+#endif
+
         /* Initialize the MMU Hash table and create the linear mapping
          * of memory. Has to be done before SLB initialization as this is
          * currently where the page size encoding is obtained.
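Note: the first hunk above turns the cache-control cases into an if/else-if chain so that exactly one case applies and HPTE_R_M (memory coherence) is added only when caching is not inhibited, plus explicitly for SAO. A standalone mirror of that chain with invented bit values; the real encodings live in the hash MMU headers:

#include <stdio.h>

#define HPTE_R_W 0x1UL  /* invented values, for illustration only */
#define HPTE_R_I 0x2UL
#define HPTE_R_G 0x4UL
#define HPTE_R_M 0x8UL

enum cache_ctl { CACHED, TOLERANT, NON_IDEMPOTENT, SAO };

/* Same shape as the fixed htab_convert_pte_flags() chain. */
static unsigned long cache_flags(enum cache_ctl c)
{
    unsigned long rflags = 0;

    if (c == TOLERANT)
        rflags |= HPTE_R_I;
    else if (c == NON_IDEMPOTENT)
        rflags |= HPTE_R_I | HPTE_R_G;
    else if (c == SAO)
        rflags |= HPTE_R_W | HPTE_R_I | HPTE_R_M;
    else
        rflags |= HPTE_R_M;  /* coherence for cacheable pages */
    return rflags;
}

int main(void)
{
    printf("cached %#lx, tolerant %#lx, non-idempotent %#lx, sao %#lx\n",
           cache_flags(CACHED), cache_flags(TOLERANT),
           cache_flags(NON_IDEMPOTENT), cache_flags(SAO));
    return 0;
}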


@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
         cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
-        new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+        new = kmem_cache_zalloc(cachep, GFP_KERNEL);
 
         BUG_ON(pshift > HUGEPD_SHIFT_MASK);
         BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);


@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
         /*
          * set the process table entry,
          */
-        rts_field = 3ull << PPC_BITLSHIFT(2);
+        rts_field = radix__get_tree_size();
         process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
         return 0;
 }


@@ -160,9 +160,8 @@ static void __init radix_init_pgtable(void)
         process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
         /*
          * Fill in the process table.
-         * we support 52 bits, hence 52-28 = 24, 11000
          */
-        rts_field = 3ull << PPC_BITLSHIFT(2);
+        rts_field = radix__get_tree_size();
         process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
         /*
          * Fill in the partition table. We are suppose to use effective address
@@ -176,10 +175,8 @@ static void __init radix_init_pgtable(void)
 static void __init radix_init_partition_table(void)
 {
         unsigned long rts_field;
-        /*
-         * we support 52 bits, hence 52-28 = 24, 11000
-         */
-        rts_field = 3ull << PPC_BITLSHIFT(2);
+
+        rts_field = radix__get_tree_size();
 
         BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
         partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
@@ -331,6 +328,11 @@ void __init radix__early_init_mmu(void)
         __vmalloc_end = RADIX_VMALLOC_END;
         vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
         ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+        pci_io_base = ISA_IO_BASE;
+#endif
+
         /*
          * For now radix also use the same frag size
          */


@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
         pte_t *pte;
 
         if (slab_is_available()) {
-                pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+                pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         } else {
                 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
                 if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         struct page *ptepage;
 
-        gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+        gfp_t flags = GFP_KERNEL | __GFP_ZERO;
 
         ptepage = alloc_pages(flags, 0);
         if (!ptepage)


@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 {
         void *ret = NULL;
-        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                       __GFP_REPEAT | __GFP_ZERO);
+        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
         if (!page)
                 return NULL;
         if (!kernel && !pgtable_page_ctor(page)) {


@@ -18,16 +18,20 @@
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+				unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = PPC_BIT(53); /* IS = 1 */
 	rb |= set << PPC_BITLSHIFT(51);
 	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 2;  /* invalidate all the caches */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 {
 	int set;
 
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
-		__tlbiel_pid(pid, set);
+		__tlbiel_pid(pid, set, ric);
 	}
 	return;
 }
 
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = PPC_BIT(53); /* IS = 1 */
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 2;  /* invalidate all the caches */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
 }
 
 static inline void _tlbiel_va(unsigned long va, unsigned long pid,
-			      unsigned long ap)
+			      unsigned long ap, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = va & ~(PPC_BITMASK(52, 63));
 	rb |= ap << PPC_BITLSHIFT(58);
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 0;  /* no cluster flush yet */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
-			     unsigned long ap)
+			     unsigned long ap, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = va & ~(PPC_BITMASK(52, 63));
 	rb |= ap << PPC_BITLSHIFT(58);
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 0;  /* no cluster flush yet */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -122,11 +123,26 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	pid = mm->context.id;
 	if (pid != MMU_NO_CONTEXT)
-		_tlbiel_pid(pid);
+		_tlbiel_pid(pid, RIC_FLUSH_ALL);
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+	unsigned long pid;
+	struct mm_struct *mm = tlb->mm;
+
+	preempt_disable();
+	pid = mm->context.id;
+	if (pid != MMU_NO_CONTEXT)
+		_tlbiel_pid(pid, RIC_FLUSH_PWC);
+	preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
 void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 			    unsigned long ap, int nid)
 {
@@ -135,7 +151,7 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
 	if (pid != MMU_NO_CONTEXT)
-		_tlbiel_va(vmaddr, pid, ap);
+		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 	preempt_enable();
 }
 
@@ -172,16 +188,42 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_pid(pid);
+		_tlbie_pid(pid, RIC_FLUSH_ALL);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	} else
-		_tlbiel_pid(pid);
+		_tlbiel_pid(pid, RIC_FLUSH_ALL);
 no_context:
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+	unsigned long pid;
+	struct mm_struct *mm = tlb->mm;
+
+	preempt_disable();
+	pid = mm->context.id;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto no_context;
+
+	if (!mm_is_core_local(mm)) {
+		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+		if (lock_tlbie)
+			raw_spin_lock(&native_tlbie_lock);
+		_tlbie_pid(pid, RIC_FLUSH_PWC);
+		if (lock_tlbie)
+			raw_spin_unlock(&native_tlbie_lock);
+	} else
+		_tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+	preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
 void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 		       unsigned long ap, int nid)
 {
@@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_va(vmaddr, pid, ap);
+		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	} else
-		_tlbiel_va(vmaddr, pid, ap);
+		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 bail:
 	preempt_enable();
 }
@@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_pid(0);
+		_tlbie_pid(0, RIC_FLUSH_ALL);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	}
 }
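
Editorial aside, not part of this merge: the new RIC (Radix Invalidation Control) argument lets callers flush only what an operation actually invalidated — TLB entries, the page-walk cache (PWC), or both. A hedged userspace sketch of that dispatch, with invented names:

	/* Illustration only: models the RIC_FLUSH_* selection added above. */
	#include <stdio.h>

	enum ric { RIC_TLB = 0, RIC_PWC = 1, RIC_ALL = 2 };

	static void flush(unsigned long pid, enum ric ric)
	{
		switch (ric) {
		case RIC_TLB: printf("pid %lu: flush TLB entries only\n", pid); break;
		case RIC_PWC: printf("pid %lu: flush page-walk cache only\n", pid); break;
		case RIC_ALL: printf("pid %lu: flush TLB and page-walk cache\n", pid); break;
		}
	}

	int main(void)
	{
		flush(42, RIC_PWC);	/* e.g. after freeing page-table pages */
		flush(42, RIC_ALL);	/* e.g. on full mm teardown */
		return 0;
	}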

View File

@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
 		"	la	%0,0\n"
 		"1:\n"
 		EX_TABLE(0b,1b)
-		: "=d" (rc), "=d" (orig_fpc)
+		: "=d" (rc), "=&d" (orig_fpc)
 		: "d" (fpc), "0" (-EINVAL));
 	return rc;
 }

View File

@@ -2064,12 +2064,5 @@ void s390_reset_system(void)
 	S390_lowcore.program_new_psw.addr =
 		(unsigned long) s390_base_pgm_handler;
 
-	/*
-	 * Clear subchannel ID and number to signal new kernel that no CCW or
-	 * SCSI IPL has been done (for kexec and kdump)
-	 */
-	S390_lowcore.subchannel_id = 0;
-	S390_lowcore.subchannel_nr = 0;
-
 	do_reset_calls();
 }

View File

@@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
 /* Performance monitoring unit for s390x */
 static struct pmu cpumf_pmu = {
+	.task_ctx_nr  = perf_sw_context,
+	.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
 	.pmu_enable   = cpumf_pmu_enable,
 	.pmu_disable  = cpumf_pmu_disable,
 	.event_init   = cpumf_pmu_event_init,
@@ -708,12 +710,6 @@ static int __init cpumf_pmu_init(void)
 		goto out;
 	}
 
-	/* The CPU measurement counter facility does not have overflow
-	 * interrupts to do sampling. Sampling must be provided by
-	 * external means, for example, by timers.
-	 */
-	cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
 	cpumf_pmu.attr_groups = cpumf_cf_event_group();
 	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
 	if (rc) {

View File

@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		return table;
 	}
 	/* Allocate a fresh page */
-	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {

View File

@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 	pgste = pgste_get_lock(ptep);
 	pgstev = pgste_val(pgste);
 	pte = *ptep;
-	if (pte_swap(pte) &&
+	if (!reset && pte_swap(pte) &&
 	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
 	     (pgstev & _PGSTE_GPS_ZERO))) {
 		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));

View File

@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-					 PTE_ORDER);
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
 	return pte;
 }
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;
 
-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+	pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
 	if (!pte)
 		return NULL;
 	clear_highpage(pte);

View File

@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+	return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	struct page *page;
 	void *pg;
 
-	pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+	pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 	if (!pg)
 		return NULL;
 	page = virt_to_page(pg);

View File

@@ -1,7 +1,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
 
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2

View File

@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache,
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache,
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)

View File

@@ -2704,8 +2704,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 			    unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pte_t *pte = NULL;
 
 	if (page)
@@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
 			unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {

View File

@@ -78,7 +78,7 @@ struct thread_info {
 
 #ifndef __ASSEMBLY__
 
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
 
 /* How to get the thread information struct from C. */
 register unsigned long stack_pointer __asm__("sp");

View File

@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
 /*
  * Release a thread_info structure
  */
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
 {
+	struct thread_info *info = (void *)stack;
 	struct single_step_state *step_state = info->step_state;
 
 	if (step_state) {
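
Editorial aside, not part of this merge: the cast works because struct thread_info lives at the base of the task's stack allocation, so a stack pointer can be converted back to it. A hedged userspace model of that layout, with invented names:

	/* Illustration only: a control structure placed at the bottom of a
	 * "stack" allocation, recovered later from the stack pointer. */
	#include <stdio.h>
	#include <stdlib.h>

	struct fake_thread_info { int cpu; };

	int main(void)
	{
		unsigned long *stack = calloc(1, 8192);	/* stands in for THREAD_SIZE */
		struct fake_thread_info *info;

		((struct fake_thread_info *)stack)->cpu = 3;
		info = (struct fake_thread_info *)stack;	/* the cast in the patch */
		printf("cpu = %d\n", info->cpu);
		free(stack);
		return 0;
	}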

View File

@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
 			       int order)
 {
-	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+	gfp_t flags = GFP_KERNEL|__GFP_ZERO;
 	struct page *p;
 	int i;

View File

@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;
 
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }
 
@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;
 
-	pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 	if (!pte)
 		return NULL;
 	if (!pgtable_page_ctor(pte)) {

View File

@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)			get_pgd_slow(mm)
 #define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * Allocate one PTE table.

View File

@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
 #define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR)					       \
-	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) +       \
-			      THREAD_SIZE - (unsigned long)(ADDR)))	       \
-	 ? (MAX_STACK_SIZE)					       \
-	 : (((unsigned long)current_thread_info()) +		       \
-	    THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+	(current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR)				\
+	(MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ?	\
+	 MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
 
 #define flush_insn_slot(p)	do { } while (0)
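
Editorial aside, not part of this merge: the rewritten macros clamp how many bytes kprobes may copy — at most MAX_STACK_SIZE, and never past the top of the current stack. A hedged userspace sketch of the same clamp, with the stack top passed explicitly instead of current_top_of_stack():

	/* Illustration only, adapted from the macros above. */
	#include <stdio.h>

	#define MAX_STACK_SIZE 64
	#define CUR_STACK_SIZE(top, addr) ((top) - (unsigned long)(addr))
	#define MIN_STACK_SIZE(top, addr)			\
		(MAX_STACK_SIZE < CUR_STACK_SIZE(top, addr) ?	\
		 MAX_STACK_SIZE : CUR_STACK_SIZE(top, addr))

	int main(void)
	{
		char stack[256];
		unsigned long top = (unsigned long)(stack + sizeof(stack));

		/* 200 bytes below the top: clamped to MAX_STACK_SIZE (64) */
		printf("%lu\n", MIN_STACK_SIZE(top, stack + 56));
		/* 16 bytes below the top: clamped to the remaining 16 */
		printf("%lu\n", MIN_STACK_SIZE(top, stack + 240));
		return 0;
	}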

View File

@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
-	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!page)
 		return NULL;
 	if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pud_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)

View File

@@ -68,30 +68,23 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	return product;
 }
 
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
-	u64 delta = rdtsc_ordered() - src->tsc_timestamp;
-	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
-				   src->tsc_shift);
-}
-
 static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 			       cycle_t *cycles, u8 *flags)
 {
 	unsigned version;
-	cycle_t ret, offset;
-	u8 ret_flags;
+	cycle_t offset;
+	u64 delta;
 
 	version = src->version;
+	/* Make the latest version visible */
+	smp_rmb();
 
-	offset = pvclock_get_nsec_offset(src);
-	ret = src->system_time + offset;
-	ret_flags = src->flags;
-
-	*cycles = ret;
-	*flags = ret_flags;
+	delta = rdtsc_ordered() - src->tsc_timestamp;
+	offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+				     src->tsc_shift);
+	*cycles = src->system_time + offset;
+	*flags = src->flags;
 	return version;
 }
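
Editorial aside, not part of this merge: the inlined computation scales a TSC delta into nanoseconds with pvclock_scale_delta(), a fixed-point multiply of the form (delta << shift) * mul_frac >> 32. A hedged sketch of that arithmetic using 128-bit math instead of the kernel's open-coded 64x32 multiply; the frequency constant is invented for illustration:

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
	}

	int main(void)
	{
		/* e.g. a 2.5 GHz TSC scaled to ns: mul_frac ~= 2^32 / 2.5 */
		uint32_t mul_frac = (uint32_t)((1ull << 32) * 2 / 5);

		/* 2.5e9 cycles ~= one second's worth of nanoseconds */
		printf("%llu ns\n",
		       (unsigned long long)scale_delta(2500000000ull, mul_frac, 0));
		return 0;
	}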

View File

@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
 struct thread_info;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
 				      unsigned long *stack,
 				      unsigned long bp,
 				      const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
 				      int *graph);
 
 extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
 		    unsigned long *stack, unsigned long bp,
 		    const struct stacktrace_ops *ops, void *data,
 		    unsigned long *end, int *graph);
 
 extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
 		       unsigned long *stack, unsigned long bp,
 		       const struct stacktrace_ops *ops, void *data,
 		       unsigned long *end, int *graph);

View File

@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 {
-	struct task_struct *task;
 	unsigned long ret_addr;
 	int index;
 
 	if (addr != (unsigned long)return_to_handler)
 		return;
 
-	task = tinfo->task;
 	index = task->curr_ret_stack;
 
 	if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
 			void *p, unsigned int size, void *end)
 {
-	void *t = tinfo;
+	void *t = task_stack_page(task);
 	if (end) {
 		if (p < end && p >= (end-THREAD_SIZE))
 			return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data,
 		unsigned long *end, int *graph)
 {
 	struct stack_frame *frame = (struct stack_frame *)bp;
 
-	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+	while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
 		unsigned long addr;
 
 		addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
 			} else {
 				ops->address(data, addr, 0);
 			}
-			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+			print_ftrace_graph_addr(addr, data, ops, task, graph);
 		}
 		stack++;
 	}
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
 		       unsigned long *stack, unsigned long bp,
 		       const struct stacktrace_ops *ops, void *data,
 		       unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 	struct stack_frame *frame = (struct stack_frame *)bp;
 	unsigned long *ret_addr = &frame->return_address;
 
-	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+	while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
 		unsigned long addr = *ret_addr;
 
 		if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 			break;
 		frame = frame->next_frame;
 		ret_addr = &frame->return_address;
-		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+		print_ftrace_graph_addr(addr, data, ops, task, graph);
 	}
 
 	return (unsigned long)frame;
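
Editorial aside, not part of this merge: valid_stack_ptr() now derives the stack bounds from task_stack_page() instead of treating the thread_info pointer as the stack base. A simplified, hedged userspace model of the bounds check, with the end-pointer special case omitted:

	#include <stdio.h>
	#include <stdlib.h>

	#define THREAD_SIZE 8192

	/* An object of `size` bytes must lie entirely inside the stack pages. */
	static int valid_stack_ptr(void *stack_page, void *p, unsigned int size)
	{
		return p >= stack_page &&
		       (char *)p + size <= (char *)stack_page + THREAD_SIZE;
	}

	int main(void)
	{
		char *stack = malloc(THREAD_SIZE);

		printf("%d\n", valid_stack_ptr(stack, stack + 100, 8));             /* 1 */
		printf("%d\n", valid_stack_ptr(stack, stack + THREAD_SIZE - 4, 8)); /* 0 */
		free(stack);
		return 0;
	}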

View File

@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		bp = stack_frame(task, regs);
 
 	for (;;) {
-		struct thread_info *context;
 		void *end_stack;
 
 		end_stack = is_hardirq_stack(stack, cpu);
 		if (!end_stack)
 			end_stack = is_softirq_stack(stack, cpu);
 
-		context = task_thread_info(task);
-		bp = ops->walk_stack(context, stack, bp, ops, data,
+		bp = ops->walk_stack(task, stack, bp, ops, data,
 				     end_stack, &graph);
 
 		/* Stop if not on irq stack */

View File

@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	struct thread_info *tinfo;
 	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned long dummy;
 	unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * current stack address. If the stacks consist of nested
 	 * exceptions
 	 */
-	tinfo = task_thread_info(task);
 	while (!done) {
 		unsigned long *stack_end;
 		enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			if (ops->stack(data, id) < 0)
 				break;
 
-			bp = ops->walk_stack(tinfo, stack, bp, ops,
+			bp = ops->walk_stack(task, stack, bp, ops,
 					     data, stack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			if (ops->stack(data, "IRQ") < 0)
 				break;
-			bp = ops->walk_stack(tinfo, stack, bp,
+			bp = ops->walk_stack(task, stack, bp,
 				     ops, data, stack_end, &graph);
 			/*
 			 * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	/*
 	 * This handles the process stack:
 	 */
-	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
 	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);

View File

@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);

View File

@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
 
 void do_softirq_own_stack(void)
 {
-	struct thread_info *curstk;
 	struct irq_stack *irqstk;
 	u32 *isp, *prev_esp;
 
-	curstk = current_stack();
 	irqstk = __this_cpu_read(softirq_stack);
 
 	/* build the stack frame on the softirq stack */

View File

@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * normal page fault.
 		 */
 		regs->ip = (unsigned long)cur->addr;
+		/*
+		 * Trap flag (TF) has been set here because this fault
+		 * happened where the single stepping will be done.
+		 * So clear it by resetting the current kprobe:
+		 */
+		regs->flags &= ~X86_EFLAGS_TF;
+
+		/*
+		 * If the TF flag was set before the kprobe hit,
+		 * don't touch it:
+		 */
 		regs->flags |= kcb->kprobe_old_flags;
+
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else

View File

@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = src->version;
+		/* Make the latest version visible */
+		smp_rmb();
+
+		flags = src->flags;
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
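
Editorial aside, not part of this merge: the open-coded loop is a seqcount-style read — an odd version means an update is in flight, and a version change across the read means the data may be torn, so the read retries. A minimal hedged userspace model, with acquire fences standing in for smp_rmb():

	#include <stdio.h>

	struct time_info { unsigned version; unsigned char flags; };

	static unsigned char read_flags(volatile struct time_info *src)
	{
		unsigned version;
		unsigned char flags;

		do {
			version = src->version;
			__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~smp_rmb() */
			flags = src->flags;
			__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* re-check last */
		} while ((src->version & 1) || version != src->version);

		return flags;
	}

	int main(void)
	{
		struct time_info ti = { .version = 2, .flags = 0x1 };
		printf("flags = %#x\n", read_flags(&ti));
		return 0;
	}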

View File

@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
 	if (guest_tsc < tsc_deadline)
-		__delay(tsc_deadline - guest_tsc);
+		__delay(min(tsc_deadline - guest_tsc,
+			nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)

View File

@@ -6671,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
 	/* Checks for #GP/#SS exceptions. */
 	exn = false;
-	if (is_protmode(vcpu)) {
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check on the memory
+		 * destination for long mode!
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
 		/* Protected mode: apply checks for segment validity in the
 		 * following order:
 		 * - segment type check (#GP(0) may be thrown)
@@ -6688,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 		 * execute-only code segment
 		 */
 		exn = ((s.type & 0xa) == 8);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-	if (is_long_mode(vcpu)) {
-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
-		 * non-canonical form. This is an only check for long mode.
-		 */
-		exn = is_noncanonical_address(*ret);
-	} else if (is_protmode(vcpu)) {
+		if (exn) {
+			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+			return 1;
+		}
 		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
 		 */
 		exn = (s.unusable != 0);
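
Editorial aside, not part of this merge: is_noncanonical_address() enforces the x86-64 rule that, with 48-bit virtual addresses, bits 63:47 must all equal bit 47. A hedged standalone sketch of that check:

	#include <stdio.h>
	#include <stdint.h>

	/* Sign-extend from bit 47 and compare: unequal means non-canonical. */
	static int is_noncanonical(uint64_t va)
	{
		return (int64_t)(va << 16) >> 16 != (int64_t)va;
	}

	int main(void)
	{
		printf("%d\n", is_noncanonical(0x00007fffffffffffull));	/* 0: canonical */
		printf("%d\n", is_noncanonical(0xffff800000000000ull));	/* 0: canonical */
		printf("%d\n", is_noncanonical(0x0000800000000000ull));	/* 1: non-canonical */
		return 0;
	}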

View File

@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 static unsigned long max_tsc_khz;
 
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
-	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
-				   vcpu->arch.virtual_tsc_shift);
-}
-
 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 {
 	u64 v = (u64)khz * (1000000 + ppm);

View File

@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_X86_H
 
 #include <linux/kvm_host.h>
+#include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
 
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+				   vcpu->arch.virtual_tsc_shift);
+}
+
 /* Same "calling convention" as do_div:
  * - divide (n << 32) by base
  * - put result in n

View File

@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM

View File

@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
 	if (efi_enabled(EFI_OLD_MEMMAP))
 		return 0;
 
-	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
 	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
 	if (!efi_pgd)
 		return -ENOMEM;

View File

@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
 	 * We include the PMD passed in on _both_ boundaries. */
-	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
 			pmd++, vaddr += PMD_SIZE) {
 		if (pmd_none(*pmd))
 			continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
-	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-			       pte_val_ma(pte));
-
-	return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	unsigned long pfn;
-
-	if (xen_feature(XENFEAT_writable_page_tables) ||
-	    xen_feature(XENFEAT_auto_translated_physmap) ||
-	    xen_start_info->mfn_list >= __START_KERNEL_map)
-		return pte;
-
-	/*
-	 * Pages belonging to the initial p2m list mapped outside the default
-	 * address range must be mapped read-only. This region contains the
-	 * page tables for mapping the p2m list, too, and page tables MUST be
-	 * mapped read-only.
-	 */
-	pfn = pte_pfn(pte);
-	if (pfn >= xen_start_info->first_p2m_pfn &&
-	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-		pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-	return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 * so always write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
+__visible pte_t xen_make_pte_init(pteval_t pte)
+{
+#ifdef CONFIG_X86_64
+	unsigned long pfn;
+
+	/*
+	 * Pages belonging to the initial p2m list mapped outside the default
+	 * address range must be mapped read-only. This region contains the
+	 * page tables for mapping the p2m list, too, and page tables MUST be
+	 * mapped read-only.
+	 */
+	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+	if (xen_start_info->mfn_list < __START_KERNEL_map &&
+	    pfn >= xen_start_info->first_p2m_pfn &&
+	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+		pte &= ~_PAGE_RW;
+#endif
+	pte = pte_pfn_to_mfn(pte);
+	return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+
 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
-	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-		pte = mask_rw_pte(ptep, pte);
-	else
-		pte = __pte_ma(0);
+#ifdef CONFIG_X86_32
+	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
+	if (pte_mfn(pte) != INVALID_P2M_ENTRY
+	    && pte_val_ma(*ptep) & _PAGE_PRESENT)
+		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+			       pte_val_ma(pte));
+#endif
 	native_set_pte(ptep, pte);
 }
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
+	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE

View File

@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
 	if (unlikely(!slab_is_available()))
 		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
 
-	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+	return (void *)__get_free_page(GFP_KERNEL);
 }
 
 static void __ref free_p2m_page(void *p)

View File

@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 	pte_t *ptep;
 	int i;
 
-	ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	ptep = (pte_t *)__get_free_page(GFP_KERNEL);
 	if (!ptep)
 		return NULL;
 	for (i = 0; i < 1024; i++)

View File

@@ -455,6 +455,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
 	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
 	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
 	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
 	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = 0,
 };
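
Editorial aside, not part of this merge: crypto_msg_min[] is indexed by the netlink message type rebased against CRYPTO_MSG_BASE, so every accepted type needs an entry; the added GETALG line supplies the missing one. A hedged standalone sketch of such a lookup, with invented values:

	#include <stdio.h>

	#define MSG_BASE    0x10
	#define MSG_NEWALG  0x10
	#define MSG_GETALG  0x13
	#define NR_MSGTYPES 5

	static const int msg_min[NR_MSGTYPES] = {
		[MSG_NEWALG - MSG_BASE] = 32,
		[MSG_GETALG - MSG_BASE] = 32,	/* without this, GETALG checks against 0 */
	};

	int main(void)
	{
		int type = MSG_GETALG;

		if (type < MSG_BASE || type - MSG_BASE >= NR_MSGTYPES)
			return 1;
		printf("min payload for type %#x: %d bytes\n",
		       type, msg_min[type - MSG_BASE]);
		return 0;
	}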

View File

@@ -108,7 +108,9 @@ acpi_ex_add_table(u32 table_index,
 
 	/* Add the table to the namespace */
 
+	acpi_ex_exit_interpreter();
 	status = acpi_ns_load_table(table_index, parent_node);
+	acpi_ex_enter_interpreter();
 	if (ACPI_FAILURE(status)) {
 		acpi_ut_remove_reference(obj_desc);
 		*ddb_handle = NULL;
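
Editorial aside, not part of this merge: the fix drops the interpreter lock around acpi_ns_load_table(), which may need to enter the interpreter itself, then re-takes it. A hedged userspace model of that pattern using a pthread mutex, with invented names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t interp = PTHREAD_MUTEX_INITIALIZER;

	static void load_table(void)
	{
		pthread_mutex_lock(&interp);	/* callee takes the lock itself */
		puts("table loaded");
		pthread_mutex_unlock(&interp);
	}

	int main(void)
	{
		pthread_mutex_lock(&interp);
		/* ... interpreter work ... */
		pthread_mutex_unlock(&interp);	/* ~acpi_ex_exit_interpreter() */
		load_table();			/* ~acpi_ns_load_table() */
		pthread_mutex_lock(&interp);	/* ~acpi_ex_enter_interpreter() */
		/* ... more interpreter work ... */
		pthread_mutex_unlock(&interp);
		return 0;
	}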

Some files were not shown because too many files have changed in this diff.