mirror of https://gitee.com/openkylin/linux.git
powerpc: add context.vdso_base for 32-bit too
This adds a vdso_base element to the mm_context_t for 32-bit compiles (both for ARCH=powerpc and ARCH=ppc). This fixes the compile errors that have been reported in arch/powerpc/kernel/signal_32.c.

Signed-off-by: Paul Mackerras <paulus@samba.org>
commit 6218a761bb
parent 050613545b
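Why the 32-bit build was breaking: the merged signal code reaches the vdso through the mm context, and only the 64-bit mm_context_t had a vdso_base field. Roughly, signal_32.c contains an access of the shape sketched below. This is an illustration only, not the exact kernel source; the helper name, the vdso32_rt_sigtramp symbol, and the surrounding details are assumptions.

/* Hypothetical helper, for illustration only: how signal_32.c-style code
 * reaches the vdso through the mm context.  Before this commit the 32-bit
 * mm_context_t was a bare unsigned long, so ".vdso_base" did not exist and
 * the shared 32-bit signal code failed to compile. */
static unsigned long sigreturn_trampoline(struct pt_regs *regs)
{
        if (vdso32_rt_sigtramp && current->mm->context.vdso_base)
                /* return to userspace via the trampoline mapped in the vdso */
                return current->mm->context.vdso_base + vdso32_rt_sigtramp;

        /* otherwise fall back to a trampoline written on the user stack */
        return 0;
}

With mm_context_t turned into a struct carrying both id and vdso_base (see the mmu.h hunk below), the same signal code builds for 32-bit and 64-bit alike.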
@@ -30,7 +30,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
 unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 #ifdef FEW_CONTEXTS
 atomic_t nr_free_contexts;
@@ -190,7 +190,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
                 return;
         pmd = pmd_offset(pgd_offset(mm, ea), ea);
         if (!pmd_none(*pmd))
-                add_hash_page(mm->context, ea, pmd_val(*pmd));
+                add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
 
 /*
@@ -42,7 +42,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 
         if (Hash != 0) {
                 ptephys = __pa(ptep) & PAGE_MASK;
-                flush_hash_pages(mm->context, addr, ptephys, 1);
+                flush_hash_pages(mm->context.id, addr, ptephys, 1);
         }
 }
 
@@ -102,7 +102,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
         pmd_t *pmd;
         unsigned long pmd_end;
         int count;
-        unsigned int ctx = mm->context;
+        unsigned int ctx = mm->context.id;
 
         if (Hash == 0) {
                 _tlbia();
@@ -172,7 +172,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
         mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
         pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
         if (!pmd_none(*pmd))
-                flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+                flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
         FINISH_FLUSH;
 }
 
@@ -314,7 +314,7 @@ static int pmu_set_cpu_speed(int low_speed)
                 _set_L3CR(save_l3cr);
 
         /* Restore userland MMU context */
-        set_context(current->active_mm->context, current->active_mm->pgd);
+        set_context(current->active_mm->context.id, current->active_mm->pgd);
 
 #ifdef DEBUG_FREQ
         printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
@@ -458,7 +458,7 @@ static int pmac_pm_finish(suspend_state_t state)
         printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
 
         /* Restore userland MMU context */
-        set_context(current->active_mm->context, current->active_mm->pgd);
+        set_context(current->active_mm->context.id, current->active_mm->pgd);
 
         return 0;
 }
@@ -30,7 +30,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
 unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 #ifdef FEW_CONTEXTS
 atomic_t nr_free_contexts;
@@ -2268,7 +2268,7 @@ static int powerbook_sleep_grackle(void)
                 _set_L2CR(save_l2cr);
 
         /* Restore userland MMU context */
-        set_context(current->active_mm->context, current->active_mm->pgd);
+        set_context(current->active_mm->context.id, current->active_mm->pgd);
 
         /* Power things up */
         pmu_unlock();
@@ -2366,7 +2366,7 @@ powerbook_sleep_Core99(void)
                 _set_L3CR(save_l3cr);
 
         /* Restore userland MMU context */
-        set_context(current->active_mm->context, current->active_mm->pgd);
+        set_context(current->active_mm->context.id, current->active_mm->pgd);
 
         /* Tell PMU we are ready */
         pmu_unlock();
@@ -24,8 +24,10 @@ extern phys_addr_t fixup_bigphys_addr(phys_addr_t, phys_addr_t);
 #define PHYS_FMT        "%16Lx"
 #endif
 
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+        unsigned long id;
+        unsigned long vdso_base;
+} mm_context_t;
 
 /* Hardware Page Table Entry */
 typedef struct _PTE {
@@ -71,7 +71,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #else
 
 /* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT      ((mm_context_t) -1)
+#define NO_CONTEXT      ((unsigned long) -1)
 #define LAST_CONTEXT    32767
 #define FIRST_CONTEXT   1
 #endif
@@ -86,7 +86,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  * can be used for debugging on all processors (if you happen to have
  * an Abatron).
  */
-extern void set_context(mm_context_t context, pgd_t *pgd);
+extern void set_context(unsigned long contextid, pgd_t *pgd);
 
 /*
  * Bitmap of contexts in use.
@@ -99,7 +99,7 @@ extern unsigned long context_map[];
  * Its use is an optimization only, we can't rely on this context
  * number to be free, but it usually will be.
  */
-extern mm_context_t next_mmu_context;
+extern unsigned long next_mmu_context;
 
 /*
  * If we don't have sufficient contexts to give one to every task
@@ -118,9 +118,9 @@ extern void steal_context(void);
  */
 static inline void get_mmu_context(struct mm_struct *mm)
 {
-        mm_context_t ctx;
+        unsigned long ctx;
 
-        if (mm->context != NO_CONTEXT)
+        if (mm->context.id != NO_CONTEXT)
                 return;
 #ifdef FEW_CONTEXTS
         while (atomic_dec_if_positive(&nr_free_contexts) < 0)
@@ -133,7 +133,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
                 ctx = 0;
         }
         next_mmu_context = (ctx + 1) & LAST_CONTEXT;
-        mm->context = ctx;
+        mm->context.id = ctx;
 #ifdef FEW_CONTEXTS
         context_mm[ctx] = mm;
 #endif
@@ -142,7 +142,12 @@ static inline void get_mmu_context(struct mm_struct *mm)
 /*
  * Set up the context for a new address space.
  */
-#define init_new_context(tsk,mm)        (((mm)->context = NO_CONTEXT), 0)
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+        mm->context.id = NO_CONTEXT;
+        mm->context.vdso_base = 0;
+        return 0;
+}
 
 /*
  * We're finished using the context for an address space.
@@ -150,9 +155,9 @@ static inline void get_mmu_context(struct mm_struct *mm)
 static inline void destroy_context(struct mm_struct *mm)
 {
         preempt_disable();
-        if (mm->context != NO_CONTEXT) {
-                clear_bit(mm->context, context_map);
-                mm->context = NO_CONTEXT;
+        if (mm->context.id != NO_CONTEXT) {
+                clear_bit(mm->context.id, context_map);
+                mm->context.id = NO_CONTEXT;
 #ifdef FEW_CONTEXTS
                 atomic_inc(&nr_free_contexts);
 #endif
@@ -180,7 +185,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
         /* Setup new userspace context */
         get_mmu_context(next);
-        set_context(next->context, next->pgd);
+        set_context(next->context.id, next->pgd);
 }
 
 #define deactivate_mm(tsk,mm)   do { } while (0)
@@ -663,7 +663,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
         return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-        __ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep)
+        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,