mirror of https://gitee.com/openkylin/linux.git
sh: Split out MMUCR.URB based entry wiring in to shared helper.

Presently this is duplicated between tlb-sh4 and tlb-pteaex. Split the
helpers out in to a generic tlb-urb that can be used by any parts
equipped with MMUCR.URB. At the same time, move the SH-5 code
out-of-line, as we require single global state for DTLB entry wiring.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

commit bb29c677b3
parent 046581f962
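For context, the interface being consolidated is the tlb_wire_entry()/tlb_unwire_entry() pair declared in asm/tlb.h below. Here is a minimal caller sketch (not from the patch; the wrapper function and the wired addresses are hypothetical) showing the LIFO discipline that the helpers' comments require:

/*
 * Hypothetical caller sketch: wire two pages for the duration of a
 * critical region, then unwire them in reverse order.
 */
#include <asm/tlb.h>

static void with_wired_pages(struct vm_area_struct *vma,
                             unsigned long addr0, pte_t pte0,
                             unsigned long addr1, pte_t pte1)
{
        tlb_wire_entry(vma, addr0, pte0);       /* entry N   */
        tlb_wire_entry(vma, addr1, pte1);       /* entry N+1 */

        /* ... touch addr0/addr1 without risking a TLB miss ... */

        tlb_unwire_entry();                     /* undoes N+1 */
        tlb_unwire_entry();                     /* undoes N   */
}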
arch/sh/include/asm/tlb.h
@@ -98,49 +98,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 
-#ifdef CONFIG_CPU_SH4
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
 extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
 extern void tlb_unwire_entry(void);
-#elif defined(CONFIG_SUPERH64)
-static int dtlb_entry;
-static unsigned long long dtlb_entries[64];
-
-static inline void tlb_wire_entry(struct vm_area_struct *vma,
-                                  unsigned long addr, pte_t pte)
-{
-        unsigned long long entry;
-        unsigned long paddr, flags;
-
-        BUG_ON(dtlb_entry == 64);
-
-        local_irq_save(flags);
-
-        entry = sh64_get_wired_dtlb_entry();
-        dtlb_entries[dtlb_entry++] = entry;
-
-        paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
-        paddr &= ~PAGE_MASK;
-
-        sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
-
-        local_irq_restore(flags);
-}
-
-static inline void tlb_unwire_entry(void)
-{
-        unsigned long long entry;
-        unsigned long flags;
-
-        BUG_ON(!dtlb_entry);
-
-        local_irq_save(flags);
-        entry = dtlb_entries[dtlb_entry--];
-
-        sh64_teardown_tlb_slot(entry);
-        sh64_put_wired_dtlb_entry(entry);
-
-        local_irq_restore(flags);
-}
 #else
 static inline void tlb_wire_entry(struct vm_area_struct *vma ,
                                   unsigned long addr, pte_t pte)
@@ -152,7 +112,7 @@ static inline void tlb_unwire_entry(void)
 {
         BUG();
 }
-#endif /* CONFIG_CPU_SH4 */
+#endif
 
 #else /* CONFIG_MMU */
 
arch/sh/mm/Makefile
@@ -26,9 +26,9 @@ endif
 
 ifdef CONFIG_MMU
 tlb-$(CONFIG_CPU_SH3)           := tlb-sh3.o
-tlb-$(CONFIG_CPU_SH4)           := tlb-sh4.o
+tlb-$(CONFIG_CPU_SH4)           := tlb-sh4.o tlb-urb.o
 tlb-$(CONFIG_CPU_SH5)           := tlb-sh5.o
-tlb-$(CONFIG_CPU_HAS_PTEAEX)    := tlb-pteaex.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX)    := tlb-pteaex.o tlb-urb.o
 obj-y                           += $(tlb-y)
 endif
 
arch/sh/mm/tlb-pteaex.c
@@ -76,69 +76,3 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
         __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
         back_to_cached();
 }
-
-/*
- * Load the entry for 'addr' into the TLB and wire the entry.
- */
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-        unsigned long status, flags;
-        int urb;
-
-        local_irq_save(flags);
-
-        /* Load the entry into the TLB */
-        __update_tlb(vma, addr, pte);
-
-        /* ... and wire it up. */
-        status = ctrl_inl(MMUCR);
-        urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-        status &= ~MMUCR_URB;
-
-        /*
-         * Make sure we're not trying to wire the last TLB entry slot.
-         */
-        BUG_ON(!--urb);
-
-        urb = urb % MMUCR_URB_NENTRIES;
-
-        status |= (urb << MMUCR_URB_SHIFT);
-        ctrl_outl(status, MMUCR);
-        ctrl_barrier();
-
-        local_irq_restore(flags);
-}
-
-/*
- * Unwire the last wired TLB entry.
- *
- * It should also be noted that it is not possible to wire and unwire
- * TLB entries in an arbitrary order. If you wire TLB entry N, followed
- * by entry N+1, you must unwire entry N+1 first, then entry N. In this
- * respect, it works like a stack or LIFO queue.
- */
-void tlb_unwire_entry(void)
-{
-        unsigned long status, flags;
-        int urb;
-
-        local_irq_save(flags);
-
-        status = ctrl_inl(MMUCR);
-        urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-        status &= ~MMUCR_URB;
-
-        /*
-         * Make sure we're not trying to unwire a TLB entry when none
-         * have been wired.
-         */
-        BUG_ON(urb++ == MMUCR_URB_NENTRIES);
-
-        urb = urb % MMUCR_URB_NENTRIES;
-
-        status |= (urb << MMUCR_URB_SHIFT);
-        ctrl_outl(status, MMUCR);
-        ctrl_barrier();
-
-        local_irq_restore(flags);
-}
arch/sh/mm/tlb-sh4.c
@@ -81,69 +81,3 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
         ctrl_outl(data, addr);
         back_to_cached();
 }
-
-/*
- * Load the entry for 'addr' into the TLB and wire the entry.
- */
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-        unsigned long status, flags;
-        int urb;
-
-        local_irq_save(flags);
-
-        /* Load the entry into the TLB */
-        __update_tlb(vma, addr, pte);
-
-        /* ... and wire it up. */
-        status = ctrl_inl(MMUCR);
-        urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-        status &= ~MMUCR_URB;
-
-        /*
-         * Make sure we're not trying to wire the last TLB entry slot.
-         */
-        BUG_ON(!--urb);
-
-        urb = urb % MMUCR_URB_NENTRIES;
-
-        status |= (urb << MMUCR_URB_SHIFT);
-        ctrl_outl(status, MMUCR);
-        ctrl_barrier();
-
-        local_irq_restore(flags);
-}
-
-/*
- * Unwire the last wired TLB entry.
- *
- * It should also be noted that it is not possible to wire and unwire
- * TLB entries in an arbitrary order. If you wire TLB entry N, followed
- * by entry N+1, you must unwire entry N+1 first, then entry N. In this
- * respect, it works like a stack or LIFO queue.
- */
-void tlb_unwire_entry(void)
-{
-        unsigned long status, flags;
-        int urb;
-
-        local_irq_save(flags);
-
-        status = ctrl_inl(MMUCR);
-        urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-        status &= ~MMUCR_URB;
-
-        /*
-         * Make sure we're not trying to unwire a TLB entry when none
-         * have been wired.
-         */
-        BUG_ON(urb++ == MMUCR_URB_NENTRIES);
-
-        urb = urb % MMUCR_URB_NENTRIES;
-
-        status |= (urb << MMUCR_URB_SHIFT);
-        ctrl_outl(status, MMUCR);
-        ctrl_barrier();
-
-        local_irq_restore(flags);
-}
arch/sh/mm/tlb-sh5.c
@@ -143,3 +143,42 @@ void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
  */
 void sh64_teardown_tlb_slot(unsigned long long config_addr)
         __attribute__ ((alias("__flush_tlb_slot")));
+
+static int dtlb_entry;
+static unsigned long long dtlb_entries[64];
+
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+        unsigned long long entry;
+        unsigned long paddr, flags;
+
+        BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
+
+        local_irq_save(flags);
+
+        entry = sh64_get_wired_dtlb_entry();
+        dtlb_entries[dtlb_entry++] = entry;
+
+        paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
+        paddr &= ~PAGE_MASK;
+
+        sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
+
+        local_irq_restore(flags);
+}
+
+void tlb_unwire_entry(void)
+{
+        unsigned long long entry;
+        unsigned long flags;
+
+        BUG_ON(!dtlb_entry);
+
+        local_irq_save(flags);
+        entry = dtlb_entries[dtlb_entry--];
+
+        sh64_teardown_tlb_slot(entry);
+        sh64_put_wired_dtlb_entry(entry);
+
+        local_irq_restore(flags);
+}
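One detail worth noting in the move above: the old header's open-coded bound, BUG_ON(dtlb_entry == 64), becomes BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries)), so the check tracks the array declaration. A standalone sketch of the idiom (with a simplified ARRAY_SIZE that omits the kernel's array type check):

#include <stdio.h>

/* Simplified version of the kernel's ARRAY_SIZE(): element count of a
 * statically sized array, so the bound follows the declaration instead
 * of repeating the magic number 64.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static unsigned long long dtlb_entries[64];

int main(void)
{
        printf("%zu\n", ARRAY_SIZE(dtlb_entries));     /* prints 64 */
        return 0;
}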
arch/sh/mm/tlb-urb.c (new file)
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/mm/tlb-urb.c
+ *
+ * TLB entry wiring helpers for URB-equipped parts.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+        unsigned long status, flags;
+        int urb;
+
+        local_irq_save(flags);
+
+        /* Load the entry into the TLB */
+        __update_tlb(vma, addr, pte);
+
+        /* ... and wire it up. */
+        status = __raw_readl(MMUCR);
+        urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+        status &= ~MMUCR_URB;
+
+        /*
+         * Make sure we're not trying to wire the last TLB entry slot.
+         */
+        BUG_ON(!--urb);
+
+        urb = urb % MMUCR_URB_NENTRIES;
+
+        status |= (urb << MMUCR_URB_SHIFT);
+        __raw_writel(status, MMUCR);
+        ctrl_barrier();
+
+        local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+        unsigned long status, flags;
+        int urb;
+
+        local_irq_save(flags);
+
+        status = __raw_readl(MMUCR);
+        urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+        status &= ~MMUCR_URB;
+
+        /*
+         * Make sure we're not trying to unwire a TLB entry when none
+         * have been wired.
+         */
+        BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+        urb = urb % MMUCR_URB_NENTRIES;
+
+        status |= (urb << MMUCR_URB_SHIFT);
+        __raw_writel(status, MMUCR);
+        ctrl_barrier();
+
+        local_irq_restore(flags);
+}
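The shared helpers wire and unwire entries purely by adjusting the URB field of MMUCR: tlb_wire_entry() decrements it after loading the entry, tlb_unwire_entry() increments it back, and the LIFO restriction follows from the fact that only this single boundary value is tracked. Below is a standalone, user-space sketch of that field read-modify-write; the mask, shift, and entry count are assumed placeholder values, not the kernel's asm definitions.

#include <stdio.h>

/* Placeholder constants -- assumed values, not the kernel's definitions. */
#define MMUCR_URB               0x00fc0000UL
#define MMUCR_URB_SHIFT         18
#define MMUCR_URB_NENTRIES      64

/* Extract the URB (UTLB replace boundary) field from an MMUCR image. */
static unsigned int mmucr_get_urb(unsigned long status)
{
        return (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
}

/* Fold a new URB value back into an MMUCR image (the read-modify-write
 * step the helpers perform before writing the register back). */
static unsigned long mmucr_set_urb(unsigned long status, unsigned int urb)
{
        status &= ~MMUCR_URB;
        status |= ((unsigned long)(urb % MMUCR_URB_NENTRIES) << MMUCR_URB_SHIFT);
        return status;
}

int main(void)
{
        unsigned long status = 0x3UL | (40UL << MMUCR_URB_SHIFT);       /* fake MMUCR */

        /* wire: decrement URB; unwire: increment it back (LIFO). */
        status = mmucr_set_urb(status, mmucr_get_urb(status) - 1);
        printf("URB after wire:   %u\n", mmucr_get_urb(status));        /* 39 */
        status = mmucr_set_urb(status, mmucr_get_urb(status) + 1);
        printf("URB after unwire: %u\n", mmucr_get_urb(status));        /* 40 */

        return 0;
}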