/*
* linux/arch/arm/mm/cache-v7.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
* Copyright (C) 2005 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv7 processor support.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include "proc-macros.S"
/*
* The secondary kernel init calls v7_flush_dcache_all before it enables
* the L1; however, the L1 comes out of reset in an undefined state, so
* the clean + invalidate performed by v7_flush_dcache_all causes a bunch
* of cache lines with uninitialized data and uninitialized tags to get
* written out to memory, which does really unpleasant things to the main
* processor. We fix this by performing an invalidate, rather than a
* clean + invalidate, before jumping into the kernel.
*
* This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
* to be called for both secondary cores startup and primary core resume
* procedures.
*/
ENTRY(v7_invalidate_l1)
mov r0, #0
mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR
mrc p15, 1, r0, c0, c0, 0 @ read CCSIDR
ldr r1, =0x7fff
and r2, r1, r0, lsr #13 @ NumSets - 1
ldr r1, =0x3ff
and r3, r1, r0, lsr #3 @ NumWays - 1
add r2, r2, #1 @ NumSets
and r0, r0, #0x7
add r0, r0, #4 @ SetShift
clz r1, r3 @ WayShift
add r4, r3, #1 @ NumWays
1: sub r2, r2, #1 @ NumSets--
mov r3, r4 @ Temp = NumWays
2: subs r3, r3, #1 @ Temp--
mov r5, r3, lsl r1
mov r6, r2, lsl r0
orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
mcr p15, 0, r5, c7, c6, 2 @ invalidate line by set/way (DCISW)
bgt 2b
cmp r2, #0
bgt 1b
dsb st
isb
mov pc, lr
ENDPROC(v7_invalidate_l1)
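/*
 * The loop above decodes the L1 geometry from CCSIDR and builds the
 * operand for DCISW ("invalidate data cache line by set/way"). A minimal
 * C model of that arithmetic, assuming a hypothetical dcisw() wrapper for
 * the mcr and a set-associative cache (NumWays > 1, as on Cortex-A L1
 * D-caches); the level field in bits [3:1] is zero because only L1 is
 * selected here:
 *
 *	#include <stdint.h>
 *
 *	extern void dcisw(uint32_t setway);	// mcr p15, 0, <Rt>, c7, c6, 2
 *
 *	static void invalidate_l1_model(uint32_t ccsidr)
 *	{
 *		uint32_t sets     = ((ccsidr >> 13) & 0x7fff) + 1;	// NumSets
 *		uint32_t ways     = ((ccsidr >> 3) & 0x3ff) + 1;	// NumWays
 *		uint32_t setshift = (ccsidr & 0x7) + 4;			// log2(line size in bytes)
 *		uint32_t wayshift = __builtin_clz(ways - 1);		// way index sits in the top bits
 *
 *		for (uint32_t set = 0; set < sets; set++)
 *			for (uint32_t way = 0; way < ways; way++)
 *				dcisw((way << wayshift) | (set << setshift));
 *	}
 */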
/*
* v7_flush_icache_all()
*
* Flush the whole I-cache.
*
* Registers:
* r0 - set to 0
*/
ENTRY(v7_flush_icache_all)
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
mov pc, lr
ENDPROC(v7_flush_icache_all)
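/*
 * For reference, the UP variant above (ICIALLU) can be written as GCC
 * inline assembly; this is an illustrative, privileged-mode-only sketch
 * rather than the kernel's own helper. The SMP build uses the
 * inner-shareable form ICIALLUIS (c7, c1, 0) instead:
 *
 *	static inline void icache_invalidate_all(void)
 *	{
 *		asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0) : "memory");
 *	}
 */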
/*
* v7_flush_dcache_louis()
*
* Flush the D-cache up to the Level of Unification Inner Shareable
*
* Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
*/
ENTRY(v7_flush_dcache_louis)
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr
ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
#ifdef CONFIG_ARM_ERRATA_643719
ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do
ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
biceq r2, r2, #0x0000000f @ clear minor revision number
teqeq r2, r1 @ test for errata affected core and if so...
orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne')
#endif
ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
moveq pc, lr @ return if level == 0
mov r10, #0 @ r10 (starting level) = 0
b flush_levels @ start flushing cache levels
ENDPROC(v7_flush_dcache_louis)
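/*
 * CLIDR packs one 3-bit boundary field per limit: LoUIS in bits [23:21],
 * LoC in bits [26:24] and LoUU in bits [29:27]. The shifts above (lsr #20
 * and lsr #26) deliberately leave the value multiplied by two, because
 * flush_levels advances its level counter (r10) in steps of two (the
 * CSSELR level field lives in bits [3:1]). A small C sketch of the
 * decode, assuming the raw CLIDR value has already been read with
 * "mrc p15, 1, <Rt>, c0, c0, 1":
 *
 *	#include <stdint.h>
 *
 *	static unsigned int clidr_louis(uint32_t clidr)
 *	{
 *		return (clidr >> 21) & 0x7;	// SMP: Level of Unification Inner Shareable
 *	}
 *
 *	static unsigned int clidr_louu(uint32_t clidr)
 *	{
 *		return (clidr >> 27) & 0x7;	// UP: Level of Unification Uniprocessor
 *	}
 */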
/*
* v7_flush_dcache_all()
*
* Flush the whole D-cache.
*
* Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
*/
ENTRY(v7_flush_dcache_all)
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ move LoC into bits [3:1] (LoC * 2)
beq finished @ if loc is 0, then no need to clean
mov r10, #0 @ start clean at cache level 0
flush_levels:
add r2, r10, r10, lsr #1 @ work out 3x current cache level
mov r1, r0, lsr r2 @ extract cache type bits from clidr
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
#endif
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sync the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
#ifdef CONFIG_PREEMPT
restore_irqs_notrace r9
#endif
and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 @ maximum way index (NumWays - 1)
clz r5, r4 @ find bit position of way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 @ maximum set index (NumSets - 1)
loop1:
mov r9, r7 @ create working copy of max index
loop2:
ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11
THUMB( lsl r6, r4, r5 )
THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
ARM( orr r11, r11, r9, lsl r2 ) @ factor index number into r11
THUMB( lsl r6, r9, r2 )
THUMB( orr r11, r11, r6 ) @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, #1 @ decrement the index
bge loop2
subs r4, r4, #1 @ decrement the way
bge loop1
skip:
add r10, r10, #2 @ increment cache number
cmp r3, r10
bgt flush_levels
finished:
mov r10, #0 @ switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
dsb st
isb
mov pc, lr
ENDPROC(v7_flush_dcache_all)
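/*
 * The flush_levels/loop1/loop2 sequence walks every data or unified cache
 * level below the limit held in r3, reads that level's geometry from
 * CCSIDR and issues DCCISW ("clean and invalidate by set/way") for every
 * way/set pair, ways in the outer loop and sets in the inner one. A
 * compact C model of the walk, assuming hypothetical wrappers for the
 * cp15 accesses and omitting the barriers, ISBs and preemption handling:
 *
 *	#include <stdint.h>
 *
 *	extern uint32_t read_clidr(void);	// mrc p15, 1, <Rt>, c0, c0, 1
 *	extern void write_csselr(uint32_t sel);	// mcr p15, 2, <Rt>, c0, c0, 0
 *	extern uint32_t read_ccsidr(void);	// mrc p15, 1, <Rt>, c0, c0, 0
 *	extern void dccisw(uint32_t setway);	// mcr p15, 0, <Rt>, c7, c14, 2
 *
 *	static void flush_dcache_model(void)
 *	{
 *		uint32_t clidr = read_clidr();
 *		unsigned int loc = (clidr >> 24) & 0x7;
 *
 *		for (unsigned int level = 0; level < loc; level++) {
 *			unsigned int ctype = (clidr >> (level * 3)) & 0x7;
 *			if (ctype < 2)			// no cache or I-cache only
 *				continue;
 *			write_csselr(level << 1);	// select this level's data/unified cache
 *			uint32_t ccsidr = read_ccsidr();
 *			uint32_t sets     = ((ccsidr >> 13) & 0x7fff) + 1;
 *			uint32_t ways     = ((ccsidr >> 3) & 0x3ff) + 1;
 *			uint32_t setshift = (ccsidr & 0x7) + 4;
 *			uint32_t wayshift = __builtin_clz(ways - 1);
 *
 *			for (uint32_t way = 0; way < ways; way++)
 *				for (uint32_t set = 0; set < sets; set++)
 *					dccisw((way << wayshift) | (set << setshift) | (level << 1));
 *		}
 *	}
 */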
/*
* v7_flush_cache_all()
*
* Flush the entire cache system.
* The data cache flush is now achieved using atomic clean / invalidates
* working outwards from L1 cache. This is done using Set/Way based cache
* maintenance instructions.
* The instruction cache can still be invalidated back to the point of
* unification in a single instruction.
*
*/
ENTRY(v7_flush_kern_cache_all)
ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
ENDPROC(v7_flush_kern_cache_all)
/*
* v7_flush_kern_cache_louis(void)
*
* Flush the data cache up to Level of Unification Inner Shareable.
* Invalidate the I-cache to the point of unification.
*/
ENTRY(v7_flush_kern_cache_louis)
ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_louis
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
ENDPROC(v7_flush_kern_cache_louis)
/*
* v7_flush_user_cache_all()
*
* Flush all cache entries in a particular address space
*/
ENTRY(v7_flush_user_cache_all)
/*FALLTHROUGH*/
/*
* v7_flush_user_cache_range(start, end, flags)
*
* Flush a range of cache entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - flags - vm_area_struct flags describing address space
*
* It is assumed that:
* - we have a VIPT cache.
*/
ENTRY(v7_flush_user_cache_range)
mov pc, lr
ENDPROC(v7_flush_user_cache_all)
ENDPROC(v7_flush_user_cache_range)
/*
* v7_coherent_kern_range(start,end)
*
* Ensure that the I and D caches are coherent within specified
* region. This is typically used when code has been written to
* a memory region, and will be executed.
*
* - start - virtual start address of region
* - end - virtual end address of region
*
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
ENTRY(v7_coherent_kern_range)
/* FALLTHROUGH */
/*
* v7_coherent_user_range(start,end)
*
* Ensure that the I and D caches are coherent within specified
* region. This is typically used when code has been written to
* a memory region, and will be executed.
*
* - start - virtual start address of region
* - end - virtual end address of region
*
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
ENTRY(v7_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
add r12, r12, r2
cmp r12, r1
blo 1b
dsb ishst
icache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
2:
USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line
add r12, r12, r2
cmp r12, r1
blo 2b
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
dsb ishst
isb
mov pc, lr
/*
* Fault handling for the cache operation above. If the virtual address in r0
* isn't mapped, fail with -EFAULT.
*/
9001:
#ifdef CONFIG_ARM_ERRATA_775420
dsb
#endif
mov r0, #-EFAULT
mov pc, lr
UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
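/*
 * The sequence above is what makes freshly written code executable: every
 * D-cache line in [start, end) is cleaned to the point of unification
 * (DCCMVAU), the matching I-cache lines are invalidated (ICIMVAU) and the
 * branch predictor is flushed. User code that generates instructions at
 * run time, a JIT for instance, normally reaches this routine through the
 * ARM cacheflush system call, most conveniently via GCC's
 * __builtin___clear_cache(). A short sketch of that usage:
 *
 *	#include <string.h>
 *
 *	void publish_code(void *dst, const void *src, size_t len)
 *	{
 *		// dst must be a mapping that is both writable and executable
 *		memcpy(dst, src, len);
 *		__builtin___clear_cache((char *)dst, (char *)dst + len);
 *	}
 */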
/*
* v7_flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - addr - kernel address
* - size - region size
*/
ENTRY(v7_flush_kern_dcache_area)
dcache_line_size r2, r3
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
cmp r0, r1
blo 1b
dsb st
mov pc, lr
ENDPROC(v7_flush_kern_dcache_area)
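/*
 * The entry above takes an (addr, size) pair, aligns addr down to a cache
 * line boundary and walks the region with DCCIMVAC ("clean and invalidate
 * by MVA to the point of coherency"). A small C sketch of the loop
 * bounds, assuming hypothetical cache_line_size() and dccimvac() helpers:
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	extern size_t cache_line_size(void);	// from CTR, as dcache_line_size derives it
 *	extern void dccimvac(uintptr_t va);	// mcr p15, 0, <Rt>, c7, c14, 1
 *
 *	static void flush_dcache_area_model(void *addr, size_t size)
 *	{
 *		size_t line = cache_line_size();
 *		uintptr_t p   = (uintptr_t)addr & ~(line - 1);	// align down to a line
 *		uintptr_t end = (uintptr_t)addr + size;
 *
 *		for (; p < end; p += line)
 *			dccimvac(p);
 *	}
 */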
/*
* v7_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
v7_dma_inv_range:
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
tst r1, r3
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
1:
mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
add r0, r0, r2
cmp r0, r1
blo 1b
dsb st
mov pc, lr
ENDPROC(v7_dma_inv_range)
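/*
 * Invalidating is destructive, so the routine above first cleans and
 * invalidates (DCCIMVAC) any partial line at an unaligned start or end:
 * the clean pushes dirty data that merely shares those lines back to
 * memory before the lines are dropped. The fully covered interior is then
 * invalidated line by line (DCIMVAC). A C sketch of that boundary
 * handling, assuming hypothetical dccimvac()/dcimvac() wrappers:
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	extern void dccimvac(uintptr_t va);	// clean & invalidate by MVA (c7, c14, 1)
 *	extern void dcimvac(uintptr_t va);	// invalidate by MVA (c7, c6, 1)
 *
 *	static void dma_inv_model(uintptr_t start, uintptr_t end, size_t line)
 *	{
 *		uintptr_t astart = start & ~(line - 1);
 *		uintptr_t aend   = end & ~(line - 1);
 *
 *		if (start != astart)
 *			dccimvac(astart);	// keep data sharing the leading line
 *		if (end != aend)
 *			dccimvac(aend);		// keep data sharing the trailing line
 *		for (uintptr_t p = astart; p < aend; p += line)
 *			dcimvac(p);		// the rest can simply be discarded
 *	}
 */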
/*
* v7_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
add r0, r0, r2
cmp r0, r1
blo 1b
dsb st
mov pc, lr
ENDPROC(v7_dma_clean_range)
/*
* v7_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
ALT_SMP(W(dsb))
ALT_UP(W(nop))
#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
add r0, r0, r2
cmp r0, r1
blo 1b
dsb st
mov pc, lr
ENDPROC(v7_dma_flush_range)
/*
* dma_map_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(v7_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v7_dma_inv_range
b v7_dma_clean_range
ENDPROC(v7_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
* - start - kernel virtual start address
* - size - size of region
* - dir - DMA direction
*/
ENTRY(v7_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v7_dma_inv_range
mov pc, lr
ENDPROC(v7_dma_unmap_area)
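/*
 * Taken together, map cleans (or invalidates) before the device sees the
 * buffer and unmap invalidates afterwards for any direction in which the
 * device may have written. A C sketch of the dispatch above, with an enum
 * that mirrors the kernel's dma_data_direction values (illustrative only):
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };
 *
 *	extern void dma_inv_range(uintptr_t start, uintptr_t end);
 *	extern void dma_clean_range(uintptr_t start, uintptr_t end);
 *
 *	static void dma_map_area_model(uintptr_t start, size_t size, enum dma_dir dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			dma_inv_range(start, start + size);	// device writes: discard stale lines
 *		else
 *			dma_clean_range(start, start + size);	// CPU data must reach memory first
 *	}
 *
 *	static void dma_unmap_area_model(uintptr_t start, size_t size, enum dma_dir dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			dma_inv_range(start, start + size);	// drop lines allocated during the DMA
 *	}
 */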
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v7