sh: disable aliased page logic on NOMMU models
SH3/4 (with MMU) have a virtually indexed cache, requiring explicit work to avoid consistency problems arising from having the same physical address range cached in multiple cache lines. This is unneeded for the NOMMU case, and some of the resulting code paths (kmap_coherent) don't work. SH2 avoided this problem only by having a 4-way associative cache with way size equal to the page size (4k), yielding no cache index bits outside the page offset and thus no aliases.

Signed-off-by: Rich Felker <dalias@libc.org>
commit 57155c6523
parent bbe6c77857
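To put a number on the aliasing claim above: the cache index bits that lie above the page offset select a line's "colour", and aliases exist exactly when the way size exceeds the page size. A minimal userspace sketch of that arithmetic, with illustrative way sizes rather than figures from any SH datasheet:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Index bits above the page offset; any nonzero result means the
 * same physical page can sit at more than one index in the cache. */
static unsigned long colour_bits(unsigned long way_size)
{
	return (way_size - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	printf("4KiB way (SH2-like):  %#lx\n", colour_bits(4096));	/* 0 */
	printf("16KiB way (aliasing): %#lx\n", colour_bits(16384));	/* 0x3000 */
	return 0;
}

A 4KiB way leaves no colour bits, matching the SH2 case described above; a 16KiB way leaves two (mask 0x3000), i.e. four possible aliases per physical page.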
@@ -323,9 +323,13 @@ asmlinkage void cpu_init(void)
 	cache_init();
 
 	if (raw_smp_processor_id() == 0) {
+#ifdef CONFIG_MMU
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
+#else
+		shm_align_mask = PAGE_SIZE - 1;
+#endif
 
 		/* Boot CPU sets the cache shape */
 		detect_cache_shape();
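For context on how the value set in this hunk is consumed: on MMU kernels shm_align_mask steers shared mappings onto matching cache colours. A simplified, standalone sketch modelled on the arch colour-alignment logic in the mmap path (hypothetical helper name, not verbatim kernel code):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* On MMU kernels this is max(way_size, PAGE_SIZE) - 1 per the hunk
 * above; 0x3fff stands in for a 16KiB way. */
static unsigned long shm_align_mask = 0x3fff;

/* Round addr up to a colour boundary, then offset to the colour the
 * file offset (pgoff) would occupy, so every shared mapping of a
 * given page indexes the same cache lines. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;

	return base + ((pgoff << PAGE_SHIFT) & shm_align_mask);
}

int main(void)
{
	/* 0x400123 rounds up to 0x404000; pgoff 3 selects colour 0x3000. */
	printf("%#lx\n", colour_align(0x400123, 3));	/* prints 0x407000 */
	return 0;
}

With the NOMMU value of PAGE_SIZE - 1, this collapses to plain page alignment, which is all that is needed once aliasing is out of the picture.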
@@ -244,7 +244,11 @@ void flush_cache_sigtramp(unsigned long address)
 
 static void compute_alias(struct cache_info *c)
 {
+#ifdef CONFIG_MMU
 	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
+#else
+	c->alias_mask = 0;
+#endif
 	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
 }
 
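The effect of this second hunk can be seen by treating CONFIG_MMU as a runtime flag so both branches run side by side; a standalone sketch with illustrative cache geometry, not taken from any specific SH part:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct cache_info {
	unsigned long sets, entry_shift;
	unsigned long alias_mask, n_aliases;
};

static void compute_alias(struct cache_info *c, int mmu)
{
	if (mmu)	/* stands in for #ifdef CONFIG_MMU */
		c->alias_mask = ((c->sets - 1) << c->entry_shift)
				& ~(PAGE_SIZE - 1);
	else
		c->alias_mask = 0;
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

int main(void)
{
	/* 512 sets of 32-byte lines: a 16KiB way. */
	struct cache_info c = { .sets = 512, .entry_shift = 5 };

	compute_alias(&c, 1);
	printf("MMU:   mask=%#lx aliases=%lu\n", c.alias_mask, c.n_aliases);
	compute_alias(&c, 0);
	printf("NOMMU: mask=%#lx aliases=%lu\n", c.alias_mask, c.n_aliases);
	return 0;
}

With the mask forced to 0 on NOMMU, n_aliases comes out 0, which lets the alias-handling paths (including the kmap_coherent one noted as broken in the commit message) short-circuit.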