powerpc/32s: Don't hash_preload() kernel text
We now always map kernel text with BATs, so there is no need to preload
the hash with kernel text addresses, nor to ensure they are never evicted.

This is more or less a revert of commit ee4f2ea486 ("[POWERPC] Fix
32-bit mm operations when not using BATs").
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/0a0bab7fadd89aa829e33420fbc10d60c59040a7.1606285014.git.christophe.leroy@csgroup.eu
parent 035b19a15a
commit 79d1befe05
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -411,30 +411,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 	 * and we know there is a definite (although small) speed
 	 * advantage to putting the PTE in the primary PTEG, we always
 	 * put the PTE in the primary PTEG.
-	 *
-	 * In addition, we skip any slot that is mapping kernel text in
-	 * order to avoid a deadlock when not using BAT mappings if
-	 * trying to hash in the kernel hash code itself after it has
-	 * already taken the hash table lock. This works in conjunction
-	 * with pre-faulting of the kernel text.
-	 *
-	 * If the hash table bucket is full of kernel text entries, we'll
-	 * lockup here but that shouldn't happen
 	 */
-1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
+	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
 	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
 	addi	r6,r6,HPTE_SIZE			/* search for candidate */
 	andi.	r6,r6,7*HPTE_SIZE
 	stw	r6,next_slot@l(r4)
 	add	r4,r3,r6
-	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
-	clrrwi	r0,r0,12
-	lis	r6,etext@h
-	ori	r6,r6,etext@l	/* get etext */
-	tophys(r6,r6)
-	cmpl	cr0,r0,r6	/* compare and try again */
-	blt	1b
 
 #ifndef CONFIG_SMP
 	/* Store PTE in PTEG */
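
For readers who don't want to decode the hash_low.S assembly above: the deleted lines implemented a round-robin eviction that refused to pick a slot whose real page lay below etext, i.e. a kernel text mapping. A standalone toy model of that old policy, with made-up names and addresses (nothing here is a real kernel symbol or API):

#include <stdio.h>

#define PTEG_SLOTS	8

static unsigned int next_slot;

/*
 * Toy version of the removed logic: advance the round-robin pointer,
 * but keep skipping candidates whose physical page lies below pa_etext,
 * so kernel-text hash entries could never be evicted. If every slot
 * mapped kernel text this would spin forever, which is exactly the
 * "lockup here" caveat in the removed comment.
 */
static unsigned int pick_victim(const unsigned long rpn[PTEG_SLOTS],
				unsigned long pa_etext)
{
	do {
		next_slot = (next_slot + 1) % PTEG_SLOTS;
	} while (rpn[next_slot] < pa_etext);

	return next_slot;
}

int main(void)
{
	/* pretend slots 0-2 map kernel text (physical addresses < 0x400000) */
	unsigned long rpn[PTEG_SLOTS] = {
		0x100000, 0x200000, 0x300000, 0x800000,
		0x900000, 0xa00000, 0xb00000, 0xc00000,
	};

	printf("victim slot: %u\n", pick_victim(rpn, 0x400000));
	return 0;
}

With kernel text guaranteed to sit behind BATs, that filter (and the local label 1: it looped back to) can go, leaving plain round-robin eviction.
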
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -302,7 +302,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
 
 /*
  * Preload a translation in the hash table
  */
-void hash_preload(struct mm_struct *mm, unsigned long ea)
+static void hash_preload(struct mm_struct *mm, unsigned long ea)
 {
 	pmd_t *pmd;
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -91,8 +91,6 @@ void print_system_hash_info(void);
 
 #ifdef CONFIG_PPC32
 
-void hash_preload(struct mm_struct *mm, unsigned long ea);
-
 extern void mapin_ram(void);
 extern void setbat(int index, unsigned long virt, phys_addr_t phys,
 		   unsigned int size, pgprot_t prot);
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -112,10 +112,6 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 		ktext = ((char *)v >= _stext && (char *)v < etext) ||
 			((char *)v >= _sinittext && (char *)v < _einittext);
 		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
-#ifdef CONFIG_PPC_BOOK3S_32
-		if (ktext)
-			hash_preload(&init_mm, v);
-#endif
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
 	}
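
As the last hunk shows, the per-page classification itself is untouched: only the hash_preload() call for text pages goes away, since the BAT already covers them. The ktext test is plain address-range checking; a standalone illustration with invented addresses standing in for the linker symbols _stext/etext/_sinittext/_einittext:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the linker symbols used in __mapin_ram_chunk(). */
#define STEXT		0xc0000000UL
#define ETEXT		0xc0800000UL
#define SINITTEXT	0xc0900000UL
#define EINITTEXT	0xc0980000UL

/* Same shape as the ktext test in the hunk above. */
static bool is_kernel_text(unsigned long v)
{
	return (v >= STEXT && v < ETEXT) ||
	       (v >= SINITTEXT && v < EINITTEXT);
}

int main(void)
{
	printf("0xc0400000 -> %d (text)\n", is_kernel_text(0xc0400000UL));
	printf("0xc1000000 -> %d (data)\n", is_kernel_text(0xc1000000UL));
	return 0;
}
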