Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - fix EFI stub cache maintenance causing aborts during boot on certain
   platforms

 - handle byte stores in __clear_user without panicking

 - fix race condition in aarch64_insn_patch_text_sync() (instruction
   patching)

 - couple of type fixes

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: ARCH_PFN_OFFSET should be unsigned long
  Correct the race condition in aarch64_insn_patch_text_sync()
  arm64: __clear_user: handle exceptions on strb
  arm64: Fix data type for physical address
  arm64: efi: Fix stub cache maintenance
Linus Torvalds, 2014-11-14 14:24:33 -08:00
commit 0861fd1c25
5 changed files with 27 additions and 11 deletions

arch/arm64/include/asm/memory.h

@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  * virt_to_page(k)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(k)	indicates whether a virtual address is valid
  */
-#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
+#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
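
Why the cast matters: generic mm code computes page frame numbers in
unsigned long, while PHYS_PFN_OFFSET inherits the (potentially wider)
phys_addr_t type from PHYS_OFFSET, and a macro's type silently propagates
into every expression it appears in. A minimal userspace sketch of that
propagation (the typedef and constants below are illustrative stand-ins,
not the kernel's definitions):

        #include <stdio.h>
        #include <stdint.h>

        typedef uint64_t phys_addr_t;   /* stand-in for the kernel typedef */

        #define PAGE_SHIFT      12
        #define PHYS_OFFSET     ((phys_addr_t)0x80000000)
        #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)

        #define ARCH_PFN_OFFSET_OLD PHYS_PFN_OFFSET                      /* u64 */
        #define ARCH_PFN_OFFSET_NEW ((unsigned long)PHYS_PFN_OFFSET)     /* ulong */

        int main(void)
        {
                unsigned long pfn = 0x90000;

                /* The macro's type decides the width of the whole
                 * expression; on a target with 32-bit unsigned long
                 * the two results differ. */
                printf("old: %zu-byte arithmetic\n", sizeof(pfn - ARCH_PFN_OFFSET_OLD));
                printf("new: %zu-byte arithmetic\n", sizeof(pfn - ARCH_PFN_OFFSET_NEW));
                return 0;
        }

With the cast at the definition, pfn arithmetic stays in unsigned long
everywhere the macro is used, which is what pfn_to_page() and friends expect.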

arch/arm64/kernel/efi-entry.S

@@ -54,18 +54,17 @@ ENTRY(efi_stub_entry)
 	b.eq	efi_load_fail

 	/*
-	 * efi_entry() will have relocated the kernel image if necessary
-	 * and we return here with device tree address in x0 and the kernel
-	 * entry point stored at *image_addr. Save those values in registers
-	 * which are callee preserved.
+	 * efi_entry() will have copied the kernel image if necessary and we
+	 * return here with device tree address in x0 and the kernel entry
+	 * point stored at *image_addr. Save those values in registers which
+	 * are callee preserved.
 	 */
 	mov	x20, x0		// DTB address
 	ldr	x0, [sp, #16]	// relocated _text address
 	mov	x21, x0

 	/*
-	 * Flush dcache covering current runtime addresses
-	 * of kernel text/data. Then flush all of icache.
+	 * Calculate size of the kernel Image (same for original and copy).
 	 */
 	adrp	x1, _text
 	add	x1, x1, #:lo12:_text
@@ -73,9 +72,24 @@ ENTRY(efi_stub_entry)
 	add	x2, x2, #:lo12:_edata
 	sub	x1, x2, x1

+	/*
+	 * Flush the copied Image to the PoC, and ensure it is not shadowed by
+	 * stale icache entries from before relocation.
+	 */
 	bl	__flush_dcache_area
 	ic	ialluis

+	/*
+	 * Ensure that the rest of this function (in the original Image) is
+	 * visible when the caches are disabled. The I-cache can't have stale
+	 * entries for the VA range of the current image, so no maintenance is
+	 * necessary.
+	 */
+	adr	x0, efi_stub_entry
+	adr	x1, efi_stub_entry_end
+	sub	x1, x1, x0
+	bl	__flush_dcache_area
+
 	/* Turn off Dcache and MMU */
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
@@ -105,4 +119,5 @@ efi_load_fail:
 	ldp	x29, x30, [sp], #32
 	ret

+efi_stub_entry_end:
 ENDPROC(efi_stub_entry)
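
Both bl __flush_dcache_area calls rely on clean+invalidate by VA to the
Point of Coherency, so the flushed ranges remain visible after the D-cache
and MMU are switched off a few instructions later. A rough C sketch of what
such a by-VA range flush does (the helper name, fixed 64-byte line size and
standalone form are assumptions for illustration; the kernel reads the real
line size from CTR_EL0):

        /* Sketch: clean+invalidate every D-cache line covering
         * [addr, addr + size) to the Point of Coherency, then wait for
         * the maintenance to complete. AArch64 only. */
        static inline void flush_dcache_range_sketch(unsigned long addr,
                                                     unsigned long size)
        {
                const unsigned long line = 64;  /* assumed line size */
                unsigned long end = addr + size;

                for (addr &= ~(line - 1); addr < end; addr += line)
                        asm volatile("dc civac, %0" : : "r" (addr) : "memory");
                asm volatile("dsb sy" : : : "memory");  /* completes here */
        }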

arch/arm64/kernel/insn.c

@@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
 		 * which ends with "dsb; isb" pair guaranteeing global
 		 * visibility.
 		 */
-		atomic_set(&pp->cpu_count, -1);
+		/* Notify other processors with an additional increment. */
+		atomic_inc(&pp->cpu_count);
 	} else {
-		while (atomic_read(&pp->cpu_count) != -1)
+		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
 			cpu_relax();
 		isb();
 	}
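
The old protocol was racy: the master atomic_set() the counter to -1 after
patching, but a late secondary still performing its entry-time increment
could turn that -1 into 0, leaving every waiter spinning forever on
"!= -1". The fixed scheme only ever increments: once all CPUs have arrived
the count equals num_online_cpus(), so a value above that can only come
from the master's extra increment. A userspace model of the fixed
rendezvous (hedged: pthreads stand in for stop_machine, and the names are
illustrative, not the kernel's):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>

        #define NCPUS 4                 /* stands in for num_online_cpus() */

        static atomic_int cpu_count;
        static int patched;             /* stands in for the patched text */

        static void *patch_text_cb(void *arg)
        {
                (void)arg;
                /* The first thread in becomes the master, as in the kernel. */
                if (atomic_fetch_add(&cpu_count, 1) == 0) {
                        patched = 1;                     /* "patch the text" */
                        atomic_fetch_add(&cpu_count, 1); /* extra increment */
                } else {
                        /* Counts <= NCPUS can occur before everyone has
                         * arrived; only the master's extra increment can
                         * push the count above NCPUS. */
                        while (atomic_load(&cpu_count) <= NCPUS)
                                ;                        /* cpu_relax() stand-in */
                }
                printf("patched = %d\n", patched);       /* always 1 */
                return NULL;
        }

        int main(void)
        {
                pthread_t t[NCPUS];

                for (int i = 0; i < NCPUS; i++)
                        pthread_create(&t[i], NULL, patch_text_cb, NULL);
                for (int i = 0; i < NCPUS; i++)
                        pthread_join(t[i], NULL);
                return 0;
        }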

arch/arm64/lib/clear_user.S

@@ -46,7 +46,7 @@ USER(9f, strh	wzr, [x0], #2	)
 	sub	x1, x1, #2
4:	adds	x1, x1, #1
 	b.mi	5f
-	strb	wzr, [x0]
+USER(9f, strb	wzr, [x0]	)
5:	mov	x0, #0
 	ret
ENDPROC(__clear_user)
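
The USER() wrapper is what turns a fault on a user access into a handled
event: it records the address of the wrapped instruction together with a
fixup address (the local label 9 here) in the kernel exception table, which
the fault handler consults before declaring an unhandled kernel abort.
Before this change the final byte store had no such entry, so a fault on it
panicked the kernel. A simplified conceptual sketch of the mechanism (not
the arm64 implementation; the layout mirrors the classic absolute-address
exception table):

        /* One entry per uaccess instruction: where it lives, and where
         * to resume if it faults. */
        struct exception_table_entry {
                unsigned long insn;     /* address of the faulting insn */
                unsigned long fixup;    /* continue here on a fault */
        };

        /* On a kernel-mode fault, search for the faulting PC; a hit
         * means "resume at fixup", a miss is a genuine kernel bug. */
        static unsigned long
        search_extable_sketch(const struct exception_table_entry *tbl,
                              int n, unsigned long fault_pc)
        {
                for (int i = 0; i < n; i++)
                        if (tbl[i].insn == fault_pc)
                                return tbl[i].fixup;
                return 0;       /* no fixup: unhandled fault */
        }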

arch/arm64/mm/mmu.c

@@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }

 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-				  unsigned long end, unsigned long phys,
+				  unsigned long end, phys_addr_t phys,
 				  int map_io)
 {
 	pud_t *pud;
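
phys_addr_t is the dedicated type for physical addresses and never
truncates, whereas unsigned long is only safe where it is at least as wide
as phys_addr_t. On arm64 both are 64-bit, so this change is mostly type
hygiene, but the sketch below shows the failure mode of the bug class on a
target with 32-bit longs (hypothetical helpers, illustrative only):

        #include <stdio.h>
        #include <stdint.h>

        typedef uint64_t phys_addr_t;   /* stand-in for the kernel typedef */

        /* Truncates the address wherever unsigned long is 32-bit. */
        static void map_region_wrong(unsigned long phys)
        {
                printf("mapping %#lx\n", phys);
        }

        /* Always wide enough for a physical address. */
        static void map_region_right(phys_addr_t phys)
        {
                printf("mapping %#llx\n", (unsigned long long)phys);
        }

        int main(void)
        {
                phys_addr_t high = 0x100000000ULL;  /* needs > 32 bits */

                map_region_wrong(high);  /* prints 0x0 when long is 32-bit */
                map_region_right(high);  /* prints 0x100000000 everywhere */
                return 0;
        }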