parisc: add spinlock to patch function

If multiple CPUs are patching code, we need the spinlock
to protect against parallel fixmap map/unmap calls.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Helge Deller <deller@gmx.de>
This commit is contained in:
Sven Schnelle 2019-06-05 22:32:18 +02:00 committed by Helge Deller
parent 4e87ace902
commit 7e923369b1
1 changed file with 19 additions and 6 deletions

View File

@ -23,7 +23,9 @@ struct patch {
static DEFINE_RAW_SPINLOCK(patch_lock); static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap, int *need_unmap) static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
int *need_unmap)
{
unsigned long uintaddr = (uintptr_t) addr; unsigned long uintaddr = (uintptr_t) addr;
bool module = !core_kernel_text(uintaddr); bool module = !core_kernel_text(uintaddr);
struct page *page; struct page *page;
@ -38,19 +40,29 @@ static void __kprobes *patch_map(void *addr, int fixmap, int *need_unmap)
*need_unmap = 1; *need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page)); set_fixmap(fixmap, page_to_phys(page));
if (flags)
raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
} }
/*
 * Tear down a fixmap mapping established by patch_map() and release
 * patch_lock.
 *
 * @fixmap: fixmap slot to clear (e.g. FIX_TEXT_POKE0)
 * @flags:  pointer to the IRQ flags that patch_map() saved when it took
 *          patch_lock, or NULL when the lock was not actually taken; in
 *          that case __release() only balances the __acquire() in
 *          patch_map() for sparse's lock-context checking and generates
 *          no code.
 */
static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
	clear_fixmap(fixmap);

	if (flags)
		raw_spin_unlock_irqrestore(&patch_lock, *flags);
	else
		__release(&patch_lock);
}
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
{ {
unsigned long start = (unsigned long)addr; unsigned long start = (unsigned long)addr;
unsigned long end = (unsigned long)addr + len; unsigned long end = (unsigned long)addr + len;
unsigned long flags;
u32 *p, *fixmap; u32 *p, *fixmap;
int mapped; int mapped;
@ -58,7 +70,7 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
flush_kernel_vmap_range(addr, len); flush_kernel_vmap_range(addr, len);
flush_icache_range(start, end); flush_icache_range(start, end);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &mapped); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
while (len >= 4) { while (len >= 4) {
*p++ = *insn++; *p++ = *insn++;
@ -72,14 +84,15 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
flush_kernel_vmap_range((void *)fixmap, flush_kernel_vmap_range((void *)fixmap,
(p-fixmap) * sizeof(*p)); (p-fixmap) * sizeof(*p));
if (mapped) if (mapped)
patch_unmap(FIX_TEXT_POKE0); patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &mapped); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
&mapped);
} }
} }
flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
if (mapped) if (mapped)
patch_unmap(FIX_TEXT_POKE0); patch_unmap(FIX_TEXT_POKE0, &flags);
flush_icache_range(start, end); flush_icache_range(start, end);
} }