sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_*
The existing code relies on pagefault_disable() implicitly disabling preemption, so that no schedule will happen between kmap_atomic() and kunmap_atomic().

Let's make this explicit, to prepare for pagefault_disable() not touching preemption anymore.

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
b3c395ef55
commit
2cb7c9cb42
|
@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
|
|||
void *kmap;
|
||||
int type;
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
|
||||
}
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
|
@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
|
|||
int idx, type;
|
||||
struct page *page = pfn_to_page(pfn);
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
|
|
@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
|
|||
unsigned long paddr;
|
||||
int type;
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
type = kmap_atomic_idx_push();
|
||||
paddr = page_to_phys(page);
|
||||
|
@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
|
|||
}
|
||||
kmap_atomic_idx_pop();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
|
|
@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
|
|||
unsigned long vaddr;
|
||||
int type;
|
||||
|
||||
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
}
|
||||
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
|
@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
|
|||
unsigned long vaddr;
|
||||
int type;
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
|
||||
type = kmap_atomic_idx_push();
|
||||
|
|
|
@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
|
|||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
|
||||
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
|
|||
#endif
|
||||
kmap_atomic_idx_pop();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
|
|
@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
|
|||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
|
||||
if (vaddr < FIXADDR_START) { // FIXME
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
#endif
|
||||
kmap_atomic_idx_pop();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
|
@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
|
|||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
|
||||
type = kmap_atomic_idx_push();
|
||||
|
|
|
@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
|
|||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (page < highmem_start_page)
|
||||
return page_address(page);
|
||||
|
@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
|
|||
|
||||
if (vaddr < FIXADDR_START) { /* FIXME */
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
|
|||
|
||||
kmap_atomic_idx_pop();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
|
|
|
@ -142,6 +142,7 @@ static inline void kunmap(struct page *page)
|
|||
|
||||
static inline void *kmap_atomic(struct page *page)
|
||||
{
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
return page_address(page);
|
||||
}
|
||||
|
@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr)
|
|||
{
|
||||
flush_kernel_dcache_page_addr(addr);
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
|
||||
|
|
|
@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
|
|||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
|
||||
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
|
|||
|
||||
kmap_atomic_idx_pop();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
|
|
@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
|
|||
unsigned long vaddr;
|
||||
long idx, type;
|
||||
|
||||
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
|
||||
if (vaddr < FIXADDR_START) { // FIXME
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
|
|||
|
||||
kmap_atomic_idx_pop();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
|
|
@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
|
|||
int idx, type;
|
||||
pte_t *pte;
|
||||
|
||||
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
|
||||
/* Avoid icache flushes by disallowing atomic executable mappings. */
|
||||
|
@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
}
|
||||
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
|
|||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
|
||||
if (!PageHighMem(page))
|
||||
|
@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
#endif
|
||||
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
|
|
|
@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
|
|||
unsigned long vaddr;
|
||||
int idx, type;
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
|
||||
type = kmap_atomic_idx_push();
|
||||
|
@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
|
|||
}
|
||||
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iounmap_atomic);
|
||||
|
|
|
@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
|
|||
enum fixed_addresses idx;
|
||||
unsigned long vaddr;
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
if (!PageHighMem(page))
|
||||
return page_address(page);
|
||||
|
@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
|
|||
}
|
||||
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
|
|
|
@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
|
|||
|
||||
static inline void *kmap_atomic(struct page *page)
|
||||
{
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
return page_address(page);
|
||||
}
|
||||
|
@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
|
|||
static inline void __kunmap_atomic(void *addr)
|
||||
{
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
|
||||
|
|
|
@ -141,6 +141,7 @@ static inline void __iomem *
|
|||
io_mapping_map_atomic_wc(struct io_mapping *mapping,
|
||||
unsigned long offset)
|
||||
{
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
return ((char __force __iomem *) mapping) + offset;
|
||||
}
|
||||
|
@ -149,6 +150,7 @@ static inline void
|
|||
io_mapping_unmap_atomic(void __iomem *vaddr)
|
||||
{
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/* Non-atomic map/unmap */
|
||||
|
|
Loading…
Reference in New Issue