/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <linux/sched.h>
#include <linux/mm_types.h>

#include <asm/mmu.h>
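
/*
 * Map the stub pages into @mm, giving them a proper VMA; called from
 * arch_dup_mmap() and activate_mm() below.
 */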
extern void uml_setup_stubs(struct mm_struct *mm);

/*
 * Needed since we do not use asm-generic/mm_hooks.h:
 */
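/* Called when an mm is duplicated (fork): give the new mm its own stubs. */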
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
	return 0;
}
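
/* Clears the stub page-table entries when an address space is torn down. */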
extern void arch_exit_mmap(struct mm_struct *mm);
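
/*
 * The remaining hooks need no work on UML, so they are empty or
 * permissive defaults.
 */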
static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

/*
 * end asm-generic/mm_hooks.h functions
 */
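
/* Nothing to do when an mm is deactivated on UML. */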
#define deactivate_mm(tsk, mm)	do { } while (0)
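
/* Force a full re-sync of the host mappings for current->mm. */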
extern void force_flush_all(void);

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and sys_unshare()
	 * when the new ->mm is used for the first time.
	 */
	__switch_mm(&new->context.id);
	down_write(&new->mmap_sem);
	uml_setup_stubs(new);
	up_write(&new->mmap_sem);
}
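
/*
 * Update the per-CPU mm bookkeeping and switch to the new mm's host
 * context; the switch is skipped for init_mm, which has no userspace
 * host context of its own.
 */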
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (prev != next) {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}
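
/* Nothing to do when entering lazy TLB mode on UML. */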
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
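
/*
 * Create and destroy the host-side context (the address-space id used by
 * __switch_mm() above) that backs an mm.
 */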
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif