[S390] mm: fix mmu_gather rework

Quite a few functions that get called from the tlb gather code require that
preemption is disabled. So disable preemption inside the called functions
instead.

The only drawback is that rcu_table_freelist_finish() doesn't necessarily get
called on the cpu(s) that filled the free lists, so we may see a delay until
the RCU callback finally runs. Over time, however, this shouldn't matter.

So we get rid of lots of "BUG: using smp_processor_id() in preemptible"
messages.
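
As a rough illustration of the pattern used here (a sketch only, not code from
this patch; example_list, struct example_batch and consume() are made-up names),
get_cpu_var()/put_cpu_var() bracket the per-cpu access so the callee itself runs
with preemption disabled:

	static DEFINE_PER_CPU(struct example_batch *, example_list);

	static void example_finish(void)
	{
		struct example_batch **slot = &get_cpu_var(example_list);

		/* Preemption is off here, so this cpu's slot cannot change under us. */
		if (*slot) {
			consume(*slot);
			*slot = NULL;
		}
		put_cpu_var(example_list);	/* re-enables preemption */
	}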

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Author: Heiko Carstens
Date:   2011-05-29 12:40:51 +02:00
parent: a43a9d93d4
commit: 3c5cffb66d

1 changed file with 16 additions and 7 deletions

@@ -71,12 +71,15 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
 
 void rcu_table_freelist_finish(void)
 {
-	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);
+	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
+	struct rcu_table_freelist *batch = *batchp;
 
 	if (!batch)
-		return;
+		goto out;
 	call_rcu(&batch->rcu, rcu_table_freelist_callback);
-	__get_cpu_var(rcu_table_freelist) = NULL;
+	*batchp = NULL;
+out:
+	put_cpu_var(rcu_table_freelist);
 }
 
 static void smp_sync(void *arg)
@@ -141,20 +144,23 @@ void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
 	struct rcu_table_freelist *batch;
 
+	preempt_disable();
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		crst_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
 		crst_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch->table[--batch->crst_index] = table;
 	if (batch->pgt_index >= batch->crst_index)
 		rcu_table_freelist_finish();
+out:
+	preempt_enable();
 }
 
 #ifdef CONFIG_64BIT
@@ -323,16 +329,17 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned long bits;
 
+	preempt_disable();
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		page_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
 		page_table_free(mm, table);
-		return;
+		goto out;
 	}
 	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
@@ -345,6 +352,8 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	batch->table[batch->pgt_index++] = table;
 	if (batch->pgt_index >= batch->crst_index)
 		rcu_table_freelist_finish();
+out:
+	preempt_enable();
 }
 
 /*