audit: Simplify locking around untag_chunk()

untag_chunk() has to be called with hash_lock, it drops it and
reacquires it when returning. The unlocking of hash_lock is thus hidden
from the callers of untag_chunk(), which is rather error prone. Reorganize
the code so that untag_chunk() is called without hash_lock, only with a
mark reference preventing the chunk from going away.

Since this requires some more code in the caller of untag_chunk() to
assure forward progress, factor out the loop that prunes the tree from
all chunks into a common helper function.

Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Richard Guy Briggs <rgb@redhat.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>
This commit is contained in:
Jan Kara 2018-11-12 09:54:56 -05:00 committed by Paul Moore
parent c22fcde775
commit 8432c70062
1 changed file with 40 additions and 37 deletions

View File

@ -332,28 +332,18 @@ static int chunk_count_trees(struct audit_chunk *chunk)
return ret; return ret;
} }
static void untag_chunk(struct node *p) static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *entry)
{ {
struct audit_chunk *chunk = find_chunk(p); struct audit_chunk *new;
struct fsnotify_mark *entry = chunk->mark;
struct audit_chunk *new = NULL;
int size; int size;
remove_chunk_node(chunk, p); mutex_lock(&audit_tree_group->mark_mutex);
fsnotify_get_mark(entry);
spin_unlock(&hash_lock);
mutex_lock(&entry->group->mark_mutex);
/* /*
* mark_mutex protects mark from getting detached and thus also from * mark_mutex protects mark from getting detached and thus also from
* mark->connector->obj getting NULL. * mark->connector->obj getting NULL.
*/ */
if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED))
mutex_unlock(&entry->group->mark_mutex); goto out_mutex;
if (new)
fsnotify_put_mark(new->mark);
goto out;
}
size = chunk_count_trees(chunk); size = chunk_count_trees(chunk);
if (!size) { if (!size) {
@ -363,9 +353,9 @@ static void untag_chunk(struct node *p)
list_del_rcu(&chunk->hash); list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock); spin_unlock(&hash_lock);
fsnotify_detach_mark(entry); fsnotify_detach_mark(entry);
mutex_unlock(&entry->group->mark_mutex); mutex_unlock(&audit_tree_group->mark_mutex);
fsnotify_free_mark(entry); fsnotify_free_mark(entry);
goto out; return;
} }
new = alloc_chunk(size); new = alloc_chunk(size);
@ -387,16 +377,13 @@ static void untag_chunk(struct node *p)
replace_chunk(new, chunk); replace_chunk(new, chunk);
spin_unlock(&hash_lock); spin_unlock(&hash_lock);
fsnotify_detach_mark(entry); fsnotify_detach_mark(entry);
mutex_unlock(&entry->group->mark_mutex); mutex_unlock(&audit_tree_group->mark_mutex);
fsnotify_free_mark(entry); fsnotify_free_mark(entry);
fsnotify_put_mark(new->mark); /* drop initial reference */ fsnotify_put_mark(new->mark); /* drop initial reference */
goto out; return;
out_mutex: out_mutex:
mutex_unlock(&entry->group->mark_mutex); mutex_unlock(&audit_tree_group->mark_mutex);
out:
fsnotify_put_mark(entry);
spin_lock(&hash_lock);
} }
/* Call with group->mark_mutex held, releases it */ /* Call with group->mark_mutex held, releases it */
@ -579,22 +566,45 @@ static void kill_rules(struct audit_tree *tree)
} }
/* /*
* finish killing struct audit_tree * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
* chunks. The function expects tagged chunks are all at the beginning of the
* chunks list.
*/ */
static void prune_one(struct audit_tree *victim) static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{ {
spin_lock(&hash_lock); spin_lock(&hash_lock);
while (!list_empty(&victim->chunks)) { while (!list_empty(&victim->chunks)) {
struct node *p; struct node *p;
struct audit_chunk *chunk;
struct fsnotify_mark *mark;
p = list_entry(victim->chunks.next, struct node, list); p = list_first_entry(&victim->chunks, struct node, list);
/* have we run out of marked? */
if (tagged && !(p->index & (1U<<31)))
break;
chunk = find_chunk(p);
mark = chunk->mark;
remove_chunk_node(chunk, p);
fsnotify_get_mark(mark);
spin_unlock(&hash_lock);
untag_chunk(p); untag_chunk(chunk, mark);
fsnotify_put_mark(mark);
spin_lock(&hash_lock);
} }
spin_unlock(&hash_lock); spin_unlock(&hash_lock);
put_tree(victim); put_tree(victim);
} }
/*
* finish killing struct audit_tree
*/
static void prune_one(struct audit_tree *victim)
{
prune_tree_chunks(victim, false);
}
/* trim the uncommitted chunks from tree */ /* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree) static void trim_marked(struct audit_tree *tree)
@ -614,18 +624,11 @@ static void trim_marked(struct audit_tree *tree)
list_add(p, &tree->chunks); list_add(p, &tree->chunks);
} }
} }
spin_unlock(&hash_lock);
while (!list_empty(&tree->chunks)) { prune_tree_chunks(tree, true);
struct node *node;
node = list_entry(tree->chunks.next, struct node, list); spin_lock(&hash_lock);
/* have we run out of marked? */
if (!(node->index & (1U<<31)))
break;
untag_chunk(node);
}
if (!tree->root && !tree->goner) { if (!tree->root && !tree->goner) {
tree->goner = 1; tree->goner = 1;
spin_unlock(&hash_lock); spin_unlock(&hash_lock);