drm/mm: Use clear_bit_unlock() for releasing the drm_mm_node()

A few callers need to serialise the destruction of their drm_mm_node and
ensure it is removed from the drm_mm before freeing. However, to be
completely sure that every access from another thread has completed before
we free the struct, we require the RELEASE semantics of
clear_bit_unlock().

This allows a conditional locking pattern such as

Thread A			Thread B
  mutex_lock(mm_lock);		  if (drm_mm_node_allocated(node)) {
  drm_mm_remove_node(node);	    mutex_lock(mm_lock);
  mutex_unlock(mm_lock);	    if (drm_mm_node_allocated(node))
				      drm_mm_remove_node(node);
				    mutex_unlock(mm_lock);
				  }
				  kfree(node);

to serialise correctly without any lingering accesses from A to the
freed node. Allocation / insertion of the node is assumed never to race
with removal or eviction scanning.
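
As a concrete sketch of Thread B's side (destroy_node() and mm_lock are
illustrative, driver-side names; only drm_mm_node_allocated() and
drm_mm_remove_node() are real drm_mm API):

	static void destroy_node(struct drm_mm_node *node,
				 struct mutex *mm_lock)
	{
		/* Cheap unlocked test before committing to the lock. */
		if (drm_mm_node_allocated(node)) {
			mutex_lock(mm_lock);
			/* Recheck: Thread A may have removed it meanwhile. */
			if (drm_mm_node_allocated(node))
				drm_mm_remove_node(node);
			mutex_unlock(mm_lock);
		}

		/*
		 * clear_bit_unlock() in drm_mm_remove_node() orders all of
		 * the remover's accesses to *node before the bit is observed
		 * clear, so this free cannot overlap a lingering access from
		 * Thread A.
		 */
		kfree(node);
	}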

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003210100.22250-5-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/drm_mm.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 
 	node->mm = mm;
 
-	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 	list_add(&node->node_list, &hole->node_list);
 	drm_mm_interval_tree_add_node(hole, node);
+	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 	node->hole_size = 0;
 
 	rm_hole(hole);
@@ -543,9 +543,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 		node->color = color;
 		node->hole_size = 0;
 
-		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 		list_add(&node->node_list, &hole->node_list);
 		drm_mm_interval_tree_add_node(hole, node);
+		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 
 		rm_hole(hole);
 		if (adj_start > hole_start)
@@ -589,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 
 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
-	__clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 
 	if (drm_mm_hole_follows(prev_node))
 		rm_hole(prev_node);
 	add_hole(prev_node);
+
+	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
@@ -614,6 +615,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 
 	*new = *old;
 
+	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
 	list_replace(&old->node_list, &new->node_list);
 	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
 
@@ -627,8 +629,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 			       &mm->holes_addr);
 	}
 
-	__clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
-	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
+	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
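
For reference, the lockless test used in the pattern above,
drm_mm_node_allocated(), is a plain test_bit() of the same flag (per
include/drm/drm_mm.h after the preceding bitops conversion):

	static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
	{
		return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	}

Once a reader observes the bit clear, the release ordering of
clear_bit_unlock() guarantees that the remover's prior accesses to the
node have completed.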