drm/mm: Convert drm_mm_node booleans to bitops
A straightforward conversion of assignment and checking of the boolean state flags (allocated, scanned) into non-atomic bitops. The caller remains responsible for all locking around the drm_mm and its nodes.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003210100.22250-4-chris@chris-wilson.co.uk
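For readers unfamiliar with the idiom: the double-underscore bitops (__set_bit(), __clear_bit(), from <linux/bitops.h>) are the non-atomic variants of set_bit()/clear_bit(). They read-modify-write the unsigned long word without atomicity, so they are cheaper but must be externally serialized — which is exactly the contract stated above, where the caller locks the drm_mm. A minimal illustrative sketch of the flags-word pattern, using hypothetical names that are not part of the patch:

	#include <linux/bitops.h>

	struct example_node {
		unsigned long flags;
	#define EXAMPLE_ALLOCATED_BIT	0
	#define EXAMPLE_SCANNED_BIT	1
	};

	/* Non-atomic write: the caller must hold whatever lock guards @node. */
	static void example_mark_allocated(struct example_node *node)
	{
		__set_bit(EXAMPLE_ALLOCATED_BIT, &node->flags);
	}

	static bool example_is_allocated(const struct example_node *node)
	{
		return test_bit(EXAMPLE_ALLOCATED_BIT, &node->flags);
	}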
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -426,7 +426,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 
 	list_add(&node->node_list, &hole->node_list);
 	drm_mm_interval_tree_add_node(hole, node);
-	node->allocated = true;
+	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 	node->hole_size = 0;
 
 	rm_hole(hole);
@@ -545,7 +545,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 
 	list_add(&node->node_list, &hole->node_list);
 	drm_mm_interval_tree_add_node(hole, node);
-	node->allocated = true;
+	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 
 	rm_hole(hole);
 	if (adj_start > hole_start)
@@ -563,7 +563,7 @@ EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
 static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
 {
-	return node->scanned_block;
+	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 }
 
 /**
@@ -589,7 +589,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 
 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
-	node->allocated = false;
+	__clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 
 	if (drm_mm_hole_follows(prev_node))
 		rm_hole(prev_node);
@@ -627,8 +627,8 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 				&mm->holes_addr);
 	}
 
-	old->allocated = false;
-	new->allocated = true;
+	__clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
+	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
 
@@ -738,7 +738,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	DRM_MM_BUG_ON(node->mm != mm);
 	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
 	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
-	node->scanned_block = true;
+	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 	mm->scan_active++;
 
 	/* Remove this block from the node_list so that we enlarge the hole
@@ -824,7 +824,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
 
 	DRM_MM_BUG_ON(node->mm != scan->mm);
 	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
-	node->scanned_block = false;
+	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 
 	DRM_MM_BUG_ON(!node->mm->scan_active);
 	node->mm->scan_active--;
@@ -922,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
-	mm->head_node.allocated = false;
+	mm->head_node.flags = 0;
 	mm->head_node.mm = mm;
 	mm->head_node.start = start + size;
 	mm->head_node.size = -size;
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -902,7 +902,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
 	cache->has_fence = cache->gen < 4;
 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
-	cache->node.allocated = false;
+	cache->node.flags = 0;
 	cache->rq = NULL;
 	cache->rq_size = 0;
 }
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -351,7 +351,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 					       PIN_NOEVICT);
 	if (!IS_ERR(vma)) {
 		node.start = i915_ggtt_offset(vma);
-		node.allocated = false;
+		node.flags = 0;
 	} else {
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
@@ -561,7 +561,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 					       PIN_NOEVICT);
 	if (!IS_ERR(vma)) {
 		node.start = i915_ggtt_offset(vma);
-		node.allocated = false;
+		node.flags = 0;
 	} else {
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
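A note on the two i915_gem.c hunks above: the drm_mm_node in these paths lives on the caller's stack, so its flags word starts out undefined. Writing node.flags = 0 (rather than clearing a single bool) clears both the ALLOCATED and SCANNED bits before the node is first tested, which the teardown path depends on. Roughly, the cleanup in those functions looks like the following abridged sketch (not part of this patch):

	/* Teardown: was a scratch node actually inserted into the GGTT,
	 * or did we reuse the pinned vma's offset?
	 */
	if (drm_mm_node_allocated(&node)) {
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}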
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -168,8 +168,9 @@ struct drm_mm_node {
 	struct rb_node rb_hole_addr;
 	u64 __subtree_last;
 	u64 hole_size;
-	bool allocated : 1;
-	bool scanned_block : 1;
+	unsigned long flags;
+#define DRM_MM_NODE_ALLOCATED_BIT	0
+#define DRM_MM_NODE_SCANNED_BIT	1
 #ifdef CONFIG_DRM_DEBUG_MM
 	depot_stack_handle_t stack;
 #endif
@@ -253,7 +254,7 @@ struct drm_mm_scan {
  */
 static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
 {
-	return node->allocated;
+	return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 }
 
 /**
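With the conversion in place, callers query allocation state only through the drm_mm_node_allocated() helper rather than poking the old booleans. A common idiom this supports is conditional teardown of a node that may or may not have been inserted (illustrative sketch with a hypothetical function name, not from this patch):

	static void example_release(struct drm_mm_node *node)
	{
		/* A zero-initialized flags word reads back as "not allocated",
		 * so this is safe even if insertion never happened.
		 */
		if (drm_mm_node_allocated(node))
			drm_mm_remove_node(node);
	}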