dma-buf: add more reservation object locking wrappers
Complete the abstraction of the ww_mutex inside the reservation object.
This allows us to add more handling and debugging to the reservation
object in the future.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/320761/
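The conversion below is mechanical: every direct use of the ww_mutex embedded in the reservation object is replaced by a named wrapper with the same semantics. A minimal sketch of the before/after shape — the surrounding function is hypothetical, only the locking calls mirror this patch:

	#include <linux/reservation.h>

	/* Hypothetical import helper, not taken from this patch. */
	static int example_import(struct reservation_object *resv)
	{
		int ret;

		/* before: ww_mutex_lock(&resv->lock, NULL); */
		ret = reservation_object_lock(resv, NULL);
		if (ret)
			return ret;

		/* ... set up the buffer object under the lock ... */

		/* before: ww_mutex_unlock(&resv->lock); */
		reservation_object_unlock(resv);
		return 0;
	}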
@@ -1729,7 +1729,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	*map = mapping;
 
 	/* Double check that the BO is reserved by this CS */
-	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+	if (reservation_object_locking_ctx((*bo)->tbo.resv) != &parser->ticket)
 		return -EINVAL;
 
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {

@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	bp.flags = 0;
 	bp.type = ttm_bo_type_sg;
 	bp.resv = resv;
-	ww_mutex_lock(&resv->lock, NULL);
+	reservation_object_lock(resv, NULL);
 	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret)
 		goto error;

@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
 		bo->prime_shared_count = 1;
 
-	ww_mutex_unlock(&resv->lock);
+	reservation_object_unlock(resv);
 	return &bo->gem_base;
 
 error:
-	ww_mutex_unlock(&resv->lock);
+	reservation_object_unlock(resv);
 	return ERR_PTR(ret);
 }
 

@@ -546,7 +546,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 fail_unreserve:
 	if (!bp->resv)
-		ww_mutex_unlock(&bo->tbo.resv->lock);
+		reservation_object_unlock(bo->tbo.resv);
 	amdgpu_bo_unref(&bo);
 	return r;
 }

@@ -1089,7 +1089,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	reservation_object_assert_held(bo->tbo.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;

@@ -1330,7 +1330,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+	WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.resv) &&
 		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&

@@ -2416,7 +2416,8 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
 		struct amdgpu_bo *bo;
 
 		bo = mapping->bo_va->base.bo;
-		if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+		if (reservation_object_locking_ctx(bo->tbo.resv) !=
+		    ticket)
 			continue;
 	}
 

@@ -1288,7 +1288,7 @@ retry:
 	if (contended != -1) {
 		struct drm_gem_object *obj = objs[contended];
 
-		ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
+		ret = reservation_object_lock_slow_interruptible(obj->resv,
 								 acquire_ctx);
 		if (ret) {
 			ww_acquire_done(acquire_ctx);

@@ -1300,16 +1300,16 @@ retry:
 		if (i == contended)
 			continue;
 
-		ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
+		ret = reservation_object_lock_interruptible(objs[i]->resv,
 							    acquire_ctx);
 		if (ret) {
 			int j;
 
 			for (j = 0; j < i; j++)
-				ww_mutex_unlock(&objs[j]->resv->lock);
+				reservation_object_unlock(objs[j]->resv);
 
 			if (contended != -1 && contended >= i)
-				ww_mutex_unlock(&objs[contended]->resv->lock);
+				reservation_object_unlock(objs[contended]->resv);
 
 			if (ret == -EDEADLK) {
 				contended = i;

@@ -1334,7 +1334,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
 	int i;
 
 	for (i = 0; i < count; i++)
-		ww_mutex_unlock(&objs[i]->resv->lock);
+		reservation_object_unlock(objs[i]->resv);
 
 	ww_acquire_fini(acquire_ctx);
 }

@@ -68,10 +68,10 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 
 	flags = TTM_PL_FLAG_TT;
 
-	ww_mutex_lock(&robj->lock, NULL);
+	reservation_object_lock(robj, NULL);
 	ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
 			     sg, robj, &nvbo);
-	ww_mutex_unlock(&robj->lock);
+	reservation_object_unlock(robj);
 	if (ret)
 		return ERR_PTR(ret);
 

@@ -611,7 +611,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 	int steal;
 	int i;
 
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	reservation_object_assert_held(bo->tbo.resv);
 
 	if (!bo->tiling_flags)
 		return 0;

@@ -737,7 +737,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
-	lockdep_assert_held(&bo->tbo.resv->lock.base);
+	reservation_object_assert_held(bo->tbo.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;

@@ -749,7 +749,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 			   bool force_drop)
 {
 	if (!force_drop)
-		lockdep_assert_held(&bo->tbo.resv->lock.base);
+		reservation_object_assert_held(bo->tbo.resv);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;

@@ -68,10 +68,10 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
 	struct radeon_bo *bo;
 	int ret;
 
-	ww_mutex_lock(&resv->lock, NULL);
+	reservation_object_lock(resv, NULL);
 	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
 			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
-	ww_mutex_unlock(&resv->lock);
+	reservation_object_unlock(resv);
 	if (ret)
 		return ERR_PTR(ret);
 

@@ -850,8 +850,8 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 		if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
 						    &busy)) {
-			if (busy && !busy_bo &&
-			    bo->resv->lock.ctx != ticket)
+			if (busy && !busy_bo && ticket !=
+			    reservation_object_locking_ctx(bo->resv))
 				busy_bo = bo;
 			continue;
 		}

@@ -957,8 +957,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct ww_acquire_ctx *ticket;
 	int ret;
 
+	ticket = reservation_object_locking_ctx(bo->resv);
 	do {
 		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))

@@ -966,7 +968,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		if (mem->mm_node)
 			break;
 		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
-					  bo->resv->lock.ctx);
+					  ticket);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);

@@ -1963,7 +1965,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
 	ret = mutex_lock_interruptible(&bo->wu_mutex);
 	if (unlikely(ret != 0))
 		return -ERESTARTSYS;
-	if (!ww_mutex_is_locked(&bo->resv->lock))
+	if (!reservation_object_is_locked(bo->resv))
 		goto out_unlock;
 	ret = reservation_object_lock_interruptible(bo->resv, NULL);
 	if (ret == -EINTR)

@@ -144,10 +144,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 		if (ret == -EDEADLK) {
 			if (intr) {
-				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+				ret = reservation_object_lock_slow_interruptible(bo->resv,
 										 ticket);
 			} else {
-				ww_mutex_lock_slow(&bo->resv->lock, ticket);
+				reservation_object_lock_slow(bo->resv, ticket);
 				ret = 0;
 			}
 		}

@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 
 	/* Buffer objects need to be either pinned or reserved: */
 	if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		lockdep_assert_held(&dst->resv->lock.base);
+		reservation_object_assert_held(dst->resv);
 	if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		lockdep_assert_held(&src->resv->lock.base);
+		reservation_object_assert_held(src->resv);
 
 	if (dst->ttm->state == tt_unpopulated) {
 		ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);

@@ -342,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
-	lockdep_assert_held(&bo->resv->lock.base);
+	reservation_object_assert_held(bo->resv);
 
 	if (pin) {
 		if (vbo->pin_count++ > 0)

@@ -169,7 +169,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 	} *cmd;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	lockdep_assert_held(&bo->resv->lock.base);
+	reservation_object_assert_held(bo->resv);
 
 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 	if (!cmd)

@@ -311,7 +311,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 		return 0;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	lockdep_assert_held(&bo->resv->lock.base);
+	reservation_object_assert_held(bo->resv);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	if (!vcotbl->scrubbed)

@@ -402,14 +402,14 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 
 	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
-			lockdep_assert_held(&res->backup->base.resv->lock.base);
+			reservation_object_assert_held(res->backup->base.resv);
 			list_del_init(&res->mob_head);
 			vmw_bo_unreference(&res->backup);
 		}
 
 		if (new_backup) {
 			res->backup = vmw_bo_reference(new_backup);
-			lockdep_assert_held(&new_backup->base.resv->lock.base);
+			reservation_object_assert_held(new_backup->base.resv);
 			list_add_tail(&res->mob_head, &new_backup->res_list);
 		} else {
 			res->backup = NULL;

@@ -691,7 +691,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 		.num_shared = 0
 	};
 
-	lockdep_assert_held(&vbo->base.resv->lock.base);
+	reservation_object_assert_held(vbo->base.resv);
 	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
 		if (!res->func->unbind)
 			continue;

@@ -745,10 +745,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 	WARN_ON(!kref_read(&bo->kref));
 
 	if (interruptible)
-		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+		ret = reservation_object_lock_slow_interruptible(bo->resv,
 								 ticket);
 	else
-		ww_mutex_lock_slow(&bo->resv->lock, ticket);
+		reservation_object_lock_slow(bo->resv, ticket);
 
 	if (likely(ret == 0))
 		ttm_bo_del_sub_from_lru(bo);

@@ -140,6 +140,38 @@ reservation_object_lock_interruptible(struct reservation_object *obj,
 	return ww_mutex_lock_interruptible(&obj->lock, ctx);
 }
 
+/**
+ * reservation_object_lock_slow - slowpath lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object after a die case. This function
+ * will sleep until the lock becomes available. See reservation_object_lock() as
+ * well.
+ */
+static inline void
+reservation_object_lock_slow(struct reservation_object *obj,
+			     struct ww_acquire_ctx *ctx)
+{
+	ww_mutex_lock_slow(&obj->lock, ctx);
+}
+
+/**
+ * reservation_object_lock_slow_interruptible - slowpath lock the reservation
+ * object, interruptible
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object interruptible after a die case. This function
+ * will sleep until the lock becomes available. See
+ * reservation_object_lock_interruptible() as well.
+ */
+static inline int
+reservation_object_lock_slow_interruptible(struct reservation_object *obj,
+					   struct ww_acquire_ctx *ctx)
+{
+	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
+}
+
 /**
  * reservation_object_trylock - trylock the reservation object

@@ -161,6 +193,31 @@ reservation_object_trylock(struct reservation_object *obj)
 	return ww_mutex_trylock(&obj->lock);
 }
 
+/**
+ * reservation_object_is_locked - is the reservation object locked
+ * @obj: the reservation object
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool
+reservation_object_is_locked(struct reservation_object *obj)
+{
+	return ww_mutex_is_locked(&obj->lock);
+}
+
+/**
+ * reservation_object_locking_ctx - returns the context used to lock the object
+ * @obj: the reservation object
+ *
+ * Returns the context used to lock a reservation object or NULL if no context
+ * was used or the object is not locked at all.
+ */
+static inline struct ww_acquire_ctx *
+reservation_object_locking_ctx(struct reservation_object *obj)
+{
+	return READ_ONCE(obj->lock.ctx);
+}
+
 /**
  * reservation_object_unlock - unlock the reservation object
  * @obj: the reservation object
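With these additions the wrappers cover the full ww_mutex surface: lock, interruptible and slowpath variants, trylock, unlock, is_locked and locking_ctx. A hedged sketch of the usual wait/wound back-off dance written purely in terms of the wrappers — the helper name and its two-object scope are illustrative assumptions, not code from this patch:

	#include <linux/kernel.h>	/* swap() */
	#include <linux/reservation.h>

	/* Lock two reservation objects under one acquire context. */
	static int example_lock_pair(struct reservation_object *a,
				     struct reservation_object *b,
				     struct ww_acquire_ctx *ctx)
	{
		int ret;

		ret = reservation_object_lock_interruptible(a, ctx);
		if (ret)
			return ret;

		ret = reservation_object_lock_interruptible(b, ctx);
		while (ret == -EDEADLK) {
			/* Wounded: back off and take the contended object first. */
			reservation_object_unlock(a);
			swap(a, b);
			ret = reservation_object_lock_slow_interruptible(a, ctx);
			if (ret)	/* the slowpath only fails with -EINTR */
				return ret;
			ret = reservation_object_lock_interruptible(b, ctx);
		}
		if (ret) {
			reservation_object_unlock(a);
			return ret;
		}

		/* Both objects are now locked and attributed to @ctx. */
		WARN_ON(reservation_object_locking_ctx(a) != ctx);
		return 0;
	}

A caller would bracket this with ww_acquire_init()/ww_acquire_done()/ww_acquire_fini() on the context, as ttm_eu_reserve_buffers() and drm_gem_lock_reservations() do in the hunks above.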