RDMA/mlx5: Simplify devx async commands
With the new FD structure the async commands do not need to hold any references while running. The existing mlx5_cmd_exec_cb() and mlx5_cmd_cleanup_async_ctx() provide enough synchronization to ensure that all outstanding commands are completed before the uobject can be destructed.

Remove the now-confusing get_file() and the type erasure of the devx_async_cmd_event_file.

Link: https://lore.kernel.org/r/1578504126-9400-4-git-send-email-yishaih@mellanox.com
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
@@ -30,7 +30,7 @@ enum devx_obj_flags {
|
|||||||
struct devx_async_data {
|
struct devx_async_data {
|
||||||
struct mlx5_ib_dev *mdev;
|
struct mlx5_ib_dev *mdev;
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
struct ib_uobject *fd_uobj;
|
struct devx_async_cmd_event_file *ev_file;
|
||||||
struct mlx5_async_work cb_work;
|
struct mlx5_async_work cb_work;
|
||||||
u16 cmd_out_len;
|
u16 cmd_out_len;
|
||||||
/* must be last field in this structure */
|
/* must be last field in this structure */
|
||||||
/*
 * Completion callback for an asynchronous devx FW query, registered via
 * mlx5_cmd_exec_cb(). Links the finished command's data onto the event
 * file's queue and wakes any reader blocked in poll/read on the FD.
 *
 * NOTE(review): runs in the mlx5 command-completion context — presumably
 * IRQ-disabled lock is required because readers take ev_queue->lock too;
 * confirm against the FD read/poll side.
 */
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
	unsigned long flags;

	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
}
@@ -1756,9 +1755,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
|
|||||||
|
|
||||||
async_data->cmd_out_len = cmd_out_len;
|
async_data->cmd_out_len = cmd_out_len;
|
||||||
async_data->mdev = mdev;
|
async_data->mdev = mdev;
|
||||||
async_data->fd_uobj = fd_uobj;
|
async_data->ev_file = ev_file;
|
||||||
|
|
||||||
get_file(fd_uobj->object);
|
|
||||||
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
|
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
|
||||||
err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
|
err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
|
||||||
uverbs_attr_get_len(attrs,
|
uverbs_attr_get_len(attrs,
|
||||||
@@ -1768,12 +1766,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
|
|||||||
devx_query_callback, &async_data->cb_work);
|
devx_query_callback, &async_data->cb_work);
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
goto cb_err;
|
goto free_async;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
cb_err:
|
|
||||||
fput(fd_uobj->object);
|
|
||||||
free_async:
|
free_async:
|
||||||
kvfree(async_data);
|
kvfree(async_data);
|
||||||
sub_bytes:
|
sub_bytes:
|
||||||
|
Reference in New Issue
Block a user