Merge tag 'vmwgfx-next-4.19-2' of git://people.freedesktop.org/~thomash/linux into drm-next
A series of cleanups / reorganizations and modesetting changes that mostly
target atomic state validation.

[airlied: conflicts with SPDX stuff in amdgpu tree]
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1a88485e-e509-b00e-8485-19194f074115@vmware.com
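Nearly everything in the diff below is a mechanical rename of the driver's
buffer-object abstraction: struct vmw_dma_buffer becomes struct
vmw_buffer_object, and the vmw_dmabuf_* / vmw_user_dmabuf_* helpers become
vmw_bo_* / vmw_user_bo_*. A minimal sketch of the lookup/release lifecycle
under the new names; the wrapper function and its arguments are illustrative,
not part of this patch:

static int example_bo_lookup(struct ttm_object_file *tfile, uint32_t handle)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	int ret;

	/*
	 * Look up the buffer object backing a user-space handle; on
	 * success the lookup returns a reference that we must drop.
	 */
	ret = vmw_user_bo_lookup(tfile, handle, &vmw_bo, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* ... use vmw_bo ... */

	/* Drop the lookup's reference, exactly as the hunks below do. */
	vmw_bo_unreference(&vmw_bo);
	return 0;
}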
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
 	struct list_head head;
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
-	struct vmw_dma_buffer *new_backup;
+	struct vmw_buffer_object *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
-                                struct vmw_dma_buffer **vmw_bo_p);
+                                struct vmw_buffer_object **vmw_bo_p);
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                   struct vmw_dma_buffer *vbo,
+                                   struct vmw_buffer_object *vbo,
                                    bool validate_as_mob,
                                    uint32_t *p_val_node);
 /**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
 		}
 		vmw_resource_unreserve(res, switch_backup, val->new_backup,
 				       val->new_backup_offset);
-		vmw_dmabuf_unreference(&val->new_backup);
+		vmw_bo_unreference(&val->new_backup);
 	}
 }

@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	}

 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-		struct vmw_dma_buffer *dx_query_mob;
+		struct vmw_buffer_object *dx_query_mob;

 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 		if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                   struct vmw_dma_buffer *vbo,
+                                   struct vmw_buffer_object *vbo,
                                    bool validate_as_mob,
                                    uint32_t *p_val_node)
 {
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			return ret;

 		if (res->backup) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;

 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 	}

 	if (sw_context->dx_query_mob) {
-		struct vmw_dma_buffer *expected_dx_query_mob;
+		struct vmw_buffer_object *expected_dx_query_mob;

 		expected_dx_query_mob =
 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)

 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *backup = res->backup;
+		struct vmw_buffer_object *backup = res->backup;

 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)

 		/* Check if the resource switched backup buffer */
 		if (backup && res->backup && (backup != res->backup)) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;

 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
 	struct vmw_private *dev_priv = ctx_res->dev_priv;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-                                       struct vmw_dma_buffer *new_query_bo,
+                                       struct vmw_buffer_object *new_query_bo,
                                        struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
-			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+			vmw_bo_unreference(&dev_priv->pinned_bo);
 		}

 		if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				vmw_dmabuf_reference(sw_context->cur_query_bo);
+				vmw_bo_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
-                                struct vmw_dma_buffer **vmw_bo_p)
+                                struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;

-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	return 0;

 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1343,15 +1342,14 @@ out_no_reloc:
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
-                                  struct vmw_dma_buffer **vmw_bo_p)
+                                  struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;

-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	return 0;

 out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 		SVGA3dCmdDXBindQuery q;
 	} *cmd;

-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;


@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 	sw_context->dx_query_mob = vmw_bo;
 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);

 	return ret;
 }
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,

 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }

@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,

 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }

@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }

@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }

@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	struct vmw_surface *srf = NULL;
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 			header);

 out_no_surface:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }

@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;

 	struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;

-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);

 	return ret;
 }
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
                                     uint32_t *buf_id,
                                     unsigned long backup_offset)
 {
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *dma_buf;
 	int ret;

 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 	if (val_node->first_usage)
 		val_node->no_buffer_needed = true;

-	vmw_dmabuf_unreference(&val_node->new_backup);
+	vmw_bo_unreference(&val_node->new_backup);
 	val_node->new_backup = dma_buf;
 	val_node->new_backup_offset = backup_offset;

@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                               bool interruptible,
                               bool validate_as_mob)
 {
-	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
-						  base);
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
 	struct ttm_operation_ctx ctx = { interruptible, true };
 	int ret;

@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,

 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 out_unlock:
 	return;

@@ -4432,7 +4430,7 @@ out_no_emit:
 out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 }

 /**
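For context, every query-BO hunk above preserves the same reference
discipline under the new names: unpin and release the previously pinned
buffer before taking a reference on its replacement. Condensed from the
vmw_query_bo_switch_commit() hunks (the barrier handling between the two
steps is elided here):

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			/* Unpin and drop the old pinned query BO. */
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}
		/* ... */
		/* Track the batch's current query BO instead. */
		dev_priv->pinned_bo =
			vmw_bo_reference(sw_context->cur_query_bo);
	}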