drm/ttm: use an operation ctx for ttm_mem_global_alloc
Forward the operation context to ttm_mem_global_alloc as well; the ultimate goal is swapout enablement for reserved BOs. Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Roger He <Hongbo.He@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
@@ -1202,10 +1202,14 @@ struct vmw_ctx_binding_state *
|
||||
vmw_binding_state_alloc(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_ctx_binding_state *cbs;
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = false,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
|
||||
false, false);
|
||||
&ctx);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
|
@@ -394,6 +394,10 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
|
||||
struct vmw_private *dev_priv = vmw_tt->dev_priv;
|
||||
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
|
||||
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
struct vmw_piter iter;
|
||||
dma_addr_t old;
|
||||
int ret = 0;
|
||||
@@ -417,8 +421,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
|
||||
sgt_size = ttm_round_pot(sizeof(struct sg_table));
|
||||
}
|
||||
vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
|
||||
ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
|
||||
true);
|
||||
ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
@@ -638,6 +641,10 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
|
||||
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
|
||||
struct vmw_private *dev_priv = vmw_tt->dev_priv;
|
||||
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (ttm->state != tt_unpopulated)
|
||||
@@ -646,7 +653,7 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
|
||||
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
|
||||
size_t size =
|
||||
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
|
||||
ret = ttm_mem_global_alloc(glob, size, false, true);
|
||||
ret = ttm_mem_global_alloc(glob, size, &ctx);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
|
@@ -746,6 +746,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
|
||||
struct vmw_resource *tmp;
|
||||
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
|
||||
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
|
||||
struct ttm_operation_ctx ttm_opt_ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (!dev_priv->has_dx && dx) {
|
||||
@@ -768,7 +772,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
vmw_user_context_size,
|
||||
false, true);
|
||||
&ttm_opt_ctx);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for context"
|
||||
|
@@ -573,6 +573,10 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
|
||||
u32 type)
|
||||
{
|
||||
struct vmw_cotable *vcotbl;
|
||||
struct ttm_operation_ctx ttm_opt_ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
u32 num_entries;
|
||||
|
||||
@@ -580,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
|
||||
cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
cotable_acc_size, false, true);
|
||||
cotable_acc_size, &ttm_opt_ctx);
|
||||
if (unlikely(ret))
|
||||
return ERR_PTR(ret);
|
||||
|
||||
|
@@ -588,6 +588,10 @@ int vmw_user_fence_create(struct drm_file *file_priv,
|
||||
struct vmw_user_fence *ufence;
|
||||
struct vmw_fence_obj *tmp;
|
||||
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = false,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@@ -596,7 +600,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
|
||||
*/
|
||||
|
||||
ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
|
||||
false, false);
|
||||
&ctx);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
|
@@ -607,6 +607,10 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
|
||||
struct vmw_dx_shader *shader;
|
||||
struct vmw_resource *res;
|
||||
struct vmw_private *dev_priv = ctx->dev_priv;
|
||||
struct ttm_operation_ctx ttm_opt_ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (!vmw_shader_dx_size)
|
||||
@@ -616,7 +620,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
|
||||
return -EINVAL;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
|
||||
false, true);
|
||||
&ttm_opt_ctx);
|
||||
if (ret) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for shader "
|
||||
@@ -730,6 +734,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
|
||||
{
|
||||
struct vmw_user_shader *ushader;
|
||||
struct vmw_resource *res, *tmp;
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@@ -742,7 +750,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
vmw_user_shader_size,
|
||||
false, true);
|
||||
&ctx);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for shader "
|
||||
@@ -800,6 +808,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
|
||||
{
|
||||
struct vmw_shader *shader;
|
||||
struct vmw_resource *res;
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@@ -812,7 +824,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
vmw_shader_size,
|
||||
false, true);
|
||||
&ctx);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for shader "
|
||||
|
@@ -149,6 +149,10 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct vmw_resource *res;
|
||||
struct vmw_resource *tmp;
|
||||
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
size_t alloc_size;
|
||||
size_t account_size;
|
||||
int ret;
|
||||
@@ -162,7 +166,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
|
||||
return ret;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
|
||||
false, true);
|
||||
&ctx);
|
||||
ttm_read_unlock(&dev_priv->reservation_sem);
|
||||
if (ret) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
|
@@ -329,6 +329,10 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
|
||||
struct vmw_private *dev_priv = ctx->dev_priv;
|
||||
struct vmw_resource *res;
|
||||
struct vmw_view *view;
|
||||
struct ttm_operation_ctx ttm_opt_ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
size_t size;
|
||||
int ret;
|
||||
|
||||
@@ -345,7 +349,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
|
||||
|
||||
size = offsetof(struct vmw_view, cmd) + cmd_size;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
|
||||
if (ret) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for view"
|
||||
|
@@ -700,6 +700,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_vmw_surface_create_req *req = &arg->req;
|
||||
struct drm_vmw_surface_arg *rep = &arg->rep;
|
||||
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
int ret;
|
||||
int i, j;
|
||||
uint32_t cur_bo_offset;
|
||||
@@ -741,7 +745,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
||||
return ret;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
size, false, true);
|
||||
size, &ctx);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for surface"
|
||||
@@ -1479,6 +1483,10 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
|
||||
{
|
||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
struct vmw_user_surface *user_srf;
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = true,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
struct vmw_surface *srf;
|
||||
int ret;
|
||||
u32 num_layers;
|
||||
@@ -1525,7 +1533,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
|
||||
return ret;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
user_accounting_size, false, true);
|
||||
user_accounting_size, &ctx);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for surface"
|
||||
|
Reference in New Issue
Block a user