vmwgfx_streamoutput.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

/**
 * struct vmw_dx_streamoutput - Streamoutput resource metadata.
 * @res: Base resource struct.
 * @ctx: Non-refcounted context to which @res belongs.
 * @cotable: Refcounted cotable holding this Streamoutput.
 * @cotable_head: List head for cotable-so_res list.
 * @id: User-space provided identifier.
 * @size: User-space provided mob size.
 * @committed: Whether streamoutput is actually created or pending creation.
 */
struct vmw_dx_streamoutput {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	struct vmw_resource *cotable;
	struct list_head cotable_head;
	u32 id;
	u32 size;
	bool committed;
};

static int vmw_dx_streamoutput_create(struct vmw_resource *res);
static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf);
static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
				      struct ttm_validate_buffer *val_buf);
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
					      enum vmw_cmdbuf_res_state state);

static const struct vmw_res_func vmw_dx_streamoutput_func = {
	.res_type = vmw_res_streamoutput,
	.needs_backup = true,
	.may_evict = false,
	.type_name = "DX streamoutput",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_streamoutput_create,
	.destroy = NULL, /* Command buffer managed resource. */
	.bind = vmw_dx_streamoutput_bind,
	.unbind = vmw_dx_streamoutput_unbind,
	.commit_notify = vmw_dx_streamoutput_commit_notify,
};
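
/**
 * vmw_res_to_dx_streamoutput - Return the streamoutput embedding @res.
 * @res: Pointer to the embedded struct vmw_resource.
 *
 * Return: Pointer to the containing struct vmw_dx_streamoutput.
 */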
static inline struct vmw_dx_streamoutput *
vmw_res_to_dx_streamoutput(struct vmw_resource *res)
{
	return container_of(res, struct vmw_dx_streamoutput, res);
}

/**
 * vmw_dx_streamoutput_unscrub - Reattach the MOB to streamoutput.
 * @res: The streamoutput resource.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
{
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd;

	if (!list_empty(&so->cotable_head) || !so->committed)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.soid = so->id;
	cmd->body.mobid = res->backup->base.resource->start;
	cmd->body.offsetInBytes = res->backup_offset;
	cmd->body.sizeInBytes = so->size;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_cotable_add_resource(so->cotable, &so->cotable_head);

	return 0;
}
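
/**
 * vmw_dx_streamoutput_create - Restore the device id of a committed
 * streamoutput, rebinding its MOB if one is attached.
 * @res: The streamoutput resource.
 *
 * Return: 0 on success, negative error code on failure.
 */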
static int vmw_dx_streamoutput_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
	int ret = 0;

	WARN_ON_ONCE(!so->committed);

	if (vmw_resource_mob_attached(res)) {
		mutex_lock(&dev_priv->binding_mutex);
		ret = vmw_dx_streamoutput_unscrub(res);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	res->id = so->id;

	return ret;
}
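
/**
 * vmw_dx_streamoutput_bind - Attach the backing MOB to the streamoutput.
 * @res: The streamoutput resource.
 * @val_buf: Validation buffer holding the MOB that backs @res.
 *
 * Return: 0 on success, negative error code on failure.
 */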
static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	int ret;

	if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
		return -EINVAL;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_dx_streamoutput_unscrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;
}

/**
 * vmw_dx_streamoutput_scrub - Unbind the MOB from streamoutput.
 * @res: The streamoutput resource.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd;

	if (list_empty(&so->cotable_head))
		return 0;

	WARN_ON_ONCE(!so->committed);

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.soid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	cmd->body.sizeInBytes = so->size;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	res->id = -1;
	list_del_init(&so->cotable_head);

	return 0;
}
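
/**
 * vmw_dx_streamoutput_unbind - Detach the backing MOB from the streamoutput.
 * @res: The streamoutput resource.
 * @readback: Whether to issue a readback before unbinding (unused here).
 * @val_buf: Validation buffer holding the MOB that backs @res.
 *
 * The buffer object is fenced so it is not reused before the device has
 * finished with it.
 *
 * Return: 0 on success, negative error code on failure.
 */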
static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
				      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_fence_obj *fence;
	int ret;

	if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
		return -EINVAL;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_dx_streamoutput_scrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	if (ret)
		return ret;

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(val_buf->bo, fence);

	if (fence != NULL)
		vmw_fence_obj_unreference(&fence);

	return 0;
}
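
/**
 * vmw_dx_streamoutput_commit_notify - Track commit state of the streamoutput.
 * @res: The streamoutput resource.
 * @state: Whether the command buffer managed resource is being added or
 * removed.
 *
 * On VMW_CMDBUF_RES_ADD the resource is added to the cotable and marked
 * committed; otherwise it is removed from the cotable, marked uncommitted
 * and its device id is invalidated.
 */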
static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
					      enum vmw_cmdbuf_res_state state)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);

	if (state == VMW_CMDBUF_RES_ADD) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_cotable_add_resource(so->cotable, &so->cotable_head);
		so->committed = true;
		res->id = so->id;
		mutex_unlock(&dev_priv->binding_mutex);
	} else {
		mutex_lock(&dev_priv->binding_mutex);
		list_del_init(&so->cotable_head);
		so->committed = false;
		res->id = -1;
		mutex_unlock(&dev_priv->binding_mutex);
	}
}

/**
 * vmw_dx_streamoutput_lookup - Do a streamoutput resource lookup by user key.
 * @man: Command buffer managed resource manager for current context.
 * @user_key: User-space identifier for lookup.
 *
 * Return: Valid refcounted vmw_resource on success, error pointer on failure.
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key)
{
	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput,
				     user_key);
}
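
/**
 * vmw_dx_streamoutput_res_free - Free the streamoutput and drop its cotable
 * reference.
 * @res: The streamoutput resource.
 */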
static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
{
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);

	vmw_resource_unreference(&so->cotable);
	kfree(so);
}
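
/**
 * vmw_dx_streamoutput_hw_destroy - Invalidate the streamoutput device id.
 * @res: The streamoutput resource.
 */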
static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
{
	/* Destroyed by user-space cmd buf or as part of context takedown. */
	res->id = -1;
}

/**
 * vmw_dx_streamoutput_add - Add a streamoutput as a cmd buf managed resource.
 * @man: Command buffer managed resource manager for current context.
 * @ctx: Pointer to context resource.
 * @user_key: The identifier for this streamoutput.
 * @list: The list of staged command buffer managed resources.
 *
 * Return: 0 on success, negative error code on failure.
 */
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx, u32 user_key,
			    struct list_head *list)
{
	struct vmw_dx_streamoutput *so;
	struct vmw_resource *res;
	struct vmw_private *dev_priv = ctx->dev_priv;
	int ret;

	so = kmalloc(sizeof(*so), GFP_KERNEL);
	if (!so)
		return -ENOMEM;

	res = &so->res;
	so->ctx = ctx;
	so->cotable = vmw_resource_reference
		(vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT));
	so->id = user_key;
	so->committed = false;
	INIT_LIST_HEAD(&so->cotable_head);
	ret = vmw_resource_init(dev_priv, res, true,
				vmw_dx_streamoutput_res_free,
				&vmw_dx_streamoutput_func);
	if (ret)
		goto out_resource_init;

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key,
				 res, list);
	if (ret)
		goto out_resource_init;

	res->id = so->id;
	res->hw_destroy = vmw_dx_streamoutput_hw_destroy;

out_resource_init:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dx_streamoutput_set_size - Set the streamoutput mob size in the res struct.
 * @res: The streamoutput resource whose size is to be set.
 * @size: The size provided by user-space.
 */
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size)
{
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);

	so->size = size;
}

/**
 * vmw_dx_streamoutput_remove - Stage streamoutput for removal.
 * @man: Command buffer managed resource manager for current context.
 * @user_key: The identifier for this streamoutput.
 * @list: The list of staged command buffer managed resources.
 *
 * Return: 0 on success, negative error code on failure.
 */
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       u32 user_key,
			       struct list_head *list)
{
	struct vmw_resource *r;

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput,
				     user_key, list, &r);
}

/**
 * vmw_dx_streamoutput_cotable_list_scrub - cotable unbind_func callback.
 * @dev_priv: Device private.
 * @list: The list of cotable resources.
 * @readback: Whether the call was part of a readback unbind.
 */
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback)
{
	struct vmw_dx_streamoutput *entry, *next;

	lockdep_assert_held_once(&dev_priv->binding_mutex);

	list_for_each_entry_safe(entry, next, list, cotable_head) {
		WARN_ON(vmw_dx_streamoutput_scrub(&entry->res));
		if (!readback)
			entry->committed = false;
	}
}