vmwgfx_context.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
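
/*
 * struct vmw_user_context - user-space visible context resource
 *
 * @base: The TTM base object backing the user-space handle.
 * @res: The embedded context resource.
 * @cbs: Binding state tracked for this context.
 * @man: Command buffer resource manager for this context.
 * @cotables: DX cotable resources, indexed by SVGACOTableType.
 * @cotable_lock: Protects updates of the @cotables array.
 * @dx_query_mob: Buffer object currently bound as the DX query MOB.
 */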
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_MAX];
        spinlock_t cotable_lock;
        struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */
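
/*
 * vmw_context_cotables_unref - Release the context's cotable references.
 *
 * @dev_priv: Pointer to the device private structure.
 * @uctx: The user context whose cotable references are dropped.
 *
 * Each cotable pointer is cleared under @uctx->cotable_lock and the
 * corresponding resource is unreferenced outside the lock.
 */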
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
                                       struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;
        u32 cotable_max = has_sm5_context(dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        for (i = 0; i < cotable_max; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}
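
/*
 * vmw_hw_context_destroy - Destroy the device context backing @res.
 *
 * @res: The context resource.
 *
 * For guest-backed and DX contexts this tears down the command buffer
 * resource manager, kills all bindings and invokes the resource's destroy
 * callback. For legacy contexts it emits SVGA_3D_CMD_CONTEXT_DESTROY.
 */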
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(dev_priv, uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return;

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}
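
/*
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether the context is a DX context.
 * @res: The context resource to initialize.
 * @res_free: Destructor called on failure, or NULL to kfree() the resource.
 *
 * Sets the backing MOB size and allocates the command buffer resource
 * manager, the binding state and, for DX contexts, the cotable resources.
 */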
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            sizeof(SVGAGBContextData));
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                u32 cotable_max = has_sm5_context(dev_priv) ?
                        SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
                for (i = 0; i < cotable_max; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (IS_ERR(uctx->cotables[i])) {
                                ret = PTR_ERR(uctx->cotables[i]);
                                goto out_cotables;
                        }
                }
        }

        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_cotables:
        vmw_context_cotables_unref(dev_priv, uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}
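
/*
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor called on failure, or NULL to kfree() the resource.
 * @dx: Whether to create a DX context.
 *
 * On guest-backed devices this dispatches to vmw_gb_context_init();
 * otherwise a legacy context is defined through the device command stream.
 */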
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

/*
 * GB context.
 */
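
/*
 * vmw_gb_context_create - Allocate a context id and define the guest-backed
 * context on the device.
 *
 * @res: The context resource.
 */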
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}
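
/*
 * vmw_gb_context_bind - Bind the context's backing MOB to the device.
 *
 * @res: The context resource.
 * @val_buf: The validated backup buffer; must reside in MOB memory.
 */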
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->resource->start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}
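
/*
 * vmw_gb_context_unbind - Scrub bindings, optionally read back the context
 * contents, and unbind the backing MOB.
 *
 * @res: The context resource.
 * @readback: Whether to issue SVGA_3D_CMD_READBACK_GB_CONTEXT first.
 * @val_buf: The validated backup buffer, which is fenced after submission.
 */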
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;
        vmw_cmd_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
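
/*
 * vmw_gb_context_destroy - Destroy the guest-backed context on the device
 * and release its context id.
 *
 * @res: The context resource.
 */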
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */

static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->resource->start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the otable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < cotable_max; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);

                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}
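
/*
 * vmw_dx_context_unbind - Scrub cotables and bindings, optionally read back
 * the context and pending query states, and unbind the backing MOB.
 *
 * @res: The context resource.
 * @readback: Whether to read back context and query contents.
 * @val_buf: The validated backup buffer, which is fenced after submission.
 */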
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_cmd_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}
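
/*
 * vmw_user_context_free - Resource destructor for user contexts.
 *
 * @res: The context resource embedded in a struct vmw_user_context.
 *
 * Frees the binding state, drops any DX query MOB association and frees
 * the user context together with its base object.
 */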
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
                container_of(res, struct vmw_user_context, res);

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
                container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}
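
/*
 * vmw_context_destroy_ioctl - Ioctl releasing the user-space reference on a
 * context handle.
 */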
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid);
}
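
/*
 * vmw_context_define - Common implementation for the context define ioctls.
 *
 * @dev: The drm device.
 * @data: The drm_vmw_context_arg describing the new context.
 * @file_priv: Identifies the calling client.
 * @dx: Whether to create a DX context.
 *
 * Allocates and initializes a user context resource and associates a TTM
 * base object with it so that the handle can be returned in @data.
 */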
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (!has_sm4_context(dev_priv) && dx) {
                VMW_DEBUG_USER("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                ret = -ENOMEM;
                goto out_ret;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_ret;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.handle;
out_err:
        vmw_resource_unreference(&res);
out_ret:
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }
        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}
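
/*
 * vmw_context_res_man - Return the context's command buffer resource manager.
 *
 * @ctx: The context resource.
 */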
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}
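
/*
 * vmw_context_cotable - Return the cotable resource of the given type, or
 * ERR_PTR(-EINVAL) if @cotable_type is out of range for the device.
 *
 * @ctx: The context resource.
 * @cotable_type: The SVGACOTableType of the requested cotable.
 */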
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        if (cotable_type >= cotable_max)
                return ERR_PTR(-EINVAL);

        return container_of(ctx, struct vmw_user_context, res)->
                cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_buffer_object *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_bo_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_bo_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}