vmwgfx_binding.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467
  1. // SPDX-License-Identifier: GPL-2.0 OR MIT
  2. /**************************************************************************
  3. *
  4. * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. /*
  28. * This file implements the vmwgfx context binding manager,
  29. * The sole reason for having to use this code is that vmware guest
  30. * backed contexts can be swapped out to their backing mobs by the device
  31. * at any time, also swapped in at any time. At swapin time, the device
  32. * validates the context bindings to make sure they point to valid resources.
  33. * It's this outside-of-drawcall validation (that can happen at any time),
  34. * that makes this code necessary.
  35. *
  36. * We therefore need to kill any context bindings pointing to a resource
  37. * when the resource is swapped out. Furthermore, if the vmwgfx driver has
  38. * swapped out the context we can't swap it in again to kill bindings because
  39. * of backing mob reservation lockdep violations, so as part of
  40. * context swapout, also kill all bindings of a context, so that they are
  41. * already killed if a resource to which a binding points
  42. * needs to be swapped out.
  43. *
  44. * Note that a resource can be pointed to by bindings from multiple contexts,
  45. * Therefore we can't easily protect this data by a per context mutex
  46. * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
  47. * to protect all binding manager data.
  48. *
  49. * Finally, any association between a context and a global resource
  50. * (surface, shader or even DX query) is conceptually a context binding that
  51. * needs to be tracked by this code.
  52. */
  53. #include "vmwgfx_drv.h"
  54. #include "vmwgfx_binding.h"
  55. #include "device_include/svga3d_reg.h"
/*
 * Bit numbers for per binding-type dirty tracking.
 * NOTE(review): presumably used with the @dirty bitmaps declared below to
 * record binding types with changes not yet emitted to the device; the
 * consuming code (vmw_binding_emit_dirty()) is outside this chunk — verify.
 */
#define VMW_BINDING_RT_BIT 0
#define VMW_BINDING_PS_BIT 1
#define VMW_BINDING_SO_T_BIT 2
#define VMW_BINDING_VB_BIT 3
#define VMW_BINDING_UAV_BIT 4
#define VMW_BINDING_CS_UAV_BIT 5
#define VMW_BINDING_NUM_BITS 6

/* Per-shader dirty bit; distinct from the context-wide bits above. */
#define VMW_BINDING_PS_SR_BIT 0
/**
 * struct vmw_ctx_binding_state - per context binding state
 *
 * @dev_priv: Pointer to device private structure.
 * @list: linked list of individual active bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units bindings.
 * @ds_view: Depth-stencil view binding.
 * @so_targets: StreamOutput target bindings.
 * @vertex_buffers: Vertex buffer bindings.
 * @index_buffer: Index buffer binding.
 * @per_shader: Per shader-type bindings.
 * @ua_views: UAV bindings.
 * @so_state: StreamOutput bindings.
 * @dirty: Bitmap tracking per binding-type changes that have not yet
 * been emitted to the device.
 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
 * have not yet been emitted to the device.
 * @bind_cmd_buffer: Scratch space used to construct binding commands.
 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
 * device binding slot of the first command data entry in @bind_cmd_buffer.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 *
 * All binding manager data, including this structure, is protected by the
 * global binding_mutex (see the comment at the top of this file).
 */
struct vmw_ctx_binding_state {
	struct vmw_private *dev_priv;
	struct list_head list;
	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_bindinfo_view ds_view;
	struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
	struct vmw_ctx_bindinfo_ib index_buffer;
	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
	struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
	struct vmw_ctx_bindinfo_so so_state;
	unsigned long dirty;
	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
	u32 bind_cmd_count;
	u32 bind_first_slot;
};
/*
 * Forward declarations of the per binding-type scrub functions, referenced
 * by the vmw_binding_infos table below. Each scrub function emits a device
 * command that detaches (rebind == false) or re-attaches (rebind == true)
 * the binding described by its bindinfo argument.
 */
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
				       bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);

static void vmw_binding_build_asserts(void) __attribute__ ((unused));

/* Common prototype shared by all scrub functions above. */
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
/**
 * struct vmw_binding_info - Per binding type information for the binding
 * manager
 *
 * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
 * @offsets: array[shader_slot] of offsets to the array[slot]
 * of struct bindings for the binding type.
 * @scrub_func: Pointer to the scrub function for this binding type.
 *
 * Holds static information to help optimize the binding manager and avoid
 * an excessive amount of switch statements.
 */
struct vmw_binding_info {
	size_t size;
	const size_t *offsets;
	vmw_scrub_func scrub_func;
};
/*
 * A number of static variables that help determine the scrub func and the
 * location of the struct vmw_ctx_bindinfo slots for each binding type.
 *
 * Multi-entry tables are indexed by shader slot; single-entry tables are
 * for binding types that do not vary per shader.
 */
static const size_t vmw_binding_shader_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
};
static const size_t vmw_binding_so_target_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, index_buffer),
};
/* ua_views[0] holds the UAV bindings, ua_views[1] the CS UAV bindings. */
static const size_t vmw_binding_uav_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
};
static const size_t vmw_binding_cs_uav_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
};
static const size_t vmw_binding_so_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, so_state),
};
/*
 * Static per binding-type information, indexed by enum vmw_ctx_binding_type.
 * Ties together, for each binding type, the size of its bindinfo struct,
 * the offset table locating its slots, and its scrub function.
 */
static const struct vmw_binding_info vmw_binding_infos[] = {
	[vmw_ctx_binding_shader] = {
		.size = sizeof(struct vmw_ctx_bindinfo_shader),
		.offsets = vmw_binding_shader_offsets,
		.scrub_func = vmw_binding_scrub_shader},
	[vmw_ctx_binding_rt] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_rt_offsets,
		.scrub_func = vmw_binding_scrub_render_target},
	[vmw_ctx_binding_tex] = {
		.size = sizeof(struct vmw_ctx_bindinfo_tex),
		.offsets = vmw_binding_tex_offsets,
		.scrub_func = vmw_binding_scrub_texture},
	[vmw_ctx_binding_cb] = {
		.size = sizeof(struct vmw_ctx_bindinfo_cb),
		.offsets = vmw_binding_cb_offsets,
		.scrub_func = vmw_binding_scrub_cb},
	[vmw_ctx_binding_dx_shader] = {
		.size = sizeof(struct vmw_ctx_bindinfo_shader),
		.offsets = vmw_binding_shader_offsets,
		.scrub_func = vmw_binding_scrub_dx_shader},
	[vmw_ctx_binding_dx_rt] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_rt_offsets,
		.scrub_func = vmw_binding_scrub_dx_rt},
	[vmw_ctx_binding_sr] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_sr_offsets,
		.scrub_func = vmw_binding_scrub_sr},
	/* The depth-stencil view shares the dx_rt scrub function. */
	[vmw_ctx_binding_ds] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_dx_ds_offsets,
		.scrub_func = vmw_binding_scrub_dx_rt},
	[vmw_ctx_binding_so_target] = {
		.size = sizeof(struct vmw_ctx_bindinfo_so_target),
		.offsets = vmw_binding_so_target_offsets,
		.scrub_func = vmw_binding_scrub_so_target},
	[vmw_ctx_binding_vb] = {
		.size = sizeof(struct vmw_ctx_bindinfo_vb),
		.offsets = vmw_binding_vb_offsets,
		.scrub_func = vmw_binding_scrub_vb},
	[vmw_ctx_binding_ib] = {
		.size = sizeof(struct vmw_ctx_bindinfo_ib),
		.offsets = vmw_binding_ib_offsets,
		.scrub_func = vmw_binding_scrub_ib},
	[vmw_ctx_binding_uav] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_uav_offsets,
		.scrub_func = vmw_binding_scrub_uav},
	[vmw_ctx_binding_cs_uav] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_cs_uav_offsets,
		.scrub_func = vmw_binding_scrub_cs_uav},
	[vmw_ctx_binding_so] = {
		.size = sizeof(struct vmw_ctx_bindinfo_so),
		.offsets = vmw_binding_so_offsets,
		.scrub_func = vmw_binding_scrub_so},
};
  258. /**
  259. * vmw_cbs_context - Return a pointer to the context resource of a
  260. * context binding state tracker.
  261. *
  262. * @cbs: The context binding state tracker.
  263. *
  264. * Provided there are any active bindings, this function will return an
  265. * unreferenced pointer to the context resource that owns the context
  266. * binding state tracker. If there are no active bindings, this function
  267. * will return NULL. Note that the caller must somehow ensure that a reference
  268. * is held on the context resource prior to calling this function.
  269. */
  270. static const struct vmw_resource *
  271. vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
  272. {
  273. if (list_empty(&cbs->list))
  274. return NULL;
  275. return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
  276. ctx_list)->ctx;
  277. }
  278. /**
  279. * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
  280. *
  281. * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
  282. * @bt: The binding type.
  283. * @shader_slot: The shader slot of the binding. If none, then set to 0.
  284. * @slot: The slot of the binding.
  285. */
  286. static struct vmw_ctx_bindinfo *
  287. vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
  288. enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
  289. {
  290. const struct vmw_binding_info *b = &vmw_binding_infos[bt];
  291. size_t offset = b->offsets[shader_slot] + b->size*slot;
  292. return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
  293. }
/**
 * vmw_binding_drop: Stop tracking a context binding
 *
 * @bi: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
{
	list_del(&bi->ctx_list);
	if (!list_empty(&bi->res_list))
		list_del(&bi->res_list);
	/* A NULL ctx marks the slot unused; vmw_binding_add() checks it. */
	bi->ctx = NULL;
}
  310. /**
  311. * vmw_binding_add: Start tracking a context binding
  312. *
  313. * @cbs: Pointer to the context binding state tracker.
  314. * @bi: Information about the binding to track.
  315. * @shader_slot: The shader slot of the binding.
  316. * @slot: The slot of the binding.
  317. *
  318. * Starts tracking the binding in the context binding
  319. * state structure @cbs.
  320. */
  321. void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
  322. const struct vmw_ctx_bindinfo *bi,
  323. u32 shader_slot, u32 slot)
  324. {
  325. struct vmw_ctx_bindinfo *loc =
  326. vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
  327. const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
  328. if (loc->ctx != NULL)
  329. vmw_binding_drop(loc);
  330. memcpy(loc, bi, b->size);
  331. loc->scrubbed = false;
  332. list_add(&loc->ctx_list, &cbs->list);
  333. INIT_LIST_HEAD(&loc->res_list);
  334. }
  335. /**
  336. * vmw_binding_cb_offset_update: Update the offset of a cb binding
  337. *
  338. * @cbs: Pointer to the context binding state tracker.
  339. * @shader_slot: The shader slot of the binding.
  340. * @slot: The slot of the binding.
  341. * @offsetInBytes: The new offset of the binding.
  342. *
  343. * Updates the offset of an existing cb binding in the context binding
  344. * state structure @cbs.
  345. */
  346. void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs,
  347. u32 shader_slot, u32 slot, u32 offsetInBytes)
  348. {
  349. struct vmw_ctx_bindinfo *loc =
  350. vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot);
  351. struct vmw_ctx_bindinfo_cb *loc_cb =
  352. (struct vmw_ctx_bindinfo_cb *)((u8 *) loc);
  353. loc_cb->offset = offsetInBytes;
  354. }
/**
 * vmw_binding_add_uav_index - Add UAV index for tracking.
 * @cbs: Pointer to the context binding state tracker.
 * @slot: UAV type to which bind this index.
 * @index: The splice index to track.
 *
 * The stored index is later copied to the persistent tracker by
 * vmw_binding_state_commit().
 */
void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
			       uint32 index)
{
	cbs->ua_views[slot].index = index;
}
/**
 * vmw_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @from: Staged binding info built during execbuf
 * @bi: Information about the binding to track.
 *
 * @bi points into @from's embedded binding storage. Since @cbs and @from are
 * the same structure type, the byte offset of @bi within @from locates the
 * corresponding slot in @cbs, to which the binding is copied.
 */
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
				 const struct vmw_ctx_binding_state *from,
				 const struct vmw_ctx_bindinfo *bi)
{
	size_t offset = (unsigned long)bi - (unsigned long)from;
	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
		((unsigned long) cbs + offset);

	if (loc->ctx != NULL) {
		/* Replacing an active binding with a scrubbed one is
		 * unexpected.
		 */
		WARN_ON(bi->scrubbed);
		vmw_binding_drop(loc);
	}

	/* Only track bindings that still point to a resource. */
	if (bi->res != NULL) {
		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &loc->res->binding_head);
	}
}
  391. /**
  392. * vmw_binding_state_kill - Kill all bindings associated with a
  393. * struct vmw_ctx_binding state structure, and re-initialize the structure.
  394. *
  395. * @cbs: Pointer to the context binding state tracker.
  396. *
  397. * Emits commands to scrub all bindings associated with the
  398. * context binding state tracker. Then re-initializes the whole structure.
  399. */
  400. void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
  401. {
  402. struct vmw_ctx_bindinfo *entry, *next;
  403. vmw_binding_state_scrub(cbs);
  404. list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
  405. vmw_binding_drop(entry);
  406. }
  407. /**
  408. * vmw_binding_state_scrub - Scrub all bindings associated with a
  409. * struct vmw_ctx_binding state structure.
  410. *
  411. * @cbs: Pointer to the context binding state tracker.
  412. *
  413. * Emits commands to scrub all bindings associated with the
  414. * context binding state tracker.
  415. */
  416. void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
  417. {
  418. struct vmw_ctx_bindinfo *entry;
  419. list_for_each_entry(entry, &cbs->list, ctx_list) {
  420. if (!entry->scrubbed) {
  421. (void) vmw_binding_infos[entry->bt].scrub_func
  422. (entry, false);
  423. entry->scrubbed = true;
  424. }
  425. }
  426. (void) vmw_binding_emit_dirty(cbs);
  427. }
  428. /**
  429. * vmw_binding_res_list_kill - Kill all bindings on a
  430. * resource binding list
  431. *
  432. * @head: list head of resource binding list
  433. *
  434. * Kills all bindings associated with a specific resource. Typically
  435. * called before the resource is destroyed.
  436. */
  437. void vmw_binding_res_list_kill(struct list_head *head)
  438. {
  439. struct vmw_ctx_bindinfo *entry, *next;
  440. vmw_binding_res_list_scrub(head);
  441. list_for_each_entry_safe(entry, next, head, res_list)
  442. vmw_binding_drop(entry);
  443. }
  444. /**
  445. * vmw_binding_res_list_scrub - Scrub all bindings on a
  446. * resource binding list
  447. *
  448. * @head: list head of resource binding list
  449. *
  450. * Scrub all bindings associated with a specific resource. Typically
  451. * called before the resource is evicted.
  452. */
  453. void vmw_binding_res_list_scrub(struct list_head *head)
  454. {
  455. struct vmw_ctx_bindinfo *entry;
  456. list_for_each_entry(entry, head, res_list) {
  457. if (!entry->scrubbed) {
  458. (void) vmw_binding_infos[entry->bt].scrub_func
  459. (entry, false);
  460. entry->scrubbed = true;
  461. }
  462. }
  463. list_for_each_entry(entry, head, res_list) {
  464. struct vmw_ctx_binding_state *cbs =
  465. vmw_context_binding_state(entry->ctx);
  466. (void) vmw_binding_emit_dirty(cbs);
  467. }
  468. }
  469. /**
  470. * vmw_binding_state_commit - Commit staged binding info
  471. *
  472. * @to: Staged binding info area to copy into to.
  473. * @from: Staged binding info built during execbuf.
  474. *
  475. * Transfers binding info from a temporary structure
  476. * (typically used by execbuf) to the persistent
  477. * structure in the context. This can be done once commands have been
  478. * submitted to hardware
  479. */
  480. void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
  481. struct vmw_ctx_binding_state *from)
  482. {
  483. struct vmw_ctx_bindinfo *entry, *next;
  484. list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
  485. vmw_binding_transfer(to, from, entry);
  486. vmw_binding_drop(entry);
  487. }
  488. /* Also transfer uav splice indices */
  489. to->ua_views[0].index = from->ua_views[0].index;
  490. to->ua_views[1].index = from->ua_views[1].index;
  491. }
  492. /**
  493. * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
  494. *
  495. * @cbs: Pointer to the context binding state tracker.
  496. *
  497. * Walks through the context binding list and rebinds all scrubbed
  498. * resources.
  499. */
  500. int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
  501. {
  502. struct vmw_ctx_bindinfo *entry;
  503. int ret;
  504. list_for_each_entry(entry, &cbs->list, ctx_list) {
  505. if (likely(!entry->scrubbed))
  506. continue;
  507. if ((entry->res == NULL || entry->res->id ==
  508. SVGA3D_INVALID_ID))
  509. continue;
  510. ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
  511. if (unlikely(ret != 0))
  512. return ret;
  513. entry->scrubbed = false;
  514. }
  515. return vmw_binding_emit_dirty(cbs);
  516. }
  517. /**
  518. * vmw_binding_scrub_shader - scrub a shader binding from a context.
  519. *
  520. * @bi: single binding information.
  521. * @rebind: Whether to issue a bind instead of scrub command.
  522. */
  523. static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
  524. {
  525. struct vmw_ctx_bindinfo_shader *binding =
  526. container_of(bi, typeof(*binding), bi);
  527. struct vmw_private *dev_priv = bi->ctx->dev_priv;
  528. struct {
  529. SVGA3dCmdHeader header;
  530. SVGA3dCmdSetShader body;
  531. } *cmd;
  532. cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
  533. if (unlikely(cmd == NULL))
  534. return -ENOMEM;
  535. cmd->header.id = SVGA_3D_CMD_SET_SHADER;
  536. cmd->header.size = sizeof(cmd->body);
  537. cmd->body.cid = bi->ctx->id;
  538. cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
  539. cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
  540. vmw_cmd_commit(dev_priv, sizeof(*cmd));
  541. return 0;
  542. }
/**
 * vmw_binding_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * Return: 0 on success, -ENOMEM if command space could not be reserved.
 */
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_ctx_bindinfo_view *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = binding->slot;
	/* Rebind re-attaches the surface; scrub sets an invalid id. */
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	return 0;
}
/**
 * vmw_binding_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * Return: 0 on success, -ENOMEM if command space could not be reserved.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_ctx_bindinfo_tex *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		/* One command carrying a single texture-state entry. */
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = binding->texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	/* Rebind re-attaches the texture; scrub sets an invalid id. */
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	return 0;
}
  607. /**
  608. * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
  609. *
  610. * @bi: single binding information.
  611. * @rebind: Whether to issue a bind instead of scrub command.
  612. */
  613. static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
  614. {
  615. struct vmw_ctx_bindinfo_shader *binding =
  616. container_of(bi, typeof(*binding), bi);
  617. struct vmw_private *dev_priv = bi->ctx->dev_priv;
  618. struct {
  619. SVGA3dCmdHeader header;
  620. SVGA3dCmdDXSetShader body;
  621. } *cmd;
  622. cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
  623. if (unlikely(cmd == NULL))
  624. return -ENOMEM;
  625. cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
  626. cmd->header.size = sizeof(cmd->body);
  627. cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
  628. cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
  629. vmw_cmd_commit(dev_priv, sizeof(*cmd));
  630. return 0;
  631. }
  632. /**
  633. * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
  634. *
  635. * @bi: single binding information.
  636. * @rebind: Whether to issue a bind instead of scrub command.
  637. */
  638. static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
  639. {
  640. struct vmw_ctx_bindinfo_cb *binding =
  641. container_of(bi, typeof(*binding), bi);
  642. struct vmw_private *dev_priv = bi->ctx->dev_priv;
  643. struct {
  644. SVGA3dCmdHeader header;
  645. SVGA3dCmdDXSetSingleConstantBuffer body;
  646. } *cmd;
  647. cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
  648. if (unlikely(cmd == NULL))
  649. return -ENOMEM;
  650. cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
  651. cmd->header.size = sizeof(cmd->body);
  652. cmd->body.slot = binding->slot;
  653. cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
  654. if (rebind) {
  655. cmd->body.offsetInBytes = binding->offset;
  656. cmd->body.sizeInBytes = binding->size;
  657. cmd->body.sid = bi->res->id;
  658. } else {
  659. cmd->body.offsetInBytes = 0;
  660. cmd->body.sizeInBytes = 0;
  661. cmd->body.sid = SVGA3D_INVALID_ID;
  662. }
  663. vmw_cmd_commit(dev_priv, sizeof(*cmd));
  664. return 0;
  665. }
  666. /**
  667. * vmw_collect_view_ids - Build view id data for a view binding command
  668. * without checking which bindings actually need to be emitted
  669. *
  670. * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  671. * @biv: Pointer to where the binding info array is stored in @cbs
  672. * @max_num: Maximum number of entries in the @bi array.
  673. *
  674. * Scans the @bi array for bindings and builds a buffer of view id data.
  675. * Stops at the first non-existing binding in the @bi array.
  676. * On output, @cbs->bind_cmd_count contains the number of bindings to be
  677. * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
  678. * contains the command data.
  679. */
  680. static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
  681. const struct vmw_ctx_bindinfo_view *biv,
  682. u32 max_num)
  683. {
  684. unsigned long i;
  685. cbs->bind_cmd_count = 0;
  686. cbs->bind_first_slot = 0;
  687. for (i = 0; i < max_num; ++i, ++biv) {
  688. if (!biv->bi.ctx)
  689. break;
  690. cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
  691. ((biv->bi.scrubbed) ?
  692. SVGA3D_INVALID_ID : biv->bi.res->id);
  693. }
  694. }
/**
 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and
 * builds a buffer of view id data.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 * binding, and @cbs->bind_cmd_buffer contains the command data.
 */
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
				       const struct vmw_ctx_bindinfo *bi,
				       unsigned long *dirty,
				       u32 max_num)
{
	const struct vmw_ctx_bindinfo_view *biv =
		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
	unsigned long i, next_bit;

	cbs->bind_cmd_count = 0;
	/*
	 * The device command takes a contiguous slot range, so emit from the
	 * first dirty slot through the last dirty slot, including any clean
	 * slots in between (re-emitted with their current ids).
	 */
	i = find_first_bit(dirty, max_num);
	next_bit = i;
	cbs->bind_first_slot = i;

	biv += i;
	for (; i < max_num; ++i, ++biv) {
		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
			((!biv->bi.ctx || biv->bi.scrubbed) ?
			SVGA3D_INVALID_ID : biv->bi.res->id);

		/* When the current slot was dirty, look up the next dirty
		 * one; if there is none past this point, we are done.
		 */
		if (next_bit == i) {
			next_bit = find_next_bit(dirty, max_num, i + 1);
			if (next_bit >= max_num)
				break;
		}
	}
}
/**
 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @shader_slot: The shader slot of the binding.
 *
 * Return: 0 on success (or nothing to emit), negative error code on failure.
 */
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
			   int shader_slot)
{
	const struct vmw_ctx_bindinfo *loc =
		&cbs->per_shader[shader_slot].shader_res[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	/* Gather ids for the dirty shader-resource slot range. */
	vmw_collect_dirty_view_ids(cbs, loc,
				   cbs->per_shader[shader_slot].dirty_sr,
				   SVGA3D_DX_MAX_SRVIEWS);
	if (cbs->bind_cmd_count == 0)
		return 0;

	/* The view id array is appended after the fixed command body. */
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
	cmd->header.size = sizeof(cmd->body) + view_id_size;
	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
	cmd->body.startView = cbs->bind_first_slot;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);
	/* The emitted slot range is now in sync with the device. */
	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
		     cbs->bind_first_slot, cbs->bind_cmd_count);

	return 0;
}
/**
 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Unlike the other emit helpers, this always sends a command, since the
 * depth/stencil view must be (re)set even when no color targets are bound.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_view_ids(cbs, loc, SVGA3D_DX_MAX_RENDER_TARGETS);
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
	cmd->header.size = sizeof(cmd->body) + view_id_size;

	/* A scrubbed or absent depth/stencil binding maps to the invalid id. */
	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
	else
		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}
  800. /**
  801. * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
  802. * without checking which bindings actually need to be emitted
  803. *
  804. * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  805. * @biso: Pointer to where the binding info array is stored in @cbs
  806. * @max_num: Maximum number of entries in the @bi array.
  807. *
  808. * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
  809. * Stops at the first non-existing binding in the @bi array.
  810. * On output, @cbs->bind_cmd_count contains the number of bindings to be
  811. * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
  812. * contains the command data.
  813. */
  814. static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
  815. const struct vmw_ctx_bindinfo_so_target *biso,
  816. u32 max_num)
  817. {
  818. unsigned long i;
  819. SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
  820. cbs->bind_cmd_count = 0;
  821. cbs->bind_first_slot = 0;
  822. for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
  823. ++cbs->bind_cmd_count) {
  824. if (!biso->bi.ctx)
  825. break;
  826. if (!biso->bi.scrubbed) {
  827. so_buffer->sid = biso->bi.res->id;
  828. so_buffer->offset = biso->offset;
  829. so_buffer->sizeInBytes = biso->size;
  830. } else {
  831. so_buffer->sid = SVGA3D_INVALID_ID;
  832. so_buffer->offset = 0;
  833. so_buffer->sizeInBytes = 0;
  834. }
  835. }
  836. }
/**
 * vmw_emit_set_so_target - Issue delayed streamout binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Return: 0 on success (or nothing to emit), negative error code on failure.
 */
static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
	} *cmd;
	size_t cmd_size, so_target_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
	if (cbs->bind_cmd_count == 0)
		return 0;

	/* The SVGA3dSoTarget array is appended after the fixed body. */
	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
	cmd_size = sizeof(*cmd) + so_target_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
	cmd->header.size = sizeof(cmd->body) + so_target_size;
	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}
  865. /**
  866. * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
  867. *
  868. * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  869. *
  870. */
  871. static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
  872. {
  873. struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
  874. u32 i;
  875. int ret;
  876. for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
  877. if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
  878. continue;
  879. ret = vmw_emit_set_sr(cbs, i);
  880. if (ret)
  881. break;
  882. __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
  883. }
  884. return 0;
  885. }
  886. /**
  887. * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
  888. * SVGA3dCmdDXSetVertexBuffers command
  889. *
  890. * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  891. * @bi: Pointer to where the binding info array is stored in @cbs
  892. * @dirty: Bitmap indicating which bindings need to be emitted.
  893. * @max_num: Maximum number of entries in the @bi array.
  894. *
  895. * Scans the @bi array for bindings that need to be emitted and
  896. * builds a buffer of SVGA3dVertexBuffer data.
  897. * On output, @cbs->bind_cmd_count contains the number of bindings to be
  898. * emitted, @cbs->bind_first_slot indicates the index of the first emitted
  899. * binding, and @cbs->bind_cmd_buffer contains the command data.
  900. */
  901. static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
  902. const struct vmw_ctx_bindinfo *bi,
  903. unsigned long *dirty,
  904. u32 max_num)
  905. {
  906. const struct vmw_ctx_bindinfo_vb *biv =
  907. container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
  908. unsigned long i, next_bit;
  909. SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
  910. cbs->bind_cmd_count = 0;
  911. i = find_first_bit(dirty, max_num);
  912. next_bit = i;
  913. cbs->bind_first_slot = i;
  914. biv += i;
  915. for (; i < max_num; ++i, ++biv, ++vbs) {
  916. if (!biv->bi.ctx || biv->bi.scrubbed) {
  917. vbs->sid = SVGA3D_INVALID_ID;
  918. vbs->stride = 0;
  919. vbs->offset = 0;
  920. } else {
  921. vbs->sid = biv->bi.res->id;
  922. vbs->stride = biv->stride;
  923. vbs->offset = biv->offset;
  924. }
  925. cbs->bind_cmd_count++;
  926. if (next_bit == i) {
  927. next_bit = find_next_bit(dirty, max_num, i + 1);
  928. if (next_bit >= max_num)
  929. break;
  930. }
  931. }
  932. }
/**
 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Return: 0 on success (or nothing to emit), negative error code on failure.
 */
static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo *loc =
		&cbs->vertex_buffers[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
	} *cmd;
	size_t cmd_size, set_vb_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	/* Gather SVGA3dVertexBuffer data for the dirty slot range. */
	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
			      SVGA3D_DX_MAX_VERTEXBUFFERS);
	if (cbs->bind_cmd_count == 0)
		return 0;

	/* The vertex buffer array is appended after the fixed body. */
	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
	cmd_size = sizeof(*cmd) + set_vb_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
	cmd->header.size = sizeof(cmd->body) + set_vb_size;
	cmd->body.startBuffer = cbs->bind_first_slot;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);
	/* The emitted slot range is now in sync with the device. */
	bitmap_clear(cbs->dirty_vb,
		     cbs->bind_first_slot, cbs->bind_cmd_count);

	return 0;
}
/**
 * vmw_emit_set_uav - Issue delayed UA view binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
	cmd->header.size = sizeof(cmd->body) + view_id_size;

	/* Splice index is specified by user-space */
	cmd->body.uavSpliceIndex = cbs->ua_views[0].index;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}
/**
 * vmw_emit_set_cs_uav - Issue delayed compute shader UA view binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0];
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv));
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
	cmd->header.size = sizeof(cmd->body) + view_id_size;

	/* Start index is specified by user-space */
	cmd->body.startIndex = cbs->ua_views[1].index;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_cmd_commit(ctx->dev_priv, cmd_size);

	return 0;
}
/**
 * vmw_binding_emit_dirty - Issue delayed binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * This function issues the delayed binding commands that arise from
 * previous scrub / unscrub calls. These binding commands are typically
 * commands that batch a number of bindings and therefore it makes sense
 * to delay them.
 *
 * Return: 0 on success, or the first error from an emit helper. On error,
 * the failing dirty bit is left set so emission can be retried.
 */
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
{
	int ret = 0;
	unsigned long hit = 0;

	/* Each set bit in cbs->dirty selects one batched command type. */
	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
	      < VMW_BINDING_NUM_BITS) {

		switch (hit) {
		case VMW_BINDING_RT_BIT:
			ret = vmw_emit_set_rt(cbs);
			break;
		case VMW_BINDING_PS_BIT:
			ret = vmw_binding_emit_dirty_ps(cbs);
			break;
		case VMW_BINDING_SO_T_BIT:
			ret = vmw_emit_set_so_target(cbs);
			break;
		case VMW_BINDING_VB_BIT:
			ret = vmw_emit_set_vb(cbs);
			break;
		case VMW_BINDING_UAV_BIT:
			ret = vmw_emit_set_uav(cbs);
			break;
		case VMW_BINDING_CS_UAV_BIT:
			ret = vmw_emit_set_cs_uav(cbs);
			break;
		default:
			/* An unknown dirty bit means broken state tracking. */
			BUG();
		}
		if (ret)
			return ret;

		__clear_bit(hit, &cbs->dirty);
		hit++;
	}

	return 0;
}
  1058. /**
  1059. * vmw_binding_scrub_sr - Schedule a dx shaderresource binding
  1060. * scrub from a context
  1061. *
  1062. * @bi: single binding information.
  1063. * @rebind: Whether to issue a bind instead of scrub command.
  1064. */
  1065. static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
  1066. {
  1067. struct vmw_ctx_bindinfo_view *biv =
  1068. container_of(bi, struct vmw_ctx_bindinfo_view, bi);
  1069. struct vmw_ctx_binding_state *cbs =
  1070. vmw_context_binding_state(bi->ctx);
  1071. __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
  1072. __set_bit(VMW_BINDING_PS_SR_BIT,
  1073. &cbs->per_shader[biv->shader_slot].dirty);
  1074. __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
  1075. return 0;
  1076. }
  1077. /**
  1078. * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
  1079. * scrub from a context
  1080. *
  1081. * @bi: single binding information.
  1082. * @rebind: Whether to issue a bind instead of scrub command.
  1083. */
  1084. static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
  1085. {
  1086. struct vmw_ctx_binding_state *cbs =
  1087. vmw_context_binding_state(bi->ctx);
  1088. __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
  1089. return 0;
  1090. }
  1091. /**
  1092. * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
  1093. * scrub from a context
  1094. *
  1095. * @bi: single binding information.
  1096. * @rebind: Whether to issue a bind instead of scrub command.
  1097. */
  1098. static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
  1099. {
  1100. struct vmw_ctx_binding_state *cbs =
  1101. vmw_context_binding_state(bi->ctx);
  1102. __set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);
  1103. return 0;
  1104. }
  1105. /**
  1106. * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
  1107. * scrub from a context
  1108. *
  1109. * @bi: single binding information.
  1110. * @rebind: Whether to issue a bind instead of scrub command.
  1111. */
  1112. static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
  1113. {
  1114. struct vmw_ctx_bindinfo_vb *bivb =
  1115. container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
  1116. struct vmw_ctx_binding_state *cbs =
  1117. vmw_context_binding_state(bi->ctx);
  1118. __set_bit(bivb->slot, cbs->dirty_vb);
  1119. __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
  1120. return 0;
  1121. }
  1122. /**
  1123. * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
  1124. *
  1125. * @bi: single binding information.
  1126. * @rebind: Whether to issue a bind instead of scrub command.
  1127. */
  1128. static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
  1129. {
  1130. struct vmw_ctx_bindinfo_ib *binding =
  1131. container_of(bi, typeof(*binding), bi);
  1132. struct vmw_private *dev_priv = bi->ctx->dev_priv;
  1133. struct {
  1134. SVGA3dCmdHeader header;
  1135. SVGA3dCmdDXSetIndexBuffer body;
  1136. } *cmd;
  1137. cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
  1138. if (unlikely(cmd == NULL))
  1139. return -ENOMEM;
  1140. cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
  1141. cmd->header.size = sizeof(cmd->body);
  1142. if (rebind) {
  1143. cmd->body.sid = bi->res->id;
  1144. cmd->body.format = binding->format;
  1145. cmd->body.offset = binding->offset;
  1146. } else {
  1147. cmd->body.sid = SVGA3D_INVALID_ID;
  1148. cmd->body.format = 0;
  1149. cmd->body.offset = 0;
  1150. }
  1151. vmw_cmd_commit(dev_priv, sizeof(*cmd));
  1152. return 0;
  1153. }
/**
 * vmw_binding_scrub_uav - Schedule a dx UA view binding scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command (not needed
 *          here; the batched command is emitted later from the dirty bit).
 */
static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

	__set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
	return 0;
}
/**
 * vmw_binding_scrub_cs_uav - Schedule a dx compute shader UA view binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command (not needed
 *          here; the batched command is emitted later from the dirty bit).
 */
static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

	__set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
	return 0;
}
  1166. /**
  1167. * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
  1168. * @bi: Single binding information.
  1169. * @rebind: Whether to issue a bind instead of scrub command.
  1170. */
  1171. static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
  1172. {
  1173. struct vmw_ctx_bindinfo_so *binding =
  1174. container_of(bi, typeof(*binding), bi);
  1175. struct vmw_private *dev_priv = bi->ctx->dev_priv;
  1176. struct {
  1177. SVGA3dCmdHeader header;
  1178. SVGA3dCmdDXSetStreamOutput body;
  1179. } *cmd;
  1180. cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
  1181. if (!cmd)
  1182. return -ENOMEM;
  1183. cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
  1184. cmd->header.size = sizeof(cmd->body);
  1185. cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
  1186. vmw_cmd_commit(dev_priv, sizeof(*cmd));
  1187. return 0;
  1188. }
  1189. /**
  1190. * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
  1191. *
  1192. * @dev_priv: Pointer to a device private structure.
  1193. *
  1194. * Returns a pointer to a newly allocated struct or an error pointer on error.
  1195. */
  1196. struct vmw_ctx_binding_state *
  1197. vmw_binding_state_alloc(struct vmw_private *dev_priv)
  1198. {
  1199. struct vmw_ctx_binding_state *cbs;
  1200. cbs = vzalloc(sizeof(*cbs));
  1201. if (!cbs) {
  1202. return ERR_PTR(-ENOMEM);
  1203. }
  1204. cbs->dev_priv = dev_priv;
  1205. INIT_LIST_HEAD(&cbs->list);
  1206. return cbs;
  1207. }
/**
 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
 */
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
	/* Counterpart of the vzalloc() in vmw_binding_state_alloc(). */
	vfree(cbs);
}
/**
 * vmw_binding_state_list - Get the binding list of a
 * struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state
 *
 * Returns the binding list which can be used to traverse through the bindings
 * and access the resource information of all bindings.
 */
struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
{
	return &cbs->list;
}
/**
 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
 *
 * Drops all bindings registered in @cbs. No device binding actions are
 * performed.
 */
void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_bindinfo *entry, *next;

	/* _safe variant: vmw_binding_drop() unlinks @entry from the list. */
	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_binding_drop(entry);
}
/**
 * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
 * @binding_type: The binding type
 *
 * Each time a resource is put on the validation list as the result of a
 * context binding referencing it, we need to determine whether that resource
 * will be dirtied (written to by the GPU) as a result of the corresponding
 * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target
 * and unordered access view bindings are capable of dirtying its resource.
 *
 * Return: Whether the binding type dirties the resource its binding points to.
 */
u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
{
	/* Types absent from the initializer implicitly map to 0 (no dirty). */
	static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
		[vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
		[vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
	};

	/* Review this function as new bindings are added. */
	BUILD_BUG_ON(vmw_ctx_binding_max != 14);
	return is_binding_dirtying[binding_type];
}
/*
 * This function is unused at run-time, and only used to hold various build
 * asserts important for code optimization assumptions.
 */
static void vmw_binding_build_asserts(void)
{
	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
	BUILD_BUG_ON(SVGA3D_DX_MAX_RENDER_TARGETS > SVGA3D_RT_MAX);
	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));

	/*
	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
	 * view id arrays.
	 */
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);

	/*
	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
	 */
	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}