vmwgfx_surface.c

  1. // SPDX-License-Identifier: GPL-2.0 OR MIT
  2. /**************************************************************************
  3. *
  4. * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include <drm/ttm/ttm_placement.h>
  28. #include "vmwgfx_drv.h"
  29. #include "vmwgfx_resource_priv.h"
  30. #include "vmwgfx_so.h"
  31. #include "vmwgfx_binding.h"
  32. #include "vmw_surface_cache.h"
  33. #include "device_include/svga3d_surfacedefs.h"
  34. #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
  35. #define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
  36. #define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
  37. (svga3d_flags & ((uint64_t)U32_MAX))
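/*
 * Illustrative use of the helpers above (variable names are hypothetical):
 * the driver stores surface flags as 64 bits and splits them whenever a
 * command or UAPI struct only carries 32 bits, e.g.
 *
 *	SVGA3dSurfaceAllFlags all = SVGA3D_FLAGS_64(upper32, lower32);
 *	u32 lower = SVGA3D_FLAGS_LOWER_32(all);
 *	u32 upper = SVGA3D_FLAGS_UPPER_32(all);
 */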
  38. /**
  39. * struct vmw_user_surface - User-space visible surface resource
  40. *
  41. * @prime: The TTM prime object.
  42. * @base: The TTM base object handling user-space visibility.
  43. * @srf: The surface metadata.
  44. * @master: Master of the creating client. Used for security check.
  45. */
  46. struct vmw_user_surface {
  47. struct ttm_prime_object prime;
  48. struct vmw_surface srf;
  49. struct drm_master *master;
  50. };
  51. /**
  52. * struct vmw_surface_offset - Backing store mip level offset info
  53. *
  54. * @face: Surface face.
  55. * @mip: Mip level.
  56. * @bo_offset: Offset into backing store of this mip level.
  57. *
  58. */
  59. struct vmw_surface_offset {
  60. uint32_t face;
  61. uint32_t mip;
  62. uint32_t bo_offset;
  63. };
  64. /**
  65. * struct vmw_surface_dirty - Surface dirty-tracker
  66. * @cache: Cached layout information of the surface.
  67. * @num_subres: Number of subresources.
  68. * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
  69. */
  70. struct vmw_surface_dirty {
  71. struct vmw_surface_cache cache;
  72. u32 num_subres;
  73. SVGA3dBox boxes[];
  74. };
  75. static void vmw_user_surface_free(struct vmw_resource *res);
  76. static struct vmw_resource *
  77. vmw_user_surface_base_to_res(struct ttm_base_object *base);
  78. static int vmw_legacy_srf_bind(struct vmw_resource *res,
  79. struct ttm_validate_buffer *val_buf);
  80. static int vmw_legacy_srf_unbind(struct vmw_resource *res,
  81. bool readback,
  82. struct ttm_validate_buffer *val_buf);
  83. static int vmw_legacy_srf_create(struct vmw_resource *res);
  84. static int vmw_legacy_srf_destroy(struct vmw_resource *res);
  85. static int vmw_gb_surface_create(struct vmw_resource *res);
  86. static int vmw_gb_surface_bind(struct vmw_resource *res,
  87. struct ttm_validate_buffer *val_buf);
  88. static int vmw_gb_surface_unbind(struct vmw_resource *res,
  89. bool readback,
  90. struct ttm_validate_buffer *val_buf);
  91. static int vmw_gb_surface_destroy(struct vmw_resource *res);
  92. static int
  93. vmw_gb_surface_define_internal(struct drm_device *dev,
  94. struct drm_vmw_gb_surface_create_ext_req *req,
  95. struct drm_vmw_gb_surface_create_rep *rep,
  96. struct drm_file *file_priv);
  97. static int
  98. vmw_gb_surface_reference_internal(struct drm_device *dev,
  99. struct drm_vmw_surface_arg *req,
  100. struct drm_vmw_gb_surface_ref_ext_rep *rep,
  101. struct drm_file *file_priv);
  102. static void vmw_surface_dirty_free(struct vmw_resource *res);
  103. static int vmw_surface_dirty_alloc(struct vmw_resource *res);
  104. static int vmw_surface_dirty_sync(struct vmw_resource *res);
  105. static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
  106. size_t end);
  107. static int vmw_surface_clean(struct vmw_resource *res);
  108. static const struct vmw_user_resource_conv user_surface_conv = {
  109. .object_type = VMW_RES_SURFACE,
  110. .base_obj_to_res = vmw_user_surface_base_to_res,
  111. .res_free = vmw_user_surface_free
  112. };
  113. const struct vmw_user_resource_conv *user_surface_converter =
  114. &user_surface_conv;
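/*
 * Resource callbacks for legacy surfaces, i.e. surfaces created on hosts
 * without guest-backed object (MOB) support. vmw_surface_init() picks this
 * table or vmw_gb_surface_func below depending on dev_priv->has_mob.
 */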
  115. static const struct vmw_res_func vmw_legacy_surface_func = {
  116. .res_type = vmw_res_surface,
  117. .needs_backup = false,
  118. .may_evict = true,
  119. .prio = 1,
  120. .dirty_prio = 1,
  121. .type_name = "legacy surfaces",
  122. .backup_placement = &vmw_srf_placement,
  123. .create = &vmw_legacy_srf_create,
  124. .destroy = &vmw_legacy_srf_destroy,
  125. .bind = &vmw_legacy_srf_bind,
  126. .unbind = &vmw_legacy_srf_unbind
  127. };
  128. static const struct vmw_res_func vmw_gb_surface_func = {
  129. .res_type = vmw_res_surface,
  130. .needs_backup = true,
  131. .may_evict = true,
  132. .prio = 1,
  133. .dirty_prio = 2,
  134. .type_name = "guest backed surfaces",
  135. .backup_placement = &vmw_mob_placement,
  136. .create = vmw_gb_surface_create,
  137. .destroy = vmw_gb_surface_destroy,
  138. .bind = vmw_gb_surface_bind,
  139. .unbind = vmw_gb_surface_unbind,
  140. .dirty_alloc = vmw_surface_dirty_alloc,
  141. .dirty_free = vmw_surface_dirty_free,
  142. .dirty_sync = vmw_surface_dirty_sync,
  143. .dirty_range_add = vmw_surface_dirty_range_add,
  144. .clean = vmw_surface_clean,
  145. };
  146. /*
  147. * struct vmw_surface_dma - SVGA3D DMA command
  148. */
  149. struct vmw_surface_dma {
  150. SVGA3dCmdHeader header;
  151. SVGA3dCmdSurfaceDMA body;
  152. SVGA3dCopyBox cb;
  153. SVGA3dCmdSurfaceDMASuffix suffix;
  154. };
  155. /*
  156. * struct vmw_surface_define - SVGA3D Surface Define command
  157. */
  158. struct vmw_surface_define {
  159. SVGA3dCmdHeader header;
  160. SVGA3dCmdDefineSurface body;
  161. };
  162. /*
  163. * struct vmw_surface_destroy - SVGA3D Surface Destroy command
  164. */
  165. struct vmw_surface_destroy {
  166. SVGA3dCmdHeader header;
  167. SVGA3dCmdDestroySurface body;
  168. };
  169. /**
  170. * vmw_surface_dma_size - Compute fifo size for a dma command.
  171. *
  172. * @srf: Pointer to a struct vmw_surface
  173. *
  174. * Computes the required size for a surface dma command for backup or
  175. * restoration of the surface represented by @srf.
  176. */
  177. static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
  178. {
  179. return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
  180. }
  181. /**
  182. * vmw_surface_define_size - Compute fifo size for a surface define command.
  183. *
  184. * @srf: Pointer to a struct vmw_surface
  185. *
  186. * Computes the required size for a surface define command for the definition
  187. * of the surface represented by @srf.
  188. */
  189. static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
  190. {
  191. return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
  192. sizeof(SVGA3dSize);
  193. }
  194. /**
  195. * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
  196. *
  197. * Computes the required size for a surface destroy command for the destruction
  198. * of a hw surface.
  199. */
  200. static inline uint32_t vmw_surface_destroy_size(void)
  201. {
  202. return sizeof(struct vmw_surface_destroy);
  203. }
  204. /**
  205. * vmw_surface_destroy_encode - Encode a surface_destroy command.
  206. *
  207. * @id: The surface id
  208. * @cmd_space: Pointer to memory area in which the commands should be encoded.
  209. */
  210. static void vmw_surface_destroy_encode(uint32_t id,
  211. void *cmd_space)
  212. {
  213. struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
  214. cmd_space;
  215. cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
  216. cmd->header.size = sizeof(cmd->body);
  217. cmd->body.sid = id;
  218. }
  219. /**
  220. * vmw_surface_define_encode - Encode a surface_define command.
  221. *
  222. * @srf: Pointer to a struct vmw_surface object.
  223. * @cmd_space: Pointer to memory area in which the commands should be encoded.
  224. */
  225. static void vmw_surface_define_encode(const struct vmw_surface *srf,
  226. void *cmd_space)
  227. {
  228. struct vmw_surface_define *cmd = (struct vmw_surface_define *)
  229. cmd_space;
  230. struct drm_vmw_size *src_size;
  231. SVGA3dSize *cmd_size;
  232. uint32_t cmd_len;
  233. int i;
  234. cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
  235. sizeof(SVGA3dSize);
  236. cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
  237. cmd->header.size = cmd_len;
  238. cmd->body.sid = srf->res.id;
  239. /*
   240. * Downcast of surfaceFlags, which was upcast when received from user-space,
   241. * since the driver internally stores it as 64 bit.
   242. * For the legacy surface define command, only a 32 bit flag is supported.
  243. */
  244. cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
  245. cmd->body.format = srf->metadata.format;
  246. for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
  247. cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];
  248. cmd += 1;
  249. cmd_size = (SVGA3dSize *) cmd;
  250. src_size = srf->metadata.sizes;
  251. for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
  252. cmd_size->width = src_size->width;
  253. cmd_size->height = src_size->height;
  254. cmd_size->depth = src_size->depth;
  255. }
  256. }
  257. /**
  258. * vmw_surface_dma_encode - Encode a surface_dma command.
  259. *
  260. * @srf: Pointer to a struct vmw_surface object.
  261. * @cmd_space: Pointer to memory area in which the commands should be encoded.
  262. * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
  263. * should be placed or read from.
  264. * @to_surface: Boolean whether to DMA to the surface or from the surface.
  265. */
  266. static void vmw_surface_dma_encode(struct vmw_surface *srf,
  267. void *cmd_space,
  268. const SVGAGuestPtr *ptr,
  269. bool to_surface)
  270. {
  271. uint32_t i;
  272. struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
  273. const struct SVGA3dSurfaceDesc *desc =
  274. vmw_surface_get_desc(srf->metadata.format);
  275. for (i = 0; i < srf->metadata.num_sizes; ++i) {
  276. SVGA3dCmdHeader *header = &cmd->header;
  277. SVGA3dCmdSurfaceDMA *body = &cmd->body;
  278. SVGA3dCopyBox *cb = &cmd->cb;
  279. SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
  280. const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
  281. const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];
  282. header->id = SVGA_3D_CMD_SURFACE_DMA;
  283. header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
  284. body->guest.ptr = *ptr;
  285. body->guest.ptr.offset += cur_offset->bo_offset;
  286. body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
  287. body->host.sid = srf->res.id;
  288. body->host.face = cur_offset->face;
  289. body->host.mipmap = cur_offset->mip;
  290. body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
  291. SVGA3D_READ_HOST_VRAM);
  292. cb->x = 0;
  293. cb->y = 0;
  294. cb->z = 0;
  295. cb->srcx = 0;
  296. cb->srcy = 0;
  297. cb->srcz = 0;
  298. cb->w = cur_size->width;
  299. cb->h = cur_size->height;
  300. cb->d = cur_size->depth;
  301. suffix->suffixSize = sizeof(*suffix);
  302. suffix->maximumOffset =
  303. vmw_surface_get_image_buffer_size(desc, cur_size,
  304. body->guest.pitch);
  305. suffix->flags.discard = 0;
  306. suffix->flags.unsynchronized = 0;
  307. suffix->flags.reserved = 0;
  308. ++cmd;
  309. }
   310. }
  311. /**
  312. * vmw_hw_surface_destroy - destroy a Device surface
  313. *
  314. * @res: Pointer to a struct vmw_resource embedded in a struct
  315. * vmw_surface.
  316. *
   317. * Destroys the device surface associated with a struct vmw_surface, if
  318. * any, and adjusts resource count accordingly.
  319. */
  320. static void vmw_hw_surface_destroy(struct vmw_resource *res)
  321. {
  322. struct vmw_private *dev_priv = res->dev_priv;
  323. void *cmd;
  324. if (res->func->destroy == vmw_gb_surface_destroy) {
  325. (void) vmw_gb_surface_destroy(res);
  326. return;
  327. }
  328. if (res->id != -1) {
  329. cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
  330. if (unlikely(!cmd))
  331. return;
  332. vmw_surface_destroy_encode(res->id, cmd);
  333. vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
  334. /*
   335. * TODO: Consider using used_memory_size_atomic, or a separate lock,
  336. * to avoid taking dev_priv::cmdbuf_mutex in
  337. * the destroy path.
  338. */
  339. mutex_lock(&dev_priv->cmdbuf_mutex);
  340. dev_priv->used_memory_size -= res->backup_size;
  341. mutex_unlock(&dev_priv->cmdbuf_mutex);
  342. }
  343. }
  344. /**
  345. * vmw_legacy_srf_create - Create a device surface as part of the
  346. * resource validation process.
  347. *
   348. * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
   349. *
   350. * If the surface doesn't have a hw id, allocate one and create the device surface.
   351. *
   352. * Returns -EBUSY if there weren't sufficient device resources to
  353. * complete the validation. Retry after freeing up resources.
  354. *
  355. * May return other errors if the kernel is out of guest resources.
  356. */
  357. static int vmw_legacy_srf_create(struct vmw_resource *res)
  358. {
  359. struct vmw_private *dev_priv = res->dev_priv;
  360. struct vmw_surface *srf;
  361. uint32_t submit_size;
  362. uint8_t *cmd;
  363. int ret;
  364. if (likely(res->id != -1))
  365. return 0;
  366. srf = vmw_res_to_srf(res);
  367. if (unlikely(dev_priv->used_memory_size + res->backup_size >=
  368. dev_priv->memory_size))
  369. return -EBUSY;
  370. /*
  371. * Alloc id for the resource.
  372. */
  373. ret = vmw_resource_alloc_id(res);
  374. if (unlikely(ret != 0)) {
  375. DRM_ERROR("Failed to allocate a surface id.\n");
  376. goto out_no_id;
  377. }
  378. if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
  379. ret = -EBUSY;
  380. goto out_no_fifo;
  381. }
  382. /*
  383. * Encode surface define- commands.
  384. */
  385. submit_size = vmw_surface_define_size(srf);
  386. cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
  387. if (unlikely(!cmd)) {
  388. ret = -ENOMEM;
  389. goto out_no_fifo;
  390. }
  391. vmw_surface_define_encode(srf, cmd);
  392. vmw_cmd_commit(dev_priv, submit_size);
  393. vmw_fifo_resource_inc(dev_priv);
  394. /*
  395. * Surface memory usage accounting.
  396. */
  397. dev_priv->used_memory_size += res->backup_size;
  398. return 0;
  399. out_no_fifo:
  400. vmw_resource_release_id(res);
  401. out_no_id:
  402. return ret;
  403. }
  404. /**
  405. * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
  406. *
   407. * @res: Pointer to a struct vmw_resource embedded in a struct
   408. * vmw_surface.
   409. * @val_buf: Pointer to a struct ttm_validate_buffer containing
   410. * information about the backup buffer.
   411. * @bind: Boolean whether to DMA to the surface.
  412. *
  413. * Transfer backup data to or from a legacy surface as part of the
  414. * validation process.
  415. * May return other errors if the kernel is out of guest resources.
  416. * The backup buffer will be fenced or idle upon successful completion,
  417. * and if the surface needs persistent backup storage, the backup buffer
  418. * will also be returned reserved iff @bind is true.
  419. */
  420. static int vmw_legacy_srf_dma(struct vmw_resource *res,
  421. struct ttm_validate_buffer *val_buf,
  422. bool bind)
  423. {
  424. SVGAGuestPtr ptr;
  425. struct vmw_fence_obj *fence;
  426. uint32_t submit_size;
  427. struct vmw_surface *srf = vmw_res_to_srf(res);
  428. uint8_t *cmd;
  429. struct vmw_private *dev_priv = res->dev_priv;
  430. BUG_ON(!val_buf->bo);
  431. submit_size = vmw_surface_dma_size(srf);
  432. cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
  433. if (unlikely(!cmd))
  434. return -ENOMEM;
  435. vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
  436. vmw_surface_dma_encode(srf, cmd, &ptr, bind);
  437. vmw_cmd_commit(dev_priv, submit_size);
  438. /*
  439. * Create a fence object and fence the backup buffer.
  440. */
  441. (void) vmw_execbuf_fence_commands(NULL, dev_priv,
  442. &fence, NULL);
  443. vmw_bo_fence_single(val_buf->bo, fence);
  444. if (likely(fence != NULL))
  445. vmw_fence_obj_unreference(&fence);
  446. return 0;
  447. }
  448. /**
  449. * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
  450. * surface validation process.
  451. *
   452. * @res: Pointer to a struct vmw_resource embedded in a struct
  453. * vmw_surface.
  454. * @val_buf: Pointer to a struct ttm_validate_buffer containing
  455. * information about the backup buffer.
  456. *
  457. * This function will copy backup data to the surface if the
  458. * backup buffer is dirty.
  459. */
  460. static int vmw_legacy_srf_bind(struct vmw_resource *res,
  461. struct ttm_validate_buffer *val_buf)
  462. {
  463. if (!res->backup_dirty)
  464. return 0;
  465. return vmw_legacy_srf_dma(res, val_buf, true);
  466. }
  467. /**
  468. * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
  469. * surface eviction process.
  470. *
   471. * @res: Pointer to a struct vmw_resource embedded in a struct
   472. * vmw_surface.
   473. * @readback: Whether to read the surface contents back to the backup buffer; only true if the surface is dirty.
  474. * @val_buf: Pointer to a struct ttm_validate_buffer containing
  475. * information about the backup buffer.
  476. *
  477. * This function will copy backup data from the surface.
  478. */
  479. static int vmw_legacy_srf_unbind(struct vmw_resource *res,
  480. bool readback,
  481. struct ttm_validate_buffer *val_buf)
  482. {
  483. if (unlikely(readback))
  484. return vmw_legacy_srf_dma(res, val_buf, false);
  485. return 0;
  486. }
  487. /**
  488. * vmw_legacy_srf_destroy - Destroy a device surface as part of a
  489. * resource eviction process.
  490. *
   491. * @res: Pointer to a struct vmw_resource embedded in a struct
  492. * vmw_surface.
  493. */
  494. static int vmw_legacy_srf_destroy(struct vmw_resource *res)
  495. {
  496. struct vmw_private *dev_priv = res->dev_priv;
  497. uint32_t submit_size;
  498. uint8_t *cmd;
  499. BUG_ON(res->id == -1);
  500. /*
   501. * Encode the surface destroy command.
  502. */
  503. submit_size = vmw_surface_destroy_size();
  504. cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
  505. if (unlikely(!cmd))
  506. return -ENOMEM;
  507. vmw_surface_destroy_encode(res->id, cmd);
  508. vmw_cmd_commit(dev_priv, submit_size);
  509. /*
  510. * Surface memory usage accounting.
  511. */
  512. dev_priv->used_memory_size -= res->backup_size;
  513. /*
  514. * Release the surface ID.
  515. */
  516. vmw_resource_release_id(res);
  517. vmw_fifo_resource_dec(dev_priv);
  518. return 0;
  519. }
  520. /**
  521. * vmw_surface_init - initialize a struct vmw_surface
  522. *
  523. * @dev_priv: Pointer to a device private struct.
  524. * @srf: Pointer to the struct vmw_surface to initialize.
  525. * @res_free: Pointer to a resource destructor used to free
  526. * the object.
  527. */
  528. static int vmw_surface_init(struct vmw_private *dev_priv,
  529. struct vmw_surface *srf,
  530. void (*res_free) (struct vmw_resource *res))
  531. {
  532. int ret;
  533. struct vmw_resource *res = &srf->res;
  534. BUG_ON(!res_free);
  535. ret = vmw_resource_init(dev_priv, res, true, res_free,
  536. (dev_priv->has_mob) ? &vmw_gb_surface_func :
  537. &vmw_legacy_surface_func);
  538. if (unlikely(ret != 0)) {
  539. res_free(res);
  540. return ret;
  541. }
  542. /*
   543. * The surface won't be visible to the hardware until a
   544. * surface validation has taken place.
  545. */
  546. INIT_LIST_HEAD(&srf->view_list);
  547. res->hw_destroy = vmw_hw_surface_destroy;
  548. return ret;
  549. }
  550. /**
  551. * vmw_user_surface_base_to_res - TTM base object to resource converter for
  552. * user visible surfaces
  553. *
  554. * @base: Pointer to a TTM base object
  555. *
  556. * Returns the struct vmw_resource embedded in a struct vmw_surface
  557. * for the user-visible object identified by the TTM base object @base.
  558. */
  559. static struct vmw_resource *
  560. vmw_user_surface_base_to_res(struct ttm_base_object *base)
  561. {
  562. return &(container_of(base, struct vmw_user_surface,
  563. prime.base)->srf.res);
  564. }
  565. /**
  566. * vmw_user_surface_free - User visible surface resource destructor
  567. *
  568. * @res: A struct vmw_resource embedded in a struct vmw_surface.
  569. */
  570. static void vmw_user_surface_free(struct vmw_resource *res)
  571. {
  572. struct vmw_surface *srf = vmw_res_to_srf(res);
  573. struct vmw_user_surface *user_srf =
  574. container_of(srf, struct vmw_user_surface, srf);
  575. WARN_ON_ONCE(res->dirty);
  576. if (user_srf->master)
  577. drm_master_put(&user_srf->master);
  578. kfree(srf->offsets);
  579. kfree(srf->metadata.sizes);
  580. kfree(srf->snooper.image);
  581. ttm_prime_object_kfree(user_srf, prime);
  582. }
  583. /**
  584. * vmw_user_surface_base_release - User visible surface TTM base object destructor
  585. *
  586. * @p_base: Pointer to a pointer to a TTM base object
  587. * embedded in a struct vmw_user_surface.
  588. *
  589. * Drops the base object's reference on its resource, and the
  590. * pointer pointed to by *p_base is set to NULL.
  591. */
  592. static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
  593. {
  594. struct ttm_base_object *base = *p_base;
  595. struct vmw_user_surface *user_srf =
  596. container_of(base, struct vmw_user_surface, prime.base);
  597. struct vmw_resource *res = &user_srf->srf.res;
  598. if (res && res->backup)
  599. drm_gem_object_put(&res->backup->base.base);
  600. *p_base = NULL;
  601. vmw_resource_unreference(&res);
  602. }
  603. /**
  604. * vmw_surface_destroy_ioctl - Ioctl function implementing
  605. * the user surface destroy functionality.
  606. *
  607. * @dev: Pointer to a struct drm_device.
  608. * @data: Pointer to data copied from / to user-space.
  609. * @file_priv: Pointer to a drm file private structure.
  610. */
  611. int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
  612. struct drm_file *file_priv)
  613. {
  614. struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
  615. struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  616. return ttm_ref_object_base_unref(tfile, arg->sid);
  617. }
  618. /**
  619. * vmw_surface_define_ioctl - Ioctl function implementing
  620. * the user surface define functionality.
  621. *
  622. * @dev: Pointer to a struct drm_device.
  623. * @data: Pointer to data copied from / to user-space.
  624. * @file_priv: Pointer to a drm file private structure.
  625. */
  626. int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
  627. struct drm_file *file_priv)
  628. {
  629. struct vmw_private *dev_priv = vmw_priv(dev);
  630. struct vmw_user_surface *user_srf;
  631. struct vmw_surface *srf;
  632. struct vmw_surface_metadata *metadata;
  633. struct vmw_resource *res;
  634. struct vmw_resource *tmp;
  635. union drm_vmw_surface_create_arg *arg =
  636. (union drm_vmw_surface_create_arg *)data;
  637. struct drm_vmw_surface_create_req *req = &arg->req;
  638. struct drm_vmw_surface_arg *rep = &arg->rep;
  639. struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  640. int ret;
  641. int i, j;
  642. uint32_t cur_bo_offset;
  643. struct drm_vmw_size *cur_size;
  644. struct vmw_surface_offset *cur_offset;
  645. uint32_t num_sizes;
  646. const SVGA3dSurfaceDesc *desc;
  647. num_sizes = 0;
  648. for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
  649. if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
  650. return -EINVAL;
  651. num_sizes += req->mip_levels[i];
  652. }
  653. if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
  654. num_sizes == 0)
  655. return -EINVAL;
  656. desc = vmw_surface_get_desc(req->format);
  657. if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
  658. VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
  659. req->format);
  660. return -EINVAL;
  661. }
  662. user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
  663. if (unlikely(!user_srf)) {
  664. ret = -ENOMEM;
  665. goto out_unlock;
  666. }
  667. srf = &user_srf->srf;
  668. metadata = &srf->metadata;
  669. res = &srf->res;
  670. /* Driver internally stores as 64-bit flags */
  671. metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
  672. metadata->format = req->format;
  673. metadata->scanout = req->scanout;
  674. memcpy(metadata->mip_levels, req->mip_levels,
  675. sizeof(metadata->mip_levels));
  676. metadata->num_sizes = num_sizes;
  677. metadata->sizes =
  678. memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
  679. req->size_addr,
  680. metadata->num_sizes, sizeof(*metadata->sizes));
  681. if (IS_ERR(metadata->sizes)) {
  682. ret = PTR_ERR(metadata->sizes);
  683. goto out_no_sizes;
  684. }
  685. srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
  686. GFP_KERNEL);
  687. if (unlikely(!srf->offsets)) {
  688. ret = -ENOMEM;
  689. goto out_no_offsets;
  690. }
  691. metadata->base_size = *srf->metadata.sizes;
  692. metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
  693. metadata->multisample_count = 0;
  694. metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
  695. metadata->quality_level = SVGA3D_MS_QUALITY_NONE;
  696. cur_bo_offset = 0;
  697. cur_offset = srf->offsets;
  698. cur_size = metadata->sizes;
  699. for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
  700. for (j = 0; j < metadata->mip_levels[i]; ++j) {
  701. uint32_t stride = vmw_surface_calculate_pitch(
  702. desc, cur_size);
  703. cur_offset->face = i;
  704. cur_offset->mip = j;
  705. cur_offset->bo_offset = cur_bo_offset;
  706. cur_bo_offset += vmw_surface_get_image_buffer_size
  707. (desc, cur_size, stride);
  708. ++cur_offset;
  709. ++cur_size;
  710. }
  711. }
  712. res->backup_size = cur_bo_offset;
  713. if (metadata->scanout &&
  714. metadata->num_sizes == 1 &&
  715. metadata->sizes[0].width == 64 &&
  716. metadata->sizes[0].height == 64 &&
  717. metadata->format == SVGA3D_A8R8G8B8) {
  718. srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
  719. if (!srf->snooper.image) {
  720. DRM_ERROR("Failed to allocate cursor_image\n");
  721. ret = -ENOMEM;
  722. goto out_no_copy;
  723. }
  724. } else {
  725. srf->snooper.image = NULL;
  726. }
  727. user_srf->prime.base.shareable = false;
  728. user_srf->prime.base.tfile = NULL;
  729. if (drm_is_primary_client(file_priv))
  730. user_srf->master = drm_file_get_master(file_priv);
   731. /*
  732. * From this point, the generic resource management functions
  733. * destroy the object on failure.
  734. */
  735. ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
  736. if (unlikely(ret != 0))
  737. goto out_unlock;
  738. /*
  739. * A gb-aware client referencing a shared surface will
  740. * expect a backup buffer to be present.
  741. */
  742. if (dev_priv->has_mob && req->shareable) {
  743. uint32_t backup_handle;
  744. ret = vmw_gem_object_create_with_handle(dev_priv,
  745. file_priv,
  746. res->backup_size,
  747. &backup_handle,
  748. &res->backup);
  749. if (unlikely(ret != 0)) {
  750. vmw_resource_unreference(&res);
  751. goto out_unlock;
  752. }
  753. vmw_bo_reference(res->backup);
  754. /*
   755. * We don't expose the handle to userspace, and the surface
   756. * already holds a gem reference.
  757. */
  758. drm_gem_handle_delete(file_priv, backup_handle);
  759. }
  760. tmp = vmw_resource_reference(&srf->res);
  761. ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
  762. req->shareable, VMW_RES_SURFACE,
  763. &vmw_user_surface_base_release);
  764. if (unlikely(ret != 0)) {
  765. vmw_resource_unreference(&tmp);
  766. vmw_resource_unreference(&res);
  767. goto out_unlock;
  768. }
  769. rep->sid = user_srf->prime.base.handle;
  770. vmw_resource_unreference(&res);
  771. return 0;
  772. out_no_copy:
  773. kfree(srf->offsets);
  774. out_no_offsets:
  775. kfree(metadata->sizes);
  776. out_no_sizes:
  777. ttm_prime_object_kfree(user_srf, prime);
  778. out_unlock:
  779. return ret;
  780. }
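/**
 * vmw_surface_handle_reference - Look up and reference a user surface handle
 *
 * @dev_priv: Pointer to a device private struct.
 * @file_priv: Pointer to a drm file private structure.
 * @u_handle: User-space handle or prime fd identifying the surface.
 * @handle_type: Whether @u_handle is a TTM handle or a prime fd.
 * @base_p: On successful return, points to the referenced TTM base object.
 *
 * Resolves the handle, verifies that the referenced object is a surface,
 * performs the authentication and sharing checks and, for non-prime handles,
 * adds a ref object so the reference can later be dropped with
 * ttm_ref_object_base_unref().
 */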
  781. static int
  782. vmw_surface_handle_reference(struct vmw_private *dev_priv,
  783. struct drm_file *file_priv,
  784. uint32_t u_handle,
  785. enum drm_vmw_handle_type handle_type,
  786. struct ttm_base_object **base_p)
  787. {
  788. struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  789. struct vmw_user_surface *user_srf;
  790. uint32_t handle;
  791. struct ttm_base_object *base;
  792. int ret;
  793. if (handle_type == DRM_VMW_HANDLE_PRIME) {
  794. ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
  795. if (unlikely(ret != 0))
  796. return ret;
  797. } else {
  798. handle = u_handle;
  799. }
  800. ret = -EINVAL;
  801. base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
  802. if (unlikely(!base)) {
  803. VMW_DEBUG_USER("Could not find surface to reference.\n");
  804. goto out_no_lookup;
  805. }
  806. if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
  807. VMW_DEBUG_USER("Referenced object is not a surface.\n");
  808. goto out_bad_resource;
  809. }
  810. if (handle_type != DRM_VMW_HANDLE_PRIME) {
  811. bool require_exist = false;
  812. user_srf = container_of(base, struct vmw_user_surface,
  813. prime.base);
   814. /* Error out if we are an unauthenticated primary client */
  815. if (drm_is_primary_client(file_priv) &&
  816. !file_priv->authenticated) {
  817. ret = -EACCES;
  818. goto out_bad_resource;
  819. }
  820. /*
  821. * Make sure the surface creator has the same
  822. * authenticating master, or is already registered with us.
  823. */
  824. if (drm_is_primary_client(file_priv) &&
  825. user_srf->master != file_priv->master)
  826. require_exist = true;
  827. if (unlikely(drm_is_render_client(file_priv)))
  828. require_exist = true;
  829. ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
  830. if (unlikely(ret != 0)) {
  831. DRM_ERROR("Could not add a reference to a surface.\n");
  832. goto out_bad_resource;
  833. }
  834. }
  835. *base_p = base;
  836. return 0;
  837. out_bad_resource:
  838. ttm_base_object_unref(&base);
  839. out_no_lookup:
  840. if (handle_type == DRM_VMW_HANDLE_PRIME)
  841. (void) ttm_ref_object_base_unref(tfile, handle);
  842. return ret;
  843. }
  844. /**
  845. * vmw_surface_reference_ioctl - Ioctl function implementing
  846. * the user surface reference functionality.
  847. *
  848. * @dev: Pointer to a struct drm_device.
  849. * @data: Pointer to data copied from / to user-space.
  850. * @file_priv: Pointer to a drm file private structure.
  851. */
  852. int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
  853. struct drm_file *file_priv)
  854. {
  855. struct vmw_private *dev_priv = vmw_priv(dev);
  856. union drm_vmw_surface_reference_arg *arg =
  857. (union drm_vmw_surface_reference_arg *)data;
  858. struct drm_vmw_surface_arg *req = &arg->req;
  859. struct drm_vmw_surface_create_req *rep = &arg->rep;
  860. struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  861. struct vmw_surface *srf;
  862. struct vmw_user_surface *user_srf;
  863. struct drm_vmw_size __user *user_sizes;
  864. struct ttm_base_object *base;
  865. int ret;
  866. ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
  867. req->handle_type, &base);
  868. if (unlikely(ret != 0))
  869. return ret;
  870. user_srf = container_of(base, struct vmw_user_surface, prime.base);
  871. srf = &user_srf->srf;
  872. /* Downcast of flags when sending back to user space */
  873. rep->flags = (uint32_t)srf->metadata.flags;
  874. rep->format = srf->metadata.format;
  875. memcpy(rep->mip_levels, srf->metadata.mip_levels,
  876. sizeof(srf->metadata.mip_levels));
  877. user_sizes = (struct drm_vmw_size __user *)(unsigned long)
  878. rep->size_addr;
  879. if (user_sizes)
  880. ret = copy_to_user(user_sizes, &srf->metadata.base_size,
  881. sizeof(srf->metadata.base_size));
  882. if (unlikely(ret != 0)) {
  883. VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
  884. srf->metadata.num_sizes);
  885. ttm_ref_object_base_unref(tfile, base->handle);
  886. ret = -EFAULT;
  887. }
  888. ttm_base_object_unref(&base);
  889. return ret;
  890. }
  891. /**
   892. * vmw_gb_surface_create - Create a guest-backed device surface as part of the resource validation process.
  893. *
  894. * @res: Pointer to a struct vmw_resource embedded in a struct
  895. * vmw_surface.
  896. */
  897. static int vmw_gb_surface_create(struct vmw_resource *res)
  898. {
  899. struct vmw_private *dev_priv = res->dev_priv;
  900. struct vmw_surface *srf = vmw_res_to_srf(res);
  901. struct vmw_surface_metadata *metadata = &srf->metadata;
  902. uint32_t cmd_len, cmd_id, submit_len;
  903. int ret;
  904. struct {
  905. SVGA3dCmdHeader header;
  906. SVGA3dCmdDefineGBSurface body;
  907. } *cmd;
  908. struct {
  909. SVGA3dCmdHeader header;
  910. SVGA3dCmdDefineGBSurface_v2 body;
  911. } *cmd2;
  912. struct {
  913. SVGA3dCmdHeader header;
  914. SVGA3dCmdDefineGBSurface_v3 body;
  915. } *cmd3;
  916. struct {
  917. SVGA3dCmdHeader header;
  918. SVGA3dCmdDefineGBSurface_v4 body;
  919. } *cmd4;
  920. if (likely(res->id != -1))
  921. return 0;
  922. vmw_fifo_resource_inc(dev_priv);
  923. ret = vmw_resource_alloc_id(res);
  924. if (unlikely(ret != 0)) {
  925. DRM_ERROR("Failed to allocate a surface id.\n");
  926. goto out_no_id;
  927. }
  928. if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
  929. ret = -EBUSY;
  930. goto out_no_fifo;
  931. }
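/*
 * Pick the most capable DEFINE_GB_SURFACE command variant the host
 * supports: v4 (SM5) adds bufferByteStride, v3 (SM4.1) adds the
 * multisample pattern and quality level, v2 adds arraySize.
 */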
  932. if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
  933. cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
  934. cmd_len = sizeof(cmd4->body);
  935. submit_len = sizeof(*cmd4);
  936. } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
  937. cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
  938. cmd_len = sizeof(cmd3->body);
  939. submit_len = sizeof(*cmd3);
  940. } else if (metadata->array_size > 0) {
  941. /* VMW_SM_4 support verified at creation time. */
  942. cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
  943. cmd_len = sizeof(cmd2->body);
  944. submit_len = sizeof(*cmd2);
  945. } else {
  946. cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
  947. cmd_len = sizeof(cmd->body);
  948. submit_len = sizeof(*cmd);
  949. }
  950. cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
  951. cmd2 = (typeof(cmd2))cmd;
  952. cmd3 = (typeof(cmd3))cmd;
  953. cmd4 = (typeof(cmd4))cmd;
  954. if (unlikely(!cmd)) {
  955. ret = -ENOMEM;
  956. goto out_no_fifo;
  957. }
  958. if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
  959. cmd4->header.id = cmd_id;
  960. cmd4->header.size = cmd_len;
  961. cmd4->body.sid = srf->res.id;
  962. cmd4->body.surfaceFlags = metadata->flags;
  963. cmd4->body.format = metadata->format;
  964. cmd4->body.numMipLevels = metadata->mip_levels[0];
  965. cmd4->body.multisampleCount = metadata->multisample_count;
  966. cmd4->body.multisamplePattern = metadata->multisample_pattern;
  967. cmd4->body.qualityLevel = metadata->quality_level;
  968. cmd4->body.autogenFilter = metadata->autogen_filter;
  969. cmd4->body.size.width = metadata->base_size.width;
  970. cmd4->body.size.height = metadata->base_size.height;
  971. cmd4->body.size.depth = metadata->base_size.depth;
  972. cmd4->body.arraySize = metadata->array_size;
  973. cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
  974. } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
  975. cmd3->header.id = cmd_id;
  976. cmd3->header.size = cmd_len;
  977. cmd3->body.sid = srf->res.id;
  978. cmd3->body.surfaceFlags = metadata->flags;
  979. cmd3->body.format = metadata->format;
  980. cmd3->body.numMipLevels = metadata->mip_levels[0];
  981. cmd3->body.multisampleCount = metadata->multisample_count;
  982. cmd3->body.multisamplePattern = metadata->multisample_pattern;
  983. cmd3->body.qualityLevel = metadata->quality_level;
  984. cmd3->body.autogenFilter = metadata->autogen_filter;
  985. cmd3->body.size.width = metadata->base_size.width;
  986. cmd3->body.size.height = metadata->base_size.height;
  987. cmd3->body.size.depth = metadata->base_size.depth;
  988. cmd3->body.arraySize = metadata->array_size;
  989. } else if (metadata->array_size > 0) {
  990. cmd2->header.id = cmd_id;
  991. cmd2->header.size = cmd_len;
  992. cmd2->body.sid = srf->res.id;
  993. cmd2->body.surfaceFlags = metadata->flags;
  994. cmd2->body.format = metadata->format;
  995. cmd2->body.numMipLevels = metadata->mip_levels[0];
  996. cmd2->body.multisampleCount = metadata->multisample_count;
  997. cmd2->body.autogenFilter = metadata->autogen_filter;
  998. cmd2->body.size.width = metadata->base_size.width;
  999. cmd2->body.size.height = metadata->base_size.height;
  1000. cmd2->body.size.depth = metadata->base_size.depth;
  1001. cmd2->body.arraySize = metadata->array_size;
  1002. } else {
  1003. cmd->header.id = cmd_id;
  1004. cmd->header.size = cmd_len;
  1005. cmd->body.sid = srf->res.id;
  1006. cmd->body.surfaceFlags = metadata->flags;
  1007. cmd->body.format = metadata->format;
  1008. cmd->body.numMipLevels = metadata->mip_levels[0];
  1009. cmd->body.multisampleCount = metadata->multisample_count;
  1010. cmd->body.autogenFilter = metadata->autogen_filter;
  1011. cmd->body.size.width = metadata->base_size.width;
  1012. cmd->body.size.height = metadata->base_size.height;
  1013. cmd->body.size.depth = metadata->base_size.depth;
  1014. }
  1015. vmw_cmd_commit(dev_priv, submit_len);
  1016. return 0;
  1017. out_no_fifo:
  1018. vmw_resource_release_id(res);
  1019. out_no_id:
  1020. vmw_fifo_resource_dec(dev_priv);
  1021. return ret;
  1022. }
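/**
 * vmw_gb_surface_bind - Bind a guest-backed surface to its backup MOB.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * Issues a SVGA_3D_CMD_BIND_GB_SURFACE command and, if the backup contents
 * are dirty, a SVGA_3D_CMD_UPDATE_GB_SURFACE command so the device picks up
 * the new contents.
 */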
  1023. static int vmw_gb_surface_bind(struct vmw_resource *res,
  1024. struct ttm_validate_buffer *val_buf)
  1025. {
  1026. struct vmw_private *dev_priv = res->dev_priv;
  1027. struct {
  1028. SVGA3dCmdHeader header;
  1029. SVGA3dCmdBindGBSurface body;
  1030. } *cmd1;
  1031. struct {
  1032. SVGA3dCmdHeader header;
  1033. SVGA3dCmdUpdateGBSurface body;
  1034. } *cmd2;
  1035. uint32_t submit_size;
  1036. struct ttm_buffer_object *bo = val_buf->bo;
  1037. BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
  1038. submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
  1039. cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
  1040. if (unlikely(!cmd1))
  1041. return -ENOMEM;
  1042. cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
  1043. cmd1->header.size = sizeof(cmd1->body);
  1044. cmd1->body.sid = res->id;
  1045. cmd1->body.mobid = bo->resource->start;
  1046. if (res->backup_dirty) {
  1047. cmd2 = (void *) &cmd1[1];
  1048. cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
  1049. cmd2->header.size = sizeof(cmd2->body);
  1050. cmd2->body.sid = res->id;
  1051. }
  1052. vmw_cmd_commit(dev_priv, submit_size);
  1053. if (res->backup->dirty && res->backup_dirty) {
   1054. /* We've just made a full upload. Clear dirty regions. */
  1055. vmw_bo_dirty_clear_res(res);
  1056. }
  1057. res->backup_dirty = false;
  1058. return 0;
  1059. }
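/**
 * vmw_gb_surface_unbind - Unbind a guest-backed surface from its backup MOB.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 * @readback: Whether to read the surface contents back to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * Issues either a readback or an invalidate command for the surface, unbinds
 * it by binding it to SVGA3D_INVALID_ID, and fences the backup buffer.
 */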
  1060. static int vmw_gb_surface_unbind(struct vmw_resource *res,
  1061. bool readback,
  1062. struct ttm_validate_buffer *val_buf)
  1063. {
  1064. struct vmw_private *dev_priv = res->dev_priv;
  1065. struct ttm_buffer_object *bo = val_buf->bo;
  1066. struct vmw_fence_obj *fence;
  1067. struct {
  1068. SVGA3dCmdHeader header;
  1069. SVGA3dCmdReadbackGBSurface body;
  1070. } *cmd1;
  1071. struct {
  1072. SVGA3dCmdHeader header;
  1073. SVGA3dCmdInvalidateGBSurface body;
  1074. } *cmd2;
  1075. struct {
  1076. SVGA3dCmdHeader header;
  1077. SVGA3dCmdBindGBSurface body;
  1078. } *cmd3;
  1079. uint32_t submit_size;
  1080. uint8_t *cmd;
  1081. BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
  1082. submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
  1083. cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
  1084. if (unlikely(!cmd))
  1085. return -ENOMEM;
  1086. if (readback) {
  1087. cmd1 = (void *) cmd;
  1088. cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
  1089. cmd1->header.size = sizeof(cmd1->body);
  1090. cmd1->body.sid = res->id;
  1091. cmd3 = (void *) &cmd1[1];
  1092. } else {
  1093. cmd2 = (void *) cmd;
  1094. cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
  1095. cmd2->header.size = sizeof(cmd2->body);
  1096. cmd2->body.sid = res->id;
  1097. cmd3 = (void *) &cmd2[1];
  1098. }
  1099. cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
  1100. cmd3->header.size = sizeof(cmd3->body);
  1101. cmd3->body.sid = res->id;
  1102. cmd3->body.mobid = SVGA3D_INVALID_ID;
  1103. vmw_cmd_commit(dev_priv, submit_size);
  1104. /*
  1105. * Create a fence object and fence the backup buffer.
  1106. */
  1107. (void) vmw_execbuf_fence_commands(NULL, dev_priv,
  1108. &fence, NULL);
  1109. vmw_bo_fence_single(val_buf->bo, fence);
  1110. if (likely(fence != NULL))
  1111. vmw_fence_obj_unreference(&fence);
  1112. return 0;
  1113. }
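/**
 * vmw_gb_surface_destroy - Destroy a guest-backed device surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * Scrubs views and bindings referring to the surface, issues a
 * SVGA_3D_CMD_DESTROY_GB_SURFACE command and releases the surface id.
 */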
  1114. static int vmw_gb_surface_destroy(struct vmw_resource *res)
  1115. {
  1116. struct vmw_private *dev_priv = res->dev_priv;
  1117. struct vmw_surface *srf = vmw_res_to_srf(res);
  1118. struct {
  1119. SVGA3dCmdHeader header;
  1120. SVGA3dCmdDestroyGBSurface body;
  1121. } *cmd;
  1122. if (likely(res->id == -1))
  1123. return 0;
  1124. mutex_lock(&dev_priv->binding_mutex);
  1125. vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
  1126. vmw_binding_res_list_scrub(&res->binding_head);
  1127. cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
  1128. if (unlikely(!cmd)) {
  1129. mutex_unlock(&dev_priv->binding_mutex);
  1130. return -ENOMEM;
  1131. }
  1132. cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
  1133. cmd->header.size = sizeof(cmd->body);
  1134. cmd->body.sid = res->id;
  1135. vmw_cmd_commit(dev_priv, sizeof(*cmd));
  1136. mutex_unlock(&dev_priv->binding_mutex);
  1137. vmw_resource_release_id(res);
  1138. vmw_fifo_resource_dec(dev_priv);
  1139. return 0;
  1140. }
  1141. /**
  1142. * vmw_gb_surface_define_ioctl - Ioctl function implementing
  1143. * the user surface define functionality.
  1144. *
  1145. * @dev: Pointer to a struct drm_device.
  1146. * @data: Pointer to data copied from / to user-space.
  1147. * @file_priv: Pointer to a drm file private structure.
  1148. */
  1149. int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
  1150. struct drm_file *file_priv)
  1151. {
  1152. union drm_vmw_gb_surface_create_arg *arg =
  1153. (union drm_vmw_gb_surface_create_arg *)data;
  1154. struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
  1155. struct drm_vmw_gb_surface_create_ext_req req_ext;
  1156. req_ext.base = arg->req;
  1157. req_ext.version = drm_vmw_gb_surface_v1;
  1158. req_ext.svga3d_flags_upper_32_bits = 0;
  1159. req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
  1160. req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
  1161. req_ext.buffer_byte_stride = 0;
  1162. req_ext.must_be_zero = 0;
  1163. return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
  1164. }
  1165. /**
  1166. * vmw_gb_surface_reference_ioctl - Ioctl function implementing
  1167. * the user surface reference functionality.
  1168. *
  1169. * @dev: Pointer to a struct drm_device.
  1170. * @data: Pointer to data copied from / to user-space.
  1171. * @file_priv: Pointer to a drm file private structure.
  1172. */
  1173. int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
  1174. struct drm_file *file_priv)
  1175. {
  1176. union drm_vmw_gb_surface_reference_arg *arg =
  1177. (union drm_vmw_gb_surface_reference_arg *)data;
  1178. struct drm_vmw_surface_arg *req = &arg->req;
  1179. struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
  1180. struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
  1181. int ret;
  1182. ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
  1183. if (unlikely(ret != 0))
  1184. return ret;
  1185. rep->creq = rep_ext.creq.base;
  1186. rep->crep = rep_ext.crep;
  1187. return ret;
  1188. }
  1189. /**
  1190. * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
  1191. * the user surface define functionality.
  1192. *
  1193. * @dev: Pointer to a struct drm_device.
  1194. * @data: Pointer to data copied from / to user-space.
  1195. * @file_priv: Pointer to a drm file private structure.
  1196. */
  1197. int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
  1198. struct drm_file *file_priv)
  1199. {
  1200. union drm_vmw_gb_surface_create_ext_arg *arg =
  1201. (union drm_vmw_gb_surface_create_ext_arg *)data;
  1202. struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
  1203. struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
  1204. return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
  1205. }
  1206. /**
  1207. * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
  1208. * the user surface reference functionality.
  1209. *
  1210. * @dev: Pointer to a struct drm_device.
  1211. * @data: Pointer to data copied from / to user-space.
  1212. * @file_priv: Pointer to a drm file private structure.
  1213. */
  1214. int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
  1215. struct drm_file *file_priv)
  1216. {
  1217. union drm_vmw_gb_surface_reference_ext_arg *arg =
  1218. (union drm_vmw_gb_surface_reference_ext_arg *)data;
  1219. struct drm_vmw_surface_arg *req = &arg->req;
  1220. struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
  1221. return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
  1222. }
  1223. /**
  1224. * vmw_gb_surface_define_internal - Ioctl function implementing
  1225. * the user surface define functionality.
  1226. *
  1227. * @dev: Pointer to a struct drm_device.
  1228. * @req: Request argument from user-space.
  1229. * @rep: Response argument to user-space.
  1230. * @file_priv: Pointer to a drm file private structure.
  1231. */
  1232. static int
  1233. vmw_gb_surface_define_internal(struct drm_device *dev,
  1234. struct drm_vmw_gb_surface_create_ext_req *req,
  1235. struct drm_vmw_gb_surface_create_rep *rep,
  1236. struct drm_file *file_priv)
  1237. {
  1238. struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
  1239. struct vmw_private *dev_priv = vmw_priv(dev);
  1240. struct vmw_user_surface *user_srf;
  1241. struct vmw_surface_metadata metadata = {0};
  1242. struct vmw_surface *srf;
  1243. struct vmw_resource *res;
  1244. struct vmw_resource *tmp;
  1245. int ret = 0;
  1246. uint32_t backup_handle = 0;
  1247. SVGA3dSurfaceAllFlags svga3d_flags_64 =
  1248. SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
  1249. req->base.svga3d_flags);
   1250. /* array_size must be zero for non-GL3 hosts. */
  1251. if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
  1252. VMW_DEBUG_USER("SM4 surface not supported.\n");
  1253. return -EINVAL;
  1254. }
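/*
 * 64-bit surface flags, a multisample count, pattern or quality level
 * require an SM4.1 capable host.
 */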
  1255. if (!has_sm4_1_context(dev_priv)) {
  1256. if (req->svga3d_flags_upper_32_bits != 0)
  1257. ret = -EINVAL;
  1258. if (req->base.multisample_count != 0)
  1259. ret = -EINVAL;
  1260. if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
  1261. ret = -EINVAL;
  1262. if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
  1263. ret = -EINVAL;
  1264. if (ret) {
  1265. VMW_DEBUG_USER("SM4.1 surface not supported.\n");
  1266. return ret;
  1267. }
  1268. }
  1269. if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
  1270. VMW_DEBUG_USER("SM5 surface not supported.\n");
  1271. return -EINVAL;
  1272. }
  1273. if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
  1274. req->base.multisample_count == 0) {
  1275. VMW_DEBUG_USER("Invalid sample count.\n");
  1276. return -EINVAL;
  1277. }
  1278. if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
  1279. VMW_DEBUG_USER("Invalid mip level.\n");
  1280. return -EINVAL;
  1281. }
  1282. metadata.flags = svga3d_flags_64;
  1283. metadata.format = req->base.format;
  1284. metadata.mip_levels[0] = req->base.mip_levels;
  1285. metadata.multisample_count = req->base.multisample_count;
  1286. metadata.multisample_pattern = req->multisample_pattern;
  1287. metadata.quality_level = req->quality_level;
  1288. metadata.array_size = req->base.array_size;
  1289. metadata.buffer_byte_stride = req->buffer_byte_stride;
  1290. metadata.num_sizes = 1;
  1291. metadata.base_size = req->base.base_size;
  1292. metadata.scanout = req->base.drm_surface_flags &
  1293. drm_vmw_surface_flag_scanout;
  1294. /* Define a surface based on the parameters. */
  1295. ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
  1296. if (ret != 0) {
  1297. VMW_DEBUG_USER("Failed to define surface.\n");
  1298. return ret;
  1299. }
  1300. user_srf = container_of(srf, struct vmw_user_surface, srf);
  1301. if (drm_is_primary_client(file_priv))
  1302. user_srf->master = drm_file_get_master(file_priv);
  1303. res = &user_srf->srf.res;
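
	/*
	 * Use the backing buffer handed in by user-space, or create a new
	 * GEM-backed buffer when the caller asked for one (or needs a
	 * coherent surface).
	 */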
	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
					 &res->backup);
		if (ret == 0) {
			if (res->backup->base.base.size < res->backup_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_bo_unreference(&res->backup);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   (drm_vmw_surface_flag_create_buffer |
		    drm_vmw_surface_flag_coherent)) {
		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
							res->backup_size,
							&backup_handle,
							&res->backup);
		if (ret == 0)
			vmw_bo_reference(res->backup);
	}

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}
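
	/*
	 * Coherent surfaces need dirty tracking on both the backing buffer
	 * and the resource; set both up while the buffer is reserved.
	 */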
	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
		struct vmw_buffer_object *backup = res->backup;

		ttm_bo_reserve(&backup->base, false, false, NULL);
		if (!res->func->dirty_alloc)
			ret = -EINVAL;
		if (!ret)
			ret = vmw_bo_dirty_add(backup);
		if (!ret) {
			res->coherent = true;
			ret = res->func->dirty_alloc(res);
		}
		ttm_bo_unreserve(&backup->base);
		if (ret) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->base.drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle = user_srf->prime.base.handle;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
		rep->buffer_size = res->backup->base.base.size;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}
	vmw_resource_unreference(&res);

out_unlock:
	return ret;
}

/**
 * vmw_gb_surface_reference_internal - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Pointer to user-space request surface arg.
 * @rep: Pointer to response to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata *metadata;
	struct ttm_base_object *base;
	u32 backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (!srf->res.backup) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}
	metadata = &srf->metadata;

	mutex_lock(&dev_priv->cmdbuf_mutex);	/* Protect res->backup */
	ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
				    &backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	if (ret != 0) {
		drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
			req->sid);
		goto out_bad_resource;
	}
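
	/*
	 * Return the surface's creation parameters and backing-buffer
	 * details so user-space can reconstruct an equivalent create request.
	 */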
	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
	rep->creq.base.format = metadata->format;
	rep->creq.base.mip_levels = metadata->mip_levels[0];
	rep->creq.base.drm_surface_flags = 0;
	rep->creq.base.multisample_count = metadata->multisample_count;
	rep->creq.base.autogen_filter = metadata->autogen_filter;
	rep->creq.base.array_size = metadata->array_size;
	rep->creq.base.buffer_handle = backup_handle;
	rep->creq.base.base_size = metadata->base_size;
	rep->crep.handle = user_srf->prime.base.handle;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.base.size;

	rep->creq.version = drm_vmw_gb_surface_v1;
	rep->creq.svga3d_flags_upper_32_bits =
		SVGA3D_FLAGS_UPPER_32(metadata->flags);
	rep->creq.multisample_pattern = metadata->multisample_pattern;
	rep->creq.quality_level = metadata->quality_level;
	rep->creq.must_be_zero = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_subres_dirty_add - Add a dirty region to a subresource
 * @dirty: The surface's dirty tracker.
 * @loc_start: The location corresponding to the start of the region.
 * @loc_end: The location corresponding to the end of the region.
 *
 * As we are assuming that @loc_start and @loc_end represent a sequential
 * range of backing store memory, if the region spans multiple lines then
 * regardless of the x coordinate, the full lines are dirtied.
 * Correspondingly if the region spans multiple z slices, then full rather
 * than partial z slices are dirtied.
 */
static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
				 const struct vmw_surface_loc *loc_start,
				 const struct vmw_surface_loc *loc_end)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	u32 box_c2 = box->z + box->d;

	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
		return;

	if (box->d == 0 || box->z > loc_start->z)
		box->z = loc_start->z;
	if (box_c2 < loc_end->z)
		box->d = loc_end->z - box->z;
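
	/*
	 * If the range is confined to a single z slice, refine y and
	 * possibly x; otherwise dirty the touched slices in full.
	 */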
	if (loc_start->z + 1 == loc_end->z) {
		box_c2 = box->y + box->h;
		if (box->h == 0 || box->y > loc_start->y)
			box->y = loc_start->y;
		if (box_c2 < loc_end->y)
			box->h = loc_end->y - box->y;

		if (loc_start->y + 1 == loc_end->y) {
			box_c2 = box->x + box->w;
			if (box->w == 0 || box->x > loc_start->x)
				box->x = loc_start->x;
			if (box_c2 < loc_end->x)
				box->w = loc_end->x - box->x;
		} else {
			box->x = 0;
			box->w = size->width;
		}
	} else {
		box->y = 0;
		box->h = size->height;
		box->x = 0;
		box->w = size->width;
	}
}

/**
 * vmw_subres_dirty_full - Mark a full subresource as dirty
 * @dirty: The surface's dirty tracker.
 * @subres: The subresource
 */
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	u32 mip = subres % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	SVGA3dBox *box = &dirty->boxes[subres];

	box->x = 0;
	box->y = 0;
	box->z = 0;
	box->w = size->width;
	box->h = size->height;
	box->d = size->depth;
}

/*
 * vmw_surface_tex_dirty_range_add - The dirty_add_range callback for texture
 * surfaces.
 */
static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t backup_end = res->backup_offset + res->backup_size;
	struct vmw_surface_loc loc1, loc2;
	const struct vmw_surface_cache *cache;
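
	/*
	 * Clamp the range to the part covered by this surface's backing
	 * store and make it relative to the backup offset.
	 */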
	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
	end = min(end, backup_end) - res->backup_offset;
	cache = &dirty->cache;
	vmw_surface_get_loc(cache, &loc1, start);
	vmw_surface_get_loc(cache, &loc2, end - 1);
	vmw_surface_inc_loc(cache, &loc2);

	if (loc1.sheet != loc2.sheet) {
		u32 sub_res;

		/*
		 * The range crosses multiple multisample sheets. An optimized
		 * implementation would compute the dirty region per sheet and
		 * take the union, but since this is not a common case, just
		 * dirty the whole surface.
		 */
		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
		return;
	}
	if (loc1.sub_resource + 1 == loc2.sub_resource) {
		/* Dirty range covers a single sub-resource */
		vmw_subres_dirty_add(dirty, &loc1, &loc2);
	} else {
		/* Dirty range covers multiple sub-resources */
		struct vmw_surface_loc loc_min, loc_max;
		u32 sub_res;

		vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
		vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
		for (sub_res = loc1.sub_resource + 1;
		     sub_res < loc2.sub_resource - 1; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
	}
}

/*
 * vmw_surface_buf_dirty_range_add - The dirty_add_range callback for buffer
 * surfaces.
 */
static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	const struct vmw_surface_cache *cache = &dirty->cache;
	size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
	SVGA3dBox *box = &dirty->boxes[0];
	u32 box_c2;
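
	/*
	 * Buffer surfaces use a single box; the dirty byte range is tracked
	 * in box->x / box->w with height and depth fixed at 1.
	 */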
	box->h = box->d = 1;
	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
	end = min(end, backup_end) - res->backup_offset;
	box_c2 = box->x + box->w;
	if (box->w == 0 || box->x > start)
		box->x = start;
	if (box_c2 < end)
		box->w = end - box->x;
}

/*
 * vmw_surface_dirty_range_add - The dirty_add_range callback for surfaces
 */
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);

	if (WARN_ON(end <= res->backup_offset ||
		    start >= res->backup_offset + res->backup_size))
		return;

	if (srf->metadata.format == SVGA3D_BUFFER)
		vmw_surface_buf_dirty_range_add(res, start, end);
	else
		vmw_surface_tex_dirty_range_add(res, start, end);
}

/*
 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
 */
static int vmw_surface_dirty_sync(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	u32 i, num_dirty;
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t alloc_size;
	const struct vmw_surface_cache *cache = &dirty->cache;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXUpdateSubResource body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd2;
	void *cmd;
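
	/* A sub-resource whose box has zero depth has nothing dirty. */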
	num_dirty = 0;
	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (box->d)
			num_dirty++;
	}

	if (!num_dirty)
		goto out;

	alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd1 = cmd;
	cmd2 = cmd;
	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (!box->d)
			continue;

		/*
		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
		 * UPDATE_GB_IMAGE is not.
		 */
		if (has_sm4_context(dev_priv)) {
			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
			cmd1->header.size = sizeof(cmd1->body);
			cmd1->body.sid = res->id;
			cmd1->body.subResource = i;
			cmd1->body.box = *box;
			cmd1++;
		} else {
			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
			cmd2->header.size = sizeof(cmd2->body);
			cmd2->body.image.sid = res->id;
			cmd2->body.image.face = i / cache->num_mip_levels;
			cmd2->body.image.mipmap = i -
				(cache->num_mip_levels * cmd2->body.image.face);
			cmd2->body.box = *box;
			cmd2++;
		}
	}
	vmw_cmd_commit(dev_priv, alloc_size);
out:
	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
	       dirty->num_subres);

	return 0;
}

/*
 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
 */
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	const struct vmw_surface_metadata *metadata = &srf->metadata;
	struct vmw_surface_dirty *dirty;
	u32 num_layers = 1;
	u32 num_mip;
	u32 num_subres;
	u32 num_samples;
	size_t dirty_size;
	int ret;

	if (metadata->array_size)
		num_layers = metadata->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers *= SVGA3D_MAX_SURFACE_FACES;

	num_mip = metadata->mip_levels[0];
	if (!num_mip)
		num_mip = 1;
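
	/* One dirty box is tracked per sub-resource (layer/face x mip level). */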
	num_subres = num_layers * num_mip;
	dirty_size = struct_size(dirty, boxes, num_subres);
	dirty = kvzalloc(dirty_size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	num_samples = max_t(u32, 1, metadata->multisample_count);
	ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
				      num_mip, num_layers, num_samples,
				      &dirty->cache);
	if (ret)
		goto out_no_cache;

	dirty->num_subres = num_subres;
	res->dirty = (struct vmw_resource_dirty *) dirty;

	return 0;

out_no_cache:
	kvfree(dirty);
out_no_dirty:
	return ret;
}

/*
 * vmw_surface_dirty_free - The surface's dirty_free callback
 */
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;

	kvfree(dirty);
	res->dirty = NULL;
}

/*
 * vmw_surface_clean - The surface's clean callback
 */
static int vmw_surface_clean(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	size_t alloc_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;
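
	/*
	 * A readback brings the surface contents on the device back into
	 * the guest-backed (MOB) storage.
	 */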
	alloc_size = sizeof(*cmd);
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, alloc_size);

	return 0;
}

/*
 * vmw_gb_surface_define - Define a private GB surface
 *
 * @dev_priv: Pointer to a device private.
 * @req: Metadata representing the surface to create.
 * @srf_out: Pointer to the allocated surface. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx. For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata *metadata;
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	u32 sample_count = 1;
	u32 num_layers = 1;
	int ret;

	*srf_out = NULL;

	if (req->scanout) {
		if (!vmw_surface_is_screen_target_format(req->format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.\n");
			return -EINVAL;
		}

		if (req->base_size.width > dev_priv->texture_max_width ||
		    req->base_size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u exceeds max surface size %ux%u\n",
				       req->base_size.width,
				       req->base_size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(req->format);

		if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}
	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
		return -EINVAL;

	if (req->num_sizes != 1)
		return -EINVAL;

	if (req->sizes != NULL)
		return -EINVAL;

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	*srf_out = &user_srf->srf;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	srf = &user_srf->srf;
	srf->metadata = *req;
	srf->offsets = NULL;

	metadata = &srf->metadata;

	if (metadata->array_size)
		num_layers = req->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = metadata->multisample_count;
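
	/*
	 * Size of the backing store: all mip levels, layers/faces and
	 * samples, plus room for stream-output state when requested.
	 */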
	srf->res.backup_size =
		vmw_surface_get_serialized_size_extended(
				metadata->format,
				metadata->base_size,
				metadata->mip_levels[0],
				num_layers,
				sample_count);

	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);

	/*
	 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
	 * whose size exceeds the STDU max width/height. This is really a
	 * workaround to support creation of a big framebuffer requested by
	 * some user-space applications for the whole topology. That big
	 * framebuffer won't really be used for binding with a screen target,
	 * as during prepare_fb a separate surface is created, so it's safe to
	 * ignore the SVGA3D_SURFACE_SCREENTARGET flag.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    metadata->scanout &&
	    metadata->base_size.width <= dev_priv->stdu_max_width &&
	    metadata->base_size.height <= dev_priv->stdu_max_height)
		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	return ret;

out_unlock:
	return ret;
}