kgsl_drawobj.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

/*
 * KGSL drawobj management
 * A drawobj is a single submission from userland. The drawobj
 * encapsulates everything about the submission: command buffers, flags and
 * sync points.
 *
 * Sync points are events that need to expire before the
 * drawobj can be queued to the hardware. All syncpoints are contained in an
 * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
 * multiple types of events, both internal ones (GPU events) and external
 * triggers. As the events expire, bits are cleared in a pending bitmap stored
 * in the drawobj. The GPU will submit the command as soon as the bitmap
 * goes to zero, indicating no more pending events.
 */

#include <linux/slab.h>
#include <linux/dma-fence-array.h>

#include "adreno_drawctxt.h"
#include "kgsl_compat.h"
#include "kgsl_device.h"
#include "kgsl_drawobj.h"
#include "kgsl_eventlog.h"
#include "kgsl_sync.h"
#include "kgsl_timeline.h"
#include "kgsl_trace.h"

/*
 * Define a kmem cache for the memobj structures since we
 * allocate and free them so frequently
 */
static struct kmem_cache *memobjs_cache;
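
/*
 * Release a sync obj and any per-event private data (fence info arrays,
 * timeline info and leftover fence callbacks) still attached to its
 * synclist.
 */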
static void syncobj_destroy_object(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
	int i;

	for (i = 0; i < syncobj->numsyncs; i++) {
		struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];

		if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) {
			struct event_fence_info *priv = event->priv;

			if (priv) {
				kfree(priv->fences);
				kfree(priv);
			}

			if (event->handle) {
				struct kgsl_sync_fence_cb *kcb = event->handle;

				dma_fence_put(kcb->fence);
				kfree(kcb);
			}
		} else if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMELINE) {
			kfree(event->priv);
		}
	}

	kfree(syncobj->synclist);
	kfree(syncobj);
}

static void cmdobj_destroy_object(struct kgsl_drawobj *drawobj)
{
	kfree(CMDOBJ(drawobj));
}

static void bindobj_destroy_object(struct kgsl_drawobj *drawobj)
{
	kfree(BINDOBJ(drawobj));
}

static void timelineobj_destroy_object(struct kgsl_drawobj *drawobj)
{
	kfree(TIMELINEOBJ(drawobj));
}
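
/*
 * Final kref release handler: drop the context reference taken in
 * drawobj_init() and hand off to the type-specific destroy_object() hook.
 */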
void kgsl_drawobj_destroy_object(struct kref *kref)
{
	struct kgsl_drawobj *drawobj = container_of(kref,
		struct kgsl_drawobj, refcount);

	kgsl_context_put(drawobj->context);
	drawobj->destroy_object(drawobj);
}
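
/*
 * Log every still-pending sync event on a sync obj - one line per
 * timestamp, fence or timeline entry - to help debug stuck submissions.
 */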
void kgsl_dump_syncpoints(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj)
{
	struct kgsl_drawobj_sync_event *event;
	unsigned int i;

	for (i = 0; i < syncobj->numsyncs; i++) {
		event = &syncobj->synclist[i];

		if (!kgsl_drawobj_event_pending(syncobj, i))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
			unsigned int retired;

			kgsl_readtimestamp(event->device,
				event->context, KGSL_TIMESTAMP_RETIRED,
				&retired);

			dev_err(device->dev,
				" [timestamp] context %u timestamp %u (retired %u)\n",
				event->context->id, event->timestamp,
				retired);
			break;
		}
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
			int j;
			struct event_fence_info *info = event->priv;

			for (j = 0; info && j < info->num_fences; j++)
				dev_err(device->dev, "[%d] fence: %s\n",
					i, info->fences[j].name);
			break;
		}
		case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: {
			int j;
			struct event_timeline_info *info = event->priv;

			for (j = 0; info && info[j].timeline; j++)
				dev_err(device->dev, "[%d] timeline: %d seqno %lld\n",
					i, info[j].timeline, info[j].seqno);
			break;
		}
		}
	}
}
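
/*
 * Canary timer handler: fires when a sync obj has been waiting on its sync
 * points for too long and dumps the pending events to aid debugging a
 * possible GPU syncpoint deadlock.
 */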
static void syncobj_timer(struct timer_list *t)
{
	struct kgsl_device *device;
	struct kgsl_drawobj_sync *syncobj = from_timer(syncobj, t, timer);
	struct kgsl_drawobj *drawobj;
	struct kgsl_drawobj_sync_event *event;
	unsigned int i;

	if (syncobj == NULL)
		return;

	drawobj = DRAWOBJ(syncobj);

	if (!kref_get_unless_zero(&drawobj->refcount))
		return;

	if (drawobj->context == NULL) {
		kgsl_drawobj_put(drawobj);
		return;
	}

	device = drawobj->context->device;

	dev_err(device->dev,
		"kgsl: possible gpu syncpoint deadlock for context %u timestamp %u\n",
		drawobj->context->id, drawobj->timestamp);

	set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
	kgsl_context_dump(drawobj->context);
	clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);

	dev_err(device->dev, " pending events:\n");

	for (i = 0; i < syncobj->numsyncs; i++) {
		event = &syncobj->synclist[i];

		if (!kgsl_drawobj_event_pending(syncobj, i))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
			dev_err(device->dev, " [%u] TIMESTAMP %u:%u\n",
				i, event->context->id, event->timestamp);
			break;
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
			int j;
			struct event_fence_info *info = event->priv;

			for (j = 0; info && j < info->num_fences; j++)
				dev_err(device->dev, " [%u] FENCE %s\n",
					i, info->fences[j].name);
			break;
		}
		case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: {
			int j;
			struct event_timeline_info *info = event->priv;
			struct dma_fence *fence = event->fence;
			bool retired = false;
			bool signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				&fence->flags);
			const char *str = NULL;

			if (fence->ops->signaled && fence->ops->signaled(fence))
				retired = true;

			if (!retired)
				str = "not retired";
			else if (retired && signaled)
				str = "signaled";
			else if (retired && !signaled)
				str = "retired but not signaled";

			dev_err(device->dev, " [%u] FENCE %s\n",
				i, str);

			for (j = 0; info && info[j].timeline; j++)
				dev_err(device->dev, " TIMELINE %d SEQNO %lld\n",
					info[j].timeline, info[j].seqno);
			break;
		}
		}
	}

	kgsl_drawobj_put(drawobj);
	dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}

/*
 * a generic function to retire a pending sync event and (possibly) kick the
 * dispatcher.
 * Returns false if the event was already marked for cancellation in another
 * thread. This function should return true if this thread is responsible for
 * freeing up the memory, and the event will not be cancelled.
 */
static bool drawobj_sync_expire(struct kgsl_device *device,
	struct kgsl_drawobj_sync_event *event)
{
	struct kgsl_drawobj_sync *syncobj = event->syncobj;

	/*
	 * Clear the event from the pending mask - if it is already clear, then
	 * leave without doing anything useful
	 */
	if (!test_and_clear_bit(event->id, &syncobj->pending))
		return false;

	/*
	 * If no more pending events, delete the timer and schedule the command
	 * for dispatch
	 */
	if (!kgsl_drawobj_events_pending(event->syncobj)) {
		del_timer(&syncobj->timer);

		if (device->ftbl->drawctxt_sched)
			device->ftbl->drawctxt_sched(device,
				event->syncobj->base.context);
	}

	return true;
}

/*
 * This function is called by the GPU event when the sync event timestamp
 * expires
 */
static void drawobj_sync_func(struct kgsl_device *device,
	struct kgsl_event_group *group, void *priv, int result)
{
	struct kgsl_drawobj_sync_event *event = priv;

	trace_syncpoint_timestamp_expire(event->syncobj,
		event->context, event->timestamp);

	/*
	 * Put down the context ref count only if
	 * this thread successfully clears the pending bit mask.
	 */
	if (drawobj_sync_expire(device, event))
		kgsl_context_put(event->context);

	kgsl_drawobj_put(&event->syncobj->base);
}
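
/*
 * Deferred work for a retired timeline sync event: drop the fence and
 * drawobj references outside of the dma_fence callback context.
 */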
static void drawobj_sync_timeline_fence_work(struct work_struct *work)
{
	struct kgsl_drawobj_sync_event *event = container_of(work,
		struct kgsl_drawobj_sync_event, work);

	dma_fence_put(event->fence);
	kgsl_drawobj_put(&event->syncobj->base);
}

static void trace_syncpoint_timeline_fence(struct kgsl_drawobj_sync *syncobj,
	struct dma_fence *f, bool expire)
{
	struct dma_fence_array *array = to_dma_fence_array(f);
	struct dma_fence **fences = &f;
	u32 num_fences = 1;
	int i;

	if (array) {
		num_fences = array->num_fences;
		fences = array->fences;
	}

	for (i = 0; i < num_fences; i++) {
		char fence_name[KGSL_FENCE_NAME_LEN];

		snprintf(fence_name, sizeof(fence_name), "%s:%llu",
			fences[i]->ops->get_timeline_name(fences[i]),
			fences[i]->seqno);
		if (expire) {
			trace_syncpoint_fence_expire(syncobj, fence_name);
			log_kgsl_syncpoint_fence_expire_event(
				syncobj->base.context->id, fence_name);
		} else {
			trace_syncpoint_fence(syncobj, fence_name);
			log_kgsl_syncpoint_fence_event(
				syncobj->base.context->id, fence_name);
		}
	}
}
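
/*
 * dma_fence callback for a timeline sync point: trace the expiry, retire
 * the event and let a worker do the reference puts.
 */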
static void drawobj_sync_timeline_fence_callback(struct dma_fence *f,
	struct dma_fence_cb *cb)
{
	struct kgsl_drawobj_sync_event *event = container_of(cb,
		struct kgsl_drawobj_sync_event, cb);

	trace_syncpoint_timeline_fence(event->syncobj, f, true);

	/*
	 * Mark the event as synced and then fire off a worker to handle
	 * removing the fence
	 */
	if (drawobj_sync_expire(event->device, event))
		queue_work(kgsl_driver.lockless_workqueue, &event->work);
}
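
/*
 * Cancel every still-pending sync event on the sync obj so that any late
 * asynchronous callbacks become harmless.
 */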
static void syncobj_destroy(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
	unsigned int i;

	/* Zap the canary timer */
	del_timer_sync(&syncobj->timer);

	/*
	 * Clear all pending events - this will render any subsequent async
	 * callbacks harmless
	 */
	for (i = 0; i < syncobj->numsyncs; i++) {
		struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];

		/*
		 * Don't do anything if the event has already expired.
		 * If this thread clears the pending bit mask then it is
		 * responsible for doing context put.
		 */
		if (!test_and_clear_bit(i, &syncobj->pending))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
			kgsl_cancel_event(drawobj->device,
				&event->context->events, event->timestamp,
				drawobj_sync_func, event);
			/*
			 * Do context put here to make sure the context is alive
			 * till this thread cancels kgsl event.
			 */
			kgsl_context_put(event->context);
			break;
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
			kgsl_sync_fence_async_cancel(event->handle);
			kgsl_drawobj_put(drawobj);
			break;
		case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE:
			dma_fence_remove_callback(event->fence, &event->cb);
			dma_fence_put(event->fence);
			kgsl_drawobj_put(drawobj);
			break;
		}
	}

	/*
	 * If we cancelled an event, there's a good chance that the context is
	 * on a dispatcher queue, so schedule to get it removed.
	 */
	if (!bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS) &&
		drawobj->device->ftbl->drawctxt_sched)
		drawobj->device->ftbl->drawctxt_sched(drawobj->device,
			drawobj->context);
}
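
/*
 * Runs once sig_refcount drops to zero: signal each timeline at its target
 * seqno, then release the timeline, context and array references.
 */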
static void _drawobj_timelineobj_retire(struct kref *kref)
{
	int i;
	struct kgsl_drawobj_timeline *timelineobj = container_of(kref,
		struct kgsl_drawobj_timeline, sig_refcount);

	for (i = 0; i < timelineobj->count; i++) {
		kgsl_timeline_signal(timelineobj->timelines[i].timeline,
			timelineobj->timelines[i].seqno);

		kgsl_timeline_put(timelineobj->timelines[i].timeline);
		kgsl_context_put(timelineobj->timelines[i].context);
	}

	kvfree(timelineobj->timelines);
	timelineobj->timelines = NULL;
	timelineobj->count = 0;
}

static void kgsl_timelineobj_signal(struct kgsl_drawobj_timeline *timelineobj)
{
	kref_put(&timelineobj->sig_refcount, _drawobj_timelineobj_retire);
}

static void timelineobj_destroy(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_timeline *timelineobj = TIMELINEOBJ(drawobj);
	int i;

	/*
	 * At this point any syncobjs blocking this timelineobj have been
	 * signaled. The timelineobj now only needs all preceding timestamps to
	 * retire before signaling the timelines. Notify timelines to keep them
	 * in sync with the timestamps as they retire.
	 */
	for (i = 0; i < timelineobj->count; i++)
		kgsl_timeline_add_signal(&timelineobj->timelines[i]);

	/*
	 * The scheduler is done with the timelineobj. Put the initial
	 * sig_refcount to continue with the signaling process.
	 */
	kgsl_timelineobj_signal(timelineobj);
}

static void bindobj_destroy(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_bind *bindobj = BINDOBJ(drawobj);

	kgsl_sharedmem_put_bind_op(bindobj->bind);
}

static void cmdobj_destroy(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);
	struct kgsl_memobj_node *mem, *tmpmem;

	/*
	 * Release the refcount on the mem entry associated with the
	 * ib profiling buffer
	 */
	if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING)
		kgsl_mem_entry_put(cmdobj->profiling_buf_entry);

	/* Destroy the command list */
	list_for_each_entry_safe(mem, tmpmem, &cmdobj->cmdlist, node) {
		list_del_init(&mem->node);
		kmem_cache_free(memobjs_cache, mem);
	}

	/* Destroy the memory list */
	list_for_each_entry_safe(mem, tmpmem, &cmdobj->memlist, node) {
		list_del_init(&mem->node);
		kmem_cache_free(memobjs_cache, mem);
	}

	if (drawobj->type & CMDOBJ_TYPE) {
		atomic_dec(&drawobj->context->proc_priv->cmd_count);
		atomic_dec(&drawobj->context->proc_priv->period->active_cmds);
	}
}

/**
 * kgsl_drawobj_destroy() - Destroy a kgsl object structure
 * @drawobj: Pointer to the kgsl object to destroy
 *
 * Start the process of destroying a command batch. Cancel any pending events
 * and decrement the refcount. Asynchronous events can still signal after
 * kgsl_drawobj_destroy has returned.
 */
void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
{
	if (IS_ERR_OR_NULL(drawobj))
		return;

	drawobj->destroy(drawobj);
	kgsl_drawobj_put(drawobj);
}
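
/*
 * Called when an external fence sync point signals. Returns true only if
 * this thread retired the event and dropped the drawobj reference.
 */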
static bool drawobj_sync_fence_func(void *priv)
{
	struct kgsl_drawobj_sync_event *event = priv;
	struct event_fence_info *info = event->priv;
	int i;

	for (i = 0; info && i < info->num_fences; i++) {
		trace_syncpoint_fence_expire(event->syncobj,
			info->fences[i].name);
		log_kgsl_syncpoint_fence_expire_event(
			event->syncobj->base.context->id, info->fences[i].name);
	}

	/*
	 * Only call kgsl_drawobj_put() if it's not marked for cancellation
	 * in another thread.
	 */
	if (drawobj_sync_expire(event->device, event)) {
		kgsl_drawobj_put(&event->syncobj->base);
		return true;
	}
	return false;
}
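
/*
 * Copy the user's timeline/seqno pairs into a zero-terminated kernel array
 * that is used only for debug prints and tracing. Returns NULL on failure.
 */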
static struct event_timeline_info *
drawobj_get_sync_timeline_priv(void __user *uptr, u64 usize, u32 count)
{
	int i;
	struct event_timeline_info *priv;

	/* Make sure we don't accidentally overflow count */
	if (count == UINT_MAX)
		return NULL;

	priv = kcalloc(count + 1, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	for (i = 0; i < count; i++, uptr += usize) {
		struct kgsl_timeline_val val;

		if (copy_struct_from_user(&val, sizeof(val), uptr, usize))
			continue;

		priv[i].timeline = val.timeline;
		priv[i].seqno = val.seqno;
	}

	priv[i].timeline = 0;
	return priv;
}
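
/*
 * Add a timeline sync point to a sync obj: build a dma_fence (or fence
 * array) for the requested timeline values and install a callback that
 * retires the event when the fence signals.
 */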
static int drawobj_add_sync_timeline(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *uptr,
	u64 usize)
{
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	struct kgsl_cmd_syncpoint_timeline sync;
	struct kgsl_drawobj_sync_event *event;
	struct dma_fence *fence;
	unsigned int id;
	int ret;

	if (copy_struct_from_user(&sync, sizeof(sync), uptr, usize))
		return -EFAULT;

	fence = kgsl_timelines_to_fence_array(device, sync.timelines,
		sync.count, sync.timelines_size, false);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	kref_get(&drawobj->refcount);

	id = syncobj->numsyncs++;

	event = &syncobj->synclist[id];

	event->id = id;
	event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMELINE;
	event->syncobj = syncobj;
	event->device = device;
	event->context = NULL;
	event->fence = fence;
	INIT_WORK(&event->work, drawobj_sync_timeline_fence_work);
	INIT_LIST_HEAD(&event->cb.node);

	event->priv =
		drawobj_get_sync_timeline_priv(u64_to_user_ptr(sync.timelines),
			sync.timelines_size, sync.count);

	/* Set pending flag before adding callback to avoid race */
	set_bit(event->id, &syncobj->pending);

	/* Get a dma_fence refcount to hand over to the callback */
	dma_fence_get(event->fence);
	ret = dma_fence_add_callback(event->fence,
		&event->cb, drawobj_sync_timeline_fence_callback);

	if (ret) {
		clear_bit(event->id, &syncobj->pending);

		if (dma_fence_is_signaled(event->fence)) {
			trace_syncpoint_fence_expire(syncobj, "signaled");
			log_kgsl_syncpoint_fence_expire_event(
				syncobj->base.context->id, "signaled");
			dma_fence_put(event->fence);
			ret = 0;
		}

		/* Put the refcount from fence creation */
		dma_fence_put(event->fence);
		kgsl_drawobj_put(drawobj);
		return ret;
	}

	trace_syncpoint_timeline_fence(event->syncobj, event->fence, false);

	/* Put the refcount from fence creation */
	dma_fence_put(event->fence);
	return 0;
}
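
/*
 * Add an external (sync file) fence sync point to a sync obj. A NULL
 * handle from kgsl_sync_fence_async_wait() means the fence had already
 * signaled, which is treated as success.
 */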
static int drawobj_add_sync_fence(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *data,
	u64 datasize)
{
	struct kgsl_cmd_syncpoint_fence sync;
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	struct kgsl_drawobj_sync_event *event;
	struct event_fence_info *priv;
	unsigned int id, i;

	if (copy_struct_from_user(&sync, sizeof(sync), data, datasize))
		return -EFAULT;

	kref_get(&drawobj->refcount);

	id = syncobj->numsyncs++;

	event = &syncobj->synclist[id];

	event->id = id;
	event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
	event->syncobj = syncobj;
	event->device = device;
	event->context = NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	set_bit(event->id, &syncobj->pending);

	event->handle = kgsl_sync_fence_async_wait(sync.fd, drawobj_sync_fence_func, event);
	event->priv = priv;

	if (IS_ERR_OR_NULL(event->handle)) {
		int ret = PTR_ERR(event->handle);

		clear_bit(event->id, &syncobj->pending);
		event->handle = NULL;

		kgsl_drawobj_put(drawobj);

		/*
		 * If ret == 0 the fence was already signaled - print a trace
		 * message so we can track that
		 */
		if (ret == 0) {
			trace_syncpoint_fence_expire(syncobj, "signaled");
			log_kgsl_syncpoint_fence_expire_event(
				syncobj->base.context->id, "signaled");
		}

		return ret;
	}

	kgsl_get_fence_info(event);

	for (i = 0; priv && i < priv->num_fences; i++) {
		trace_syncpoint_fence(syncobj, priv->fences[i].name);
		log_kgsl_syncpoint_fence_event(syncobj->base.context->id,
			priv->fences[i].name);
	}

	return 0;
}

/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
 * @device: KGSL device
 * @syncobj: KGSL sync obj to add the sync point to
 * @timestamp: Timestamp sync point structure passed by the user
 *
 * Add a new sync point timestamp event to the sync obj.
 */
static int drawobj_add_sync_timestamp(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj,
	struct kgsl_cmd_syncpoint_timestamp *timestamp)
{
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	struct kgsl_context *context = kgsl_context_get(device,
		timestamp->context_id);
	struct kgsl_drawobj_sync_event *event;
	int ret = -EINVAL;
	unsigned int id;

	if (context == NULL)
		return -EINVAL;

	/*
	 * We allow somebody to create a sync point on their own context.
	 * This has the effect of delaying a command from submitting until the
	 * dependent command has cleared. That said we obviously can't let them
	 * create a sync point on a future timestamp.
	 */
	if (context == drawobj->context) {
		unsigned int queued;

		kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
			&queued);

		if (timestamp_cmp(timestamp->timestamp, queued) > 0) {
			dev_err(device->dev,
				"Cannot create syncpoint for future timestamp %d (current %d)\n",
				timestamp->timestamp, queued);
			goto done;
		}
	}

	kref_get(&drawobj->refcount);

	id = syncobj->numsyncs++;

	event = &syncobj->synclist[id];
	event->id = id;

	event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
	event->syncobj = syncobj;
	event->context = context;
	event->timestamp = timestamp->timestamp;
	event->device = device;

	set_bit(event->id, &syncobj->pending);

	ret = kgsl_add_event(device, &context->events, timestamp->timestamp,
		drawobj_sync_func, event);

	if (ret) {
		clear_bit(event->id, &syncobj->pending);
		kgsl_drawobj_put(drawobj);
	} else {
		trace_syncpoint_timestamp(syncobj, context,
			timestamp->timestamp);
	}

done:
	if (ret)
		kgsl_context_put(context);

	return ret;
}

static int drawobj_add_sync_timestamp_from_user(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *data,
	u64 datasize)
{
	struct kgsl_cmd_syncpoint_timestamp timestamp;

	if (copy_struct_from_user(&timestamp, sizeof(timestamp),
		data, datasize))
		return -EFAULT;

	return drawobj_add_sync_timestamp(device, syncobj, &timestamp);
}

/**
 * kgsl_drawobj_sync_add_sync() - Add a sync point to a command
 * batch
 * @device: Pointer to the KGSL device struct for the GPU
 * @syncobj: Pointer to the sync obj
 * @sync: Pointer to the user-specified struct defining the syncpoint
 *
 * Create a new sync point in the sync obj based on the
 * user specified parameters
 */
int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj,
	struct kgsl_cmd_syncpoint *sync)
{
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);

	if (sync->type != KGSL_CMD_SYNCPOINT_TYPE_FENCE)
		syncobj->flags |= KGSL_SYNCOBJ_SW;

	if (sync->type == KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP)
		return drawobj_add_sync_timestamp_from_user(device,
			syncobj, sync->priv, sync->size);
	else if (sync->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE)
		return drawobj_add_sync_fence(device,
			syncobj, sync->priv, sync->size);
	else if (sync->type == KGSL_CMD_SYNCPOINT_TYPE_TIMELINE)
		return drawobj_add_sync_timeline(device,
			syncobj, sync->priv, sync->size);

	dev_err(device->dev, "bad syncpoint type %d for ctxt %u\n",
		sync->type, drawobj->context->id);

	return -EINVAL;
}
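
/*
 * Look up the mem entry backing the user's profiling buffer and hold a
 * reference to it, after checking that both the requested range and the
 * profiling struct fit entirely inside the entry. Only the first profiling
 * buffer on a cmdobj is honored.
 */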
static void add_profiling_buffer(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj,
	uint64_t gpuaddr, uint64_t size,
	unsigned int id, uint64_t offset)
{
	struct kgsl_mem_entry *entry;
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
	u64 start;

	if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
		return;

	/* Only the first buffer entry counts - ignore the rest */
	if (cmdobj->profiling_buf_entry != NULL)
		return;

	if (id != 0)
		entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
			id);
	else
		entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
			gpuaddr);

	if (entry != NULL) {
		start = id ? (entry->memdesc.gpuaddr + offset) : gpuaddr;
		/*
		 * Make sure there is enough room in the object to store the
		 * entire profiling buffer object
		 */
		if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size) ||
			!kgsl_gpuaddr_in_memdesc(&entry->memdesc, start,
				sizeof(struct kgsl_drawobj_profiling_buffer))) {
			kgsl_mem_entry_put(entry);
			entry = NULL;
		}
	}

	if (entry == NULL) {
		dev_err(device->dev,
			"ignore bad profile buffer ctxt %u id %d offset %lld gpuaddr %llx size %lld\n",
			drawobj->context->id, id, offset, gpuaddr, size);
		return;
	}

	cmdobj->profiling_buffer_gpuaddr = start;
	cmdobj->profiling_buf_entry = entry;
}

/**
 * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command
 * batch
 * @device: Pointer to the KGSL device struct for the GPU
 * @cmdobj: Pointer to the command obj
 * @ibdesc: Pointer to the user-specified struct defining the memory or IB
 *
 * Create a new memory entry in the ib based on the
 * user specified parameters
 */
int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc)
{
	uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
	uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
	struct kgsl_memobj_node *mem;
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);

	/* sanitize the ibdesc ctrl flags */
	ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;

	if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
		ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
		if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
			add_profiling_buffer(device, cmdobj,
				gpuaddr, size, 0, 0);
			return 0;
		}
	}

	/* Ignore if SYNC or MARKER is specified */
	if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE))
		return 0;

	mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->gpuaddr = gpuaddr;
	mem->size = size;
	mem->priv = 0;
	mem->id = 0;
	mem->offset = 0;
	mem->flags = 0;

	if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
		ibdesc->ctrl & KGSL_IBDESC_MEMLIST)
		/* add to the memlist */
		list_add_tail(&mem->node, &cmdobj->memlist);
	else {
		/* set the preamble flag if directed to */
		if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
			list_empty(&cmdobj->cmdlist))
			mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;

		/* add to the cmd list */
		list_add_tail(&mem->node, &cmdobj->cmdlist);
	}

	return 0;
}

static int drawobj_init(struct kgsl_device *device,
	struct kgsl_context *context, struct kgsl_drawobj *drawobj,
	int type)
{
	/*
	 * Increase the reference count on the context so it doesn't disappear
	 * during the lifetime of this object
	 */
	if (!_kgsl_context_get(context))
		return -ENOENT;

	kref_init(&drawobj->refcount);

	drawobj->device = device;
	drawobj->context = context;
	drawobj->type = type;

	return 0;
}
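
/*
 * Copy a kgsl_gpu_aux_command_generic header from userspace, verify it
 * carries the expected aux command type and then copy in the typed payload.
 */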
static int get_aux_command(void __user *ptr, u64 generic_size,
	int type, void *auxcmd, size_t auxcmd_size)
{
	struct kgsl_gpu_aux_command_generic generic;
	u64 size;

	if (copy_struct_from_user(&generic, sizeof(generic), ptr, generic_size))
		return -EFAULT;

	if (generic.type != type)
		return -EINVAL;

	size = min_t(u64, auxcmd_size, generic.size);

	if (copy_from_user(auxcmd, u64_to_user_ptr(generic.priv), size))
		return -EFAULT;

	return 0;
}
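
/*
 * Allocate and initialize a timeline draw object on the given context.
 * Returns the new object or an ERR_PTR() on failure.
 */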
struct kgsl_drawobj_timeline *
kgsl_drawobj_timeline_create(struct kgsl_device *device,
	struct kgsl_context *context)
{
	int ret;
	struct kgsl_drawobj_timeline *timelineobj =
		kzalloc(sizeof(*timelineobj), GFP_KERNEL);

	if (!timelineobj)
		return ERR_PTR(-ENOMEM);

	ret = drawobj_init(device, context, &timelineobj->base,
		TIMELINEOBJ_TYPE);
	if (ret) {
		kfree(timelineobj);
		return ERR_PTR(ret);
	}

	/*
	 * Initialize the sig_refcount that triggers the timeline signal.
	 * This refcount goes to 0 when:
	 * 1) This timelineobj is popped off the context queue. This implies
	 *    any syncobj blocking this timelineobj was already signaled, or
	 *    the context queue is cleaned up at detach time.
	 * 2) The cmdobjs queued on this context before this timeline object
	 *    are retired.
	 */
	kref_init(&timelineobj->sig_refcount);

	timelineobj->base.destroy = timelineobj_destroy;
	timelineobj->base.destroy_object = timelineobj_destroy_object;

	return timelineobj;
}

static void _timeline_signaled(struct kgsl_device *device,
	struct kgsl_event_group *group, void *priv, int ret)
{
	struct kgsl_drawobj_timeline *timelineobj = priv;
	struct kgsl_drawobj *drawobj = DRAWOBJ(timelineobj);

	/* Put the sig_refcount we took when registering this event */
	kgsl_timelineobj_signal(timelineobj);

	/* Put the drawobj refcount we took when registering this event */
	kgsl_drawobj_put(drawobj);
}
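
/*
 * Parse a KGSL_GPU_AUX_COMMAND_TIMELINE payload: look up each timeline,
 * pin the context, record the last queued timestamp and register an event
 * so the timelines are signaled once that timestamp retires.
 */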
int kgsl_drawobj_add_timeline(struct kgsl_device_private *dev_priv,
	struct kgsl_drawobj_timeline *timelineobj,
	void __user *src, u64 cmdsize)
{
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_gpu_aux_command_timeline cmd;
	struct kgsl_drawobj *drawobj = DRAWOBJ(timelineobj);
	struct kgsl_context *context = drawobj->context;
	int i, ret;
	u32 queued;

	memset(&cmd, 0, sizeof(cmd));

	ret = get_aux_command(src, cmdsize,
		KGSL_GPU_AUX_COMMAND_TIMELINE, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (!cmd.count)
		return -EINVAL;

	timelineobj->timelines = kvcalloc(cmd.count,
		sizeof(*timelineobj->timelines),
		GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	if (!timelineobj->timelines)
		return -ENOMEM;

	src = u64_to_user_ptr(cmd.timelines);

	/* Get the last queued timestamp on the drawobj context */
	ret = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queued);
	if (ret)
		return ret;

	for (i = 0; i < cmd.count; i++) {
		struct kgsl_timeline_val val;

		if (copy_struct_from_user(&val, sizeof(val), src,
			cmd.timelines_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (val.padding) {
			ret = -EINVAL;
			goto err;
		}

		timelineobj->timelines[i].timeline =
			kgsl_timeline_by_id(dev_priv->device,
				val.timeline);
		if (!timelineobj->timelines[i].timeline) {
			ret = -ENODEV;
			goto err;
		}

		/* Get a context refcount so we can use the context pointer */
		if (!_kgsl_context_get(context)) {
			ret = -ENODEV;
			goto err;
		}

		trace_kgsl_drawobj_timeline(val.timeline, val.seqno);
		timelineobj->timelines[i].seqno = val.seqno;
		timelineobj->timelines[i].context = context;
		timelineobj->timelines[i].timestamp = queued;

		src += cmd.timelines_size;
	}

	timelineobj->count = cmd.count;

	/*
	 * Register a kgsl_event to notify us when the last queued timestamp
	 * retires. Take a refcount on the drawobj to keep it valid for the
	 * callback, and take the sig_refcount to synchronize with the
	 * timelineobj retire. Both these refcounts are put in the callback.
	 */
	kref_get(&drawobj->refcount);
	kref_get(&timelineobj->sig_refcount);
	ret = kgsl_add_event(device, &context->events, queued,
		_timeline_signaled, timelineobj);
	if (ret)
		goto event_err;

	return 0;

event_err:
	/*
	 * If there was an error, put back the sig_refcount and drawobj
	 * refcounts. The caller still holds initial refcounts on both and
	 * puts them in kgsl_drawobj_destroy(). Clean up the timelines array
	 * since we do not want to signal anything now.
	 */
	kgsl_timelineobj_signal(timelineobj);
	kgsl_drawobj_put(drawobj);
err:
	for (i = 0; i < cmd.count; i++) {
		kgsl_timeline_put(timelineobj->timelines[i].timeline);
		kgsl_context_put(timelineobj->timelines[i].context);
	}

	kvfree(timelineobj->timelines);
	timelineobj->timelines = NULL;
	return ret;
}

static void kgsl_drawobj_bind_callback(struct kgsl_sharedmem_bind_op *op)
{
	struct kgsl_drawobj_bind *bindobj = op->data;
	struct kgsl_drawobj *drawobj = DRAWOBJ(bindobj);
	struct kgsl_device *device = drawobj->device;

	set_bit(KGSL_BINDOBJ_STATE_DONE, &bindobj->state);

	/* Re-schedule the context */
	if (device->ftbl->drawctxt_sched)
		device->ftbl->drawctxt_sched(device,
			drawobj->context);

	/* Put back the reference we took when we started the operation */
	kgsl_context_put(drawobj->context);
	kgsl_drawobj_put(drawobj);
}
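
/*
 * Parse a KGSL_GPU_AUX_COMMAND_BIND payload and attach the resulting
 * sharedmem bind operation to the bind draw object.
 */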
int kgsl_drawobj_add_bind(struct kgsl_device_private *dev_priv,
	struct kgsl_drawobj_bind *bindobj,
	void __user *src, u64 cmdsize)
{
	struct kgsl_gpu_aux_command_bind cmd;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_sharedmem_bind_op *op;
	int ret;

	ret = get_aux_command(src, cmdsize,
		KGSL_GPU_AUX_COMMAND_BIND, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	op = kgsl_sharedmem_create_bind_op(private, cmd.target,
		u64_to_user_ptr(cmd.rangeslist), cmd.numranges,
		cmd.rangesize);
	if (IS_ERR(op))
		return PTR_ERR(op);

	op->callback = kgsl_drawobj_bind_callback;
	op->data = bindobj;

	bindobj->bind = op;
	return 0;
}

struct kgsl_drawobj_bind *kgsl_drawobj_bind_create(struct kgsl_device *device,
	struct kgsl_context *context)
{
	int ret;
	struct kgsl_drawobj_bind *bindobj =
		kzalloc(sizeof(*bindobj), GFP_KERNEL);

	if (!bindobj)
		return ERR_PTR(-ENOMEM);

	ret = drawobj_init(device, context, &bindobj->base, BINDOBJ_TYPE);
	if (ret) {
		kfree(bindobj);
		return ERR_PTR(ret);
	}

	bindobj->base.destroy = bindobj_destroy;
	bindobj->base.destroy_object = bindobj_destroy_object;

	return bindobj;
}

/**
 * kgsl_drawobj_sync_create() - Create a new sync obj
 * structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 *
 * Allocate a new kgsl_drawobj_sync structure
 */
struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
	struct kgsl_context *context)
{
	struct kgsl_drawobj_sync *syncobj =
		kzalloc(sizeof(*syncobj), GFP_KERNEL);
	int ret;

	if (!syncobj)
		return ERR_PTR(-ENOMEM);

	ret = drawobj_init(device, context, &syncobj->base, SYNCOBJ_TYPE);
	if (ret) {
		kfree(syncobj);
		return ERR_PTR(ret);
	}

	syncobj->base.destroy = syncobj_destroy;
	syncobj->base.destroy_object = syncobj_destroy_object;

	timer_setup(&syncobj->timer, syncobj_timer, 0);

	return syncobj;
}

/**
 * kgsl_drawobj_cmd_create() - Create a new command obj
 * structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 * @flags: Flags for the command obj
 * @type: type of cmdobj MARKER/CMD
 *
 * Allocate a new kgsl_drawobj_cmd structure
 */
struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int flags,
	unsigned int type)
{
	struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
	int ret;

	if (!cmdobj)
		return ERR_PTR(-ENOMEM);

	ret = drawobj_init(device, context, &cmdobj->base,
		(type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)));
	if (ret) {
		kfree(cmdobj);
		return ERR_PTR(ret);
	}

	cmdobj->base.destroy = cmdobj_destroy;
	cmdobj->base.destroy_object = cmdobj_destroy_object;

	/* sanitize our flags for drawobjs */
	cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
		| KGSL_DRAWOBJ_MARKER
		| KGSL_DRAWOBJ_END_OF_FRAME
		| KGSL_DRAWOBJ_PWR_CONSTRAINT
		| KGSL_DRAWOBJ_MEMLIST
		| KGSL_DRAWOBJ_PROFILING
		| KGSL_DRAWOBJ_PROFILING_KTIME
		| KGSL_DRAWOBJ_START_RECURRING
		| KGSL_DRAWOBJ_STOP_RECURRING);

	INIT_LIST_HEAD(&cmdobj->cmdlist);
	INIT_LIST_HEAD(&cmdobj->memlist);

	cmdobj->requeue_cnt = 0;

	if (!(type & CMDOBJ_TYPE))
		return cmdobj;

	atomic_inc(&context->proc_priv->cmd_count);
	atomic_inc(&context->proc_priv->period->active_cmds);

	spin_lock(&device->work_period_lock);
	if (!__test_and_set_bit(KGSL_WORK_PERIOD, &device->flags)) {
		mod_timer(&device->work_period_timer,
			jiffies + msecs_to_jiffies(KGSL_WORK_PERIOD_MS));
		device->gpu_period.begin = ktime_get_ns();
	}

	/* Take a refcount here and put it back in kgsl_work_period_timer() */
	if (!__test_and_set_bit(KGSL_WORK_PERIOD, &context->proc_priv->period->flags))
		kref_get(&context->proc_priv->period->refcount);
	spin_unlock(&device->work_period_lock);

	return cmdobj;
}

#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	int i, ret = 0;
	struct kgsl_ibdesc_compat ibdesc32;
	struct kgsl_ibdesc ibdesc;

	for (i = 0; i < count; i++) {
		memset(&ibdesc32, 0, sizeof(ibdesc32));

		if (copy_from_user(&ibdesc32, ptr, sizeof(ibdesc32))) {
			ret = -EFAULT;
			break;
		}

		ibdesc.gpuaddr = (unsigned long) ibdesc32.gpuaddr;
		ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
		ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;

		ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
		if (ret)
			break;

		ptr += sizeof(ibdesc32);
	}

	return ret;
}

static int add_syncpoints_compat(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	struct kgsl_cmd_syncpoint_compat sync32;
	struct kgsl_cmd_syncpoint sync;
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		memset(&sync32, 0, sizeof(sync32));

		if (copy_from_user(&sync32, ptr, sizeof(sync32))) {
			ret = -EFAULT;
			break;
		}

		sync.type = sync32.type;
		sync.priv = compat_ptr(sync32.priv);
		sync.size = (size_t) sync32.size;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			break;

		ptr += sizeof(sync32);
	}

	return ret;
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	return -EINVAL;
}

static int add_syncpoints_compat(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	return -EINVAL;
}
#endif

/* Returns:
 *   -EINVAL: Bad data
 *   0: All data fields are empty (nothing to do)
 *   1: All list information is valid
 */
static int _verify_input_list(unsigned int count, void __user *ptr,
	unsigned int size)
{
	/* Return early if nothing going on */
	if (count == 0 && ptr == NULL && size == 0)
		return 0;

	/* Sanity check inputs */
	if (count == 0 || ptr == NULL || size == 0)
		return -EINVAL;

	return 1;
}

int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	struct kgsl_ibdesc ibdesc;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, sizeof(ibdesc));
	if (ret <= 0)
		return -EINVAL;

	if (is_compat_task())
		return add_ibdesc_list_compat(device, cmdobj, ptr, count);

	for (i = 0; i < count; i++) {
		memset(&ibdesc, 0, sizeof(ibdesc));

		if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
			return -EFAULT;

		ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
		if (ret)
			return ret;

		ptr += sizeof(ibdesc);
	}

	return 0;
}
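
/*
 * Allocate the synclist for a legacy submission and add each user-supplied
 * sync point, taking the compat path for 32-bit callers.
 */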
int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	struct kgsl_cmd_syncpoint sync;
	int i, ret;

	if (count == 0)
		return 0;

	syncobj->synclist = kcalloc(count,
		sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);

	if (syncobj->synclist == NULL)
		return -ENOMEM;

	if (is_compat_task())
		return add_syncpoints_compat(device, syncobj, ptr, count);

	for (i = 0; i < count; i++) {
		memset(&sync, 0, sizeof(sync));

		if (copy_from_user(&sync, ptr, sizeof(sync)))
			return -EFAULT;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			return ret;

		ptr += sizeof(sync);
	}

	return 0;
}

static int kgsl_drawobj_add_memobject(struct list_head *head,
	struct kgsl_command_object *obj)
{
	struct kgsl_memobj_node *mem;

	mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->gpuaddr = obj->gpuaddr;
	mem->size = obj->size;
	mem->id = obj->id;
	mem->offset = obj->offset;
	mem->flags = obj->flags;
	mem->priv = 0;

	list_add_tail(&mem->node, head);
	return 0;
}

#define CMDLIST_FLAGS \
	(KGSL_CMDLIST_IB | \
	 KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
	 KGSL_CMDLIST_IB_PREAMBLE)

/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */
int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
	unsigned int size, unsigned int count)
{
	struct kgsl_command_object obj;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		if (copy_struct_from_user(&obj, sizeof(obj), ptr, size))
			return -EFAULT;

		/* Sanity check the flags */
		if (!(obj.flags & CMDLIST_FLAGS)) {
			dev_err(device->dev,
				"invalid cmdobj ctxt %u flags %d id %d offset %llu addr %llx size %llu\n",
				baseobj->context->id, obj.flags, obj.id,
				obj.offset, obj.gpuaddr, obj.size);
			return -EINVAL;
		}

		ret = kgsl_drawobj_add_memobject(&cmdobj->cmdlist, &obj);
		if (ret)
			return ret;

		ptr += sizeof(obj);
	}

	return 0;
}

int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
	unsigned int size, unsigned int count)
{
	struct kgsl_command_object obj;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		if (copy_struct_from_user(&obj, sizeof(obj), ptr, size))
			return -EFAULT;

		if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
			dev_err(device->dev,
				"invalid memobj ctxt %u flags %d id %d offset %lld addr %lld size %lld\n",
				DRAWOBJ(cmdobj)->context->id, obj.flags,
				obj.id, obj.offset, obj.gpuaddr,
				obj.size);
			return -EINVAL;
		}

		if (obj.flags & KGSL_OBJLIST_PROFILE)
			add_profiling_buffer(device, cmdobj, obj.gpuaddr,
				obj.size, obj.id, obj.offset);
		else {
			ret = kgsl_drawobj_add_memobject(&cmdobj->memlist,
				&obj);
			if (ret)
				return ret;
		}

		ptr += sizeof(obj);
	}

	return 0;
}
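
/*
 * Convenience helper: build a sync obj containing a single timestamp sync
 * point on the given context.
 */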
struct kgsl_drawobj_sync *
kgsl_drawobj_create_timestamp_syncobj(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int timestamp)
{
	struct kgsl_drawobj_sync *syncobj;
	struct kgsl_cmd_syncpoint_timestamp priv;
	int ret;

	syncobj = kgsl_drawobj_sync_create(device, context);
	if (IS_ERR(syncobj))
		return syncobj;

	syncobj->synclist = kzalloc(sizeof(*syncobj->synclist), GFP_KERNEL);
	if (!syncobj->synclist) {
		kgsl_drawobj_destroy(DRAWOBJ(syncobj));
		return ERR_PTR(-ENOMEM);
	}

	priv.timestamp = timestamp;
	priv.context_id = context->id;

	ret = drawobj_add_sync_timestamp(device, syncobj, &priv);
	if (ret) {
		kgsl_drawobj_destroy(DRAWOBJ(syncobj));
		return ERR_PTR(ret);
	}

	return syncobj;
}

int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr,
	unsigned int size, unsigned int count)
{
	struct kgsl_command_syncpoint syncpoint;
	struct kgsl_cmd_syncpoint sync;
	int i, ret;

	/* If creating a sync and the data is not there or wrong then error */
	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return -EINVAL;

	syncobj->synclist = kcalloc(count,
		sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);

	if (syncobj->synclist == NULL)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		if (copy_struct_from_user(&syncpoint, sizeof(syncpoint), ptr, size))
			return -EFAULT;

		sync.type = syncpoint.type;
		sync.priv = u64_to_user_ptr(syncpoint.priv);
		sync.size = syncpoint.size;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			return ret;

		ptr += sizeof(syncpoint);
	}

	return 0;
}

void kgsl_drawobjs_cache_exit(void)
{
	kmem_cache_destroy(memobjs_cache);
}

int kgsl_drawobjs_cache_init(void)
{
	memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
	if (!memobjs_cache)
		return -ENOMEM;

	return 0;
}