/* ttm_resource.c */
  1. /*
  2. * Copyright 2020 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Christian König
  23. */
  24. #include <linux/iosys-map.h>
  25. #include <linux/io-mapping.h>
  26. #include <linux/scatterlist.h>
  27. #include <drm/ttm/ttm_resource.h>
  28. #include <drm/ttm/ttm_bo_driver.h>
/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * For now just memset the structure to zero.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	/* Zeroing all pos[][] cursors marks every bulk range as empty. */
	memset(bulk, 0, sizeof(*bulk));
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
  40. /**
  41. * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
  42. *
  43. * @bulk: bulk move structure
  44. *
  45. * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
  46. * resource order never changes. Should be called with &ttm_device.lru_lock held.
  47. */
  48. void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
  49. {
  50. unsigned i, j;
  51. for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
  52. for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
  53. struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
  54. struct ttm_resource_manager *man;
  55. if (!pos->first)
  56. continue;
  57. lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
  58. dma_resv_assert_held(pos->first->bo->base.resv);
  59. dma_resv_assert_held(pos->last->bo->base.resv);
  60. man = ttm_manager_type(pos->first->bo->bdev, i);
  61. list_bulk_move_tail(&man->lru[j], &pos->first->lru,
  62. &pos->last->lru);
  63. }
  64. }
  65. }
  66. EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
  67. /* Return the bulk move pos object for this resource */
  68. static struct ttm_lru_bulk_move_pos *
  69. ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
  70. {
  71. return &bulk->pos[res->mem_type][res->bo->priority];
  72. }
  73. /* Move the resource to the tail of the bulk move range */
  74. static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
  75. struct ttm_resource *res)
  76. {
  77. if (pos->last != res) {
  78. if (pos->first == res)
  79. pos->first = list_next_entry(res, lru);
  80. list_move(&res->lru, &pos->last->lru);
  81. pos->last = res;
  82. }
  83. }
  84. /* Add the resource to a bulk_move cursor */
  85. static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
  86. struct ttm_resource *res)
  87. {
  88. struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
  89. if (!pos->first) {
  90. pos->first = res;
  91. pos->last = res;
  92. } else {
  93. ttm_lru_bulk_move_pos_tail(pos, res);
  94. }
  95. }
/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		/* Corrupted range, or res was the sole member: empty it. */
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		/* Removing the head, advance it to the next member. */
		pos->first = list_next_entry(res, lru);
	} else if (pos->last == res) {
		/* Removing the tail, back it up to the previous member. */
		pos->last = list_prev_entry(res, lru);
	} else {
		/*
		 * res is in the middle of the range: move it just past the
		 * current tail so the [first, last] span stays contiguous.
		 */
		list_move(&res->lru, &pos->last->lru);
	}
}
  113. /* Add the resource to a bulk move if the BO is configured for it */
  114. void ttm_resource_add_bulk_move(struct ttm_resource *res,
  115. struct ttm_buffer_object *bo)
  116. {
  117. if (bo->bulk_move && !bo->pin_count)
  118. ttm_lru_bulk_move_add(bo->bulk_move, res);
  119. }
  120. /* Remove the resource from a bulk move if the BO is configured for it */
  121. void ttm_resource_del_bulk_move(struct ttm_resource *res,
  122. struct ttm_buffer_object *bo)
  123. {
  124. if (bo->bulk_move && !bo->pin_count)
  125. ttm_lru_bulk_move_del(bo->bulk_move, res);
  126. }
  127. /* Move a resource to the LRU or bulk tail */
  128. void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
  129. {
  130. struct ttm_buffer_object *bo = res->bo;
  131. struct ttm_device *bdev = bo->bdev;
  132. lockdep_assert_held(&bo->bdev->lru_lock);
  133. if (bo->pin_count) {
  134. list_move_tail(&res->lru, &bdev->pinned);
  135. } else if (bo->bulk_move) {
  136. struct ttm_lru_bulk_move_pos *pos =
  137. ttm_lru_bulk_move_pos(bo->bulk_move, res);
  138. ttm_lru_bulk_move_pos_tail(pos, res);
  139. } else {
  140. struct ttm_resource_manager *man;
  141. man = ttm_manager_type(bdev, res->mem_type);
  142. list_move_tail(&res->lru, &man->lru[bo->priority]);
  143. }
  144. }
/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->num_pages = PFN_UP(bo->base.size);
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	/* Bus mapping info is filled in later by io_mem_reserve. */
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	/* Pinned BOs go on the device pinned list, not the manager LRU. */
	if (bo->pin_count)
		list_add_tail(&res->lru, &bo->bdev->pinned);
	else
		list_add_tail(&res->lru, &man->lru[bo->priority]);
	man->usage += res->num_pages << PAGE_SHIFT;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
  177. /**
  178. * ttm_resource_fini - resource destructor
  179. * @man: the resource manager this resource belongs to
  180. * @res: the resource to clean up
  181. *
  182. * Should be used by resource manager backends to clean up the TTM resource
  183. * objects before freeing the underlying structure. Makes sure the resource is
  184. * removed from the LRU before destruction.
  185. * Counterpart of ttm_resource_init().
  186. */
  187. void ttm_resource_fini(struct ttm_resource_manager *man,
  188. struct ttm_resource *res)
  189. {
  190. struct ttm_device *bdev = man->bdev;
  191. spin_lock(&bdev->lru_lock);
  192. list_del_init(&res->lru);
  193. man->usage -= res->num_pages << PAGE_SHIFT;
  194. spin_unlock(&bdev->lru_lock);
  195. }
  196. EXPORT_SYMBOL(ttm_resource_fini);
  197. int ttm_resource_alloc(struct ttm_buffer_object *bo,
  198. const struct ttm_place *place,
  199. struct ttm_resource **res_ptr)
  200. {
  201. struct ttm_resource_manager *man =
  202. ttm_manager_type(bo->bdev, place->mem_type);
  203. int ret;
  204. ret = man->func->alloc(man, bo, place, res_ptr);
  205. if (ret)
  206. return ret;
  207. spin_lock(&bo->bdev->lru_lock);
  208. ttm_resource_add_bulk_move(*res_ptr, bo);
  209. spin_unlock(&bo->bdev->lru_lock);
  210. return 0;
  211. }
/* Free a resource allocated with ttm_resource_alloc() and clear *res. */
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	/* Freeing a NULL resource is a no-op. */
	if (!*res)
		return;

	/* Detach from any bulk move range before the backend frees it. */
	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);

	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	/* Clear the caller's pointer so it cannot be reused stale. */
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
  225. /**
  226. * ttm_resource_intersects - test for intersection
  227. *
  228. * @bdev: TTM device structure
  229. * @res: The resource to test
  230. * @place: The placement to test
  231. * @size: How many bytes the new allocation needs.
  232. *
  233. * Test if @res intersects with @place and @size. Used for testing if evictions
  234. * are valueable or not.
  235. *
  236. * Returns true if the res placement intersects with @place and @size.
  237. */
  238. bool ttm_resource_intersects(struct ttm_device *bdev,
  239. struct ttm_resource *res,
  240. const struct ttm_place *place,
  241. size_t size)
  242. {
  243. struct ttm_resource_manager *man;
  244. if (!res)
  245. return false;
  246. man = ttm_manager_type(bdev, res->mem_type);
  247. if (!place || !man->func->intersects)
  248. return true;
  249. return man->func->intersects(man, res, place, size);
  250. }
  251. /**
  252. * ttm_resource_compatible - test for compatibility
  253. *
  254. * @bdev: TTM device structure
  255. * @res: The resource to test
  256. * @place: The placement to test
  257. * @size: How many bytes the new allocation needs.
  258. *
  259. * Test if @res compatible with @place and @size.
  260. *
  261. * Returns true if the res placement compatible with @place and @size.
  262. */
  263. bool ttm_resource_compatible(struct ttm_device *bdev,
  264. struct ttm_resource *res,
  265. const struct ttm_place *place,
  266. size_t size)
  267. {
  268. struct ttm_resource_manager *man;
  269. if (!res || !place)
  270. return false;
  271. man = ttm_manager_type(bdev, res->mem_type);
  272. if (!man->func->compatible)
  273. return true;
  274. return man->func->compatible(man, res, place, size);
  275. }
  276. static bool ttm_resource_places_compat(struct ttm_resource *res,
  277. const struct ttm_place *places,
  278. unsigned num_placement)
  279. {
  280. struct ttm_buffer_object *bo = res->bo;
  281. struct ttm_device *bdev = bo->bdev;
  282. unsigned i;
  283. if (res->placement & TTM_PL_FLAG_TEMPORARY)
  284. return false;
  285. for (i = 0; i < num_placement; i++) {
  286. const struct ttm_place *heap = &places[i];
  287. if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
  288. continue;
  289. if ((res->mem_type == heap->mem_type) &&
  290. (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
  291. (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
  292. return true;
  293. }
  294. return false;
  295. }
  296. /**
  297. * ttm_resource_compat - check if resource is compatible with placement
  298. *
  299. * @res: the resource to check
  300. * @placement: the placement to check against
  301. *
  302. * Returns true if the placement is compatible.
  303. */
  304. bool ttm_resource_compat(struct ttm_resource *res,
  305. struct ttm_placement *placement)
  306. {
  307. if (ttm_resource_places_compat(res, placement->placement,
  308. placement->num_placement))
  309. return true;
  310. if ((placement->busy_placement != placement->placement ||
  311. placement->num_busy_placement > placement->num_placement) &&
  312. ttm_resource_places_compat(res, placement->busy_placement,
  313. placement->num_busy_placement))
  314. return true;
  315. return false;
  316. }
  317. EXPORT_SYMBOL(ttm_resource_compat);
  318. void ttm_resource_set_bo(struct ttm_resource *res,
  319. struct ttm_buffer_object *bo)
  320. {
  321. spin_lock(&bo->bdev->lru_lock);
  322. res->bo = bo;
  323. spin_unlock(&bo->bdev->lru_lock);
  324. }
  325. /**
  326. * ttm_resource_manager_init
  327. *
  328. * @man: memory manager object to init
  329. * @bdev: ttm device this manager belongs to
  330. * @size: size of managed resources in arbitrary units
  331. *
  332. * Initialise core parts of a manager object.
  333. */
  334. void ttm_resource_manager_init(struct ttm_resource_manager *man,
  335. struct ttm_device *bdev,
  336. uint64_t size)
  337. {
  338. unsigned i;
  339. spin_lock_init(&man->move_lock);
  340. man->bdev = bdev;
  341. man->size = size;
  342. man->usage = 0;
  343. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
  344. INIT_LIST_HEAD(&man->lru[i]);
  345. man->move = NULL;
  346. }
  347. EXPORT_SYMBOL(ttm_resource_manager_init);
/*
 * ttm_resource_manager_evict_all
 *
 * @bdev - device to use
 * @man - manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			/* Eviction may sleep, so drop the lock around it. */
			spin_unlock(&bdev->lru_lock);
			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
						  NULL);
			if (ret)
				return ret;
			/* Re-take the lock and re-test list_empty() fresh. */
			spin_lock(&bdev->lru_lock);
		}
	}
	spin_unlock(&bdev->lru_lock);

	/* Wait for the last move fence so the manager is truly idle. */
	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
  395. /**
  396. * ttm_resource_manager_usage
  397. *
  398. * @man: A memory manager object.
  399. *
  400. * Return how many resources are currently used.
  401. */
  402. uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
  403. {
  404. uint64_t usage;
  405. spin_lock(&man->bdev->lru_lock);
  406. usage = man->usage;
  407. spin_unlock(&man->bdev->lru_lock);
  408. return usage;
  409. }
  410. EXPORT_SYMBOL(ttm_resource_manager_usage);
/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, " use_type: %d\n", man->use_type);
	drm_printf(p, " use_tt: %d\n", man->use_tt);
	drm_printf(p, " size: %llu\n", man->size);
	drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man));
	/* Let the backend dump manager-specific state if it provides it. */
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
/**
 * ttm_resource_manager_first
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Returns the first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
			   struct ttm_resource_cursor *cursor)
{
	struct ttm_resource *res;

	lockdep_assert_held(&man->bdev->lru_lock);

	/*
	 * Walk priorities in order; returning from inside
	 * list_for_each_entry() yields the first entry of the first
	 * non-empty LRU list, leaving cursor->priority at that list.
	 */
	for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
	     ++cursor->priority)
		list_for_each_entry(res, &man->lru[cursor->priority], lru)
			return res;

	return NULL;
}
/**
 * ttm_resource_manager_next
 *
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 * @res: the current resource pointer
 *
 * Returns the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_manager *man,
			  struct ttm_resource_cursor *cursor,
			  struct ttm_resource *res)
{
	lockdep_assert_held(&man->bdev->lru_lock);

	/* First try the remainder of the current priority list ... */
	list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
		return res;

	/* ... then the first entry of the next non-empty priority list. */
	for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
	     ++cursor->priority)
		list_for_each_entry(res, &man->lru[cursor->priority], lru)
			return res;

	return NULL;
}
/*
 * Map page @i of the iomap locally. A small single-entry cache of the
 * current scatterlist segment avoids re-walking the sg table on every call.
 */
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	/* Advance the cached sg segment until it covers page i. */
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	/* Went backwards; the cache only walks forward, so restart it. */
	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}
/* Tear down a local mapping created by ttm_kmap_iter_iomap_map_local(). */
static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}
/* kmap iterator ops for io_mapping-backed resources. */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};
  508. /**
  509. * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
  510. * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
  511. * @iomap: The struct io_mapping representing the underlying linear io_memory.
  512. * @st: sg_table into @iomap, representing the memory of the struct
  513. * ttm_resource.
  514. * @start: Offset that needs to be subtracted from @st to make
  515. * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
  516. *
  517. * Return: Pointer to the embedded struct ttm_kmap_iter.
  518. */
  519. struct ttm_kmap_iter *
  520. ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
  521. struct io_mapping *iomap,
  522. struct sg_table *st,
  523. resource_size_t start)
  524. {
  525. iter_io->base.ops = &ttm_kmap_iter_io_ops;
  526. iter_io->iomap = iomap;
  527. iter_io->st = st;
  528. iter_io->start = start;
  529. memset(&iter_io->cache, 0, sizeof(iter_io->cache));
  530. return &iter_io->base;
  531. }
  532. EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
  533. /**
  534. * DOC: Linear io iterator
  535. *
  536. * This code should die in the not too near future. Best would be if we could
  537. * make io-mapping use memremap for all io memory, and have memremap
  538. * implement a kmap_local functionality. We could then strip a huge amount of
  539. * code. These linear io iterators are implemented to mimic old functionality,
  540. * and they don't use kmap_local semantics at all internally. Rather ioremap or
  541. * friends, and at least on 32-bit they add global TLB flushes and points
  542. * of failure.
  543. */
  544. static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
  545. struct iosys_map *dmap,
  546. pgoff_t i)
  547. {
  548. struct ttm_kmap_iter_linear_io *iter_io =
  549. container_of(iter, typeof(*iter_io), base);
  550. *dmap = iter_io->dmap;
  551. iosys_map_incr(dmap, i * PAGE_SIZE);
  552. }
/* kmap iterator ops for linearly mapped io memory; no unmap needed. */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};
/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;

	/* Only iomem resources can be mapped linearly here. */
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		/* Already kernel-mapped by the driver; reuse that mapping. */
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		/* Try the mapping flavour that matches the caching mode. */
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     bus_size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, bus_size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  bus_size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}
/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	/* Undo the mapping with the call matching how it was created. */
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}
#if defined(CONFIG_DEBUG_FS)

/* seq_file show callback: dump one manager's stats via a drm_printer. */
static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	ttm_resource_manager_debug(man, &p);
	return 0;
}
/* Generates ttm_resource_manager_fops from the show callback above. */
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif
  649. /**
  650. * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
  651. * resource manager.
  652. * @man: The TTM resource manager for which the debugfs stats file be creates
  653. * @parent: debugfs directory in which the file will reside
  654. * @name: The filename to create.
  655. *
  656. * This function setups up a debugfs file that can be used to look
  657. * at debug statistics of the specified ttm_resource_manager.
  658. */
  659. void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
  660. struct dentry * parent,
  661. const char *name)
  662. {
  663. #if defined(CONFIG_DEBUG_FS)
  664. debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
  665. #endif
  666. }
  667. EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);