nouveau_ttm.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/limits.h>
#include <linux/swiotlb.h>

#include <drm/ttm/ttm_range_manager.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <core/tegra.h>
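
/*
 * Common .free hook: all three resource managers below release their
 * backing allocation through nouveau_mem_del().
 */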
static void
nouveau_manager_del(struct ttm_resource_manager *man,
                    struct ttm_resource *reg)
{
        nouveau_mem_del(man, reg);
}
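
/*
 * TTM uses these to check whether an existing resource overlaps, or
 * already satisfies, a requested placement; both defer to the
 * nouveau_mem helpers.
 */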
static bool
nouveau_manager_intersects(struct ttm_resource_manager *man,
                           struct ttm_resource *res,
                           const struct ttm_place *place,
                           size_t size)
{
        return nouveau_mem_intersects(res, place, size);
}

static bool
nouveau_manager_compatible(struct ttm_resource_manager *man,
                           struct ttm_resource *res,
                           const struct ttm_place *place,
                           size_t size)
{
        return nouveau_mem_compatible(res, place, size);
}
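
/*
 * Allocates VRAM backing for a buffer object. Fails early with -ENOMEM
 * on devices that report no VRAM at all, and unwinds the nouveau_mem
 * allocation if the VRAM itself cannot be obtained.
 */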
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource **res)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        if (drm->client.device.info.ram_size == 0)
                return -ENOMEM;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
        if (ret)
                return ret;

        ttm_resource_init(bo, place, *res);

        ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
        if (ret) {
                nouveau_mem_del(man, *res);
                return ret;
        }

        return 0;
}

const struct ttm_resource_manager_func nouveau_vram_manager = {
        .alloc = nouveau_vram_manager_new,
        .free = nouveau_manager_del,
        .intersects = nouveau_manager_intersects,
        .compatible = nouveau_manager_compatible,
};
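
/*
 * GART resources on NV50+ carry no GPU address at allocation time;
 * start is zeroed here, and (presumably) a VMM mapping is established
 * later when the buffer is actually bound.
 */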
static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource **res)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
        if (ret)
                return ret;

        ttm_resource_init(bo, place, *res);
        (*res)->start = 0;
        return 0;
}

const struct ttm_resource_manager_func nouveau_gart_manager = {
        .alloc = nouveau_gart_manager_new,
        .free = nouveau_manager_del,
        .intersects = nouveau_manager_intersects,
        .compatible = nouveau_manager_compatible,
};
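
/*
 * Pre-NV50 GART: grab a virtual range from the owning client's VMM up
 * front (4KiB pages, shift 12) and use its address as the resource
 * start.
 */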
static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_resource **res)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
        if (ret)
                return ret;

        mem = nouveau_mem(*res);
        ttm_resource_init(bo, place, *res);
        ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
                           (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
        if (ret) {
                nouveau_mem_del(man, *res);
                return ret;
        }

        (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
        return 0;
}

const struct ttm_resource_manager_func nv04_gart_manager = {
        .alloc = nv04_gart_manager_new,
        .free = nouveau_manager_del,
        .intersects = nouveau_manager_intersects,
        .compatible = nouveau_manager_compatible,
};
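
/*
 * Looks up host (system) memory types from the MMU, one coherent and
 * one non-coherent, for the given kind; type_host[]/type_ncoh[] are
 * indexed by whether a non-zero kind was requested.
 */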
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
        struct nvif_mmu *mmu = &drm->client.mmu;
        int typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
                                   kind | NVIF_MEM_COHERENT);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_host[!!kind] = typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_ncoh[!!kind] = typei;
        return 0;
}
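
/*
 * Tesla (NV50) and newer chips use the custom VRAM allocator above;
 * earlier hardware falls back to TTM's generic range manager.
 */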
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

                if (!man)
                        return -ENOMEM;

                man->func = &nouveau_vram_manager;

                ttm_resource_manager_init(man, &drm->ttm.bdev,
                                          drm->gem.vram_available >> PAGE_SHIFT);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
                ttm_resource_manager_set_used(man, true);
                return 0;
        } else {
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
                                          drm->gem.vram_available >> PAGE_SHIFT);
        }
}
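
/*
 * Tear-down mirror of nouveau_ttm_init_vram(): disable the manager,
 * evict anything still resident, then free it. The range-manager path
 * has its own helper.
 */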
static void
nouveau_ttm_fini_vram(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
                kfree(man);
        } else
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
}
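
/*
 * Selects the GART implementation: the NV50+ manager, the NV04
 * VMM-backed manager, or TTM's range manager when an AGP bridge is
 * present.
 */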
static int
nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man;
        unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
        const struct ttm_resource_manager_func *func = NULL;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                func = &nouveau_gart_manager;
        else if (!drm->agp.bridge)
                func = &nv04_gart_manager;
        else
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
                                          size_pages);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return -ENOMEM;

        man->func = func;
        man->use_tt = true;
        ttm_resource_manager_init(man, &drm->ttm.bdev, size_pages);
        ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
        ttm_resource_manager_set_used(man, true);
        return 0;
}
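
/*
 * Tear-down mirror of nouveau_ttm_init_gtt(), with the same split
 * between the AGP/range-manager case and the custom managers.
 */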
static void
nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
            drm->agp.bridge)
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
        else {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
                kfree(man);
        }
}
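
/*
 * Driver-load entry point: probes host/VRAM memory types, takes over
 * any AGP bridge, initialises the TTM device (with SWIOTLB on x86 when
 * active), then brings up the VRAM and GART managers and enables
 * write-combining on resource 1 (the VRAM aperture).
 */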
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct drm_device *dev = drm->dev;
        bool need_swiotlb = false;
        int typei, ret;

        ret = nouveau_ttm_init_host(drm, 0);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            drm->client.device.info.chipset != 0x50) {
                ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
                if (ret)
                        return ret;
        }

        if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
            drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
                                           NVIF_MEM_KIND |
                                           NVIF_MEM_COMP |
                                           NVIF_MEM_DISP);
                if (typei < 0)
                        return -ENOSYS;

                drm->ttm.type_vram = typei;
        } else {
                drm->ttm.type_vram = -1;
        }

        if (pci && pci->agp.bridge) {
                drm->agp.bridge = pci->agp.bridge;
                drm->agp.base = pci->agp.base;
                drm->agp.size = pci->agp.size;
                drm->agp.cma = pci->agp.cma;
        }

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        need_swiotlb = is_swiotlb_active(dev->dev);
#endif

        ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
                              dev->anon_inode->i_mapping,
                              dev->vma_offset_manager, need_swiotlb,
                              drm->client.mmu.dmabits <= 32);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available = drm->client.device.info.ram_user;

        arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
                                   device->func->resource_size(device, 1));

        ret = nouveau_ttm_init_vram(drm);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
                                         device->func->resource_size(device, 1));

        /* GART init */
        if (!drm->agp.bridge) {
                drm->gem.gart_available = drm->client.vmm.vmm.limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = nouveau_ttm_init_gtt(drm);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        mutex_init(&drm->ttm.io_reserve_mutex);
        INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}
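
/*
 * Unwinds nouveau_ttm_init() in reverse: managers first, then the TTM
 * device, then the write-combining reservations on the VRAM aperture.
 */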
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);

        nouveau_ttm_fini_vram(drm);
        nouveau_ttm_fini_gtt(drm);
        ttm_device_fini(&drm->ttm.bdev);

        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
        arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
                                device->func->resource_size(device, 1));
}