kgsl_sharedmem.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <asm/cacheflush.h>
  7. #include <linux/of_platform.h>
  8. #include <linux/highmem.h>
  9. #include <linux/slab.h>
  10. #include <linux/random.h>
  11. #include <linux/shmem_fs.h>
  12. #include <linux/sched/signal.h>
  13. #include <linux/version.h>
  14. #include "kgsl_device.h"
  15. #include "kgsl_pool.h"
  16. #include "kgsl_reclaim.h"
  17. #include "kgsl_sharedmem.h"
  18. /*
  19. * The user can set this from debugfs to force failed memory allocations to
  20. * fail without trying OOM first. This is a debug setting useful for
  21. * stress applications that want to test failure cases without pushing the
  22. * system into unrecoverable OOM panics
  23. */
  24. bool kgsl_sharedmem_noretry_flag;
  25. static DEFINE_MUTEX(kernel_map_global_lock);
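/*
 * MEMTYPE(_type, _name) instantiates a read-only sysfs attribute describing
 * one GPU memory usage type. For example, MEMTYPE(KGSL_MEMTYPE_TEXTURE, texture)
 * expands to:
 *
 *   static struct kgsl_memtype memtype_texture = {
 *       .type = KGSL_MEMTYPE_TEXTURE,
 *       .attr = { .name = "texture", .mode = 0444 },
 *   };
 *
 * The resulting attributes are grouped into memtype_attrs[] below and exposed
 * through the per-process "memtype" kobject created in kgsl_process_init_sysfs().
 */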
  26. #define MEMTYPE(_type, _name) \
  27. static struct kgsl_memtype memtype_##_name = { \
  28. .type = _type, \
  29. .attr = { .name = __stringify(_name), .mode = 0444 } \
  30. }
  31. struct kgsl_memtype {
  32. unsigned int type;
  33. struct attribute attr;
  34. };
  35. /* We cannot use the MEMTYPE macro for "any(0)" because of the special characters */
  36. static struct kgsl_memtype memtype_any0 = {
  37. .type = KGSL_MEMTYPE_OBJECTANY,
  38. .attr = { .name = "any(0)", .mode = 0444 },
  39. };
  40. MEMTYPE(KGSL_MEMTYPE_FRAMEBUFFER, framebuffer);
  41. MEMTYPE(KGSL_MEMTYPE_RENDERBUFFER, renderbuffer);
  42. MEMTYPE(KGSL_MEMTYPE_ARRAYBUFFER, arraybuffer);
  43. MEMTYPE(KGSL_MEMTYPE_ELEMENTARRAYBUFFER, elementarraybuffer);
  44. MEMTYPE(KGSL_MEMTYPE_VERTEXARRAYBUFFER, vertexarraybuffer);
  45. MEMTYPE(KGSL_MEMTYPE_TEXTURE, texture);
  46. MEMTYPE(KGSL_MEMTYPE_SURFACE, surface);
  47. MEMTYPE(KGSL_MEMTYPE_EGL_SURFACE, egl_surface);
  48. MEMTYPE(KGSL_MEMTYPE_GL, gl);
  49. MEMTYPE(KGSL_MEMTYPE_CL, cl);
  50. MEMTYPE(KGSL_MEMTYPE_CL_BUFFER_MAP, cl_buffer_map);
  51. MEMTYPE(KGSL_MEMTYPE_CL_BUFFER_NOMAP, cl_buffer_nomap);
  52. MEMTYPE(KGSL_MEMTYPE_CL_IMAGE_MAP, cl_image_map);
  53. MEMTYPE(KGSL_MEMTYPE_CL_IMAGE_NOMAP, cl_image_nomap);
  54. MEMTYPE(KGSL_MEMTYPE_CL_KERNEL_STACK, cl_kernel_stack);
  55. MEMTYPE(KGSL_MEMTYPE_COMMAND, command);
  56. MEMTYPE(KGSL_MEMTYPE_2D, 2d);
  57. MEMTYPE(KGSL_MEMTYPE_EGL_IMAGE, egl_image);
  58. MEMTYPE(KGSL_MEMTYPE_EGL_SHADOW, egl_shadow);
  59. MEMTYPE(KGSL_MEMTYPE_MULTISAMPLE, egl_multisample);
  60. MEMTYPE(KGSL_MEMTYPE_KERNEL, kernel);
  61. static struct attribute *memtype_attrs[] = {
  62. &memtype_any0.attr,
  63. &memtype_framebuffer.attr,
  64. &memtype_renderbuffer.attr,
  65. &memtype_arraybuffer.attr,
  66. &memtype_elementarraybuffer.attr,
  67. &memtype_vertexarraybuffer.attr,
  68. &memtype_texture.attr,
  69. &memtype_surface.attr,
  70. &memtype_egl_surface.attr,
  71. &memtype_gl.attr,
  72. &memtype_cl.attr,
  73. &memtype_cl_buffer_map.attr,
  74. &memtype_cl_buffer_nomap.attr,
  75. &memtype_cl_image_map.attr,
  76. &memtype_cl_image_nomap.attr,
  77. &memtype_cl_kernel_stack.attr,
  78. &memtype_command.attr,
  79. &memtype_2d.attr,
  80. &memtype_egl_image.attr,
  81. &memtype_egl_shadow.attr,
  82. &memtype_egl_multisample.attr,
  83. &memtype_kernel.attr,
  84. NULL,
  85. };
  86. ATTRIBUTE_GROUPS(memtype);
  87. /* An attribute for showing per-process memory statistics */
  88. struct kgsl_mem_entry_attribute {
  89. struct kgsl_process_attribute attr;
  90. int memtype;
  91. ssize_t (*show)(struct kgsl_process_private *priv,
  92. int type, char *buf);
  93. };
  94. static inline struct kgsl_process_attribute *to_process_attr(
  95. struct attribute *attr)
  96. {
  97. return container_of(attr, struct kgsl_process_attribute, attr);
  98. }
  99. #define to_mem_entry_attr(a) \
  100. container_of(a, struct kgsl_mem_entry_attribute, attr)
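/*
 * __MEM_ENTRY_ATTR()/MEM_ENTRY_ATTR() build the per-process memory statistic
 * attributes. For example, MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_KERNEL, kernel,
 * mem_entry_show) defines a read-only attribute named "kernel" whose reads go
 * through mem_entry_sysfs_show(), which dispatches to mem_entry_show() with
 * memtype KGSL_MEM_ENTRY_KERNEL for the owning process.
 */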
  101. #define __MEM_ENTRY_ATTR(_type, _name, _show) \
  102. { \
  103. .attr = __ATTR(_name, 0444, mem_entry_sysfs_show, NULL), \
  104. .memtype = _type, \
  105. .show = _show, \
  106. }
  107. #define MEM_ENTRY_ATTR(_type, _name, _show) \
  108. static struct kgsl_mem_entry_attribute mem_entry_##_name = \
  109. __MEM_ENTRY_ATTR(_type, _name, _show)
  110. static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
  111. struct kgsl_process_attribute *attr, char *buf)
  112. {
  113. struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
  114. struct kgsl_process_private *priv =
  115. container_of(kobj, struct kgsl_process_private, kobj);
  116. return pattr->show(priv, pattr->memtype, buf);
  117. }
  118. struct deferred_work {
  119. struct kgsl_process_private *private;
  120. struct work_struct work;
  121. };
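/*
 * Deferred release of a process private reference. The sysfs show() handlers
 * below take a reference on the process private and hand it to this worker
 * (via kgsl_driver.lockless_workqueue) so that the final put never happens in
 * sysfs read context, where it could end up tearing down the very kobject
 * that is being read.
 */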
  122. static void process_private_deferred_put(struct work_struct *work)
  123. {
  124. struct deferred_work *free_work =
  125. container_of(work, struct deferred_work, work);
  126. kgsl_process_private_put(free_work->private);
  127. kfree(free_work);
  128. }
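/*
 * Sum the sizes of all memory entries belonging to this process whose usage
 * type matches the memtype attribute being read. mem_lock is dropped around
 * the per-entry work, so each entry is pinned with kgsl_mem_entry_get() while
 * it is examined.
 */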
  129. static ssize_t memtype_sysfs_show(struct kobject *kobj,
  130. struct attribute *attr, char *buf)
  131. {
  132. struct kgsl_process_private *priv;
  133. struct kgsl_memtype *memtype;
  134. struct kgsl_mem_entry *entry;
  135. u64 size = 0;
  136. int id = 0;
  137. struct deferred_work *work = kzalloc(sizeof(struct deferred_work),
  138. GFP_KERNEL);
  139. if (!work)
  140. return -ENOMEM;
  141. priv = container_of(kobj, struct kgsl_process_private, kobj_memtype);
  142. memtype = container_of(attr, struct kgsl_memtype, attr);
  143. /*
  144. * Take a process refcount here and put it back in a deferred manner.
  145. * This is to avoid a deadlock where we put back last reference of the
  146. * process private (via kgsl_mem_entry_put) here and end up trying to
  147. * remove sysfs kobject while we are still in the middle of reading one
  148. * of the sysfs files.
  149. */
  150. if (!kgsl_process_private_get(priv)) {
  151. kfree(work);
  152. return -ENOENT;
  153. }
  154. work->private = priv;
  155. INIT_WORK(&work->work, process_private_deferred_put);
  156. spin_lock(&priv->mem_lock);
  157. for (entry = idr_get_next(&priv->mem_idr, &id); entry;
  158. id++, entry = idr_get_next(&priv->mem_idr, &id)) {
  159. struct kgsl_memdesc *memdesc;
  160. unsigned int type;
  161. if (!kgsl_mem_entry_get(entry))
  162. continue;
  163. spin_unlock(&priv->mem_lock);
  164. memdesc = &entry->memdesc;
  165. type = kgsl_memdesc_get_memtype(memdesc);
  166. if (type == memtype->type)
  167. size += memdesc->size;
  168. kgsl_mem_entry_put(entry);
  169. spin_lock(&priv->mem_lock);
  170. }
  171. spin_unlock(&priv->mem_lock);
  172. queue_work(kgsl_driver.lockless_workqueue, &work->work);
  173. return scnprintf(buf, PAGE_SIZE, "%llu\n", size);
  174. }
  175. static const struct sysfs_ops memtype_sysfs_ops = {
  176. .show = memtype_sysfs_show,
  177. };
  178. static struct kobj_type ktype_memtype = {
  179. .sysfs_ops = &memtype_sysfs_ops,
  180. .default_groups = memtype_groups,
  181. };
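/*
 * Account imported (ION/dma-buf) memory for this process. Buffers tagged as
 * EGL surfaces are charged in full; other imported buffers are skipped if any
 * EGL surface references them, and otherwise their size is divided by the
 * number of EGL images sharing the buffer, presumably so the cost is split
 * across the processes that imported it.
 */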
  182. static ssize_t
  183. imported_mem_show(struct kgsl_process_private *priv,
  184. int type, char *buf)
  185. {
  186. struct kgsl_mem_entry *entry;
  187. uint64_t imported_mem = 0;
  188. int id = 0;
  189. struct deferred_work *work = kzalloc(sizeof(struct deferred_work),
  190. GFP_KERNEL);
  191. if (!work)
  192. return -ENOMEM;
  193. /*
  194. * Take a process refcount here and put it back in a deferred manner.
  195. * This is to avoid a deadlock where we put back last reference of the
  196. * process private (via kgsl_mem_entry_put) here and end up trying to
  197. * remove sysfs kobject while we are still in the middle of reading one
  198. * of the sysfs files.
  199. */
  200. if (!kgsl_process_private_get(priv)) {
  201. kfree(work);
  202. return -ENOENT;
  203. }
  204. work->private = priv;
  205. INIT_WORK(&work->work, process_private_deferred_put);
  206. spin_lock(&priv->mem_lock);
  207. for (entry = idr_get_next(&priv->mem_idr, &id); entry;
  208. id++, entry = idr_get_next(&priv->mem_idr, &id)) {
  209. int egl_surface_count = 0, egl_image_count = 0;
  210. struct kgsl_memdesc *m;
  211. if (!kgsl_mem_entry_get(entry))
  212. continue;
  213. spin_unlock(&priv->mem_lock);
  214. m = &entry->memdesc;
  215. if (kgsl_memdesc_usermem_type(m) == KGSL_MEM_ENTRY_ION) {
  216. kgsl_get_egl_counts(entry, &egl_surface_count,
  217. &egl_image_count);
  218. if (kgsl_memdesc_get_memtype(m) ==
  219. KGSL_MEMTYPE_EGL_SURFACE)
  220. imported_mem += m->size;
  221. else if (egl_surface_count == 0) {
  222. uint64_t size = m->size;
  223. do_div(size, (egl_image_count ?
  224. egl_image_count : 1));
  225. imported_mem += size;
  226. }
  227. }
  228. kgsl_mem_entry_put(entry);
  229. spin_lock(&priv->mem_lock);
  230. }
  231. spin_unlock(&priv->mem_lock);
  232. queue_work(kgsl_driver.lockless_workqueue, &work->work);
  233. return scnprintf(buf, PAGE_SIZE, "%llu\n", imported_mem);
  234. }
  235. static ssize_t
  236. gpumem_mapped_show(struct kgsl_process_private *priv,
  237. int type, char *buf)
  238. {
  239. return scnprintf(buf, PAGE_SIZE, "%lld\n",
  240. atomic64_read(&priv->gpumem_mapped));
  241. }
  242. static ssize_t
  243. gpumem_unmapped_show(struct kgsl_process_private *priv, int type, char *buf)
  244. {
  245. u64 gpumem_total = atomic64_read(&priv->stats[type].cur);
  246. u64 gpumem_mapped = atomic64_read(&priv->gpumem_mapped);
  247. if (gpumem_mapped > gpumem_total)
  248. return -EIO;
  249. return scnprintf(buf, PAGE_SIZE, "%llu\n",
  250. gpumem_total - gpumem_mapped);
  251. }
  252. /**
  253. * Show the current amount of memory allocated for the given memtype
  254. */
  255. static ssize_t
  256. mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
  257. {
  258. return scnprintf(buf, PAGE_SIZE, "%lld\n",
  259. atomic64_read(&priv->stats[type].cur));
  260. }
  261. /**
  262. * Show the maximum memory allocated for the given memtype through the life of
  263. * the process
  264. */
  265. static ssize_t
  266. mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
  267. {
  268. return scnprintf(buf, PAGE_SIZE, "%llu\n", priv->stats[type].max);
  269. }
  270. static ssize_t process_sysfs_show(struct kobject *kobj,
  271. struct attribute *attr, char *buf)
  272. {
  273. struct kgsl_process_attribute *pattr = to_process_attr(attr);
  274. return pattr->show(kobj, pattr, buf);
  275. }
  276. static ssize_t process_sysfs_store(struct kobject *kobj,
  277. struct attribute *attr, const char *buf, size_t count)
  278. {
  279. struct kgsl_process_attribute *pattr = to_process_attr(attr);
  280. if (pattr->store)
  281. return pattr->store(kobj, pattr, buf, count);
  282. return -EIO;
  283. }
  284. /* Dummy release function - we have nothing to do here */
  285. static void process_sysfs_release(struct kobject *kobj)
  286. {
  287. }
  288. static const struct sysfs_ops process_sysfs_ops = {
  289. .show = process_sysfs_show,
  290. .store = process_sysfs_store,
  291. };
  292. MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_KERNEL, kernel, mem_entry_show);
  293. MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_KERNEL, kernel_max, mem_entry_max_show);
  294. MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_USER, user, mem_entry_show);
  295. MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_USER, user_max, mem_entry_max_show);
  296. #ifdef CONFIG_ION
  297. MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_USER, ion, mem_entry_show);
  298. MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_USER, ion_max, mem_entry_max_show);
  299. #endif
  300. MEM_ENTRY_ATTR(0, imported_mem, imported_mem_show);
  301. MEM_ENTRY_ATTR(0, gpumem_mapped, gpumem_mapped_show);
  302. MEM_ENTRY_ATTR(KGSL_MEM_ENTRY_KERNEL, gpumem_unmapped, gpumem_unmapped_show);
  303. static struct attribute *mem_entry_attrs[] = {
  304. &mem_entry_kernel.attr.attr,
  305. &mem_entry_kernel_max.attr.attr,
  306. &mem_entry_user.attr.attr,
  307. &mem_entry_user_max.attr.attr,
  308. #ifdef CONFIG_ION
  309. &mem_entry_ion.attr.attr,
  310. &mem_entry_ion_max.attr.attr,
  311. #endif
  312. &mem_entry_imported_mem.attr.attr,
  313. &mem_entry_gpumem_mapped.attr.attr,
  314. &mem_entry_gpumem_unmapped.attr.attr,
  315. NULL,
  316. };
  317. ATTRIBUTE_GROUPS(mem_entry);
  318. static struct kobj_type process_ktype = {
  319. .sysfs_ops = &process_sysfs_ops,
  320. .release = &process_sysfs_release,
  321. .default_groups = mem_entry_groups,
  322. };
  323. #ifdef CONFIG_QCOM_KGSL_PROCESS_RECLAIM
  324. static struct device_attribute dev_attr_max_reclaim_limit = {
  325. .attr = { .name = "max_reclaim_limit", .mode = 0644 },
  326. .show = kgsl_proc_max_reclaim_limit_show,
  327. .store = kgsl_proc_max_reclaim_limit_store,
  328. };
  329. static struct device_attribute dev_attr_page_reclaim_per_call = {
  330. .attr = { .name = "page_reclaim_per_call", .mode = 0644 },
  331. .show = kgsl_nr_to_scan_show,
  332. .store = kgsl_nr_to_scan_store,
  333. };
  334. #endif
  335. /**
  336. * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
  337. *
  338. * @device: Pointer to kgsl device struct
  339. * @private: Pointer to the structure for the process
  340. *
  341. * kgsl_process_init_sysfs() is called at the time of creating the
  342. * process struct when a process opens the kgsl device for the first time.
  343. * This function creates the sysfs files for the process.
  344. */
  345. void kgsl_process_init_sysfs(struct kgsl_device *device,
  346. struct kgsl_process_private *private)
  347. {
  348. if (kobject_init_and_add(&private->kobj, &process_ktype,
  349. kgsl_driver.prockobj, "%d", pid_nr(private->pid))) {
  350. dev_err(device->dev, "Unable to add sysfs for process %d\n",
  351. pid_nr(private->pid));
  352. }
  353. kgsl_reclaim_proc_sysfs_init(private);
  354. if (kobject_init_and_add(&private->kobj_memtype, &ktype_memtype,
  355. &private->kobj, "memtype")) {
  356. dev_err(device->dev, "Unable to add memtype sysfs for process %d\n",
  357. pid_nr(private->pid));
  358. }
  359. }
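/*
 * Driver-wide memory statistics. memstat_show() keys off the attribute name
 * (e.g. "page_alloc", "coherent_max") and reports the matching counter from
 * kgsl_driver.stats; unknown names simply read back as 0.
 */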
  360. static ssize_t memstat_show(struct device *dev,
  361. struct device_attribute *attr, char *buf)
  362. {
  363. uint64_t val = 0;
  364. if (!strcmp(attr->attr.name, "vmalloc"))
  365. val = atomic_long_read(&kgsl_driver.stats.vmalloc);
  366. else if (!strcmp(attr->attr.name, "vmalloc_max"))
  367. val = atomic_long_read(&kgsl_driver.stats.vmalloc_max);
  368. else if (!strcmp(attr->attr.name, "page_alloc"))
  369. val = atomic_long_read(&kgsl_driver.stats.page_alloc);
  370. else if (!strcmp(attr->attr.name, "page_alloc_max"))
  371. val = atomic_long_read(&kgsl_driver.stats.page_alloc_max);
  372. else if (!strcmp(attr->attr.name, "coherent"))
  373. val = atomic_long_read(&kgsl_driver.stats.coherent);
  374. else if (!strcmp(attr->attr.name, "coherent_max"))
  375. val = atomic_long_read(&kgsl_driver.stats.coherent_max);
  376. else if (!strcmp(attr->attr.name, "secure"))
  377. val = atomic_long_read(&kgsl_driver.stats.secure);
  378. else if (!strcmp(attr->attr.name, "secure_max"))
  379. val = atomic_long_read(&kgsl_driver.stats.secure_max);
  380. else if (!strcmp(attr->attr.name, "mapped"))
  381. val = atomic_long_read(&kgsl_driver.stats.mapped);
  382. else if (!strcmp(attr->attr.name, "mapped_max"))
  383. val = atomic_long_read(&kgsl_driver.stats.mapped_max);
  384. return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
  385. }
  386. static ssize_t full_cache_threshold_store(struct device *dev,
  387. struct device_attribute *attr,
  388. const char *buf, size_t count)
  389. {
  390. int ret;
  391. unsigned int thresh = 0;
  392. ret = kstrtou32(buf, 0, &thresh);
  393. if (ret)
  394. return ret;
  395. kgsl_driver.full_cache_threshold = thresh;
  396. return count;
  397. }
  398. static ssize_t full_cache_threshold_show(struct device *dev,
  399. struct device_attribute *attr,
  400. char *buf)
  401. {
  402. return scnprintf(buf, PAGE_SIZE, "%d\n",
  403. kgsl_driver.full_cache_threshold);
  404. }
  405. static DEVICE_ATTR(vmalloc, 0444, memstat_show, NULL);
  406. static DEVICE_ATTR(vmalloc_max, 0444, memstat_show, NULL);
  407. static DEVICE_ATTR(page_alloc, 0444, memstat_show, NULL);
  408. static DEVICE_ATTR(page_alloc_max, 0444, memstat_show, NULL);
  409. static DEVICE_ATTR(coherent, 0444, memstat_show, NULL);
  410. static DEVICE_ATTR(coherent_max, 0444, memstat_show, NULL);
  411. static DEVICE_ATTR(secure, 0444, memstat_show, NULL);
  412. static DEVICE_ATTR(secure_max, 0444, memstat_show, NULL);
  413. static DEVICE_ATTR(mapped, 0444, memstat_show, NULL);
  414. static DEVICE_ATTR(mapped_max, 0444, memstat_show, NULL);
  415. static DEVICE_ATTR_RW(full_cache_threshold);
  416. static const struct attribute *drv_attr_list[] = {
  417. &dev_attr_vmalloc.attr,
  418. &dev_attr_vmalloc_max.attr,
  419. &dev_attr_page_alloc.attr,
  420. &dev_attr_page_alloc_max.attr,
  421. &dev_attr_coherent.attr,
  422. &dev_attr_coherent_max.attr,
  423. &dev_attr_secure.attr,
  424. &dev_attr_secure_max.attr,
  425. &dev_attr_mapped.attr,
  426. &dev_attr_mapped_max.attr,
  427. &dev_attr_full_cache_threshold.attr,
  428. #ifdef CONFIG_QCOM_KGSL_PROCESS_RECLAIM
  429. &dev_attr_max_reclaim_limit.attr,
  430. &dev_attr_page_reclaim_per_call.attr,
  431. #endif
  432. NULL,
  433. };
  434. int
  435. kgsl_sharedmem_init_sysfs(void)
  436. {
  437. return sysfs_create_files(&kgsl_driver.virtdev.kobj, drv_attr_list);
  438. }
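/*
 * Fault handler for page-backed allocations. If the page is still present in
 * memdesc->pages[] it is inserted directly; otherwise the page was reclaimed
 * to shmem, so it is read back with shmem_read_mapping_page_gfp(), synced for
 * the GPU, reinstated in the pages array (marking the memdesc so it is skipped
 * by further reclaim) and then mapped into the faulting VMA.
 */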
  439. static vm_fault_t kgsl_paged_vmfault(struct kgsl_memdesc *memdesc,
  440. struct vm_area_struct *vma,
  441. struct vm_fault *vmf)
  442. {
  443. int pgoff, ret;
  444. struct page *page;
  445. unsigned int offset = vmf->address - vma->vm_start;
  446. if (offset >= memdesc->size)
  447. return VM_FAULT_SIGBUS;
  448. pgoff = offset >> PAGE_SHIFT;
  449. spin_lock(&memdesc->lock);
  450. if (memdesc->pages[pgoff]) {
  451. page = memdesc->pages[pgoff];
  452. get_page(page);
  453. } else {
  454. struct kgsl_process_private *priv =
  455. ((struct kgsl_mem_entry *)vma->vm_private_data)->priv;
  456. /* We are here because page was reclaimed */
  457. memdesc->priv |= KGSL_MEMDESC_SKIP_RECLAIM;
  458. spin_unlock(&memdesc->lock);
  459. page = shmem_read_mapping_page_gfp(
  460. memdesc->shmem_filp->f_mapping, pgoff,
  461. kgsl_gfp_mask(0));
  462. if (IS_ERR(page))
  463. return VM_FAULT_SIGBUS;
  464. kgsl_page_sync(memdesc->dev, page, PAGE_SIZE, DMA_BIDIRECTIONAL);
  465. spin_lock(&memdesc->lock);
  466. /*
  467. * Update the pages array only if the page was
  468. * not already brought back.
  469. */
  470. if (!memdesc->pages[pgoff]) {
  471. memdesc->pages[pgoff] = page;
  472. atomic_dec(&priv->unpinned_page_count);
  473. get_page(page);
  474. }
  475. }
  476. spin_unlock(&memdesc->lock);
  477. ret = vmf_insert_page(vma, vmf->address, page);
  478. put_page(page);
  479. return ret;
  480. }
  481. static void kgsl_paged_unmap_kernel(struct kgsl_memdesc *memdesc)
  482. {
  483. mutex_lock(&kernel_map_global_lock);
  484. if (!memdesc->hostptr) {
  485. /* If already unmapped the refcount should be 0 */
  486. WARN_ON(memdesc->hostptr_count);
  487. goto done;
  488. }
  489. memdesc->hostptr_count--;
  490. if (memdesc->hostptr_count)
  491. goto done;
  492. vunmap(memdesc->hostptr);
  493. atomic_long_sub(memdesc->size, &kgsl_driver.stats.vmalloc);
  494. memdesc->hostptr = NULL;
  495. done:
  496. mutex_unlock(&kernel_map_global_lock);
  497. }
  498. #if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
  499. #include <soc/qcom/secure_buffer.h>
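/*
 * Secure (content-protected) buffers are moved between virtual machines with
 * hyp_assign_table(): kgsl_lock_sgt() reassigns the pages from HLOS to the
 * CP_PIXEL VM and kgsl_unlock_sgt() gives them back. -EAGAIN is retried
 * indefinitely; see the comment below for why -EADDRNOTAVAIL leaves the pages
 * unusable.
 */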
  500. int kgsl_lock_sgt(struct sg_table *sgt, u64 size)
  501. {
  502. int dest_perms = PERM_READ | PERM_WRITE;
  503. int source_vm = VMID_HLOS;
  504. int dest_vm = VMID_CP_PIXEL;
  505. int ret;
  506. do {
  507. ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm,
  508. &dest_perms, 1);
  509. } while (ret == -EAGAIN);
  510. if (ret) {
  511. /*
  512. * If returned error code is EADDRNOTAVAIL, then this
  513. * memory may no longer be in a usable state as security
  514. * state of the pages is unknown after this failure. This
  515. * memory can neither be added back to the pool nor buddy
  516. * system.
  517. */
  518. if (ret == -EADDRNOTAVAIL)
  519. pr_err("Failure to lock secure GPU memory 0x%llx bytes will not be recoverable\n",
  520. size);
  521. return ret;
  522. }
  523. return 0;
  524. }
  525. int kgsl_unlock_sgt(struct sg_table *sgt)
  526. {
  527. int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
  528. int source_vm = VMID_CP_PIXEL;
  529. int dest_vm = VMID_HLOS;
  530. int ret;
  531. do {
  532. ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm,
  533. &dest_perms, 1);
  534. } while (ret == -EAGAIN);
  535. if (ret)
  536. return ret;
  537. return 0;
  538. }
  539. #endif
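/*
 * Map a page-backed allocation into the kernel with vmap(). The kernel-side
 * pgprot mirrors the user-side cache mode: writeback (and the deprecated
 * writethrough) map cached, while uncached/writecombine and anything else end
 * up write-combined. Mappings are refcounted via hostptr_count under
 * kernel_map_global_lock.
 */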
  540. static int kgsl_paged_map_kernel(struct kgsl_memdesc *memdesc)
  541. {
  542. int ret = 0;
  543. /* Sanity check - don't map more than we could possibly chew */
  544. if (memdesc->size > ULONG_MAX)
  545. return -ENOMEM;
  546. mutex_lock(&kernel_map_global_lock);
  547. if ((!memdesc->hostptr) && (memdesc->pages != NULL)) {
  548. pgprot_t page_prot;
  549. u64 cache;
  550. /* Determine user-side caching policy */
  551. cache = kgsl_memdesc_get_cachemode(memdesc);
  552. switch (cache) {
  553. case KGSL_CACHEMODE_WRITETHROUGH:
  554. page_prot = PAGE_KERNEL;
  555. WARN_ONCE(1, "WRITETHROUGH is deprecated for arm64");
  556. break;
  557. case KGSL_CACHEMODE_WRITEBACK:
  558. page_prot = PAGE_KERNEL;
  559. break;
  560. case KGSL_CACHEMODE_UNCACHED:
  561. case KGSL_CACHEMODE_WRITECOMBINE:
  562. default:
  563. page_prot = pgprot_writecombine(PAGE_KERNEL);
  564. break;
  565. }
  566. memdesc->hostptr = vmap(memdesc->pages, memdesc->page_count,
  567. VM_IOREMAP, page_prot);
  568. if (memdesc->hostptr)
  569. KGSL_STATS_ADD(memdesc->size,
  570. &kgsl_driver.stats.vmalloc,
  571. &kgsl_driver.stats.vmalloc_max);
  572. else
  573. ret = -ENOMEM;
  574. }
  575. if (memdesc->hostptr)
  576. memdesc->hostptr_count++;
  577. mutex_unlock(&kernel_map_global_lock);
  578. return ret;
  579. }
  580. static vm_fault_t kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
  581. struct vm_area_struct *vma,
  582. struct vm_fault *vmf)
  583. {
  584. unsigned long offset, pfn;
  585. offset = ((unsigned long) vmf->address - vma->vm_start) >>
  586. PAGE_SHIFT;
  587. pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
  588. return vmf_insert_pfn(vma, vmf->address, pfn);
  589. }
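/*
 * Per-page DMA cache maintenance. KGSL_CACHE_OP_CLEAN maps to a clean
 * (DMA_TO_DEVICE sync), KGSL_CACHE_OP_INV to an invalidate (DMA_FROM_DEVICE),
 * and KGSL_CACHE_OP_FLUSH performs both; the 6.1+ special case is explained
 * in the comment inside the function.
 */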
  590. static void _dma_cache_op(struct device *dev, struct page *page,
  591. unsigned int op)
  592. {
  593. struct scatterlist sgl;
  594. sg_init_table(&sgl, 1);
  595. sg_set_page(&sgl, page, PAGE_SIZE, 0);
  596. sg_dma_address(&sgl) = page_to_phys(page);
  597. /*
  598. * APIs for Cache Maintenance Operations are updated in kernel
  599. * version 6.1. Prior to 6.1, dma_sync_sg_for_device() with
  600. * DMA_FROM_DEVICE as direction triggers cache invalidate and
  601. * clean whereas in kernel version 6.1, it triggers only cache
  602. * clean. Hence use dma_sync_sg_for_cpu() for cache invalidate
  603. * for kernel version 6.1 and above.
  604. */
  605. switch (op) {
  606. case KGSL_CACHE_OP_FLUSH:
  607. dma_sync_sg_for_device(dev, &sgl, 1, DMA_TO_DEVICE);
  608. #if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
  609. dma_sync_sg_for_cpu(dev, &sgl, 1, DMA_FROM_DEVICE);
  610. #else
  611. dma_sync_sg_for_device(dev, &sgl, 1, DMA_FROM_DEVICE);
  612. #endif
  613. break;
  614. case KGSL_CACHE_OP_CLEAN:
  615. dma_sync_sg_for_device(dev, &sgl, 1, DMA_TO_DEVICE);
  616. break;
  617. case KGSL_CACHE_OP_INV:
  618. dma_sync_sg_for_device(dev, &sgl, 1, DMA_FROM_DEVICE);
  619. #if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
  620. dma_sync_sg_for_cpu(dev, &sgl, 1, DMA_FROM_DEVICE);
  621. #endif
  622. break;
  623. }
  624. }
  625. int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
  626. uint64_t size, unsigned int op)
  627. {
  628. int i;
  629. if (memdesc->flags & KGSL_MEMFLAGS_IOCOHERENT)
  630. return 0;
  631. if (size == 0 || size > UINT_MAX)
  632. return -EINVAL;
  633. /* Make sure that the offset + size does not overflow */
  634. if ((offset + size < offset) || (offset + size < size))
  635. return -ERANGE;
  636. /* Check that offset+length does not exceed memdesc->size */
  637. if (offset + size > memdesc->size)
  638. return -ERANGE;
  639. size += offset & PAGE_MASK;
  640. offset &= ~PAGE_MASK;
  641. /* If there is an sgt, use for_each_sg_page to walk it */
  642. if (memdesc->sgt) {
  643. struct sg_page_iter sg_iter;
  644. for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
  645. PAGE_ALIGN(size) >> PAGE_SHIFT, offset >> PAGE_SHIFT)
  646. _dma_cache_op(memdesc->dev, sg_page_iter_page(&sg_iter), op);
  647. return 0;
  648. }
  649. /* Otherwise just walk through the list of pages */
  650. for (i = 0; i < memdesc->page_count; i++) {
  651. u64 cur = (i << PAGE_SHIFT);
  652. if ((cur < offset) || (cur >= (offset + size)))
  653. continue;
  654. _dma_cache_op(memdesc->dev, memdesc->pages[i], op);
  655. }
  656. return 0;
  657. }
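/*
 * Common memdesc initialization: clear the descriptor, strip flags the target
 * cannot honour (CPU-mapped SVM without a per-process MMU or for secure
 * buffers, I/O coherency when the MMU lacks the feature or the buffer is
 * uncached), apply the guard-page and secure priv bits, and raise the
 * requested alignment to at least PAGE_SIZE.
 */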
  658. void kgsl_memdesc_init(struct kgsl_device *device,
  659. struct kgsl_memdesc *memdesc, uint64_t flags)
  660. {
  661. struct kgsl_mmu *mmu = &device->mmu;
  662. unsigned int align;
  663. memset(memdesc, 0, sizeof(*memdesc));
  664. /* Turn off SVM if the system doesn't support it */
  665. if (!kgsl_mmu_is_perprocess(mmu))
  666. flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
  667. /* Secure memory disables advanced addressing modes */
  668. if (flags & KGSL_MEMFLAGS_SECURE)
  669. flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
  670. /* Disable IO coherence if it is not supported on the chip */
  671. if (!kgsl_mmu_has_feature(device, KGSL_MMU_IO_COHERENT)) {
  672. flags &= ~((uint64_t) KGSL_MEMFLAGS_IOCOHERENT);
  673. WARN_ONCE(IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT),
  674. "I/O coherency is not supported on this target\n");
  675. } else if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT))
  676. flags |= KGSL_MEMFLAGS_IOCOHERENT;
  677. /*
  678. * We can't enable I/O coherency on uncached surfaces because of
  679. * situations where hardware might snoop the cpu caches which can
  680. * have stale data. This happens primarily due to the limitations
  681. * of dma caching APIs available on arm64
  682. */
  683. if (!kgsl_cachemode_is_cached(flags))
  684. flags &= ~((u64) KGSL_MEMFLAGS_IOCOHERENT);
  685. if (kgsl_mmu_has_feature(device, KGSL_MMU_NEED_GUARD_PAGE) ||
  686. (flags & KGSL_MEMFLAGS_GUARD_PAGE))
  687. memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;
  688. if (flags & KGSL_MEMFLAGS_SECURE)
  689. memdesc->priv |= KGSL_MEMDESC_SECURE;
  690. memdesc->flags = flags;
  691. /*
  692. * For io-coherent buffers don't set memdesc->dev, so that we skip DMA
  693. * cache operations at allocation time
  694. */
  695. if (!(flags & KGSL_MEMFLAGS_IOCOHERENT))
  696. memdesc->dev = &device->pdev->dev;
  697. align = max_t(unsigned int,
  698. kgsl_memdesc_get_align(memdesc), ilog2(PAGE_SIZE));
  699. kgsl_memdesc_set_align(memdesc, align);
  700. spin_lock_init(&memdesc->lock);
  701. }
  702. void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
  703. {
  704. if (!memdesc || !memdesc->size)
  705. return;
  706. /* Assume that if no operations were specified, something went wrong early on */
  707. if (!memdesc->ops)
  708. return;
  709. if (memdesc->ops->put_gpuaddr)
  710. memdesc->ops->put_gpuaddr(memdesc);
  711. if (memdesc->ops->free)
  712. memdesc->ops->free(memdesc);
  713. }
  714. int
  715. kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
  716. uint32_t *dst,
  717. uint64_t offsetbytes)
  718. {
  719. uint32_t *src;
  720. if (WARN_ON(memdesc == NULL || memdesc->hostptr == NULL ||
  721. dst == NULL))
  722. return -EINVAL;
  723. WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
  724. if (offsetbytes % sizeof(uint32_t) != 0)
  725. return -EINVAL;
  726. WARN_ON(offsetbytes > (memdesc->size - sizeof(uint32_t)));
  727. if (offsetbytes > (memdesc->size - sizeof(uint32_t)))
  728. return -ERANGE;
  729. /*
  730. * We are reading shared memory between CPU and GPU.
  731. * Make sure reads before this are complete
  732. */
  733. rmb();
  734. src = (uint32_t *)(memdesc->hostptr + offsetbytes);
  735. *dst = *src;
  736. return 0;
  737. }
  738. void
  739. kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
  740. uint64_t offsetbytes,
  741. uint32_t src)
  742. {
  743. /* Quietly return if the memdesc isn't valid */
  744. if (IS_ERR_OR_NULL(memdesc) || WARN_ON(!memdesc->hostptr))
  745. return;
  746. if (WARN_ON(!IS_ALIGNED(offsetbytes, sizeof(u32))))
  747. return;
  748. if (WARN_ON(offsetbytes > (memdesc->size - sizeof(u32))))
  749. return;
  750. *((u32 *) (memdesc->hostptr + offsetbytes)) = src;
  751. /* Make sure the write is posted before continuing */
  752. wmb();
  753. }
  754. int
  755. kgsl_sharedmem_readq(const struct kgsl_memdesc *memdesc,
  756. uint64_t *dst,
  757. uint64_t offsetbytes)
  758. {
  759. uint64_t *src;
  760. if (WARN_ON(memdesc == NULL || memdesc->hostptr == NULL ||
  761. dst == NULL))
  762. return -EINVAL;
  763. WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
  764. if (offsetbytes % sizeof(uint32_t) != 0)
  765. return -EINVAL;
  766. WARN_ON(offsetbytes > (memdesc->size - sizeof(uint32_t)));
  767. if (offsetbytes > (memdesc->size - sizeof(uint32_t)))
  768. return -ERANGE;
  769. /*
  770. * We are reading shared memory between CPU and GPU.
  771. * Make sure reads before this are complete
  772. */
  773. rmb();
  774. src = (uint64_t *)(memdesc->hostptr + offsetbytes);
  775. *dst = *src;
  776. return 0;
  777. }
  778. void
  779. kgsl_sharedmem_writeq(const struct kgsl_memdesc *memdesc,
  780. uint64_t offsetbytes,
  781. uint64_t src)
  782. {
  783. /* Quietly return if the memdesc isn't valid */
  784. if (IS_ERR_OR_NULL(memdesc) || WARN_ON(!memdesc->hostptr))
  785. return;
  786. if (WARN_ON(!IS_ALIGNED(offsetbytes, sizeof(u64))))
  787. return;
  788. if (WARN_ON(offsetbytes > (memdesc->size - sizeof(u64))))
  789. return;
  790. *((u64 *) (memdesc->hostptr + offsetbytes)) = src;
  791. /* Make sure the write is posted before continuing */
  792. wmb();
  793. }
  794. void kgsl_get_memory_usage(char *name, size_t name_size, uint64_t memflags)
  795. {
  796. unsigned int type = FIELD_GET(KGSL_MEMTYPE_MASK, memflags);
  797. struct kgsl_memtype *memtype;
  798. int i;
  799. for (i = 0; memtype_attrs[i]; i++) {
  800. memtype = container_of(memtype_attrs[i], struct kgsl_memtype, attr);
  801. if (memtype->type == type) {
  802. strscpy(name, memtype->attr.name, name_size);
  803. return;
  804. }
  805. }
  806. snprintf(name, name_size, "VK/others(%3d)", type);
  807. }
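/*
 * Build a single-entry sg_table describing one physically contiguous region,
 * used by the contiguous (DMA) allocation path below.
 */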
  808. int kgsl_memdesc_sg_dma(struct kgsl_memdesc *memdesc,
  809. phys_addr_t addr, u64 size)
  810. {
  811. int ret;
  812. struct page *page = phys_to_page(addr);
  813. memdesc->sgt = kmalloc(sizeof(*memdesc->sgt), GFP_KERNEL);
  814. if (memdesc->sgt == NULL)
  815. return -ENOMEM;
  816. ret = sg_alloc_table(memdesc->sgt, 1, GFP_KERNEL);
  817. if (ret) {
  818. kfree(memdesc->sgt);
  819. memdesc->sgt = NULL;
  820. return ret;
  821. }
  822. sg_set_page(memdesc->sgt->sgl, page, (size_t) size, 0);
  823. return 0;
  824. }
  825. static void _kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
  826. {
  827. dma_free_attrs(memdesc->dev, memdesc->size,
  828. memdesc->hostptr, memdesc->physaddr,
  829. memdesc->attrs);
  830. sg_free_table(memdesc->sgt);
  831. kfree(memdesc->sgt);
  832. memdesc->sgt = NULL;
  833. }
  834. static void kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
  835. {
  836. if (!memdesc->hostptr)
  837. return;
  838. if (memdesc->priv & KGSL_MEMDESC_MAPPED)
  839. return;
  840. atomic_long_sub(memdesc->size, &kgsl_driver.stats.coherent);
  841. _kgsl_contiguous_free(memdesc);
  842. }
  843. #ifdef CONFIG_QCOM_KGSL_USE_SHMEM
  844. #include <linux/shmem_fs.h>
  845. #include <trace/hooks/mm.h>
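/*
 * CONFIG_QCOM_KGSL_USE_SHMEM path: pages are pre-allocated in as large an
 * order as possible, split into 4K pages and parked on shmem_page_list. The
 * android_rvh_shmem_get_folio vendor hook (kgsl_shmem_fill_page) then hands
 * these pages to shmem as kgsl_alloc_page() faults them in through
 * shmem_read_mapping_page_gfp(), so the shmem file is effectively backed by
 * pages KGSL allocated itself.
 */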
  846. static int _kgsl_shmem_alloc_page(struct kgsl_memdesc *memdesc, u32 order)
  847. {
  848. int pcount;
  849. struct page *page;
  850. gfp_t gfp_mask = kgsl_gfp_mask(order);
  851. if (fatal_signal_pending(current))
  852. return -ENOMEM;
  853. /* Allocate a non-compound page so it can be split into 4K chunks */
  854. gfp_mask &= ~__GFP_COMP;
  855. page = alloc_pages(gfp_mask, order);
  856. if (page == NULL) {
  857. /* Retry with lower order pages */
  858. if (order > 1)
  859. return -EAGAIN;
  860. else
  861. return -ENOMEM;
  862. }
  863. /* Split the non-compound higher-order page into 4K pages */
  864. split_page(page, order);
  865. for (pcount = 0; pcount < (1 << order); pcount++) {
  866. clear_highpage(&page[pcount]);
  867. list_add_tail(&page[pcount].lru, &memdesc->shmem_page_list);
  868. }
  869. return pcount;
  870. }
  871. static int kgsl_shmem_alloc_pages(struct kgsl_memdesc *memdesc)
  872. {
  873. int ret, count = 0;
  874. u32 size, align, order;
  875. /* Length of remaining unallocated memdesc pages */
  876. u64 len = memdesc->size - ((u64)memdesc->page_count << PAGE_SHIFT);
  877. /* A 4K allocation is left for shmem to manage itself */
  878. if (len == PAGE_SIZE)
  879. return 0;
  880. /* Start with 1MB alignment to get the biggest page we can */
  881. align = ilog2(SZ_1M);
  882. size = kgsl_get_page_size(len, align);
  883. order = get_order(size);
  884. while (len) {
  885. ret = _kgsl_shmem_alloc_page(memdesc, order);
  886. if (ret == -EAGAIN) {
  887. size = PAGE_SIZE << --order;
  888. size = kgsl_get_page_size(size, ilog2(size));
  889. align = ilog2(size);
  890. continue;
  891. } else if (ret <= 0) {
  892. return -ENOMEM;
  893. }
  894. count += ret;
  895. len -= size;
  896. size = kgsl_get_page_size(len, align);
  897. align = ilog2(size);
  898. order = get_order(size);
  899. }
  900. return count;
  901. }
  902. static void kgsl_shmem_fill_page(void *ptr,
  903. struct shmem_inode_info *inode, struct folio **folio)
  904. {
  905. struct kgsl_memdesc *memdesc = (struct kgsl_memdesc *)inode->android_vendor_data1;
  906. if (IS_ERR_OR_NULL(memdesc))
  907. return;
  908. if (list_empty(&memdesc->shmem_page_list)) {
  909. int ret = kgsl_shmem_alloc_pages(memdesc);
  910. if (ret <= 0)
  911. return;
  912. }
  913. *folio = list_first_entry(&memdesc->shmem_page_list, struct folio, lru);
  914. list_del(&(*folio)->lru);
  915. }
  916. void kgsl_register_shmem_callback(void)
  917. {
  918. register_trace_android_rvh_shmem_get_folio(kgsl_shmem_fill_page, NULL);
  919. }
  920. static int kgsl_alloc_page(struct kgsl_memdesc *memdesc, int *page_size,
  921. struct page **pages, unsigned int pages_len,
  922. unsigned int *align, unsigned int page_off)
  923. {
  924. struct page *page;
  925. u32 pcount = (memdesc->size >> PAGE_SHIFT) - memdesc->page_count;
  926. if (pages == NULL)
  927. return -EINVAL;
  928. if (fatal_signal_pending(current))
  929. return -ENOMEM;
  930. page = shmem_read_mapping_page_gfp(memdesc->shmem_filp->f_mapping, page_off,
  931. kgsl_gfp_mask(0));
  932. if (IS_ERR(page))
  933. return PTR_ERR(page);
  934. /* Clear only pages that shmem allocated itself; list pages were already cleared */
  935. if ((memdesc->size == PAGE_SIZE) ||
  936. (list_empty(&memdesc->shmem_page_list) && (pcount > 1)))
  937. clear_highpage(page);
  938. kgsl_page_sync(memdesc->dev, page, PAGE_SIZE, DMA_TO_DEVICE);
  939. *page_size = PAGE_SIZE;
  940. *pages = page;
  941. return 1;
  942. }
  943. static int kgsl_memdesc_file_setup(struct kgsl_memdesc *memdesc)
  944. {
  945. int ret;
  946. memdesc->shmem_filp = shmem_file_setup("kgsl-3d0", memdesc->size,
  947. VM_NORESERVE);
  948. if (IS_ERR(memdesc->shmem_filp)) {
  949. ret = PTR_ERR(memdesc->shmem_filp);
  950. memdesc->shmem_filp = NULL;
  951. return ret;
  952. }
  953. INIT_LIST_HEAD(&memdesc->shmem_page_list);
  954. SHMEM_I(memdesc->shmem_filp->f_mapping->host)->android_vendor_data1 = (u64)memdesc;
  955. mapping_set_unevictable(memdesc->shmem_filp->f_mapping);
  956. return 0;
  957. }
  958. static void kgsl_free_page(struct page *p)
  959. {
  960. put_page(p);
  961. }
  962. static void _kgsl_free_pages(struct kgsl_memdesc *memdesc)
  963. {
  964. int i;
  965. WARN(!list_empty(&memdesc->shmem_page_list),
  966. "KGSL shmem page list is not empty\n");
  967. for (i = 0; i < memdesc->page_count; i++)
  968. if (memdesc->pages[i])
  969. put_page(memdesc->pages[i]);
  970. SHMEM_I(memdesc->shmem_filp->f_mapping->host)->android_vendor_data1 = 0;
  971. fput(memdesc->shmem_filp);
  972. }
  973. /* If CONFIG_QCOM_KGSL_USE_SHMEM is defined we don't use compound pages */
  974. static u32 kgsl_get_page_order(struct page *page)
  975. {
  976. return 0;
  977. }
  978. #else
  979. void kgsl_register_shmem_callback(void) { }
  980. static int kgsl_alloc_page(struct kgsl_memdesc *memdesc, int *page_size,
  981. struct page **pages, unsigned int pages_len,
  982. unsigned int *align, unsigned int page_off)
  983. {
  984. if (fatal_signal_pending(current))
  985. return -ENOMEM;
  986. return kgsl_pool_alloc_page(page_size, pages,
  987. pages_len, align, memdesc->dev);
  988. }
  989. static int kgsl_memdesc_file_setup(struct kgsl_memdesc *memdesc)
  990. {
  991. return 0;
  992. }
  993. static void kgsl_free_page(struct page *p)
  994. {
  995. kgsl_pool_free_page(p);
  996. }
  997. static void _kgsl_free_pages(struct kgsl_memdesc *memdesc)
  998. {
  999. kgsl_pool_free_pages(memdesc->pages, memdesc->page_count);
  1000. }
  1001. static u32 kgsl_get_page_order(struct page *page)
  1002. {
  1003. return compound_order(page);
  1004. }
  1005. #endif
  1006. void kgsl_page_sync(struct device *dev, struct page *page,
  1007. size_t size, enum dma_data_direction dir)
  1008. {
  1009. struct scatterlist sg;
  1011. /* The caller may deliberately pass a NULL device */
  1011. if (!dev)
  1012. return;
  1013. sg_init_table(&sg, 1);
  1014. sg_set_page(&sg, page, size, 0);
  1015. sg_dma_address(&sg) = page_to_phys(page);
  1016. /*
  1017. * APIs for Cache Maintenance Operations are updated in kernel
  1018. * version 6.1. Prior to 6.1, dma_sync_sg_for_device() with
  1019. * DMA_BIDIRECTIONAL as direction triggers cache invalidate and
  1020. * clean whereas in kernel version 6.1, it triggers only cache
  1021. * clean. Hence use dma_sync_sg_for_cpu() for cache invalidate
  1022. * for kernel version 6.1 and above.
  1023. */
  1024. if ((dir == DMA_BIDIRECTIONAL) &&
  1025. KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) {
  1026. dma_sync_sg_for_device(dev, &sg, 1, DMA_TO_DEVICE);
  1027. dma_sync_sg_for_cpu(dev, &sg, 1, DMA_FROM_DEVICE);
  1028. } else
  1029. dma_sync_sg_for_device(dev, &sg, 1, dir);
  1030. }
  1031. void kgsl_zero_page(struct page *p, unsigned int order,
  1032. struct device *dev)
  1033. {
  1034. int i;
  1035. for (i = 0; i < (1 << order); i++) {
  1036. struct page *page = nth_page(p, i);
  1037. clear_highpage(page);
  1038. }
  1039. kgsl_page_sync(dev, p, PAGE_SIZE << order, DMA_TO_DEVICE);
  1040. }
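/*
 * GFP mask policy: higher-order allocations are opportunistic (compound,
 * __GFP_NORETRY | __GFP_NOWARN, no direct reclaim) since the caller can
 * always fall back to smaller pages, while order-0 allocations use plain
 * GFP_KERNEL. The debugfs noretry flag makes every allocation fail fast
 * instead of trying the OOM path.
 */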
  1041. gfp_t kgsl_gfp_mask(int page_order)
  1042. {
  1043. gfp_t gfp_mask = __GFP_HIGHMEM;
  1044. if (page_order > 0) {
  1045. gfp_mask |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
  1046. gfp_mask &= ~__GFP_RECLAIM;
  1047. } else
  1048. gfp_mask |= GFP_KERNEL;
  1049. if (kgsl_sharedmem_noretry_flag)
  1050. gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
  1051. return gfp_mask;
  1052. }
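/*
 * Core page allocation loop: start with 1MB-aligned chunks and step down the
 * chunk size whenever the allocator returns -EAGAIN. On -ENOMEM the lockless
 * workqueue is flushed once (to complete any deferred frees) before retrying;
 * if the allocation still fails, everything gathered so far is released and
 * -ENOMEM is returned.
 */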
  1053. static int _kgsl_alloc_pages(struct kgsl_memdesc *memdesc,
  1054. struct page ***pages)
  1055. {
  1056. int count = 0;
  1057. int npages = memdesc->size >> PAGE_SHIFT;
  1058. struct page **local = kvcalloc(npages, sizeof(*local), GFP_KERNEL);
  1059. u32 page_size, align;
  1060. u64 len = memdesc->size;
  1061. bool memwq_flush_done = false;
  1062. if (!local)
  1063. return -ENOMEM;
  1064. count = kgsl_memdesc_file_setup(memdesc);
  1065. if (count) {
  1066. kvfree(local);
  1067. return count;
  1068. }
  1069. /* Start with 1MB alignment to get the biggest page we can */
  1070. align = ilog2(SZ_1M);
  1071. page_size = kgsl_get_page_size(len, align);
  1072. while (len) {
  1073. int ret = kgsl_alloc_page(memdesc, &page_size, &local[count],
  1074. npages, &align, count);
  1075. if (ret == -EAGAIN)
  1076. continue;
  1077. else if (ret <= 0) {
  1078. int i;
  1079. /* if OOM, retry once after flushing lockless_workqueue */
  1080. if (ret == -ENOMEM && !memwq_flush_done) {
  1081. flush_workqueue(kgsl_driver.lockless_workqueue);
  1082. memwq_flush_done = true;
  1083. continue;
  1084. }
  1085. for (i = 0; i < count; ) {
  1086. int n = 1 << kgsl_get_page_order(local[i]);
  1087. kgsl_free_page(local[i]);
  1088. i += n;
  1089. }
  1090. kvfree(local);
  1091. if (!kgsl_sharedmem_noretry_flag)
  1092. pr_err_ratelimited("kgsl: out of memory: only allocated %lldKb of %lldKb requested\n",
  1093. (memdesc->size - len) >> 10, memdesc->size >> 10);
  1094. if (memdesc->shmem_filp)
  1095. fput(memdesc->shmem_filp);
  1096. return -ENOMEM;
  1097. }
  1098. count += ret;
  1099. memdesc->page_count += ret;
  1100. npages -= ret;
  1101. len -= page_size;
  1102. page_size = kgsl_get_page_size(len, align);
  1103. }
  1104. *pages = local;
  1105. return count;
  1106. }
  1107. static void kgsl_free_pages(struct kgsl_memdesc *memdesc)
  1108. {
  1109. kgsl_paged_unmap_kernel(memdesc);
  1110. WARN_ON(memdesc->hostptr);
  1111. if (memdesc->priv & KGSL_MEMDESC_MAPPED)
  1112. return;
  1113. atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
  1114. _kgsl_free_pages(memdesc);
  1115. memdesc->page_count = 0;
  1116. kvfree(memdesc->pages);
  1117. memdesc->pages = NULL;
  1118. }
  1119. static void kgsl_free_system_pages(struct kgsl_memdesc *memdesc)
  1120. {
  1121. int i;
  1122. kgsl_paged_unmap_kernel(memdesc);
  1123. WARN_ON(memdesc->hostptr);
  1124. if (memdesc->priv & KGSL_MEMDESC_MAPPED)
  1125. return;
  1126. atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
  1127. for (i = 0; i < memdesc->page_count; i++)
  1128. __free_pages(memdesc->pages[i], get_order(PAGE_SIZE));
  1129. memdesc->page_count = 0;
  1130. kvfree(memdesc->pages);
  1131. memdesc->pages = NULL;
  1132. }
  1133. void kgsl_unmap_and_put_gpuaddr(struct kgsl_memdesc *memdesc)
  1134. {
  1135. if (!memdesc->size || !memdesc->gpuaddr)
  1136. return;
  1137. if (WARN_ON(kgsl_memdesc_is_global(memdesc)))
  1138. return;
  1139. /*
  1140. * Don't release the GPU address if the memory fails to unmap because
  1141. * the IOMMU driver will BUG later if we reallocated the address and
  1142. * tried to map it
  1143. */
  1144. if (!kgsl_memdesc_is_reclaimed(memdesc) &&
  1145. kgsl_mmu_unmap(memdesc->pagetable, memdesc))
  1146. return;
  1147. kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
  1148. memdesc->gpuaddr = 0;
  1149. memdesc->pagetable = NULL;
  1150. }
  1151. static const struct kgsl_memdesc_ops kgsl_contiguous_ops = {
  1152. .free = kgsl_contiguous_free,
  1153. .vmflags = VM_DONTDUMP | VM_PFNMAP | VM_DONTEXPAND | VM_DONTCOPY,
  1154. .vmfault = kgsl_contiguous_vmfault,
  1155. .put_gpuaddr = kgsl_unmap_and_put_gpuaddr,
  1156. };
  1157. #if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
  1158. static void kgsl_free_pages_from_sgt(struct kgsl_memdesc *memdesc)
  1159. {
  1160. int i;
  1161. struct scatterlist *sg;
  1162. if (WARN_ON(!memdesc->sgt))
  1163. return;
  1164. for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
  1165. /*
  1166. * sg_alloc_table_from_pages() will collapse any physically
  1167. * adjacent pages into a single scatterlist entry. We cannot
  1168. * just call __free_pages() on the entire set since we cannot
  1169. * ensure that the size is a whole order. Instead, free each
  1170. * page or compound page group individually.
  1171. */
  1172. struct page *p = sg_page(sg), *next;
  1173. unsigned int count;
  1174. unsigned int j = 0;
  1175. while (j < (sg->length/PAGE_SIZE)) {
  1176. count = 1 << compound_order(p);
  1177. next = nth_page(p, count);
  1178. kgsl_free_page(p);
  1179. p = next;
  1180. j += count;
  1181. }
  1182. }
  1183. if (memdesc->shmem_filp)
  1184. fput(memdesc->shmem_filp);
  1185. }
  1186. static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
  1187. {
  1188. int i;
  1189. struct scatterlist *sg;
  1190. int ret;
  1191. if (memdesc->priv & KGSL_MEMDESC_MAPPED)
  1192. return;
  1193. ret = kgsl_unlock_sgt(memdesc->sgt);
  1194. if (ret) {
  1195. /*
  1196. * Unlock of the secure buffer failed. This buffer will
  1197. * be stuck in secure side forever and is unrecoverable.
  1198. * Give up on the buffer and don't return it to the
  1199. * pool.
  1200. */
  1201. pr_err("kgsl: secure buf unlock failed: gpuaddr: %llx size: %llx ret: %d\n",
  1202. memdesc->gpuaddr, memdesc->size, ret);
  1203. return;
  1204. }
  1205. atomic_long_sub(memdesc->size, &kgsl_driver.stats.secure);
  1206. for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
  1207. struct page *page = sg_page(sg);
  1208. __free_pages(page, get_order(PAGE_SIZE));
  1209. }
  1210. sg_free_table(memdesc->sgt);
  1211. kfree(memdesc->sgt);
  1212. memdesc->sgt = NULL;
  1213. }
  1214. static void kgsl_free_secure_pages(struct kgsl_memdesc *memdesc)
  1215. {
  1216. int ret;
  1217. if (memdesc->priv & KGSL_MEMDESC_MAPPED)
  1218. return;
  1219. ret = kgsl_unlock_sgt(memdesc->sgt);
  1220. if (ret) {
  1221. /*
  1222. * Unlock of the secure buffer failed. This buffer will
  1223. * be stuck in secure side forever and is unrecoverable.
  1224. * Give up on the buffer and don't return it to the
  1225. * pool.
  1226. */
  1227. pr_err("kgsl: secure buf unlock failed: gpuaddr: %llx size: %llx ret: %d\n",
  1228. memdesc->gpuaddr, memdesc->size, ret);
  1229. return;
  1230. }
  1231. atomic_long_sub(memdesc->size, &kgsl_driver.stats.secure);
  1232. kgsl_free_pages_from_sgt(memdesc);
  1233. sg_free_table(memdesc->sgt);
  1234. kfree(memdesc->sgt);
  1235. memdesc->sgt = NULL;
  1236. }
  1237. void kgsl_free_secure_page(struct page *page)
  1238. {
  1239. struct sg_table sgt;
  1240. struct scatterlist sgl;
  1241. if (!page)
  1242. return;
  1243. sgt.sgl = &sgl;
  1244. sgt.nents = 1;
  1245. sgt.orig_nents = 1;
  1246. sg_init_table(&sgl, 1);
  1247. sg_set_page(&sgl, page, PAGE_SIZE, 0);
  1248. kgsl_unlock_sgt(&sgt);
  1249. __free_page(page);
  1250. }
  1251. struct page *kgsl_alloc_secure_page(void)
  1252. {
  1253. struct page *page;
  1254. struct sg_table sgt;
  1255. struct scatterlist sgl;
  1256. int status;
  1257. page = alloc_page(GFP_KERNEL | __GFP_ZERO |
  1258. __GFP_NORETRY | __GFP_HIGHMEM);
  1259. if (!page)
  1260. return NULL;
  1261. sgt.sgl = &sgl;
  1262. sgt.nents = 1;
  1263. sgt.orig_nents = 1;
  1264. sg_init_table(&sgl, 1);
  1265. sg_set_page(&sgl, page, PAGE_SIZE, 0);
  1266. status = kgsl_lock_sgt(&sgt, PAGE_SIZE);
  1267. if (status) {
  1268. if (status == -EADDRNOTAVAIL)
  1269. return NULL;
  1270. __free_page(page);
  1271. return NULL;
  1272. }
  1273. return page;
  1274. }
  1275. static const struct kgsl_memdesc_ops kgsl_secure_system_ops = {
  1276. .free = kgsl_free_secure_system_pages,
  1277. /* FIXME: Make sure vmflags / vmfault does the right thing here */
  1278. };
  1279. static const struct kgsl_memdesc_ops kgsl_secure_page_ops = {
  1280. .free = kgsl_free_secure_pages,
  1281. /* FIXME: Make sure vmflags / vmfault does the right thing here */
  1282. .put_gpuaddr = kgsl_unmap_and_put_gpuaddr,
  1283. };
  1284. #else
  1285. void kgsl_free_secure_page(struct page *page)
  1286. {
  1287. }
  1288. struct page *kgsl_alloc_secure_page(void)
  1289. {
  1290. return NULL;
  1291. }
  1292. #endif
  1293. static const struct kgsl_memdesc_ops kgsl_page_ops = {
  1294. .free = kgsl_free_pages,
  1295. .vmflags = VM_DONTDUMP | VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP,
  1296. .vmfault = kgsl_paged_vmfault,
  1297. .map_kernel = kgsl_paged_map_kernel,
  1298. .unmap_kernel = kgsl_paged_unmap_kernel,
  1299. .put_gpuaddr = kgsl_unmap_and_put_gpuaddr,
  1300. };
  1301. static const struct kgsl_memdesc_ops kgsl_system_ops = {
  1302. .free = kgsl_free_system_pages,
  1303. .vmflags = VM_DONTDUMP | VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP,
  1304. .vmfault = kgsl_paged_vmfault,
  1305. .map_kernel = kgsl_paged_map_kernel,
  1306. .unmap_kernel = kgsl_paged_unmap_kernel,
  1307. };
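/*
 * KGSL_MEMDESC_SYSMEM allocations bypass the page pools (and shmem) and take
 * order-0 pages straight from the system allocator, cleaning the cache for
 * each page; on any failure the pages allocated so far are freed and the
 * whole request fails.
 */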
  1308. static int kgsl_system_alloc_pages(struct kgsl_memdesc *memdesc, struct page ***pages)
  1309. {
  1310. struct page **local;
  1311. int i, npages = memdesc->size >> PAGE_SHIFT;
  1312. local = kvcalloc(npages, sizeof(*pages), GFP_KERNEL | __GFP_NORETRY);
  1313. if (!local)
  1314. return -ENOMEM;
  1315. for (i = 0; i < npages; i++) {
  1316. gfp_t gfp = __GFP_ZERO | __GFP_HIGHMEM |
  1317. GFP_KERNEL | __GFP_NORETRY;
  1318. if (!fatal_signal_pending(current))
  1319. local[i] = alloc_pages(gfp, get_order(PAGE_SIZE));
  1320. else
  1321. local[i] = NULL;
  1322. if (!local[i]) {
  1323. for (i = i - 1; i >= 0; i--)
  1324. __free_pages(local[i], get_order(PAGE_SIZE));
  1325. kvfree(local);
  1326. return -ENOMEM;
  1327. }
  1328. /* Make sure the cache is clean */
  1329. kgsl_page_sync(memdesc->dev, local[i], PAGE_SIZE, DMA_TO_DEVICE);
  1330. }
  1331. *pages = local;
  1332. memdesc->page_count = npages;
  1333. return npages;
  1334. }
#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
static int kgsl_alloc_secure_pages(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	struct page **pages;
	int count;
	struct sg_table *sgt;
	int ret;

	size = PAGE_ALIGN(size);

	if (!size || size > UINT_MAX)
		return -EINVAL;

	kgsl_memdesc_init(device, memdesc, flags);
	memdesc->priv |= priv;
	memdesc->size = size;

	if (priv & KGSL_MEMDESC_SYSMEM) {
		memdesc->ops = &kgsl_secure_system_ops;
		count = kgsl_system_alloc_pages(memdesc, &pages);
	} else {
		memdesc->ops = &kgsl_secure_page_ops;
		count = _kgsl_alloc_pages(memdesc, &pages);
	}

	if (count < 0)
		return count;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		_kgsl_free_pages(memdesc);
		kvfree(pages);
		return -ENOMEM;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		_kgsl_free_pages(memdesc);
		kvfree(pages);
		return ret;
	}

	/* Now that we've moved to an sg table we don't need the pages array */
	kvfree(pages);
	memdesc->sgt = sgt;

	ret = kgsl_lock_sgt(sgt, size);
	if (ret) {
		if (ret != -EADDRNOTAVAIL)
			kgsl_free_pages_from_sgt(memdesc);
		sg_free_table(sgt);
		kfree(sgt);
		memdesc->sgt = NULL;
		return ret;
	}

	KGSL_STATS_ADD(size, &kgsl_driver.stats.secure,
		&kgsl_driver.stats.secure_max);

	return 0;
}

static int kgsl_allocate_secure(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	return kgsl_alloc_secure_pages(device, memdesc, size, flags, priv);
}
#else
static int kgsl_allocate_secure(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	return -ENODEV;
}
#endif
static int kgsl_alloc_pages(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	struct page **pages;
	int count;

	size = PAGE_ALIGN(size);

	if (!size || size > UINT_MAX)
		return -EINVAL;

	kgsl_memdesc_init(device, memdesc, flags);
	memdesc->priv |= priv;
	memdesc->size = size;

	if (priv & KGSL_MEMDESC_SYSMEM) {
		memdesc->ops = &kgsl_system_ops;
		count = kgsl_system_alloc_pages(memdesc, &pages);
	} else {
		memdesc->ops = &kgsl_page_ops;
		count = _kgsl_alloc_pages(memdesc, &pages);
	}

	if (count < 0)
		return count;

	memdesc->pages = pages;

	KGSL_STATS_ADD(size, &kgsl_driver.stats.page_alloc,
		&kgsl_driver.stats.page_alloc_max);

	return 0;
}
static int _kgsl_alloc_contiguous(struct device *dev,
		struct kgsl_memdesc *memdesc, u64 size, unsigned long attrs)
{
	int ret;
	phys_addr_t phys;
	void *ptr;

	ptr = dma_alloc_attrs(dev, (size_t) size, &phys,
		GFP_KERNEL, attrs);
	if (!ptr)
		return -ENOMEM;

	memdesc->size = size;
	memdesc->dev = dev;
	memdesc->hostptr = ptr;
	memdesc->physaddr = phys;
	memdesc->gpuaddr = phys;
	memdesc->attrs = attrs;

	ret = kgsl_memdesc_sg_dma(memdesc, phys, size);
	if (ret)
		dma_free_attrs(dev, (size_t) size, ptr, phys, attrs);

	return ret;
}

static int kgsl_alloc_contiguous(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	int ret;

	size = PAGE_ALIGN(size);

	if (!size || size > UINT_MAX)
		return -EINVAL;

	kgsl_memdesc_init(device, memdesc, flags);
	memdesc->priv |= priv;
	memdesc->ops = &kgsl_contiguous_ops;

	ret = _kgsl_alloc_contiguous(&device->pdev->dev, memdesc, size, 0);

	if (!ret)
		KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
			&kgsl_driver.stats.coherent_max);

	return ret;
}
int kgsl_allocate_user(struct kgsl_device *device, struct kgsl_memdesc *memdesc,
		u64 size, u64 flags, u32 priv)
{
	if (device->mmu.type == KGSL_MMU_TYPE_NONE)
		return kgsl_alloc_contiguous(device, memdesc, size, flags,
			priv);
	else if (flags & KGSL_MEMFLAGS_SECURE)
		return kgsl_allocate_secure(device, memdesc, size, flags, priv);

	return kgsl_alloc_pages(device, memdesc, size, flags, priv);
}
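
/*
 * Illustrative sketch (not part of the driver): kgsl_allocate_user() picks
 * the backing store for a buffer. Contiguous DMA memory is used only when
 * there is no IOMMU, secure requests go through the secure path, and
 * everything else is backed by discontiguous pages. The flag values below
 * are hypothetical examples.
 *
 *	struct kgsl_memdesc desc = {};
 *	int ret;
 *
 *	ret = kgsl_allocate_user(device, &desc, SZ_64K,
 *		KGSL_MEMFLAGS_SECURE, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	kgsl_sharedmem_free(&desc);
 */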
int kgsl_allocate_kernel(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
{
	int ret;

	ret = kgsl_allocate_user(device, memdesc, size, flags, priv);
	if (ret)
		return ret;

	if (memdesc->ops->map_kernel) {
		ret = memdesc->ops->map_kernel(memdesc);
		if (ret) {
			kgsl_sharedmem_free(memdesc);
			return ret;
		}
	}

	return 0;
}
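
/*
 * Illustrative sketch (not part of the driver): kgsl_allocate_kernel() also
 * maps the buffer into the kernel when the backing ops provide map_kernel(),
 * so the caller can touch memdesc->hostptr directly. The "scratch" name is
 * hypothetical.
 *
 *	struct kgsl_memdesc scratch = {};
 *
 *	if (!kgsl_allocate_kernel(device, &scratch, PAGE_SIZE, 0, 0)) {
 *		memset(scratch.hostptr, 0, PAGE_SIZE);
 *		...
 *		kgsl_sharedmem_free(&scratch);
 *	}
 */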
int kgsl_memdesc_init_fixed(struct kgsl_device *device,
		struct platform_device *pdev, const char *resource,
		struct kgsl_memdesc *memdesc)
{
	u32 entry[2];

	if (of_property_read_u32_array(pdev->dev.of_node,
		resource, entry, 2))
		return -ENODEV;

	kgsl_memdesc_init(device, memdesc, 0);
	memdesc->physaddr = entry[0];
	memdesc->size = entry[1];

	return kgsl_memdesc_sg_dma(memdesc, entry[0], entry[1]);
}
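
/*
 * Illustrative sketch (not part of the driver): the "resource" property read
 * above is expected to be a two-cell array of { physical base, size }. A
 * hypothetical device tree entry could look like:
 *
 *	qcom,gpu-example-region = <0x17c00000 0x1000>;
 *
 * The property name and values here are examples only.
 */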
struct kgsl_memdesc *kgsl_allocate_global_fixed(struct kgsl_device *device,
		const char *resource, const char *name)
{
	struct kgsl_global_memdesc *gmd = kzalloc(sizeof(*gmd), GFP_KERNEL);
	int ret;

	if (!gmd)
		return ERR_PTR(-ENOMEM);

	ret = kgsl_memdesc_init_fixed(device, device->pdev, resource,
		&gmd->memdesc);
	if (ret) {
		kfree(gmd);
		return ERR_PTR(ret);
	}

	gmd->memdesc.priv = KGSL_MEMDESC_GLOBAL;
	gmd->name = name;

	/*
	 * No lock here, because this function is only called during probe/init
	 * while the caller is holding the mutex
	 */
	list_add_tail(&gmd->node, &device->globals);

	kgsl_mmu_map_global(device, &gmd->memdesc, 0);

	return &gmd->memdesc;
}
static struct kgsl_memdesc *
kgsl_allocate_secure_global(struct kgsl_device *device,
		u64 size, u64 flags, u32 priv, const char *name)
{
	struct kgsl_global_memdesc *md;
	int ret;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return ERR_PTR(-ENOMEM);

	/* Make sure that we get global memory from system memory */
	priv |= KGSL_MEMDESC_GLOBAL | KGSL_MEMDESC_SYSMEM;

	ret = kgsl_allocate_secure(device, &md->memdesc, size, flags, priv);
	if (ret) {
		kfree(md);
		return ERR_PTR(ret);
	}

	md->name = name;

	/*
	 * No lock here, because this function is only called during probe/init
	 * while the caller is holding the mutex
	 */
	list_add_tail(&md->node, &device->globals);

	/*
	 * No offset needed, we'll get an address inside of the pagetable
	 * normally
	 */
	kgsl_mmu_map_global(device, &md->memdesc, 0);

	kgsl_trace_gpu_mem_total(device, md->memdesc.size);

	return &md->memdesc;
}
struct kgsl_memdesc *kgsl_allocate_global(struct kgsl_device *device,
		u64 size, u32 padding, u64 flags, u32 priv, const char *name)
{
	int ret;
	struct kgsl_global_memdesc *md;

	if (flags & KGSL_MEMFLAGS_SECURE)
		return kgsl_allocate_secure_global(device, size, flags, priv,
			name);

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return ERR_PTR(-ENOMEM);

	/*
	 * Make sure that we get global memory from system memory to keep from
	 * taking up pool memory for the life of the driver
	 */
	priv |= KGSL_MEMDESC_GLOBAL | KGSL_MEMDESC_SYSMEM;

	ret = kgsl_allocate_kernel(device, &md->memdesc, size, flags, priv);
	if (ret) {
		kfree(md);
		return ERR_PTR(ret);
	}

	md->name = name;

	/*
	 * No lock here, because this function is only called during probe/init
	 * while the caller is holding the mutex
	 */
	list_add_tail(&md->node, &device->globals);

	kgsl_mmu_map_global(device, &md->memdesc, padding);

	kgsl_trace_gpu_mem_total(device, md->memdesc.size);

	return &md->memdesc;
}
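
/*
 * Illustrative sketch (not part of the driver): globals allocated here live
 * for the life of the driver and are mapped into every pagetable by
 * kgsl_mmu_map_global(). The name string below is a hypothetical example.
 *
 *	struct kgsl_memdesc *md;
 *
 *	md = kgsl_allocate_global(device, PAGE_SIZE, 0, 0, 0, "example");
 *	if (IS_ERR(md))
 *		return PTR_ERR(md);
 */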
void kgsl_free_globals(struct kgsl_device *device)
{
	struct kgsl_global_memdesc *md, *tmp;

	list_for_each_entry_safe(md, tmp, &device->globals, node) {
		kgsl_sharedmem_free(&md->memdesc);
		list_del(&md->node);
		kfree(md);
	}
}