arm-smmu-qcom.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/acpi.h>
  7. #include <linux/adreno-smmu-priv.h>
  8. #include <linux/delay.h>
  9. #include <linux/bitfield.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/of.h>
  14. #include <linux/of_platform.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/of_device.h>
  17. #include <linux/qcom_scm.h>
  18. #include <linux/slab.h>
  19. #include <linux/workqueue.h>
  20. #include "arm-smmu.h"
  21. #include "arm-smmu-qcom.h"
  22. #define QCOM_DUMMY_VAL -1
  23. #define IMPL_DEF4_MICRO_MMU_CTRL 0
  24. #define IMPL_DEF4_CLK_ON_STATUS 0x50
  25. #define IMPL_DEF4_CLK_ON_CLIENT_STATUS 0x54
  26. #define MICRO_MMU_CTRL_LOCAL_HALT_REQ BIT(2)
  27. #define MICRO_MMU_CTRL_IDLE BIT(3)
  28. #include "arm-smmu-debug.h"
  29. #include <linux/debugfs.h>
  30. #include <linux/uaccess.h>
  31. /* Definitions for implementation-defined registers */
  32. #define ACTLR_QCOM_OSH BIT(28)
  33. #define ACTLR_QCOM_ISH BIT(29)
  34. #define ACTLR_QCOM_NSH BIT(30)
  35. struct arm_smmu_impl_def_reg {
  36. u32 offset;
  37. u32 value;
  38. };
  39. struct qsmmuv2_archdata {
  40. spinlock_t atos_lock;
  41. struct arm_smmu_impl_def_reg *impl_def_attach_registers;
  42. unsigned int num_impl_def_attach_registers;
  43. struct arm_smmu_device smmu;
  44. };
  45. #define to_qsmmuv2_archdata(smmu) \
  46. container_of(smmu, struct qsmmuv2_archdata, smmu)
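/*
 * Poll the implementation-defined micro-MMU control register until the
 * SMMU reports idle after a local halt request, or time out.
 */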
  47. static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
  48. {
  49. void __iomem *reg = arm_smmu_page(smmu, ARM_SMMU_IMPL_DEF4);
  50. struct device *dev = smmu->dev;
  51. u32 tmp;
  52. if (readl_poll_timeout_atomic(reg + IMPL_DEF4_MICRO_MMU_CTRL, tmp,
  53. (tmp & MICRO_MMU_CTRL_IDLE), 0, 30000)) {
  54. dev_err(dev, "Couldn't halt SMMU!\n");
  55. return -EBUSY;
  56. }
  57. return 0;
  58. }
  59. static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
  60. {
  61. u32 val;
  62. val = arm_smmu_readl(smmu, ARM_SMMU_IMPL_DEF4,
  63. IMPL_DEF4_MICRO_MMU_CTRL);
  64. val |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
  65. arm_smmu_writel(smmu, ARM_SMMU_IMPL_DEF4, IMPL_DEF4_MICRO_MMU_CTRL,
  66. val);
  67. return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
  68. }
  69. static int qsmmuv2_halt(struct arm_smmu_device *smmu)
  70. {
  71. return __qsmmuv2_halt(smmu, true);
  72. }
  73. static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
  74. {
  75. return __qsmmuv2_halt(smmu, false);
  76. }
  77. static void qsmmuv2_resume(struct arm_smmu_device *smmu)
  78. {
  79. u32 val;
  80. val = arm_smmu_readl(smmu, ARM_SMMU_IMPL_DEF4,
  81. IMPL_DEF4_MICRO_MMU_CTRL);
  82. val &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
  83. arm_smmu_writel(smmu, ARM_SMMU_IMPL_DEF4, IMPL_DEF4_MICRO_MMU_CTRL,
  84. val);
  85. }
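/*
 * Perform a hardware address translation (ATOS) for @iova on the domain's
 * context bank: write the page-aligned VA to ATS1PR, poll ATSR until the
 * lookup completes, then read the result from PAR.
 */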
  86. static phys_addr_t __qsmmuv2_iova_to_phys_hard(
  87. struct arm_smmu_domain *smmu_domain,
  88. dma_addr_t iova)
  89. {
  90. struct arm_smmu_device *smmu = smmu_domain->smmu;
  91. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  92. struct device *dev = smmu->dev;
  93. int idx = cfg->cbndx;
  94. void __iomem *reg;
  95. u32 tmp;
  96. u64 phys;
  97. unsigned long va;
  98. /* ATS1 registers can only be written atomically */
  99. va = iova & ~0xfffUL;
  100. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
  101. arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
  102. else
  103. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
  104. reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx));
  105. if (readl_poll_timeout_atomic(reg + ARM_SMMU_CB_ATSR, tmp,
  106. !(tmp & ARM_SMMU_ATSR_ACTIVE), 5, 50)) {
  107. dev_err(dev, "iova to phys timed out on %pad.\n", &iova);
  108. phys = 0;
  109. return phys;
  110. }
  111. phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
  112. if (phys & ARM_SMMU_CB_PAR_F) {
  113. dev_err(dev, "translation fault!\n");
  114. dev_err(dev, "PAR = 0x%llx\n", phys);
  115. phys = 0;
  116. } else {
  117. phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
  118. }
  119. return phys;
  120. }
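/*
 * ATOS entry point for QSMMUv2: serialize against other ATOS users, halt
 * the SMMU, momentarily disable stall-on-fault and clear any pending FSR
 * bits so the lookup can report faults cleanly, then restore state.
 */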
  121. static phys_addr_t qsmmuv2_iova_to_phys_hard(
  122. struct arm_smmu_domain *smmu_domain,
  123. struct qcom_iommu_atos_txn *txn)
  124. {
  125. struct arm_smmu_device *smmu = smmu_domain->smmu;
  126. struct qsmmuv2_archdata *data = to_qsmmuv2_archdata(smmu);
  127. int idx = smmu_domain->cfg.cbndx;
  128. dma_addr_t iova = txn->addr;
  129. phys_addr_t phys = 0;
  130. unsigned long flags;
  131. u32 sctlr, sctlr_orig, fsr;
  132. spin_lock_irqsave(&data->atos_lock, flags);
  133. qsmmuv2_halt_nowait(smmu);
  134. /* disable stall mode momentarily */
  135. sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
  136. sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG);
  137. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
  138. /* clear FSR to allow ATOS to log any faults */
  139. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  140. if (fsr & ARM_SMMU_FSR_FAULT) {
  141. /* Clear pending interrupts */
  142. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
  143. /*
  144. * Barrier required to ensure that the FSR is cleared
  145. * before resuming SMMU operation
  146. */
  147. wmb();
  148. /*
149. * TBU halt takes care of resuming any stalled transaction.
150. * Kept here for completeness' sake.
  151. */
  152. if (fsr & ARM_SMMU_FSR_SS)
  153. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
  154. ARM_SMMU_RESUME_TERMINATE);
  155. }
  156. qsmmuv2_wait_for_halt(smmu);
  157. phys = __qsmmuv2_iova_to_phys_hard(smmu_domain, iova);
  158. /* restore SCTLR */
  159. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
  160. qsmmuv2_resume(smmu);
  161. spin_unlock_irqrestore(&data->atos_lock, flags);
  162. return phys;
  163. }
  164. static void qsmmuv2_tlb_sync_timeout(struct arm_smmu_device *smmu)
  165. {
  166. u32 clk_on, clk_on_client;
  167. dev_err_ratelimited(smmu->dev,
  168. "TLB sync timed out -- SMMU may be deadlocked\n");
  169. clk_on = arm_smmu_readl(smmu, ARM_SMMU_IMPL_DEF4,
  170. IMPL_DEF4_CLK_ON_STATUS);
  171. clk_on_client = arm_smmu_readl(smmu, ARM_SMMU_IMPL_DEF4,
  172. IMPL_DEF4_CLK_ON_CLIENT_STATUS);
  173. dev_err_ratelimited(smmu->dev,
  174. "clk on 0x%x, clk on client 0x%x status\n",
  175. clk_on, clk_on_client);
  176. BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
  177. }
  178. static int qsmmuv2_device_reset(struct arm_smmu_device *smmu)
  179. {
  180. struct qsmmuv2_archdata *data = to_qsmmuv2_archdata(smmu);
  181. struct arm_smmu_impl_def_reg *regs = data->impl_def_attach_registers;
  182. u32 i;
  183. /* Program implementation defined registers */
  184. qsmmuv2_halt(smmu);
  185. for (i = 0; i < data->num_impl_def_attach_registers; ++i)
  186. arm_smmu_gr0_write(smmu, regs[i].offset, regs[i].value);
  187. qsmmuv2_resume(smmu);
  188. return 0;
  189. }
  190. static void qsmmuv2_init_cb(struct arm_smmu_domain *smmu_domain,
  191. struct device *dev)
  192. {
  193. struct arm_smmu_device *smmu = smmu_domain->smmu;
  194. int idx = smmu_domain->cfg.cbndx;
  195. const struct iommu_flush_ops *tlb;
  196. u32 val;
  197. tlb = smmu_domain->flush_ops;
  198. val = ACTLR_QCOM_ISH | ACTLR_QCOM_OSH | ACTLR_QCOM_NSH;
  199. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ACTLR, val);
  200. /*
  201. * Flush the context bank after modifying ACTLR to ensure there
  202. * are no cache entries with stale state
  203. */
  204. tlb->tlb_flush_all(smmu_domain);
  205. }
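/*
 * Parse the optional "attach-impl-defs" DT property, a list of
 * <offset value> pairs describing implementation-defined GR0 registers
 * that qsmmuv2_device_reset() programs while the SMMU is halted.
 */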
  206. static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
  207. {
  208. struct device *dev = smmu->dev;
  209. struct qsmmuv2_archdata *data = to_qsmmuv2_archdata(smmu);
  210. int i, ntuples, ret;
  211. u32 *tuples;
  212. struct arm_smmu_impl_def_reg *regs, *regit;
  213. if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
  214. return 0;
  215. ntuples /= sizeof(u32);
  216. if (ntuples % 2) {
  217. dev_err(dev,
  218. "Invalid number of attach-impl-defs registers: %d\n",
  219. ntuples);
  220. return -EINVAL;
  221. }
  222. regs = devm_kzalloc(dev, sizeof(*data->impl_def_attach_registers) *
  223. ntuples, GFP_KERNEL);
  224. if (!regs)
  225. return -ENOMEM;
  226. tuples = kzalloc(sizeof(u32) * ntuples * 2, GFP_KERNEL);
  227. if (!tuples)
  228. return -ENOMEM;
  229. ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
  230. tuples, ntuples);
  231. if (ret) {
  232. kfree(tuples);
  233. return ret;
  234. }
  235. for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
  236. regit->offset = tuples[i];
  237. regit->value = tuples[i + 1];
  238. }
  239. kfree(tuples);
  240. data->impl_def_attach_registers = regs;
  241. data->num_impl_def_attach_registers = ntuples / 2;
  242. return 0;
  243. }
  244. static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
  245. {
  246. return container_of(smmu, struct qcom_smmu, smmu);
  247. }
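/*
 * Issue a TLB sync and poll the status register, spinning briefly and then
 * backing off with increasing delays; on timeout, hand off to
 * qcom_smmu_tlb_sync_debug() for implementation-specific reporting.
 */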
  248. static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
  249. int sync, int status)
  250. {
  251. unsigned int spin_cnt, delay;
  252. u32 reg;
  253. arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
  254. for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
  255. for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
  256. reg = arm_smmu_readl(smmu, page, status);
  257. if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
  258. return;
  259. cpu_relax();
  260. }
  261. udelay(delay);
  262. }
  263. qcom_smmu_tlb_sync_debug(smmu);
  264. }
  265. static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
  266. u32 reg)
  267. {
  268. struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
  269. /*
  270. * On the GPU device we want to process subsequent transactions after a
  271. * fault to keep the GPU from hanging
  272. */
  273. reg |= ARM_SMMU_SCTLR_HUPCF;
  274. if (qsmmu->stall_enabled & BIT(idx))
  275. reg |= ARM_SMMU_SCTLR_CFCFG;
  276. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
  277. }
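/*
 * Snapshot the context-bank fault state (FSR, FSYNR0/1, FAR, CBFRSYNRA,
 * TTBR0, CONTEXTIDR) for the GPU driver's fault reporting.
 */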
  278. static void qcom_adreno_smmu_get_fault_info(const void *cookie,
  279. struct adreno_smmu_fault_info *info)
  280. {
  281. struct arm_smmu_domain *smmu_domain = (void *)cookie;
  282. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  283. struct arm_smmu_device *smmu = smmu_domain->smmu;
  284. info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
  285. info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
  286. info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
  287. info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
  288. info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
  289. info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
  290. info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
  291. }
  292. static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
  293. {
  294. struct arm_smmu_domain *smmu_domain = (void *)cookie;
  295. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  296. struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
  297. if (enabled)
  298. qsmmu->stall_enabled |= BIT(cfg->cbndx);
  299. else
  300. qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
  301. }
  302. static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
  303. {
  304. struct arm_smmu_domain *smmu_domain = (void *)cookie;
  305. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  306. struct arm_smmu_device *smmu = smmu_domain->smmu;
  307. u32 reg = 0;
  308. if (terminate)
  309. reg |= ARM_SMMU_RESUME_TERMINATE;
  310. arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
  311. }
  312. #define QCOM_ADRENO_SMMU_GPU_SID 0
  313. #define QCOM_ADRENO_SMMU_GPU_LPAC_SID 1
  314. static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
  315. {
  316. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  317. int i;
  318. /*
  319. * The GPU will always use SID 0 so that is a handy way to uniquely
  320. * identify it and configure it for per-instance pagetables
  321. */
  322. for (i = 0; i < fwspec->num_ids; i++) {
  323. u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
  324. if (sid == QCOM_ADRENO_SMMU_GPU_SID ||
  325. sid == QCOM_ADRENO_SMMU_GPU_LPAC_SID)
  326. return true;
  327. }
  328. return false;
  329. }
  330. static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
  331. const void *cookie)
  332. {
  333. struct arm_smmu_domain *smmu_domain = (void *)cookie;
  334. struct io_pgtable *pgtable =
  335. io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
  336. return &pgtable->cfg;
  337. }
  338. /*
  339. * Local implementation to configure TTBR0 with the specified pagetable config.
  340. * The GPU driver will call this to enable TTBR0 when per-instance pagetables
  341. * are active
  342. */
  343. static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
  344. const struct io_pgtable_cfg *pgtbl_cfg)
  345. {
  346. struct arm_smmu_domain *smmu_domain = (void *)cookie;
  347. struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
  348. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  349. struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
  350. /* The domain must have split pagetables already enabled */
  351. if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
  352. return -EINVAL;
  353. /* If the pagetable config is NULL, disable TTBR0 */
  354. if (!pgtbl_cfg) {
  355. /* Do nothing if it is already disabled */
  356. if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
  357. return -EINVAL;
  358. /* Set TCR to the original configuration */
  359. cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
  360. cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
  361. } else {
  362. u32 tcr = cb->tcr[0];
  363. /* Don't call this again if TTBR0 is already enabled */
  364. if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
  365. return -EINVAL;
  366. tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
  367. tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);
  368. cb->tcr[0] = tcr;
  369. cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
  370. cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
  371. }
  372. arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);
  373. return 0;
  374. }
  375. static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
  376. struct arm_smmu_device *smmu,
  377. struct device *dev, int start)
  378. {
  379. int count;
  380. /*
  381. * Assign context bank 0 and 1 to the GPU device so the GPU hardware can
  382. * switch pagetables
  383. */
  384. if (qcom_adreno_smmu_is_gpu_device(dev)) {
  385. start = 0;
  386. count = 2;
  387. } else {
  388. start = 2;
  389. count = smmu->num_context_banks;
  390. }
  391. return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
  392. }
  393. static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
  394. {
  395. const struct device_node *np = smmu->dev->of_node;
  396. if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
  397. return false;
  398. return true;
  399. }
  400. static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[];
  401. static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
  402. struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
  403. {
  404. struct adreno_smmu_priv *priv;
  405. const struct device_node *np = smmu_domain->smmu->dev->of_node;
  406. struct qcom_io_pgtable_info *input_info =
  407. container_of(pgtbl_cfg, struct qcom_io_pgtable_info, cfg);
  408. smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
  409. /* Only enable split pagetables for the GPU device (SID 0) */
  410. if (!qcom_adreno_smmu_is_gpu_device(dev))
  411. return 0;
  412. /*
  413. * All targets that use the qcom,adreno-smmu compatible string *should*
  414. * be AARCH64 stage 1 but double check because the arm-smmu code assumes
  415. * that is the case when the TTBR1 quirk is enabled
  416. */
  417. if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
  418. (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
  419. (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
  420. pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;
  421. /*
  422. * Initialize private interface with GPU:
  423. */
  424. priv = dev_get_drvdata(dev);
  425. priv->cookie = smmu_domain;
  426. priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
  427. priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
  428. priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
  429. priv->pgtbl_info = *input_info;
  430. /*
  431. * These functions are only compatible with the data structures used by the
  432. * QCOM SMMU implementation hooks, and are thus not appropriate to set for other
  433. * implementations (e.g. QSMMUV500).
  434. *
  435. * Providing these functions as part of the GPU interface also makes little sense
  436. * as context banks are set to stall by default anyway.
  437. */
  438. if (of_match_node(qcom_smmu_impl_of_match, np)) {
  439. priv->set_stall = qcom_adreno_smmu_set_stall;
  440. priv->resume_translation = qcom_adreno_smmu_resume_translation;
  441. }
  442. return 0;
  443. }
  444. static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
  445. { .compatible = "qcom,adreno" },
  446. { .compatible = "qcom,mdp4" },
  447. { .compatible = "qcom,mdss" },
  448. { .compatible = "qcom,sc7180-mdss" },
  449. { .compatible = "qcom,sc7180-mss-pil" },
  450. { .compatible = "qcom,sc7280-mdss" },
  451. { .compatible = "qcom,sc7280-mss-pil" },
  452. { .compatible = "qcom,sc8180x-mdss" },
  453. { .compatible = "qcom,sm8250-mdss" },
  454. { .compatible = "qcom,sdm845-mdss" },
  455. { .compatible = "qcom,sdm845-mss-pil" },
  456. { }
  457. };
  458. static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
  459. struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
  460. {
  461. smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
  462. return 0;
  463. }
  464. static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
  465. {
  466. unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
  467. struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
  468. u32 reg;
  469. u32 smr;
  470. int i;
  471. /*
  472. * With some firmware versions writes to S2CR of type FAULT are
  473. * ignored, and writing BYPASS will end up written as FAULT in the
  474. * register. Perform a write to S2CR to detect if this is the case and
  475. * if so reserve a context bank to emulate bypass streams.
  476. */
  477. reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
  478. FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
  479. FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
  480. arm_smmu_gr0_write(smmu, last_s2cr, reg);
  481. reg = arm_smmu_gr0_read(smmu, last_s2cr);
  482. if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
  483. qsmmu->bypass_quirk = true;
  484. qsmmu->bypass_cbndx = smmu->num_context_banks - 1;
  485. set_bit(qsmmu->bypass_cbndx, smmu->context_map);
  486. arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
  487. reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
  488. arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
  489. }
  490. for (i = 0; i < smmu->num_mapping_groups; i++) {
  491. smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
  492. if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
  493. /* Ignore valid bit for SMR mask extraction. */
  494. smr &= ~ARM_SMMU_SMR_VALID;
  495. smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
  496. smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
  497. smmu->smrs[i].valid = true;
  498. smmu->smrs[i].used = true;
  499. smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
  500. smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
  501. smmu->s2crs[i].cbndx = 0xff;
  502. }
  503. }
  504. return 0;
  505. }
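/*
 * Write S2CR, applying the BYPASS/FAULT type substitution required when
 * the firmware bypass quirk is active.
 */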
  506. static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
  507. {
  508. struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
  509. struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
  510. u32 cbndx = s2cr->cbndx;
  511. u32 type = s2cr->type;
  512. u32 reg;
  513. if (qsmmu->bypass_quirk) {
  514. if (type == S2CR_TYPE_BYPASS) {
  515. /*
  516. * Firmware with quirky S2CR handling will substitute
  517. * BYPASS writes with FAULT, so point the stream to the
  518. * reserved context bank and ask for translation on the
  519. * stream
  520. */
  521. type = S2CR_TYPE_TRANS;
  522. cbndx = qsmmu->bypass_cbndx;
  523. } else if (type == S2CR_TYPE_FAULT) {
  524. /*
  525. * Firmware with quirky S2CR handling will ignore FAULT
  526. * writes, so trick it to write FAULT by asking for a
  527. * BYPASS.
  528. */
  529. type = S2CR_TYPE_BYPASS;
  530. cbndx = 0xff;
  531. }
  532. }
  533. reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
  534. FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
  535. FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
  536. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
  537. }
  538. static int qcom_smmu_def_domain_type(struct device *dev)
  539. {
  540. const struct of_device_id *match =
  541. of_match_device(qcom_smmu_client_of_match, dev);
  542. return match ? IOMMU_DOMAIN_IDENTITY : 0;
  543. }
  544. static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
  545. {
  546. int ret;
  547. /*
  548. * To address performance degradation in non-real time clients,
  549. * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
  550. * such as MTP and db845, whose firmwares implement secure monitor
  551. * call handlers to turn on/off the wait-for-safe logic.
  552. */
  553. ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
  554. if (ret)
  555. dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");
  556. return ret;
  557. }
  558. static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
  559. {
  560. const struct device_node *np = smmu->dev->of_node;
  561. arm_mmu500_reset(smmu);
  562. if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
  563. return qcom_sdm845_smmu500_reset(smmu);
  564. return 0;
  565. }
  566. static const struct arm_smmu_impl qcom_smmu_impl = {
  567. .init_context = qcom_smmu_init_context,
  568. .cfg_probe = qcom_smmu_cfg_probe,
  569. .def_domain_type = qcom_smmu_def_domain_type,
  570. .reset = qcom_smmu500_reset,
  571. .write_s2cr = qcom_smmu_write_s2cr,
  572. .tlb_sync = qcom_smmu_tlb_sync,
  573. };
  574. #define TBUID_SHIFT 10
  575. #define DEBUG_SID_HALT_REG 0x0
  576. #define DEBUG_SID_HALT_REQ BIT(16)
  577. #define DEBUG_SID_HALT_SID GENMASK(9, 0)
  578. #define DEBUG_VA_ADDR_REG 0x8
  579. #define DEBUG_TXN_TRIGG_REG 0x18
  580. #define DEBUG_TXN_AXPROT GENMASK(8, 6)
  581. #define DEBUG_TXN_AXCACHE GENMASK(5, 2)
  582. #define DEBUG_TXN_WRITE BIT(1)
  583. #define DEBUG_TXN_AXPROT_PRIV 0x1
  584. #define DEBUG_TXN_AXPROT_UNPRIV 0x0
  585. #define DEBUG_TXN_AXPROT_NSEC 0x2
  586. #define DEBUG_TXN_AXPROT_SEC 0x0
  587. #define DEBUG_TXN_AXPROT_INST 0x4
  588. #define DEBUG_TXN_AXPROT_DATA 0x0
  589. #define DEBUG_TXN_READ (0x0 << 1)
  590. #define DEBUG_TXN_TRIGGER BIT(0)
  591. #define DEBUG_SR_HALT_ACK_REG 0x20
  592. #define DEBUG_SR_HALT_ACK_VAL (0x1 << 1)
  593. #define DEBUG_SR_ECATS_RUNNING_VAL (0x1 << 0)
  594. #define DEBUG_PAR_REG 0x28
  595. #define DEBUG_PAR_PA GENMASK_ULL(47, 12)
  596. #define DEBUG_PAR_FAULT_VAL BIT(0)
  597. #define DEBUG_AXUSER_REG 0x30
  598. #define DEBUG_AXUSER_CDMID GENMASK_ULL(43, 36)
  599. #define DEBUG_AXUSER_CDMID_VAL 255
  600. #define TBU_DBG_TIMEOUT_US 100
  601. #define TBU_MICRO_IDLE_DELAY_US 5
  602. /* QTB constants */
  603. #define QTB_DBG_TIMEOUT_US 100
  604. #define QTB_SWID_LOW 0x0
  605. #define QTB_OVR_DBG_FENCEREQ 0x410
  606. #define QTB_OVR_DBG_FENCEREQ_HALT BIT(0)
  607. #define QTB_OVR_DBG_FENCEACK 0x418
  608. #define QTB_OVR_DBG_FENCEACK_ACK BIT(0)
  609. #define QTB_OVR_ECATS_INFLD0 0x430
  610. #define QTB_OVR_ECATS_INFLD0_PCIE_NO_SNOOP BIT(21)
  611. #define QTB_OVR_ECATS_INFLD0_SEC_SID BIT(20)
  612. #define QTB_OVR_ECATS_INFLD0_QAD GENMASK(19, 16)
  613. #define QTB_OVR_ECATS_INFLD0_SID GENMASK(9, 0)
  614. #define QTB_OVR_ECATS_INFLD1 0x438
  615. #define QTB_OVR_ECATS_INFLD1_PNU BIT(13)
  616. #define QTB_OVR_ECATS_INFLD1_IND BIT(12)
  617. #define QTB_OVR_ECATS_INFLD1_DIRTY BIT(11)
  618. #define QTB_OVR_ECATS_INFLD1_TR_TYPE GENMASK(10, 8)
  619. #define QTB_OVR_ECATS_INFLD1_TR_TYPE_SHARED 4
  620. #define QTB_OVR_ECATS_INFLD1_ALLOC GENMASK(7, 4)
  621. #define QTB_OVR_ECATS_INFLD1_NON_SEC BIT(3)
  622. #define QTB_OVR_ECATS_INFLD1_OPC GENMASK(2, 0)
  623. #define QTB_OVR_ECATS_INFLD1_OPC_WRI 1
  624. #define QTB_OVR_ECATS_INFLD2 0x440
  625. #define QTB_OVR_ECATS_TRIGGER 0x448
  626. #define QTB_OVR_ECATS_TRIGGER_START BIT(0)
  627. #define QTB_OVR_ECATS_STATUS 0x450
  628. #define QTB_OVR_ECATS_STATUS_DONE BIT(0)
  629. #define QTB_OVR_ECATS_OUTFLD0 0x458
  630. #define QTB_OVR_ECATS_OUTFLD0_PA GENMASK_ULL(63, 12)
  631. #define QTB_OVR_ECATS_OUTFLD0_FAULT_TYPE GENMASK(5, 4)
  632. #define QTB_OVR_ECATS_OUTFLD0_FAULT BIT(0)
633. #define QTB_NS_DBG_PORT_N_OT_SNAPSHOT(port_num) (0xc10 + (0x10 * (port_num)))
  634. #define TCU_TESTBUS_SEL_ALL 0x7
  635. #define TBU_TESTBUS_SEL_ALL 0x7f
  636. struct actlr_setting {
  637. struct arm_smmu_smr smr;
  638. u32 actlr;
  639. };
  640. struct qsmmuv500_archdata {
  641. struct arm_smmu_device smmu;
  642. struct list_head tbus;
  643. struct actlr_setting *actlrs;
  644. u32 actlr_tbl_size;
  645. struct work_struct outstanding_tnx_work;
  646. spinlock_t atos_lock;
  647. void __iomem *tcu_base;
  648. };
  649. #define to_qsmmuv500_archdata(smmu) \
  650. container_of(smmu, struct qsmmuv500_archdata, smmu)
  651. struct qsmmuv500_group_iommudata {
  652. bool has_actlr;
  653. u32 actlr;
  654. };
  655. #define to_qsmmuv500_group_iommudata(group) \
  656. ((struct qsmmuv500_group_iommudata *) \
  657. (iommu_group_get_iommudata(group)))
  658. struct qsmmuv500_tbu_device {
  659. struct list_head list;
  660. struct device *dev;
  661. struct arm_smmu_device *smmu;
  662. void __iomem *base;
  663. const struct qsmmuv500_tbu_impl *impl;
  664. struct arm_smmu_power_resources *pwr;
  665. u32 sid_start;
  666. u32 num_sids;
  667. u32 iova_width;
  668. /* Protects halt count */
  669. spinlock_t halt_lock;
  670. u32 halt_count;
  671. unsigned int *irqs;
  672. };
  673. struct qsmmuv500_tbu_impl {
  674. int (*halt_req)(struct qsmmuv500_tbu_device *tbu);
  675. int (*halt_poll)(struct qsmmuv500_tbu_device *tbu);
  676. void (*resume)(struct qsmmuv500_tbu_device *tbu);
  677. phys_addr_t (*trigger_atos)(struct qsmmuv500_tbu_device *tbu, dma_addr_t iova, u32 sid,
  678. unsigned long trans_flags);
  679. void (*write_sync)(struct qsmmuv500_tbu_device *tbu);
  680. void (*log_outstanding_transactions)(struct qsmmuv500_tbu_device *tbu);
  681. };
  682. struct arm_tbu_device {
  683. struct qsmmuv500_tbu_device tbu;
  684. bool has_micro_idle;
  685. };
  686. #define to_arm_tbu(tbu) container_of(tbu, struct arm_tbu_device, tbu)
  687. struct qtb500_device {
  688. struct qsmmuv500_tbu_device tbu;
  689. bool no_halt;
  690. u32 num_ports;
  691. void __iomem *debugchain_base;
  692. void __iomem *transactiontracker_base;
  693. };
  694. #define to_qtb500(tbu) container_of(tbu, struct qtb500_device, tbu)
  695. static int arm_tbu_halt_req(struct qsmmuv500_tbu_device *tbu)
  696. {
  697. void __iomem *tbu_base = tbu->base;
  698. u32 halt;
  699. halt = readl_relaxed(tbu_base + DEBUG_SID_HALT_REG);
  700. halt |= DEBUG_SID_HALT_REQ;
  701. writel_relaxed(halt, tbu_base + DEBUG_SID_HALT_REG);
  702. return 0;
  703. }
  704. static int arm_tbu_halt_poll(struct qsmmuv500_tbu_device *tbu)
  705. {
  706. void __iomem *tbu_base = tbu->base;
  707. u32 halt, status;
  708. if (readl_poll_timeout_atomic(tbu_base + DEBUG_SR_HALT_ACK_REG, status,
  709. (status & DEBUG_SR_HALT_ACK_VAL),
  710. 0, TBU_DBG_TIMEOUT_US)) {
  711. dev_err(tbu->dev, "Couldn't halt TBU!\n");
  712. halt = readl_relaxed(tbu_base + DEBUG_SID_HALT_REG);
  713. halt &= ~DEBUG_SID_HALT_REQ;
  714. writel_relaxed(halt, tbu_base + DEBUG_SID_HALT_REG);
  715. return -ETIMEDOUT;
  716. }
  717. return 0;
  718. }
  719. static void arm_tbu_resume(struct qsmmuv500_tbu_device *tbu)
  720. {
  721. void __iomem *base = tbu->base;
  722. u32 val;
  723. val = readl_relaxed(base + DEBUG_SID_HALT_REG);
  724. val &= ~DEBUG_SID_HALT_REQ;
  725. writel_relaxed(val, base + DEBUG_SID_HALT_REG);
  726. }
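/*
 * Issue a debug ECATS translation through the TBU: program the stream ID,
 * VA and AXI attributes, trigger the transaction, then poll until it
 * completes, faults, or times out, and return the PA (0 on failure).
 */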
  727. static phys_addr_t arm_tbu_trigger_atos(struct qsmmuv500_tbu_device *tbu, dma_addr_t iova, u32 sid,
  728. unsigned long trans_flags)
  729. {
  730. void __iomem *tbu_base = tbu->base;
  731. phys_addr_t phys = 0;
  732. u64 val;
  733. ktime_t timeout;
  734. bool ecats_timedout = false;
  735. /* Set address and stream-id */
  736. val = readq_relaxed(tbu_base + DEBUG_SID_HALT_REG);
  737. val &= ~DEBUG_SID_HALT_SID;
  738. val |= FIELD_PREP(DEBUG_SID_HALT_SID, sid);
  739. writeq_relaxed(val, tbu_base + DEBUG_SID_HALT_REG);
  740. writeq_relaxed(iova, tbu_base + DEBUG_VA_ADDR_REG);
  741. val = FIELD_PREP(DEBUG_AXUSER_CDMID, DEBUG_AXUSER_CDMID_VAL);
  742. writeq_relaxed(val, tbu_base + DEBUG_AXUSER_REG);
  743. /* Write-back Read and Write-Allocate */
  744. val = FIELD_PREP(DEBUG_TXN_AXCACHE, 0xF);
  745. /* Non-secure Access */
  746. val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_NSEC);
  747. /* Write or Read Access */
  748. if (trans_flags & IOMMU_TRANS_WRITE)
  749. val |= DEBUG_TXN_WRITE;
750. /* Privileged or Unprivileged Access */
  751. if (trans_flags & IOMMU_TRANS_PRIV)
  752. val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_PRIV);
  753. /* Data or Instruction Access */
  754. if (trans_flags & IOMMU_TRANS_INST)
  755. val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_INST);
  756. val |= DEBUG_TXN_TRIGGER;
  757. writeq_relaxed(val, tbu_base + DEBUG_TXN_TRIGG_REG);
  758. timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
  759. for (;;) {
  760. val = readl_relaxed(tbu_base + DEBUG_SR_HALT_ACK_REG);
  761. if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
  762. break;
  763. val = readl_relaxed(tbu_base + DEBUG_PAR_REG);
  764. if (val & DEBUG_PAR_FAULT_VAL)
  765. break;
  766. if (ktime_compare(ktime_get(), timeout) > 0) {
  767. ecats_timedout = true;
  768. break;
  769. }
  770. }
  771. val = readq_relaxed(tbu_base + DEBUG_PAR_REG);
  772. if (val & DEBUG_PAR_FAULT_VAL)
  773. dev_err(tbu->dev, "ECATS generated a fault interrupt! PAR = %llx, SID=0x%x\n",
  774. val, sid);
  775. else if (ecats_timedout)
  776. dev_err_ratelimited(tbu->dev, "ECATS translation timed out!\n");
  777. else
  778. phys = FIELD_GET(DEBUG_PAR_PA, val);
  779. /* Reset hardware */
  780. writeq_relaxed(0, tbu_base + DEBUG_TXN_TRIGG_REG);
  781. writeq_relaxed(0, tbu_base + DEBUG_VA_ADDR_REG);
  782. val = readl_relaxed(tbu_base + DEBUG_SID_HALT_REG);
  783. val &= ~DEBUG_SID_HALT_SID;
  784. writel_relaxed(val, tbu_base + DEBUG_SID_HALT_REG);
  785. return phys;
  786. }
  787. static void arm_tbu_write_sync(struct qsmmuv500_tbu_device *tbu)
  788. {
  789. readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
  790. }
  791. static void arm_tbu_log_outstanding_transactions(struct qsmmuv500_tbu_device *tbu)
  792. {
  793. void __iomem *base = tbu->base;
  794. u64 outstanding_tnxs;
  795. u64 tcr_cntl_val, res;
  796. tcr_cntl_val = readq_relaxed(base + TNX_TCR_CNTL);
  797. /* Write 1 into MATCH_MASK_UPD of TNX_TCR_CNTL */
  798. writeq_relaxed(tcr_cntl_val | TNX_TCR_CNTL_MATCH_MASK_UPD,
  799. base + TNX_TCR_CNTL);
  800. /*
  801. * Simultaneously write 0 into MATCH_MASK_UPD, 0 into
  802. * ALWAYS_CAPTURE, 0 into MATCH_MASK_VALID, and 1 into
  803. * TBU_OT_CAPTURE_EN of TNX_TCR_CNTL
  804. */
  805. tcr_cntl_val &= ~(TNX_TCR_CNTL_MATCH_MASK_UPD |
  806. TNX_TCR_CNTL_ALWAYS_CAPTURE |
  807. TNX_TCR_CNTL_MATCH_MASK_VALID);
  808. writeq_relaxed(tcr_cntl_val | TNX_TCR_CNTL_TBU_OT_CAPTURE_EN,
  809. base + TNX_TCR_CNTL);
  810. /* Poll for CAPTURE1_VALID to become 1 on TNX_TCR_CNTL_2 */
  811. if (readq_poll_timeout_atomic(base + TNX_TCR_CNTL_2, res,
  812. res & TNX_TCR_CNTL_2_CAP1_VALID,
  813. 0, TBU_DBG_TIMEOUT_US)) {
  814. dev_err_ratelimited(tbu->dev,
  815. "Timeout on TNX snapshot poll\n");
  816. goto poll_timeout;
  817. }
  818. /* Read Register CAPTURE1_SNAPSHOT_1 */
  819. outstanding_tnxs = readq_relaxed(base + CAPTURE1_SNAPSHOT_1);
  820. dev_err_ratelimited(tbu->dev,
  821. "Outstanding Transaction Bitmap: 0x%llx\n",
  822. outstanding_tnxs);
  823. poll_timeout:
  824. /* Write TBU_OT_CAPTURE_EN to 0 of TNX_TCR_CNTL */
  825. writeq_relaxed(tcr_cntl_val & ~TNX_TCR_CNTL_TBU_OT_CAPTURE_EN,
  826. tbu->base + TNX_TCR_CNTL);
  827. }
  828. static const struct qsmmuv500_tbu_impl arm_tbu_impl = {
  829. .halt_req = arm_tbu_halt_req,
  830. .halt_poll = arm_tbu_halt_poll,
  831. .resume = arm_tbu_resume,
  832. .trigger_atos = arm_tbu_trigger_atos,
  833. .write_sync = arm_tbu_write_sync,
  834. .log_outstanding_transactions = arm_tbu_log_outstanding_transactions,
  835. };
  836. /*
  837. * Prior to accessing registers in the TBU local register space,
  838. * TBU must be woken from micro idle.
  839. */
  840. static int __arm_tbu_micro_idle_cfg(struct arm_smmu_device *smmu,
  841. u32 val, u32 mask)
  842. {
  843. void __iomem *reg;
  844. u32 tmp, new;
  845. unsigned long flags;
  846. int ret;
  847. /* Protect APPS_SMMU_TBU_REG_ACCESS register. */
  848. spin_lock_irqsave(&smmu->global_sync_lock, flags);
  849. new = arm_smmu_readl(smmu, ARM_SMMU_IMPL_DEF5,
  850. APPS_SMMU_TBU_REG_ACCESS_REQ_NS);
  851. new &= ~mask;
  852. new |= val;
  853. arm_smmu_writel(smmu, ARM_SMMU_IMPL_DEF5,
  854. APPS_SMMU_TBU_REG_ACCESS_REQ_NS,
  855. new);
  856. reg = arm_smmu_page(smmu, ARM_SMMU_IMPL_DEF5);
  857. reg += APPS_SMMU_TBU_REG_ACCESS_ACK_NS;
  858. ret = readl_poll_timeout_atomic(reg, tmp, ((tmp & mask) == val), 0, 200);
  859. if (ret)
  860. dev_WARN(smmu->dev, "Timed out configuring micro idle! %x instead of %x\n", tmp,
  861. new);
  862. /*
  863. * While the micro-idle guard sequence registers may have been configured
  864. * properly, it is possible that the intended effect has not been realized
  865. * by the power management hardware due to delays in the system.
  866. *
  867. * Spin for a short amount of time to allow for the desired configuration to
  868. * take effect before proceeding.
  869. */
  870. udelay(TBU_MICRO_IDLE_DELAY_US);
  871. spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
  872. return ret;
  873. }
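/*
 * Each TBU owns one request/ack bit in the APPS_SMMU_TBU_REG_ACCESS
 * registers, selected by the TBU ID field of its stream IDs
 * (sid_start >> TBUID_SHIFT).
 */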
  874. int arm_tbu_micro_idle_wake(struct arm_smmu_power_resources *pwr)
  875. {
  876. struct qsmmuv500_tbu_device *tbu = dev_get_drvdata(pwr->dev);
  877. struct arm_tbu_device *arm_tbu = to_arm_tbu(tbu);
  878. u32 val;
  879. if (!arm_tbu->has_micro_idle)
  880. return 0;
  881. val = tbu->sid_start >> 10;
  882. val = 1 << val;
  883. return __arm_tbu_micro_idle_cfg(tbu->smmu, val, val);
  884. }
  885. void arm_tbu_micro_idle_allow(struct arm_smmu_power_resources *pwr)
  886. {
  887. struct qsmmuv500_tbu_device *tbu = dev_get_drvdata(pwr->dev);
  888. struct arm_tbu_device *arm_tbu = to_arm_tbu(tbu);
  889. u32 val;
  890. if (!arm_tbu->has_micro_idle)
  891. return;
  892. val = tbu->sid_start >> 10;
  893. val = 1 << val;
  894. __arm_tbu_micro_idle_cfg(tbu->smmu, 0, val);
  895. }
  896. static struct qsmmuv500_tbu_device *arm_tbu_impl_init(struct qsmmuv500_tbu_device *tbu)
  897. {
  898. struct arm_tbu_device *arm_tbu;
  899. struct device *dev = tbu->dev;
  900. arm_tbu = devm_krealloc(dev, tbu, sizeof(*arm_tbu), GFP_KERNEL);
  901. if (!arm_tbu)
  902. return ERR_PTR(-ENOMEM);
  903. arm_tbu->tbu.impl = &arm_tbu_impl;
  904. arm_tbu->has_micro_idle = of_property_read_bool(dev->of_node, "qcom,micro-idle");
  905. if (arm_tbu->has_micro_idle) {
  906. arm_tbu->tbu.pwr->resume = arm_tbu_micro_idle_wake;
  907. arm_tbu->tbu.pwr->suspend = arm_tbu_micro_idle_allow;
  908. }
  909. return &arm_tbu->tbu;
  910. }
  911. static int qtb500_tbu_halt_req(struct qsmmuv500_tbu_device *tbu)
  912. {
  913. void __iomem *qtb_base = tbu->base;
  914. struct qtb500_device *qtb = to_qtb500(tbu);
  915. u64 val;
  916. if (qtb->no_halt)
  917. return 0;
  918. val = readq_relaxed(qtb_base + QTB_OVR_DBG_FENCEREQ);
  919. val |= QTB_OVR_DBG_FENCEREQ_HALT;
  920. writeq_relaxed(val, qtb_base + QTB_OVR_DBG_FENCEREQ);
  921. return 0;
  922. }
  923. static int qtb500_tbu_halt_poll(struct qsmmuv500_tbu_device *tbu)
  924. {
  925. void __iomem *qtb_base = tbu->base;
  926. struct qtb500_device *qtb = to_qtb500(tbu);
  927. u64 val, status;
  928. if (qtb->no_halt)
  929. return 0;
  930. if (readq_poll_timeout_atomic(qtb_base + QTB_OVR_DBG_FENCEACK, status,
  931. (status & QTB_OVR_DBG_FENCEACK_ACK), 0,
  932. QTB_DBG_TIMEOUT_US)) {
  933. dev_err(tbu->dev, "Couldn't halt QTB\n");
  934. val = readq_relaxed(qtb_base + QTB_OVR_DBG_FENCEREQ);
  935. val &= ~QTB_OVR_DBG_FENCEREQ_HALT;
  936. writeq_relaxed(val, qtb_base + QTB_OVR_DBG_FENCEREQ);
  937. return -ETIMEDOUT;
  938. }
  939. return 0;
  940. }
  941. static void qtb500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
  942. {
  943. void __iomem *qtb_base = tbu->base;
  944. struct qtb500_device *qtb = to_qtb500(tbu);
  945. u64 val;
  946. if (qtb->no_halt)
  947. return;
  948. val = readq_relaxed(qtb_base + QTB_OVR_DBG_FENCEREQ);
  949. val &= ~QTB_OVR_DBG_FENCEREQ_HALT;
  950. writeq_relaxed(val, qtb_base + QTB_OVR_DBG_FENCEREQ);
  951. }
  952. static phys_addr_t qtb500_trigger_atos(struct qsmmuv500_tbu_device *tbu, dma_addr_t iova,
  953. u32 sid, unsigned long trans_flags)
  954. {
  955. void __iomem *qtb_base = tbu->base;
  956. u64 infld0, infld1, infld2, val;
  957. phys_addr_t phys = 0;
  958. ktime_t timeout;
  959. bool ecats_timedout = false;
  960. /*
  961. * Recommended to set:
  962. *
  963. * QTB_OVR_ECATS_INFLD0.QAD == 0 (AP Access Domain)
964. * QTB_OVR_ECATS_INFLD0.PCIE_NO_SNOOP == 0 (IO-Coherency enabled)
  965. */
  966. infld0 = FIELD_PREP(QTB_OVR_ECATS_INFLD0_SID, sid);
  967. if (trans_flags & IOMMU_TRANS_SEC)
  968. infld0 |= QTB_OVR_ECATS_INFLD0_SEC_SID;
  969. infld1 = 0;
  970. if (trans_flags & IOMMU_TRANS_PRIV)
  971. infld1 |= QTB_OVR_ECATS_INFLD1_PNU;
  972. if (trans_flags & IOMMU_TRANS_INST)
  973. infld1 |= QTB_OVR_ECATS_INFLD1_IND;
  974. /*
  975. * Recommended to set:
  976. *
  977. * QTB_OVR_ECATS_INFLD1.DIRTY == 0,
  978. * QTB_OVR_ECATS_INFLD1.TR_TYPE == 4 (Cacheable and Shareable memory)
  979. * QTB_OVR_ECATS_INFLD1.ALLOC == 0 (No allocation in TLB/caches)
  980. */
  981. infld1 |= FIELD_PREP(QTB_OVR_ECATS_INFLD1_TR_TYPE, QTB_OVR_ECATS_INFLD1_TR_TYPE_SHARED);
  982. if (!(trans_flags & IOMMU_TRANS_SEC))
  983. infld1 |= QTB_OVR_ECATS_INFLD1_NON_SEC;
  984. if (trans_flags & IOMMU_TRANS_WRITE)
  985. infld1 |= FIELD_PREP(QTB_OVR_ECATS_INFLD1_OPC, QTB_OVR_ECATS_INFLD1_OPC_WRI);
  986. infld2 = iova;
  987. writeq_relaxed(infld0, qtb_base + QTB_OVR_ECATS_INFLD0);
  988. writeq_relaxed(infld1, qtb_base + QTB_OVR_ECATS_INFLD1);
  989. writeq_relaxed(infld2, qtb_base + QTB_OVR_ECATS_INFLD2);
  990. writeq_relaxed(QTB_OVR_ECATS_TRIGGER_START, qtb_base + QTB_OVR_ECATS_TRIGGER);
  991. timeout = ktime_add_us(ktime_get(), QTB_DBG_TIMEOUT_US);
  992. for (;;) {
  993. val = readq_relaxed(qtb_base + QTB_OVR_ECATS_STATUS);
  994. if (val & QTB_OVR_ECATS_STATUS_DONE)
  995. break;
  996. val = readq_relaxed(qtb_base + QTB_OVR_ECATS_OUTFLD0);
  997. if (val & QTB_OVR_ECATS_OUTFLD0_FAULT)
  998. break;
  999. if (ktime_compare(ktime_get(), timeout) > 0) {
  1000. ecats_timedout = true;
  1001. break;
  1002. }
  1003. }
  1004. val = readq_relaxed(qtb_base + QTB_OVR_ECATS_OUTFLD0);
  1005. if (val & QTB_OVR_ECATS_OUTFLD0_FAULT)
  1006. dev_err(tbu->dev, "ECATS generated a fault interrupt! OUTFLD0 = 0x%llx SID = 0x%x\n",
  1007. val, sid);
  1008. else if (ecats_timedout)
  1009. dev_err_ratelimited(tbu->dev, "ECATS translation timed out!\n");
  1010. else
  1011. phys = FIELD_GET(QTB_OVR_ECATS_OUTFLD0_PA, val);
  1012. /* Reset hardware for next transaction. */
  1013. writeq_relaxed(0, qtb_base + QTB_OVR_ECATS_TRIGGER);
  1014. return phys;
  1015. }
  1016. static void qtb500_tbu_write_sync(struct qsmmuv500_tbu_device *tbu)
  1017. {
  1018. readl_relaxed(tbu->base + QTB_SWID_LOW);
  1019. }
  1020. static void qtb500_log_outstanding_transactions(struct qsmmuv500_tbu_device *tbu)
  1021. {
  1022. void __iomem *qtb_base = tbu->base;
  1023. struct qtb500_device *qtb = to_qtb500(tbu);
  1024. u64 outstanding_tnx;
  1025. int i;
  1026. for (i = 0; i < qtb->num_ports; i++) {
  1027. outstanding_tnx = readq_relaxed(qtb_base + QTB_NS_DBG_PORT_N_OT_SNAPSHOT(i));
  1028. dev_err(tbu->dev, "port %d outstanding transactions bitmap: 0x%llx\n", i,
  1029. outstanding_tnx);
  1030. }
  1031. }
  1032. static const struct qsmmuv500_tbu_impl qtb500_impl = {
  1033. .halt_req = qtb500_tbu_halt_req,
  1034. .halt_poll = qtb500_tbu_halt_poll,
  1035. .resume = qtb500_tbu_resume,
  1036. .trigger_atos = qtb500_trigger_atos,
  1037. .write_sync = qtb500_tbu_write_sync,
  1038. .log_outstanding_transactions = qtb500_log_outstanding_transactions,
  1039. };
  1040. static struct qsmmuv500_tbu_device *qtb500_impl_init(struct qsmmuv500_tbu_device *tbu)
  1041. {
  1042. struct resource *ttres;
  1043. struct qtb500_device *qtb;
  1044. struct device *dev = tbu->dev;
  1045. struct platform_device *pdev = to_platform_device(dev);
  1046. #ifdef CONFIG_ARM_SMMU_TESTBUS
  1047. struct resource *res;
  1048. #endif
  1049. int ret;
  1050. qtb = devm_krealloc(dev, tbu, sizeof(*qtb), GFP_KERNEL);
  1051. if (!qtb)
  1052. return ERR_PTR(-ENOMEM);
  1053. qtb->tbu.impl = &qtb500_impl;
  1054. ret = of_property_read_u32(dev->of_node, "qcom,num-qtb-ports", &qtb->num_ports);
  1055. if (ret)
  1056. return ERR_PTR(ret);
  1057. qtb->no_halt = of_property_read_bool(dev->of_node, "qcom,no-qtb-atos-halt");
  1058. #ifdef CONFIG_ARM_SMMU_TESTBUS
  1059. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "debugchain-base");
  1060. if (!res) {
  1061. dev_info(dev, "Unable to get the debugchain-base\n");
1062. goto end;
  1064. }
  1065. qtb->debugchain_base = devm_ioremap_resource(dev, res);
  1066. if (IS_ERR(qtb->debugchain_base)) {
  1067. dev_info(dev, "devm_ioremap failure, overlapping regs\n");
1068. /*
1069. * Use ioremap() for QTBs that share the same debug chain register
1070. * space, e.g. the SF and HF QTBs on MMNOC.
1071. */
1072. qtb->debugchain_base = ioremap(res->start, resource_size(res));
1073. if (!qtb->debugchain_base) {
  1074. dev_err(dev, "unable to ioremap the debugchain-base\n");
  1075. return ERR_PTR(-EINVAL);
  1076. }
  1077. }
  1078. end:
  1079. #endif
  1080. ttres = platform_get_resource_byname(pdev, IORESOURCE_MEM, "transactiontracker-base");
  1081. if (ttres) {
  1082. qtb->transactiontracker_base = devm_ioremap_resource(dev, ttres);
  1083. if (IS_ERR(qtb->transactiontracker_base))
  1084. dev_info(dev, "devm_ioremap failure for transaction tracker\n");
  1085. } else {
  1086. qtb->transactiontracker_base = NULL;
  1087. dev_info(dev, "Unable to get the transactiontracker-base\n");
  1088. }
  1089. return &qtb->tbu;
  1090. }
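/*
 * Return the TBU whose stream-ID window [sid_start, sid_start + num_sids)
 * contains @sid, or NULL if no registered TBU matches.
 */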
  1091. static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(struct arm_smmu_device *smmu, u32 sid)
  1092. {
  1093. struct qsmmuv500_tbu_device *tbu = NULL;
  1094. struct qsmmuv500_archdata *data = to_qsmmuv500_archdata(smmu);
  1095. if (!list_empty(&data->tbus)) {
  1096. list_for_each_entry(tbu, &data->tbus, list) {
  1097. if (tbu->sid_start <= sid &&
  1098. sid < tbu->sid_start + tbu->num_sids)
  1099. return tbu;
  1100. }
  1101. }
  1102. return NULL;
  1103. }
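/*
 * Reference-counted TBU halt. The first caller requests the halt,
 * terminates any stalled fault transaction that would keep the halt from
 * being acknowledged, and then polls for the acknowledgement.
 */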
  1104. static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
  1105. struct arm_smmu_domain *smmu_domain)
  1106. {
  1107. unsigned long flags;
  1108. struct arm_smmu_device *smmu = smmu_domain->smmu;
  1109. int ret = 0, idx = smmu_domain->cfg.cbndx;
  1110. u32 fsr;
  1111. if (of_property_read_bool(tbu->dev->of_node, "qcom,opt-out-tbu-halting")) {
  1112. dev_notice(tbu->dev, "TBU opted-out for halting!\n");
  1113. return -EBUSY;
  1114. }
  1115. spin_lock_irqsave(&tbu->halt_lock, flags);
  1116. if (tbu->halt_count) {
  1117. tbu->halt_count++;
  1118. goto out;
  1119. }
  1120. ret = tbu->impl->halt_req(tbu);
  1121. if (ret)
  1122. goto out;
  1123. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  1124. if ((fsr & ARM_SMMU_FSR_FAULT) && (fsr & ARM_SMMU_FSR_SS)) {
  1125. u32 sctlr_orig, sctlr;
  1126. /*
* We are in a fault; our request to halt the bus will not
* complete until transactions ahead of us (such as the fault
* itself) have completed. Disable IOMMU faults and terminate
* the stalled transaction.
  1131. */
  1132. sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
  1133. sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
  1134. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
  1135. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
  1136. /*
  1137. * Barrier required to ensure that the FSR is cleared
  1138. * before resuming SMMU operation
  1139. */
  1140. wmb();
  1141. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
  1142. ARM_SMMU_RESUME_TERMINATE);
  1143. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
  1144. }
  1145. ret = tbu->impl->halt_poll(tbu);
  1146. if (ret)
  1147. goto out;
  1148. tbu->halt_count = 1;
  1149. out:
  1150. spin_unlock_irqrestore(&tbu->halt_lock, flags);
  1151. return ret;
  1152. }
  1153. static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
  1154. {
  1155. unsigned long flags;
  1156. spin_lock_irqsave(&tbu->halt_lock, flags);
  1157. if (WARN(!tbu->halt_count, "%s bad tbu->halt_count", dev_name(tbu->dev))) {
  1158. goto out;
  1159. } else if (tbu->halt_count > 1) {
  1160. tbu->halt_count--;
  1161. goto out;
  1162. }
  1163. tbu->impl->resume(tbu);
  1164. tbu->halt_count = 0;
  1165. out:
  1166. spin_unlock_irqrestore(&tbu->halt_lock, flags);
  1167. }
  1168. /*
  1169. * Provides mutually exclusive access to the registers used by the
  1170. * outstanding transaction snapshot feature and the transaction
  1171. * snapshot capture feature.
  1172. */
  1173. static DEFINE_MUTEX(capture_reg_lock);
  1174. static DEFINE_SPINLOCK(testbus_lock);
  1175. __maybe_unused static struct dentry *get_iommu_debug_dir(void)
  1176. {
  1177. struct dentry *iommu_debug_dir;
  1178. int ret;
  1179. iommu_debug_dir = debugfs_lookup("iommu-debug", NULL);
  1180. if (IS_ERR_OR_NULL(iommu_debug_dir)) {
  1181. iommu_debug_dir = debugfs_create_dir("iommu-debug", NULL);
  1182. if (IS_ERR_OR_NULL(iommu_debug_dir)) {
  1183. ret = PTR_ERR(iommu_debug_dir);
  1184. pr_err_ratelimited("Unable to create iommu-debug directory, ret=%d\n",
  1185. ret);
  1186. return NULL;
  1187. }
  1188. }
  1189. return iommu_debug_dir;
  1190. }
  1191. #ifdef CONFIG_ARM_SMMU_TESTBUS_DEBUGFS
  1192. static struct dentry *debugfs_testbus_dir;
  1193. static ssize_t arm_smmu_debug_debugchain_read(struct file *file,
  1194. char __user *ubuf, size_t count, loff_t *offset)
  1195. {
  1196. char *buf;
  1197. ssize_t retval;
  1198. size_t buflen;
  1199. int buf_len;
  1200. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1201. struct qtb500_device *qtb = to_qtb500(tbu);
  1202. void __iomem *debugchain_base = qtb->debugchain_base;
  1203. long chain_length = 0;
  1204. u64 val;
  1205. if (*offset)
  1206. return 0;
  1207. arm_smmu_power_on(tbu->pwr);
  1208. chain_length = arm_smmu_debug_qtb_debugchain_load(debugchain_base);
/* Room to format each 64-bit entry as "0x%llx\n" */
buf_len = (chain_length + 1) * 20;
  1210. buf = kzalloc(buf_len, GFP_KERNEL);
  1211. if (!buf)
  1212. return -ENOMEM;
  1213. arm_smmu_debug_qtb_debugchain_dump(debugchain_base);
  1214. do {
  1215. val = arm_smmu_debug_qtb_debugchain_dump(debugchain_base);
scnprintf(buf + strlen(buf), buf_len - strlen(buf), "0x%llx\n", val);
  1217. } while (chain_length--);
  1218. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1219. buflen = min(count, strlen(buf));
  1220. if (copy_to_user(ubuf, buf, buflen)) {
  1221. pr_err_ratelimited("Couldn't copy_to_user\n");
  1222. retval = -EFAULT;
  1223. } else {
  1224. *offset = 1;
  1225. retval = buflen;
  1226. }
kfree(buf);
return retval;
  1228. }
  1229. static ssize_t arm_smmu_debug_testbus_read(struct file *file,
  1230. char __user *ubuf, size_t count, loff_t *offset,
  1231. enum testbus_sel tbu, enum testbus_ops ops)
  1232. {
  1233. char buf[100];
  1234. ssize_t retval;
  1235. size_t buflen;
  1236. int buf_len = sizeof(buf);
  1237. if (*offset)
  1238. return 0;
  1239. memset(buf, 0, buf_len);
  1240. if (tbu == SEL_TBU) {
  1241. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1242. void __iomem *tbu_base = tbu->base;
  1243. long val;
  1244. arm_smmu_power_on(tbu->pwr);
  1245. if (ops == TESTBUS_SELECT)
  1246. val = arm_smmu_debug_tbu_testbus_select(tbu_base,
  1247. READ, 0);
  1248. else
  1249. val = arm_smmu_debug_tbu_testbus_output(tbu_base);
  1250. arm_smmu_power_off(tbu->smmu, tbu->pwr);
scnprintf(buf, buf_len, "0x%lx\n", val);
  1252. } else {
  1253. struct arm_smmu_device *smmu = file->private_data;
  1254. struct qsmmuv500_archdata *data = to_qsmmuv500_archdata(smmu);
  1255. phys_addr_t phys_addr = smmu->phys_addr;
  1256. void __iomem *tcu_base = data->tcu_base;
  1257. arm_smmu_power_on(smmu->pwr);
  1258. if (ops == TESTBUS_SELECT) {
  1259. scnprintf(buf, buf_len, "TCU clk testbus sel: 0x%0x\n",
  1260. arm_smmu_debug_tcu_testbus_select(phys_addr,
  1261. tcu_base, CLK_TESTBUS, READ, 0));
  1262. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1263. "TCU testbus sel : 0x%0x\n",
  1264. arm_smmu_debug_tcu_testbus_select(phys_addr,
  1265. tcu_base, PTW_AND_CACHE_TESTBUS,
  1266. READ, 0));
  1267. } else {
  1268. scnprintf(buf, buf_len, "0x%0x\n",
  1269. arm_smmu_debug_tcu_testbus_output(phys_addr));
  1270. }
  1271. arm_smmu_power_off(smmu, smmu->pwr);
  1272. }
  1273. buflen = min(count, strlen(buf));
  1274. if (copy_to_user(ubuf, buf, buflen)) {
  1275. pr_err_ratelimited("Couldn't copy_to_user\n");
  1276. retval = -EFAULT;
  1277. } else {
  1278. *offset = 1;
  1279. retval = buflen;
  1280. }
  1281. return retval;
  1282. }
  1283. static ssize_t arm_smmu_debug_tcu_testbus_sel_write(struct file *file,
  1284. const char __user *ubuf, size_t count, loff_t *offset)
  1285. {
  1286. struct arm_smmu_device *smmu = file->private_data;
  1287. struct qsmmuv500_archdata *data = to_qsmmuv500_archdata(smmu);
  1288. void __iomem *tcu_base = data->tcu_base;
  1289. phys_addr_t phys_addr = smmu->phys_addr;
  1290. char *comma;
  1291. char buf[100];
  1292. u64 sel, val;
if (count >= sizeof(buf)) {
  1294. pr_err_ratelimited("Value too large\n");
  1295. return -EINVAL;
  1296. }
memset(buf, 0, sizeof(buf));
  1298. if (copy_from_user(buf, ubuf, count)) {
  1299. pr_err_ratelimited("Couldn't copy from user\n");
  1300. return -EFAULT;
  1301. }
  1302. comma = strnchr(buf, count, ',');
  1303. if (!comma)
  1304. goto invalid_format;
  1305. /* split up the words */
  1306. *comma = '\0';
  1307. if (kstrtou64(buf, 0, &sel))
  1308. goto invalid_format;
  1309. if (sel != 1 && sel != 2)
  1310. goto invalid_format;
  1311. if (kstrtou64(comma + 1, 0, &val))
  1312. goto invalid_format;
  1313. arm_smmu_power_on(smmu->pwr);
  1314. if (sel == 1)
  1315. arm_smmu_debug_tcu_testbus_select(phys_addr,
  1316. tcu_base, CLK_TESTBUS, WRITE, val);
  1317. else if (sel == 2)
  1318. arm_smmu_debug_tcu_testbus_select(phys_addr,
  1319. tcu_base, PTW_AND_CACHE_TESTBUS, WRITE, val);
  1320. arm_smmu_power_off(smmu, smmu->pwr);
  1321. return count;
  1322. invalid_format:
  1323. pr_err_ratelimited("Invalid format. Expected: <1, testbus select> for tcu CLK testbus (or) <2, testbus select> for tcu PTW/CACHE testbuses\n");
  1324. return -EINVAL;
  1325. }
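/*
 * Illustrative usage (assumes debugfs mounted at /sys/kernel/debug and an
 * SMMU instance named "15000000.apps-smmu" - substitute the actual device
 * name created under iommu-debug/testbus by qsmmuv500_tcu_testbus_init()):
 *
 *   echo 1,0x3  > /sys/kernel/debug/iommu-debug/testbus/15000000.apps-smmu/tcu_testbus_sel
 *   echo 2,0x10 > /sys/kernel/debug/iommu-debug/testbus/15000000.apps-smmu/tcu_testbus_sel
 *
 * Select 1 programs the TCU CLK testbus, select 2 the PTW/CACHE testbus,
 * matching the "<1/2>,<testbus select>" format parsed above.
 */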
  1326. static ssize_t arm_smmu_debug_tcu_testbus_sel_read(struct file *file,
  1327. char __user *ubuf, size_t count, loff_t *offset)
  1328. {
  1329. return arm_smmu_debug_testbus_read(file, ubuf,
  1330. count, offset, SEL_TCU, TESTBUS_SELECT);
  1331. }
  1332. static const struct file_operations arm_smmu_debug_tcu_testbus_sel_fops = {
  1333. .open = simple_open,
  1334. .write = arm_smmu_debug_tcu_testbus_sel_write,
  1335. .read = arm_smmu_debug_tcu_testbus_sel_read,
  1336. };
  1337. static ssize_t arm_smmu_debug_tcu_testbus_read(struct file *file,
  1338. char __user *ubuf, size_t count, loff_t *offset)
  1339. {
  1340. return arm_smmu_debug_testbus_read(file, ubuf,
  1341. count, offset, SEL_TCU, TESTBUS_OUTPUT);
  1342. }
  1343. static const struct file_operations arm_smmu_debug_tcu_testbus_fops = {
  1344. .open = simple_open,
  1345. .read = arm_smmu_debug_tcu_testbus_read,
  1346. };
  1347. static int qsmmuv500_tcu_testbus_init(struct arm_smmu_device *smmu)
  1348. {
  1349. struct dentry *testbus_dir, *iommu_debug_dir;
  1350. iommu_debug_dir = get_iommu_debug_dir();
  1351. if (!iommu_debug_dir)
  1352. return 0;
  1353. if (!debugfs_testbus_dir) {
  1354. debugfs_testbus_dir = debugfs_create_dir("testbus",
  1355. iommu_debug_dir);
  1356. if (IS_ERR(debugfs_testbus_dir)) {
  1357. pr_err_ratelimited("Couldn't create iommu/testbus debugfs directory\n");
  1358. return -ENODEV;
  1359. }
  1360. }
  1361. testbus_dir = debugfs_create_dir(dev_name(smmu->dev),
  1362. debugfs_testbus_dir);
  1363. if (IS_ERR(testbus_dir)) {
  1364. pr_err_ratelimited("Couldn't create iommu/testbus/%s debugfs directory\n",
  1365. dev_name(smmu->dev));
  1366. goto err;
  1367. }
  1368. if (IS_ERR(debugfs_create_file("tcu_testbus_sel", 0400,
  1369. testbus_dir, smmu,
  1370. &arm_smmu_debug_tcu_testbus_sel_fops))) {
  1371. pr_err_ratelimited("Couldn't create iommu/testbus/%s/tcu_testbus_sel debugfs file\n",
  1372. dev_name(smmu->dev));
  1373. goto err_rmdir;
  1374. }
  1375. if (IS_ERR(debugfs_create_file("tcu_testbus_output", 0400,
  1376. testbus_dir, smmu,
  1377. &arm_smmu_debug_tcu_testbus_fops))) {
  1378. pr_err_ratelimited("Couldn't create iommu/testbus/%s/tcu_testbus_output debugfs file\n",
  1379. dev_name(smmu->dev));
  1380. goto err_rmdir;
  1381. }
  1382. return 0;
  1383. err_rmdir:
  1384. debugfs_remove_recursive(testbus_dir);
  1385. err:
  1386. return 0;
  1387. }
  1388. static ssize_t arm_smmu_debug_tbu_testbus_sel_write(struct file *file,
  1389. const char __user *ubuf, size_t count, loff_t *offset)
  1390. {
  1391. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1392. void __iomem *tbu_base = tbu->base;
  1393. u64 val;
  1394. if (kstrtoull_from_user(ubuf, count, 0, &val)) {
  1395. pr_err_ratelimited("Invalid format for tbu testbus select\n");
  1396. return -EINVAL;
  1397. }
/* QTB500 uses the debug chain rather than the TBU testbus; ignore the write */
if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500"))
return count;
  1400. arm_smmu_power_on(tbu->pwr);
  1401. arm_smmu_debug_tbu_testbus_select(tbu_base, WRITE, val);
  1402. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1403. return count;
  1404. }
  1405. static ssize_t arm_smmu_debug_tbu_testbus_sel_read(struct file *file,
  1406. char __user *ubuf, size_t count, loff_t *offset)
  1407. {
  1408. return arm_smmu_debug_testbus_read(file, ubuf,
  1409. count, offset, SEL_TBU, TESTBUS_SELECT);
  1410. }
  1411. static const struct file_operations arm_smmu_debug_tbu_testbus_sel_fops = {
  1412. .open = simple_open,
  1413. .write = arm_smmu_debug_tbu_testbus_sel_write,
  1414. .read = arm_smmu_debug_tbu_testbus_sel_read,
  1415. };
  1416. static ssize_t arm_smmu_debug_tbu_testbus_read(struct file *file,
  1417. char __user *ubuf, size_t count, loff_t *offset)
  1418. {
  1419. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1420. if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500"))
  1421. return arm_smmu_debug_debugchain_read(file, ubuf,
  1422. count, offset);
  1423. else
  1424. return arm_smmu_debug_testbus_read(file, ubuf,
  1425. count, offset, SEL_TBU, TESTBUS_OUTPUT);
  1426. }
  1427. static const struct file_operations arm_smmu_debug_tbu_testbus_fops = {
  1428. .open = simple_open,
  1429. .read = arm_smmu_debug_tbu_testbus_read,
  1430. };
  1431. static int qsmmuv500_tbu_testbus_init(struct qsmmuv500_tbu_device *tbu)
  1432. {
  1433. struct dentry *testbus_dir, *iommu_debug_dir;
  1434. iommu_debug_dir = get_iommu_debug_dir();
  1435. if (!iommu_debug_dir)
  1436. return 0;
  1437. if (!debugfs_testbus_dir) {
  1438. debugfs_testbus_dir = debugfs_create_dir("testbus",
  1439. iommu_debug_dir);
  1440. if (IS_ERR(debugfs_testbus_dir)) {
  1441. pr_err_ratelimited("Couldn't create iommu/testbus debugfs directory\n");
  1442. return -ENODEV;
  1443. }
  1444. }
  1445. testbus_dir = debugfs_create_dir(dev_name(tbu->dev),
  1446. debugfs_testbus_dir);
  1447. if (IS_ERR(testbus_dir)) {
  1448. pr_err_ratelimited("Couldn't create iommu/testbus/%s debugfs directory\n",
  1449. dev_name(tbu->dev));
  1450. goto err;
  1451. }
  1452. if (IS_ERR(debugfs_create_file("tbu_testbus_sel", 0400,
  1453. testbus_dir, tbu,
  1454. &arm_smmu_debug_tbu_testbus_sel_fops))) {
  1455. pr_err_ratelimited("Couldn't create iommu/testbus/%s/tbu_testbus_sel debugfs file\n",
  1456. dev_name(tbu->dev));
  1457. goto err_rmdir;
  1458. }
  1459. if (IS_ERR(debugfs_create_file("tbu_testbus_output", 0400,
  1460. testbus_dir, tbu,
  1461. &arm_smmu_debug_tbu_testbus_fops))) {
  1462. pr_err_ratelimited("Couldn't create iommu/testbus/%s/tbu_testbus_output debugfs file\n",
  1463. dev_name(tbu->dev));
  1464. goto err_rmdir;
  1465. }
  1466. return 0;
  1467. err_rmdir:
  1468. debugfs_remove_recursive(testbus_dir);
  1469. err:
  1470. return 0;
  1471. }
  1472. #else
  1473. static int qsmmuv500_tcu_testbus_init(struct arm_smmu_device *smmu)
  1474. {
  1475. return 0;
  1476. }
  1477. static int qsmmuv500_tbu_testbus_init(struct qsmmuv500_tbu_device *tbu)
  1478. {
  1479. return 0;
  1480. }
  1481. #endif
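/*
 * Dump testbus state for the TBU owning @sid, or for the TCU when no TBU
 * matches (e.g. sid == U16_MAX), when CONFIG_ARM_SMMU_TESTBUS_DUMP is
 * enabled. For QTB500 the QTB is halted and its debug chain is dumped in
 * place of the legacy TBU testbus.
 */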
  1482. static void arm_smmu_testbus_dump(struct arm_smmu_device *smmu, u16 sid)
  1483. {
  1484. if (smmu->model == QCOM_SMMUV500 &&
  1485. IS_ENABLED(CONFIG_ARM_SMMU_TESTBUS_DUMP)) {
  1486. struct qsmmuv500_archdata *data = to_qsmmuv500_archdata(smmu);
  1487. struct qsmmuv500_tbu_device *tbu;
  1488. tbu = qsmmuv500_find_tbu(smmu, sid);
  1489. spin_lock(&testbus_lock);
  1490. if (tbu) {
  1491. if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500")) {
  1492. struct qtb500_device *qtb = to_qtb500(tbu);
  1493. qtb500_tbu_halt_req(tbu);
  1494. if (!qtb500_tbu_halt_poll(tbu)) {
  1495. arm_smmu_debug_dump_debugchain(tbu->dev,
  1496. qtb->debugchain_base);
  1497. qtb500_tbu_resume(tbu);
  1498. }
  1499. arm_smmu_debug_dump_qtb_regs(tbu->dev, tbu->base);
  1500. } else {
  1501. arm_smmu_debug_dump_tbu_testbus(tbu->dev,
  1502. tbu->base,
  1503. TBU_TESTBUS_SEL_ALL);
  1504. }
  1505. } else {
  1506. arm_smmu_debug_dump_tcu_testbus(smmu->dev,
  1507. smmu->phys_addr,
  1508. data->tcu_base,
  1509. TCU_TESTBUS_SEL_ALL);
  1510. }
  1511. spin_unlock(&testbus_lock);
  1512. }
  1513. }
  1514. static void qsmmuv500_log_outstanding_transactions(struct work_struct *work)
  1515. {
  1516. struct qsmmuv500_tbu_device *tbu = NULL;
  1517. struct qsmmuv500_archdata *data = container_of(work,
  1518. struct qsmmuv500_archdata,
  1519. outstanding_tnx_work);
  1520. struct arm_smmu_device *smmu = &data->smmu;
  1521. if (!mutex_trylock(&capture_reg_lock)) {
  1522. dev_warn_ratelimited(smmu->dev,
  1523. "Tnx snapshot regs in use, not dumping OT tnxs.\n");
  1524. goto bug;
  1525. }
  1526. if (arm_smmu_power_on(smmu->pwr)) {
  1527. dev_err_ratelimited(smmu->dev,
  1528. "%s: Failed to power on SMMU.\n",
  1529. __func__);
  1530. goto unlock;
  1531. }
  1532. if (!list_empty(&data->tbus)) {
  1533. list_for_each_entry(tbu, &data->tbus, list) {
  1534. if (arm_smmu_power_on(tbu->pwr)) {
  1535. dev_err_ratelimited(tbu->dev,
  1536. "%s: Failed to power on TBU.\n", __func__);
  1537. continue;
  1538. }
  1539. tbu->impl->log_outstanding_transactions(tbu);
  1540. arm_smmu_power_off(smmu, tbu->pwr);
  1541. }
  1542. }
  1543. arm_smmu_power_off(smmu, smmu->pwr);
  1544. unlock:
  1545. mutex_unlock(&capture_reg_lock);
  1546. bug:
  1547. BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
  1548. }
  1549. static struct qsmmuv500_tbu_device *qsmmuv500_tbu_impl_init(struct qsmmuv500_tbu_device *tbu)
  1550. {
  1551. if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500"))
  1552. return qtb500_impl_init(tbu);
  1553. return arm_tbu_impl_init(tbu);
  1554. }
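/*
 * Illustrative TBU node consumed by the probe below (addresses and values
 * are examples only, not taken from any particular target):
 *
 *   tbu@15185000 {
 *           compatible = "qcom,qsmmuv500-tbu";
 *           reg = <0x15185000 0x1000>;
 *           qcom,stream-id-range = <0x800 0x400>;
 *           qcom,iova-width = <36>;
 *   };
 */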
  1555. static int qsmmuv500_tbu_probe(struct platform_device *pdev)
  1556. {
  1557. struct device *dev = &pdev->dev;
  1558. struct qsmmuv500_tbu_device *tbu;
  1559. struct resource *res;
  1560. const __be32 *cell;
  1561. int ret, len;
  1562. tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
  1563. if (!tbu)
  1564. return -ENOMEM;
  1565. tbu->dev = dev;
  1566. /*
* ARM TBUs need their power resources initialized before the
* implementation-defined initialization runs, so that the
* suspend and resume power callbacks can be set up.
  1570. */
  1571. tbu->pwr = arm_smmu_init_power_resources(dev);
  1572. if (IS_ERR(tbu->pwr))
  1573. return PTR_ERR(tbu->pwr);
  1574. tbu = qsmmuv500_tbu_impl_init(tbu);
  1575. if (IS_ERR(tbu))
  1576. return PTR_ERR(tbu);
  1577. INIT_LIST_HEAD(&tbu->list);
  1578. spin_lock_init(&tbu->halt_lock);
  1579. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1580. if (!res)
  1581. return -EINVAL;
  1582. tbu->base = devm_ioremap(dev, res->start, resource_size(res));
  1583. if (!tbu->base)
  1584. return -ENOMEM;
  1585. cell = of_get_property(dev->of_node, "qcom,stream-id-range", &len);
  1586. if (!cell || len < 8)
  1587. return -EINVAL;
  1588. tbu->sid_start = of_read_number(cell, 1);
  1589. tbu->num_sids = of_read_number(cell + 1, 1);
  1590. ret = of_property_read_u32(dev->of_node, "qcom,iova-width", &tbu->iova_width);
  1591. if (ret < 0)
  1592. return ret;
  1593. dev_set_drvdata(dev, tbu);
  1594. return 0;
  1595. }
  1596. static const struct of_device_id qsmmuv500_tbu_of_match[] = {
  1597. {.compatible = "qcom,qsmmuv500-tbu"},
  1598. {}
  1599. };
  1600. struct platform_driver qsmmuv500_tbu_driver = {
  1601. .driver = {
  1602. .name = "qsmmuv500-tbu",
  1603. .of_match_table = of_match_ptr(qsmmuv500_tbu_of_match),
  1604. },
  1605. .probe = qsmmuv500_tbu_probe,
  1606. };
  1607. static ssize_t arm_smmu_debug_capturebus_snapshot_read(struct file *file,
  1608. char __user *ubuf, size_t count, loff_t *offset)
  1609. {
  1610. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1611. struct arm_smmu_device *smmu = tbu->smmu;
  1612. void __iomem *tbu_base = tbu->base;
  1613. u64 snapshot[NO_OF_CAPTURE_POINTS][REGS_PER_CAPTURE_POINT];
  1614. u64 gfxttlogs[TTQTB_Capture_Points][2*TTQTB_Regs_Per_Capture_Points];
  1615. u64 ttlogs[TTQTB_Capture_Points][4*TTQTB_Regs_Per_Capture_Points];
  1616. u64 ttlogs_time[2*TTQTB_Capture_Points];
  1617. struct qtb500_device *qtb = to_qtb500(tbu);
  1618. void __iomem *transactiontracker_base = qtb->transactiontracker_base;
  1619. char buf[8192];
  1620. ssize_t retval;
  1621. size_t buflen;
  1622. int buf_len = sizeof(buf);
  1623. int qtb_type;
  1624. int i, j, x, y;
  1625. if (*offset)
  1626. return 0;
  1627. memset(buf, 0, buf_len);
  1628. if (arm_smmu_power_on(smmu->pwr))
  1629. return -EINVAL;
  1630. if (arm_smmu_power_on(tbu->pwr)) {
  1631. arm_smmu_power_off(smmu, smmu->pwr);
  1632. return -EINVAL;
  1633. }
  1634. if (!mutex_trylock(&capture_reg_lock)) {
  1635. dev_warn_ratelimited(smmu->dev,
  1636. "capture bus regs in use, not dumping it.\n");
arm_smmu_power_off(tbu->smmu, tbu->pwr);
arm_smmu_power_off(smmu, smmu->pwr);
return -EBUSY;
  1638. }
  1639. if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500")) {
  1640. if (of_device_is_compatible(smmu->dev->of_node, "qcom,adreno-smmu"))
  1641. qtb_type = 1;
  1642. else
  1643. qtb_type = 2;
  1644. arm_smmu_debug_qtb_transtrac_collect(transactiontracker_base, gfxttlogs, ttlogs,
  1645. ttlogs_time, qtb_type);
  1646. arm_smmu_debug_qtb_transtrac_reset(transactiontracker_base);
  1647. mutex_unlock(&capture_reg_lock);
  1648. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1649. arm_smmu_power_off(smmu, smmu->pwr);
  1650. for (i = 0, x = 0; i < TTQTB_Capture_Points &&
  1651. x < 2*TTQTB_Capture_Points; i++, x += 2) {
  1652. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1653. "Latency_%d : 0x%lx\n",
  1654. i, ttlogs_time[x]);
  1655. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1656. "Timestamp_%d : 0x%lx\n",
  1657. i, ttlogs_time[x+1]);
  1658. if (qtb_type == 1) {
  1659. for (j = 0, y = 0; j < TTQTB_Regs_Per_Capture_Points &&
  1660. y < 2*TTQTB_Regs_Per_Capture_Points; j++, y += 2) {
  1661. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1662. "LogIn_%d_%d : 0x%lx\n",
  1663. i, j, gfxttlogs[i][y]);
  1664. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1665. "LogOut_%d_%d : 0x%lx\n",
  1666. i, j, gfxttlogs[i][y+1]);
  1667. }
  1668. } else if (qtb_type == 2) {
  1669. for (j = 0, y = 0; j < TTQTB_Regs_Per_Capture_Points &&
  1670. y < 4*TTQTB_Regs_Per_Capture_Points; j++, y += 4) {
  1671. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1672. "LogIn_%d_%d_Low : 0x%lx\n",
  1673. i, j, ttlogs[i][y]);
  1674. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1675. "LogIn_%d_%d_High : 0x%lx\n",
  1676. i, j, ttlogs[i][y+1]);
  1677. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1678. "LogOut_%d_%d_Low : 0x%lx\n",
  1679. i, j, ttlogs[i][y+2]);
  1680. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1681. "LogOut_%d_%d_High : 0x%lx\n",
  1682. i, j, ttlogs[i][y+3]);
  1683. }
  1684. }
  1685. }
  1686. buflen = min(count, strlen(buf));
  1687. if (copy_to_user(ubuf, buf, buflen)) {
  1688. dev_err_ratelimited(smmu->dev, "Couldn't copy_to_user\n");
  1689. retval = -EFAULT;
  1690. } else {
  1691. *offset = 1;
  1692. retval = buflen;
  1693. }
  1694. return retval;
} else {
  1697. arm_smmu_debug_get_capture_snapshot(tbu_base, snapshot);
  1698. mutex_unlock(&capture_reg_lock);
  1699. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1700. arm_smmu_power_off(smmu, smmu->pwr);
  1701. for (i = 0; i < NO_OF_CAPTURE_POINTS ; ++i) {
  1702. for (j = 0; j < REGS_PER_CAPTURE_POINT; ++j) {
  1703. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1704. "Capture_%d_Snapshot_%d : 0x%0llx\n",
  1705. i+1, j+1, snapshot[i][j]);
  1706. }
  1707. }
  1708. buflen = min(count, strlen(buf));
  1709. if (copy_to_user(ubuf, buf, buflen)) {
  1710. dev_err_ratelimited(smmu->dev, "Couldn't copy_to_user\n");
  1711. retval = -EFAULT;
  1712. } else {
  1713. *offset = 1;
  1714. retval = buflen;
  1715. }
  1716. return retval;
  1717. }
  1718. }
  1719. static const struct file_operations arm_smmu_debug_capturebus_snapshot_fops = {
  1720. .open = simple_open,
  1721. .read = arm_smmu_debug_capturebus_snapshot_read,
  1722. };
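/*
 * Accepted "config" write formats, as parsed below (shell examples are
 * illustrative; the file lives under iommu-debug/capturebus/<tbu>/config
 * created by qsmmuv500_capturebus_init()):
 *
 *   QTB500 transaction tracker:  echo 0 > config   (default configuration)
 *                                echo 1 > config   (custom configuration)
 *
 *   Legacy TBU capture bus:      echo <1/2/3>,<mask>,<match> > config
 *                                echo 4,<TNX_TCR_CNTL> > config
 */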
  1723. static ssize_t arm_smmu_debug_capturebus_config_write(struct file *file,
  1724. const char __user *ubuf, size_t count, loff_t *offset)
  1725. {
  1726. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1727. struct arm_smmu_device *smmu = tbu->smmu;
  1728. void __iomem *tbu_base = tbu->base;
  1729. struct qtb500_device *qtb = to_qtb500(tbu);
  1730. void __iomem *transactiontracker_base = qtb->transactiontracker_base;
  1731. char *comma1, *comma2;
  1732. char buf[100];
  1733. u64 sel, mask, match, val;
  1734. if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500")) {
  1735. if (count >= sizeof(buf)) {
  1736. dev_err_ratelimited(smmu->dev, "Input too large\n");
goto invalid_tt_format;
  1738. }
  1739. memset(buf, 0, sizeof(buf));
  1740. if (copy_from_user(buf, ubuf, count)) {
  1741. dev_err_ratelimited(smmu->dev, "Couldn't copy from user\n");
  1742. return -EFAULT;
  1743. }
  1744. if (kstrtou64(buf, 0, &sel))
  1745. goto invalid_tt_format;
  1746. if (sel == 0 || sel == 1)
  1747. goto transtracker_configure;
  1748. else
  1749. goto invalid_tt_format;
  1750. transtracker_configure:
  1751. if (arm_smmu_power_on(smmu->pwr))
  1752. return -EINVAL;
  1753. if (arm_smmu_power_on(tbu->pwr)) {
  1754. arm_smmu_power_off(smmu, smmu->pwr);
  1755. return -EINVAL;
  1756. }
  1757. if (!mutex_trylock(&capture_reg_lock)) {
  1758. dev_warn_ratelimited(smmu->dev,
  1759. "Transaction Tracker regs in use, not configuring it.\n");
arm_smmu_power_off(tbu->smmu, tbu->pwr);
arm_smmu_power_off(smmu, smmu->pwr);
return -EBUSY;
  1761. }
  1762. arm_smmu_debug_qtb_transtracker_set_config(transactiontracker_base, sel);
  1763. mutex_unlock(&capture_reg_lock);
  1764. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1765. arm_smmu_power_off(smmu, smmu->pwr);
  1766. return count;
  1767. invalid_tt_format:
  1768. dev_err_ratelimited(smmu->dev, "Invalid format\n");
  1769. dev_err_ratelimited(smmu->dev, "This is QTB equipped device\n");
  1770. dev_err_ratelimited(smmu->dev,
  1771. "Expected:<0/1> 0 for Default configuration, 1 for custom configuration\n");
  1772. return -EINVAL;
} else {
  1775. if (count >= sizeof(buf)) {
  1776. dev_err_ratelimited(smmu->dev, "Input too large\n");
  1777. goto invalid_format;
  1778. }
  1779. memset(buf, 0, sizeof(buf));
  1780. if (copy_from_user(buf, ubuf, count)) {
  1781. dev_err_ratelimited(smmu->dev, "Couldn't copy from user\n");
  1782. return -EFAULT;
  1783. }
  1784. comma1 = strnchr(buf, count, ',');
  1785. if (!comma1)
  1786. goto invalid_format;
  1787. *comma1 = '\0';
  1788. if (kstrtou64(buf, 0, &sel))
  1789. goto invalid_format;
  1790. if (sel > 4) {
  1791. goto invalid_format;
  1792. } else if (sel == 4) {
  1793. if (kstrtou64(comma1 + 1, 0, &val))
  1794. goto invalid_format;
  1795. goto program_capturebus;
  1796. }
  1797. comma2 = strnchr(comma1 + 1, count, ',');
  1798. if (!comma2)
  1799. goto invalid_format;
  1800. /* split up the words */
  1801. *comma2 = '\0';
  1802. if (kstrtou64(comma1 + 1, 0, &mask))
  1803. goto invalid_format;
  1804. if (kstrtou64(comma2 + 1, 0, &match))
  1805. goto invalid_format;
  1806. program_capturebus:
  1807. if (arm_smmu_power_on(smmu->pwr))
  1808. return -EINVAL;
  1809. if (arm_smmu_power_on(tbu->pwr)) {
  1810. arm_smmu_power_off(smmu, smmu->pwr);
  1811. return -EINVAL;
  1812. }
  1813. if (!mutex_trylock(&capture_reg_lock)) {
  1814. dev_warn_ratelimited(smmu->dev,
  1815. "capture bus regs in use, not configuring it.\n");
arm_smmu_power_off(tbu->smmu, tbu->pwr);
arm_smmu_power_off(smmu, smmu->pwr);
return -EBUSY;
  1817. }
  1818. if (sel == 4)
  1819. arm_smmu_debug_set_tnx_tcr_cntl(tbu_base, val);
  1820. else
  1821. arm_smmu_debug_set_mask_and_match(tbu_base, sel, mask, match);
  1822. mutex_unlock(&capture_reg_lock);
  1823. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1824. arm_smmu_power_off(smmu, smmu->pwr);
  1825. return count;
  1826. invalid_format:
  1827. dev_err_ratelimited(smmu->dev, "Invalid format\n");
  1828. dev_err_ratelimited(smmu->dev,
  1829. "Expected:<1/2/3,Mask,Match> <4,TNX_TCR_CNTL>\n");
  1830. return -EINVAL;
  1831. }
  1832. }
  1833. static ssize_t arm_smmu_debug_capturebus_config_read(struct file *file,
  1834. char __user *ubuf, size_t count, loff_t *offset)
  1835. {
  1836. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1837. struct arm_smmu_device *smmu = tbu->smmu;
  1838. void __iomem *tbu_base = tbu->base;
  1839. struct qtb500_device *qtb = to_qtb500(tbu);
  1840. void __iomem *transactiontracker_base = qtb->transactiontracker_base;
  1841. u64 val;
  1842. u64 config;
  1843. u64 mask[NO_OF_MASK_AND_MATCH], match[NO_OF_MASK_AND_MATCH];
  1844. char buf[400];
  1845. ssize_t retval;
  1846. size_t buflen;
  1847. int buf_len = sizeof(buf);
  1848. int i;
  1849. if (*offset)
  1850. return 0;
  1851. memset(buf, 0, buf_len);
  1852. if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500")) {
  1853. if (arm_smmu_power_on(smmu->pwr))
  1854. return -EINVAL;
  1855. if (arm_smmu_power_on(tbu->pwr)) {
  1856. arm_smmu_power_off(smmu, smmu->pwr);
  1857. return -EINVAL;
  1858. }
  1859. if (!mutex_trylock(&capture_reg_lock)) {
  1860. dev_warn_ratelimited(smmu->dev,
  1861. "capture bus regs in use, not configuring it.\n");
arm_smmu_power_off(tbu->smmu, tbu->pwr);
arm_smmu_power_off(smmu, smmu->pwr);
return -EBUSY;
  1863. }
  1864. config = arm_smmu_debug_qtb_transtracker_get_config(transactiontracker_base);
  1865. mutex_unlock(&capture_reg_lock);
  1866. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1867. arm_smmu_power_off(smmu, smmu->pwr);
  1868. if (config == (TTQTB_GlbEn | TTQTB_IgnoreCtiTrigIn0 | TTQTB_LogAsstEn))
  1869. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1870. "Custom configuration selected, MainCtl filter val: 0x%0lx\n",
  1871. config);
  1872. else if (config == (TTQTB_GlbEn | TTQTB_IgnoreCtiTrigIn0 | TTQTB_LogAll))
  1873. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1874. "Default configuration selected, MainCtl filter val : 0x%0lx\n",
  1875. config);
  1876. buflen = min(count, strlen(buf));
  1877. if (copy_to_user(ubuf, buf, buflen)) {
  1878. dev_err_ratelimited(smmu->dev, "Couldn't copy_to_user\n");
  1879. retval = -EFAULT;
  1880. } else {
  1881. *offset = 1;
  1882. retval = buflen;
  1883. }
  1884. return retval;
} else {
  1887. if (arm_smmu_power_on(smmu->pwr))
  1888. return -EINVAL;
  1889. if (arm_smmu_power_on(tbu->pwr)) {
  1890. arm_smmu_power_off(smmu, smmu->pwr);
  1891. return -EINVAL;
  1892. }
  1893. if (!mutex_trylock(&capture_reg_lock)) {
  1894. dev_warn_ratelimited(smmu->dev,
  1895. "capture bus regs in use, not configuring it.\n");
arm_smmu_power_off(tbu->smmu, tbu->pwr);
arm_smmu_power_off(smmu, smmu->pwr);
return -EBUSY;
  1897. }
  1898. arm_smmu_debug_get_mask_and_match(tbu_base,
  1899. mask, match);
  1900. val = arm_smmu_debug_get_tnx_tcr_cntl(tbu_base);
  1901. mutex_unlock(&capture_reg_lock);
  1902. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1903. arm_smmu_power_off(smmu, smmu->pwr);
  1904. for (i = 0; i < NO_OF_MASK_AND_MATCH; ++i) {
  1905. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1906. "Mask_%d : 0x%0llx\t", i+1, mask[i]);
  1907. scnprintf(buf + strlen(buf), buf_len - strlen(buf),
  1908. "Match_%d : 0x%0llx\n", i+1, match[i]);
  1909. }
scnprintf(buf + strlen(buf), buf_len - strlen(buf), "0x%llx\n", val);
  1911. buflen = min(count, strlen(buf));
  1912. if (copy_to_user(ubuf, buf, buflen)) {
  1913. dev_err_ratelimited(smmu->dev, "Couldn't copy_to_user\n");
  1914. retval = -EFAULT;
  1915. } else {
  1916. *offset = 1;
  1917. retval = buflen;
  1918. }
  1919. return retval;
  1920. }
  1921. }
  1922. static ssize_t arm_smmu_debug_capturebus_filter_write(struct file *file,
  1923. const char __user *ubuf, size_t count, loff_t *offset)
  1924. {
  1925. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1926. struct arm_smmu_device *smmu = tbu->smmu;
  1927. struct qtb500_device *qtb = to_qtb500(tbu);
  1928. void __iomem *transactiontracker_base = qtb->transactiontracker_base;
  1929. char *comma1;
  1930. char buf[200];
  1931. u64 sel, filter;
  1932. int qtb_type = 0;
  1933. if (count >= sizeof(buf)) {
  1934. dev_err_ratelimited(smmu->dev, "Input too large\n");
  1935. goto invalid_filter_format;
  1936. }
  1937. memset(buf, 0, sizeof(buf));
  1938. if (copy_from_user(buf, ubuf, count)) {
  1939. dev_err_ratelimited(smmu->dev, "Couldn't copy from user\n");
  1940. return -EFAULT;
  1941. }
  1942. comma1 = strnchr(buf, count, ',');
  1943. if (!comma1)
  1944. goto invalid_filter_format;
  1945. *comma1 = '\0';
  1946. if (kstrtou64(buf, 0, &sel))
  1947. goto invalid_filter_format;
  1948. if (sel == 1 || sel == 2 || sel == 3) {
  1949. if (kstrtou64(comma1 + 1, 0, &filter))
  1950. goto invalid_filter_format;
  1951. goto set_capturebus_filter;
} else {
goto invalid_filter_format;
}
  1955. set_capturebus_filter:
  1956. if (arm_smmu_power_on(smmu->pwr))
  1957. return -EINVAL;
  1958. if (arm_smmu_power_on(tbu->pwr)) {
  1959. arm_smmu_power_off(smmu, smmu->pwr);
  1960. return -EINVAL;
  1961. }
  1962. if (!mutex_trylock(&capture_reg_lock)) {
  1963. dev_warn_ratelimited(smmu->dev,
  1964. "Transaction Tracker regs in use, not configuring it.\n");
arm_smmu_power_off(tbu->smmu, tbu->pwr);
arm_smmu_power_off(smmu, smmu->pwr);
return -EBUSY;
  1966. }
  1967. if (of_device_is_compatible(smmu->dev->of_node, "qcom,adreno-smmu"))
  1968. qtb_type = 1;
  1969. else
  1970. qtb_type = 2;
  1971. arm_smmu_debug_qtb_transtracker_setfilter(transactiontracker_base,
  1972. sel, filter, qtb_type);
  1973. mutex_unlock(&capture_reg_lock);
  1974. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  1975. arm_smmu_power_off(smmu, smmu->pwr);
  1976. return count;
  1977. invalid_filter_format:
  1978. dev_err_ratelimited(smmu->dev, "Invalid format\n");
  1979. dev_err_ratelimited(smmu->dev, "This is QTB equipped device\n");
  1980. dev_err_ratelimited(smmu->dev,
  1981. "Expected:<1/2/3,TrType/AddressMin/AddressMax>\n");
  1982. return -EINVAL;
  1983. }
  1984. static ssize_t arm_smmu_debug_capturebus_filter_read(struct file *file,
  1985. char __user *ubuf, size_t count, loff_t *offset)
  1986. {
  1987. struct qsmmuv500_tbu_device *tbu = file->private_data;
  1988. struct arm_smmu_device *smmu = tbu->smmu;
  1989. struct qtb500_device *qtb = to_qtb500(tbu);
  1990. void __iomem *transactiontracker_base = qtb->transactiontracker_base;
  1991. u64 filter[3];
  1992. char buf[400];
  1993. ssize_t retval;
  1994. size_t buflen;
  1995. int buf_len = sizeof(buf);
  1996. int i = 0, qtb_type = 0;
  1997. if (*offset)
  1998. return 0;
  1999. memset(buf, 0, buf_len);
  2000. if (arm_smmu_power_on(smmu->pwr))
  2001. return -EINVAL;
  2002. if (arm_smmu_power_on(tbu->pwr)) {
  2003. arm_smmu_power_off(smmu, smmu->pwr);
  2004. return -EINVAL;
  2005. }
  2006. if (!mutex_trylock(&capture_reg_lock)) {
  2007. dev_warn_ratelimited(smmu->dev,
  2008. "Transaction Tracker regs in use, not configuring it.\n");
arm_smmu_power_off(tbu->smmu, tbu->pwr);
arm_smmu_power_off(smmu, smmu->pwr);
return -EBUSY;
  2010. }
  2011. if (of_device_is_compatible(smmu->dev->of_node, "qcom,adreno-smmu"))
  2012. qtb_type = 1;
  2013. else
  2014. qtb_type = 2;
  2015. arm_smmu_debug_qtb_transtracker_getfilter(transactiontracker_base, filter, qtb_type);
  2016. mutex_unlock(&capture_reg_lock);
  2017. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  2018. arm_smmu_power_off(smmu, smmu->pwr);
scnprintf(buf + strlen(buf), buf_len - strlen(buf),
"Filter_TrType : 0x%llx\n", filter[i]);
scnprintf(buf + strlen(buf), buf_len - strlen(buf),
"Filter_AddressMin : 0x%llx\n", filter[i+1]);
scnprintf(buf + strlen(buf), buf_len - strlen(buf),
"Filter_AddressMax : 0x%llx\n", filter[i+2]);
  2025. buflen = min(count, strlen(buf));
  2026. if (copy_to_user(ubuf, buf, buflen)) {
  2027. dev_err_ratelimited(smmu->dev, "Couldn't copy_to_user\n");
  2028. retval = -EFAULT;
  2029. } else {
  2030. *offset = 1;
  2031. retval = buflen;
  2032. }
  2033. return retval;
  2034. }
  2035. static const struct file_operations arm_smmu_debug_capturebus_config_fops = {
  2036. .open = simple_open,
  2037. .write = arm_smmu_debug_capturebus_config_write,
  2038. .read = arm_smmu_debug_capturebus_config_read,
  2039. };
  2040. static const struct file_operations arm_smmu_debug_capturebus_filter_fops = {
  2041. .open = simple_open,
  2042. .write = arm_smmu_debug_capturebus_filter_write,
  2043. .read = arm_smmu_debug_capturebus_filter_read,
  2044. };
  2045. #ifdef CONFIG_ARM_SMMU_CAPTUREBUS_DEBUGFS
  2046. static struct dentry *debugfs_capturebus_dir;
  2047. static int qsmmuv500_capturebus_init(struct qsmmuv500_tbu_device *tbu)
  2048. {
  2049. struct dentry *capturebus_dir, *iommu_debug_dir;
  2050. iommu_debug_dir = get_iommu_debug_dir();
  2051. if (!iommu_debug_dir)
  2052. return 0;
  2053. if (!debugfs_capturebus_dir) {
  2054. debugfs_capturebus_dir = debugfs_create_dir(
  2055. "capturebus", iommu_debug_dir);
  2056. if (IS_ERR(debugfs_capturebus_dir)) {
  2057. dev_err_ratelimited(tbu->dev, "Couldn't create iommu/capturebus debugfs directory\n");
  2058. return PTR_ERR(debugfs_capturebus_dir);
  2059. }
  2060. }
  2061. capturebus_dir = debugfs_create_dir(dev_name(tbu->dev),
  2062. debugfs_capturebus_dir);
  2063. if (IS_ERR(capturebus_dir)) {
  2064. dev_err_ratelimited(tbu->dev, "Couldn't create iommu/capturebus/%s debugfs directory\n",
  2065. dev_name(tbu->dev));
  2066. goto err;
  2067. }
  2068. if (IS_ERR(debugfs_create_file("config", 0400, capturebus_dir, tbu,
  2069. &arm_smmu_debug_capturebus_config_fops))) {
  2070. dev_err_ratelimited(tbu->dev, "Couldn't create iommu/capturebus/%s/config debugfs file\n",
  2071. dev_name(tbu->dev));
  2072. goto err_rmdir;
  2073. }
  2074. if (of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500")) {
  2075. if (IS_ERR(debugfs_create_file("filter", 0400, capturebus_dir, tbu,
  2076. &arm_smmu_debug_capturebus_filter_fops))) {
  2077. dev_err_ratelimited(tbu->dev, "Couldn't create iommu/capturebus/%s/filter debugfs file\n",
  2078. dev_name(tbu->dev));
  2079. goto err_rmdir;
  2080. }
  2081. }
  2082. if (IS_ERR(debugfs_create_file("snapshot", 0400, capturebus_dir, tbu,
  2083. &arm_smmu_debug_capturebus_snapshot_fops))) {
  2084. dev_err_ratelimited(tbu->dev, "Couldn't create iommu/capturebus/%s/snapshot debugfs file\n",
  2085. dev_name(tbu->dev));
  2086. goto err_rmdir;
  2087. }
  2088. return 0;
  2089. err_rmdir:
  2090. debugfs_remove_recursive(capturebus_dir);
  2091. err:
  2092. return -ENODEV;
  2093. }
  2094. #else
  2095. static int qsmmuv500_capturebus_init(struct qsmmuv500_tbu_device *tbu)
  2096. {
  2097. return 0;
  2098. }
  2099. #endif
  2100. static irqreturn_t arm_smmu_debug_capture_bus_match(int irq, void *dev)
  2101. {
  2102. struct qsmmuv500_tbu_device *tbu = dev;
  2103. struct arm_smmu_device *smmu = tbu->smmu;
  2104. void __iomem *tbu_base = tbu->base;
  2105. u64 mask[NO_OF_MASK_AND_MATCH], match[NO_OF_MASK_AND_MATCH];
  2106. u64 snapshot[NO_OF_CAPTURE_POINTS][REGS_PER_CAPTURE_POINT];
  2107. int i, j;
  2108. u64 val;
  2109. if (arm_smmu_power_on(smmu->pwr))
  2110. return IRQ_NONE;
  2111. if (arm_smmu_power_on(tbu->pwr)) {
  2112. arm_smmu_power_off(smmu, smmu->pwr);
  2113. return IRQ_NONE;
  2114. }
  2115. if (!mutex_trylock(&capture_reg_lock)) {
  2116. dev_warn_ratelimited(smmu->dev,
  2117. "capture bus regs in use, not dumping it.\n");
  2118. return IRQ_NONE;
  2119. }
  2120. val = arm_smmu_debug_get_tnx_tcr_cntl(tbu_base);
  2121. arm_smmu_debug_get_mask_and_match(tbu_base, mask, match);
  2122. arm_smmu_debug_get_capture_snapshot(tbu_base, snapshot);
  2123. arm_smmu_debug_clear_intr_and_validbits(tbu_base);
  2124. mutex_unlock(&capture_reg_lock);
  2125. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  2126. arm_smmu_power_off(smmu, smmu->pwr);
  2127. dev_info(tbu->dev, "TNX_TCR_CNTL : 0x%0llx\n", val);
  2128. for (i = 0; i < NO_OF_MASK_AND_MATCH; ++i) {
  2129. dev_info(tbu->dev,
  2130. "Mask_%d : 0x%0llx\n", i+1, mask[i]);
  2131. dev_info(tbu->dev,
  2132. "Match_%d : 0x%0llx\n", i+1, match[i]);
  2133. }
  2134. for (i = 0; i < NO_OF_CAPTURE_POINTS ; ++i) {
  2135. for (j = 0; j < REGS_PER_CAPTURE_POINT; ++j) {
  2136. dev_info(tbu->dev,
  2137. "Capture_%d_Snapshot_%d : 0x%0llx\n",
  2138. i+1, j+1, snapshot[i][j]);
  2139. }
  2140. }
  2141. return IRQ_HANDLED;
  2142. }
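/*
 * Invoked when a TLB sync times out: read the TBU sync/invalidate ACK
 * status and, via SCM, the TBU power status and TCU sync/inv progress,
 * name the powered TBUs that have not acknowledged, dump their testbus
 * state, and schedule the outstanding-transaction snapshot work if a TCU
 * sync is still pending. BUGs when CONFIG_IOMMU_TLBSYNC_DEBUG is enabled
 * so the hang is caught in debug builds.
 */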
  2143. static void qsmmuv500_tlb_sync_timeout(struct arm_smmu_device *smmu)
  2144. {
  2145. u32 sync_inv_ack, tbu_pwr_status, sync_inv_progress;
  2146. u32 tbu_inv_pending = 0, tbu_sync_pending = 0;
  2147. u32 tbu_inv_acked = 0, tbu_sync_acked = 0;
  2148. u32 tcu_inv_pending = 0, tcu_sync_pending = 0;
  2149. unsigned long tbu_ids = 0;
  2150. struct qsmmuv500_archdata *data = to_qsmmuv500_archdata(smmu);
  2151. int ret;
  2152. static DEFINE_RATELIMIT_STATE(_rs,
  2153. DEFAULT_RATELIMIT_INTERVAL,
  2154. DEFAULT_RATELIMIT_BURST);
  2155. dev_err_ratelimited(smmu->dev,
  2156. "TLB sync timed out -- SMMU may be deadlocked\n");
  2157. sync_inv_ack = arm_smmu_readl(smmu,
  2158. ARM_SMMU_IMPL_DEF5,
  2159. ARM_SMMU_STATS_SYNC_INV_TBU_ACK);
  2160. if (sync_inv_ack) {
  2161. tbu_inv_pending = FIELD_GET(TBU_INV_REQ, sync_inv_ack);
  2162. tbu_inv_acked = FIELD_GET(TBU_INV_ACK, sync_inv_ack);
  2163. tbu_sync_pending = FIELD_GET(TBU_SYNC_REQ, sync_inv_ack);
  2164. tbu_sync_acked = FIELD_GET(TBU_SYNC_ACK, sync_inv_ack);
  2165. }
  2166. ret = qcom_scm_io_readl((unsigned long)(smmu->phys_addr +
  2167. ARM_SMMU_TBU_PWR_STATUS), &tbu_pwr_status);
  2168. if (ret) {
  2169. dev_err_ratelimited(smmu->dev,
  2170. "SCM read of TBU power status fails: %d\n",
  2171. ret);
  2172. goto out;
  2173. }
  2174. ret = qcom_scm_io_readl((unsigned long)(smmu->phys_addr +
  2175. ARM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR),
  2176. &sync_inv_progress);
  2177. if (ret) {
  2178. dev_err_ratelimited(smmu->dev,
  2179. "SCM read of TBU sync/inv prog fails: %d\n",
  2180. ret);
  2181. goto out;
  2182. }
  2183. if (tbu_pwr_status) {
  2184. if (tbu_sync_pending)
  2185. tbu_ids = tbu_pwr_status & ~tbu_sync_acked;
  2186. else if (tbu_inv_pending)
  2187. tbu_ids = tbu_pwr_status & ~tbu_inv_acked;
  2188. }
  2189. tcu_inv_pending = FIELD_GET(TCU_INV_IN_PRGSS, sync_inv_progress);
  2190. tcu_sync_pending = FIELD_GET(TCU_SYNC_IN_PRGSS, sync_inv_progress);
  2191. if (__ratelimit(&_rs)) {
  2192. unsigned long tbu_id;
  2193. dev_err(smmu->dev,
  2194. "TBU ACK 0x%x TBU PWR 0x%x TCU sync_inv 0x%x\n",
  2195. sync_inv_ack, tbu_pwr_status, sync_inv_progress);
  2196. dev_err(smmu->dev,
  2197. "TCU invalidation %s, TCU sync %s\n",
tcu_inv_pending ? "pending" : "completed",
tcu_sync_pending ? "pending" : "completed");
  2200. for_each_set_bit(tbu_id, &tbu_ids, sizeof(tbu_ids) *
  2201. BITS_PER_BYTE) {
  2202. struct qsmmuv500_tbu_device *tbu;
  2203. tbu = qsmmuv500_find_tbu(smmu,
  2204. (u16)(tbu_id << TBUID_SHIFT));
  2205. if (tbu) {
  2206. dev_err(smmu->dev,
  2207. "TBU %s ack pending for TBU %s, %s\n",
  2208. tbu_sync_pending?"sync" : "inv",
  2209. dev_name(tbu->dev),
  2210. tbu_sync_pending ?
  2211. "check pending transactions on TBU"
  2212. : "check for TBU power status");
  2213. arm_smmu_testbus_dump(smmu,
  2214. (u16)(tbu_id << TBUID_SHIFT));
  2215. }
  2216. }
/* Dump the TCU testbus */
  2218. arm_smmu_testbus_dump(smmu, U16_MAX);
  2219. }
  2220. if (tcu_sync_pending) {
  2221. schedule_work(&data->outstanding_tnx_work);
  2222. return;
  2223. }
  2224. out:
  2225. if (ret) {
  2226. if (sync_inv_ack) {
/*
* TBU power status is not available, so just dump the raw
* ack fields.
  2229. */
  2230. dev_err(smmu->dev,
  2231. "TBU %s ack pending, got ack for TBUs %d, %s\n",
  2232. tbu_sync_pending ? "sync" : "inv",
tbu_sync_pending ? tbu_sync_acked : tbu_inv_acked,
  2234. tbu_sync_pending ?
  2235. "check pending transactions on TBU"
  2236. : "check for TBU power status");
  2237. }
  2238. dev_err(smmu->dev, "TBU SYNC_INV_ACK reg 0x%x\n", sync_inv_ack);
  2239. }
  2240. BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
  2241. }
  2242. static void qsmmuv500_device_remove(struct arm_smmu_device *smmu)
  2243. {
  2244. struct qsmmuv500_archdata *data = to_qsmmuv500_archdata(smmu);
  2245. cancel_work_sync(&data->outstanding_tnx_work);
  2246. }
  2247. /*
  2248. * Checks whether smr2 is a subset of smr
  2249. */
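/*
 * For example, smr = {.id = 0x400, .mask = 0xff} covers stream IDs
 * 0x400-0x4ff, so smr2 = {.id = 0x410, .mask = 0x0f} is a subset:
 * (0xff & 0x0f) == 0x0f and ((0x400 ^ 0x410) & ~0xff) == 0.
 */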
  2250. static bool smr_is_subset(struct arm_smmu_smr *smr2, struct arm_smmu_smr *smr)
  2251. {
  2252. return (smr->mask & smr2->mask) == smr2->mask &&
  2253. !((smr->id ^ smr2->id) & ~smr->mask);
  2254. }
  2255. /*
* Perform a hardware (ECATS) translation of @iova for stream ID @sid on
* the TBU that owns the stream. Returns the physical address, or zero
* on failure.
  2257. */
  2258. static phys_addr_t qsmmuv500_iova_to_phys(struct arm_smmu_domain *smmu_domain, dma_addr_t iova,
  2259. u32 sid, unsigned long trans_flags)
  2260. {
  2261. struct arm_smmu_device *smmu = smmu_domain->smmu;
  2262. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  2263. struct qsmmuv500_archdata *data = to_qsmmuv500_archdata(smmu);
  2264. struct qsmmuv500_tbu_device *tbu;
  2265. phys_addr_t phys = 0;
  2266. int idx = cfg->cbndx;
  2267. int needs_redo = 0;
  2268. u32 sctlr_orig, sctlr, fsr;
  2269. unsigned long spinlock_flags;
  2270. tbu = qsmmuv500_find_tbu(smmu, sid);
  2271. if (!tbu)
  2272. return 0;
  2273. if (arm_smmu_power_on(tbu->pwr))
  2274. return 0;
  2275. if (iova >= (1ULL << tbu->iova_width)) {
  2276. dev_err_ratelimited(tbu->dev, "ECATS: address too large: %pad\n", &iova);
goto out_power_off;
  2278. }
  2279. if (qsmmuv500_tbu_halt(tbu, smmu_domain))
  2280. goto out_power_off;
  2281. /*
  2282. * ECATS can trigger the fault interrupt, so disable it temporarily
  2283. * and check for an interrupt manually.
  2284. */
  2285. sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
  2286. sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
  2287. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
  2288. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  2289. if (fsr & ARM_SMMU_FSR_FAULT) {
  2290. /* Clear pending interrupts */
  2291. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
  2292. /*
  2293. * Barrier required to ensure that the FSR is cleared
  2294. * before resuming SMMU operation.
  2295. */
  2296. wmb();
  2297. /*
* TBU halt takes care of resuming any stalled transaction.
* Kept here for completeness' sake.
  2300. */
  2301. if (fsr & ARM_SMMU_FSR_SS)
  2302. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
  2303. ARM_SMMU_RESUME_TERMINATE);
  2304. }
  2305. /* Only one concurrent atos operation */
  2306. spin_lock_irqsave(&data->atos_lock, spinlock_flags);
  2307. /*
  2308. * After a failed translation, the next successful translation will
  2309. * incorrectly be reported as a failure.
  2310. */
  2311. do {
  2312. phys = tbu->impl->trigger_atos(tbu, iova, sid, trans_flags);
  2313. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  2314. if (fsr & ARM_SMMU_FSR_FAULT) {
  2315. /* Clear pending interrupts */
  2316. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
  2317. /*
  2318. * Barrier required to ensure that the FSR is cleared
  2319. * before resuming SMMU operation.
  2320. */
  2321. wmb();
  2322. if (fsr & ARM_SMMU_FSR_SS)
  2323. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
  2324. ARM_SMMU_RESUME_TERMINATE);
  2325. }
  2326. } while (!phys && needs_redo++ < 2);
  2327. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
  2328. spin_unlock_irqrestore(&data->atos_lock, spinlock_flags);
  2329. qsmmuv500_tbu_resume(tbu);
  2330. out_power_off:
/* Read back to complete prior write transactions */
  2332. tbu->impl->write_sync(tbu);
  2333. /* Wait for read to complete before off */
  2334. rmb();
  2335. arm_smmu_power_off(tbu->smmu, tbu->pwr);
  2336. return phys;
  2337. }
  2338. static phys_addr_t qsmmuv500_iova_to_phys_hard(
  2339. struct arm_smmu_domain *smmu_domain,
  2340. struct qcom_iommu_atos_txn *txn)
  2341. {
  2342. return qsmmuv500_iova_to_phys(smmu_domain, txn->addr, txn->id,
  2343. txn->flags);
  2344. }
  2345. static void qsmmuv500_release_group_iommudata(void *data)
  2346. {
  2347. kfree(data);
  2348. }
  2349. /* If a device has a valid actlr, it must match */
  2350. static int qsmmuv500_device_group(struct device *dev,
  2351. struct iommu_group *group)
  2352. {
  2353. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  2354. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  2355. struct arm_smmu_device *smmu;
  2356. struct qsmmuv500_archdata *data;
  2357. struct qsmmuv500_group_iommudata *iommudata;
  2358. u32 actlr, i, j, idx;
  2359. struct arm_smmu_smr *smr, *smr2;
  2360. if (!fwspec || !cfg)
  2361. return -EINVAL;
  2362. smmu = cfg->smmu;
  2363. data = to_qsmmuv500_archdata(smmu);
  2364. iommudata = to_qsmmuv500_group_iommudata(group);
  2365. if (!iommudata) {
  2366. iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
  2367. if (!iommudata)
  2368. return -ENOMEM;
  2369. iommu_group_set_iommudata(group, iommudata,
  2370. qsmmuv500_release_group_iommudata);
  2371. }
  2372. for (i = 0; i < data->actlr_tbl_size; i++) {
  2373. smr = &data->actlrs[i].smr;
  2374. actlr = data->actlrs[i].actlr;
  2375. for_each_cfg_sme(cfg, fwspec, j, idx) {
  2376. smr2 = &smmu->smrs[idx];
  2377. if (!smr_is_subset(smr2, smr))
  2378. continue;
  2379. dev_dbg(dev, "Matched actlr sid=%x mask=%x actlr=%x\n",
  2380. smr->id, smr->mask, actlr);
  2381. if (!iommudata->has_actlr) {
  2382. iommudata->actlr = actlr;
  2383. iommudata->has_actlr = true;
  2384. } else if (iommudata->actlr != actlr) {
  2385. dev_err(dev, "Invalid actlr setting\n");
  2386. return -EINVAL;
  2387. }
  2388. }
  2389. }
  2390. return 0;
  2391. }
  2392. static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
  2393. struct device *dev)
  2394. {
  2395. struct arm_smmu_device *smmu = smmu_domain->smmu;
  2396. struct qsmmuv500_group_iommudata *iommudata =
  2397. to_qsmmuv500_group_iommudata(dev->iommu_group);
  2398. int idx = smmu_domain->cfg.cbndx;
  2399. if (!iommudata->has_actlr)
  2400. return;
  2401. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ACTLR, iommudata->actlr);
  2402. /*
  2403. * Flush the context bank after modifying ACTLR to ensure there
  2404. * are no cache entries with stale state
  2405. */
  2406. iommu_flush_iotlb_all(&smmu_domain->domain);
  2407. }
  2408. static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
  2409. {
  2410. struct resource *res;
  2411. struct qsmmuv500_tbu_device *tbu;
  2412. struct qsmmuv500_archdata *data = cookie;
  2413. struct platform_device *pdev = to_platform_device(dev);
  2414. int i, err, num_irqs = 0;
  2415. if (!dev->driver) {
  2416. dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
  2417. return -EINVAL;
  2418. }
  2419. tbu = dev_get_drvdata(dev);
  2420. INIT_LIST_HEAD(&tbu->list);
  2421. tbu->smmu = &data->smmu;
  2422. list_add(&tbu->list, &data->tbus);
  2423. while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs)))
  2424. num_irqs++;
tbu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*tbu->irqs), GFP_KERNEL);
  2427. if (!tbu->irqs)
  2428. return -ENOMEM;
  2429. for (i = 0; i < num_irqs; ++i) {
  2430. int irq = platform_get_irq(pdev, i);
  2431. if (irq < 0) {
  2432. dev_err(dev, "failed to get irq index %d\n", i);
  2433. return -ENODEV;
  2434. }
  2435. tbu->irqs[i] = irq;
  2436. err = devm_request_threaded_irq(tbu->dev, tbu->irqs[i],
  2437. NULL, arm_smmu_debug_capture_bus_match,
  2438. IRQF_ONESHOT | IRQF_SHARED,
  2439. "capture bus", tbu);
  2440. if (err) {
  2441. dev_err(dev, "failed to request capture bus irq%d (%u)\n",
  2442. i, tbu->irqs[i]);
  2443. return err;
  2444. }
  2445. }
  2446. /*
* For QTB500, create the testbus debugfs entries only if the
* debugchain-base property is present in the devicetree.
  2449. */
  2450. if (!of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500") ||
  2451. to_qtb500(tbu)->debugchain_base)
  2452. qsmmuv500_tbu_testbus_init(tbu);
  2453. /*
* For QTB500, create the capture bus debugfs entries only if the
* transactiontracker-base property is present in the devicetree.
  2456. */
  2457. if (!of_device_is_compatible(tbu->dev->of_node, "qcom,qtb500") ||
  2458. to_qtb500(tbu)->transactiontracker_base)
  2459. qsmmuv500_capturebus_init(tbu);
  2460. return 0;
  2461. }
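/*
 * "qcom,actlr" is read as <sid mask actlr> triples, applied to any stream
 * matching the SMR id/mask in qsmmuv500_device_group(). Illustrative
 * devicetree entry (values are examples only):
 *
 *   qcom,actlr = <0x880 0x8 0x3>,
 *                <0xc80 0x8 0x3>;
 */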
  2462. static int qsmmuv500_read_actlr_tbl(struct qsmmuv500_archdata *data)
  2463. {
  2464. int len, i;
  2465. struct device *dev = data->smmu.dev;
  2466. struct actlr_setting *actlrs;
  2467. const __be32 *cell;
  2468. cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
  2469. if (!cell)
  2470. return 0;
  2471. len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
  2472. sizeof(u32) * 3);
  2473. if (len < 0)
  2474. return 0;
actlrs = devm_kcalloc(dev, len, sizeof(*actlrs), GFP_KERNEL);
  2476. if (!actlrs)
  2477. return -ENOMEM;
  2478. for (i = 0; i < len; i++) {
  2479. actlrs[i].smr.id = of_read_number(cell++, 1);
  2480. actlrs[i].smr.mask = of_read_number(cell++, 1);
  2481. actlrs[i].actlr = of_read_number(cell++, 1);
  2482. }
  2483. data->actlrs = actlrs;
  2484. data->actlr_tbl_size = len;
  2485. return 0;
  2486. }
  2487. static int qsmmuv500_cfg_probe(struct arm_smmu_device *smmu)
  2488. {
  2489. u32 val;
  2490. val = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
  2491. val &= ~ARM_MMU500_ACR_CACHE_LOCK;
  2492. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, val);
  2493. val = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
  2494. /*
* Modifying the nonsecure copy of the sACR register is only
  2496. * allowed if permission is given in the secure sACR register.
  2497. * Attempt to detect if we were able to update the value.
  2498. */
  2499. WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
  2500. return 0;
  2501. }
  2502. /*
  2503. * Case 1)
  2504. * Client wants to use the standard upstream dma ops from
  2505. * drivers/iommu/dma-iommu.c
  2506. *
  2507. * This requires domain->type == IOMMU_DOMAIN_DMA.
  2508. *
  2509. * Case 2)
  2510. * Client doesn't want to use the default dma domain, and wants to
  2511. * allocate their own via iommu_domain_alloc()
  2512. *
  2513. * There are insufficient context banks to "waste" one on a default
  2514. * dma domain that isn't going to be used. Therefore, give it
  2515. * IOMMU_DOMAIN_IDENTITY. Note that IOMMU_DOMAIN_IDENTITY is treated as
  2516. * IOMMU_DOMAIN_BLOCKED by our hypervisor, which doesn't allow setting
  2517. * S2CR.TYPE = 0x1.
  2518. *
  2519. * Case 3)
  2520. * Client wants to use our fastmap dma ops
  2521. *
  2522. * Per case 1) we cannot use IOMMU_DOMAIN_DMA, since those imply using
  2523. * the standard upstream dma ops. So use IOMMU_DOMAIN_UNMANAGED instead.
  2524. *
  2525. * Case 4)
  2526. * Client wants to use S1 bypass
  2527. *
  2528. * Same as Case 3, except use the platform dma ops.
  2529. *
  2530. * This function can be used for qsmmuv500 and qsmmuv2.
  2531. */
  2532. static int qcom_def_domain_type(struct device *dev)
  2533. {
  2534. const char *str;
  2535. struct device_node *np;
  2536. /* Default to iommu_def_domain_type */
  2537. np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
  2538. if (!np)
  2539. np = dev->of_node;
  2540. if (of_property_read_string(np, "qcom,iommu-dma", &str))
  2541. str = "default";
  2542. if (!strcmp(str, "fastmap") &&
  2543. IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_FAST))
  2544. return IOMMU_DOMAIN_UNMANAGED;
  2545. if (!strcmp(str, "bypass"))
  2546. return IOMMU_DOMAIN_UNMANAGED;
  2547. if (!strcmp(str, "atomic"))
  2548. return IOMMU_DOMAIN_DMA;
  2549. if (!strcmp(str, "disabled"))
  2550. return IOMMU_DOMAIN_IDENTITY;
  2551. return IOMMU_DOMAIN_DMA;
  2552. }
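/*
 * Clients select one of the cases above through the "qcom,iommu-dma"
 * property on their node (or on the node referenced by
 * "qcom,iommu-group"), for example (illustrative only):
 *
 *   qcom,iommu-dma = "fastmap";
 *
 * Recognized values are "fastmap", "bypass", "atomic" and "disabled";
 * any other value, or a missing property, falls back to IOMMU_DOMAIN_DMA.
 */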
  2553. static const struct arm_smmu_impl qsmmuv500_impl = {
  2554. .cfg_probe = qsmmuv500_cfg_probe,
  2555. .init_context_bank = qsmmuv500_init_cb,
  2556. .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
  2557. .tlb_sync_timeout = qsmmuv500_tlb_sync_timeout,
  2558. .device_remove = qsmmuv500_device_remove,
  2559. .device_group = qsmmuv500_device_group,
  2560. .def_domain_type = qcom_def_domain_type,
  2561. };
  2562. static const struct arm_smmu_impl qsmmuv500_adreno_impl = {
  2563. .init_context = qcom_adreno_smmu_init_context,
  2564. .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
  2565. .cfg_probe = qsmmuv500_cfg_probe,
  2566. .init_context_bank = qsmmuv500_init_cb,
  2567. .iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
  2568. .tlb_sync_timeout = qsmmuv500_tlb_sync_timeout,
  2569. .device_remove = qsmmuv500_device_remove,
  2570. .device_group = qsmmuv500_device_group,
  2571. .def_domain_type = qcom_def_domain_type,
  2572. };
  2573. static const struct arm_smmu_impl qsmmuv2_impl = {
  2574. .init_context_bank = qsmmuv2_init_cb,
  2575. .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
  2576. .tlb_sync_timeout = qsmmuv2_tlb_sync_timeout,
  2577. .reset = qsmmuv2_device_reset,
  2578. .def_domain_type = qcom_def_domain_type,
  2579. };
  2580. static const struct arm_smmu_impl qsmmuv2_adreno_impl = {
  2581. .init_context = qcom_adreno_smmu_init_context,
  2582. .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
  2583. .init_context_bank = qsmmuv2_init_cb,
  2584. .iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
  2585. .tlb_sync_timeout = qsmmuv2_tlb_sync_timeout,
  2586. .reset = qsmmuv2_device_reset,
  2587. .def_domain_type = qcom_def_domain_type,
  2588. };
  2589. /* We only have access to arm-architected registers */
  2590. static const struct arm_smmu_impl qsmmuv500_virt_impl = {
  2591. .cfg_probe = qsmmuv500_cfg_probe,
  2592. .init_context_bank = qsmmuv500_init_cb,
  2593. .device_group = qsmmuv500_device_group,
  2594. .def_domain_type = qcom_def_domain_type,
  2595. };

struct arm_smmu_device *qsmmuv500_create(struct arm_smmu_device *smmu,
				const struct arm_smmu_impl *impl)
{
	struct device *dev = smmu->dev;
	struct qsmmuv500_archdata *data;
	int ret;
#if IS_ENABLED(CONFIG_ARM_SMMU_TESTBUS)
	struct platform_device *pdev;
#endif

	/*
	 * devm_krealloc() goes through devm_kmalloc(), so pass __GFP_ZERO
	 * to guarantee that the fields following the embedded smmu struct
	 * are zero-initialized even when we never explicitly set them
	 * (e.g. the ACTLR related fields).
	 */
	data = devm_krealloc(dev, smmu, sizeof(*data), GFP_KERNEL | __GFP_ZERO);
	if (!data)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&data->tbus);
	spin_lock_init(&data->atos_lock);
	INIT_WORK(&data->outstanding_tnx_work,
		  qsmmuv500_log_outstanding_transactions);
	data->smmu.impl = impl;

#if IS_ENABLED(CONFIG_ARM_SMMU_TESTBUS)
	pdev = to_platform_device(dev);
	data->tcu_base = devm_platform_ioremap_resource_byname(pdev, "tcu-base");
	if (IS_ERR(data->tcu_base)) {
		dev_err(dev, "Unable to get the tcu-base\n");
		return ERR_PTR(-EINVAL);
	}
#endif
	qsmmuv500_tcu_testbus_init(&data->smmu);

	ret = qsmmuv500_read_actlr_tbl(data);
	if (ret)
		return ERR_PTR(ret);

	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret)
		return ERR_PTR(ret);

	/* Attempt to register child devices */
	ret = device_for_each_child(dev, data, qsmmuv500_tbu_register);
	if (ret)
		return ERR_PTR(-EPROBE_DEFER);

	return &data->smmu;
}

static struct arm_smmu_device *qsmmuv500_virt_create(struct arm_smmu_device *smmu,
				const struct arm_smmu_impl *impl)
{
	struct device *dev = smmu->dev;
	struct qsmmuv500_archdata *data;
	int ret;

	data = devm_krealloc(dev, smmu, sizeof(*data), GFP_KERNEL | __GFP_ZERO);
	if (!data)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&data->tbus);
	spin_lock_init(&data->atos_lock);
	INIT_WORK(&data->outstanding_tnx_work,
		  qsmmuv500_log_outstanding_transactions);
	data->smmu.impl = impl;

	ret = qsmmuv500_read_actlr_tbl(data);
	if (ret)
		return ERR_PTR(ret);

	return &data->smmu;
}

struct arm_smmu_device *qsmmuv500_impl_init(struct arm_smmu_device *smmu)
{
	if (of_device_is_compatible(smmu->dev->of_node, "qcom,adreno-smmu"))
		return qsmmuv500_create(smmu, &qsmmuv500_adreno_impl);

	if (of_device_is_compatible(smmu->dev->of_node, "qcom,virt-smmu"))
		return qsmmuv500_virt_create(smmu, &qsmmuv500_virt_impl);

	return qsmmuv500_create(smmu, &qsmmuv500_impl);
}

static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct arm_smmu_impl *impl)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;
	qsmmu->cfg = qcom_smmu_impl_data(smmu);

	return &qsmmu->smmu;
}

static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8998-smmu-v2" },
	{ .compatible = "qcom,qcm2290-smmu-500" },
	{ .compatible = "qcom,sc7180-smmu-500" },
	{ .compatible = "qcom,sc7280-smmu-500" },
	{ .compatible = "qcom,sc8180x-smmu-500" },
	{ .compatible = "qcom,sc8280xp-smmu-500" },
	{ .compatible = "qcom,sdm630-smmu-v2" },
	{ .compatible = "qcom,sdm845-smmu-500" },
	{ .compatible = "qcom,sm6125-smmu-500" },
	{ .compatible = "qcom,sm6350-smmu-500" },
	{ .compatible = "qcom,sm6375-smmu-500" },
	{ .compatible = "qcom,sm8150-smmu-500" },
	{ .compatible = "qcom,sm8250-smmu-500" },
	{ .compatible = "qcom,sm8350-smmu-500" },
	{ .compatible = "qcom,sm8450-smmu-500" },
	{ }
};

#ifdef CONFIG_ACPI
static struct acpi_platform_list qcom_acpi_platlist[] = {
	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ }
};
#endif

struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

#ifdef CONFIG_ACPI
	if (np == NULL) {
		/* Match platform for ACPI boot */
		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
			return qcom_smmu_create(smmu, &qcom_smmu_impl);
	}
#endif

	/*
	 * Do not change the order of these checks: the adreno SMMU impl must
	 * be matched before the APSS SMMU impl, since the GPU SMMU node can
	 * also be compatible with the generic arm,mmu-500 entries, and
	 * matching the generic impl first would miss the adreno-specific
	 * features.
	 */
	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
		return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);

	if (of_match_node(qcom_smmu_impl_of_match, np))
		return qcom_smmu_create(smmu, &qcom_smmu_impl);

	return smmu;
}

struct arm_smmu_device *qsmmuv2_impl_init(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct qsmmuv2_archdata *data;
	struct platform_device *pdev;
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	pdev = to_platform_device(dev);

	spin_lock_init(&data->atos_lock);
	data->smmu = *smmu;

	if (of_device_is_compatible(smmu->dev->of_node, "qcom,adreno-smmu"))
		data->smmu.impl = &qsmmuv2_adreno_impl;
	else
		data->smmu.impl = &qsmmuv2_impl;

	ret = arm_smmu_parse_impl_def_registers(&data->smmu);
	if (ret)
		return ERR_PTR(ret);

	return &data->smmu;
}