arm-smmu.c 111 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * IOMMU API for ARM architected SMMU implementations.
  4. *
  5. * Copyright (C) 2013 ARM Limited
  6. *
  7. * Author: Will Deacon <[email protected]>
  8. *
  9. * This driver currently supports:
  10. * - SMMUv1 and v2 implementations
  11. * - Stream-matching and stream-indexing
  12. * - v7/v8 long-descriptor format
  13. * - Non-secure access to the SMMU
  14. * - Context fault reporting
  15. * - Extended Stream ID (16 bit)
  16. *
  17. * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  18. */
  19. #define pr_fmt(fmt) "arm-smmu: " fmt
  20. #include <linux/acpi.h>
  21. #include <linux/acpi_iort.h>
  22. #include <linux/bitfield.h>
  23. #include <linux/delay.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/dma-mapping-fast.h>
  26. #include <linux/dma-map-ops.h>
  27. #include <linux/err.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/io.h>
  30. #include <linux/iopoll.h>
  31. #include <linux/module.h>
  32. #include <linux/of.h>
  33. #include <linux/of_address.h>
  34. #include <linux/of_device.h>
  35. #include <linux/pci.h>
  36. #include <linux/platform_device.h>
  37. #include <linux/pm_runtime.h>
  38. #include <linux/ratelimit.h>
  39. #include <linux/slab.h>
  40. #include <soc/qcom/secure_buffer.h>
  41. #include <linux/irq.h>
  42. #include <linux/wait.h>
  43. #include <trace/hooks/iommu.h>
  44. #include <linux/fsl/mc.h>
  45. #include "arm-smmu.h"
  46. #include "../../dma-iommu.h"
  47. #include "../../iommu-logger.h"
  48. #include "../../qcom-dma-iommu-generic.h"
  49. #include "../../qcom-io-pgtable-alloc.h"
  50. #include <linux/qcom-iommu-util.h>
  51. #include <linux/suspend.h>
  52. #define CREATE_TRACE_POINTS
  53. #include "arm-smmu-trace.h"
  54. /*
  55. * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
  56. * global register space are still, in fact, using a hypervisor to mediate it
  57. * by trapping and emulating register accesses. Sadly, some deployed versions
  58. * of said trapping code have bugs wherein they go horribly wrong for stores
  59. * using r31 (i.e. XZR/WZR) as the source register.
  60. */
  61. #define QCOM_DUMMY_VAL -1
  62. #define MSI_IOVA_BASE 0x8000000
  63. #define MSI_IOVA_LENGTH 0x100000
  64. static int force_stage;
  65. module_param(force_stage, int, S_IRUGO);
  66. MODULE_PARM_DESC(force_stage,
  67. "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
  68. static bool disable_bypass =
  69. IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
  70. module_param(disable_bypass, bool, S_IRUGO);
  71. MODULE_PARM_DESC(disable_bypass,
  72. "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
  73. #define s2cr_init_val (struct arm_smmu_s2cr){ \
  74. .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
  75. }
  76. static bool using_legacy_binding, using_generic_binding;
  77. struct arm_smmu_option_prop {
  78. u32 opt;
  79. const char *prop;
  80. };
  81. static struct arm_smmu_option_prop arm_smmu_options[] = {
  82. { ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
  83. { ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
  84. { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
  85. { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
  86. { ARM_SMMU_OPT_CONTEXT_FAULT_RETRY, "qcom,context-fault-retry" },
  87. { ARM_SMMU_OPT_MULTI_MATCH_HANDOFF_SMR, "qcom,multi-match-handoff-smr" },
  88. { ARM_SMMU_OPT_IGNORE_NUMPAGENDXB, "qcom,ignore-numpagendxb" },
  89. { 0, NULL},
  90. };
  91. static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
  92. dma_addr_t iova);
  93. static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
  94. struct qcom_iommu_atos_txn *txn);
  95. static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
  96. static int arm_smmu_setup_default_domain(struct device *dev,
  97. struct iommu_domain *domain);
  98. static void __arm_smmu_flush_iotlb_all(struct iommu_domain *domain, bool force);
  99. static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
  100. {
  101. if (pm_runtime_enabled(smmu->dev))
  102. return pm_runtime_resume_and_get(smmu->dev);
  103. return 0;
  104. }
  105. static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
  106. {
  107. if (pm_runtime_enabled(smmu->dev)) {
  108. pm_runtime_mark_last_busy(smmu->dev);
  109. pm_runtime_put_autosuspend(smmu->dev);
  110. }
  111. }
  112. static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
  113. {
  114. return container_of(dom, struct arm_smmu_domain, domain);
  115. }
  116. static struct arm_smmu_domain *cb_cfg_to_smmu_domain(struct arm_smmu_cfg *cfg)
  117. {
  118. return container_of(cfg, struct arm_smmu_domain, cfg);
  119. }
  120. static void parse_driver_options(struct arm_smmu_device *smmu)
  121. {
  122. int i = 0;
  123. do {
  124. if (of_property_read_bool(smmu->dev->of_node,
  125. arm_smmu_options[i].prop)) {
  126. smmu->options |= arm_smmu_options[i].opt;
  127. dev_dbg(smmu->dev, "option %s\n",
  128. arm_smmu_options[i].prop);
  129. }
  130. } while (arm_smmu_options[++i].opt);
  131. }
  132. static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
  133. {
  134. if (smmu_domain->force_coherent_walk)
  135. return true;
  136. else if (smmu_domain->smmu && smmu_domain->smmu->dev)
  137. return dev_is_dma_coherent(smmu_domain->smmu->dev);
  138. return false;
  139. }
  140. static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain)
  141. {
  142. return (smmu_domain->secure_vmid != VMID_INVAL);
  143. }
  144. #ifdef CONFIG_ARM_SMMU_SELFTEST
  145. static int selftest;
  146. module_param_named(selftest, selftest, int, 0644);
  147. static int irq_count;
  148. struct arm_smmu_cf_selftest_data {
  149. struct arm_smmu_device *smmu;
  150. int cbndx;
  151. };
  152. static DECLARE_WAIT_QUEUE_HEAD(wait_int);
  153. static irqreturn_t arm_smmu_cf_selftest(int irq, void *data)
  154. {
  155. u32 fsr;
  156. struct irq_data *irq_data = irq_get_irq_data(irq);
  157. struct arm_smmu_cf_selftest_data *cb_data = data;
  158. struct arm_smmu_device *smmu = cb_data->smmu;
  159. int idx = cb_data->cbndx;
  160. unsigned long hwirq = ULONG_MAX;
  161. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  162. irq_count++;
  163. if (irq_data)
  164. hwirq = irq_data->hwirq;
  165. pr_info("Interrupt (irq:%d hwirq:%ld) received, fsr:0x%x\n",
  166. irq, hwirq, fsr);
  167. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
  168. wake_up(&wait_int);
  169. return IRQ_HANDLED;
  170. }
  171. static void arm_smmu_interrupt_selftest(struct arm_smmu_device *smmu)
  172. {
  173. int cb;
  174. int cb_count = 0;
  175. struct arm_smmu_cf_selftest_data *cb_data;
  176. if (!selftest)
  177. return;
  178. cb = smmu->num_s2_context_banks;
  179. if (smmu->version < ARM_SMMU_V2)
  180. return;
  181. cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
  182. if (!cb_data)
  183. return;
  184. cb_data->smmu = smmu;
  185. for_each_clear_bit_from(cb, smmu->context_map,
  186. smmu->num_context_banks) {
  187. int irq;
  188. int ret;
  189. u32 reg;
  190. u32 reg_orig;
  191. int irq_cnt;
  192. irq = smmu->irqs[cb];
  193. cb_data->cbndx = cb;
  194. ret = request_threaded_irq(irq, NULL, arm_smmu_cf_selftest,
  195. IRQF_ONESHOT | IRQF_SHARED,
  196. "arm-smmu-context-fault", cb_data);
  197. if (ret < 0) {
  198. dev_err(smmu->dev,
  199. "Failed to request cntx IRQ %d (%u)\n",
  200. cb, irq);
  201. continue;
  202. }
  203. cb_count++;
  204. irq_cnt = irq_count;
  205. reg_orig = arm_smmu_cb_read(smmu, cb, ARM_SMMU_CB_SCTLR);
  206. reg = reg_orig | ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE;
  207. arm_smmu_cb_write(smmu, cb, ARM_SMMU_CB_SCTLR, reg);
  208. dev_info(smmu->dev, "Testing cntx %d irq %d\n", cb, irq);
  209. /* Make sure ARM_SMMU_CB_SCTLR is configured */
  210. wmb();
  211. arm_smmu_cb_write(smmu, cb, ARM_SMMU_CB_FSRRESTORE,
  212. ARM_SMMU_FSR_TF);
  213. if (!wait_event_timeout(wait_int, (irq_count > irq_cnt),
  214. msecs_to_jiffies(1000))) {
  215. u32 fsr;
  216. fsr = arm_smmu_cb_read(smmu, cb, ARM_SMMU_CB_FSR);
  217. dev_info(smmu->dev, "timeout cb:%d, irq:%d, fsr:0x%x\n",
  218. cb, irq_cnt, fsr);
  219. if (!fsr)
  220. dev_err(smmu->dev, "SCTLR = 0x%08x\n",
  221. arm_smmu_cb_read(smmu, cb,
  222. ARM_SMMU_CB_SCTLR));
  223. }
  224. /* Make sure ARM_SMMU_CB_FSRRESTORE is written to */
  225. wmb();
  226. arm_smmu_cb_write(smmu, cb, ARM_SMMU_CB_SCTLR, reg_orig);
  227. free_irq(irq, cb_data);
  228. }
  229. kfree(cb_data);
  230. dev_info(smmu->dev,
  231. "Interrupt selftest completed...\n");
  232. dev_info(smmu->dev,
  233. "Tested %d contexts, received %d interrupts\n",
  234. cb_count, irq_count);
  235. WARN_ON(cb_count != irq_count);
  236. irq_count = 0;
  237. }
  238. #else
  239. static void arm_smmu_interrupt_selftest(struct arm_smmu_device *smmu)
  240. {
  241. }
  242. #endif
  243. static struct platform_driver arm_smmu_driver;
  244. static struct qcom_iommu_ops arm_smmu_ops;
  245. #ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
  246. static struct device_node *dev_get_dev_node(struct device *dev)
  247. {
  248. if (dev_is_pci(dev)) {
  249. struct pci_bus *bus = to_pci_dev(dev)->bus;
  250. while (!pci_is_root_bus(bus))
  251. bus = bus->parent;
  252. return of_node_get(bus->bridge->parent->of_node);
  253. }
  254. return of_node_get(dev->of_node);
  255. }
  256. static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
  257. {
  258. *((__be32 *)data) = cpu_to_be32(alias);
  259. return 0; /* Continue walking */
  260. }
  261. static int __find_legacy_master_phandle(struct device *dev, void *data)
  262. {
  263. struct of_phandle_iterator *it = *(void **)data;
  264. struct device_node *np = it->node;
  265. int err;
  266. of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
  267. "#stream-id-cells", -1)
  268. if (it->node == np) {
  269. *(void **)data = dev;
  270. return 1;
  271. }
  272. it->node = np;
  273. return err == -ENOENT ? 0 : err;
  274. }
  275. static int arm_smmu_register_legacy_master(struct device *dev,
  276. struct arm_smmu_device **smmu)
  277. {
  278. struct device *smmu_dev;
  279. struct device_node *np;
  280. struct of_phandle_iterator it;
  281. void *data = &it;
  282. u32 *sids;
  283. __be32 pci_sid;
  284. int err;
  285. np = dev_get_dev_node(dev);
  286. if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
  287. of_node_put(np);
  288. return -ENODEV;
  289. }
  290. it.node = np;
  291. err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
  292. __find_legacy_master_phandle);
  293. smmu_dev = data;
  294. of_node_put(np);
  295. if (err == 0)
  296. return -ENODEV;
  297. if (err < 0)
  298. return err;
  299. if (dev_is_pci(dev)) {
  300. /* "mmu-masters" assumes Stream ID == Requester ID */
  301. pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
  302. &pci_sid);
  303. it.cur = &pci_sid;
  304. it.cur_count = 1;
  305. }
  306. err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
  307. &arm_smmu_ops.iommu_ops);
  308. if (err)
  309. return err;
  310. sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
  311. if (!sids)
  312. return -ENOMEM;
  313. *smmu = dev_get_drvdata(smmu_dev);
  314. of_phandle_iterator_args(&it, sids, it.cur_count);
  315. err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
  316. kfree(sids);
  317. return err;
  318. }
  319. #else
  320. static int arm_smmu_register_legacy_master(struct device *dev,
  321. struct arm_smmu_device **smmu)
  322. {
  323. return -ENODEV;
  324. }
  325. #endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
  326. static int __arm_smmu_alloc_cb(unsigned long *map, int start, int end,
  327. struct device *dev,
  328. struct arm_smmu_domain *smmu_domain)
  329. {
  330. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  331. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  332. struct arm_smmu_device *smmu = cfg->smmu;
  333. int idx;
  334. int i;
  335. for_each_cfg_sme(cfg, fwspec, i, idx) {
  336. if (smmu->s2crs[idx].pinned)
  337. return smmu->s2crs[idx].cbndx;
  338. }
  339. return __arm_smmu_alloc_bitmap(map, start, end);
  340. }
  341. static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
  342. {
  343. clear_bit(idx, map);
  344. }
  345. /* Wait for any pending TLB invalidations to complete */
  346. static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
  347. int sync, int status)
  348. {
  349. unsigned int inc, delay;
  350. u32 reg;
  351. /*
  352. * Allowing an unbounded number of sync requests to be submitted when a
  353. * TBU is not processing sync requests can cause a TBU's command queue
  354. * to fill up. Once the queue is full, subsequent sync requests can
  355. * stall the CPU indefinitely. Avoid this by gating subsequent sync
  356. * requests after the first sync timeout on an SMMU.
  357. */
  358. if (IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG) &&
  359. test_bit(0, &smmu->sync_timed_out))
  360. return -EINVAL;
  361. arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
  362. for (delay = 1, inc = 1; delay < TLB_LOOP_TIMEOUT; delay += inc) {
  363. reg = arm_smmu_readl(smmu, page, status);
  364. if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
  365. return 0;
  366. cpu_relax();
  367. udelay(inc);
  368. if (inc < TLB_LOOP_INC_MAX)
  369. inc *= 2;
  370. }
  371. if (IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG) &&
  372. test_and_set_bit_lock(0, &smmu->sync_timed_out))
  373. goto out;
  374. trace_tlbsync_timeout(smmu->dev);
  375. if (smmu->impl && smmu->impl->tlb_sync_timeout)
  376. smmu->impl->tlb_sync_timeout(smmu);
  377. out:
  378. return -EINVAL;
  379. }
  380. static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
  381. {
  382. unsigned long flags;
  383. spin_lock_irqsave(&smmu->global_sync_lock, flags);
  384. if (__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
  385. ARM_SMMU_GR0_sTLBGSTATUS))
  386. dev_err_ratelimited(smmu->dev,
  387. "TLB global sync failed!\n");
  388. spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
  389. }
  390. static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
  391. {
  392. struct arm_smmu_device *smmu = smmu_domain->smmu;
  393. unsigned long flags;
  394. spin_lock_irqsave(&smmu_domain->sync_lock, flags);
  395. if (__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
  396. ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS))
  397. dev_err_ratelimited(smmu->dev,
  398. "TLB sync on cb%d failed for device %s\n",
  399. smmu_domain->cfg.cbndx,
  400. dev_name(smmu_domain->dev));
  401. spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);
  402. }
  403. static void arm_smmu_tlb_inv_context_s1(void *cookie)
  404. {
  405. struct arm_smmu_domain *smmu_domain = cookie;
  406. /*
  407. * The TLBI write may be relaxed, so ensure that PTEs cleared by the
  408. * current CPU are visible beforehand.
  409. */
  410. wmb();
  411. trace_tlbi_start(smmu_domain);
  412. arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
  413. ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
  414. arm_smmu_tlb_sync_context(smmu_domain);
  415. trace_tlbi_end(smmu_domain);
  416. }
  417. static void arm_smmu_tlb_inv_context_s2(void *cookie)
  418. {
  419. struct arm_smmu_domain *smmu_domain = cookie;
  420. struct arm_smmu_device *smmu = smmu_domain->smmu;
  421. /* See above */
  422. wmb();
  423. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
  424. arm_smmu_tlb_sync_global(smmu);
  425. }
  426. static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
  427. size_t granule, void *cookie, int reg)
  428. {
  429. struct arm_smmu_domain *smmu_domain = cookie;
  430. struct arm_smmu_device *smmu = smmu_domain->smmu;
  431. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  432. int idx = cfg->cbndx;
  433. if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
  434. wmb();
  435. if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
  436. iova = (iova >> 12) << 12;
  437. iova |= cfg->asid;
  438. do {
  439. arm_smmu_cb_write(smmu, idx, reg, iova);
  440. iova += granule;
  441. } while (size -= granule);
  442. } else {
  443. iova >>= 12;
  444. iova |= (u64)cfg->asid << 48;
  445. do {
  446. arm_smmu_cb_writeq(smmu, idx, reg, iova);
  447. iova += granule >> 12;
  448. } while (size -= granule);
  449. }
  450. }
  451. static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
  452. size_t granule, void *cookie, int reg)
  453. {
  454. struct arm_smmu_domain *smmu_domain = cookie;
  455. struct arm_smmu_device *smmu = smmu_domain->smmu;
  456. int idx = smmu_domain->cfg.cbndx;
  457. if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
  458. wmb();
  459. iova >>= 12;
  460. do {
  461. if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
  462. arm_smmu_cb_writeq(smmu, idx, reg, iova);
  463. else
  464. arm_smmu_cb_write(smmu, idx, reg, iova);
  465. iova += granule >> 12;
  466. } while (size -= granule);
  467. }
  468. static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
  469. size_t granule, void *cookie)
  470. {
  471. struct arm_smmu_domain *smmu_domain = cookie;
  472. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  473. if (cfg->flush_walk_prefer_tlbiasid) {
  474. arm_smmu_tlb_inv_context_s1(cookie);
  475. } else {
  476. arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
  477. ARM_SMMU_CB_S1_TLBIVA);
  478. arm_smmu_tlb_sync_context(cookie);
  479. }
  480. }
  481. static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
  482. unsigned long iova, size_t granule,
  483. void *cookie)
  484. {
  485. arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
  486. ARM_SMMU_CB_S1_TLBIVAL);
  487. }
  488. static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
  489. size_t granule, void *cookie)
  490. {
  491. arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
  492. ARM_SMMU_CB_S2_TLBIIPAS2);
  493. arm_smmu_tlb_sync_context(cookie);
  494. }
  495. static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
  496. unsigned long iova, size_t granule,
  497. void *cookie)
  498. {
  499. arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
  500. ARM_SMMU_CB_S2_TLBIIPAS2L);
  501. }
  502. static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
  503. size_t granule, void *cookie)
  504. {
  505. arm_smmu_tlb_inv_context_s2(cookie);
  506. }
  507. /*
  508. * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
  509. * almost negligible, but the benefit of getting the first one in as far ahead
  510. * of the sync as possible is significant, hence we don't just make this a
  511. * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
  512. * think.
  513. */
  514. static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
  515. unsigned long iova, size_t granule,
  516. void *cookie)
  517. {
  518. struct arm_smmu_domain *smmu_domain = cookie;
  519. struct arm_smmu_device *smmu = smmu_domain->smmu;
  520. if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
  521. wmb();
  522. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
  523. }
  524. static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
  525. .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
  526. .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
  527. .tlb_add_page = arm_smmu_tlb_add_page_s1,
  528. };
  529. static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
  530. .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
  531. .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
  532. .tlb_add_page = arm_smmu_tlb_add_page_s2,
  533. };
  534. static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
  535. .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
  536. .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1,
  537. .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
  538. };
  539. static void print_fault_regs(struct arm_smmu_domain *smmu_domain,
  540. struct arm_smmu_device *smmu, int idx)
  541. {
  542. u32 fsr, fsynr0, fsynr1, cbfrsynra;
  543. unsigned long iova;
  544. struct arm_smmu_cfg *cfg = smmu->cbs[idx].cfg;
  545. bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
  546. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  547. fsynr0 = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
  548. fsynr1 = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR1);
  549. iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
  550. cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
  551. dev_err(smmu->dev, "Unhandled arm-smmu context fault from %s!\n",
  552. dev_name(smmu_domain->dev));
  553. dev_err(smmu->dev, "FAR = 0x%016lx\n",
  554. iova);
  555. dev_err(smmu->dev, "PAR = 0x%pK\n",
  556. (void *) arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR));
  557. dev_err(smmu->dev,
  558. "FSR = 0x%08x [%s%s%s%s%s%s%s%s%s%s]\n",
  559. fsr,
  560. (fsr & ARM_SMMU_FSR_TF) ? (fsynr0 & ARM_SMMU_FSYNR0_WNR ?
  561. "TF W " : "TF R ") : "",
  562. (fsr & ARM_SMMU_FSR_AFF) ? "AFF " : "",
  563. (fsr & ARM_SMMU_FSR_PF) ? (fsynr0 & ARM_SMMU_FSYNR0_WNR ?
  564. "PF W " : "PF R ") : "",
  565. (fsr & ARM_SMMU_FSR_EF) ? "EF " : "",
  566. (fsr & ARM_SMMU_FSR_TLBMCF) ? "TLBMCF " : "",
  567. (fsr & ARM_SMMU_FSR_TLBLKF) ? "TLBLKF " : "",
  568. (fsr & ARM_SMMU_FSR_ASF) ? "ASF " : "",
  569. (fsr & ARM_SMMU_FSR_UUT) ? "UUT " : "",
  570. (fsr & ARM_SMMU_FSR_SS) ? "SS " : "",
  571. (fsr & ARM_SMMU_FSR_MULTI) ? "MULTI " : "");
  572. dev_err(smmu->dev, "FSYNR0 = 0x%x\n", fsynr0);
  573. dev_err(smmu->dev, "FSYNR1 = 0x%x\n", fsynr1);
  574. dev_err(smmu->dev, "context bank# = 0x%x\n", idx);
  575. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
  576. dev_err(smmu->dev, "TTBR0 = 0x%pK\n",
  577. (void *) (unsigned long)
  578. arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_TTBR0));
  579. dev_err(smmu->dev, "TTBR1 = 0x%pK\n",
  580. (void *) (unsigned long)
  581. arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_TTBR1));
  582. } else {
  583. dev_err(smmu->dev, "TTBR0 = 0x%pK\n",
  584. (void *) arm_smmu_cb_readq(smmu, idx,
  585. ARM_SMMU_CB_TTBR0));
  586. if (stage1)
  587. dev_err(smmu->dev, "TTBR1 = 0x%pK\n",
  588. (void *) arm_smmu_cb_readq(smmu, idx,
  589. ARM_SMMU_CB_TTBR1));
  590. }
  591. dev_err(smmu->dev, "SCTLR = 0x%08x ACTLR = 0x%08x\n",
  592. arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR),
  593. arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_ACTLR));
  594. dev_err(smmu->dev, "CBAR = 0x%08x\n",
  595. arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBAR(idx)));
  596. dev_err(smmu->dev, "MAIR0 = 0x%08x MAIR1 = 0x%08x\n",
  597. arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_S1_MAIR0),
  598. arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_S1_MAIR1));
  599. dev_err(smmu->dev, "SID = 0x%x\n",
  600. cbfrsynra & CBFRSYNRA_SID_MASK);
  601. dev_err(smmu->dev, "Client info: BID=0x%lx, PID=0x%lx, MID=0x%lx\n",
  602. FIELD_GET(ARM_SMMU_FSYNR1_BID, fsynr1),
  603. FIELD_GET(ARM_SMMU_FSYNR1_PID, fsynr1),
  604. FIELD_GET(ARM_SMMU_FSYNR1_MID, fsynr1));
  605. }
  606. /*
  607. * Iommu HW has generated a fault. If HW and SW states are in sync,
  608. * then a SW page table walk should yield 0.
  609. *
  610. * WARNING!!! This check is racy!!!!
  611. * For a faulting address x, the dma layer mapping may map address x
  612. * into the iommu page table in parallel to the fault handler running.
  613. * This is frequently seen due to dma-iommu's address reuse policy.
  614. * Thus, arm_smmu_iova_to_phys() returning nonzero is not necessarily
  615. * indicative of an issue.
  616. */
  617. static void arm_smmu_verify_fault(struct arm_smmu_domain *smmu_domain,
  618. struct arm_smmu_device *smmu, int idx)
  619. {
  620. u32 fsynr, cbfrsynra;
  621. unsigned long iova;
  622. struct iommu_domain *domain = &smmu_domain->domain;
  623. phys_addr_t phys_soft;
  624. phys_addr_t phys_stimu, phys_hard_priv, phys_stimu_post_tlbiall;
  625. unsigned long flags = 0;
  626. struct qcom_iommu_atos_txn txn = {0};
  627. fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
  628. iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
  629. cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
  630. phys_soft = arm_smmu_iova_to_phys(domain, iova);
  631. dev_err(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
  632. /* Get the transaction type */
  633. if (fsynr & ARM_SMMU_FSYNR0_WNR)
  634. flags |= IOMMU_TRANS_WRITE;
  635. if (fsynr & ARM_SMMU_FSYNR0_PNU)
  636. flags |= IOMMU_TRANS_PRIV;
  637. if (fsynr & ARM_SMMU_FSYNR0_IND)
  638. flags |= IOMMU_TRANS_INST;
  639. txn.addr = iova;
  640. txn.flags = flags;
  641. txn.id = cbfrsynra & CBFRSYNRA_SID_MASK;
  642. /* Now replicate the faulty transaction */
  643. phys_stimu = arm_smmu_iova_to_phys_hard(domain, &txn);
  644. /*
  645. * If the replicated transaction fails, it could be due to legitimate
  646. * unmapped access (translation fault) or stale TLB with insufficient
  647. * privileges (permission fault). Try ATOS operation with full access
  648. * privileges to rule out stale entry with insufficient privileges case.
  649. */
  650. if (!phys_stimu) {
  651. txn.flags = QCOM_IOMMU_ATOS_TRANS_DEFAULT |
  652. QCOM_IOMMU_ATOS_TRANS_PRIV;
  653. phys_hard_priv = arm_smmu_iova_to_phys_hard(domain, &txn);
  654. }
  655. /* Now replicate the faulty transaction post tlbiall */
  656. iommu_flush_iotlb_all(domain);
  657. txn.flags = flags;
  658. phys_stimu_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, &txn);
  659. if (!phys_stimu && phys_hard_priv) {
  660. dev_err(smmu->dev,
  661. "ATOS results differed across access privileges...\n"
  662. "Before: %pa After: %pa\n",
  663. &phys_stimu, &phys_hard_priv);
  664. }
  665. if (phys_stimu != phys_stimu_post_tlbiall) {
  666. dev_err(smmu->dev,
  667. "ATOS results differed across TLBIALL...\n"
  668. "Before: %pa After: %pa\n", &phys_stimu,
  669. &phys_stimu_post_tlbiall);
  670. }
  671. dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
  672. phys_stimu ? &phys_stimu : &phys_stimu_post_tlbiall);
  673. }
  674. static int report_iommu_fault_helper(struct arm_smmu_domain *smmu_domain,
  675. struct arm_smmu_device *smmu, int idx)
  676. {
  677. u32 fsr, fsynr;
  678. unsigned long iova;
  679. int flags;
  680. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  681. fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
  682. iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
  683. flags = fsynr & ARM_SMMU_FSYNR0_WNR ?
  684. IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
  685. if (fsr & ARM_SMMU_FSR_TF)
  686. flags |= IOMMU_FAULT_TRANSLATION;
  687. if (fsr & ARM_SMMU_FSR_PF)
  688. flags |= IOMMU_FAULT_PERMISSION;
  689. if (fsr & ARM_SMMU_FSR_EF)
  690. flags |= IOMMU_FAULT_EXTERNAL;
  691. if (fsr & ARM_SMMU_FSR_SS)
  692. flags |= IOMMU_FAULT_TRANSACTION_STALLED;
  693. return report_iommu_fault(&smmu_domain->domain,
  694. smmu->dev, iova, flags);
  695. }
  696. static int arm_smmu_get_fault_ids(struct iommu_domain *domain,
  697. struct qcom_iommu_fault_ids *f_ids)
  698. {
  699. struct arm_smmu_domain *smmu_domain;
  700. struct arm_smmu_device *smmu;
  701. u32 fsr, fsynr1;
  702. int idx, ret;
  703. if (!domain || !f_ids)
  704. return -EINVAL;
  705. smmu_domain = to_smmu_domain(domain);
  706. smmu = smmu_domain->smmu;
  707. idx = smmu_domain->cfg.cbndx;
  708. ret = arm_smmu_rpm_get(smmu);
  709. if (ret < 0)
  710. return ret;
  711. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  712. if (!(fsr & ARM_SMMU_FSR_FAULT)) {
  713. arm_smmu_rpm_put(smmu);
  714. return -EINVAL;
  715. }
  716. fsynr1 = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR1);
  717. arm_smmu_rpm_put(smmu);
  718. f_ids->bid = FIELD_GET(ARM_SMMU_FSYNR1_BID, fsynr1);
  719. f_ids->pid = FIELD_GET(ARM_SMMU_FSYNR1_PID, fsynr1);
  720. f_ids->mid = FIELD_GET(ARM_SMMU_FSYNR1_MID, fsynr1);
  721. return 0;
  722. }
  723. #ifdef CONFIG_ARM_SMMU_CONTEXT_FAULT_RETRY
  724. /*
  725. * Retry faulting address after tlb invalidate.
  726. * Applicable to: Waipio
  727. */
  728. static irqreturn_t arm_smmu_context_fault_retry(struct arm_smmu_domain *smmu_domain)
  729. {
  730. struct arm_smmu_device *smmu = smmu_domain->smmu;
  731. int idx = smmu_domain->cfg.cbndx;
  732. u64 iova;
  733. u32 fsr;
  734. if (!(smmu->options & ARM_SMMU_OPT_CONTEXT_FAULT_RETRY) ||
  735. (smmu_domain->fault_model.no_stall))
  736. return IRQ_NONE;
  737. iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
  738. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  739. if (iova != smmu_domain->prev_fault_address ||
  740. !smmu_domain->fault_retry_counter) {
  741. smmu_domain->prev_fault_address = iova;
  742. smmu_domain->fault_retry_counter++;
  743. arm_smmu_tlb_inv_context_s1(smmu_domain);
  744. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
  745. /*
  746. * Barrier required to ensure that the FSR is cleared
  747. * before resuming SMMU operation
  748. */
  749. wmb();
  750. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
  751. ARM_SMMU_RESUME_RESUME);
  752. return IRQ_HANDLED;
  753. }
  754. return IRQ_NONE;
  755. }
  756. #else
  757. static irqreturn_t arm_smmu_context_fault_retry(struct arm_smmu_domain *smmu_domain)
  758. {
  759. return IRQ_NONE;
  760. }
  761. #endif
  762. static __always_inline void __sec_debug_bug_on_enosys(struct arm_smmu_domain *smmu_domain, int idx);
  763. static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
  764. {
  765. u32 fsr;
  766. struct iommu_domain *domain = dev;
  767. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  768. struct arm_smmu_device *smmu = smmu_domain->smmu;
  769. int idx = smmu_domain->cfg.cbndx;
  770. static DEFINE_RATELIMIT_STATE(_rs,
  771. DEFAULT_RATELIMIT_INTERVAL,
  772. DEFAULT_RATELIMIT_BURST);
  773. int ret;
  774. ret = arm_smmu_rpm_get(smmu);
  775. if (ret < 0)
  776. return IRQ_NONE;
  777. fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
  778. if (!(fsr & ARM_SMMU_FSR_FAULT)) {
  779. ret = IRQ_NONE;
  780. goto out_power_off;
  781. }
  782. if ((smmu->options & ARM_SMMU_OPT_FATAL_ASF) &&
  783. (fsr & ARM_SMMU_FSR_ASF)) {
  784. dev_err(smmu->dev,
  785. "Took an address size fault. Refusing to recover.\n");
  786. BUG();
  787. }
  788. ret = arm_smmu_context_fault_retry(smmu_domain);
  789. if (ret == IRQ_HANDLED)
  790. goto out_power_off;
  791. /*
  792. * If the fault helper returns -ENOSYS, then no client fault helper was
  793. * registered. In that case, print the default report.
  794. *
  795. * If the client returns -EBUSY, do not clear FSR and do not RESUME
  796. * if stalled. This is required to keep the IOMMU client stalled on
  797. * the outstanding fault. This gives the client a chance to take any
  798. * debug action and then terminate the stalled transaction.
  799. * So, the sequence in case of stall on fault should be:
  800. * 1) Do not clear FSR or write to RESUME here
  801. * 2) Client takes any debug action
  802. * 3) Client terminates the stalled transaction and resumes the IOMMU
  803. * 4) Client clears FSR. The FSR should only be cleared after 3) and
  804. * not before so that the fault remains outstanding. This ensures
  805. * SCTLR.HUPCF has the desired effect if subsequent transactions also
  806. * need to be terminated.
  807. */
  808. ret = report_iommu_fault_helper(smmu_domain, smmu, idx);
  809. if (ret == -ENOSYS) {
  810. if (__ratelimit(&_rs)) {
  811. print_fault_regs(smmu_domain, smmu, idx);
  812. arm_smmu_verify_fault(smmu_domain, smmu, idx);
  813. }
  814. #if IS_ENABLED(CONFIG_SEC_DEBUG)
  815. __sec_debug_bug_on_enosys(smmu_domain, idx);
  816. #else
  817. BUG_ON(!smmu_domain->fault_model.non_fatal);
  818. #endif
  819. }
  820. if (ret != -EBUSY) {
  821. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
  822. /*
  823. * Barrier required to ensure that the FSR is cleared
  824. * before resuming SMMU operation
  825. */
  826. wmb();
  827. if (fsr & ARM_SMMU_FSR_SS)
  828. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
  829. ARM_SMMU_RESUME_TERMINATE);
  830. }
  831. ret = IRQ_HANDLED;
  832. out_power_off:
  833. arm_smmu_rpm_put(smmu);
  834. return ret;
  835. }
  836. static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
  837. {
  838. u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
  839. struct arm_smmu_device *smmu = dev;
  840. static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
  841. DEFAULT_RATELIMIT_BURST);
  842. int ret;
  843. ret = arm_smmu_rpm_get(smmu);
  844. if (ret < 0)
  845. return IRQ_NONE;
  846. gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
  847. gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
  848. gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
  849. gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
  850. if (!gfsr) {
  851. arm_smmu_rpm_put(smmu);
  852. return IRQ_NONE;
  853. }
  854. if (__ratelimit(&rs)) {
  855. if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
  856. (gfsr & ARM_SMMU_sGFSR_USF))
  857. dev_err(smmu->dev,
  858. "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
  859. (u16)gfsynr1);
  860. else
  861. dev_err(smmu->dev,
  862. "Unexpected global fault, this could be serious\n");
  863. dev_err(smmu->dev,
  864. "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
  865. gfsr, gfsynr0, gfsynr1, gfsynr2);
  866. }
  867. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
  868. arm_smmu_rpm_put(smmu);
  869. return IRQ_HANDLED;
  870. }
  871. static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
  872. struct io_pgtable_cfg *pgtbl_cfg)
  873. {
  874. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  875. struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
  876. bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
  877. cb->cfg = cfg;
  878. /* TCR */
  879. if (stage1) {
  880. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
  881. cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
  882. } else {
  883. cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
  884. cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
  885. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
  886. cb->tcr[1] |= ARM_SMMU_TCR2_AS;
  887. else
  888. cb->tcr[0] |= ARM_SMMU_TCR_EAE;
  889. }
  890. } else {
  891. cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
  892. }
  893. /* TTBRs */
  894. if (stage1) {
  895. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
  896. cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
  897. cb->ttbr[1] = 0;
  898. } else {
  899. cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
  900. cfg->asid);
  901. cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
  902. cfg->asid);
  903. if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
  904. cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
  905. else
  906. cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
  907. }
  908. } else {
  909. cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
  910. }
  911. /* MAIRs (stage-1 only) */
  912. if (stage1) {
  913. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
  914. cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
  915. cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
  916. } else {
  917. cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
  918. cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
  919. }
  920. }
  921. memset(&cfg->sctlr, 0, sizeof(cfg->sctlr));
  922. /*
  923. * Override cacheability, shareability, r/w allocation for
  924. * clients who are io-coherent. Otherwise set NSH to force io-coherency
  925. * to be disabled.
  926. * These settings only take effect during bypass mode, when sctlr.M = 0
  927. */
  928. if (of_dma_is_coherent(smmu_domain->dev->of_node)) {
  929. cfg->sctlr.wacfg = ARM_SMMU_SCTLR_WACFG_WA;
  930. cfg->sctlr.racfg = ARM_SMMU_SCTLR_RACFG_RA;
  931. cfg->sctlr.shcfg = ARM_SMMU_SCTLR_SHCFG_OSH;
  932. cfg->sctlr.mtcfg = 1;
  933. cfg->sctlr.memattr = ARM_SMMU_SCTLR_MEM_ATTR_OISH_WB_CACHE;
  934. } else {
  935. cfg->sctlr.shcfg = ARM_SMMU_SCTLR_SHCFG_NSH;
  936. }
  937. cfg->sctlr.cfre = !smmu_domain->fault_model.no_cfre;
  938. cfg->sctlr.cfcfg = !smmu_domain->fault_model.no_stall;
  939. cfg->sctlr.hupcf = smmu_domain->fault_model.hupcf;
  940. if ((!smmu_domain->mapping_cfg.s1_bypass && !smmu_domain->delayed_s1_trans_enable) ||
  941. !stage1)
  942. cfg->sctlr.m = 1;
  943. cb->sctlr = arm_smmu_lpae_sctlr(cfg);
  944. }
  945. void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
  946. {
  947. u32 reg;
  948. bool stage1;
  949. struct arm_smmu_cb *cb = &smmu->cbs[idx];
  950. struct arm_smmu_cfg *cfg = cb->cfg;
  951. /* Unassigned context banks only need disabling */
  952. if (!cfg) {
  953. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
  954. return;
  955. }
  956. stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
  957. /* CBA2R */
  958. if (smmu->version > ARM_SMMU_V1) {
  959. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
  960. reg = ARM_SMMU_CBA2R_VA64;
  961. else
  962. reg = 0;
  963. /* 16-bit VMIDs live in CBA2R */
  964. if (smmu->features & ARM_SMMU_FEAT_VMID16)
  965. reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
  966. arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
  967. }
  968. /* CBAR */
  969. reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
  970. if (smmu->version < ARM_SMMU_V2)
  971. reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
  972. /*
  973. * Use the weakest shareability/memory types, so they are
  974. * overridden by the ttbcr/pte.
  975. */
  976. if (stage1) {
  977. reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
  978. ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
  979. FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
  980. ARM_SMMU_CBAR_S1_MEMATTR_WB);
  981. } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
  982. /* 8-bit VMIDs live in CBAR */
  983. reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
  984. }
  985. arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
  986. /*
  987. * TCR
  988. * We must write this before the TTBRs, since it determines the
  989. * access behaviour of some fields (in particular, ASID[15:8]).
  990. */
  991. if (stage1 && smmu->version > ARM_SMMU_V1)
  992. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
  993. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
  994. /* TTBRs */
  995. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
  996. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
  997. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
  998. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
  999. } else {
  1000. arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
  1001. if (stage1)
  1002. arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
  1003. cb->ttbr[1]);
  1004. }
  1005. /* MAIRs (stage-1 only) */
  1006. if (stage1) {
  1007. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
  1008. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
  1009. }
  1010. /* SCTLR */
  1011. if (smmu->impl && smmu->impl->write_sctlr)
  1012. smmu->impl->write_sctlr(smmu, idx, cb->sctlr);
  1013. else
  1014. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, cb->sctlr);
  1015. }
  1016. /* This function assumes that the domain's init mutex is held */
  1017. static int arm_smmu_get_dma_cookie(struct device *dev,
  1018. struct arm_smmu_domain *smmu_domain,
  1019. struct io_pgtable_ops *pgtbl_ops)
  1020. {
  1021. bool fast = smmu_domain->mapping_cfg.fast;
  1022. /* DMA cookie is allocated by the IOMMU core for DMA domains. */
  1023. return fast ? fast_smmu_init_mapping(dev, &smmu_domain->domain, pgtbl_ops) : 0;
  1024. }
  1025. static void arm_smmu_put_dma_cookie(struct iommu_domain *domain)
  1026. {
  1027. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1028. /* DMA cookie is freed by the IOMMU core for DMA domains. */
  1029. if (smmu_domain->mapping_cfg.fast)
  1030. fast_smmu_put_dma_cookie(domain);
  1031. }
  1032. static void arm_smmu_log_new_table(void *cookie, void *virt, unsigned long iova, size_t granule)
  1033. {
  1034. struct arm_smmu_domain *smmu_domain = cookie;
  1035. trace_iommu_pgtable_add(smmu_domain, iova, __pa(virt), granule);
  1036. }
  1037. static void arm_smmu_log_remove_table(void *cookie, void *virt, unsigned long iova, size_t granule)
  1038. {
  1039. struct arm_smmu_domain *smmu_domain = cookie;
  1040. trace_iommu_pgtable_remove(smmu_domain, iova, __pa(virt), granule);
  1041. }
  1042. static void arm_smmu_tlb_add_walk_page(void *cookie, void *virt)
  1043. {
  1044. struct arm_smmu_domain *smmu_domain = cookie;
  1045. struct page *page = virt_to_page(virt);
  1046. unsigned long flags;
  1047. spin_lock_irqsave(&smmu_domain->iotlb_gather_lock, flags);
  1048. smmu_domain->deferred_flush = true;
  1049. page->lru.next = smmu_domain->freelist;
  1050. smmu_domain->freelist = &page->lru;
  1051. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1052. }
  1053. static void arm_smmu_qcom_tlb_add_inv(void *cookie)
  1054. {
  1055. struct arm_smmu_domain *smmu_domain = cookie;
  1056. unsigned long flags;
  1057. spin_lock_irqsave(&smmu_domain->iotlb_gather_lock, flags);
  1058. if (smmu_domain->skip_tlb_management) {
  1059. smmu_domain->deferred_flush = false;
  1060. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1061. return;
  1062. }
  1063. smmu_domain->deferred_flush = true;
  1064. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1065. }
  1066. static void arm_smmu_qcom_tlb_sync(void *cookie)
  1067. {
  1068. struct arm_smmu_domain *smmu_domain = cookie;
  1069. unsigned long flags;
  1070. spin_lock_irqsave(&smmu_domain->iotlb_gather_lock, flags);
  1071. if (smmu_domain->skip_tlb_management) {
  1072. smmu_domain->deferred_flush = false;
  1073. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1074. return;
  1075. }
  1076. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1077. arm_smmu_rpm_get(smmu_domain->smmu);
  1078. __arm_smmu_flush_iotlb_all(&smmu_domain->domain, false);
  1079. arm_smmu_rpm_put(smmu_domain->smmu);
  1080. }
  1081. static const struct qcom_iommu_pgtable_log_ops arm_smmu_pgtable_log_ops = {
  1082. .log_new_table = arm_smmu_log_new_table,
  1083. .log_remove_table = arm_smmu_log_remove_table,
  1084. };
  1085. static const struct qcom_iommu_flush_ops arm_smmu_iotlb_ops = {
  1086. .tlb_add_walk_page = arm_smmu_tlb_add_walk_page,
  1087. .tlb_add_inv = arm_smmu_qcom_tlb_add_inv,
  1088. .tlb_sync = arm_smmu_qcom_tlb_sync,
  1089. };
  1090. static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
  1091. struct arm_smmu_device *smmu,
  1092. struct device *dev, unsigned int start)
  1093. {
  1094. if (smmu->impl && smmu->impl->alloc_context_bank)
  1095. return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
  1096. return __arm_smmu_alloc_cb(smmu->context_map, start, smmu->num_context_banks, dev,
  1097. smmu_domain);
  1098. }
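/*
 * Context faults are handled as a threaded interrupt: the hard-IRQ handler
 * below gives clients a chance to run their registered atomic handler (see
 * arm_smmu_set_fault_handler_irq()) before waking the threaded handler,
 * which carries out the full (possibly sleeping) fault processing.
 */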
  1099. static irqreturn_t arm_smmu_context_fault_irq(int irq, void *dev)
  1100. {
  1101. struct iommu_domain *domain = dev;
  1102. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1103. /* Call the handler registered for hard (non-threaded) IRQ context, if any */
  1104. if (smmu_domain->fault_handler_irq)
  1105. smmu_domain->fault_handler_irq(domain, smmu_domain->handler_irq_token);
  1106. return IRQ_WAKE_THREAD;
  1107. }
  1108. static int arm_smmu_init_domain_context(struct iommu_domain *domain,
  1109. struct arm_smmu_device *smmu,
  1110. struct device *dev)
  1111. {
  1112. int irq, start, ret = 0;
  1113. unsigned long ias, oas;
  1114. struct io_pgtable_ops *pgtbl_ops;
  1115. enum io_pgtable_fmt fmt;
  1116. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1117. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  1118. struct qcom_io_pgtable_info *pgtbl_info = &smmu_domain->pgtbl_info;
  1119. struct io_pgtable_cfg *pgtbl_cfg = &pgtbl_info->cfg;
  1120. irqreturn_t (*context_fault)(int irq, void *dev);
  1121. struct io_pgtable *iop;
  1122. mutex_lock(&smmu_domain->init_mutex);
  1123. if (smmu_domain->smmu)
  1124. goto out_unlock;
  1125. smmu_domain->dev = dev;
  1126. ret = arm_smmu_setup_default_domain(dev, domain);
  1127. if (ret) {
  1128. dev_err(dev, "%s: default domain setup failed\n",
  1129. __func__);
  1130. goto out_unlock;
  1131. }
  1132. if (domain->type == IOMMU_DOMAIN_IDENTITY) {
  1133. smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
  1134. smmu_domain->smmu = smmu;
  1135. goto out_unlock;
  1136. }
  1137. /*
  1138. * Mapping the requested stage onto what we support is surprisingly
  1139. * complicated, mainly because the spec allows S1+S2 SMMUs without
  1140. * support for nested translation. That means we end up with the
  1141. * following table:
  1142. *
  1143. * Requested Supported Actual
  1144. * S1 N S1
  1145. * S1 S1+S2 S1
  1146. * S1 S2 S2
  1147. * S1 S1 S1
  1148. * N N N
  1149. * N S1+S2 S2
  1150. * N S2 S2
  1151. * N S1 S1
  1152. *
  1153. * Note that you can't actually request stage-2 mappings.
  1154. */
  1155. if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
  1156. smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
  1157. if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
  1158. smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
  1159. /*
  1160. * Choosing a suitable context format is even more fiddly. Until we
  1161. * grow some way for the caller to express a preference, and/or move
  1162. * the decision into the io-pgtable code where it arguably belongs,
  1163. * just aim for the closest thing to the rest of the system, and hope
  1164. * that the hardware isn't esoteric enough that we can't assume AArch64
  1165. * support to be a superset of AArch32 support...
  1166. */
  1167. if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
  1168. cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
  1169. if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
  1170. !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
  1171. (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
  1172. (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
  1173. cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
  1174. if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
  1175. (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
  1176. ARM_SMMU_FEAT_FMT_AARCH64_16K |
  1177. ARM_SMMU_FEAT_FMT_AARCH64_4K)))
  1178. cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
  1179. if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
  1180. ret = -EINVAL;
  1181. goto out_unlock;
  1182. }
  1183. switch (smmu_domain->stage) {
  1184. case ARM_SMMU_DOMAIN_S1:
  1185. cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
  1186. start = smmu->num_s2_context_banks;
  1187. ias = smmu->va_size;
  1188. oas = smmu->ipa_size;
  1189. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
  1190. fmt = QCOM_ARM_64_LPAE_S1;
  1191. if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
  1192. ias = min(ias, 39UL);
  1193. } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
  1194. fmt = ARM_32_LPAE_S1;
  1195. ias = min(ias, 32UL);
  1196. oas = min(oas, 40UL);
  1197. } else {
  1198. fmt = ARM_V7S;
  1199. ias = min(ias, 32UL);
  1200. oas = min(oas, 32UL);
  1201. }
  1202. smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
  1203. break;
  1204. case ARM_SMMU_DOMAIN_NESTED:
  1205. /*
  1206. * We will likely want to change this if/when KVM gets
  1207. * involved.
  1208. */
  1209. case ARM_SMMU_DOMAIN_S2:
  1210. cfg->cbar = CBAR_TYPE_S2_TRANS;
  1211. start = 0;
  1212. ias = smmu->ipa_size;
  1213. oas = smmu->pa_size;
  1214. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
  1215. fmt = ARM_64_LPAE_S2;
  1216. } else {
  1217. fmt = ARM_32_LPAE_S2;
  1218. ias = min(ias, 40UL);
  1219. oas = min(oas, 40UL);
  1220. }
  1221. if (smmu->version == ARM_SMMU_V2)
  1222. smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
  1223. else
  1224. smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
  1225. break;
  1226. default:
  1227. ret = -EINVAL;
  1228. goto out_unlock;
  1229. }
  1230. if (smmu_domain->mapping_cfg.fast) {
  1231. fmt = ARM_V8L_FAST;
  1232. ret = qcom_iommu_get_fast_iova_range(dev,
  1233. &pgtbl_info->iova_base,
  1234. &pgtbl_info->iova_end);
  1235. if (ret < 0)
  1236. goto out_unlock;
  1237. } else if (arm_smmu_has_secure_vmid(smmu_domain)) {
  1238. pgtbl_info->vmid = smmu_domain->secure_vmid;
  1239. }
  1240. ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
  1241. if (ret < 0) {
  1242. goto out_unlock;
  1243. }
  1244. smmu_domain->smmu = smmu;
  1245. cfg->cbndx = ret;
  1246. if (smmu->version < ARM_SMMU_V2) {
  1247. cfg->irptndx = atomic_inc_return(&smmu->irptndx);
  1248. cfg->irptndx %= smmu->num_context_irqs;
  1249. } else {
  1250. cfg->irptndx = cfg->cbndx;
  1251. }
  1252. if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
  1253. cfg->vmid = cfg->cbndx + 1;
  1254. else
  1255. cfg->asid = cfg->cbndx;
  1256. pgtbl_info->iommu_tlb_ops = &arm_smmu_iotlb_ops;
  1257. pgtbl_info->pgtable_log_ops = &arm_smmu_pgtable_log_ops;
  1258. pgtbl_info->cfg = (struct io_pgtable_cfg) {
  1259. .pgsize_bitmap = smmu->pgsize_bitmap,
  1260. .ias = ias,
  1261. .oas = oas,
  1262. .coherent_walk = is_iommu_pt_coherent(smmu_domain),
  1263. .tlb = smmu_domain->flush_ops,
  1264. .iommu_dev = smmu->dev,
  1265. };
  1266. if (smmu->impl && smmu->impl->init_context) {
  1267. ret = smmu->impl->init_context(smmu_domain, pgtbl_cfg, dev);
  1268. if (ret)
  1269. goto out_clear_smmu;
  1270. }
  1271. if (smmu_domain->pgtbl_quirks)
  1272. pgtbl_cfg->quirks |= smmu_domain->pgtbl_quirks;
  1273. pgtbl_ops = qcom_alloc_io_pgtable_ops(fmt, pgtbl_info, smmu_domain);
  1274. if (!pgtbl_ops) {
  1275. ret = -ENOMEM;
  1276. goto out_clear_smmu;
  1277. }
  1278. smmu_domain->pgtbl_fmt = fmt;
  1279. iop = container_of(pgtbl_ops, struct io_pgtable, ops);
  1280. ret = iommu_logger_register(&smmu_domain->logger, domain,
  1281. smmu_domain->dev, iop);
  1282. if (ret) {
  1283. dev_err(dev, "Log registration failed\n");
  1284. goto out_free_io_pgtable;
  1285. }
  1286. /* Update the domain's page sizes to reflect the page table format */
  1287. domain->pgsize_bitmap = pgtbl_cfg->pgsize_bitmap;
  1288. if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
  1289. domain->geometry.aperture_start = ~0UL << ias;
  1290. domain->geometry.aperture_end = ~0UL;
  1291. } else {
  1292. domain->geometry.aperture_end = (1UL << ias) - 1;
  1293. }
  1294. domain->geometry.force_aperture = true;
  1295. ret = arm_smmu_get_dma_cookie(dev, smmu_domain, pgtbl_ops);
  1296. if (ret)
  1297. goto out_logger;
  1298. /*
  1299. * Matches with call to arm_smmu_rpm_put in
  1300. * arm_smmu_destroy_domain_context.
  1301. */
  1302. if (smmu_domain->mapping_cfg.atomic) {
  1303. smmu_domain->rpm_always_on = true;
  1304. arm_smmu_rpm_get(smmu);
  1305. }
  1306. /* Initialise the context bank with our page table cfg */
  1307. arm_smmu_init_context_bank(smmu_domain, pgtbl_cfg);
  1308. if (smmu->impl && smmu->impl->init_context_bank)
  1309. smmu->impl->init_context_bank(smmu_domain, dev);
  1310. arm_smmu_write_context_bank(smmu, cfg->cbndx);
  1311. /*
  1312. * Request context fault interrupt. Do this last to avoid the
  1313. * handler seeing a half-initialised domain state.
  1314. */
  1315. irq = smmu->irqs[cfg->irptndx];
  1316. if (smmu->impl && smmu->impl->context_fault)
  1317. context_fault = smmu->impl->context_fault;
  1318. else
  1319. context_fault = arm_smmu_context_fault;
  1320. ret = devm_request_threaded_irq(smmu->dev, irq, arm_smmu_context_fault_irq,
  1321. context_fault, IRQF_ONESHOT | IRQF_SHARED,
  1322. "arm-smmu-context-fault", domain);
  1323. if (ret < 0) {
  1324. dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
  1325. cfg->irptndx, irq);
  1326. cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
  1327. }
  1328. mutex_unlock(&smmu_domain->init_mutex);
  1329. /* Publish page table ops for map/unmap */
  1330. smmu_domain->pgtbl_ops = pgtbl_ops;
  1331. return 0;
  1332. out_logger:
  1333. iommu_logger_unregister(smmu_domain->logger);
  1334. smmu_domain->logger = NULL;
  1335. out_free_io_pgtable:
  1336. qcom_free_io_pgtable_ops(smmu_domain->pgtbl_ops);
  1337. out_clear_smmu:
  1338. __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
  1339. smmu_domain->smmu = NULL;
  1340. out_unlock:
  1341. mutex_unlock(&smmu_domain->init_mutex);
  1342. return ret;
  1343. }
  1344. static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
  1345. {
  1346. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1347. struct arm_smmu_device *smmu = smmu_domain->smmu;
  1348. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  1349. int ret, irq, i;
  1350. bool pinned = false;
  1351. if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
  1352. return;
  1353. ret = arm_smmu_rpm_get(smmu);
  1354. if (ret < 0)
  1355. return;
  1356. /*
  1357. * Matches with call to arm_smmu_rpm_get in
1358. * arm_smmu_init_domain_context.
  1359. */
  1360. if (smmu_domain->rpm_always_on)
  1361. arm_smmu_rpm_put(smmu);
  1362. /*
1363. * Disable the context bank and free the page tables before freeing
1364. * the context bank itself.
  1365. */
  1366. smmu->cbs[cfg->cbndx].cfg = NULL;
  1367. arm_smmu_write_context_bank(smmu, cfg->cbndx);
  1368. if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
  1369. irq = smmu->irqs[cfg->irptndx];
  1370. devm_free_irq(smmu->dev, irq, domain);
  1371. }
  1372. qcom_free_io_pgtable_ops(smmu_domain->pgtbl_ops);
  1373. for (i = 0; i < smmu->num_mapping_groups; i++)
  1374. if ((cfg->cbndx == smmu->s2crs[i].cbndx) &&
  1375. (smmu->s2crs[i].pinned)) {
  1376. pinned = true;
  1377. }
  1378. if (!pinned)
  1379. __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
  1380. arm_smmu_rpm_put(smmu);
  1381. }
  1382. static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
  1383. {
  1384. struct arm_smmu_domain *smmu_domain;
  1385. if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
  1386. if (using_legacy_binding ||
  1387. (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_DMA_FQ))
  1388. return NULL;
  1389. }
  1390. /*
  1391. * Allocate the domain and initialise some of its data structures.
  1392. * We can't really do anything meaningful until we've added a
  1393. * master.
  1394. */
  1395. smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
  1396. if (!smmu_domain)
  1397. return NULL;
  1398. mutex_init(&smmu_domain->init_mutex);
  1399. spin_lock_init(&smmu_domain->cb_lock);
  1400. spin_lock_init(&smmu_domain->sync_lock);
  1401. spin_lock_init(&smmu_domain->iotlb_gather_lock);
  1402. smmu_domain->secure_vmid = VMID_INVAL;
  1403. return &smmu_domain->domain;
  1404. }
  1405. static void arm_smmu_domain_free(struct iommu_domain *domain)
  1406. {
  1407. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1408. /*
  1409. * Free the domain resources. We assume that all devices have
  1410. * already been detached.
  1411. */
  1412. arm_smmu_put_dma_cookie(domain);
  1413. arm_smmu_destroy_domain_context(domain);
  1414. iommu_logger_unregister(smmu_domain->logger);
  1415. kfree(smmu_domain);
  1416. }
  1417. static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
  1418. {
  1419. struct arm_smmu_smr *smr = smmu->smrs + idx;
  1420. u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
  1421. FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
  1422. if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
  1423. reg |= ARM_SMMU_SMR_VALID;
  1424. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
  1425. }
  1426. static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
  1427. {
  1428. struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
  1429. u32 reg;
  1430. if (smmu->impl && smmu->impl->write_s2cr) {
  1431. smmu->impl->write_s2cr(smmu, idx);
  1432. return;
  1433. }
  1434. reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
  1435. FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
  1436. FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg) |
  1437. FIELD_PREP(ARM_SMMU_S2CR_SHCFG, ARM_SMMU_S2CR_SHCFG_NSH);
  1438. if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
  1439. smmu->smrs[idx].valid)
  1440. reg |= ARM_SMMU_S2CR_EXIDVALID;
  1441. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
  1442. }
  1443. static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
  1444. {
  1445. arm_smmu_write_s2cr(smmu, idx);
  1446. if (smmu->smrs)
  1447. arm_smmu_write_smr(smmu, idx);
  1448. }
  1449. /*
  1450. * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
  1451. * should be called after sCR0 is written.
  1452. */
  1453. static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
  1454. {
  1455. u32 smr;
  1456. int i;
  1457. if (!smmu->smrs)
  1458. return;
  1459. /*
  1460. * If we've had to accommodate firmware memory regions, we may
  1461. * have live SMRs by now; tread carefully...
  1462. *
  1463. * Somewhat perversely, not having a free SMR for this test implies we
  1464. * can get away without it anyway, as we'll only be able to 'allocate'
  1465. * these SMRs for the ID/mask values we're already trusting to be OK.
  1466. */
  1467. for (i = 0; i < smmu->num_mapping_groups; i++)
  1468. if (!smmu->smrs[i].valid)
  1469. goto smr_ok;
  1470. return;
  1471. smr_ok:
  1472. /*
  1473. * SMR.ID bits may not be preserved if the corresponding MASK
  1474. * bits are set, so check each one separately. We can reject
  1475. * masters later if they try to claim IDs outside these masks.
  1476. */
  1477. smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
  1478. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
  1479. smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
  1480. smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
  1481. smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
  1482. arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
  1483. smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
  1484. smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
  1485. }
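/*
 * SMR matching refresher (illustrative numbers, not from any real target):
 * an SMR with id = 0x400 and mask = 0x00f matches stream IDs 0x400-0x40f.
 * A request for id = 0x404, mask = 0x003 is entirely covered by that entry,
 * so arm_smmu_find_sme() returns its index for reuse; a request for
 * id = 0x40c, mask = 0x030 only partially overlaps it, so at least one
 * stream ID could match both entries and the request is rejected (-EINVAL).
 */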
  1486. static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
  1487. {
  1488. struct arm_smmu_smr *smrs = smmu->smrs;
  1489. int i, free_idx = -ENOSPC;
  1490. /* Stream indexing is blissfully easy */
  1491. if (!smrs)
  1492. return id;
  1493. /* Validating SMRs is... less so */
  1494. for (i = 0; i < smmu->num_mapping_groups; ++i) {
  1495. if (!smrs[i].used) {
  1496. /*
  1497. * Note the first free entry we come across, which
  1498. * we'll claim in the end if nothing else matches.
  1499. */
  1500. if (free_idx < 0)
  1501. free_idx = i;
  1502. continue;
  1503. }
  1504. /*
  1505. * If the new entry is _entirely_ matched by an existing entry,
  1506. * then reuse that, with the guarantee that there also cannot
  1507. * be any subsequent conflicting entries. In normal use we'd
  1508. * expect simply identical entries for this case, but there's
  1509. * no harm in accommodating the generalisation.
  1510. */
  1511. if ((mask & smrs[i].mask) == mask &&
  1512. !((id ^ smrs[i].id) & ~smrs[i].mask))
  1513. return i;
  1514. /*
  1515. * If the new entry has any other overlap with an existing one,
  1516. * though, then there always exists at least one stream ID
  1517. * which would cause a conflict, and we can't allow that risk.
  1518. */
  1519. if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
  1520. return -EINVAL;
  1521. }
  1522. return free_idx;
  1523. }
  1524. static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
  1525. {
  1526. bool pinned = smmu->s2crs[idx].pinned;
  1527. u8 cbndx = smmu->s2crs[idx].cbndx;
  1528. if (--smmu->s2crs[idx].count)
  1529. return false;
  1530. smmu->s2crs[idx] = s2cr_init_val;
  1531. if (pinned) {
  1532. smmu->s2crs[idx].pinned = true;
  1533. smmu->s2crs[idx].cbndx = cbndx;
  1534. } else if (smmu->smrs) {
  1535. smmu->smrs[idx].valid = false;
  1536. smmu->smrs[idx].used = false;
  1537. }
  1538. return true;
  1539. }
  1540. static struct device_node *arm_smmu_get_of_node(struct device *dev)
  1541. {
  1542. struct device_node *np;
  1543. if (!dev->of_node)
  1544. return NULL;
  1545. np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
  1546. return np ? np : dev->of_node;
  1547. }
  1548. static bool dev_defer_smr_configuration(struct device *dev)
  1549. {
  1550. struct device_node *np = arm_smmu_get_of_node(dev);
  1551. return of_property_read_bool(np, "qcom,iommu-defer-smr-config");
  1552. }
  1553. static int arm_smmu_master_alloc_smes(struct device *dev)
  1554. {
  1555. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  1556. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  1557. struct arm_smmu_device *smmu = cfg->smmu;
  1558. struct arm_smmu_smr *smrs = smmu->smrs;
  1559. bool config_smrs = !dev_defer_smr_configuration(dev);
  1560. int i, idx, ret;
  1561. mutex_lock(&smmu->stream_map_mutex);
  1562. /* Figure out a viable stream map entry allocation */
  1563. for_each_cfg_sme(cfg, fwspec, i, idx) {
  1564. u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
  1565. u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
  1566. if (idx != INVALID_SMENDX) {
  1567. ret = -EEXIST;
  1568. goto out_err;
  1569. }
  1570. ret = arm_smmu_find_sme(smmu, sid, mask);
  1571. if (ret < 0)
  1572. goto out_err;
  1573. idx = ret;
  1574. if (smrs && smmu->s2crs[idx].count == 0) {
  1575. smrs[idx].id = sid;
  1576. smrs[idx].mask = mask;
  1577. smrs[idx].valid = config_smrs;
  1578. smrs[idx].used = true;
  1579. } else if (smrs && WARN_ON(smrs[idx].valid != config_smrs)) {
  1580. ret = -EINVAL;
  1581. goto out_err;
  1582. }
  1583. smmu->s2crs[idx].count++;
  1584. cfg->smendx[i] = (s16)idx;
  1585. }
  1586. mutex_unlock(&smmu->stream_map_mutex);
  1587. /* It worked! Now, poke the actual hardware */
  1588. for_each_cfg_sme(cfg, fwspec, i, idx)
  1589. arm_smmu_write_sme(smmu, idx);
  1590. return 0;
  1591. out_err:
  1592. while (i--) {
  1593. arm_smmu_free_sme(smmu, cfg->smendx[i]);
  1594. cfg->smendx[i] = INVALID_SMENDX;
  1595. }
  1596. mutex_unlock(&smmu->stream_map_mutex);
  1597. return ret;
  1598. }
  1599. static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
  1600. struct iommu_fwspec *fwspec)
  1601. {
  1602. struct arm_smmu_device *smmu = cfg->smmu;
  1603. int i, idx;
  1604. mutex_lock(&smmu->stream_map_mutex);
  1605. for_each_cfg_sme(cfg, fwspec, i, idx) {
  1606. if (arm_smmu_free_sme(smmu, idx))
  1607. arm_smmu_write_sme(smmu, idx);
  1608. cfg->smendx[i] = INVALID_SMENDX;
  1609. }
  1610. mutex_unlock(&smmu->stream_map_mutex);
  1611. }
  1612. static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
  1613. struct arm_smmu_master_cfg *cfg,
  1614. struct iommu_fwspec *fwspec)
  1615. {
  1616. struct arm_smmu_device *smmu = smmu_domain->smmu;
  1617. struct arm_smmu_s2cr *s2cr = smmu->s2crs;
  1618. u8 cbndx = smmu_domain->cfg.cbndx;
  1619. enum arm_smmu_s2cr_type type;
  1620. int i, idx;
  1621. if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
  1622. type = S2CR_TYPE_BYPASS;
  1623. else
  1624. type = S2CR_TYPE_TRANS;
  1625. mutex_lock(&smmu->stream_map_mutex);
  1626. for_each_cfg_sme(cfg, fwspec, i, idx) {
  1627. if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
  1628. continue;
  1629. s2cr[idx].type = type;
  1630. s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
  1631. s2cr[idx].cbndx = cbndx;
  1632. arm_smmu_write_s2cr(smmu, idx);
  1633. }
  1634. mutex_unlock(&smmu->stream_map_mutex);
  1635. return 0;
  1636. }
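/*
 * arm_smmu_setup_default_domain() derives the initial domain configuration
 * from properties on the client node (or its qcom,iommu-group node). A
 * purely illustrative fragment, with made-up values, might look like:
 *
 *	qcom,iommu-dma = "fastmap";
 *	qcom,iommu-faults = "stall-disable", "non-fatal";
 *	qcom,iommu-vmid = <0x5>;
 *	qcom,iommu-pagetable = "coherent";
 *	qcom,iommu-earlymap;
 *
 * Only the property names and the string values parsed below are meaningful;
 * the specific values in the snippet are assumptions for the example.
 */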
  1637. static int arm_smmu_setup_default_domain(struct device *dev,
  1638. struct iommu_domain *domain)
  1639. {
  1640. struct device_node *np;
  1641. int ret;
  1642. const char *str;
  1643. u32 val;
  1644. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1645. np = arm_smmu_get_of_node(dev);
  1646. if (!np)
  1647. return 0;
  1648. ret = of_property_read_string(np, "qcom,iommu-dma", &str);
  1649. if (ret)
  1650. str = "default";
  1651. if (!strcmp(str, "bypass")) {
  1652. smmu_domain->mapping_cfg.s1_bypass = 1;
  1653. } else if (!strcmp(str, "fastmap")) {
  1654. /*
1655. * Fall back to the upstream DMA allocator if fastmap is not enabled.
1656. * "fastmap" implies "atomic" because it does not call arm_smmu_rpm_get()
1657. * in its map/unmap functions. Its clients may or may not actually
1658. * use the IOMMU APIs from atomic context.
  1659. */
  1660. smmu_domain->mapping_cfg.atomic = 1;
  1661. if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_FAST))
  1662. smmu_domain->mapping_cfg.fast = 1;
  1663. } else if (!strcmp(str, "atomic")) {
  1664. smmu_domain->mapping_cfg.atomic = 1;
  1665. } else if (!strcmp(str, "disabled")) {
1666. /* These DT properties are only intended for use by default domains */
  1667. return 0;
  1668. }
  1669. /*
1670. * Default fault behaviour:
1671. * - stall on fault
1672. * - faults trigger a kernel panic
1673. * - return an abort to the client
  1674. */
  1675. if (of_property_match_string(np, "qcom,iommu-faults",
  1676. "stall-disable") >= 0)
  1677. smmu_domain->fault_model.no_stall = 1;
  1678. if (of_property_match_string(np, "qcom,iommu-faults", "no-CFRE") >= 0)
  1679. smmu_domain->fault_model.no_cfre = 1;
  1680. if (of_property_match_string(np, "qcom,iommu-faults", "HUPCF") >= 0)
  1681. smmu_domain->fault_model.hupcf = 1;
  1682. if (of_property_match_string(np, "qcom,iommu-faults", "non-fatal") >= 0)
  1683. smmu_domain->fault_model.non_fatal = 1;
  1684. /* Default value: disabled */
  1685. ret = of_property_read_u32(np, "qcom,iommu-vmid", &val);
  1686. if (!ret)
  1687. smmu_domain->secure_vmid = val;
  1688. /* Default value: disabled */
  1689. ret = of_property_read_string(np, "qcom,iommu-pagetable", &str);
  1690. if (ret)
  1691. str = "disabled";
  1692. if (!strcmp(str, "coherent"))
  1693. smmu_domain->force_coherent_walk = true;
  1694. else if (!strcmp(str, "LLC"))
  1695. smmu_domain->pgtbl_quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
  1696. else if (!strcmp(str, "LLC_NWA"))
  1697. smmu_domain->pgtbl_quirks = IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA;
  1698. /* Default value: disabled */
  1699. if (of_property_read_bool(np, "qcom,iommu-earlymap"))
  1700. smmu_domain->delayed_s1_trans_enable = true;
  1701. return 0;
  1702. }
  1703. struct lookup_iommu_group_data {
  1704. struct device_node *np;
  1705. struct iommu_group *group;
  1706. };
  1707. /* This isn't a "fast lookup" since its N^2, but probably good enough */
  1708. static int __bus_lookup_iommu_group(struct device *dev, void *priv)
  1709. {
  1710. struct lookup_iommu_group_data *data = priv;
  1711. struct device_node *np;
  1712. struct iommu_group *group;
  1713. group = iommu_group_get(dev);
  1714. if (!group)
  1715. return 0;
  1716. np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
  1717. if (np != data->np) {
  1718. iommu_group_put(group);
  1719. return 0;
  1720. }
  1721. data->group = group;
  1722. return 1;
  1723. }
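/*
 * Devices that carry a "qcom,iommu-group" phandle are placed in a shared
 * iommu_group with every other device pointing at the same phandle, found
 * by walking the platform (and, if enabled, PCI) buses below. Illustrative
 * only, with invented node names:
 *
 *	display_group: display-iommu-group {};
 *
 *	mdp@5e01000 {
 *		qcom,iommu-group = <&display_group>;
 *	};
 */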
  1724. static struct iommu_group *of_get_device_group(struct device *dev)
  1725. {
  1726. struct lookup_iommu_group_data data = {
  1727. .np = NULL,
  1728. .group = NULL,
  1729. };
  1730. struct iommu_group *group;
  1731. int ret;
  1732. data.np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
  1733. if (!data.np)
  1734. return NULL;
  1735. ret = bus_for_each_dev(&platform_bus_type, NULL, &data,
  1736. __bus_lookup_iommu_group);
  1737. if (ret > 0)
  1738. return data.group;
  1739. #ifdef CONFIG_PCI
  1740. ret = bus_for_each_dev(&pci_bus_type, NULL, &data,
  1741. __bus_lookup_iommu_group);
  1742. if (ret > 0)
  1743. return data.group;
  1744. #endif
  1745. group = generic_device_group(dev);
  1746. if (IS_ERR(group))
  1747. return NULL;
  1748. return group;
  1749. }
  1750. static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
  1751. {
  1752. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1753. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  1754. struct arm_smmu_master_cfg *cfg;
  1755. struct arm_smmu_device *smmu;
  1756. int ret;
  1757. if (!fwspec || fwspec->ops != &arm_smmu_ops.iommu_ops) {
  1758. dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
  1759. return -ENXIO;
  1760. }
  1761. /*
  1762. * FIXME: The arch/arm DMA API code tries to attach devices to its own
  1763. * domains between of_xlate() and probe_device() - we have no way to cope
  1764. * with that, so until ARM gets converted to rely on groups and default
  1765. * domains, just say no (but more politely than by dereferencing NULL).
  1766. * This should be at least a WARN_ON once that's sorted.
  1767. */
  1768. cfg = dev_iommu_priv_get(dev);
  1769. if (!cfg)
  1770. return -ENODEV;
  1771. smmu = cfg->smmu;
  1772. ret = arm_smmu_rpm_get(smmu);
  1773. if (ret < 0)
  1774. return ret;
  1775. /* Ensure that the domain is finalised */
  1776. ret = arm_smmu_init_domain_context(domain, smmu, dev);
  1777. if (ret < 0)
  1778. goto rpm_put;
  1779. /*
  1780. * Sanity check the domain. We don't support domains across
  1781. * different SMMUs.
  1782. */
  1783. if (smmu_domain->smmu != smmu) {
  1784. dev_err(dev,
  1785. "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
  1786. dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
  1787. ret = -EINVAL;
  1788. goto rpm_put;
  1789. }
  1790. /* Looks ok, so add the device to the domain */
  1791. ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
  1792. /*
  1793. * Setup an autosuspend delay to avoid bouncing runpm state.
  1794. * Otherwise, if a driver for a suspended consumer device
  1795. * unmaps buffers, it will runpm resume/suspend for each one.
  1796. *
  1797. * For example, when used by a GPU device, when an application
  1798. * or game exits, it can trigger unmapping 100s or 1000s of
  1799. * buffers. With a runpm cycle for each buffer, that adds up
  1800. * to 5-10sec worth of reprogramming the context bank, while
  1801. * the system appears to be locked up to the user.
  1802. */
  1803. pm_runtime_set_autosuspend_delay(smmu->dev, 20);
  1804. pm_runtime_use_autosuspend(smmu->dev);
  1805. rpm_put:
  1806. arm_smmu_rpm_put(smmu);
  1807. return ret;
  1808. }
  1809. static gfp_t arm_smmu_domain_gfp_flags(struct arm_smmu_domain *smmu_domain)
  1810. {
  1811. /*
  1812. * The dma layer always uses GFP_ATOMIC, which isn't indicative of
  1813. * the actual client needs.
  1814. */
  1815. if (smmu_domain->mapping_cfg.atomic)
  1816. return GFP_ATOMIC;
  1817. return GFP_KERNEL;
  1818. }
  1819. static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
  1820. phys_addr_t paddr, size_t pgsize, size_t pgcount,
  1821. int prot, gfp_t gfp, size_t *mapped)
  1822. {
  1823. struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
  1824. struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
  1825. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1826. int ret;
  1827. if (!ops)
  1828. return -ENODEV;
  1829. ret = arm_smmu_rpm_get(smmu);
  1830. if (ret < 0)
  1831. return ret;
  1832. gfp = arm_smmu_domain_gfp_flags(smmu_domain);
  1833. ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
  1834. arm_smmu_rpm_put(smmu);
  1835. if (!ret)
  1836. trace_map_pages(smmu_domain, iova, pgsize, pgcount);
  1837. return ret;
  1838. }
  1839. static int __maybe_unused arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
  1840. struct scatterlist *sg, unsigned int nents, int prot,
  1841. gfp_t gfp, size_t *mapped)
  1842. {
  1843. struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
  1844. struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
  1845. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1846. int ret, i;
  1847. struct scatterlist *tmp;
  1848. if (!ops)
  1849. return -ENODEV;
  1850. ret = arm_smmu_rpm_get(smmu);
  1851. if (ret < 0)
  1852. return ret;
  1853. gfp = arm_smmu_domain_gfp_flags(smmu_domain);
  1854. /*
1855. * There is no ops->map_sg(), so map each scatterlist entry individually
1856. * via ops->map(). Although this function is currently unreachable, it is
1857. * implemented this way so that it never relies on ops->map_sg().
  1858. */
  1859. for_each_sg(sg, tmp, nents, i) {
  1860. ret = ops->map(ops, iova + *mapped, sg_phys(tmp), tmp->length, prot, gfp);
  1861. if (ret)
  1862. break;
  1863. *mapped += tmp->length;
  1864. }
  1865. arm_smmu_rpm_put(smmu);
  1866. if (!ret)
  1867. trace_map_sg(smmu_domain, iova, sg, nents);
  1868. return ret;
  1869. }
  1870. static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
  1871. size_t pgsize, size_t pgcount,
  1872. struct iommu_iotlb_gather *gather)
  1873. {
  1874. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1875. struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  1876. size_t ret;
  1877. if (!ops)
  1878. return 0;
  1879. ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
  1880. if (ret)
  1881. trace_unmap_pages(smmu_domain, iova, pgsize, pgcount);
  1882. return ret;
  1883. }
  1884. static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
  1885. {
  1886. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1887. struct arm_smmu_device *smmu = smmu_domain->smmu;
  1888. unsigned long flags;
  1889. spin_lock_irqsave(&smmu_domain->iotlb_gather_lock, flags);
  1890. if (smmu_domain->skip_tlb_management) {
  1891. smmu_domain->deferred_flush = false;
  1892. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1893. return;
  1894. }
  1895. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1896. if (smmu_domain->flush_ops) {
  1897. arm_smmu_rpm_get(smmu);
  1898. __arm_smmu_flush_iotlb_all(domain, true);
  1899. arm_smmu_rpm_put(smmu);
  1900. }
  1901. }
  1902. /*
  1903. * Caller must call arm_smmu_rpm_get().
  1904. */
  1905. static void __arm_smmu_flush_iotlb_all(struct iommu_domain *domain, bool force)
  1906. {
  1907. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1908. struct page *page;
  1909. unsigned long flags;
  1910. spin_lock_irqsave(&smmu_domain->iotlb_gather_lock, flags);
  1911. /*
1912. * iommu_flush_iotlb_all() currently has two users which do not set
1913. * deferred_flush through qcom_iommu_pgtable_ops->tlb_add_inv:
1914. * 1) GPU - the old implementation uses the upstream io-pgtable-arm.c
1915. * 2) fastmap
1916. * Once these users have gone away, the force parameter can be removed.
1917. *
1918. * Also return immediately if skip_tlb_management is set.
  1919. */
  1920. if (smmu_domain->skip_tlb_management || (!force && !smmu_domain->deferred_flush)) {
  1921. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1922. return;
  1923. }
  1924. smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
  1925. smmu_domain->deferred_flush = false;
  1926. while (smmu_domain->freelist) {
  1927. page = container_of(smmu_domain->freelist, struct page, lru);
  1928. smmu_domain->freelist = smmu_domain->freelist->next;
  1929. qcom_io_pgtable_free_page(page);
  1930. }
  1931. smmu_domain->freelist = NULL;
  1932. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  1933. }
  1934. static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
  1935. struct iommu_iotlb_gather *gather)
  1936. {
  1937. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1938. struct arm_smmu_device *smmu = smmu_domain->smmu;
  1939. if (!smmu)
  1940. return;
  1941. arm_smmu_flush_iotlb_all(domain);
  1942. }
  1943. static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
  1944. dma_addr_t iova)
  1945. {
  1946. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1947. struct arm_smmu_device *smmu = smmu_domain->smmu;
  1948. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  1949. struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  1950. struct device *dev = smmu->dev;
  1951. void __iomem *reg;
  1952. u32 tmp;
  1953. u64 phys;
  1954. unsigned long va, flags;
  1955. int idx = cfg->cbndx;
  1956. phys_addr_t addr = 0;
  1957. spin_lock_irqsave(&smmu_domain->cb_lock, flags);
  1958. va = iova & ~0xfffUL;
  1959. if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
  1960. arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
  1961. else
  1962. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
  1963. reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
  1964. if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
  1965. 5, 50)) {
  1966. spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
  1967. dev_err(dev,
  1968. "iova to phys timed out on %pad. Falling back to software table walk.\n",
  1969. &iova);
  1970. return ops->iova_to_phys(ops, iova);
  1971. }
  1972. phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
  1973. spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
  1974. if (phys & ARM_SMMU_CB_PAR_F) {
  1975. dev_err(dev, "translation fault!\n");
  1976. dev_err(dev, "PAR = 0x%llx\n", phys);
  1977. goto out;
  1978. }
  1979. addr = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
  1980. out:
  1981. return addr;
  1982. }
  1983. static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
  1984. dma_addr_t iova)
  1985. {
  1986. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  1987. struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  1988. struct arm_smmu_device *smmu = smmu_domain->smmu;
  1989. phys_addr_t phys;
  1990. if (!ops)
  1991. return 0;
  1992. if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
  1993. smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
  1994. if (arm_smmu_rpm_get(smmu) < 0)
  1995. return 0;
  1996. phys = __arm_smmu_iova_to_phys_hard(domain, iova);
  1997. arm_smmu_rpm_put(smmu);
  1998. return phys;
  1999. }
  2000. return ops->iova_to_phys(ops, iova);
  2001. }
  2002. /*
2003. * This function can sleep, and so cannot be called from atomic context. It
2004. * will power on the register block if required. This restriction does not
2005. * apply to the original iova_to_phys() op.
  2006. */
  2007. static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
  2008. struct qcom_iommu_atos_txn *txn)
  2009. {
  2010. phys_addr_t ret = 0;
  2011. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2012. struct arm_smmu_device *smmu = smmu_domain->smmu;
  2013. if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS)
  2014. return 0;
  2015. if (arm_smmu_rpm_get(smmu) < 0)
  2016. return 0;
  2017. if (smmu->impl && smmu->impl->iova_to_phys_hard) {
  2018. ret = smmu->impl->iova_to_phys_hard(smmu_domain, txn);
  2019. goto out;
  2020. }
  2021. if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
  2022. smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
  2023. ret = __arm_smmu_iova_to_phys_hard(domain, txn->addr);
  2024. out:
  2025. arm_smmu_rpm_put(smmu);
  2026. return ret;
  2027. }
  2028. static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
  2029. {
  2030. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  2031. switch (cap) {
  2032. case IOMMU_CAP_CACHE_COHERENCY:
  2033. /*
  2034. * It's overwhelmingly the case in practice that when the pagetable
  2035. * walk interface is connected to a coherent interconnect, all the
  2036. * translation interfaces are too. Furthermore if the device is
  2037. * natively coherent, then its translation interface must also be.
  2038. */
  2039. return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
  2040. device_get_dma_attr(dev) == DEV_DMA_COHERENT;
  2041. case IOMMU_CAP_NOEXEC:
  2042. return true;
  2043. default:
  2044. return false;
  2045. }
  2046. }
  2047. static
  2048. struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
  2049. {
  2050. struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
  2051. fwnode);
  2052. put_device(dev);
  2053. return dev ? dev_get_drvdata(dev) : NULL;
  2054. }
  2055. static struct iommu_device *arm_smmu_probe_device(struct device *dev)
  2056. {
  2057. struct arm_smmu_device *smmu = NULL;
  2058. struct arm_smmu_master_cfg *cfg;
  2059. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  2060. int i, ret;
  2061. if (using_legacy_binding) {
  2062. ret = arm_smmu_register_legacy_master(dev, &smmu);
  2063. /*
2064. * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
  2065. * will allocate/initialise a new one. Thus we need to update fwspec for
  2066. * later use.
  2067. */
  2068. fwspec = dev_iommu_fwspec_get(dev);
  2069. if (ret)
  2070. goto out_free;
  2071. } else if (fwspec && fwspec->ops == &arm_smmu_ops.iommu_ops) {
  2072. smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
  2073. if (!smmu)
  2074. return ERR_PTR(-ENODEV);
  2075. } else {
  2076. return ERR_PTR(-ENODEV);
  2077. }
  2078. ret = -EINVAL;
  2079. for (i = 0; i < fwspec->num_ids; i++) {
  2080. u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
  2081. u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
  2082. if (sid & ~smmu->streamid_mask) {
  2083. dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
  2084. sid, smmu->streamid_mask);
  2085. goto out_free;
  2086. }
  2087. if (mask & ~smmu->smr_mask_mask) {
  2088. dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
  2089. mask, smmu->smr_mask_mask);
  2090. goto out_free;
  2091. }
  2092. }
  2093. ret = -ENOMEM;
  2094. cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
  2095. GFP_KERNEL);
  2096. if (!cfg)
  2097. goto out_free;
  2098. cfg->smmu = smmu;
  2099. dev_iommu_priv_set(dev, cfg);
  2100. while (i--)
  2101. cfg->smendx[i] = INVALID_SMENDX;
  2102. ret = arm_smmu_rpm_get(smmu);
  2103. if (ret < 0)
  2104. goto out_cfg_free;
  2105. ret = arm_smmu_master_alloc_smes(dev);
  2106. arm_smmu_rpm_put(smmu);
  2107. if (ret)
  2108. goto out_cfg_free;
  2109. device_link_add(dev, smmu->dev,
  2110. DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
  2111. return &smmu->iommu;
  2112. out_cfg_free:
  2113. kfree(cfg);
  2114. out_free:
  2115. iommu_fwspec_free(dev);
  2116. return ERR_PTR(ret);
  2117. }
  2118. static void arm_smmu_release_device(struct device *dev)
  2119. {
  2120. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  2121. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  2122. int ret;
  2123. if (!fwspec || fwspec->ops != &arm_smmu_ops.iommu_ops)
  2124. return;
  2125. cfg = dev_iommu_priv_get(dev);
  2126. ret = arm_smmu_rpm_get(cfg->smmu);
  2127. if (ret < 0)
  2128. return;
  2129. arm_smmu_master_free_smes(cfg, fwspec);
  2130. arm_smmu_rpm_put(cfg->smmu);
  2131. dev_iommu_priv_set(dev, NULL);
  2132. kfree(cfg);
  2133. }
  2134. static void arm_smmu_probe_finalize(struct device *dev)
  2135. {
  2136. struct arm_smmu_master_cfg *cfg;
  2137. struct arm_smmu_device *smmu;
  2138. cfg = dev_iommu_priv_get(dev);
  2139. smmu = cfg->smmu;
  2140. if (smmu->impl && smmu->impl->probe_finalize)
  2141. smmu->impl->probe_finalize(smmu, dev);
  2142. }
  2143. static struct iommu_group *arm_smmu_device_group(struct device *dev)
  2144. {
  2145. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  2146. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  2147. struct arm_smmu_device *smmu = cfg->smmu;
  2148. struct iommu_group *group = NULL;
  2149. int i, idx;
  2150. mutex_lock(&smmu->stream_map_mutex);
  2151. group = of_get_device_group(dev);
  2152. if (group)
  2153. goto finish;
  2154. for_each_cfg_sme(cfg, fwspec, i, idx) {
  2155. if (group && smmu->s2crs[idx].group &&
  2156. group != smmu->s2crs[idx].group) {
  2157. dev_err(dev, "ID:%x IDX:%x is already in a group!\n",
  2158. fwspec->ids[i], idx);
  2159. mutex_unlock(&smmu->stream_map_mutex);
  2160. return ERR_PTR(-EINVAL);
  2161. }
  2162. group = smmu->s2crs[idx].group;
  2163. }
  2164. if (group) {
  2165. iommu_group_ref_get(group);
  2166. } else {
  2167. if (dev_is_pci(dev))
  2168. group = pci_device_group(dev);
  2169. else if (dev_is_fsl_mc(dev))
  2170. group = fsl_mc_device_group(dev);
  2171. else
  2172. group = generic_device_group(dev);
  2173. if (IS_ERR(group)) {
  2174. mutex_unlock(&smmu->stream_map_mutex);
  2175. return NULL;
  2176. }
  2177. }
  2178. finish:
  2179. if (!IS_ERR(group) && smmu->impl && smmu->impl->device_group &&
  2180. smmu->impl->device_group(dev, group)) {
  2181. iommu_group_put(group);
  2182. mutex_unlock(&smmu->stream_map_mutex);
  2183. return ERR_PTR(-EINVAL);
  2184. }
  2185. /* Remember group for faster lookups */
  2186. if (!IS_ERR(group))
  2187. for_each_cfg_sme(cfg, fwspec, i, idx)
  2188. smmu->s2crs[idx].group = group;
  2189. mutex_unlock(&smmu->stream_map_mutex);
  2190. return group;
  2191. }
  2192. static int arm_smmu_enable_nesting(struct iommu_domain *domain)
  2193. {
  2194. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2195. int ret = 0;
  2196. mutex_lock(&smmu_domain->init_mutex);
  2197. if (smmu_domain->smmu)
  2198. ret = -EPERM;
  2199. else
  2200. smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
  2201. mutex_unlock(&smmu_domain->init_mutex);
  2202. return ret;
  2203. }
  2204. static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
  2205. unsigned long quirks)
  2206. {
  2207. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2208. int ret = 0;
  2209. mutex_lock(&smmu_domain->init_mutex);
  2210. if (smmu_domain->smmu)
  2211. ret = -EPERM;
  2212. else
  2213. smmu_domain->pgtbl_quirks = quirks;
  2214. mutex_unlock(&smmu_domain->init_mutex);
  2215. return ret;
  2216. }
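/*
 * of_xlate packs the firmware stream specifier into a single fwid: cell 0
 * is the stream ID and the optional cell 1 (or a "stream-match-mask"
 * property on the SMMU node) supplies the SMR mask. An illustrative client
 * entry, with an assumed SMMU phandle name and made-up values:
 *
 *	iommus = <&apps_smmu 0x420 0x2>;
 */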
  2217. static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
  2218. {
  2219. u32 mask, fwid = 0;
  2220. if (args->args_count > 0)
  2221. fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
  2222. if (args->args_count > 1)
  2223. fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
  2224. else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
  2225. fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
  2226. return iommu_fwspec_add_ids(dev, &fwid, 1);
  2227. }
  2228. static void arm_smmu_get_resv_regions(struct device *dev,
  2229. struct list_head *head)
  2230. {
  2231. struct iommu_resv_region *region;
  2232. int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
  2233. region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
  2234. prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
  2235. if (!region)
  2236. return;
  2237. list_add_tail(&region->list, head);
  2238. iommu_dma_get_resv_regions(dev, head);
  2239. qcom_iommu_generate_resv_regions(dev, head);
  2240. }
  2241. static int arm_smmu_def_domain_type(struct device *dev)
  2242. {
  2243. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  2244. const struct arm_smmu_impl *impl;
  2245. if (!cfg)
  2246. return 0;
  2247. impl = cfg->smmu->impl;
  2248. if (using_legacy_binding)
  2249. return IOMMU_DOMAIN_IDENTITY;
  2250. if (impl && impl->def_domain_type)
  2251. return impl->def_domain_type(dev);
  2252. return 0;
  2253. }
  2254. static inline void __arm_smmu_sid_switch_touch_cbar(struct arm_smmu_device *smmu,
  2255. struct arm_smmu_master_cfg *cfg,
  2256. struct iommu_fwspec *fwspec)
  2257. {
  2258. int i, idx;
  2259. u8 cbndx;
  2260. /* Use for_each_cfg_sme() to look up the context bank index */
  2261. for_each_cfg_sme(cfg, fwspec, i, idx) {
  2262. cbndx = smmu->s2crs[idx].cbndx;
  2263. /* Touch the CBAR by calling arm_smmu_write_context_bank() */
  2264. arm_smmu_write_context_bank(smmu, cbndx);
  2265. break;
  2266. }
  2267. }
  2268. static int __arm_smmu_sid_switch(struct device *dev, void *data)
  2269. {
  2270. struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  2271. struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
  2272. struct arm_smmu_device *smmu;
  2273. enum sid_switch_direction dir = (typeof(dir))data;
  2274. int i, idx;
  2275. if (!fwspec || !cfg)
  2276. return 0;
  2277. smmu = cfg->smmu;
  2278. arm_smmu_rpm_get(smmu);
  2279. mutex_lock(&smmu->stream_map_mutex);
  2280. /*
  2281. * When activating an SMR on a VM, we need to ensure that the SMR's
  2282. * corresponding CBAR has the right VMID value, which is provided by the
  2283. * hypervisor. Touch the CBAR register before doing the SID-switch to do
  2284. * this.
  2285. */
  2286. if (dir == SID_ACQUIRE)
  2287. __arm_smmu_sid_switch_touch_cbar(smmu, cfg, fwspec);
  2288. for_each_cfg_sme(cfg, fwspec, i, idx) {
  2289. smmu->smrs[idx].valid = dir == SID_ACQUIRE;
  2290. arm_smmu_write_sme(smmu, idx);
  2291. }
  2292. mutex_unlock(&smmu->stream_map_mutex);
2293. /* Add a barrier to ensure that the SMR register writes have completed. */
  2294. wmb();
  2295. arm_smmu_rpm_put(smmu);
  2296. return 0;
  2297. }
  2298. /*
  2299. * Some devices support operation with different levels of security. In some
  2300. * modes, HLOS is no longer responsible for managing the S1 translations for
2301. * a device. Unfortunately, the device may still use the same set of SIDs, so
2302. * to prevent a potential stream-match conflict fault, HLOS needs to remove
2303. * the SIDs from its SMRs. Enforcement of this policy is implemented through
2304. * virtualization of the SMR/S2CR registers.
  2305. */
  2306. static int arm_smmu_sid_switch(struct device *dev,
  2307. enum sid_switch_direction dir)
  2308. {
  2309. struct iommu_group *group;
  2310. int ret;
  2311. group = iommu_group_get(dev);
  2312. ret = iommu_group_for_each_dev(group, (void *)dir,
  2313. __arm_smmu_sid_switch);
  2314. iommu_group_put(group);
  2315. return ret;
  2316. }
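/*
 * Usage sketch (the exact release constant name is assumed): before handing
 * its streams to another execution environment, a client calls
 * arm_smmu_sid_switch() with the release direction, which clears SMR_VALID
 * for each of its SMEs; calling it again with SID_ACQUIRE re-validates them
 * once HLOS regains ownership of the streams.
 */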
  2317. static int arm_smmu_get_context_bank_nr(struct iommu_domain *domain)
  2318. {
  2319. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2320. int ret;
  2321. mutex_lock(&smmu_domain->init_mutex);
  2322. if (!smmu_domain->smmu)
  2323. ret = -EINVAL;
  2324. else
  2325. ret = smmu_domain->cfg.cbndx;
  2326. mutex_unlock(&smmu_domain->init_mutex);
  2327. return ret;
  2328. }
  2329. static int arm_smmu_get_asid_nr(struct iommu_domain *domain)
  2330. {
  2331. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2332. int ret;
  2333. mutex_lock(&smmu_domain->init_mutex);
  2334. if (!smmu_domain->smmu)
  2335. ret = -EINVAL;
  2336. else
  2337. ret = smmu_domain->cfg.asid;
  2338. mutex_unlock(&smmu_domain->init_mutex);
  2339. return ret;
  2340. }
  2341. static int arm_smmu_set_secure_vmid(struct iommu_domain *domain, enum vmid vmid)
  2342. {
  2343. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2344. int ret = 0;
  2345. mutex_lock(&smmu_domain->init_mutex);
  2346. if (smmu_domain->smmu)
  2347. ret = -EPERM;
  2348. else if (WARN(smmu_domain->secure_vmid != VMID_INVAL, "secure vmid already set"))
  2349. ret = -EPERM;
  2350. else
  2351. smmu_domain->secure_vmid = vmid;
  2352. mutex_unlock(&smmu_domain->init_mutex);
  2353. return ret;
  2354. }
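/*
 * Fault-model bits are OR-able; for example, a client that wants faults to
 * be recoverable and non-stalling would pass (through whichever qcom_iommu
 * wrapper exposes this op - the exact entry point is assumed here):
 *
 *	QCOM_IOMMU_FAULT_MODEL_NON_FATAL | QCOM_IOMMU_FAULT_MODEL_NO_STALL
 *
 * This must be done before the domain is attached, since the request is
 * rejected with -EPERM once smmu_domain->smmu is set.
 */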
  2355. static int arm_smmu_set_fault_model(struct iommu_domain *domain, int fault_model)
  2356. {
  2357. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2358. struct arm_smmu_fault_model *domain_model = &smmu_domain->fault_model;
  2359. int ret = 0;
  2360. mutex_lock(&smmu_domain->init_mutex);
  2361. if (smmu_domain->smmu) {
  2362. ret = -EPERM;
  2363. } else {
  2364. domain_model->non_fatal = FIELD_GET(QCOM_IOMMU_FAULT_MODEL_NON_FATAL, fault_model);
  2365. domain_model->no_cfre = FIELD_GET(QCOM_IOMMU_FAULT_MODEL_NO_CFRE, fault_model);
  2366. domain_model->no_stall = FIELD_GET(QCOM_IOMMU_FAULT_MODEL_NO_STALL, fault_model);
  2367. domain_model->hupcf = FIELD_GET(QCOM_IOMMU_FAULT_MODEL_HUPCF, fault_model);
  2368. }
  2369. mutex_unlock(&smmu_domain->init_mutex);
  2370. return ret;
  2371. }
  2372. static void arm_smmu_set_fault_handler_irq(struct iommu_domain *domain,
  2373. fault_handler_irq_t handler_irq, void *token)
  2374. {
  2375. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2376. smmu_domain->fault_handler_irq = handler_irq;
  2377. smmu_domain->handler_irq_token = token;
  2378. }
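/*
 * When "qcom,iommu-earlymap" is present, arm_smmu_setup_default_domain()
 * sets delayed_s1_trans_enable and the context bank's SCTLR.M is left clear
 * when the domain is initialised. The client calls this op afterwards to
 * set SCTLR.M and turn stage-1 translation on.
 */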
  2379. static int arm_smmu_enable_s1_translation(struct iommu_domain *domain)
  2380. {
  2381. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2382. struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  2383. struct arm_smmu_device *smmu = smmu_domain->smmu;
  2384. int idx;
  2385. struct arm_smmu_cb *cb;
  2386. int ret;
  2387. mutex_lock(&smmu_domain->init_mutex);
  2388. if (!smmu_domain->smmu) {
  2389. ret = -EPERM;
  2390. goto out;
  2391. } else if (!smmu_domain->delayed_s1_trans_enable) {
  2392. ret = 0;
  2393. goto out;
  2394. }
  2395. ret = arm_smmu_rpm_get(smmu);
  2396. if (ret < 0)
  2397. goto out;
  2398. idx = cfg->cbndx;
  2399. cfg->sctlr.m = 1;
  2400. cb = &smmu->cbs[idx];
  2401. cb->sctlr = arm_smmu_lpae_sctlr(cfg);
  2402. arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, cb->sctlr);
  2403. arm_smmu_rpm_put(smmu);
  2404. smmu_domain->delayed_s1_trans_enable = false;
  2405. out:
  2406. mutex_unlock(&smmu_domain->init_mutex);
  2407. return ret;
  2408. }
  2409. static int arm_smmu_get_mappings_configuration(struct iommu_domain *domain)
  2410. {
  2411. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2412. int ret = 0;
  2413. mutex_lock(&smmu_domain->init_mutex);
  2414. if (!smmu_domain->smmu) {
  2415. ret = -EPERM;
  2416. } else {
  2417. ret |= smmu_domain->mapping_cfg.s1_bypass ? QCOM_IOMMU_MAPPING_CONF_S1_BYPASS : 0;
  2418. ret |= smmu_domain->mapping_cfg.atomic ? QCOM_IOMMU_MAPPING_CONF_ATOMIC : 0;
  2419. ret |= smmu_domain->mapping_cfg.fast ? QCOM_IOMMU_MAPPING_CONF_FAST : 0;
  2420. }
  2421. mutex_unlock(&smmu_domain->init_mutex);
  2422. return ret;
  2423. }
  2424. static void arm_smmu_skip_tlb_management(struct iommu_domain *domain,
  2425. bool skip)
  2426. {
  2427. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  2428. unsigned long flags;
  2429. spin_lock_irqsave(&smmu_domain->iotlb_gather_lock, flags);
  2430. smmu_domain->skip_tlb_management = skip;
  2431. spin_unlock_irqrestore(&smmu_domain->iotlb_gather_lock, flags);
  2432. }
  2433. static struct qcom_iommu_ops arm_smmu_ops = {
  2434. .iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
  2435. .sid_switch = arm_smmu_sid_switch,
  2436. .get_fault_ids = arm_smmu_get_fault_ids,
  2437. .get_context_bank_nr = arm_smmu_get_context_bank_nr,
  2438. .get_asid_nr = arm_smmu_get_asid_nr,
  2439. .set_secure_vmid = arm_smmu_set_secure_vmid,
  2440. .set_fault_model = arm_smmu_set_fault_model,
  2441. .set_fault_handler_irq = arm_smmu_set_fault_handler_irq,
  2442. .enable_s1_translation = arm_smmu_enable_s1_translation,
  2443. .get_mappings_configuration = arm_smmu_get_mappings_configuration,
  2444. .skip_tlb_management = arm_smmu_skip_tlb_management,
  2445. .domain_ops = {
  2446. .attach_dev = arm_smmu_attach_dev,
  2447. .map_pages = arm_smmu_map_pages,
  2448. .unmap_pages = arm_smmu_unmap_pages,
  2449. .flush_iotlb_all = arm_smmu_flush_iotlb_all,
  2450. .iotlb_sync = arm_smmu_iotlb_sync,
  2451. .iova_to_phys = arm_smmu_iova_to_phys,
  2452. .enable_nesting = arm_smmu_enable_nesting,
  2453. .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
  2454. .free = arm_smmu_domain_free,
  2455. },
  2456. .iommu_ops = {
  2457. .capable = arm_smmu_capable,
  2458. .domain_alloc = arm_smmu_domain_alloc,
  2459. .probe_device = arm_smmu_probe_device,
  2460. .release_device = arm_smmu_release_device,
  2461. .probe_finalize = arm_smmu_probe_finalize,
  2462. .device_group = arm_smmu_device_group,
  2463. .of_xlate = arm_smmu_of_xlate,
  2464. .get_resv_regions = arm_smmu_get_resv_regions,
  2465. .def_domain_type = arm_smmu_def_domain_type,
  2466. .pgsize_bitmap = -1UL, /* Restricted during device attach */
  2467. .owner = THIS_MODULE,
  2468. .default_domain_ops = &arm_smmu_ops.domain_ops,
  2469. },
  2470. };
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
#ifndef CONFIG_QTI_QUIN_GVM
	int i;
#endif
	u32 reg;

	/* clear global FSR */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);

#ifndef CONFIG_QTI_QUIN_GVM
	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	mutex_lock(&smmu->stream_map_mutex);
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);
	mutex_unlock(&smmu->stream_map_mutex);

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		arm_smmu_write_context_bank(smmu, i);
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
	}
#endif

	/* Invalidate the TLB, just in case */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);

	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
		ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= ARM_SMMU_sCR0_USFCFG;
	else
		reg &= ~ARM_SMMU_sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~ARM_SMMU_sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(ARM_SMMU_sCR0_BSU);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= ARM_SMMU_sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= ARM_SMMU_sCR0_EXIDENABLE;

	/* Force bypass transaction to be Non-Shareable & not io-coherent */
	reg &= ~ARM_SMMU_sCR0_SHCFG;
	reg |= FIELD_PREP(ARM_SMMU_sCR0_SHCFG, ARM_SMMU_sCR0_SHCFG_NSH);

	if (smmu->impl && smmu->impl->reset)
		smmu->impl->reset(smmu);

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
}
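
/*
 * Decode the 3-bit address-size fields (IAS/OAS/UBS) found in the SMMU ID
 * registers: 0 -> 32, 1 -> 36, 2 -> 40, 3 -> 42, 4 -> 44, and 5 (or any
 * larger/reserved encoding) -> 48 bits.
 */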
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
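
/*
 * Take over stream mappings programmed by an earlier boot stage.
 *
 * The optional "qcom,handoff-smrs" DT property is a list of <id mask>
 * pairs; an illustrative fragment (the values here are hypothetical and
 * platform specific) would look like:
 *
 *	qcom,handoff-smrs = <0x140 0x3f>;
 *
 * Any SMR already valid in hardware whose id/mask is covered by one of
 * these entries is mirrored into smmu->smrs[], its S2CR settings are
 * preserved and pinned, and the context bank it points at is reserved in
 * context_map so the driver will not hand it out again.
 */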
static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
{
	u32 i, smr, s2cr;
	u32 index;
	struct arm_smmu_smr smrs;
	struct arm_smmu_smr *handoff_smrs;
	int num_handoff_smrs;
	const __be32 *cell;

	cell = of_get_property(smmu->dev->of_node, "qcom,handoff-smrs", NULL);
	if (!cell)
		return 0;

	num_handoff_smrs = of_property_count_elems_of_size(smmu->dev->of_node,
							   "qcom,handoff-smrs",
							   sizeof(u32) * 2);
	if (num_handoff_smrs < 0)
		return 0;

	handoff_smrs = kcalloc(num_handoff_smrs, sizeof(*handoff_smrs),
			       GFP_KERNEL);
	if (!handoff_smrs)
		return -ENOMEM;

	for (i = 0; i < num_handoff_smrs; i++) {
		handoff_smrs[i].id = of_read_number(cell++, 1);
		handoff_smrs[i].mask = of_read_number(cell++, 1);
		handoff_smrs[i].valid = true;
		handoff_smrs[i].used = true;
	}

	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (smmu->features & ARM_SMMU_FEAT_EXIDS) {
			s2cr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_S2CR(i));
			smrs.valid = FIELD_GET(ARM_SMMU_S2CR_EXIDVALID, s2cr);
			if (!smrs.valid)
				continue;

			smrs.used = true;
			smrs.id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smrs.mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
		} else {
			smrs.valid = FIELD_GET(ARM_SMMU_SMR_VALID, smr);
			if (!smrs.valid)
				continue;

			smrs.used = true;
			smrs.id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			/*
			 * The SMR mask covers bits 30:16 when extended stream
			 * matching is not enabled.
			 */
			smrs.mask = FIELD_GET(ARM_SMMU_SMR_MASK,
					      smr & ~ARM_SMMU_SMR_VALID);
		}

		for (index = 0; index < num_handoff_smrs; index++) {
			if (!handoff_smrs[index].valid)
				continue;

			/* smrs is a subset of handoff_smrs */
			if ((handoff_smrs[index].mask & smrs.mask) == smrs.mask &&
			    !((handoff_smrs[index].id ^ smrs.id) & ~handoff_smrs[index].mask)) {
				dev_dbg(smmu->dev,
					"handoff-smrs match idx %d, id 0x%x, mask 0x%x\n",
					i, smrs.id, smrs.mask);

				s2cr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_S2CR(i));

				smmu->smrs[i] = smrs;

				smmu->s2crs[i].group = NULL;
				smmu->s2crs[i].count = 0;
				smmu->s2crs[i].type = FIELD_GET(ARM_SMMU_S2CR_TYPE, s2cr);
				smmu->s2crs[i].privcfg = FIELD_GET(ARM_SMMU_S2CR_PRIVCFG, s2cr);
				smmu->s2crs[i].cbndx = FIELD_GET(ARM_SMMU_S2CR_CBNDX, s2cr);
				smmu->s2crs[i].pinned = true;

				bitmap_set(smmu->context_map, smmu->s2crs[i].cbndx, 1);

				if (!(smmu->options & ARM_SMMU_OPT_MULTI_MATCH_HANDOFF_SMR)) {
					handoff_smrs[index].valid = false;
					handoff_smrs[index].used = false;
				}

				break;
			} else {
				dev_dbg(smmu->dev,
					"handoff-smrs no match idx %d, id 0x%x, mask 0x%x\n",
					i, smrs.id, smrs.mask);
			}
		}
	}

	kfree(handoff_smrs);

	return 0;
}
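
/*
 * Probe the hardware configuration from the ID registers: ID0 for the
 * supported translation stages, stream matching and coherency; ID1 for
 * the page/register layout and the number of stream mapping groups and
 * context banks; ID2 for address sizes and page table formats. The QCOM
 * "qcom,num-smr-override" and "qcom,num-context-banks-override" DT
 * properties may shrink the advertised SMR and context bank counts.
 */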
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int size;
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i, ret;
	unsigned int num_mapping_groups_override = 0;
	unsigned int num_context_banks_override = 0;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);

	if (id & ARM_SMMU_ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ARM_SMMU_ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ARM_SMMU_ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ARM_SMMU_ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
	}
	smmu->streamid_mask = size - 1;
	if (id & ARM_SMMU_ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups", size);
	}

	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	ret = of_property_read_u32(smmu->dev->of_node, "qcom,num-smr-override",
				   &num_mapping_groups_override);
	if (!ret && size > num_mapping_groups_override) {
		dev_dbg(smmu->dev, "%d mapping groups overridden to %d\n",
			size, num_mapping_groups_override);
		size = min(size, num_mapping_groups_override);
	}

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 ||
	    !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
	if (smmu->numpage != 2 * size << smmu->pgshift)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
			 2 * size << smmu->pgshift, smmu->numpage);
	/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
	if (!(smmu->options & ARM_SMMU_OPT_IGNORE_NUMPAGENDXB))
		smmu->numpage = size;
	else
		smmu->numpage = (smmu->numpage / 2) >> smmu->pgshift;

	smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
	smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);

	ret = of_property_read_u32(smmu->dev->of_node,
				   "qcom,num-context-banks-override",
				   &num_context_banks_override);
	if (!ret && smmu->num_context_banks > num_context_banks_override) {
		dev_dbg(smmu->dev, "%d context banks overridden to %d\n",
			smmu->num_context_banks,
			num_context_banks_override);
		smmu->num_context_banks = min(smmu->num_context_banks,
					      num_context_banks_override);
	}

	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
	smmu->pa_size = size;

	if (id & ARM_SMMU_ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ARM_SMMU_ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ARM_SMMU_ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ARM_SMMU_ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	if (smmu->impl && smmu->impl->cfg_probe) {
		ret = smmu->impl->cfg_probe(smmu);
		if (ret)
			return ret;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.iommu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.iommu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.iommu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);
	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);
	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);
	return 0;
}
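
/*
 * Static match data: each DT compatible (and each ACPI IORT model, below)
 * maps to an architecture version and an implementation, which
 * arm_smmu_impl_init() uses to select implementation-specific hooks.
 */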
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
static const struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU401:
		smmu->version = ARM_SMMU_V1_64K;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
		smmu->version = ARM_SMMU_V2;
		smmu->model = CAVIUM_SMMUV2;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
				      u32 *global_irqs, u32 *pmu_irqs)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	*global_irqs = 1;
	*pmu_irqs = 0;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
					     u32 *global_irqs, u32 *pmu_irqs)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
				    u32 *global_irqs, u32 *pmu_irqs)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = smmu->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
		return dev_err_probe(dev, -ENODEV,
				     "missing #global-interrupts property\n");
	*pmu_irqs = 0;

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding) {
			pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
				  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
		}
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	parse_driver_options(smmu);

	return 0;
}
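
/*
 * Preserve any firmware-described (IORT RMR) boot mappings by installing
 * bypass SMR/S2CR entries for their stream IDs. Client access is disabled
 * here via sCR0.CLIENTPD and re-enabled by arm_smmu_device_reset() once
 * the bypass entries are in place.
 */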
static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
{
	struct list_head rmr_list;
	struct iommu_resv_region *e;
	int idx, cnt = 0;
	u32 reg;

	INIT_LIST_HEAD(&rmr_list);
	iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);

	/*
	 * Rather than trying to look at existing mappings that
	 * are setup by the firmware and then invalidate the ones
	 * that do not have matching RMR entries, just disable the
	 * SMMU until it gets enabled again in the reset routine.
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
	reg |= ARM_SMMU_sCR0_CLIENTPD;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);

	list_for_each_entry(e, &rmr_list, list) {
		struct iommu_iort_rmr_data *rmr;
		int i;

		rmr = container_of(e, struct iommu_iort_rmr_data, rr);
		for (i = 0; i < rmr->num_sids; i++) {
			idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
			if (idx < 0)
				continue;

			if (smmu->s2crs[idx].count == 0) {
				smmu->smrs[idx].id = rmr->sids[i];
				smmu->smrs[idx].mask = 0;
				smmu->smrs[idx].valid = true;
				smmu->smrs[idx].used = true;
			}
			smmu->s2crs[idx].count++;
			smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;

			cnt++;
		}
	}

	dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
		   cnt == 1 ? "" : "s");
	iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}

/* Skip the PCIe IOMMU bus probe when both SMMUv2 and SMMUv3 are enabled. */
#if IS_ENABLED(CONFIG_ARM_PARAVIRT_SMMU_V3)
static void arm_smmu_iommu_pcie_device_probe(void *data, struct iommu_device *iommu,
					     struct bus_type *bus, bool *skip)
{
	if (iommu != (struct iommu_device *)data)
		return;

	*skip = !strcmp(bus->name, "pci");
}
#endif
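
/*
 * Main probe path. Ordering matters: parse the DT/ACPI description, map
 * the registers, apply implementation-specific init, collect IRQs and
 * power resources, power on, read the hardware configuration, take over
 * handoff SMRs, register with the IOMMU core, and only then reset/enable
 * the SMMU and run the self-tests. Runtime PM is enabled last, and only
 * when a GDSC is present (see the comment near the end of the function).
 */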
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;
	u32 global_irqs, pmu_irqs;
	irqreturn_t (*global_fault)(int irq, void *dev);

	/* We depend on this device for fastmap */
	if (!qcom_dma_iommu_is_ready())
		return -EPROBE_DEFER;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
	else
		err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
	if (err)
		return err;

	smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->ioaddr = res->start;

	/*
	 * The resource size should effectively match the value of SMMU_TOP;
	 * stash that temporarily until we know PAGESIZE to validate it with.
	 */
	smmu->numpage = resource_size(res);

	smmu = arm_smmu_impl_init(smmu);
	if (IS_ERR(smmu))
		return PTR_ERR(smmu);

	num_irqs = platform_irq_count(pdev);

	smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
	if (smmu->num_context_irqs <= 0)
		return dev_err_probe(dev, -ENODEV,
				     "found %d interrupts but expected at least %d\n",
				     num_irqs, global_irqs + pmu_irqs + 1);

	smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
				  sizeof(*smmu->irqs), GFP_KERNEL);
	if (!smmu->irqs)
		return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
				     smmu->num_context_irqs);

	for (i = 0; i < smmu->num_context_irqs; i++) {
		int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d, error: %d\n",
				global_irqs + pmu_irqs + i, irq);
			return irq;
		}
		smmu->irqs[i] = irq;
	}

	smmu->pwr = arm_smmu_init_power_resources(dev);
	if (IS_ERR(smmu->pwr))
		return PTR_ERR(smmu->pwr);

	/*
	 * We can't use arm_smmu_rpm_get() because pm-runtime isn't
	 * enabled yet.
	 */
	err = arm_smmu_power_on(smmu->pwr);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_power_off;

	if (smmu->version == ARM_SMMU_V2) {
		if (smmu->num_context_banks > smmu->num_context_irqs) {
			dev_err(dev,
				"found only %d context irq(s) but %d required\n",
				smmu->num_context_irqs, smmu->num_context_banks);
			err = -ENODEV;
			goto out_power_off;
		}

		/* Ignore superfluous interrupts */
		smmu->num_context_irqs = smmu->num_context_banks;
	}

	if (smmu->impl && smmu->impl->global_fault)
		global_fault = smmu->impl->global_fault;
	else
		global_fault = arm_smmu_global_fault;

	for (i = 0; i < global_irqs; i++) {
		int irq = platform_get_irq(pdev, i);

		/* Power is already on here, so unwind via out_power_off */
		if (irq < 0) {
			err = irq;
			goto out_power_off;
		}

		err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
				       "arm-smmu global fault", smmu);
		if (err) {
			dev_err_probe(dev, err,
				      "failed to request global IRQ %d (%u)\n",
				      i, irq);
			goto out_power_off;
		}
	}

	/* QCOM Additions */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENXIO;
		dev_err(dev, "Failed to get mem resource\n");
		goto out_power_off;
	}
	smmu->phys_addr = res->start;

	err = arm_smmu_handoff_cbs(smmu);
	if (err)
		goto out_power_off;

	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &smmu->ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		goto out_power_off;
	}

#if IS_ENABLED(CONFIG_ARM_PARAVIRT_SMMU_V3)
	if (of_find_compatible_node(NULL, NULL, "arm,virt-smmu-v3"))
		register_trace_android_vh_bus_iommu_probe(arm_smmu_iommu_pcie_device_probe,
							   (void *)&smmu->iommu);
#endif

	err = iommu_device_register(&smmu->iommu, &arm_smmu_ops.iommu_ops, dev);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		goto remove_iommu_sysfs_node;
	}

	platform_set_drvdata(pdev, smmu);

	/* Check for RMRs and install bypass SMRs if any */
	arm_smmu_rmr_install_bypass_smr(smmu);

	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);
	arm_smmu_interrupt_selftest(smmu);

	/*
	 * We want to avoid touching dev->power.lock in fastpaths unless
	 * it's really going to do something useful - pm_runtime_enabled()
	 * can serve as an ideal proxy for that decision. So, conditionally
	 * enable pm_runtime.
	 */
	/*
	 * QCOM's nonupstream gdsc driver doesn't support pm_domains.
	 * So check for presence of gdsc instead.
	 */
	if (smmu->pwr->num_gdscs) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return 0;

remove_iommu_sysfs_node:
	iommu_device_sysfs_remove(&smmu->iommu);
out_power_off:
	arm_smmu_power_off(smmu, smmu->pwr);

	return err;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_notice(&pdev->dev, "disabling translation\n");

	if (smmu->impl && smmu->impl->device_remove)
		smmu->impl->device_remove(smmu);

	arm_smmu_rpm_get(smmu);
	/* Turn the thing off */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
	arm_smmu_rpm_put(smmu);

	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_force_suspend(smmu->dev);
	else
		arm_smmu_power_off(smmu, smmu->pwr);
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	arm_smmu_device_shutdown(pdev);

	return 0;
}
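
/*
 * Runtime PM only gates the SMMU's power resources via smmu->pwr; no
 * register save/restore is done here since, per the comment in
 * arm_smmu_pm_resume_common(), the QCOM hardware retains register state.
 */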
static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = arm_smmu_power_on(smmu->pwr);
	if (ret)
		return ret;

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	arm_smmu_power_off(smmu, smmu->pwr);

	return 0;
}

static int __maybe_unused arm_smmu_pm_resume_common(struct device *dev)
{
	int ret;
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	if (pm_runtime_suspended(dev))
		return 0;

	ret = arm_smmu_runtime_resume(dev);
	if (ret) {
		clk_bulk_unprepare(smmu->num_clks, smmu->clks);
		return ret;
	}

	/*
	 * QCOM HW supports register retention. So we really only need to
	 * re-program the registers for hibernation. Don't do this during
	 * runtime_resume to avoid latency.
	 */
	arm_smmu_device_reset(smmu);

	return ret;
}

static int arm_smmu_pm_prepare(struct device *dev)
{
	if (!of_device_is_compatible(dev->of_node, "qcom,adreno-smmu"))
		return 0;

	/*
	 * For the GFX SMMU, a race between rpm_suspend and system suspend
	 * can deadlock: the CX vote is never dropped and suspend times out.
	 * Abort system suspend here if dev->power.usage_count is 1, which
	 * indicates that rpm_suspend is in progress and prepare is the
	 * caller holding this last reference. rpm_suspend can then continue
	 * and drop the CX vote; system suspend will be retried later and
	 * complete.
	 */
	if (pm_runtime_suspended(dev))
		return 0;

	return (atomic_read(&dev->power.usage_count) == 1) ? -EINPROGRESS : 0;
}

static int __maybe_unused arm_smmu_pm_restore_early(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	struct arm_smmu_domain *smmu_domain;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg *pgtbl_cfg;
	struct arm_smmu_cb *cb;
	int idx, ret;

	/*
	 * Restore the page tables for secure vmids as they are lost
	 * after hibernation in secure code context.
	 */
	for (idx = 0; idx < smmu->num_context_banks; idx++) {
		cb = &smmu->cbs[idx];
		if (!cb->cfg)
			continue;

		smmu_domain = cb_cfg_to_smmu_domain(cb->cfg);
		if (!arm_smmu_has_secure_vmid(smmu_domain))
			continue;

		pgtbl_cfg = &smmu_domain->pgtbl_info.cfg;
		pgtbl_ops = qcom_alloc_io_pgtable_ops(smmu_domain->pgtbl_fmt,
						      &smmu_domain->pgtbl_info,
						      smmu_domain);
		if (!pgtbl_ops) {
			dev_err(smmu->dev,
				"failed to allocate page tables during pm restore for cxt %d %s\n",
				idx, dev_name(dev));
			return -ENOMEM;
		}

		smmu_domain->pgtbl_ops = pgtbl_ops;
		arm_smmu_init_context_bank(smmu_domain, pgtbl_cfg);
	}

	arm_smmu_pm_resume_common(dev);

	ret = arm_smmu_runtime_suspend(dev);
	if (ret) {
		dev_err(dev, "Failed to suspend\n");
		return ret;
	}

	return 0;
}

static int __maybe_unused arm_smmu_pm_freeze_late(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	struct arm_smmu_domain *smmu_domain;
	struct arm_smmu_cb *cb;
	int idx, ret;

	ret = arm_smmu_power_on(smmu->pwr);
	if (ret) {
		dev_err(smmu->dev, "Couldn't power on the smmu during pm freeze: %d\n", ret);
		return ret;
	}

	for (idx = 0; idx < smmu->num_context_banks; idx++) {
		cb = &smmu->cbs[idx];
		if (cb && cb->cfg) {
			smmu_domain = cb_cfg_to_smmu_domain(cb->cfg);
			if (smmu_domain &&
			    arm_smmu_has_secure_vmid(smmu_domain)) {
				qcom_free_io_pgtable_ops(smmu_domain->pgtbl_ops);
			}
		}
	}

	arm_smmu_power_off(smmu, smmu->pwr);

	return 0;
}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	if (pm_suspend_target_state == PM_SUSPEND_MEM)
		return arm_smmu_pm_freeze_late(dev);

	if (pm_runtime_suspended(dev))
		goto clk_unprepare;

	ret = arm_smmu_runtime_suspend(dev);
	if (ret)
		return ret;

clk_unprepare:
	clk_bulk_unprepare(smmu->num_clks, smmu->clks);

	return ret;
}

static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	if (pm_suspend_target_state == PM_SUSPEND_MEM)
		return arm_smmu_pm_restore_early(dev);
	else
		return arm_smmu_pm_resume_common(dev);
}
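
/*
 * PM callback mapping: runtime suspend/resume toggle the power resources;
 * system suspend/resume reuse the hibernation paths when the target state
 * is PM_SUSPEND_MEM; freeze_late and restore_early (also used for
 * thaw_early) tear down and rebuild secure-VMID page tables around
 * hibernation.
 */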
static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
	.prepare = arm_smmu_pm_prepare,
	.suspend = arm_smmu_pm_suspend,
	.resume = arm_smmu_pm_resume,
	.thaw_early = arm_smmu_pm_restore_early,
	.freeze_late = arm_smmu_pm_freeze_late,
	.restore_early = arm_smmu_pm_restore_early,
};

static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name = "arm-smmu",
		.of_match_table = arm_smmu_of_match,
		.pm = &arm_smmu_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = arm_smmu_device_probe,
	.remove = arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};

static int __init arm_smmu_init(void)
{
	int ret;
	ktime_t cur;

	cur = ktime_get();

	ret = platform_driver_register(&qsmmuv500_tbu_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret) {
		platform_driver_unregister(&qsmmuv500_tbu_driver);
		return ret;
	}

	trace_smmu_init(ktime_us_delta(ktime_get(), cur));

	return ret;
}
subsys_initcall(arm_smmu_init);

static void __exit arm_smmu_exit(void)
{
	platform_driver_unregister(&arm_smmu_driver);
	platform_driver_unregister(&qsmmuv500_tbu_driver);
}
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <[email protected]>");
MODULE_ALIAS("platform:arm-smmu");
MODULE_LICENSE("GPL v2");
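
/*
 * Derive a short client name for fault reporting: return the portion of
 * dev_name() after the last ':', ',' or '.' delimiter. For example, a
 * (hypothetical) device named "soc:1d84000.qcom_dev" would be reported
 * as "qcom_dev"; PCI devices keep their full name.
 */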
static const char *__arm_smmu_get_devname(struct device *dev)
{
	const char *token;
	const char *delim = ":,.";
	const char *devname;

	token = dev_name(dev);
	if (!token)
		return "No Name";

	pr_info("smmu client name - %s\n", token);

	if (dev_is_pci(dev))
		return token;

	while (true) {
		devname = token;
		token = strpbrk(token, delim);
		if (!token)
			break;
		token++; /* skip delimiter */
	}

	return devname;
}

static const char *arm_smmu_get_devname(const struct arm_smmu_domain *smmu_domain,
					u32 sid)
{
	struct iommu_fwspec *fwspec = NULL;
	struct device *dev = NULL;
	unsigned int i;

	if (smmu_domain->dev)
		fwspec = dev_iommu_fwspec_get(smmu_domain->dev);

	for (i = 0; fwspec && i < fwspec->num_ids; i++) {
		if ((fwspec->ids[i] & smmu_domain->smmu->streamid_mask) == sid) {
			dev = smmu_domain->dev;
			break;
		}
	}

	if (!fwspec || !dev)
		return "No Device";

	return __arm_smmu_get_devname(dev);
}
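
/*
 * Debug aid for fatal context faults: when the domain's fault model is
 * not marked non_fatal, read the faulting stream ID from CBFRSYNRA and
 * panic with the (best-effort) client device name so the crash report
 * identifies the offender.
 */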
static __always_inline void __sec_debug_bug_on_enosys(
		struct arm_smmu_domain *smmu_domain, int idx)
{
	bool cond = !smmu_domain->fault_model.non_fatal;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	u32 cbfrsynra;
	u32 sid;

	if (likely(!cond))
		return;

	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
	sid = cbfrsynra & CBFRSYNRA_SID_MASK;

	panic("%s SMMU Fault - SID=0x%x", arm_smmu_get_devname(smmu_domain, sid), sid);
}