// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/component.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/input.h>
#include <linux/interconnect.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_fdt.h>
#include <linux/module.h>
#include <linux/msm_kgsl.h>
#include <linux/regulator/consumer.h>
#include <linux/nvmem-consumer.h>
#include <linux/reset.h>
#include <linux/trace.h>
#include <linux/units.h>
#include <linux/version.h>
#include <soc/qcom/dcvs.h>
#include <soc/qcom/socinfo.h>
#include <linux/suspend.h>

#include "adreno.h"
#include "adreno_a3xx.h"
#include "adreno_a5xx.h"
#include "adreno_a6xx.h"
#include "adreno_compat.h"
#include "adreno_pm4types.h"
#include "adreno_trace.h"
#include "kgsl_bus.h"
#include "kgsl_reclaim.h"
#include "kgsl_trace.h"
#include "kgsl_util.h"

/* Include the master list of GPU cores that are supported */
#include "adreno-gpulist.h"

static void adreno_unbind(struct device *dev);
static void adreno_input_work(struct work_struct *work);
static int adreno_soft_reset(struct kgsl_device *device);
static unsigned int counter_delta(struct kgsl_device *device,
	unsigned int reg, unsigned int *counter);
static struct device_node *
	adreno_get_gpu_model_node(struct platform_device *pdev);

static struct adreno_device device_3d0;

static bool adreno_preemption_enable;

/* Nice level for the higher priority GPU start thread */
int adreno_wake_nice = -7;

/* Number of milliseconds to stay active after a wake on touch */
unsigned int adreno_wake_timeout = 100;
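
/*
 * Extract the firmware version from the ucode image header: data[1] holds
 * the version and, when its low nibble is 0xa, the low 12 bits are taken
 * from data[3] instead.
 */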
static u32 get_ucode_version(const u32 *data)
{
	u32 version;

	version = data[1];

	if ((version & 0xf) != 0xa)
		return version;

	version &= ~0xfff;

	return version | ((data[3] & 0xfff000) >> 12);
}

int adreno_get_firmware(struct adreno_device *adreno_dev,
		const char *fwfile, struct adreno_firmware *firmware)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct firmware *fw = NULL;
	int ret;

	if (!IS_ERR_OR_NULL(firmware->memdesc))
		return 0;

	ret = request_firmware(&fw, fwfile, &device->pdev->dev);
	if (ret) {
		dev_err(device->dev, "request_firmware(%s) failed: %d\n",
			fwfile, ret);
		return ret;
	}

	firmware->memdesc = kgsl_allocate_global(device, fw->size - 4, 0,
				KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_UCODE,
				"ucode");

	ret = PTR_ERR_OR_ZERO(firmware->memdesc);
	if (!ret) {
		memcpy(firmware->memdesc->hostptr, &fw->data[4], fw->size - 4);
		firmware->size = (fw->size - 4) / sizeof(u32);
		firmware->version = get_ucode_version((u32 *)fw->data);
	}

	release_firmware(fw);
	return ret;
}

int adreno_zap_shader_load(struct adreno_device *adreno_dev,
		const char *name)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	if (!name || adreno_dev->zap_loaded)
		return 0;

	ret = kgsl_zap_shader_load(&device->pdev->dev, name);
	if (!ret)
		adreno_dev->zap_loaded = true;

	return ret;
}

#if (IS_ENABLED(CONFIG_QCOM_KGSL_HIBERNATION) || IS_ENABLED(CONFIG_DEEPSLEEP))
static void adreno_zap_shader_unload(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	if (adreno_dev->zap_loaded) {
		ret = kgsl_zap_shader_unload(&device->pdev->dev);
		if (!ret)
			adreno_dev->zap_loaded = false;
	}
}
#endif

/**
 * adreno_readreg64() - Read a 64bit register by getting its offset from the
 * offset array defined in gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @lo: lower 32bit register enum that is to be read
 * @hi: higher 32bit register enum that is to be read
 * @val: 64 bit register value read is placed here
 */
void adreno_readreg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t *val)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int val_lo = 0, val_hi = 0;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, lo))
		kgsl_regread(device, gpudev->reg_offsets[lo], &val_lo);
	if (adreno_checkreg_off(adreno_dev, hi))
		kgsl_regread(device, gpudev->reg_offsets[hi], &val_hi);

	*val = (val_lo | ((uint64_t)val_hi << 32));
}

/**
 * adreno_get_rptr() - Get the current ringbuffer read pointer
 * @rb: Pointer to the ringbuffer to query
 *
 * Get the latest rptr
 */
unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 rptr = 0;

	if (adreno_is_a3xx(adreno_dev))
		kgsl_regread(device, A3XX_CP_RB_RPTR, &rptr);
	else
		kgsl_sharedmem_readl(device->scratch, &rptr,
			SCRATCH_RB_OFFSET(rb->id, rptr));

	return rptr;
}

static void adreno_touch_wakeup(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/*
	 * Don't schedule adreno_start in a high priority workqueue, we are
	 * already in a workqueue which should be sufficient
	 */
	kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);

	/*
	 * When waking up from a touch event we want to stay active long enough
	 * for the user to send a draw command. The default idle timer timeout
	 * is shorter than we want so go ahead and push the idle timer out
	 * further for this special case
	 */
	mod_timer(&device->idle_timer,
		jiffies + msecs_to_jiffies(adreno_wake_timeout));
}

/*
 * A workqueue callback responsible for actually turning on the GPU after a
 * touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any
 * active_count protection to avoid the need to maintain state. Either
 * somebody will start using the GPU or the idle timer will fire and put the
 * GPU back into slumber.
 */
static void adreno_input_work(struct work_struct *work)
{
	struct adreno_device *adreno_dev = container_of(work,
			struct adreno_device, input_work);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);

	mutex_lock(&device->mutex);

	device->pwrctrl.wake_on_touch = true;

	ops->touch_wakeup(adreno_dev);

	mutex_unlock(&device->mutex);
}

/* Wake up the touch event kworker to initiate GPU wakeup */
void adreno_touch_wake(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/*
	 * Don't do anything if nothing has been rendered since the last time
	 * we were here
	 */
	if (device->pwrctrl.wake_on_touch)
		return;

	if (gmu_core_isenabled(device) || (device->state == KGSL_STATE_SLUMBER))
		schedule_work(&adreno_dev->input_work);
}

/*
 * Process input events and schedule work if needed. At this point we are only
 * interested in grokking EV_ABS touchscreen events
 */
static void adreno_input_event(struct input_handle *handle, unsigned int type,
		unsigned int code, int value)
{
	struct kgsl_device *device = handle->handler->private;

	/* Only consider EV_ABS (touch) events */
	if (type == EV_ABS)
		adreno_touch_wake(device);
}

#ifdef CONFIG_INPUT
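/* Register and open an input handle for each input device matching our id table */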
static int adreno_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int ret;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = handler->name;

	ret = input_register_handle(handle);
	if (ret) {
		kfree(handle);
		return ret;
	}

	ret = input_open_device(handle);
	if (ret) {
		input_unregister_handle(handle);
		kfree(handle);
	}

	return ret;
}

static void adreno_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
#else
static int adreno_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	return 0;
}
static void adreno_input_disconnect(struct input_handle *handle) {}
#endif

/*
 * We are only interested in EV_ABS events so only register handlers for those
 * input devices that have EV_ABS events
 */
static const struct input_device_id adreno_input_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		/* assumption: MT_.._X & MT_.._Y are in the same long */
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
				BIT_MASK(ABS_MT_POSITION_X) |
				BIT_MASK(ABS_MT_POSITION_Y) },
	},
	{ },
};

static struct input_handler adreno_input_handler = {
	.event = adreno_input_event,
	.connect = adreno_input_connect,
	.disconnect = adreno_input_disconnect,
	.name = "kgsl",
	.id_table = adreno_input_ids,
};

/*
 * _soft_reset() - Soft reset GPU
 * @adreno_dev: Pointer to adreno device
 *
 * Soft reset the GPU by doing an AHB write of value 1 to the RBBM_SW_RESET
 * register. This is used when we want to reset the GPU without
 * turning off the GFX power rail. The reset, when asserted, resets
 * all the HW logic, restores GPU registers to their default state and
 * flushes out pending VBIF transactions.
 */
static void _soft_reset(struct adreno_device *adreno_dev)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg;

	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
	/*
	 * Do a dummy read to get a brief read cycle delay for the
	 * reset to take effect
	 */
	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);

	/* The SP/TP regulator gets turned off after a soft reset */
	clear_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED, &adreno_dev->priv);
	if (gpudev->regulator_enable)
		gpudev->regulator_enable(adreno_dev);
}

/**
 * adreno_irqctrl() - Enables/disables the RBBM interrupt mask
 * @adreno_dev: Pointer to an adreno_device
 * @state: 1 to enable (unmask) the interrupts, 0 to mask them
 * Power: The caller of this function must make sure to use OOBs
 * so that we know that the GPU is powered on
 */
void adreno_irqctrl(struct adreno_device *adreno_dev, int state)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!adreno_dev->irq_mask)
		return;

	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK,
		state ? adreno_dev->irq_mask : 0);

	if (gpudev->swfuse_irqctrl)
		gpudev->swfuse_irqctrl(adreno_dev, state);
}

/*
 * adreno_hang_int_callback() - Isr for fatal interrupts that hang GPU
 * @adreno_dev: Pointer to device
 * @bit: Interrupt bit
 */
void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit)
{
	dev_crit_ratelimited(KGSL_DEVICE(adreno_dev)->dev,
		"MISC: GPU hang detected\n");

	adreno_irqctrl(adreno_dev, 0);

	/* Trigger a fault in the dispatcher - this will effect a restart */
	adreno_dispatcher_fault(adreno_dev, ADRENO_HARD_FAULT);
}

/*
 * adreno_cp_callback() - CP interrupt handler
 * @adreno_dev: Adreno device pointer
 * @bit: Interrupt bit
 *
 * Handle the cp interrupt generated by GPU.
 */
void adreno_cp_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	adreno_dispatcher_schedule(device);
}

static irqreturn_t adreno_irq_handler(int irq, void *data)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	irqreturn_t ret;

	atomic_inc(&adreno_dev->pending_irq_refcnt);
	/* Ensure this increment is done before the IRQ status is updated */
	smp_mb__after_atomic();

	ret = gpudev->irq_handler(adreno_dev);

	/* Make sure the regwrites are done before the decrement */
	smp_mb__before_atomic();
	atomic_dec(&adreno_dev->pending_irq_refcnt);
	/* Ensure other CPUs see the decrement */
	smp_mb__after_atomic();

	return ret;
}
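
/* Log the GPU frequency limiter event and clear the interrupt via its reset line */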
static irqreturn_t adreno_freq_limiter_irq_handler(int irq, void *data)
{
	struct kgsl_device *device = data;

	KGSL_PWRCTRL_LOG_FREQLIM(device);

	reset_control_reset(device->freq_limiter_irq_clear);

	return IRQ_HANDLED;
}

irqreturn_t adreno_irq_callbacks(struct adreno_device *adreno_dev,
		const struct adreno_irq_funcs *funcs, u32 status)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	irqreturn_t ret = IRQ_NONE;

	/* Loop through all set interrupts and call respective handlers */
	while (status) {
		int i = fls(status) - 1;

		if (funcs[i].func) {
			if (adreno_dev->irq_mask & BIT(i))
				funcs[i].func(adreno_dev, i);
		} else
			dev_crit_ratelimited(device->dev,
				"Unhandled interrupt bit %x\n", i);

		ret = IRQ_HANDLED;

		status &= ~BIT(i);
	}

	return ret;
}

static int adreno_get_chipid(struct platform_device *pdev, u32 *chipid);

static inline bool _rev_match(unsigned int id, unsigned int entry)
{
	return (entry == ANY_ID || entry == id);
}

static const struct adreno_gpu_core *
_get_gpu_core(struct platform_device *pdev, u32 *chipid)
{
	int i;
	struct device_node *node;

	/*
	 * When "qcom,gpu-models" is defined, use the gpu model node to match
	 * on a compatible string, otherwise match the legacy way.
	 */
	node = adreno_get_gpu_model_node(pdev);
	if (!node || !of_find_property(node, "compatible", NULL))
		node = pdev->dev.of_node;

	*chipid = 0;

	/* Check to see if any of the entries match on a compatible string */
	for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
		if (adreno_gpulist[i]->compatible &&
			of_device_is_compatible(node,
				adreno_gpulist[i]->compatible)) {
			/*
			 * We matched on the compatible string, so set the
			 * chipid based on the dtsi, else fail.
			 */
			if (!adreno_get_chipid(pdev, chipid))
				return adreno_gpulist[i];

			dev_crit(&pdev->dev,
				"No chipid associated with %s\n",
				adreno_gpulist[i]->compatible);
			return NULL;
		}
	}

	/* No compatible string so try and match on chipid */
	if (!adreno_get_chipid(pdev, chipid)) {
		unsigned int core = ADRENO_CHIPID_CORE(*chipid);
		unsigned int major = ADRENO_CHIPID_MAJOR(*chipid);
		unsigned int minor = ADRENO_CHIPID_MINOR(*chipid);
		unsigned int patchid = ADRENO_CHIPID_PATCH(*chipid);

		for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
			if (core == adreno_gpulist[i]->core &&
				_rev_match(major, adreno_gpulist[i]->major) &&
				_rev_match(minor, adreno_gpulist[i]->minor) &&
				_rev_match(patchid, adreno_gpulist[i]->patchid))
				return adreno_gpulist[i];
		}
	}

	dev_crit(&pdev->dev, "Unknown GPU chip ID %8.8x\n", *chipid);
	return NULL;
}
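
/* Map DT quirk properties to their corresponding ADRENO_QUIRK_* flags */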
static struct {
	unsigned int quirk;
	const char *prop;
} adreno_quirks[] = {
	{ ADRENO_QUIRK_TWO_PASS_USE_WFI, "qcom,gpu-quirk-two-pass-use-wfi" },
	{ ADRENO_QUIRK_CRITICAL_PACKETS, "qcom,gpu-quirk-critical-packets" },
	{ ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
	{ ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
			"qcom,gpu-quirk-dp2clockgating-disable" },
	{ ADRENO_QUIRK_DISABLE_LMLOADKILL,
			"qcom,gpu-quirk-lmloadkill-disable" },
	{ ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
	{ ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
	{ ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW,
			"qcom,gpu-quirk-limit-uche-gbif-rw" },
	{ ADRENO_QUIRK_CX_GDSC, "qcom,gpu-quirk-cx-gdsc" },
};
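
/*
 * Read the chip id from the "qcom,chipid" DT property, falling back to the
 * GPU part info from socinfo when the property is absent.
 */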
static int adreno_get_chipid(struct platform_device *pdev, u32 *chipid)
{
	u32 id;

	if (!of_property_read_u32(pdev->dev.of_node, "qcom,chipid", chipid))
		return 0;

	id = socinfo_get_partinfo_chip_id(SOCINFO_PART_GPU);
	if (id)
		*chipid = id;

	return id ? 0 : -EINVAL;
}

static void
adreno_update_soc_hw_revision_quirks(struct adreno_device *adreno_dev,
		struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	int i;

	/* update quirk */
	for (i = 0; i < ARRAY_SIZE(adreno_quirks); i++) {
		if (of_property_read_bool(node, adreno_quirks[i].prop))
			adreno_dev->quirks |= adreno_quirks[i].quirk;
	}
}

static const struct adreno_gpu_core *
adreno_identify_gpu(struct platform_device *pdev, u32 *chipid)
{
	const struct adreno_gpu_core *gpucore;

	gpucore = _get_gpu_core(pdev, chipid);
	if (!gpucore)
		return ERR_PTR(-ENODEV);

	/*
	 * Identify targets and spins that are no longer supported and print a
	 * helpful message
	 */
	if (gpucore->features & ADRENO_DEPRECATED) {
		if (gpucore->compatible)
			dev_err(&pdev->dev,
				"Support for GPU %s has been deprecated\n",
				gpucore->compatible);
		else
			dev_err(&pdev->dev,
				"Support for GPU %x.%d.%x.%d has been deprecated\n",
				gpucore->core, gpucore->major,
				gpucore->minor, gpucore->patchid);
		return ERR_PTR(-ENODEV);
	}

	return gpucore;
}

static const struct of_device_id adreno_match_table[] = {
	{ .compatible = "qcom,kgsl-3d0", .data = &device_3d0 },
	{ },
};

MODULE_DEVICE_TABLE(of, adreno_match_table);

/* Dynamically build the OPP table for the GPU device */
static void adreno_build_opp_table(struct device *dev, struct kgsl_pwrctrl *pwr)
{
	int i;

	/* Skip if the table has already been populated */
	if (dev_pm_opp_get_opp_count(dev) > 0)
		return;

	/* Add all the supported frequencies into the tree */
	for (i = 0; i < pwr->num_pwrlevels; i++)
		dev_pm_opp_add(dev, pwr->pwrlevels[i].gpu_freq, 0);
}

static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
		struct device_node *node)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct device_node *child;
	int ret;

	pwr->num_pwrlevels = 0;

	for_each_child_of_node(node, child) {
		u32 index, freq = 0, voltage, bus;
		struct kgsl_pwrlevel *level;

		ret = of_property_read_u32(child, "reg", &index);
		if (ret) {
			dev_err(device->dev, "%pOF: powerlevel index not found\n",
				child);
			goto out;
		}

		ret = of_property_read_u32(child, "qcom,gpu-freq", &freq);
		if (ret) {
			dev_err(device->dev, "%pOF: Unable to read qcom,gpu-freq\n",
				child);
			goto out;
		}

		/* Ignore "zero" powerlevels */
		if (!freq)
			continue;

		ret = of_property_read_u32(child, "qcom,level", &voltage);
		if (ret) {
			dev_err(device->dev, "%pOF: Unable to read qcom,level\n",
				child);
			goto out;
		}

		ret = kgsl_of_property_read_ddrtype(child, "qcom,bus-freq",
			&bus);
		if (ret) {
			dev_err(device->dev, "%pOF: Unable to read qcom,bus-freq\n",
				child);
			goto out;
		}

		if (index >= ARRAY_SIZE(pwr->pwrlevels)) {
			dev_err(device->dev, "%pOF: Pwrlevel index %d is out of range\n",
				child, index);
			continue;
		}

		if (index >= pwr->num_pwrlevels)
			pwr->num_pwrlevels = index + 1;

		level = &pwr->pwrlevels[index];

		level->gpu_freq = freq;
		level->bus_freq = bus;
		level->voltage_level = voltage;
		level->cx_level = 0xffffffff;

		of_property_read_u32(child, "qcom,acd-level",
			&level->acd_level);

		of_property_read_u32(child, "qcom,cx-level",
			&level->cx_level);

		level->bus_min = level->bus_freq;
		kgsl_of_property_read_ddrtype(child,
			"qcom,bus-min", &level->bus_min);

		level->bus_max = level->bus_freq;
		kgsl_of_property_read_ddrtype(child,
			"qcom,bus-max", &level->bus_max);
	}

	adreno_build_opp_table(&device->pdev->dev, pwr);
	return 0;

out:
	of_node_put(child);
	return ret;
}

static void adreno_of_get_initial_pwrlevels(struct kgsl_pwrctrl *pwr,
		struct device_node *node)
{
	int level;

	/* Get and set the initial power level */
	if (of_property_read_u32(node, "qcom,initial-pwrlevel", &level))
		level = 1;

	if (level < 0 || level >= pwr->num_pwrlevels)
		level = 1;

	pwr->active_pwrlevel = level;
	pwr->default_pwrlevel = level;

	/* Set the max power level */
	pwr->max_pwrlevel = 0;

	/* Get and set the min power level */
	if (of_property_read_u32(node, "qcom,initial-min-pwrlevel", &level))
		level = pwr->num_pwrlevels - 1;

	if (level < 0 || level >= pwr->num_pwrlevels || level < pwr->default_pwrlevel)
		level = pwr->num_pwrlevels - 1;

	pwr->min_render_pwrlevel = level;
	pwr->min_pwrlevel = level;
}

static void adreno_of_get_limits(struct adreno_device *adreno_dev,
		struct device_node *node)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwrctrl = &device->pwrctrl;
	unsigned int throttle_level;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) || of_property_read_u32(node,
				"qcom,throttle-pwrlevel", &throttle_level))
		return;

	throttle_level = min(throttle_level, pwrctrl->num_pwrlevels - 1);

	pwrctrl->throttle_mask = GENMASK(pwrctrl->num_pwrlevels - 1,
			pwrctrl->num_pwrlevels - 1 - throttle_level);

	adreno_dev->lm_enabled = true;
}

static int adreno_of_get_legacy_pwrlevels(struct adreno_device *adreno_dev,
		struct device_node *parent)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct device_node *node;
	int ret;

	node = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");

	if (node == NULL) {
		dev_err(&device->pdev->dev,
			"Unable to find 'qcom,gpu-pwrlevels'\n");
		return -EINVAL;
	}

	ret = adreno_of_parse_pwrlevels(adreno_dev, node);

	if (!ret) {
		adreno_of_get_initial_pwrlevels(&device->pwrctrl, parent);
		adreno_of_get_limits(adreno_dev, parent);
	}

	of_node_put(node);
	return ret;
}

static int adreno_of_get_pwrlevels(struct adreno_device *adreno_dev,
		struct device_node *parent)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct device_node *node, *child;
	int feature_code, pcode;

	node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
	if (node == NULL)
		return adreno_of_get_legacy_pwrlevels(adreno_dev, parent);

	feature_code = max_t(int, socinfo_get_feature_code(), SOCINFO_FC_UNKNOWN);
	pcode = (feature_code >= SOCINFO_FC_Y0 && feature_code < SOCINFO_FC_INT_RESERVE) ?
		max_t(int, socinfo_get_pcode(), SOCINFO_PCODE_UNKNOWN) : SOCINFO_PCODE_UNKNOWN;

	device->soc_code = FIELD_PREP(GENMASK(31, 16), pcode) |
			FIELD_PREP(GENMASK(15, 0), feature_code);

	for_each_child_of_node(node, child) {
		bool match = false;
		int tbl_size;
		u32 bin = 0;

		/* Check if the bin has a speed-bin requirement */
		if (!of_property_read_u32(child, "qcom,speed-bin", &bin))
			match = (bin == device->speed_bin);

		/* Check if the bin has a sku-code requirement */
		if (of_get_property(child, "qcom,sku-codes", &tbl_size)) {
			int num_codes = tbl_size / sizeof(u32);
			int i;
			u32 sku_code;

			/*
			 * If we have a speed-bin requirement that did not match,
			 * keep searching.
			 */
			if (bin && !match)
				continue;

			/* Check if the soc_code matches any of the sku codes */
			match = false;
			for (i = 0; i < num_codes; i++) {
				if (!of_property_read_u32_index(child, "qcom,sku-codes",
						i, &sku_code) &&
					(sku_code == 0 || device->soc_code == sku_code)) {
					match = true;
					break;
				}
			}
		}

		if (match) {
			int ret;

			ret = adreno_of_parse_pwrlevels(adreno_dev, child);
			if (ret) {
				of_node_put(child);
				return ret;
			}

			adreno_of_get_initial_pwrlevels(&device->pwrctrl, child);

			/*
			 * Check for the global throttle-pwrlevel first and
			 * override it with a speedbin specific one if found.
			 */
			adreno_of_get_limits(adreno_dev, parent);
			adreno_of_get_limits(adreno_dev, child);

			of_node_put(child);
			return 0;
		}
	}

	dev_err(&device->pdev->dev,
		"No match for speed_bin:%d and soc_code:0x%x\n",
		device->speed_bin, device->soc_code);
	return -ENODEV;
}
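
/*
 * Register a slow-path L3 DCVS voter for the GPU and cache the min/max L3
 * frequencies it is allowed to vote for.
 */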
static int register_l3_voter(struct kgsl_device *device)
{
	int ret = 0;

	mutex_lock(&device->mutex);

	if (!device->l3_vote)
		goto done;

	/* This indicates that we are already set up */
	if (device->num_l3_pwrlevels != 0)
		goto done;

	memset(device->l3_freq, 0x0, sizeof(device->l3_freq));

	ret = qcom_dcvs_register_voter(KGSL_L3_DEVICE, DCVS_L3, DCVS_SLOW_PATH);
	if (ret) {
		dev_err_once(&device->pdev->dev,
			"Unable to register l3 dcvs voter: %d\n", ret);
		goto done;
	}

	ret = qcom_dcvs_hw_minmax_get(DCVS_L3, &device->l3_freq[1],
			&device->l3_freq[2]);
	if (ret) {
		dev_err_once(&device->pdev->dev,
			"Unable to get min/max for l3 dcvs: %d\n", ret);
		qcom_dcvs_unregister_voter(KGSL_L3_DEVICE, DCVS_L3,
			DCVS_SLOW_PATH);
		memset(device->l3_freq, 0x0, sizeof(device->l3_freq));
		goto done;
	}

	device->num_l3_pwrlevels = 3;

done:
	mutex_unlock(&device->mutex);

	return ret;
}

static int adreno_of_get_power(struct adreno_device *adreno_dev,
		struct platform_device *pdev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	ret = adreno_of_get_pwrlevels(adreno_dev, pdev->dev.of_node);
	if (ret)
		return ret;

	device->pwrctrl.interval_timeout = CONFIG_QCOM_KGSL_IDLE_TIMEOUT;

	/* Set default bus control to true on all targets */
	device->pwrctrl.bus_control = true;

	return 0;
}

static void adreno_cx_misc_probe(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct resource *res;

	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
			"cx_misc");

	if (res == NULL)
		return;

	adreno_dev->cx_misc_len = resource_size(res);
	adreno_dev->cx_misc_virt = devm_ioremap(&device->pdev->dev,
			res->start, adreno_dev->cx_misc_len);
}

static void adreno_isense_probe(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct resource *res;

	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
			"isense_cntl");
	if (res == NULL)
		return;

	adreno_dev->isense_base = res->start - device->regmap.base->start;
	adreno_dev->isense_len = resource_size(res);
	adreno_dev->isense_virt = devm_ioremap(&device->pdev->dev, res->start,
					adreno_dev->isense_len);
	if (adreno_dev->isense_virt == NULL)
		dev_warn(device->dev, "isense ioremap failed\n");
}

/* Read the fuse through the new and fancy nvmem method */
static int adreno_read_speed_bin(struct platform_device *pdev)
{
	struct nvmem_cell *cell = nvmem_cell_get(&pdev->dev, "speed_bin");
	int ret = PTR_ERR_OR_ZERO(cell);
	void *buf;
	int val = 0;
	size_t len;

	if (ret) {
		if (ret == -ENOENT)
			return 0;

		return ret;
	}

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	memcpy(&val, buf, min(len, sizeof(val)));
	kfree(buf);

	return val;
}
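
/* Read the GPU model fuse through nvmem; returns the fuse value or a negative error */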
static int adreno_read_gpu_model_fuse(struct platform_device *pdev)
{
	struct nvmem_cell *cell = nvmem_cell_get(&pdev->dev, "gpu_model");
	void *buf;
	int val = 0;
	size_t len;

	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	memcpy(&val, buf, min(len, sizeof(val)));
	kfree(buf);

	return val;
}
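
/*
 * Walk the "qcom,gpu-models" children and return the node whose
 * "qcom,gpu-model-id" matches the fused GPU model value.
 */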
static struct device_node *
adreno_get_gpu_model_node(struct platform_device *pdev)
{
	struct device_node *node, *child;
	int fuse_model = adreno_read_gpu_model_fuse(pdev);

	if (fuse_model < 0)
		return NULL;

	node = of_find_node_by_name(pdev->dev.of_node, "qcom,gpu-models");
	if (node == NULL)
		return NULL;

	for_each_child_of_node(node, child) {
		u32 model;

		if (of_property_read_u32(child, "qcom,gpu-model-id", &model))
			continue;

		if (model == fuse_model) {
			of_node_put(node);
			return child;
		}
	}

	of_node_put(node);

	return NULL;
}

const char *adreno_get_gpu_model(struct kgsl_device *device)
{
	struct device_node *node;
	static char gpu_model[32];
	const char *model;
	int ret;

	if (strlen(gpu_model))
		return gpu_model;

	node = adreno_get_gpu_model_node(device->pdev);
	if (!node)
		node = of_node_get(device->pdev->dev.of_node);

	ret = of_property_read_string(node, "qcom,gpu-model", &model);
	of_node_put(node);

	if (!ret)
		goto done;

	model = socinfo_get_partinfo_part_name(SOCINFO_PART_GPU);
	if (model)
		goto done;

	scnprintf(gpu_model, sizeof(gpu_model), "Adreno%u%u%uv%u",
		(u32)ADRENO_CHIPID_CORE(ADRENO_DEVICE(device)->chipid),
		(u32)ADRENO_CHIPID_MAJOR(ADRENO_DEVICE(device)->chipid),
		(u32)ADRENO_CHIPID_MINOR(ADRENO_DEVICE(device)->chipid),
		(u32)ADRENO_CHIPID_PATCH(ADRENO_DEVICE(device)->chipid) + 1);

	return gpu_model;

done:
	strscpy(gpu_model, model, sizeof(gpu_model));

	return gpu_model;
}
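
/*
 * Get the Vulkan device id from the GPU model node (or the device node),
 * falling back to socinfo and finally to the raw chip id.
 */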
static u32 adreno_get_vk_device_id(struct kgsl_device *device)
{
	struct device_node *node;
	static u32 device_id;
	u32 vk_id;
	int ret;

	if (device_id)
		return device_id;

	node = adreno_get_gpu_model_node(device->pdev);
	if (!node)
		node = of_node_get(device->pdev->dev.of_node);

	ret = of_property_read_u32(node, "qcom,vk-device-id", &device_id);
	of_node_put(node);
	if (!ret)
		return device_id;

	vk_id = socinfo_get_partinfo_vulkan_id(SOCINFO_PART_GPU);
	device_id = vk_id ? vk_id : ADRENO_DEVICE(device)->chipid;

	return device_id;
}

#if IS_ENABLED(CONFIG_QCOM_LLCC)
static int adreno_probe_llcc(struct adreno_device *adreno_dev,
		struct platform_device *pdev)
{
	int ret;

	/* Get the system cache slice descriptor for GPU */
	adreno_dev->gpu_llc_slice = llcc_slice_getd(LLCC_GPU);
	ret = PTR_ERR_OR_ZERO(adreno_dev->gpu_llc_slice);

	if (ret) {
		/* Propagate EPROBE_DEFER back to the probe function */
		if (ret == -EPROBE_DEFER)
			return ret;

		if (ret != -ENOENT)
			dev_warn(&pdev->dev,
				"Unable to get the GPU LLC slice: %d\n", ret);
	} else
		adreno_dev->gpu_llc_slice_enable = true;

	/* Get the system cache slice descriptor for GPU pagetables */
	adreno_dev->gpuhtw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
	ret = PTR_ERR_OR_ZERO(adreno_dev->gpuhtw_llc_slice);
	if (ret) {
		if (ret == -EPROBE_DEFER) {
			llcc_slice_putd(adreno_dev->gpu_llc_slice);
			return ret;
		}

		if (ret != -ENOENT)
			dev_warn(&pdev->dev,
				"Unable to get GPU HTW LLC slice: %d\n", ret);
	} else
		adreno_dev->gpuhtw_llc_slice_enable = true;

	return 0;
}
#else
static int adreno_probe_llcc(struct adreno_device *adreno_dev,
		struct platform_device *pdev)
{
	return 0;
}
#endif

static void adreno_regmap_op_preaccess(struct kgsl_regmap_region *region)
{
	struct kgsl_device *device = region->priv;

	/*
	 * The kgsl panic notifier will be called in atomic context to get the
	 * GPU snapshot. The panic handler also skips snapshot dumping in case
	 * the GPU is in SLUMBER state, so we can safely skip
	 * kgsl_pre_hwaccess() here.
	 */
	if (!device->snapshot_atomic && !in_interrupt())
		kgsl_pre_hwaccess(device);
}

static const struct kgsl_regmap_ops adreno_regmap_ops = {
	.preaccess = adreno_regmap_op_preaccess,
};

static const struct kgsl_functable adreno_functable;
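
/* One-time initialization of the adreno_device and embedded kgsl_device at probe time */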
static void adreno_setup_device(struct adreno_device *adreno_dev)
{
	u32 i;

	adreno_dev->dev.name = "kgsl-3d0";
	adreno_dev->dev.ftbl = &adreno_functable;

	init_completion(&adreno_dev->dev.hwaccess_gate);
	init_completion(&adreno_dev->dev.halt_gate);

	idr_init(&adreno_dev->dev.context_idr);

	mutex_init(&adreno_dev->dev.mutex);
	INIT_LIST_HEAD(&adreno_dev->dev.globals);

	/* Set the fault tolerance policy to replay, skip, throttle */
	adreno_dev->ft_policy = BIT(KGSL_FT_REPLAY) |
		BIT(KGSL_FT_SKIPCMD) | BIT(KGSL_FT_THROTTLE);

	/* Enable command timeouts by default */
	adreno_dev->long_ib_detect = true;

	INIT_WORK(&adreno_dev->input_work, adreno_input_work);

	INIT_LIST_HEAD(&adreno_dev->active_list);
	spin_lock_init(&adreno_dev->active_list_lock);

	for (i = 0; i < ARRAY_SIZE(adreno_dev->ringbuffers); i++) {
		struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[i];

		INIT_LIST_HEAD(&rb->events.group);
	}

	/*
	 * Some GPUs need a specific alignment for the UCHE GMEM base address.
	 * Configure the UCHE GMEM base based on the GMEM size and align it
	 * accordingly. This needs to be done based on GMEM size to avoid
	 * overlap between the RB and UCHE GMEM ranges.
	 */
	if (adreno_dev->gpucore->uche_gmem_alignment)
		adreno_dev->uche_gmem_base =
			ALIGN(adreno_dev->gpucore->gmem_size,
				adreno_dev->gpucore->uche_gmem_alignment);
}

static const struct of_device_id adreno_component_match[] = {
	{ .compatible = "qcom,gen8-gmu" },
	{ .compatible = "qcom,gen7-gmu" },
	{ .compatible = "qcom,gpu-gmu" },
	{ .compatible = "qcom,gpu-rgmu" },
	{ .compatible = "qcom,kgsl-smmu-v2" },
	{ .compatible = "qcom,smmu-kgsl-cb" },
	{},
};
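
/* Request the main kgsl_3d0 interrupt if this target defines an irq mask */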
static int adreno_irq_setup(struct platform_device *pdev,
		struct adreno_device *adreno_dev)
{
	if (!adreno_dev->irq_mask)
		return 0;

	return kgsl_request_irq(pdev, "kgsl_3d0_irq", adreno_irq_handler, KGSL_DEVICE(adreno_dev));
}

int adreno_device_probe(struct platform_device *pdev,
		struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct device *dev = &pdev->dev;
	unsigned int priv = 0;
	int status;
	u32 size;

	KGSL_BOOT_MARKER("GPU Init");

	/* Initialize the adreno device structure */
	adreno_setup_device(adreno_dev);

	dev_set_drvdata(dev, device);

	device->pdev = pdev;

	adreno_update_soc_hw_revision_quirks(adreno_dev, pdev);

	status = adreno_read_speed_bin(pdev);
	if (status < 0)
		goto err;

	device->speed_bin = status;

	status = adreno_of_get_power(adreno_dev, pdev);
	if (status)
		goto err;

	status = kgsl_bus_init(device, pdev);
	if (status)
		goto err;

	status = kgsl_regmap_init(pdev, &device->regmap, "kgsl_3d0_reg_memory",
		&adreno_regmap_ops, device);
	if (status)
		goto err_bus_close;

	/*
	 * The SMMU APIs use unsigned long for virtual addresses which means
	 * that we cannot use 64 bit virtual addresses on a 32 bit kernel even
	 * though the hardware and the rest of the KGSL driver supports it.
	 */
	if (adreno_support_64bit(adreno_dev))
		kgsl_mmu_set_feature(device, KGSL_MMU_64BIT);

	/*
	 * Set the SMMU aperture on A6XX/Gen7 targets to use per-process
	 * pagetables.
	 */
	if (ADRENO_GPUREV(adreno_dev) >= 600)
		kgsl_mmu_set_feature(device, KGSL_MMU_SMMU_APERTURE);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_IOCOHERENT))
		kgsl_mmu_set_feature(device, KGSL_MMU_IO_COHERENT);

	/*
	 * Support VBOs on hardware where HLOS has access to PRR registers
	 * configuration.
	 */
	if (!adreno_is_a650(adreno_dev))
		kgsl_mmu_set_feature(device, KGSL_MMU_SUPPORT_VBO);

	if (adreno_preemption_enable)
		adreno_dev->preempt_override = true;

	device->pwrctrl.bus_width = adreno_dev->gpucore->bus_width;

	device->mmu.secured = (IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER) &&
		ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION));

	/* Probe the LLCC - this could return -EPROBE_DEFER */
	status = adreno_probe_llcc(adreno_dev, pdev);
	if (status)
		goto err_bus_close;

	/*
	 * If getting the GPU HTW slice was successful, set the MMU feature so
	 * the domain can set the appropriate attributes
	 */
	if (!IS_ERR_OR_NULL(adreno_dev->gpuhtw_llc_slice))
		kgsl_mmu_set_feature(device, KGSL_MMU_LLCC_ENABLE);

	/* Bind the components before doing the KGSL platform probe. */
	status = component_bind_all(dev, NULL);
	if (status)
		goto err_remove_llcc;

	status = adreno_irq_setup(pdev, adreno_dev);
	if (status < 0)
		goto err_unbind;

	device->pwrctrl.interrupt_num = status;

	device->freq_limiter_intr_num = kgsl_request_irq_optional(pdev, "freq_limiter_irq",
		adreno_freq_limiter_irq_handler, device);

	device->freq_limiter_irq_clear =
		devm_reset_control_get(&pdev->dev, "freq_limiter_irq_clear");

	status = kgsl_device_platform_probe(device);
	if (status)
		goto err_unbind;

	adreno_fence_trace_array_init(device);

	/* Add the CX_DBGC block to the regmap */
	kgsl_regmap_add_region(&device->regmap, pdev, "cx_dbgc", NULL, NULL);

	/* Probe for the optional CX_MISC block */
	adreno_cx_misc_probe(device);

	adreno_isense_probe(device);

	/* Allocate the memstore for storing timestamps and other useful info */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
		priv |= KGSL_MEMDESC_PRIVILEGED;

	device->memstore = kgsl_allocate_global(device,
		KGSL_MEMSTORE_SIZE, 0, 0, priv, "memstore");

	status = PTR_ERR_OR_ZERO(device->memstore);
	if (status) {
		trace_array_put(device->fence_trace_array);
		kgsl_device_platform_remove(device);
		goto err_unbind;
	}

	/* Initialize the snapshot engine */
	size = adreno_dev->gpucore->snapshot_size;

	/*
	 * Use a default size if one wasn't specified, but print a warning so
	 * the developer knows to fix it
	 */
	if (WARN(!size, "The snapshot size was not specified in the gpucore\n"))
		size = SZ_1M;

	kgsl_device_snapshot_probe(device, size);

	adreno_debugfs_init(adreno_dev);
	adreno_profile_init(adreno_dev);

	adreno_dev->perfcounter = false;

	adreno_sysfs_init(adreno_dev);

	/* Ignore the return value, as the driver can still function without pwrscale enabled */
	kgsl_pwrscale_init(device, pdev, CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_L3_VOTE))
		device->l3_vote = true;

#ifdef CONFIG_INPUT
	if (!of_property_read_bool(pdev->dev.of_node,
			"qcom,disable-wake-on-touch")) {
		adreno_input_handler.private = device;
		/*
		 * It isn't fatal if we cannot register the input handler. Sad,
		 * perhaps, but not fatal
		 */
		if (input_register_handler(&adreno_input_handler)) {
			adreno_input_handler.private = NULL;
			dev_err(device->dev,
				"Unable to register the input handler\n");
		}
	}
#endif

	kgsl_qcom_va_md_register(device);

	KGSL_BOOT_MARKER("GPU Ready");

	return 0;

err_unbind:
	component_unbind_all(dev, NULL);

err_remove_llcc:
	if (!IS_ERR_OR_NULL(adreno_dev->gpu_llc_slice))
		llcc_slice_putd(adreno_dev->gpu_llc_slice);

	if (!IS_ERR_OR_NULL(adreno_dev->gpuhtw_llc_slice))
		llcc_slice_putd(adreno_dev->gpuhtw_llc_slice);

err_bus_close:
	kgsl_bus_close(device);

err:
	device->pdev = NULL;
	dev_err_probe(&pdev->dev, status, "adreno device probe failed\n");
	return status;
}

static int adreno_bind(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct adreno_gpu_core *gpucore;
	int ret;
	u32 chipid;

	gpucore = adreno_identify_gpu(pdev, &chipid);
	if (IS_ERR(gpucore))
		return PTR_ERR(gpucore);

	ret = gpucore->gpudev->probe(pdev, chipid, gpucore);

	if (!ret) {
		struct kgsl_device *device = dev_get_drvdata(dev);

		device->pdev_loaded = true;
		srcu_init_notifier_head(&device->nh);
	} else {
		/*
		 * Handle resource clean up through unbind, instead of a
		 * lengthy goto error path.
		 */
		adreno_unbind(dev);
	}

	return ret;
}

static void adreno_unbind(struct device *dev)
{
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	const struct adreno_gpudev *gpudev;

	device = dev_get_drvdata(dev);
	if (!device)
		return;

	/* Return if cleanup happens in adreno_device_probe */
	if (!device->pdev)
		return;

	if (device->pdev_loaded) {
		srcu_cleanup_notifier_head(&device->nh);
		device->pdev_loaded = false;
	}

	adreno_dev = ADRENO_DEVICE(device);
	gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	trace_array_put(device->fence_trace_array);

	if (gpudev->remove != NULL)
		gpudev->remove(adreno_dev);

#ifdef CONFIG_INPUT
	if (adreno_input_handler.private)
		input_unregister_handler(&adreno_input_handler);
#endif

	kgsl_qcom_va_md_unregister(device);
	adreno_coresight_remove(adreno_dev);
	adreno_profile_close(adreno_dev);

	/* Release the system cache slice descriptor */
	if (!IS_ERR_OR_NULL(adreno_dev->gpu_llc_slice))
		llcc_slice_putd(adreno_dev->gpu_llc_slice);

	if (!IS_ERR_OR_NULL(adreno_dev->gpuhtw_llc_slice))
		llcc_slice_putd(adreno_dev->gpuhtw_llc_slice);

	kgsl_pwrscale_close(device);

	if (adreno_dev->dispatch_ops && adreno_dev->dispatch_ops->close)
		adreno_dev->dispatch_ops->close(adreno_dev);

	kgsl_device_platform_remove(device);

	component_unbind_all(dev, NULL);

	kgsl_bus_close(device);
	device->pdev = NULL;

	if (device->num_l3_pwrlevels != 0)
		qcom_dcvs_unregister_voter(KGSL_L3_DEVICE, DCVS_L3,
			DCVS_SLOW_PATH);

	clear_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv);
	clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
}

static void adreno_resume(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (device->state == KGSL_STATE_SUSPEND) {
		adreno_put_gpu_halt(adreno_dev);
		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
	} else if (device->state != KGSL_STATE_INIT) {
		/*
		 * This is an error situation so wait for the device to idle and
		 * then put the device in SLUMBER state. This will get us to
		 * the right place when we resume.
  1206. */
  1207. if (device->state == KGSL_STATE_ACTIVE)
  1208. adreno_idle(device);
  1209. kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
  1210. dev_err(device->dev, "resume invoked without a suspend\n");
  1211. }
  1212. }
  1213. static int adreno_pm_resume(struct device *dev)
  1214. {
  1215. struct kgsl_device *device = dev_get_drvdata(dev);
  1216. struct adreno_device *adreno_dev;
  1217. const struct adreno_power_ops *ops;
  1218. if (!device)
  1219. return 0;
  1220. adreno_dev = ADRENO_DEVICE(device);
  1221. ops = ADRENO_POWER_OPS(adreno_dev);
  1222. #if IS_ENABLED(CONFIG_DEEPSLEEP)
  1223. if (pm_suspend_via_firmware()) {
  1224. struct kgsl_iommu *iommu = &device->mmu.iommu;
  1225. int status = kgsl_set_smmu_aperture(device, &iommu->user_context);
  1226. if (status)
  1227. return status;
  1228. status = kgsl_set_smmu_lpac_aperture(device, &iommu->lpac_context);
  1229. if (status < 0)
  1230. return status;
  1231. }
  1232. #endif
  1233. mutex_lock(&device->mutex);
  1234. ops->pm_resume(adreno_dev);
  1235. mutex_unlock(&device->mutex);
  1236. kgsl_reclaim_start();
  1237. return 0;
  1238. }
  1239. static int adreno_suspend(struct adreno_device *adreno_dev)
  1240. {
  1241. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1242. int status = kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
  1243. if (!status && device->state == KGSL_STATE_SUSPEND)
  1244. adreno_get_gpu_halt(adreno_dev);
  1245. return status;
  1246. }
  1247. static int adreno_pm_suspend(struct device *dev)
  1248. {
  1249. struct kgsl_device *device = dev_get_drvdata(dev);
  1250. struct adreno_device *adreno_dev;
  1251. const struct adreno_power_ops *ops;
  1252. int status;
  1253. if (!device)
  1254. return 0;
  1255. adreno_dev = ADRENO_DEVICE(device);
  1256. ops = ADRENO_POWER_OPS(adreno_dev);
  1257. mutex_lock(&device->mutex);
  1258. status = ops->pm_suspend(adreno_dev);
  1259. #if IS_ENABLED(CONFIG_DEEPSLEEP)
  1260. if (!status && pm_suspend_via_firmware())
  1261. adreno_zap_shader_unload(adreno_dev);
  1262. #endif
  1263. mutex_unlock(&device->mutex);
  1264. if (status)
  1265. return status;
  1266. /*
1267. * When the device enters the suspend state, CX can be collapsed, causing
  1268. * the GPU CX timer to pause. Clear the ADRENO_DEVICE_CX_TIMER_INITIALIZED
  1269. * flag to ensure that the CX timer is reseeded during resume.
  1270. */
  1271. clear_bit(ADRENO_DEVICE_CX_TIMER_INITIALIZED, &adreno_dev->priv);
  1272. kgsl_reclaim_close();
  1273. kthread_flush_worker(device->events_worker);
  1274. flush_workqueue(kgsl_driver.lockless_workqueue);
  1275. return status;
  1276. }
  1277. void adreno_create_profile_buffer(struct adreno_device *adreno_dev)
  1278. {
  1279. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1280. unsigned int priv = 0;
  1281. if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
  1282. priv = KGSL_MEMDESC_PRIVILEGED;
  1283. adreno_allocate_global(device, &adreno_dev->profile_buffer,
  1284. PAGE_SIZE, 0, 0, priv, "alwayson");
  1285. adreno_dev->profile_index = 0;
  1286. if (!IS_ERR(adreno_dev->profile_buffer))
  1287. set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
  1288. &adreno_dev->priv);
  1289. }
  1290. static int adreno_init(struct kgsl_device *device)
  1291. {
  1292. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1293. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  1294. int ret;
  1295. ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
  1296. if (ret)
  1297. return ret;
  1298. /*
1299. * Initialization only needs to be done once until the
1300. * device is shut down
  1301. */
  1302. if (test_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv))
  1303. return 0;
  1304. ret = gpudev->init(adreno_dev);
  1305. if (ret)
  1306. return ret;
  1307. set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
  1308. return 0;
  1309. }
  1310. static bool regulators_left_on(struct kgsl_device *device)
  1311. {
  1312. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1313. if (gmu_core_gpmu_isenabled(device))
  1314. return false;
  1315. if (!IS_ERR_OR_NULL(pwr->cx_gdsc))
  1316. if (regulator_is_enabled(pwr->cx_gdsc))
  1317. return true;
  1318. if (!IS_ERR_OR_NULL(pwr->gx_gdsc))
  1319. return regulator_is_enabled(pwr->gx_gdsc);
  1320. return false;
  1321. }
  1322. void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
  1323. {
  1324. int i;
  1325. struct adreno_ringbuffer *rb;
  1326. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1327. FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
  1328. if (rb->drawctxt_active)
  1329. kgsl_context_put(&(rb->drawctxt_active->base));
  1330. rb->drawctxt_active = NULL;
  1331. kgsl_sharedmem_writel(device->scratch,
  1332. SCRATCH_RB_OFFSET(rb->id, current_rb_ptname),
  1333. 0);
  1334. }
  1335. }
  1336. static int adreno_open(struct adreno_device *adreno_dev)
  1337. {
  1338. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1339. int ret;
  1340. /*
  1341. * active_cnt special case: we are starting up for the first
  1342. * time, so use this sequence instead of the kgsl_pwrctrl_wake()
  1343. * which will be called by adreno_active_count_get().
  1344. */
  1345. atomic_inc(&device->active_cnt);
  1346. memset(device->memstore->hostptr, 0, device->memstore->size);
  1347. ret = adreno_init(device);
  1348. if (ret)
  1349. goto err;
  1350. ret = adreno_start(device, 0);
  1351. if (ret)
  1352. goto err;
  1353. complete_all(&device->hwaccess_gate);
  1354. kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
  1355. adreno_active_count_put(adreno_dev);
  1356. return 0;
  1357. err:
  1358. kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
  1359. atomic_dec(&device->active_cnt);
  1360. return ret;
  1361. }
  1362. static int adreno_first_open(struct kgsl_device *device)
  1363. {
  1364. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1365. const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
  1366. if (!device->pdev_loaded)
  1367. return -ENODEV;
  1368. return ops->first_open(adreno_dev);
  1369. }
  1370. static int adreno_close(struct adreno_device *adreno_dev)
  1371. {
  1372. return kgsl_pwrctrl_change_state(KGSL_DEVICE(adreno_dev),
  1373. KGSL_STATE_INIT);
  1374. }
  1375. static int adreno_last_close(struct kgsl_device *device)
  1376. {
  1377. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1378. const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
  1379. /*
  1380. * Wait up to 1 second for the active count to go low
  1381. * and then start complaining about it
  1382. */
  1383. if (kgsl_active_count_wait(device, 0, HZ)) {
  1384. dev_err(device->dev,
  1385. "Waiting for the active count to become 0\n");
  1386. while (kgsl_active_count_wait(device, 0, HZ))
  1387. dev_err(device->dev,
  1388. "Still waiting for the active count\n");
  1389. }
  1390. return ops->last_close(adreno_dev);
  1391. }
  1392. static int adreno_pwrctrl_active_count_get(struct adreno_device *adreno_dev)
  1393. {
  1394. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1395. int ret = 0;
  1396. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  1397. return -EINVAL;
  1398. if ((atomic_read(&device->active_cnt) == 0) &&
  1399. (device->state != KGSL_STATE_ACTIVE)) {
  1400. mutex_unlock(&device->mutex);
  1401. wait_for_completion(&device->hwaccess_gate);
  1402. mutex_lock(&device->mutex);
  1403. device->pwrctrl.superfast = true;
  1404. ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
  1405. }
  1406. if (ret == 0)
  1407. atomic_inc(&device->active_cnt);
  1408. trace_kgsl_active_count(device,
  1409. (unsigned long) __builtin_return_address(0));
  1410. return ret;
  1411. }
  1412. static void adreno_pwrctrl_active_count_put(struct adreno_device *adreno_dev)
  1413. {
  1414. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1415. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  1416. return;
  1417. if (WARN(atomic_read(&device->active_cnt) == 0,
  1418. "Unbalanced get/put calls to KGSL active count\n"))
  1419. return;
  1420. if (atomic_dec_and_test(&device->active_cnt)) {
  1421. kgsl_pwrscale_update_stats(device);
  1422. kgsl_pwrscale_update(device);
  1423. kgsl_start_idle_timer(device);
  1424. }
  1425. trace_kgsl_active_count(device,
  1426. (unsigned long) __builtin_return_address(0));
  1427. wake_up(&device->active_cnt_wq);
  1428. }
  1429. int adreno_active_count_get(struct adreno_device *adreno_dev)
  1430. {
  1431. const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
  1432. return ops->active_count_get(adreno_dev);
  1433. }
  1434. void adreno_active_count_put(struct adreno_device *adreno_dev)
  1435. {
  1436. const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
  1437. ops->active_count_put(adreno_dev);
  1438. }
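/*
 * Illustrative usage sketch (not an actual caller in this file): code that
 * needs the GPU powered while it touches registers brackets the access with
 * a balanced get/put pair, with the device mutex held:
 *
 *	mutex_lock(&device->mutex);
 *	ret = adreno_active_count_get(adreno_dev);
 *	if (!ret) {
 *		... access GPU registers ...
 *		adreno_active_count_put(adreno_dev);
 *	}
 *	mutex_unlock(&device->mutex);
 */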
  1439. void adreno_get_bus_counters(struct adreno_device *adreno_dev)
  1440. {
  1441. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1442. int ret = 0;
  1443. if (!device->pwrctrl.bus_control)
  1444. return;
  1445. /* VBIF waiting for RAM */
  1446. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1447. KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 0,
  1448. &adreno_dev->starved_ram_lo, NULL);
  1449. /* Target has GBIF */
  1450. if (adreno_is_gen8(adreno_dev) || adreno_is_gen7(adreno_dev) ||
  1451. (adreno_is_a6xx(adreno_dev) && !adreno_is_a630(adreno_dev))) {
  1452. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1453. KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 1,
  1454. &adreno_dev->starved_ram_lo_ch1, NULL);
  1455. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1456. KGSL_PERFCOUNTER_GROUP_VBIF,
  1457. GBIF_AXI0_READ_DATA_TOTAL_BEATS,
  1458. &adreno_dev->ram_cycles_lo, NULL);
  1459. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1460. KGSL_PERFCOUNTER_GROUP_VBIF,
  1461. GBIF_AXI1_READ_DATA_TOTAL_BEATS,
  1462. &adreno_dev->ram_cycles_lo_ch1_read, NULL);
  1463. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1464. KGSL_PERFCOUNTER_GROUP_VBIF,
  1465. GBIF_AXI0_WRITE_DATA_TOTAL_BEATS,
  1466. &adreno_dev->ram_cycles_lo_ch0_write, NULL);
  1467. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1468. KGSL_PERFCOUNTER_GROUP_VBIF,
  1469. GBIF_AXI1_WRITE_DATA_TOTAL_BEATS,
  1470. &adreno_dev->ram_cycles_lo_ch1_write, NULL);
  1471. } else {
  1472. /* VBIF DDR cycles */
  1473. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1474. KGSL_PERFCOUNTER_GROUP_VBIF,
  1475. VBIF_AXI_TOTAL_BEATS,
  1476. &adreno_dev->ram_cycles_lo, NULL);
  1477. }
  1478. if (ret)
  1479. dev_err(KGSL_DEVICE(adreno_dev)->dev,
  1480. "Unable to get perf counters for bus DCVS\n");
  1481. }
  1482. #define ADRENO_AHB_MIN_TIMEOUT_VAL_USEC 1000
  1483. u32 adreno_get_ahb_timeout_val(struct adreno_device *adreno_dev, u32 noc_timeout_us)
  1484. {
  1485. u64 cycles, hub_clk_freq = adreno_dev->gmu_hub_clk_freq;
  1486. u32 timeout_val;
  1487. if (!noc_timeout_us)
  1488. return 0;
  1489. do_div(hub_clk_freq, HZ_PER_MHZ);
  1490. cycles = hub_clk_freq * noc_timeout_us;
  1491. /*
1492. * Get the maximum possible AHB timeout value that is still less than the GPU NOC
1493. * timeout value. When cycles is an exact power of two, the calculated AHB timeout
1494. * would equal the GPU config NOC timeout, so reduce it by one cycle to make sure
1495. * the programmed AHB timeout is never the same as the GPU config NOC timeout.
  1496. */
  1497. if (is_power_of_2(cycles))
  1498. cycles -= 1;
  1499. timeout_val = ilog2(cycles);
  1500. /*
1501. * Make sure the AHB timeout value fits into the bit field and is not so low
1502. * that it causes false timeouts.
  1503. */
  1504. if ((timeout_val > GENMASK(4, 0)) ||
  1505. ((ADRENO_AHB_MIN_TIMEOUT_VAL_USEC * hub_clk_freq) > (1 << timeout_val))) {
  1506. dev_warn(adreno_dev->dev.dev, "Invalid AHB timeout_val %u\n", timeout_val);
  1507. return 0;
  1508. }
  1509. /*
1510. * Return (timeout_val - 1). Based on the programmed timeout_val, a timeout will occur if
  1511. * an AHB transaction is not completed in 2 ^ (timeout_val + 1) cycles.
  1512. */
  1513. return (timeout_val - 1);
  1514. }
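/*
 * Worked example with hypothetical numbers (not taken from any real target):
 * with gmu_hub_clk_freq = 150 MHz and noc_timeout_us = 2000,
 * cycles = 150 * 2000 = 300000 (not a power of two), so
 * timeout_val = ilog2(300000) = 18. The checks pass (18 <= 31 and
 * 1000 * 150 = 150000 <= 2^18 = 262144), so 17 is returned and the AHB
 * timeout fires after 2^(17 + 1) = 262144 hub clock cycles, roughly 1.75 ms
 * at 150 MHz - above the 1 ms minimum and below the 2 ms GPU NOC timeout.
 */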
  1515. /**
  1516. * _adreno_start - Power up the GPU and prepare to accept commands
  1517. * @adreno_dev: Pointer to an adreno_device structure
  1518. *
1519. * The core function that powers up and initializes the GPU. This function is
  1520. * called at init and after coming out of SLUMBER
  1521. */
  1522. static int _adreno_start(struct adreno_device *adreno_dev)
  1523. {
  1524. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1525. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  1526. int status;
  1527. unsigned int state = device->state;
  1528. bool regulator_left_on;
  1529. /* make sure ADRENO_DEVICE_STARTED is not set here */
  1530. WARN_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));
  1531. regulator_left_on = regulators_left_on(device);
  1532. /* Clear any GPU faults that might have been left over */
  1533. adreno_clear_gpu_fault(adreno_dev);
  1534. /* Put the GPU in a responsive state */
  1535. status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
  1536. if (status)
  1537. goto error_pwr_off;
  1538. /* Set any stale active contexts to NULL */
  1539. adreno_set_active_ctxs_null(adreno_dev);
  1540. /* Set the bit to indicate that we've just powered on */
  1541. set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);
  1542. /* Clear the busy_data stats - we're starting over from scratch */
  1543. memset(&adreno_dev->busy_data, 0, sizeof(adreno_dev->busy_data));
1544. /* Soft reset the GPU if a regulator is stuck on */
  1545. if (regulator_left_on)
  1546. _soft_reset(adreno_dev);
  1547. /* Start the GPU */
  1548. status = gpudev->start(adreno_dev);
  1549. if (status)
  1550. goto error_pwr_off;
  1551. /* Re-initialize the coresight registers if applicable */
  1552. adreno_coresight_start(adreno_dev);
  1553. adreno_irqctrl(adreno_dev, 1);
  1554. adreno_perfcounter_start(adreno_dev);
  1555. /* Clear FSR here in case it is set from a previous pagefault */
  1556. kgsl_mmu_clear_fsr(&device->mmu);
  1557. status = gpudev->rb_start(adreno_dev);
  1558. if (status)
  1559. goto error_pwr_off;
  1560. /*
  1561. * At this point it is safe to assume that we recovered. Setting
  1562. * this field allows us to take a new snapshot for the next failure
  1563. * if we are prioritizing the first unrecoverable snapshot.
  1564. */
  1565. if (device->snapshot)
  1566. device->snapshot->recovered = true;
  1567. /* Start the dispatcher */
  1568. adreno_dispatcher_start(device);
  1569. device->reset_counter++;
  1570. set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
  1571. /*
  1572. * There is a possible deadlock scenario during kgsl firmware reading
  1573. * (request_firmware) and devfreq update calls. During first boot, kgsl
  1574. * device mutex is held and then request_firmware is called for reading
  1575. * firmware. request_firmware internally takes dev_pm_qos_mtx lock.
  1576. * Whereas in case of devfreq update calls triggered by thermal/bcl or
  1577. * devfreq sysfs, it first takes the same dev_pm_qos_mtx lock and then
  1578. * tries to take kgsl device mutex as part of get_dev_status/target
1579. * calls. This results in a deadlock when both threads are unable to acquire
1580. * the mutex held by the other thread. Enable devfreq updates now as we are
  1581. * done reading all firmware files.
  1582. */
  1583. device->pwrscale.devfreq_enabled = true;
  1584. return 0;
  1585. error_pwr_off:
  1586. /* set the state back to original state */
  1587. kgsl_pwrctrl_change_state(device, state);
  1588. return status;
  1589. }
  1590. /**
  1591. * adreno_start() - Power up and initialize the GPU
  1592. * @device: Pointer to the KGSL device to power up
1593. * @priority: Boolean flag to specify whether the start should be scheduled in a low
  1594. * latency work queue
  1595. *
  1596. * Power up the GPU and initialize it. If priority is specified then elevate
  1597. * the thread priority for the duration of the start operation
  1598. */
  1599. int adreno_start(struct kgsl_device *device, int priority)
  1600. {
  1601. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1602. int nice = task_nice(current);
  1603. int ret;
  1604. if (priority && (adreno_wake_nice < nice))
  1605. set_user_nice(current, adreno_wake_nice);
  1606. ret = _adreno_start(adreno_dev);
  1607. if (priority)
  1608. set_user_nice(current, nice);
  1609. return ret;
  1610. }
  1611. static int adreno_stop(struct kgsl_device *device)
  1612. {
  1613. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1614. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  1615. int error = 0;
  1616. if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
  1617. return 0;
  1618. kgsl_pwrscale_update_stats(device);
  1619. adreno_irqctrl(adreno_dev, 0);
  1620. /* Save active coresight registers if applicable */
  1621. adreno_coresight_stop(adreno_dev);
1622. /* Save physical performance counter values before GPU power down */
  1623. adreno_perfcounter_save(adreno_dev);
  1624. if (gpudev->clear_pending_transactions)
  1625. gpudev->clear_pending_transactions(adreno_dev);
  1626. adreno_dispatcher_stop(adreno_dev);
  1627. adreno_ringbuffer_stop(adreno_dev);
  1628. adreno_llcc_slice_deactivate(adreno_dev);
  1629. adreno_set_active_ctxs_null(adreno_dev);
  1630. clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
  1631. return error;
  1632. }
  1633. /**
  1634. * adreno_reset() - Helper function to reset the GPU
  1635. * @device: Pointer to the KGSL device structure for the GPU
  1636. * @fault: Type of fault. Needed to skip soft reset for MMU fault
  1637. *
  1638. * Try to reset the GPU to recover from a fault. First, try to do a low latency
  1639. * soft reset. If the soft reset fails for some reason, then bring out the big
  1640. * guns and toggle the footswitch.
  1641. */
  1642. int adreno_reset(struct kgsl_device *device, int fault)
  1643. {
  1644. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1645. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  1646. int ret = -EINVAL;
  1647. int i;
  1648. if (gpudev->reset)
  1649. return gpudev->reset(adreno_dev);
  1650. /*
1651. * Try a soft reset first. Do not do a soft reset for an IOMMU fault (because
1652. * the IOMMU hardware needs a reset too)
  1653. */
  1654. if (!(fault & ADRENO_IOMMU_PAGE_FAULT))
  1655. ret = adreno_soft_reset(device);
  1656. if (ret) {
  1657. /* If soft reset failed/skipped, then pull the power */
  1658. kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
1659. /* since the device is officially off now, clear the start bit */
  1660. clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
  1661. /* Try to reset the device */
  1662. ret = adreno_start(device, 0);
  1663. for (i = 0; ret && i < 4; i++) {
  1664. msleep(20);
  1665. ret = adreno_start(device, 0);
  1666. }
  1667. if (ret)
  1668. return ret;
  1669. if (i != 0)
  1670. dev_warn(device->dev,
  1671. "Device hard reset tried %d tries\n", i);
  1672. }
  1673. /*
  1674. * If active_cnt is non-zero then the system was active before
  1675. * going into a reset - put it back in that state
  1676. */
  1677. if (atomic_read(&device->active_cnt))
  1678. kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
  1679. return ret;
  1680. }
  1681. static int copy_prop(struct kgsl_device_getproperty *param,
  1682. void *src, size_t size)
  1683. {
  1684. if (copy_to_user(param->value, src,
  1685. min_t(u32, size, param->sizebytes)))
  1686. return -EFAULT;
  1687. return 0;
  1688. }
  1689. static int adreno_prop_device_info(struct kgsl_device *device,
  1690. struct kgsl_device_getproperty *param)
  1691. {
  1692. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1693. struct kgsl_devinfo devinfo = {
  1694. .device_id = device->id + 1,
  1695. .chip_id = adreno_dev->chipid,
  1696. .mmu_enabled = kgsl_mmu_has_feature(device, KGSL_MMU_PAGED),
  1697. .gmem_gpubaseaddr = 0,
  1698. .gmem_sizebytes = adreno_dev->gpucore->gmem_size,
  1699. };
  1700. return copy_prop(param, &devinfo, sizeof(devinfo));
  1701. }
  1702. static int adreno_prop_gpu_model(struct kgsl_device *device,
  1703. struct kgsl_device_getproperty *param)
  1704. {
  1705. struct kgsl_gpu_model model = {0};
  1706. strscpy(model.gpu_model, adreno_get_gpu_model(device),
  1707. sizeof(model.gpu_model));
  1708. return copy_prop(param, &model, sizeof(model));
  1709. }
  1710. static int adreno_prop_device_shadow(struct kgsl_device *device,
  1711. struct kgsl_device_getproperty *param)
  1712. {
  1713. struct kgsl_shadowprop shadowprop = { 0 };
  1714. if (device->memstore->hostptr) {
  1715. /* Pass a dummy address to identify memstore */
  1716. shadowprop.gpuaddr = KGSL_MEMSTORE_TOKEN_ADDRESS;
  1717. shadowprop.size = device->memstore->size;
  1718. shadowprop.flags = KGSL_FLAGS_INITIALIZED |
  1719. KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
  1720. }
  1721. return copy_prop(param, &shadowprop, sizeof(shadowprop));
  1722. }
  1723. static int adreno_prop_device_qdss_stm(struct kgsl_device *device,
  1724. struct kgsl_device_getproperty *param)
  1725. {
  1726. struct kgsl_qdss_stm_prop qdssprop = {0};
  1727. if (!IS_ERR_OR_NULL(device->qdss_desc)) {
  1728. qdssprop.gpuaddr = device->qdss_desc->gpuaddr;
  1729. qdssprop.size = device->qdss_desc->size;
  1730. }
  1731. return copy_prop(param, &qdssprop, sizeof(qdssprop));
  1732. }
  1733. static int adreno_prop_device_qtimer(struct kgsl_device *device,
  1734. struct kgsl_device_getproperty *param)
  1735. {
  1736. struct kgsl_qtimer_prop qtimerprop = {0};
  1737. if (!IS_ERR_OR_NULL(device->qtimer_desc)) {
  1738. qtimerprop.gpuaddr = device->qtimer_desc->gpuaddr;
  1739. qtimerprop.size = device->qtimer_desc->size;
  1740. }
  1741. return copy_prop(param, &qtimerprop, sizeof(qtimerprop));
  1742. }
  1743. static int adreno_prop_s32(struct kgsl_device *device,
  1744. struct kgsl_device_getproperty *param)
  1745. {
  1746. int val = 0;
  1747. if (param->type == KGSL_PROP_MMU_ENABLE)
  1748. val = kgsl_mmu_has_feature(device, KGSL_MMU_PAGED);
  1749. else if (param->type == KGSL_PROP_INTERRUPT_WAITS)
  1750. val = 1;
  1751. return copy_prop(param, &val, sizeof(val));
  1752. }
  1753. static int adreno_prop_uche_gmem_addr(struct kgsl_device *device,
  1754. struct kgsl_device_getproperty *param)
  1755. {
  1756. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1757. return copy_prop(param, &adreno_dev->uche_gmem_base,
  1758. sizeof(adreno_dev->uche_gmem_base));
  1759. }
  1760. static int adreno_prop_ucode_version(struct kgsl_device *device,
  1761. struct kgsl_device_getproperty *param)
  1762. {
  1763. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1764. struct kgsl_ucode_version ucode = {
  1765. .pfp = adreno_dev->fw[ADRENO_FW_PFP].version,
  1766. .pm4 = adreno_dev->fw[ADRENO_FW_PM4].version,
  1767. };
  1768. return copy_prop(param, &ucode, sizeof(ucode));
  1769. }
  1770. static int adreno_prop_gaming_bin(struct kgsl_device *device,
  1771. struct kgsl_device_getproperty *param)
  1772. {
  1773. void *buf;
  1774. size_t len;
  1775. int ret;
  1776. struct nvmem_cell *cell;
  1777. cell = nvmem_cell_get(&device->pdev->dev, "gaming_bin");
  1778. if (IS_ERR(cell))
  1779. return -EINVAL;
  1780. buf = nvmem_cell_read(cell, &len);
  1781. nvmem_cell_put(cell);
  1782. if (!IS_ERR(buf)) {
  1783. ret = copy_prop(param, buf, len);
  1784. kfree(buf);
  1785. return ret;
  1786. }
  1787. dev_err(device->dev, "failed to read gaming_bin nvmem cell\n");
  1788. return -EINVAL;
  1789. }
  1790. static int adreno_prop_u32(struct kgsl_device *device,
  1791. struct kgsl_device_getproperty *param)
  1792. {
  1793. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1794. u32 val = 0;
  1795. if (param->type == KGSL_PROP_HIGHEST_BANK_BIT) {
  1796. val = adreno_dev->highest_bank_bit;
  1797. } else if (param->type == KGSL_PROP_MIN_ACCESS_LENGTH)
  1798. of_property_read_u32(device->pdev->dev.of_node,
  1799. "qcom,min-access-length", &val);
  1800. else if (param->type == KGSL_PROP_UBWC_MODE)
  1801. of_property_read_u32(device->pdev->dev.of_node,
  1802. "qcom,ubwc-mode", &val);
  1803. else if (param->type == KGSL_PROP_DEVICE_BITNESS)
  1804. val = adreno_support_64bit(adreno_dev) ? 48 : 32;
  1805. else if (param->type == KGSL_PROP_SPEED_BIN)
  1806. val = device->speed_bin;
  1807. else if (param->type == KGSL_PROP_VK_DEVICE_ID)
  1808. val = adreno_get_vk_device_id(device);
  1809. else if (param->type == KGSL_PROP_IS_LPAC_ENABLED)
  1810. val = adreno_dev->lpac_enabled ? 1 : 0;
  1811. else if (param->type == KGSL_PROP_IS_RAYTRACING_ENABLED)
  1812. val = adreno_dev->raytracing_enabled ? 1 : 0;
  1813. else if (param->type == KGSL_PROP_IS_FASTBLEND_ENABLED)
  1814. val = adreno_dev->fastblend_enabled ? 1 : 0;
  1815. else if (param->type == KGSL_PROP_IS_AQE_ENABLED)
  1816. val = ADRENO_FEATURE(adreno_dev, ADRENO_AQE) ? 1 : 0;
  1817. return copy_prop(param, &val, sizeof(val));
  1818. }
  1819. static int adreno_prop_uche_trap_base(struct kgsl_device *device,
  1820. struct kgsl_device_getproperty *param)
  1821. {
  1822. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1823. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  1824. u64 val = 0;
  1825. if (!gpudev->get_uche_trap_base)
  1826. return -EINVAL;
  1827. val = gpudev->get_uche_trap_base();
  1828. return copy_prop(param, &val, sizeof(val));
  1829. }
  1830. static const struct {
  1831. int type;
  1832. int (*func)(struct kgsl_device *device,
  1833. struct kgsl_device_getproperty *param);
  1834. } adreno_property_funcs[] = {
  1835. { KGSL_PROP_DEVICE_INFO, adreno_prop_device_info },
  1836. { KGSL_PROP_DEVICE_SHADOW, adreno_prop_device_shadow },
  1837. { KGSL_PROP_DEVICE_QDSS_STM, adreno_prop_device_qdss_stm },
  1838. { KGSL_PROP_DEVICE_QTIMER, adreno_prop_device_qtimer },
  1839. { KGSL_PROP_MMU_ENABLE, adreno_prop_s32 },
  1840. { KGSL_PROP_INTERRUPT_WAITS, adreno_prop_s32 },
  1841. { KGSL_PROP_UCHE_GMEM_VADDR, adreno_prop_uche_gmem_addr },
  1842. { KGSL_PROP_UCODE_VERSION, adreno_prop_ucode_version },
  1843. { KGSL_PROP_HIGHEST_BANK_BIT, adreno_prop_u32 },
  1844. { KGSL_PROP_MIN_ACCESS_LENGTH, adreno_prop_u32 },
  1845. { KGSL_PROP_UBWC_MODE, adreno_prop_u32 },
  1846. { KGSL_PROP_DEVICE_BITNESS, adreno_prop_u32 },
  1847. { KGSL_PROP_SPEED_BIN, adreno_prop_u32 },
  1848. { KGSL_PROP_GAMING_BIN, adreno_prop_gaming_bin },
  1849. { KGSL_PROP_GPU_MODEL, adreno_prop_gpu_model},
  1850. { KGSL_PROP_VK_DEVICE_ID, adreno_prop_u32},
  1851. { KGSL_PROP_IS_LPAC_ENABLED, adreno_prop_u32 },
  1852. { KGSL_PROP_IS_RAYTRACING_ENABLED, adreno_prop_u32},
  1853. { KGSL_PROP_IS_FASTBLEND_ENABLED, adreno_prop_u32},
  1854. { KGSL_PROP_UCHE_TRAP_BASE, adreno_prop_uche_trap_base },
  1855. { KGSL_PROP_IS_AQE_ENABLED, adreno_prop_u32 },
  1856. };
  1857. static int adreno_getproperty(struct kgsl_device *device,
  1858. struct kgsl_device_getproperty *param)
  1859. {
  1860. int i;
  1861. for (i = 0; i < ARRAY_SIZE(adreno_property_funcs); i++) {
  1862. if (param->type == adreno_property_funcs[i].type)
  1863. return adreno_property_funcs[i].func(device, param);
  1864. }
  1865. return -ENODEV;
  1866. }
  1867. static int adreno_query_property_list(struct kgsl_device *device, u32 *list,
  1868. u32 count)
  1869. {
  1870. int i;
  1871. if (!list)
  1872. return ARRAY_SIZE(adreno_property_funcs);
  1873. for (i = 0; i < count && i < ARRAY_SIZE(adreno_property_funcs); i++)
  1874. list[i] = adreno_property_funcs[i].type;
  1875. return i;
  1876. }
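/*
 * Illustrative calling pattern (hypothetical caller): pass a NULL list to
 * learn how many property types are supported, then call again with a
 * buffer of at least that many entries:
 *
 *	int n = adreno_query_property_list(device, NULL, 0);
 *	u32 *types = kcalloc(n, sizeof(*types), GFP_KERNEL);
 *
 *	if (types)
 *		n = adreno_query_property_list(device, types, n);
 */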
  1877. int adreno_set_constraint(struct kgsl_device *device,
  1878. struct kgsl_context *context,
  1879. struct kgsl_device_constraint *constraint)
  1880. {
  1881. int status = 0;
  1882. switch (constraint->type) {
  1883. case KGSL_CONSTRAINT_PWRLEVEL: {
  1884. struct kgsl_device_constraint_pwrlevel pwr;
  1885. if (constraint->size != sizeof(pwr)) {
  1886. status = -EINVAL;
  1887. break;
  1888. }
  1889. if (copy_from_user(&pwr,
  1890. (void __user *)constraint->data,
  1891. sizeof(pwr))) {
  1892. status = -EFAULT;
  1893. break;
  1894. }
  1895. if (pwr.level >= KGSL_CONSTRAINT_PWR_MAXLEVELS) {
  1896. status = -EINVAL;
  1897. break;
  1898. }
  1899. context->pwr_constraint.type =
  1900. KGSL_CONSTRAINT_PWRLEVEL;
  1901. context->pwr_constraint.sub_type = pwr.level;
  1902. trace_kgsl_user_pwrlevel_constraint(device,
  1903. context->id,
  1904. context->pwr_constraint.type,
  1905. context->pwr_constraint.sub_type);
  1906. }
  1907. break;
  1908. case KGSL_CONSTRAINT_NONE:
  1909. if (context->pwr_constraint.type == KGSL_CONSTRAINT_PWRLEVEL)
  1910. trace_kgsl_user_pwrlevel_constraint(device,
  1911. context->id,
  1912. KGSL_CONSTRAINT_NONE,
  1913. context->pwr_constraint.sub_type);
  1914. context->pwr_constraint.type = KGSL_CONSTRAINT_NONE;
  1915. break;
  1916. case KGSL_CONSTRAINT_L3_PWRLEVEL: {
  1917. struct kgsl_device_constraint_pwrlevel pwr;
  1918. if (constraint->size != sizeof(pwr)) {
  1919. status = -EINVAL;
  1920. break;
  1921. }
  1922. if (copy_from_user(&pwr, constraint->data, sizeof(pwr))) {
  1923. status = -EFAULT;
  1924. break;
  1925. }
  1926. status = register_l3_voter(device);
  1927. if (status)
  1928. break;
  1929. if (pwr.level >= KGSL_CONSTRAINT_PWR_MAXLEVELS)
  1930. pwr.level = KGSL_CONSTRAINT_PWR_MAXLEVELS - 1;
  1931. context->l3_pwr_constraint.type = KGSL_CONSTRAINT_L3_PWRLEVEL;
  1932. context->l3_pwr_constraint.sub_type = pwr.level;
  1933. trace_kgsl_user_pwrlevel_constraint(device, context->id,
  1934. context->l3_pwr_constraint.type,
  1935. context->l3_pwr_constraint.sub_type);
  1936. }
  1937. break;
  1938. case KGSL_CONSTRAINT_L3_NONE: {
  1939. unsigned int type = context->l3_pwr_constraint.type;
  1940. if (type == KGSL_CONSTRAINT_L3_PWRLEVEL)
  1941. trace_kgsl_user_pwrlevel_constraint(device, context->id,
  1942. KGSL_CONSTRAINT_L3_NONE,
  1943. context->l3_pwr_constraint.sub_type);
  1944. context->l3_pwr_constraint.type = KGSL_CONSTRAINT_L3_NONE;
  1945. }
  1946. break;
  1947. default:
  1948. status = -EINVAL;
  1949. break;
  1950. }
  1951. /* If a new constraint has been set for a context, cancel the old one */
  1952. if ((status == 0) &&
  1953. (context->id == device->pwrctrl.constraint.owner_id)) {
  1954. trace_kgsl_constraint(device, device->pwrctrl.constraint.type,
  1955. device->pwrctrl.active_pwrlevel, 0);
  1956. device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
  1957. }
  1958. return status;
  1959. }
  1960. static int adreno_setproperty(struct kgsl_device_private *dev_priv,
  1961. unsigned int type,
  1962. void __user *value,
  1963. unsigned int sizebytes)
  1964. {
  1965. int status = -EINVAL;
  1966. struct kgsl_device *device = dev_priv->device;
  1967. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1968. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  1969. switch (type) {
  1970. case KGSL_PROP_PWR_CONSTRAINT:
  1971. case KGSL_PROP_L3_PWR_CONSTRAINT: {
  1972. struct kgsl_device_constraint constraint;
  1973. struct kgsl_context *context;
  1974. if (sizebytes != sizeof(constraint))
  1975. break;
  1976. if (copy_from_user(&constraint, value,
  1977. sizeof(constraint))) {
  1978. status = -EFAULT;
  1979. break;
  1980. }
  1981. context = kgsl_context_get_owner(dev_priv,
  1982. constraint.context_id);
  1983. if (context == NULL)
  1984. break;
  1985. status = adreno_set_constraint(device, context,
  1986. &constraint);
  1987. kgsl_context_put(context);
  1988. }
  1989. break;
  1990. default:
  1991. status = gpudev->setproperty(dev_priv, type, value, sizebytes);
  1992. break;
  1993. }
  1994. return status;
  1995. }
  1996. /*
  1997. * adreno_soft_reset - Do a soft reset of the GPU hardware
  1998. * @device: KGSL device to soft reset
  1999. *
2000. * "soft reset" the GPU hardware - this is a fast path GPU reset.
2001. * The GPU hardware is reset but we never pull power, so we can skip
  2002. * a lot of the standard adreno_stop/adreno_start sequence
  2003. */
  2004. static int adreno_soft_reset(struct kgsl_device *device)
  2005. {
  2006. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2007. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2008. int ret;
  2009. /*
  2010. * Don't allow a soft reset for a304 because the SMMU needs to be hard
  2011. * reset
  2012. */
  2013. if (adreno_is_a304(adreno_dev))
  2014. return -ENODEV;
  2015. if (gpudev->clear_pending_transactions) {
  2016. ret = gpudev->clear_pending_transactions(adreno_dev);
  2017. if (ret)
  2018. return ret;
  2019. }
  2020. kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
  2021. adreno_set_active_ctxs_null(adreno_dev);
  2022. adreno_irqctrl(adreno_dev, 0);
  2023. adreno_clear_gpu_fault(adreno_dev);
2024. /* since the device is officially off now, clear the start bit */
  2025. clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
  2026. /* save physical performance counter values before GPU soft reset */
  2027. adreno_perfcounter_save(adreno_dev);
  2028. _soft_reset(adreno_dev);
  2029. /* Clear the busy_data stats - we're starting over from scratch */
  2030. adreno_dev->busy_data.gpu_busy = 0;
  2031. adreno_dev->busy_data.bif_ram_cycles = 0;
  2032. adreno_dev->busy_data.bif_ram_cycles_read_ch1 = 0;
  2033. adreno_dev->busy_data.bif_ram_cycles_write_ch0 = 0;
  2034. adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0;
  2035. adreno_dev->busy_data.bif_starved_ram = 0;
  2036. adreno_dev->busy_data.bif_starved_ram_ch1 = 0;
  2037. /* Reinitialize the GPU */
  2038. gpudev->start(adreno_dev);
  2039. /* Re-initialize the coresight registers if applicable */
  2040. adreno_coresight_start(adreno_dev);
  2041. /* Enable IRQ */
  2042. adreno_irqctrl(adreno_dev, 1);
  2043. /* stop all ringbuffers to cancel RB events */
  2044. adreno_ringbuffer_stop(adreno_dev);
  2045. /* Start the ringbuffer(s) again */
  2046. ret = gpudev->rb_start(adreno_dev);
  2047. if (ret == 0) {
  2048. device->reset_counter++;
  2049. set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
  2050. }
  2051. /* Restore physical performance counter values after soft reset */
  2052. adreno_perfcounter_restore(adreno_dev);
  2053. if (ret)
  2054. dev_err(device->dev, "Device soft reset failed: %d\n", ret);
  2055. return ret;
  2056. }
  2057. bool adreno_isidle(struct adreno_device *adreno_dev)
  2058. {
  2059. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2060. struct adreno_ringbuffer *rb;
  2061. int i;
  2062. if (!kgsl_state_is_awake(KGSL_DEVICE(adreno_dev)))
  2063. return true;
  2064. /*
2065. * wptr is updated when we add commands to the ringbuffer; add a barrier
2066. * to make sure the updated wptr is compared against rptr
  2067. */
  2068. smp_mb();
  2069. /*
2070. * The GPU is truly idle only when the read and write pointers of every
2071. * ringbuffer are equal
  2072. */
  2073. FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
  2074. if (!adreno_rb_empty(rb))
  2075. return false;
  2076. }
  2077. return gpudev->hw_isidle(adreno_dev);
  2078. }
  2079. /**
  2080. * adreno_spin_idle() - Spin wait for the GPU to idle
  2081. * @adreno_dev: Pointer to an adreno device
  2082. * @timeout: milliseconds to wait before returning error
  2083. *
  2084. * Spin the CPU waiting for the RBBM status to return idle
  2085. */
  2086. int adreno_spin_idle(struct adreno_device *adreno_dev, unsigned int timeout)
  2087. {
  2088. unsigned long wait = jiffies + msecs_to_jiffies(timeout);
  2089. do {
  2090. /*
  2091. * If we fault, stop waiting and return an error. The dispatcher
  2092. * will clean up the fault from the work queue, but we need to
  2093. * make sure we don't block it by waiting for an idle that
  2094. * will never come.
  2095. */
  2096. if (adreno_gpu_fault(adreno_dev) != 0)
  2097. return -EDEADLK;
  2098. if (adreno_isidle(adreno_dev))
  2099. return 0;
  2100. } while (time_before(jiffies, wait));
  2101. /*
  2102. * Under rare conditions, preemption can cause the while loop to exit
2103. * without checking if the GPU is idle. Check one last time before we
  2104. * return failure.
  2105. */
  2106. if (adreno_gpu_fault(adreno_dev) != 0)
  2107. return -EDEADLK;
  2108. if (adreno_isidle(adreno_dev))
  2109. return 0;
  2110. return -ETIMEDOUT;
  2111. }
  2112. /**
  2113. * adreno_idle() - wait for the GPU hardware to go idle
  2114. * @device: Pointer to the KGSL device structure for the GPU
  2115. *
  2116. * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
  2117. * Caller must hold the device mutex, and must not hold the dispatcher mutex.
  2118. */
  2119. int adreno_idle(struct kgsl_device *device)
  2120. {
  2121. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2122. int ret;
  2123. /*
  2124. * Make sure the device mutex is held so the dispatcher can't send any
  2125. * more commands to the hardware
  2126. */
  2127. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  2128. return -EDEADLK;
  2129. /* Check if we are already idle before idling dispatcher */
  2130. if (adreno_isidle(adreno_dev))
  2131. return 0;
  2132. /*
  2133. * Wait for dispatcher to finish completing commands
  2134. * already submitted
  2135. */
  2136. ret = adreno_dispatcher_idle(adreno_dev);
  2137. if (ret)
  2138. return ret;
  2139. return adreno_spin_idle(adreno_dev, ADRENO_IDLE_TIMEOUT);
  2140. }
  2141. static int adreno_drain_and_idle(struct kgsl_device *device)
  2142. {
  2143. int ret;
  2144. reinit_completion(&device->halt_gate);
  2145. ret = kgsl_active_count_wait(device, 0, HZ);
  2146. if (ret)
  2147. return ret;
  2148. return adreno_idle(device);
  2149. }
  2150. /* Caller must hold the device mutex. */
  2151. int adreno_suspend_context(struct kgsl_device *device)
  2152. {
  2153. /* process any profiling results that are available */
  2154. adreno_profile_process_results(ADRENO_DEVICE(device));
  2155. /* Wait for the device to go idle */
  2156. return adreno_idle(device);
  2157. }
  2158. void adreno_cx_misc_regread(struct adreno_device *adreno_dev,
  2159. unsigned int offsetwords, unsigned int *value)
  2160. {
  2161. unsigned int cx_misc_offset;
  2162. WARN_ONCE(!adreno_dev->cx_misc_virt,
  2163. "cx_misc region is not defined in device tree");
  2164. cx_misc_offset = (offsetwords << 2);
  2165. if (!adreno_dev->cx_misc_virt ||
  2166. (cx_misc_offset >= adreno_dev->cx_misc_len))
  2167. return;
  2168. *value = __raw_readl(adreno_dev->cx_misc_virt + cx_misc_offset);
  2169. /*
  2170. * ensure this read finishes before the next one.
  2171. * i.e. act like normal readl()
  2172. */
  2173. rmb();
  2174. }
  2175. void adreno_isense_regread(struct adreno_device *adreno_dev,
  2176. unsigned int offsetwords, unsigned int *value)
  2177. {
  2178. unsigned int isense_offset;
  2179. isense_offset = (offsetwords << 2);
  2180. if (!adreno_dev->isense_virt ||
  2181. (isense_offset >= adreno_dev->isense_len))
  2182. return;
  2183. *value = __raw_readl(adreno_dev->isense_virt + isense_offset);
  2184. /*
  2185. * ensure this read finishes before the next one.
  2186. * i.e. act like normal readl()
  2187. */
  2188. rmb();
  2189. }
  2190. bool adreno_gx_is_on(struct adreno_device *adreno_dev)
  2191. {
  2192. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2193. return gpudev->gx_is_on(adreno_dev);
  2194. }
  2195. void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev,
  2196. unsigned int offsetwords, unsigned int value)
  2197. {
  2198. unsigned int cx_misc_offset;
  2199. WARN_ONCE(!adreno_dev->cx_misc_virt,
  2200. "cx_misc region is not defined in device tree");
  2201. cx_misc_offset = (offsetwords << 2);
  2202. if (!adreno_dev->cx_misc_virt ||
  2203. (cx_misc_offset >= adreno_dev->cx_misc_len))
  2204. return;
  2205. /*
  2206. * ensure previous writes post before this one,
  2207. * i.e. act like normal writel()
  2208. */
  2209. wmb();
  2210. __raw_writel(value, adreno_dev->cx_misc_virt + cx_misc_offset);
  2211. }
  2212. void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev,
  2213. unsigned int offsetwords,
  2214. unsigned int mask, unsigned int bits)
  2215. {
  2216. unsigned int val = 0;
  2217. adreno_cx_misc_regread(adreno_dev, offsetwords, &val);
  2218. val &= ~mask;
  2219. adreno_cx_misc_regwrite(adreno_dev, offsetwords, val | bits);
  2220. }
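/*
 * Read-modify-write example with hypothetical values: with mask = 0xf and
 * bits = 0x3, a register currently holding 0x1a is read, the masked bits are
 * cleared (0x10) and the new bits OR'd in, so 0x13 is written back.
 */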
  2221. void adreno_profile_submit_time(struct adreno_submit_time *time)
  2222. {
  2223. struct kgsl_drawobj *drawobj;
  2224. struct kgsl_drawobj_cmd *cmdobj;
  2225. struct kgsl_mem_entry *entry;
  2226. struct kgsl_drawobj_profiling_buffer *profile_buffer;
  2227. if (!time)
  2228. return;
  2229. drawobj = time->drawobj;
  2230. if (drawobj == NULL)
  2231. return;
  2232. cmdobj = CMDOBJ(drawobj);
  2233. entry = cmdobj->profiling_buf_entry;
  2234. if (!entry)
  2235. return;
  2236. profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
  2237. cmdobj->profiling_buffer_gpuaddr);
  2238. if (profile_buffer == NULL)
  2239. return;
  2240. /* Return kernel clock time to the client if requested */
  2241. if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) {
  2242. u64 secs = time->ktime;
  2243. profile_buffer->wall_clock_ns =
  2244. do_div(secs, NSEC_PER_SEC);
  2245. profile_buffer->wall_clock_s = secs;
  2246. } else {
  2247. profile_buffer->wall_clock_s = time->utime.tv_sec;
  2248. profile_buffer->wall_clock_ns = time->utime.tv_nsec;
  2249. }
  2250. profile_buffer->gpu_ticks_queued = time->ticks;
  2251. kgsl_memdesc_unmap(&entry->memdesc);
  2252. }
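/*
 * Example of the KGSL_DRAWOBJ_PROFILING_KTIME split above, using a
 * hypothetical value: for time->ktime = 5000000123 ns, do_div() leaves the
 * quotient in secs and returns the remainder, so wall_clock_s = 5 and
 * wall_clock_ns = 123.
 */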
  2253. /**
  2254. * adreno_waittimestamp - sleep while waiting for the specified timestamp
2255. * @device: Pointer to a KGSL device structure
2256. * @context: Pointer to the active kgsl context
2257. * @timestamp: GPU timestamp to wait for
2258. * @msecs: Amount of time to wait (in milliseconds)
  2259. *
  2260. * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
  2261. */
  2262. static int adreno_waittimestamp(struct kgsl_device *device,
  2263. struct kgsl_context *context,
  2264. unsigned int timestamp,
  2265. unsigned int msecs)
  2266. {
  2267. int ret;
  2268. if (context == NULL) {
2269. /* If they are still doing this, complain once */
  2270. dev_WARN_ONCE(device->dev, 1,
  2271. "IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
  2272. return -ENOTTY;
  2273. }
  2274. /* Return -ENOENT if the context has been detached */
  2275. if (kgsl_context_detached(context))
  2276. return -ENOENT;
  2277. ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
  2278. timestamp, msecs);
  2279. /* If the context got invalidated then return a specific error */
  2280. if (kgsl_context_invalid(context))
  2281. ret = -EDEADLK;
  2282. /*
  2283. * Return -EPROTO if the device has faulted since the last time we
  2284. * checked. Userspace uses this as a marker for performing post
  2285. * fault activities
  2286. */
  2287. if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
  2288. ret = -EPROTO;
  2289. return ret;
  2290. }
  2291. /**
  2292. * __adreno_readtimestamp() - Reads the timestamp from memstore memory
  2293. * @adreno_dev: Pointer to an adreno device
  2294. * @index: Index into the memstore memory
  2295. * @type: Type of timestamp to read
  2296. * @timestamp: The out parameter where the timestamp is read
  2297. */
  2298. static int __adreno_readtimestamp(struct adreno_device *adreno_dev, int index,
  2299. int type, unsigned int *timestamp)
  2300. {
  2301. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2302. int status = 0;
  2303. switch (type) {
  2304. case KGSL_TIMESTAMP_CONSUMED:
  2305. kgsl_sharedmem_readl(device->memstore, timestamp,
  2306. KGSL_MEMSTORE_OFFSET(index, soptimestamp));
  2307. break;
  2308. case KGSL_TIMESTAMP_RETIRED:
  2309. kgsl_sharedmem_readl(device->memstore, timestamp,
  2310. KGSL_MEMSTORE_OFFSET(index, eoptimestamp));
  2311. break;
  2312. default:
  2313. status = -EINVAL;
  2314. *timestamp = 0;
  2315. break;
  2316. }
  2317. return status;
  2318. }
  2319. /**
  2320. * adreno_rb_readtimestamp(): Return the value of given type of timestamp
  2321. * for a RB
  2322. * @adreno_dev: adreno device whose timestamp values are being queried
  2323. * @priv: The object being queried for a timestamp (expected to be a rb pointer)
  2324. * @type: The type of timestamp (one of 3) to be read
  2325. * @timestamp: Pointer to where the read timestamp is to be written to
  2326. *
  2327. * CONSUMED and RETIRED type timestamps are sorted by id and are constantly
  2328. * updated by the GPU through shared memstore memory. QUEUED type timestamps
  2329. * are read directly from context struct.
2330. * Returns 0 on success, with the timestamp value written to *timestamp; returns
2331. * -EINVAL on any read error or invalid type, with *timestamp set to 0.
  2332. */
  2333. int adreno_rb_readtimestamp(struct adreno_device *adreno_dev,
  2334. void *priv, enum kgsl_timestamp_type type,
  2335. unsigned int *timestamp)
  2336. {
  2337. int status = 0;
  2338. struct adreno_ringbuffer *rb = priv;
  2339. if (type == KGSL_TIMESTAMP_QUEUED)
  2340. *timestamp = rb->timestamp;
  2341. else
  2342. status = __adreno_readtimestamp(adreno_dev,
  2343. rb->id + KGSL_MEMSTORE_MAX,
  2344. type, timestamp);
  2345. return status;
  2346. }
  2347. /**
  2348. * adreno_readtimestamp(): Return the value of given type of timestamp
  2349. * @device: GPU device whose timestamp values are being queried
  2350. * @priv: The object being queried for a timestamp (expected to be a context)
  2351. * @type: The type of timestamp (one of 3) to be read
  2352. * @timestamp: Pointer to where the read timestamp is to be written to
  2353. *
  2354. * CONSUMED and RETIRED type timestamps are sorted by id and are constantly
  2355. * updated by the GPU through shared memstore memory. QUEUED type timestamps
  2356. * are read directly from context struct.
2357. * Returns 0 on success, with the timestamp value written to *timestamp; returns
2358. * -EINVAL on any read error or invalid type, with *timestamp set to 0.
  2359. */
  2360. static int adreno_readtimestamp(struct kgsl_device *device,
  2361. void *priv, enum kgsl_timestamp_type type,
  2362. unsigned int *timestamp)
  2363. {
  2364. int status = 0;
  2365. struct kgsl_context *context = priv;
  2366. if (type == KGSL_TIMESTAMP_QUEUED) {
  2367. struct adreno_context *ctxt = ADRENO_CONTEXT(context);
  2368. *timestamp = ctxt->timestamp;
  2369. } else
  2370. status = __adreno_readtimestamp(ADRENO_DEVICE(device),
  2371. context->id, type, timestamp);
  2372. return status;
  2373. }
  2374. /**
  2375. * adreno_device_private_create(): Allocate an adreno_device_private structure
  2376. */
  2377. static struct kgsl_device_private *adreno_device_private_create(void)
  2378. {
  2379. struct adreno_device_private *adreno_priv =
  2380. kzalloc(sizeof(*adreno_priv), GFP_KERNEL);
  2381. if (adreno_priv) {
  2382. INIT_LIST_HEAD(&adreno_priv->perfcounter_list);
  2383. return &adreno_priv->dev_priv;
  2384. }
  2385. return NULL;
  2386. }
  2387. /**
  2388. * adreno_device_private_destroy(): Destroy an adreno_device_private structure
  2389. * and release the perfcounters held by the kgsl fd.
  2390. * @dev_priv: The kgsl device private structure
  2391. */
  2392. static void adreno_device_private_destroy(struct kgsl_device_private *dev_priv)
  2393. {
  2394. struct kgsl_device *device = dev_priv->device;
  2395. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2396. struct adreno_device_private *adreno_priv =
  2397. container_of(dev_priv, struct adreno_device_private,
  2398. dev_priv);
  2399. struct adreno_perfcounter_list_node *p, *tmp;
  2400. mutex_lock(&device->mutex);
  2401. list_for_each_entry_safe(p, tmp, &adreno_priv->perfcounter_list, node) {
  2402. adreno_perfcounter_put(adreno_dev, p->groupid,
  2403. p->countable, PERFCOUNTER_FLAG_NONE);
  2404. list_del(&p->node);
  2405. kfree(p);
  2406. }
  2407. mutex_unlock(&device->mutex);
  2408. kfree(adreno_priv);
  2409. }
  2410. /**
  2411. * adreno_power_stats() - Reads the counters needed for freq decisions
  2412. * @device: Pointer to device whose counters are read
  2413. * @stats: Pointer to stats set that needs updating
  2414. * Power: The caller is expected to be in a clock enabled state as this
  2415. * function does reg reads
  2416. */
  2417. static void adreno_power_stats(struct kgsl_device *device,
  2418. struct kgsl_power_stats *stats)
  2419. {
  2420. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2421. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2422. return gpudev->power_stats(adreno_dev, stats);
  2423. }
  2424. static int adreno_regulator_enable(struct kgsl_device *device)
  2425. {
  2426. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2427. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2428. if (gpudev->regulator_enable)
  2429. return gpudev->regulator_enable(adreno_dev);
  2430. return 0;
  2431. }
  2432. static bool adreno_is_hw_collapsible(struct kgsl_device *device)
  2433. {
  2434. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2435. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2436. if (!gpudev->is_hw_collapsible(adreno_dev))
  2437. return false;
  2438. if (gpudev->clear_pending_transactions(adreno_dev))
  2439. return false;
  2440. adreno_dispatcher_stop_fault_timer(device);
  2441. return true;
  2442. }
  2443. static void adreno_regulator_disable(struct kgsl_device *device)
  2444. {
  2445. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2446. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2447. if (gpudev->regulator_disable)
  2448. gpudev->regulator_disable(adreno_dev);
  2449. }
  2450. static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
  2451. unsigned int prelevel, unsigned int postlevel, bool post)
  2452. {
  2453. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2454. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  2455. if (gpudev->pwrlevel_change_settings)
  2456. gpudev->pwrlevel_change_settings(adreno_dev, prelevel,
  2457. postlevel, post);
  2458. }
  2459. static bool adreno_is_hwcg_on(struct kgsl_device *device)
  2460. {
  2461. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2462. return adreno_dev->hwcg_enabled;
  2463. }
  2464. static int adreno_queue_cmds(struct kgsl_device_private *dev_priv,
  2465. struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
  2466. u32 count, u32 *timestamp)
  2467. {
  2468. struct kgsl_device *device = dev_priv->device;
  2469. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2470. if (WARN_ON(!adreno_dev->dispatch_ops || !adreno_dev->dispatch_ops->queue_cmds))
  2471. return -ENODEV;
  2472. return adreno_dev->dispatch_ops->queue_cmds(dev_priv, context, drawobj,
  2473. count, timestamp);
  2474. }
  2475. static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
  2476. struct kgsl_context *context, struct kgsl_memobj_node *ib)
  2477. {
  2478. struct kgsl_device *device = dev_priv->device;
  2479. struct kgsl_process_private *private = dev_priv->process_priv;
  2480. /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
  2481. if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
  2482. pr_context(device, context, "ctxt %u invalid ib size %lld\n",
  2483. context->id, ib->size);
  2484. return false;
  2485. }
  2486. /* Make sure that the address is in range and dword aligned */
  2487. if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr,
  2488. ib->size) || !IS_ALIGNED(ib->gpuaddr, 4)) {
  2489. pr_context(device, context, "ctxt %u invalid ib gpuaddr %llX\n",
  2490. context->id, ib->gpuaddr);
  2491. return false;
  2492. }
  2493. return true;
  2494. }
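/*
 * Example of the checks above with hypothetical values: an IB of 0x400000
 * bytes is 0x100000 dwords, which exceeds the 0xFFFFF dword CP limit and is
 * rejected; an IB whose gpuaddr is not 4-byte aligned or falls outside the
 * process pagetable range is rejected as well.
 */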
int adreno_verify_cmdobj(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
		uint32_t count)
{
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_memobj_node *ib;
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* Verify the IBs before they get queued */
		if (drawobj[i]->type == CMDOBJ_TYPE) {
			struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]);

			list_for_each_entry(ib, &cmdobj->cmdlist, node)
				if (!_verify_ib(dev_priv,
					&ADRENO_CONTEXT(context)->base, ib))
					return -EINVAL;

			/*
			 * Clear the wake on touch bit to indicate an IB has
			 * been submitted since the last time we set it.
			 * But only clear it when we have rendering commands.
			 */
			device->pwrctrl.wake_on_touch = false;
		}

		/* A3XX does not have support for drawobj profiling */
		if (adreno_is_a3xx(ADRENO_DEVICE(device)) &&
			(drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING))
			return -EOPNOTSUPP;
	}

	return 0;
}
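
/*
 * Queue the recurring (LSR) command object. Only one recurring command
 * object may be active at a time; on success, listeners on the device
 * notifier chain are told the GPU/GMU is ready.
 */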
static int adreno_queue_recurring_cmd(struct kgsl_device_private *dev_priv,
	struct kgsl_context *context, struct kgsl_drawobj *drawobj)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_hwsched *hwsched = &adreno_dev->hwsched;
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);
	int ret;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LSR))
		return -EOPNOTSUPP;

	if (!gpudev->send_recurring_cmdobj)
		return -ENODEV;

	ret = adreno_verify_cmdobj(dev_priv, context, &drawobj, 1);
	if (ret)
		return ret;

	mutex_lock(&device->mutex);

	/* Only one recurring command allowed */
	if (hwsched->recurring_cmdobj) {
		mutex_unlock(&device->mutex);
		return -EINVAL;
	}

	ret = kgsl_check_context_state(context);
	if (ret) {
		mutex_unlock(&device->mutex);
		return ret;
	}

	set_bit(CMDOBJ_RECURRING_START, &cmdobj->priv);

	ret = gpudev->send_recurring_cmdobj(adreno_dev, cmdobj);
	mutex_unlock(&device->mutex);

	if (!ret)
		srcu_notifier_call_chain(&device->nh, GPU_GMU_READY, NULL);

	return ret;
}
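
/*
 * Stop the recurring (LSR) command object for the given context and notify
 * listeners on the device notifier chain that the GPU/GMU workload has
 * stopped.
 */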
static int adreno_dequeue_recurring_cmd(struct kgsl_device *device,
	struct kgsl_context *context)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_hwsched *hwsched = &adreno_dev->hwsched;
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_drawobj *recurring_drawobj;
	int ret;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LSR))
		return -EOPNOTSUPP;

	if (!gpudev->send_recurring_cmdobj)
		return -ENODEV;

	mutex_lock(&device->mutex);

	/* We can safely return here as the recurring workload is already untracked */
	if (hwsched->recurring_cmdobj == NULL) {
		mutex_unlock(&device->mutex);
		return -EINVAL;
	}

	recurring_drawobj = DRAWOBJ(hwsched->recurring_cmdobj);

	/* Check whether the recurring command belongs to the same context */
	if (recurring_drawobj->context != context) {
		mutex_unlock(&device->mutex);
		return -EINVAL;
	}

	ret = kgsl_check_context_state(context);
	if (ret) {
		mutex_unlock(&device->mutex);
		return ret;
	}

	clear_bit(CMDOBJ_RECURRING_START, &hwsched->recurring_cmdobj->priv);
	set_bit(CMDOBJ_RECURRING_STOP, &hwsched->recurring_cmdobj->priv);

	ret = gpudev->send_recurring_cmdobj(adreno_dev, hwsched->recurring_cmdobj);
	mutex_unlock(&device->mutex);

	if (!ret)
		srcu_notifier_call_chain(&device->nh, GPU_GMU_STOP, NULL);

	return ret;
}

static void adreno_set_isdb_breakpoint_registers(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->set_isdb_breakpoint_registers)
		gpudev->set_isdb_breakpoint_registers(adreno_dev);
}

static void adreno_drawctxt_sched(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (WARN_ON(!adreno_dev->dispatch_ops || !adreno_dev->dispatch_ops->queue_context))
		return;

	adreno_dev->dispatch_ops->queue_context(adreno_dev,
		ADRENO_CONTEXT(context));
}

void adreno_mark_for_coldboot(struct adreno_device *adreno_dev)
{
	if (!adreno_dev->warmboot_enabled)
		return;

	set_bit(ADRENO_DEVICE_FORCE_COLDBOOT, &adreno_dev->priv);
}
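
/*
 * Return true if the GPU is currently stalled on an SMMU pagefault.
 */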
bool adreno_smmu_is_stalled(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_mmu *mmu = &device->mmu;
	u32 fault, val;

	/*
	 * RBBM_STATUS3:SMMU_STALLED_ON_FAULT (BIT 24) tells whether the GPU
	 * encountered a pagefault. On Gen8 the pagefault status is checked
	 * via the software condition as RBBM_STATUS3 is not available.
	 */
	if (ADRENO_GPUREV(adreno_dev) < 0x080000) {
		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &val);
		return (val & BIT(24));
	}

	if (WARN_ON(!adreno_dev->dispatch_ops || !adreno_dev->dispatch_ops->get_fault))
		return false;

	fault = adreno_dev->dispatch_ops->get_fault(adreno_dev);

	return ((fault & ADRENO_IOMMU_PAGE_FAULT) &&
		test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &mmu->pfpolicy)) ? true : false;
}
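
/*
 * Suspend the GPU, apply a settings change via @callback, mark the device
 * for coldboot and resume. Intended for settings that must be applied
 * across a full power cycle.
 */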
int adreno_power_cycle(struct adreno_device *adreno_dev,
	void (*callback)(struct adreno_device *adreno_dev, void *priv),
	void *priv)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
	int ret;

	mutex_lock(&device->mutex);
	ret = ops->pm_suspend(adreno_dev);

	if (!ret) {
		callback(adreno_dev, priv);
		adreno_mark_for_coldboot(adreno_dev);
		ops->pm_resume(adreno_dev);
	}

	mutex_unlock(&device->mutex);

	return ret;
}
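
/*
 * Helpers to update a bool/u32 value under a full power cycle. For example
 * (illustrative only, 'some_feature_enabled' is a made-up field):
 *
 *	ret = adreno_power_cycle_bool(adreno_dev,
 *		&adreno_dev->some_feature_enabled, true);
 */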
struct cycle_data {
	void *ptr;
	void *val;
};

static void cycle_set_bool(struct adreno_device *adreno_dev, void *priv)
{
	struct cycle_data *data = priv;

	*((bool *) data->ptr) = *((bool *) data->val);
}

int adreno_power_cycle_bool(struct adreno_device *adreno_dev,
	bool *flag, bool val)
{
	struct cycle_data data = { .ptr = flag, .val = &val };

	return adreno_power_cycle(adreno_dev, cycle_set_bool, &data);
}

static void cycle_set_u32(struct adreno_device *adreno_dev, void *priv)
{
	struct cycle_data *data = priv;

	*((u32 *) data->ptr) = *((u32 *) data->val);
}

int adreno_power_cycle_u32(struct adreno_device *adreno_dev,
	u32 *flag, u32 val)
{
	struct cycle_data data = { .ptr = flag, .val = &val };

	return adreno_power_cycle(adreno_dev, cycle_set_u32, &data);
}
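
/*
 * Set the GPU core clock for the requested power level, preferring the
 * target-specific gpu_clock_set() op and falling back to clk_set_rate()
 * on the first graphics clock.
 */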
static int adreno_gpu_clock_set(struct kgsl_device *device, u32 pwrlevel)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrlevel *pl = &pwr->pwrlevels[pwrlevel];
	int ret;

	if (ops->gpu_clock_set)
		return ops->gpu_clock_set(adreno_dev, pwrlevel);

	ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
	if (ret)
		dev_err(device->dev, "GPU clk freq set failure: %d\n", ret);

	return ret;
}
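
/*
 * Vote the GPU interconnect path: 'ab' (in MBps) is the average bandwidth
 * and the DDR table entry for 'level' (in kBps) is the peak bandwidth.
 * Skip the vote when neither value changed.
 */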
static int adreno_interconnect_bus_set(struct adreno_device *adreno_dev,
	int level, u32 ab)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if ((level == pwr->cur_buslevel) && (ab == pwr->cur_ab))
		return 0;

	kgsl_icc_set_tag(pwr, level);
	pwr->cur_buslevel = level;
	pwr->cur_ab = ab;

	icc_set_bw(pwr->icc_path, MBps_to_icc(ab),
		kBps_to_icc(pwr->ddr_table[level]));

	trace_kgsl_buslevel(device, pwr->active_pwrlevel, level, ab);

	return 0;
}

static int adreno_gpu_bus_set(struct kgsl_device *device, int level, u32 ab)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);

	if (ops->gpu_bus_set)
		return ops->gpu_bus_set(adreno_dev, level, ab);

	return adreno_interconnect_bus_set(adreno_dev, level, ab);
}

static void adreno_deassert_gbif_halt(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->deassert_gbif_halt)
		gpudev->deassert_gbif_halt(adreno_dev);
}
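
/*
 * Ask the dispatcher to back the given sync fence with a hardware fence, if
 * the installed dispatch_ops support it.
 */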
static void adreno_create_hw_fence(struct kgsl_device *device, struct kgsl_sync_fence *kfence)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (WARN_ON(!adreno_dev->dispatch_ops))
		return;

	if (adreno_dev->dispatch_ops->create_hw_fence)
		adreno_dev->dispatch_ops->create_hw_fence(adreno_dev, kfence);
}

u64 adreno_read_cx_timer(struct adreno_device *adreno_dev)
{
	/* Check if the CX timer is initialized */
	if (!test_bit(ADRENO_DEVICE_CX_TIMER_INITIALIZED, &adreno_dev->priv))
		return 0;

	/* Since the GPU CX and CPU timers are synchronized return the CPU timer */
	return arch_timer_read_counter();
}

static const struct kgsl_functable adreno_functable = {
	/* Mandatory functions */
	.suspend_context = adreno_suspend_context,
	.first_open = adreno_first_open,
	.start = adreno_start,
	.stop = adreno_stop,
	.last_close = adreno_last_close,
	.getproperty = adreno_getproperty,
	.getproperty_compat = adreno_getproperty_compat,
	.waittimestamp = adreno_waittimestamp,
	.readtimestamp = adreno_readtimestamp,
	.queue_cmds = adreno_queue_cmds,
	.ioctl = adreno_ioctl,
	.compat_ioctl = adreno_compat_ioctl,
	.power_stats = adreno_power_stats,
	.snapshot = adreno_snapshot,
	.drain_and_idle = adreno_drain_and_idle,
	.device_private_create = adreno_device_private_create,
	.device_private_destroy = adreno_device_private_destroy,
	/* Optional functions */
	.drawctxt_create = adreno_drawctxt_create,
	.drawctxt_detach = adreno_drawctxt_detach,
	.drawctxt_destroy = adreno_drawctxt_destroy,
	.drawctxt_dump = adreno_drawctxt_dump,
	.setproperty = adreno_setproperty,
	.setproperty_compat = adreno_setproperty_compat,
	.drawctxt_sched = adreno_drawctxt_sched,
	.resume = adreno_dispatcher_start,
	.regulator_enable = adreno_regulator_enable,
	.is_hw_collapsible = adreno_is_hw_collapsible,
	.regulator_disable = adreno_regulator_disable,
	.pwrlevel_change_settings = adreno_pwrlevel_change_settings,
	.query_property_list = adreno_query_property_list,
	.is_hwcg_on = adreno_is_hwcg_on,
	.gpu_clock_set = adreno_gpu_clock_set,
	.gpu_bus_set = adreno_gpu_bus_set,
	.deassert_gbif_halt = adreno_deassert_gbif_halt,
	.queue_recurring_cmd = adreno_queue_recurring_cmd,
	.dequeue_recurring_cmd = adreno_dequeue_recurring_cmd,
	.set_isdb_breakpoint_registers = adreno_set_isdb_breakpoint_registers,
	.create_hw_fence = adreno_create_hw_fence,
};

static const struct component_master_ops adreno_ops = {
	.bind = adreno_bind,
	.unbind = adreno_unbind,
};

const struct adreno_power_ops adreno_power_operations = {
	.first_open = adreno_open,
	.last_close = adreno_close,
	.active_count_get = adreno_pwrctrl_active_count_get,
	.active_count_put = adreno_pwrctrl_active_count_put,
	.pm_suspend = adreno_suspend,
	.pm_resume = adreno_resume,
	.touch_wakeup = adreno_touch_wakeup,
};

static int _compare_of(struct device *dev, void *data)
{
	return (dev->of_node == data);
}

static void _release_of(struct device *dev, void *data)
{
	of_node_put(data);
}

static void adreno_add_components(struct device *dev,
		struct component_match **match)
{
	struct device_node *node;

	/*
	 * Add kgsl-smmu, context banks and gmu as components, if supported.
	 * Master bind (adreno_bind) will be called only once all added
	 * components are available.
	 */
	for_each_matching_node(node, adreno_component_match) {
		if (!of_device_is_available(node))
			continue;

		component_match_add_release(dev, match, _release_of, _compare_of, node);
	}
}

static int adreno_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;

	adreno_add_components(&pdev->dev, &match);

	if (!match)
		return -ENODEV;

	return component_master_add_with_match(&pdev->dev,
			&adreno_ops, match);
}

static int adreno_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &adreno_ops);

	return 0;
}

#if IS_ENABLED(CONFIG_QCOM_KGSL_HIBERNATION)
#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
/*
 * Issue a hyp_assign call to assign unused internal/userspace secure
 * buffers back to the kernel.
 */
static int adreno_secure_pt_hibernate(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_process_private *process;
	struct kgsl_mem_entry *entry;
	struct kgsl_global_memdesc *md;
	struct kgsl_memdesc *memdesc;
	int ret, id;

	read_lock(&kgsl_driver.proclist_lock);
	list_for_each_entry(process, &kgsl_driver.process_list, list) {
		idr_for_each_entry(&process->mem_idr, entry, id) {
			memdesc = &entry->memdesc;
			if (!kgsl_memdesc_is_secured(memdesc) ||
				(memdesc->flags & KGSL_MEMFLAGS_USERMEM_ION) ||
				(memdesc->priv & KGSL_MEMDESC_HYPASSIGNED_HLOS))
				continue;

			read_unlock(&kgsl_driver.proclist_lock);

			if (kgsl_unlock_sgt(memdesc->sgt))
				dev_err(device->dev, "kgsl_unlock_sgt failed\n");

			memdesc->priv |= KGSL_MEMDESC_HYPASSIGNED_HLOS;
			read_lock(&kgsl_driver.proclist_lock);
		}
	}
	read_unlock(&kgsl_driver.proclist_lock);

	list_for_each_entry(md, &device->globals, node) {
		memdesc = &md->memdesc;
		if (kgsl_memdesc_is_secured(memdesc) &&
			!(memdesc->priv & KGSL_MEMDESC_HYPASSIGNED_HLOS)) {
			ret = kgsl_unlock_sgt(memdesc->sgt);
			if (ret) {
				dev_err(device->dev, "kgsl_unlock_sgt failed ret %d\n", ret);
				goto fail;
			}
			memdesc->priv |= KGSL_MEMDESC_HYPASSIGNED_HLOS;
		}
	}

	return 0;

fail:
	list_for_each_entry(md, &device->globals, node) {
		memdesc = &md->memdesc;
		if (kgsl_memdesc_is_secured(memdesc) &&
			(memdesc->priv & KGSL_MEMDESC_HYPASSIGNED_HLOS)) {
			kgsl_lock_sgt(memdesc->sgt, memdesc->size);
			memdesc->priv &= ~KGSL_MEMDESC_HYPASSIGNED_HLOS;
		}
	}

	return -EBUSY;
}
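
/*
 * Re-assign the secure buffers that were handed to HLOS during hibernation
 * back to the secure domain before the GPU is brought back up.
 */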
static int adreno_secure_pt_restore(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_process_private *process;
	struct kgsl_mem_entry *entry;
	struct kgsl_memdesc *memdesc;
	struct kgsl_global_memdesc *md;
	int ret, id;

	list_for_each_entry(md, &device->globals, node) {
		memdesc = &md->memdesc;
		if (kgsl_memdesc_is_secured(memdesc) &&
			(memdesc->priv & KGSL_MEMDESC_HYPASSIGNED_HLOS)) {
			ret = kgsl_lock_sgt(memdesc->sgt, memdesc->size);
			if (ret) {
				dev_err(device->dev, "kgsl_lock_sgt failed ret %d\n", ret);
				return ret;
			}
			memdesc->priv &= ~KGSL_MEMDESC_HYPASSIGNED_HLOS;
		}
	}

	read_lock(&kgsl_driver.proclist_lock);
	list_for_each_entry(process, &kgsl_driver.process_list, list) {
		idr_for_each_entry(&process->mem_idr, entry, id) {
			memdesc = &entry->memdesc;
			if (!kgsl_memdesc_is_secured(memdesc) ||
				(memdesc->flags & KGSL_MEMFLAGS_USERMEM_ION) ||
				!(memdesc->priv & KGSL_MEMDESC_HYPASSIGNED_HLOS))
				continue;

			read_unlock(&kgsl_driver.proclist_lock);

			ret = kgsl_lock_sgt(memdesc->sgt, memdesc->size);
			if (ret) {
				dev_err(device->dev, "kgsl_lock_sgt failed ret %d\n", ret);
				return ret;
			}
			memdesc->priv &= ~KGSL_MEMDESC_HYPASSIGNED_HLOS;

			read_lock(&kgsl_driver.proclist_lock);
		}
	}
	read_unlock(&kgsl_driver.proclist_lock);

	return 0;
}
#else
static int adreno_secure_pt_hibernate(struct adreno_device *adreno_dev)
{
	return 0;
}

static int adreno_secure_pt_restore(struct adreno_device *adreno_dev)
{
	return 0;
}
#endif /* IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER) */
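
/*
 * Hibernation (freeze/poweroff) handler: suspend the GPU, invalidate the CX
 * timer seed, unload the zap shader and hand secure buffers over to HLOS.
 */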
static int adreno_hibernation_suspend(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct adreno_device *adreno_dev;
	const struct adreno_power_ops *ops;
	int status;

	if (!device)
		return 0;

	adreno_dev = ADRENO_DEVICE(device);
	ops = ADRENO_POWER_OPS(adreno_dev);

	mutex_lock(&device->mutex);

	status = ops->pm_suspend(adreno_dev);
	if (status)
		goto err;

	/*
	 * When the device enters hibernation, CX is collapsed, which pauses
	 * the GPU CX timer. Clear the ADRENO_DEVICE_CX_TIMER_INITIALIZED flag
	 * to ensure that the CX timer is reseeded during resume.
	 */
	clear_bit(ADRENO_DEVICE_CX_TIMER_INITIALIZED, &adreno_dev->priv);

	/*
	 * Unload the zap shader during hibernation and reload it during
	 * resume, as the TZ driver may not be aware of the hibernation.
	 */
	adreno_zap_shader_unload(adreno_dev);

	status = adreno_secure_pt_hibernate(adreno_dev);

err:
	mutex_unlock(&device->mutex);
	return status;
}
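
/*
 * Hibernation (thaw/restore) handler: reclaim secure buffers, reprogram the
 * SMMU apertures, force a GMU first boot, reinitialize the devfreq governor
 * state and resume the GPU.
 */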
static int adreno_hibernation_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_iommu *iommu;
	struct kgsl_pwrscale *pwrscale;
	struct adreno_device *adreno_dev;
	const struct adreno_power_ops *ops;
	int ret;

	if (!device)
		return 0;

	iommu = &device->mmu.iommu;
	pwrscale = &device->pwrscale;
	adreno_dev = ADRENO_DEVICE(device);
	ops = ADRENO_POWER_OPS(adreno_dev);

	mutex_lock(&device->mutex);

	ret = adreno_secure_pt_restore(adreno_dev);
	if (ret)
		goto err;

	ret = kgsl_set_smmu_aperture(device, &iommu->user_context);
	if (ret)
		goto err;

	ret = kgsl_set_smmu_lpac_aperture(device, &iommu->lpac_context);
	if (ret < 0)
		goto err;

	gmu_core_dev_force_first_boot(device);

	msm_adreno_tz_reinit(pwrscale->devfreqptr);

	ops->pm_resume(adreno_dev);

err:
	mutex_unlock(&device->mutex);
	return ret;
}

static const struct dev_pm_ops adreno_pm_ops = {
	.suspend = adreno_pm_suspend,
	.resume = adreno_pm_resume,
	.freeze = adreno_hibernation_suspend,
	.thaw = adreno_hibernation_resume,
	.poweroff = adreno_hibernation_suspend,
	.restore = adreno_hibernation_resume,
};
#else
static const struct dev_pm_ops adreno_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(adreno_pm_suspend, adreno_pm_resume)
};
#endif /* IS_ENABLED(CONFIG_QCOM_KGSL_HIBERNATION) */

static struct platform_driver adreno_platform_driver = {
	.probe = adreno_probe,
	.remove = adreno_remove,
	.driver = {
		.name = "kgsl-3d",
		.pm = &adreno_pm_ops,
		.of_match_table = of_match_ptr(adreno_match_table),
	}
};

static int __init kgsl_3d_init(void)
{
	int ret;

	ret = kgsl_core_init();
	if (ret)
		return ret;

	ret = kgsl_mmu_init();
	if (ret) {
		kgsl_core_exit();
		return ret;
	}

	gmu_core_register();

	ret = platform_driver_register(&adreno_platform_driver);
	if (ret) {
		gmu_core_unregister();
		kgsl_mmu_exit();
		kgsl_core_exit();
	}

	return ret;
}

static void __exit kgsl_3d_exit(void)
{
	platform_driver_unregister(&adreno_platform_driver);
	gmu_core_unregister();
	kgsl_mmu_exit();
	kgsl_core_exit();
}

module_param_named(preempt_enable, adreno_preemption_enable, bool, 0600);
MODULE_PARM_DESC(preempt_enable, "Enable GPU HW Preemption");

module_init(kgsl_3d_init);
module_exit(kgsl_3d_exit);

MODULE_DESCRIPTION("3D Graphics driver");
MODULE_LICENSE("GPL v2");
MODULE_SOFTDEP("pre: arm_smmu nvmem_qfprom socinfo");
#if (KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE)
MODULE_IMPORT_NS(DMA_BUF);
#endif