kgsl.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <uapi/linux/sched/types.h>
  7. #include <linux/bitfield.h>
  8. #include <linux/ctype.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/delay.h>
  11. #include <linux/dma-buf.h>
  12. #include <linux/dma-map-ops.h>
  13. #include <linux/fdtable.h>
  14. #include <linux/io.h>
  15. #include <linux/mem-buf.h>
  16. #include <linux/mman.h>
  17. #include <linux/mm_types.h>
  18. #include <linux/msm_kgsl.h>
  19. #include <linux/msm_sysstats.h>
  20. #include <linux/of.h>
  21. #include <linux/of_fdt.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/qcom_dma_heap.h>
  24. #include <linux/security.h>
  25. #include <linux/sort.h>
  26. #include <linux/string_helpers.h>
  27. #include <soc/qcom/of_common.h>
  28. #include <soc/qcom/secure_buffer.h>
  29. #include "kgsl_compat.h"
  30. #include "kgsl_debugfs.h"
  31. #include "kgsl_device.h"
  32. #include "kgsl_eventlog.h"
  33. #include "kgsl_mmu.h"
  34. #include "kgsl_pool.h"
  35. #include "kgsl_reclaim.h"
  36. #include "kgsl_sync.h"
  37. #include "kgsl_sysfs.h"
  38. #include "kgsl_trace.h"
  39. /* Instantiate tracepoints */
  40. #define CREATE_TRACE_POINTS
  41. #include "kgsl_power_trace.h"
  42. #ifndef arch_mmap_check
  43. #define arch_mmap_check(addr, len, flags) (0)
  44. #endif
  45. #ifndef pgprot_writebackcache
  46. #define pgprot_writebackcache(_prot) (_prot)
  47. #endif
  48. #ifndef pgprot_writethroughcache
  49. #define pgprot_writethroughcache(_prot) (_prot)
  50. #endif
  51. #if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE)
  52. #define KGSL_DMA_BIT_MASK DMA_BIT_MASK(64)
  53. #else
  54. #define KGSL_DMA_BIT_MASK DMA_BIT_MASK(32)
  55. #endif
  56. /* List of dmabufs mapped */
  57. static LIST_HEAD(kgsl_dmabuf_list);
  58. static DEFINE_SPINLOCK(kgsl_dmabuf_lock);
  59. struct dmabuf_list_entry {
  60. struct page *firstpage;
  61. struct list_head node;
  62. struct list_head dmabuf_list;
  63. };
  64. struct kgsl_dma_buf_meta {
  65. struct kgsl_mem_entry *entry;
  66. struct dma_buf_attachment *attach;
  67. struct dma_buf *dmabuf;
  68. struct sg_table *table;
  69. struct dmabuf_list_entry *dle;
  70. struct list_head node;
  71. };
  72. static inline struct kgsl_pagetable *_get_memdesc_pagetable(
  73. struct kgsl_pagetable *pt, struct kgsl_mem_entry *entry)
  74. {
  75. /* if a secured buffer, map it to secure global pagetable */
  76. if (kgsl_memdesc_is_secured(&entry->memdesc))
  77. return pt->mmu->securepagetable;
  78. return pt;
  79. }
  80. static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
  81. static const struct vm_operations_struct kgsl_gpumem_vm_ops;
  82. /*
  83. * The memfree list contains the last N blocks of memory that have been freed.
  84. * On a GPU fault we walk the list to see if the faulting address had been
  85. * recently freed and print out a message to that effect
  86. */
  87. #define MEMFREE_ENTRIES 512
  88. static DEFINE_SPINLOCK(memfree_lock);
  89. struct memfree_entry {
  90. pid_t ptname;
  91. uint64_t gpuaddr;
  92. uint64_t size;
  93. pid_t pid;
  94. uint64_t flags;
  95. };
  96. static struct {
  97. struct memfree_entry *list;
  98. int head;
  99. int tail;
  100. } memfree;
  101. static inline bool match_memfree_addr(struct memfree_entry *entry,
  102. pid_t ptname, uint64_t gpuaddr)
  103. {
  104. return ((entry->ptname == ptname) &&
  105. (entry->size > 0) &&
  106. (gpuaddr >= entry->gpuaddr &&
  107. gpuaddr < (entry->gpuaddr + entry->size)));
  108. }
  109. int kgsl_memfree_find_entry(pid_t ptname, uint64_t *gpuaddr,
  110. uint64_t *size, uint64_t *flags, pid_t *pid)
  111. {
  112. int ptr;
  113. if (memfree.list == NULL)
  114. return 0;
  115. spin_lock(&memfree_lock);
  116. ptr = memfree.head - 1;
  117. if (ptr < 0)
  118. ptr = MEMFREE_ENTRIES - 1;
  119. /* Walk backwards through the list looking for the last match */
  120. while (ptr != memfree.tail) {
  121. struct memfree_entry *entry = &memfree.list[ptr];
  122. if (match_memfree_addr(entry, ptname, *gpuaddr)) {
  123. *gpuaddr = entry->gpuaddr;
  124. *flags = entry->flags;
  125. *size = entry->size;
  126. *pid = entry->pid;
  127. spin_unlock(&memfree_lock);
  128. return 1;
  129. }
  130. ptr = ptr - 1;
  131. if (ptr < 0)
  132. ptr = MEMFREE_ENTRIES - 1;
  133. }
  134. spin_unlock(&memfree_lock);
  135. return 0;
  136. }
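/*
 * Illustrative sketch, not part of the original file: how a GPU page-fault
 * path might use kgsl_memfree_find_entry() above to report that a faulting
 * address falls inside a recently freed allocation. The function name and
 * the pr_err() message are assumptions made only for this example.
 */
static void __maybe_unused example_report_recently_freed(pid_t ptname,
		uint64_t faultaddr)
{
	uint64_t gpuaddr = faultaddr, size = 0, flags = 0;
	pid_t pid = 0;

	/* On a hit, gpuaddr/size/flags/pid are filled in from the history */
	if (kgsl_memfree_find_entry(ptname, &gpuaddr, &size, &flags, &pid))
		pr_err("kgsl: fault at 0x%llx is in freed region 0x%llx-0x%llx (pid %d)\n",
			faultaddr, gpuaddr, gpuaddr + size, pid);
}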
  137. static void kgsl_memfree_purge(struct kgsl_pagetable *pagetable,
  138. uint64_t gpuaddr, uint64_t size)
  139. {
  140. pid_t ptname = pagetable ? pagetable->name : 0;
  141. int i;
  142. if (memfree.list == NULL)
  143. return;
  144. spin_lock(&memfree_lock);
  145. for (i = 0; i < MEMFREE_ENTRIES; i++) {
  146. struct memfree_entry *entry = &memfree.list[i];
  147. if (entry->ptname != ptname || entry->size == 0)
  148. continue;
  149. if (gpuaddr > entry->gpuaddr &&
  150. gpuaddr < entry->gpuaddr + entry->size) {
  151. /* truncate the end of the entry */
  152. entry->size = gpuaddr - entry->gpuaddr;
  153. } else if (gpuaddr <= entry->gpuaddr) {
  154. if (gpuaddr + size > entry->gpuaddr &&
  155. gpuaddr + size < entry->gpuaddr + entry->size)
  156. /* Truncate the beginning of the entry */
  157. entry->gpuaddr = gpuaddr + size;
  158. else if (gpuaddr + size >= entry->gpuaddr + entry->size)
  159. /* Remove the entire entry */
  160. entry->size = 0;
  161. }
  162. }
  163. spin_unlock(&memfree_lock);
  164. }
  165. static void kgsl_memfree_add(pid_t pid, pid_t ptname, uint64_t gpuaddr,
  166. uint64_t size, uint64_t flags)
  167. {
  168. struct memfree_entry *entry;
  169. if (memfree.list == NULL)
  170. return;
  171. spin_lock(&memfree_lock);
  172. entry = &memfree.list[memfree.head];
  173. entry->pid = pid;
  174. entry->ptname = ptname;
  175. entry->gpuaddr = gpuaddr;
  176. entry->size = size;
  177. entry->flags = flags;
  178. memfree.head = (memfree.head + 1) % MEMFREE_ENTRIES;
  179. if (memfree.head == memfree.tail)
  180. memfree.tail = (memfree.tail + 1) % MEMFREE_ENTRIES;
  181. spin_unlock(&memfree_lock);
  182. }
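/*
 * Worked example for the ring-buffer bookkeeping above (illustration only):
 * with MEMFREE_ENTRIES == 512, head advances on every recorded free and, once
 * the buffer is full, pushes tail ahead of it so the oldest record is
 * overwritten. kgsl_memfree_find_entry() walks backwards from head towards
 * tail, so the most recently recorded matching free is the one reported.
 */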
  183. int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
  184. enum kgsl_timestamp_type type, unsigned int *timestamp)
  185. {
  186. if (device)
  187. return device->ftbl->readtimestamp(device, priv, type,
  188. timestamp);
  189. return -EINVAL;
  190. }
  191. const char *kgsl_context_type(int type)
  192. {
  193. if (type == KGSL_CONTEXT_TYPE_GL)
  194. return "GL";
  195. else if (type == KGSL_CONTEXT_TYPE_CL)
  196. return "CL";
  197. else if (type == KGSL_CONTEXT_TYPE_C2D)
  198. return "C2D";
  199. else if (type == KGSL_CONTEXT_TYPE_RS)
  200. return "RS";
  201. else if (type == KGSL_CONTEXT_TYPE_VK)
  202. return "VK";
  203. return "ANY";
  204. }
  205. static struct kgsl_mem_entry *kgsl_mem_entry_create(void)
  206. {
  207. struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  208. if (entry != NULL) {
  209. kref_init(&entry->refcount);
  210. /* put this ref in userspace memory alloc and map ioctls */
  211. kref_get(&entry->refcount);
  212. atomic_set(&entry->map_count, 0);
  213. atomic_set(&entry->vbo_count, 0);
  214. }
  215. return entry;
  216. }
  217. static void add_dmabuf_list(struct kgsl_dma_buf_meta *metadata)
  218. {
  219. struct kgsl_device *device = dev_get_drvdata(metadata->attach->dev);
  220. struct dmabuf_list_entry *dle;
  221. struct page *page;
  222. /*
  223. * Get the first page. We will use it to identify the imported
  224. * buffer, since the same buffer can be mapped as different
  225. * mem entries.
  226. */
  227. page = sg_page(metadata->table->sgl);
  228. spin_lock(&kgsl_dmabuf_lock);
  229. /* Go through the list to see if we imported this buffer before */
  230. list_for_each_entry(dle, &kgsl_dmabuf_list, node) {
  231. if (dle->firstpage == page) {
  232. /* Add the dmabuf metadata to the list for this dle */
  233. metadata->dle = dle;
  234. list_add(&metadata->node, &dle->dmabuf_list);
  235. spin_unlock(&kgsl_dmabuf_lock);
  236. return;
  237. }
  238. }
  239. /* This is a new buffer. Add a new entry for it */
  240. dle = kzalloc(sizeof(*dle), GFP_ATOMIC);
  241. if (dle) {
  242. dle->firstpage = page;
  243. INIT_LIST_HEAD(&dle->dmabuf_list);
  244. list_add(&dle->node, &kgsl_dmabuf_list);
  245. metadata->dle = dle;
  246. list_add(&metadata->node, &dle->dmabuf_list);
  247. kgsl_trace_gpu_mem_total(device,
  248. metadata->entry->memdesc.size);
  249. }
  250. spin_unlock(&kgsl_dmabuf_lock);
  251. }
  252. static void remove_dmabuf_list(struct kgsl_dma_buf_meta *metadata)
  253. {
  254. struct kgsl_device *device = dev_get_drvdata(metadata->attach->dev);
  255. struct dmabuf_list_entry *dle = metadata->dle;
  256. if (!dle)
  257. return;
  258. spin_lock(&kgsl_dmabuf_lock);
  259. list_del(&metadata->node);
  260. if (list_empty(&dle->dmabuf_list)) {
  261. list_del(&dle->node);
  262. kfree(dle);
  263. kgsl_trace_gpu_mem_total(device,
  264. -(metadata->entry->memdesc.size));
  265. }
  266. spin_unlock(&kgsl_dmabuf_lock);
  267. }
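/*
 * Illustrative note on the bookkeeping above: if the same dma-buf is imported
 * twice, both kgsl_dma_buf_meta entries share one dmabuf_list_entry (matched
 * by the buffer's first page), so kgsl_trace_gpu_mem_total() is emitted only
 * on the first import and again (with a negative size) when the last import
 * is released.
 */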
  268. #ifdef CONFIG_DMA_SHARED_BUFFER
  269. static void kgsl_destroy_ion(struct kgsl_memdesc *memdesc)
  270. {
  271. struct kgsl_mem_entry *entry = container_of(memdesc,
  272. struct kgsl_mem_entry, memdesc);
  273. struct kgsl_dma_buf_meta *metadata = entry->priv_data;
  274. if (memdesc->priv & KGSL_MEMDESC_MAPPED)
  275. return;
  276. if (metadata != NULL) {
  277. remove_dmabuf_list(metadata);
  278. #if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
  279. dma_buf_unmap_attachment_unlocked(metadata->attach, memdesc->sgt,
  280. DMA_BIDIRECTIONAL);
  281. #else
  282. dma_buf_unmap_attachment(metadata->attach, memdesc->sgt, DMA_BIDIRECTIONAL);
  283. #endif
  284. dma_buf_detach(metadata->dmabuf, metadata->attach);
  285. dma_buf_put(metadata->dmabuf);
  286. kfree(metadata);
  287. }
  288. memdesc->sgt = NULL;
  289. }
  290. static const struct kgsl_memdesc_ops kgsl_dmabuf_ops = {
  291. .free = kgsl_destroy_ion,
  292. .put_gpuaddr = kgsl_unmap_and_put_gpuaddr,
  293. };
  294. #endif
  295. static void kgsl_destroy_anon(struct kgsl_memdesc *memdesc)
  296. {
  297. int i = 0, j;
  298. struct scatterlist *sg;
  299. struct page *page;
  300. if (memdesc->priv & KGSL_MEMDESC_MAPPED)
  301. return;
  302. for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
  303. page = sg_page(sg);
  304. for (j = 0; j < (sg->length >> PAGE_SHIFT); j++) {
  305. /*
306. * Mark the pages in the scatterlist as dirty if they
307. * were writable by the GPU.
  308. */
  309. if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
  310. set_page_dirty_lock(nth_page(page, j));
  311. /*
  312. * Put the page reference taken using get_user_pages
  313. * during memdesc_sg_virt.
  314. */
  315. put_page(nth_page(page, j));
  316. }
  317. }
  318. sg_free_table(memdesc->sgt);
  319. kfree(memdesc->sgt);
  320. memdesc->sgt = NULL;
  321. }
  322. void
  323. kgsl_mem_entry_destroy(struct kref *kref)
  324. {
  325. struct kgsl_mem_entry *entry = container_of(kref,
  326. struct kgsl_mem_entry,
  327. refcount);
  328. unsigned int memtype;
  329. if (entry == NULL)
  330. return;
  331. /* pull out the memtype before the flags get cleared */
  332. memtype = kgsl_memdesc_usermem_type(&entry->memdesc);
  333. /*
334. * VBO allocations at gpumem_alloc_vbo_entry are not added into the stats
335. * (using kgsl_process_add_stats), so do not subtract them here. For all
336. * other allocations, subtract before freeing the memdesc.
  337. */
  338. if (!(entry->memdesc.flags & KGSL_MEMFLAGS_VBO))
  339. atomic64_sub(entry->memdesc.size, &entry->priv->stats[memtype].cur);
  340. /* Detach from process list */
  341. kgsl_mem_entry_detach_process(entry);
  342. if (memtype != KGSL_MEM_ENTRY_KERNEL)
  343. atomic_long_sub(entry->memdesc.size,
  344. &kgsl_driver.stats.mapped);
  345. kgsl_sharedmem_free(&entry->memdesc);
  346. kfree(entry);
  347. }
  348. /* Scheduled by kgsl_mem_entry_destroy_deferred() */
  349. static void _deferred_destroy(struct work_struct *work)
  350. {
  351. struct kgsl_mem_entry *entry =
  352. container_of(work, struct kgsl_mem_entry, work);
  353. kgsl_mem_entry_destroy(&entry->refcount);
  354. }
  355. void kgsl_mem_entry_destroy_deferred(struct kref *kref)
  356. {
  357. struct kgsl_mem_entry *entry =
  358. container_of(kref, struct kgsl_mem_entry, refcount);
  359. INIT_WORK(&entry->work, _deferred_destroy);
  360. queue_work(kgsl_driver.lockless_workqueue, &entry->work);
  361. }
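/*
 * Illustrative sketch (an assumption about a typical caller, not taken from
 * this file): code that may drop the final reference from atomic context can
 * use the deferred destructor so the actual free runs in process context on
 * the lockless workqueue:
 *
 *	kref_put(&entry->refcount, kgsl_mem_entry_destroy_deferred);
 */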
  362. /* Commit the entry to the process so it can be accessed by other operations */
  363. static void kgsl_mem_entry_commit_process(struct kgsl_mem_entry *entry)
  364. {
  365. if (!entry)
  366. return;
  367. spin_lock(&entry->priv->mem_lock);
  368. idr_replace(&entry->priv->mem_idr, entry, entry->id);
  369. spin_unlock(&entry->priv->mem_lock);
  370. }
  371. static int kgsl_mem_entry_attach_to_process(struct kgsl_device *device,
  372. struct kgsl_process_private *process,
  373. struct kgsl_mem_entry *entry)
  374. {
  375. struct kgsl_memdesc *memdesc = &entry->memdesc;
  376. int ret, id;
  377. ret = kgsl_process_private_get(process);
  378. if (!ret)
  379. return -EBADF;
  380. /* Assign a gpu address */
  381. if (!kgsl_memdesc_use_cpu_map(memdesc) &&
  382. kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_NONE) {
  383. struct kgsl_pagetable *pagetable;
  384. pagetable = kgsl_memdesc_is_secured(memdesc) ?
  385. device->mmu.securepagetable : process->pagetable;
  386. ret = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
  387. if (ret) {
  388. kgsl_process_private_put(process);
  389. return ret;
  390. }
  391. }
  392. idr_preload(GFP_KERNEL);
  393. spin_lock(&process->mem_lock);
  394. /* Allocate the ID but don't attach the pointer just yet */
  395. id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
  396. spin_unlock(&process->mem_lock);
  397. idr_preload_end();
  398. if (id < 0) {
  399. if (!kgsl_memdesc_use_cpu_map(memdesc))
  400. kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
  401. kgsl_process_private_put(process);
  402. return id;
  403. }
  404. entry->id = id;
  405. entry->priv = process;
  406. return 0;
  407. }
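/*
 * Minimal sketch of the two-phase IDR publication pattern used above
 * (illustration only; example_publish() is not part of the driver): the slot
 * is reserved with a NULL pointer so concurrent lookups never see a
 * half-initialized object, and idr_replace() makes it visible later, which is
 * what kgsl_mem_entry_commit_process() does for mem entries.
 */
static int __maybe_unused example_publish(struct idr *idr, spinlock_t *lock,
		void *object)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(lock);
	id = idr_alloc(idr, NULL, 1, 0, GFP_NOWAIT);	/* reserve, stays hidden */
	spin_unlock(lock);
	idr_preload_end();
	if (id < 0)
		return id;

	/* ... finish initializing 'object' ... */

	spin_lock(lock);
	idr_replace(idr, object, id);			/* publish */
	spin_unlock(lock);
	return id;
}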
  408. /*
  409. * Attach the memory object to a process by (possibly) getting a GPU address and
  410. * (possibly) mapping it
  411. */
  412. static int kgsl_mem_entry_attach_and_map(struct kgsl_device *device,
  413. struct kgsl_process_private *process,
  414. struct kgsl_mem_entry *entry)
  415. {
  416. struct kgsl_memdesc *memdesc = &entry->memdesc;
  417. int ret;
  418. ret = kgsl_mem_entry_attach_to_process(device, process, entry);
  419. if (ret)
  420. return ret;
  421. if (memdesc->gpuaddr) {
  422. /*
  423. * Map the memory if a GPU address is already assigned, either
  424. * through kgsl_mem_entry_attach_to_process() or via some other
  425. * SVM process
  426. */
  427. ret = kgsl_mmu_map(memdesc->pagetable, memdesc);
  428. if (ret) {
  429. kgsl_mem_entry_detach_process(entry);
  430. return ret;
  431. }
  432. }
  433. kgsl_memfree_purge(memdesc->pagetable, memdesc->gpuaddr,
  434. memdesc->size);
  435. return ret;
  436. }
  437. /* Detach a memory entry from a process and unmap it from the MMU */
  438. static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
  439. {
  440. if (entry == NULL)
  441. return;
  442. /*
443. * First remove the entry from the mem_idr list
444. * so that no one can operate on obsolete values.
  445. */
  446. spin_lock(&entry->priv->mem_lock);
  447. if (entry->id != 0)
  448. idr_remove(&entry->priv->mem_idr, entry->id);
  449. entry->id = 0;
  450. spin_unlock(&entry->priv->mem_lock);
  451. kgsl_sharedmem_put_gpuaddr(&entry->memdesc);
  452. if (entry->memdesc.priv & KGSL_MEMDESC_RECLAIMED)
  453. atomic_sub(entry->memdesc.page_count,
  454. &entry->priv->unpinned_page_count);
  455. kgsl_process_private_put(entry->priv);
  456. entry->priv = NULL;
  457. }
  458. #ifdef CONFIG_QCOM_KGSL_CONTEXT_DEBUG
  459. static void kgsl_context_debug_info(struct kgsl_device *device)
  460. {
  461. struct kgsl_context *context;
  462. struct kgsl_process_private *p;
  463. int next;
  464. /*
465. * Keep an interval between consecutive log dumps to avoid
466. * flooding the kernel log.
  467. */
  468. static DEFINE_RATELIMIT_STATE(_rs, 10 * HZ, 1);
  469. if (!__ratelimit(&_rs))
  470. return;
  471. dev_info(device->dev, "KGSL active contexts:\n");
  472. dev_info(device->dev, "pid process total attached detached\n");
  473. read_lock(&kgsl_driver.proclist_lock);
  474. read_lock(&device->context_lock);
  475. list_for_each_entry(p, &kgsl_driver.process_list, list) {
  476. int total_contexts = 0, num_detached = 0;
  477. idr_for_each_entry(&device->context_idr, context, next) {
  478. if (context->proc_priv == p) {
  479. total_contexts++;
  480. if (kgsl_context_detached(context))
  481. num_detached++;
  482. }
  483. }
  484. dev_info(device->dev, "%-8u %-15.15s %-8d %-10d %-10d\n",
  485. pid_nr(p->pid), p->comm, total_contexts,
  486. total_contexts - num_detached, num_detached);
  487. }
  488. read_unlock(&device->context_lock);
  489. read_unlock(&kgsl_driver.proclist_lock);
  490. }
  491. #else
  492. static void kgsl_context_debug_info(struct kgsl_device *device)
  493. {
  494. }
  495. #endif
  496. /**
  497. * kgsl_context_dump() - dump information about a draw context
498. * @context: KGSL context to dump information about
499. *
500. * Dump specific information about the context to the kernel log. Used for
501. * fence timeout callbacks. Takes a temporary reference on the context and
502. * returns quietly if the context could not be referenced.
  503. */
  504. void kgsl_context_dump(struct kgsl_context *context)
  505. {
  506. struct kgsl_device *device;
  507. if (_kgsl_context_get(context) == 0)
  508. return;
  509. device = context->device;
  510. if (kgsl_context_detached(context)) {
  511. dev_err(device->dev, " context[%u]: context detached\n",
  512. context->id);
  513. } else if (device->ftbl->drawctxt_dump != NULL)
  514. device->ftbl->drawctxt_dump(device, context);
  515. kgsl_context_put(context);
  516. }
  517. /* Allocate a new context ID */
  518. static int _kgsl_get_context_id(struct kgsl_device *device)
  519. {
  520. int id;
  521. idr_preload(GFP_KERNEL);
  522. write_lock(&device->context_lock);
  523. /* Allocate the slot but don't put a pointer in it yet */
  524. id = idr_alloc(&device->context_idr, NULL, 1,
  525. KGSL_GLOBAL_CTXT_ID, GFP_NOWAIT);
  526. write_unlock(&device->context_lock);
  527. idr_preload_end();
  528. return id;
  529. }
  530. /**
  531. * kgsl_context_init() - helper to initialize kgsl_context members
  532. * @dev_priv: the owner of the context
  533. * @context: the newly created context struct, should be allocated by
  534. * the device specific drawctxt_create function.
  535. *
  536. * This is a helper function for the device specific drawctxt_create
  537. * function to initialize the common members of its context struct.
  538. * If this function succeeds, reference counting is active in the context
  539. * struct and the caller should kgsl_context_put() it on error.
  540. * If it fails, the caller should just free the context structure
  541. * it passed in.
  542. */
  543. int kgsl_context_init(struct kgsl_device_private *dev_priv,
  544. struct kgsl_context *context)
  545. {
  546. struct kgsl_device *device = dev_priv->device;
  547. int ret = 0, id;
  548. struct kgsl_process_private *proc_priv = dev_priv->process_priv;
  549. /*
  550. * Read and increment the context count under lock to make sure
  551. * no process goes beyond the specified context limit.
  552. */
  553. spin_lock(&proc_priv->ctxt_count_lock);
  554. if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) {
  555. dev_err(device->dev,
  556. "Per process context limit reached for pid %u\n",
  557. pid_nr(dev_priv->process_priv->pid));
  558. spin_unlock(&proc_priv->ctxt_count_lock);
  559. kgsl_context_debug_info(device);
  560. return -ENOSPC;
  561. }
  562. atomic_inc(&proc_priv->ctxt_count);
  563. spin_unlock(&proc_priv->ctxt_count_lock);
  564. id = _kgsl_get_context_id(device);
  565. if (id == -ENOSPC) {
  566. /*
  567. * Before declaring that there are no contexts left try
  568. * flushing the event worker just in case there are
  569. * detached contexts waiting to finish
  570. */
  571. kthread_flush_worker(device->events_worker);
  572. id = _kgsl_get_context_id(device);
  573. }
  574. if (id < 0) {
  575. if (id == -ENOSPC) {
  576. dev_warn(device->dev,
  577. "cannot have more than %zu contexts due to memstore limitation\n",
  578. KGSL_MEMSTORE_MAX);
  579. kgsl_context_debug_info(device);
  580. }
  581. atomic_dec(&proc_priv->ctxt_count);
  582. return id;
  583. }
  584. context->id = id;
  585. mutex_init(&context->fault_lock);
  586. INIT_LIST_HEAD(&context->faults);
  587. kref_init(&context->refcount);
  588. /*
589. * Get a reference to the process private so it is not destroyed until
590. * the context is destroyed. This will also prevent the pagetable
591. * from being destroyed.
  592. */
  593. if (!kgsl_process_private_get(dev_priv->process_priv)) {
  594. ret = -EBADF;
  595. goto out;
  596. }
  597. context->device = dev_priv->device;
  598. context->dev_priv = dev_priv;
  599. context->proc_priv = dev_priv->process_priv;
  600. context->tid = task_pid_nr(current);
  601. ret = kgsl_sync_timeline_create(context);
  602. if (ret) {
  603. kgsl_process_private_put(dev_priv->process_priv);
  604. goto out;
  605. }
  606. kgsl_add_event_group(device, &context->events, context,
  607. kgsl_readtimestamp, context, "context-%d", id);
  608. out:
  609. if (ret) {
  610. atomic_dec(&proc_priv->ctxt_count);
  611. write_lock(&device->context_lock);
  612. idr_remove(&dev_priv->device->context_idr, id);
  613. write_unlock(&device->context_lock);
  614. }
  615. return ret;
  616. }
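/*
 * Illustrative caller sketch based on the documentation of kgsl_context_init()
 * above; "my_drawctxt" and its layout are assumptions made for the example:
 *
 *	struct my_drawctxt *drawctxt = kzalloc(sizeof(*drawctxt), GFP_KERNEL);
 *	int ret;
 *
 *	if (!drawctxt)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = kgsl_context_init(dev_priv, &drawctxt->base);
 *	if (ret) {
 *		kfree(drawctxt);	(refcount never became live)
 *		return ERR_PTR(ret);
 *	}
 *	(after this point, release with kgsl_context_put(&drawctxt->base) on error)
 */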
  617. void kgsl_free_faults(struct kgsl_context *context)
  618. {
  619. struct kgsl_fault_node *p, *tmp;
  620. if (!(context->flags & KGSL_CONTEXT_FAULT_INFO))
  621. return;
  622. list_for_each_entry_safe(p, tmp, &context->faults, node) {
  623. list_del(&p->node);
  624. kfree(p->priv);
  625. kfree(p);
  626. }
  627. }
  628. /**
  629. * kgsl_context_detach() - Release the "master" context reference
  630. * @context: The context that will be detached
  631. *
  632. * This is called when a context becomes unusable, because userspace
633. * has requested that it be destroyed. The context itself may
  634. * exist a bit longer until its reference count goes to zero.
  635. * Other code referencing the context can detect that it has been
  636. * detached by checking the KGSL_CONTEXT_PRIV_DETACHED bit in
  637. * context->priv.
  638. */
  639. void kgsl_context_detach(struct kgsl_context *context)
  640. {
  641. struct kgsl_device *device;
  642. if (context == NULL)
  643. return;
  644. device = context->device;
  645. device->ftbl->dequeue_recurring_cmd(device, context);
  646. /*
  647. * Mark the context as detached to keep others from using
  648. * the context before it gets fully removed, and to make sure
  649. * we don't try to detach twice.
  650. */
  651. if (test_and_set_bit(KGSL_CONTEXT_PRIV_DETACHED, &context->priv))
  652. return;
  653. trace_kgsl_context_detach(device, context);
  654. context->device->ftbl->drawctxt_detach(context);
  655. /*
  656. * Cancel all pending events after the device-specific context is
  657. * detached, to avoid possibly freeing memory while it is still
  658. * in use by the GPU.
  659. */
  660. kgsl_cancel_events(device, &context->events);
  661. /* Remove the event group from the list */
  662. kgsl_del_event_group(device, &context->events);
  663. kgsl_sync_timeline_detach(context->ktimeline);
  664. kgsl_context_put(context);
  665. }
  666. void
  667. kgsl_context_destroy(struct kref *kref)
  668. {
  669. struct kgsl_context *context = container_of(kref, struct kgsl_context,
  670. refcount);
  671. struct kgsl_device *device = context->device;
  672. trace_kgsl_context_destroy(device, context);
  673. /*
  674. * It's not safe to destroy the context if it's not detached as GPU
  675. * may still be executing commands
  676. */
  677. BUG_ON(!kgsl_context_detached(context));
  678. kgsl_free_faults(context);
  679. kgsl_sync_timeline_put(context->ktimeline);
  680. write_lock(&device->context_lock);
  681. if (context->id != KGSL_CONTEXT_INVALID) {
  682. /* Clear the timestamps in the memstore during destroy */
  683. kgsl_sharedmem_writel(device->memstore,
  684. KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), 0);
  685. kgsl_sharedmem_writel(device->memstore,
  686. KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), 0);
  687. /* clear device power constraint */
  688. if (context->id == device->pwrctrl.constraint.owner_id) {
  689. trace_kgsl_constraint(device,
  690. device->pwrctrl.constraint.type,
  691. device->pwrctrl.active_pwrlevel,
  692. 0);
  693. device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
  694. }
  695. atomic_dec(&context->proc_priv->ctxt_count);
  696. idr_remove(&device->context_idr, context->id);
  697. context->id = KGSL_CONTEXT_INVALID;
  698. }
  699. write_unlock(&device->context_lock);
  700. kgsl_process_private_put(context->proc_priv);
  701. device->ftbl->drawctxt_destroy(context);
  702. }
  703. struct kgsl_device *kgsl_get_device(int dev_idx)
  704. {
  705. int i;
  706. struct kgsl_device *ret = NULL;
  707. mutex_lock(&kgsl_driver.devlock);
  708. for (i = 0; i < ARRAY_SIZE(kgsl_driver.devp); i++) {
  709. if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
  710. ret = kgsl_driver.devp[i];
  711. break;
  712. }
  713. }
  714. mutex_unlock(&kgsl_driver.devlock);
  715. return ret;
  716. }
  717. static struct kgsl_device *kgsl_get_minor(int minor)
  718. {
  719. struct kgsl_device *ret = NULL;
  720. if (minor < 0 || minor >= ARRAY_SIZE(kgsl_driver.devp))
  721. return NULL;
  722. mutex_lock(&kgsl_driver.devlock);
  723. ret = kgsl_driver.devp[minor];
  724. mutex_unlock(&kgsl_driver.devlock);
  725. return ret;
  726. }
  727. /**
  728. * kgsl_check_timestamp() - return true if the specified timestamp is retired
  729. * @device: Pointer to the KGSL device to check
  730. * @context: Pointer to the context for the timestamp
  731. * @timestamp: The timestamp to compare
  732. */
  733. bool kgsl_check_timestamp(struct kgsl_device *device,
  734. struct kgsl_context *context, unsigned int timestamp)
  735. {
  736. unsigned int ts_processed;
  737. kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
  738. &ts_processed);
  739. return (timestamp_cmp(ts_processed, timestamp) >= 0);
  740. }
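/*
 * Illustration only (an assumption, not the driver's definition): a
 * wraparound-tolerant comparison with the semantics kgsl_check_timestamp()
 * relies on, where timestamps are 32-bit counters that may roll over.
 */
static inline int __maybe_unused example_timestamp_cmp(unsigned int a,
		unsigned int b)
{
	/* >0: a is newer, ==0: equal, <0: a is older (modulo 2^32) */
	return (int)(a - b);
}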
  741. static void kgsl_work_period_release(struct kref *kref)
  742. {
  743. struct gpu_work_period *wp = container_of(kref,
  744. struct gpu_work_period, refcount);
  745. spin_lock(&kgsl_driver.wp_list_lock);
  746. if (!list_empty(&wp->list))
  747. list_del_init(&wp->list);
  748. spin_unlock(&kgsl_driver.wp_list_lock);
  749. kfree(wp);
  750. }
  751. static void kgsl_put_work_period(struct gpu_work_period *wp)
  752. {
  753. if (!IS_ERR_OR_NULL(wp))
  754. kref_put(&wp->refcount, kgsl_work_period_release);
  755. }
  756. /**
  757. * kgsl_destroy_process_private() - Cleanup function to free process private
758. * @kref: Pointer to the kref member of the process private being destroyed
759. *
760. * Free the struct and all other resources attached to it. Since the function
761. * can be called before all resources in the process private have been
762. * allocated, each cleanup step checks whether the member being cleaned was
763. * actually allocated (non-NULL) before freeing it.
  764. */
  765. static void kgsl_destroy_process_private(struct kref *kref)
  766. {
  767. struct kgsl_process_private *private = container_of(kref,
  768. struct kgsl_process_private, refcount);
  769. kgsl_put_work_period(private->period);
  770. /*
771. * While removing sysfs entries, kernfs_mutex is held by the sysfs APIs. Since
772. * it is a global fs mutex, it can sometimes take a long time for kgsl to
773. * acquire. Meanwhile, the kgsl open thread may exhaust all of its retries and
774. * the open can fail. To avoid this, remove the sysfs entries under the process
775. * mutex so that retries are not wasted while kgsl waits for the kernfs mutex.
  776. */
  777. mutex_lock(&kgsl_driver.process_mutex);
  778. debugfs_remove_recursive(private->debug_root);
  779. kobject_put(&private->kobj_memtype);
  780. kobject_put(&private->kobj);
  781. /* When using global pagetables, do not detach global pagetable */
  782. if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
  783. kgsl_mmu_detach_pagetable(private->pagetable);
  784. /* Remove the process struct from the master list */
  785. write_lock(&kgsl_driver.proclist_lock);
  786. list_del(&private->list);
  787. write_unlock(&kgsl_driver.proclist_lock);
  788. mutex_unlock(&kgsl_driver.process_mutex);
  789. kfree(private->cmdline);
  790. put_pid(private->pid);
  791. idr_destroy(&private->mem_idr);
  792. idr_destroy(&private->syncsource_idr);
  793. /* When using global pagetables, do not put global pagetable */
  794. if (private->pagetable->name != KGSL_MMU_GLOBAL_PT)
  795. kgsl_mmu_putpagetable(private->pagetable);
  796. kfree(private);
  797. }
  798. void
  799. kgsl_process_private_put(struct kgsl_process_private *private)
  800. {
  801. if (private)
  802. kref_put(&private->refcount, kgsl_destroy_process_private);
  803. }
  804. /**
805. * kgsl_process_private_find() - Find the process associated with the specified
806. * process ID
807. * @pid: pid_t of the process to search for
808. * Return the process struct for the given ID, or NULL if it is not found.
  809. */
  810. struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
  811. {
  812. struct kgsl_process_private *p, *private = NULL;
  813. read_lock(&kgsl_driver.proclist_lock);
  814. list_for_each_entry(p, &kgsl_driver.process_list, list) {
  815. if (pid_nr(p->pid) == pid) {
  816. if (kgsl_process_private_get(p))
  817. private = p;
  818. break;
  819. }
  820. }
  821. read_unlock(&kgsl_driver.proclist_lock);
  822. return private;
  823. }
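/*
 * Illustrative usage (not from the original file): a successful lookup
 * returns a referenced process private that the caller must release with
 * kgsl_process_private_put() when done.
 *
 *	struct kgsl_process_private *priv = kgsl_process_private_find(pid);
 *
 *	if (priv) {
 *		... use priv ...
 *		kgsl_process_private_put(priv);
 *	}
 */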
  824. void kgsl_work_period_update(struct kgsl_device *device,
  825. struct gpu_work_period *period, u64 active)
  826. {
  827. spin_lock(&device->work_period_lock);
  828. if (test_bit(KGSL_WORK_PERIOD, &period->flags)) {
  829. period->active += active;
  830. period->cmds++;
  831. }
  832. spin_unlock(&device->work_period_lock);
  833. }
  834. static void _defer_work_period_put(struct work_struct *work)
  835. {
  836. struct gpu_work_period *wp =
  837. container_of(work, struct gpu_work_period, defer_ws);
  838. /* Put back the refcount that was taken in kgsl_drawobj_cmd_create() */
  839. kgsl_put_work_period(wp);
  840. }
  841. #define KGSL_GPU_ID 1
  842. static void _log_gpu_work_events(struct work_struct *work)
  843. {
  844. struct kgsl_device *device = container_of(work, struct kgsl_device,
  845. work_period_ws);
  846. struct gpu_work_period *wp;
  847. u64 active_time;
  848. bool restart = false;
  849. spin_lock(&device->work_period_lock);
  850. device->gpu_period.end = ktime_get_ns();
  851. spin_lock(&kgsl_driver.wp_list_lock);
  852. list_for_each_entry(wp, &kgsl_driver.wp_list, list) {
  853. if (!test_bit(KGSL_WORK_PERIOD, &wp->flags))
  854. continue;
855. /* Active time is counted in XO cycles (19.2 MHz); convert to nanoseconds */
  856. active_time = wp->active * 10000;
  857. do_div(active_time, 192);
  858. /* Ensure active_time is within work period */
  859. active_time = min_t(u64, active_time,
  860. device->gpu_period.end - device->gpu_period.begin);
  861. /*
  862. * Emit GPU work period events via a kernel tracepoint
  863. * to provide information to the Android OS about how
  864. * apps are using the GPU.
  865. */
  866. if (active_time)
  867. trace_gpu_work_period(KGSL_GPU_ID, wp->uid,
  868. device->gpu_period.begin,
  869. device->gpu_period.end,
  870. active_time);
  871. /* Reset gpu work period stats */
  872. wp->active = 0;
  873. wp->cmds = 0;
  874. atomic_set(&wp->frames, 0);
  875. /* make sure other CPUs see the update */
  876. smp_wmb();
  877. if (!atomic_read(&wp->active_cmds)) {
  878. __clear_bit(KGSL_WORK_PERIOD, &wp->flags);
  879. queue_work(kgsl_driver.lockless_workqueue, &wp->defer_ws);
  880. } else {
  881. restart = true;
  882. }
  883. }
  884. spin_unlock(&kgsl_driver.wp_list_lock);
  885. if (restart) {
  886. /*
  887. * GPU work period duration (end time - begin time) must be at
  888. * most 1 second. The event for a period must be emitted within
  889. * 1 second of the end time of the period. Restart timer within
  890. * 1 second to emit gpu work period events.
  891. */
  892. mod_timer(&device->work_period_timer,
  893. jiffies + msecs_to_jiffies(KGSL_WORK_PERIOD_MS));
  894. device->gpu_period.begin = device->gpu_period.end;
  895. } else {
  896. memset(&device->gpu_period, 0, sizeof(device->gpu_period));
  897. __clear_bit(KGSL_WORK_PERIOD, &device->flags);
  898. }
  899. spin_unlock(&device->work_period_lock);
  900. }
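/*
 * Worked example for the XO-cycle conversion above (illustration only): one
 * 19.2 MHz cycle is 1e9 / 19.2e6 ns = 10000 / 192 ns ~= 52.08 ns, so an
 * active count of 1,920,000 cycles becomes 1920000 * 10000 / 192 =
 * 100,000,000 ns (100 ms) before being clamped to the work period length.
 */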
  901. static void kgsl_work_period_timer(struct timer_list *t)
  902. {
  903. struct kgsl_device *device = from_timer(device, t, work_period_timer);
  904. queue_work(kgsl_driver.lockless_workqueue, &device->work_period_ws);
  905. }
  906. static struct gpu_work_period *kgsl_get_work_period(uid_t uid)
  907. {
  908. struct gpu_work_period *wp;
  909. spin_lock(&kgsl_driver.wp_list_lock);
  910. list_for_each_entry(wp, &kgsl_driver.wp_list, list) {
  911. if ((uid == wp->uid) && kref_get_unless_zero(&wp->refcount)) {
  912. spin_unlock(&kgsl_driver.wp_list_lock);
  913. return wp;
  914. }
  915. }
  916. wp = kzalloc(sizeof(*wp), GFP_ATOMIC);
  917. if (!wp) {
  918. spin_unlock(&kgsl_driver.wp_list_lock);
  919. return ERR_PTR(-ENOMEM);
  920. }
  921. kref_init(&wp->refcount);
  922. wp->uid = uid;
  923. INIT_WORK(&wp->defer_ws, _defer_work_period_put);
  924. list_add(&wp->list, &kgsl_driver.wp_list);
  925. spin_unlock(&kgsl_driver.wp_list_lock);
  926. return wp;
  927. }
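/*
 * Illustrative note on the lookup above: kref_get_unless_zero() skips a
 * period whose release is already in flight (refcount has reached zero), so a
 * fresh gpu_work_period is allocated for that uid instead of reusing a dying
 * one. GFP_ATOMIC is used because the allocation happens while holding
 * kgsl_driver.wp_list_lock.
 */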
  928. static struct kgsl_process_private *kgsl_process_private_new(
  929. struct kgsl_device *device)
  930. {
  931. struct kgsl_process_private *private;
  932. struct pid *cur_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
  933. /* Search in the process list */
  934. list_for_each_entry(private, &kgsl_driver.process_list, list) {
  935. if (private->pid == cur_pid) {
  936. if (!kgsl_process_private_get(private)) {
  937. /*
938. * This will happen only if the refcount is zero,
939. * i.e. destroy has been triggered but has not
940. * completed yet. Return -EEXIST to tell the
941. * caller that destroy is pending so it can take
942. * appropriate action.
  943. */
  944. private = ERR_PTR(-EEXIST);
  945. } else {
  946. mutex_lock(&private->private_mutex);
  947. private->fd_count++;
  948. mutex_unlock(&private->private_mutex);
  949. }
  950. /*
  951. * We need to hold only one reference to the PID for
  952. * each process struct to avoid overflowing the
  953. * reference counter which can lead to use-after-free.
  954. */
  955. put_pid(cur_pid);
  956. return private;
  957. }
  958. }
  959. /* Create a new object */
  960. private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
  961. if (private == NULL) {
  962. put_pid(cur_pid);
  963. return ERR_PTR(-ENOMEM);
  964. }
  965. private->period = kgsl_get_work_period(current_uid().val);
  966. if (IS_ERR(private->period)) {
  967. int err = PTR_ERR(private->period);
  968. kfree(private);
  969. return ERR_PTR(err);
  970. }
  971. kref_init(&private->refcount);
  972. private->fd_count = 1;
  973. private->pid = cur_pid;
  974. get_task_comm(private->comm, current->group_leader);
  975. private->cmdline = kstrdup_quotable_cmdline(current, GFP_KERNEL);
  976. spin_lock_init(&private->mem_lock);
  977. spin_lock_init(&private->syncsource_lock);
  978. spin_lock_init(&private->ctxt_count_lock);
  979. mutex_init(&private->private_mutex);
  980. idr_init(&private->mem_idr);
  981. idr_init(&private->syncsource_idr);
  982. kgsl_reclaim_proc_private_init(private);
  983. /* Allocate a pagetable for the new process object */
  984. private->pagetable = kgsl_mmu_getpagetable(&device->mmu, pid_nr(cur_pid));
  985. if (IS_ERR(private->pagetable)) {
  986. int err = PTR_ERR(private->pagetable);
  987. kgsl_put_work_period(private->period);
  988. idr_destroy(&private->mem_idr);
  989. idr_destroy(&private->syncsource_idr);
  990. put_pid(private->pid);
  991. kfree(private);
  992. private = ERR_PTR(err);
  993. return private;
  994. }
  995. kgsl_process_init_sysfs(device, private);
  996. kgsl_process_init_debugfs(private);
  997. write_lock(&kgsl_driver.proclist_lock);
  998. list_add(&private->list, &kgsl_driver.process_list);
  999. write_unlock(&kgsl_driver.proclist_lock);
  1000. return private;
  1001. }
static void process_release_memory(struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry;
	int next = 0;

	while (1) {
		spin_lock(&private->mem_lock);
		entry = idr_get_next(&private->mem_idr, &next);
		if (entry == NULL) {
			spin_unlock(&private->mem_lock);
			break;
		}
		/*
		 * If the free pending flag is not set it means that user space
		 * did not free its reference to this entry, in that case
		 * free a reference to this entry, other references are from
		 * within kgsl so they will be freed eventually by kgsl
		 */
		if (!entry->pending_free) {
			entry->pending_free = 1;
			spin_unlock(&private->mem_lock);
			kgsl_mem_entry_put(entry);
		} else {
			spin_unlock(&private->mem_lock);
		}
		next = next + 1;
	}
}
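/*
 * Drop one file-descriptor reference on the process private. When the last
 * fd is closed, release any memory entries and syncsource objects still
 * owned by the process before dropping the final refcount.
 */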
static void kgsl_process_private_close(struct kgsl_device_private *dev_priv,
		struct kgsl_process_private *private)
{
	mutex_lock(&private->private_mutex);

	if (--private->fd_count > 0) {
		mutex_unlock(&private->private_mutex);
		kgsl_process_private_put(private);
		return;
	}

	/*
	 * If this is the last file on the process garbage collect
	 * any outstanding resources
	 */
	process_release_memory(private);

	/* Release all syncsource objects from process private */
	kgsl_syncsource_process_release_syncsources(private);

	mutex_unlock(&private->private_mutex);
	kgsl_process_private_put(private);
}
static struct kgsl_process_private *_process_private_open(
		struct kgsl_device *device)
{
	struct kgsl_process_private *private;

	mutex_lock(&kgsl_driver.process_mutex);
	private = kgsl_process_private_new(device);
	mutex_unlock(&kgsl_driver.process_mutex);

	return private;
}

static struct kgsl_process_private *kgsl_process_private_open(
		struct kgsl_device *device)
{
	struct kgsl_process_private *private;
	int i;

	private = _process_private_open(device);

	/*
	 * If we get -EEXIST, a previous destroy of this process private was
	 * triggered but has not completed yet. Retry creating the process
	 * private after a short delay to allow the destroy to finish.
	 */
	for (i = 0; (PTR_ERR_OR_ZERO(private) == -EEXIST) && (i < 50); i++) {
		usleep_range(10, 100);
		private = _process_private_open(device);
	}

	return private;
}
int kgsl_gpu_frame_count(pid_t pid, u64 *frame_count)
{
	struct kgsl_process_private *p;

	if (!frame_count)
		return -EINVAL;

	p = kgsl_process_private_find(pid);
	if (!p)
		return -ENOENT;

	*frame_count = atomic64_read(&p->frame_count);
	kgsl_process_private_put(p);

	return 0;
}
EXPORT_SYMBOL(kgsl_gpu_frame_count);

int kgsl_add_rcu_notifier(struct notifier_block *nb)
{
	struct kgsl_device *device = kgsl_get_device(0);

	if (!device)
		return -ENODEV;

	return srcu_notifier_chain_register(&device->nh, nb);
}
EXPORT_SYMBOL(kgsl_add_rcu_notifier);

int kgsl_del_rcu_notifier(struct notifier_block *nb)
{
	struct kgsl_device *device = kgsl_get_device(0);

	if (!device)
		return -ENODEV;

	return srcu_notifier_chain_unregister(&device->nh, nb);
}
EXPORT_SYMBOL(kgsl_del_rcu_notifier);
static int kgsl_close_device(struct kgsl_device *device)
{
	int result = 0;

	mutex_lock(&device->mutex);

	if (device->open_count == 1)
		result = device->ftbl->last_close(device);

	/*
	 * We must decrement the open_count after last_close() has finished.
	 * This is because last_close() relinquishes device mutex while
	 * waiting for active count to become 0. This opens up a window
	 * where a new process can come in, see that open_count is 0, and
	 * initiate a first_open(). This can potentially mess up the power
	 * state machine. To avoid a first_open() from happening before
	 * last_close() has finished, decrement the open_count after
	 * last_close().
	 */
	device->open_count--;
	mutex_unlock(&device->mutex);

	return result;
}
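/*
 * Detach every context that was created through this file descriptor. The
 * context IDR is walked under the read lock and each matching context is
 * pinned with an extra reference before it is detached and released.
 */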
static void device_release_contexts(struct kgsl_device_private *dev_priv)
{
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;
	int result = 0;

	while (1) {
		read_lock(&device->context_lock);
		context = idr_get_next(&device->context_idr, &next);

		if (context == NULL) {
			read_unlock(&device->context_lock);
			break;
		} else if (context->dev_priv == dev_priv) {
			/*
			 * Hold a reference to the context in case somebody
			 * tries to put it while we are detaching
			 */
			result = _kgsl_context_get(context);
		}
		read_unlock(&device->context_lock);

		if (result) {
			kgsl_context_detach(context);
			kgsl_context_put(context);
			result = 0;
		}

		next = next + 1;
	}
}
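/*
 * Called when a kgsl file descriptor is closed: detach the contexts owned
 * by this fd, drop the per-process state, destroy the device private data
 * and release the runtime PM reference taken in kgsl_open().
 */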
static int kgsl_release(struct inode *inodep, struct file *filep)
{
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_device *device = dev_priv->device;
	int result;

	filep->private_data = NULL;

	/* Release the contexts for the file */
	device_release_contexts(dev_priv);

	/* Close down the process wide resources for the file */
	kgsl_process_private_close(dev_priv, dev_priv->process_priv);

	/* Destroy the device-specific structure */
	device->ftbl->device_private_destroy(dev_priv);

	result = kgsl_close_device(device);
	pm_runtime_put(&device->pdev->dev);

	return result;
}

static int kgsl_open_device(struct kgsl_device *device)
{
	int result = 0;

	mutex_lock(&device->mutex);
	if (device->open_count == 0) {
		result = device->ftbl->first_open(device);
		if (result)
			goto out;
	}

	device->open_count++;
out:
	mutex_unlock(&device->mutex);
	return result;
}

static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	if (device == NULL) {
		pr_err("kgsl: No device found\n");
		return -ENODEV;
	}

	result = pm_runtime_get_sync(&device->pdev->dev);
	if (result < 0) {
		dev_err(device->dev,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = device->ftbl->device_private_create();
	if (dev_priv == NULL) {
		result = -ENOMEM;
		goto err;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	result = kgsl_open_device(device);
	if (result)
		goto err;

	/*
	 * Get file (per process) private struct. This must be done
	 * after the first start so that the global pagetable mappings
	 * are set up before we create the per-process pagetable.
	 */
	dev_priv->process_priv = kgsl_process_private_open(device);
	if (IS_ERR(dev_priv->process_priv)) {
		result = PTR_ERR(dev_priv->process_priv);
		kgsl_close_device(device);
		goto err;
	}

err:
	if (result) {
		filep->private_data = NULL;
		kfree(dev_priv);
		pm_runtime_put(&device->pdev->dev);
	}

	return result;
}
#define GPUADDR_IN_MEMDESC(_val, _memdesc) \
	(((_val) >= (_memdesc)->gpuaddr) && \
	 ((_val) < ((_memdesc)->gpuaddr + (_memdesc)->size)))

/**
 * kgsl_sharedmem_find() - Find a gpu memory allocation
 *
 * @private: private data for the process to check.
 * @gpuaddr: start address of the region
 *
 * Find a gpu allocation. Caller must kgsl_mem_entry_put()
 * the returned entry when finished using it.
 */
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
{
	int id;
	struct kgsl_mem_entry *entry, *ret = NULL;

	if (!private)
		return NULL;

	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr, 0) &&
		!kgsl_mmu_gpuaddr_in_range(
			private->pagetable->mmu->securepagetable, gpuaddr, 0))
		return NULL;

	spin_lock(&private->mem_lock);
	idr_for_each_entry(&private->mem_idr, entry, id) {
		if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
			if (!entry->pending_free)
				ret = kgsl_mem_entry_get(entry);
			break;
		}
	}
	spin_unlock(&private->mem_lock);

	return ret;
}

static struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id_flags(struct kgsl_process_private *process,
		unsigned int id, uint64_t flags)
{
	struct kgsl_mem_entry *entry, *ret = NULL;

	spin_lock(&process->mem_lock);
	entry = idr_find(&process->mem_idr, id);
	if (entry)
		if (!entry->pending_free &&
				(flags & entry->memdesc.flags) == flags)
			ret = kgsl_mem_entry_get(entry);
	spin_unlock(&process->mem_lock);

	return ret;
}

/**
 * kgsl_sharedmem_find_id() - find a memory entry by id
 * @process: the owning process
 * @id: id to find
 *
 * @returns - the mem_entry or NULL
 *
 * Caller must kgsl_mem_entry_put() the returned entry, when finished using
 * it.
 */
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
{
	return kgsl_sharedmem_find_id_flags(process, id, 0);
}

/**
 * kgsl_mem_entry_unset_pend() - Unset the pending free flag of an entry
 * @entry - The memory entry
 */
static inline void kgsl_mem_entry_unset_pend(struct kgsl_mem_entry *entry)
{
	if (entry == NULL)
		return;

	spin_lock(&entry->priv->mem_lock);
	entry->pending_free = 0;
	spin_unlock(&entry->priv->mem_lock);
}

/**
 * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry
 * @entry - The memory entry
 *
 * @returns - true if pending flag was 0 else false
 *
 * This function will set the pending free flag if it is previously unset. Used
 * to prevent race condition between ioctls calling free/freememontimestamp
 * on the same entry. Whichever thread sets the flag first will do the free.
 */
static inline bool kgsl_mem_entry_set_pend(struct kgsl_mem_entry *entry)
{
	bool ret = false;

	if (entry == NULL)
		return false;

	spin_lock(&entry->priv->mem_lock);
	if (!entry->pending_free) {
		entry->pending_free = 1;
		ret = true;
	}
	spin_unlock(&entry->priv->mem_lock);

	return ret;
}
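/*
 * Fill a struct kgsl_context_property_fault with the fault count and the
 * timestamp of the last faulted command for the given context, then copy it
 * back to the user buffer described by @ctxt_property.
 */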
static int kgsl_get_ctxt_fault_stats(struct kgsl_context *context,
		struct kgsl_context_property *ctxt_property)
{
	struct kgsl_context_property_fault fault_stats;
	size_t copy;

	/* Return the size of the subtype struct */
	if (ctxt_property->size == 0) {
		ctxt_property->size = sizeof(fault_stats);
		return 0;
	}

	memset(&fault_stats, 0, sizeof(fault_stats));

	copy = min_t(size_t, ctxt_property->size, sizeof(fault_stats));

	fault_stats.faults = context->total_fault_count;
	fault_stats.timestamp = context->last_faulted_cmd_ts;

	/*
	 * Copy the context fault stats to data which also serves as
	 * the out parameter.
	 */
	if (copy_to_user(u64_to_user_ptr(ctxt_property->data),
				&fault_stats, copy))
		return -EFAULT;

	return 0;
}

static long kgsl_get_ctxt_properties(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	/* Return fault stats of given context */
	struct kgsl_context_property ctxt_property;
	struct kgsl_context *context;
	size_t copy;
	long ret;

	/*
	 * If sizebytes is zero, tell the user how big the
	 * ctxt_property struct should be.
	 */
	if (param->sizebytes == 0) {
		param->sizebytes = sizeof(ctxt_property);
		return 0;
	}

	memset(&ctxt_property, 0, sizeof(ctxt_property));

	copy = min_t(size_t, param->sizebytes, sizeof(ctxt_property));

	/* We expect the value passed in to contain the context id */
	if (copy_from_user(&ctxt_property, param->value, copy))
		return -EFAULT;

	/* ctxt type zero is not valid, as we consider it as uninitialized. */
	if (ctxt_property.type == 0)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv,
			ctxt_property.contextid);
	if (!context)
		return -EINVAL;

	if (ctxt_property.type == KGSL_CONTEXT_PROP_FAULTS)
		ret = kgsl_get_ctxt_fault_stats(context, &ctxt_property);
	else
		ret = -EOPNOTSUPP;

	kgsl_context_put(context);

	return ret;
}
static long kgsl_prop_version(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	struct kgsl_version version = {
		.drv_major = KGSL_VERSION_MAJOR,
		.drv_minor = KGSL_VERSION_MINOR,
		.dev_major = 3,
		.dev_minor = 1,
	};

	if (param->sizebytes != sizeof(version))
		return -EINVAL;

	if (copy_to_user(param->value, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

/* Return reset status of given context and clear it */
static long kgsl_prop_gpu_reset_stat(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	u32 id;
	struct kgsl_context *context;

	if (param->sizebytes != sizeof(id))
		return -EINVAL;

	/* We expect the value passed in to contain the context id */
	if (copy_from_user(&id, param->value, sizeof(id)))
		return -EFAULT;

	context = kgsl_context_get_owner(dev_priv, id);
	if (!context)
		return -EINVAL;

	/*
	 * Copy the reset status to value which also serves as
	 * the out parameter
	 */
	id = context->reset_status;
	context->reset_status = KGSL_CTX_STAT_NO_ERROR;
	kgsl_context_put(context);

	if (copy_to_user(param->value, &id, sizeof(id)))
		return -EFAULT;

	return 0;
}

static long kgsl_prop_secure_buf_alignment(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	u32 align = PAGE_SIZE;

	if (param->sizebytes != sizeof(align))
		return -EINVAL;

	if (copy_to_user(param->value, &align, sizeof(align)))
		return -EFAULT;

	return 0;
}

static long kgsl_prop_secure_ctxt_support(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	u32 secure;

	if (param->sizebytes != sizeof(secure))
		return -EINVAL;

	secure = dev_priv->device->mmu.secured ? 1 : 0;

	if (copy_to_user(param->value, &secure, sizeof(secure)))
		return -EFAULT;

	return 0;
}
static int kgsl_query_caps_properties(struct kgsl_device *device,
		struct kgsl_capabilities *caps)
{
	struct kgsl_capabilities_properties props;
	size_t copy;
	u32 count, *local;
	int ret;

	/* Return the size of the subtype struct */
	if (caps->size == 0) {
		caps->size = sizeof(props);
		return 0;
	}

	memset(&props, 0, sizeof(props));

	copy = min_t(size_t, caps->size, sizeof(props));

	if (copy_from_user(&props, u64_to_user_ptr(caps->data), copy))
		return -EFAULT;

	/* Get the number of properties */
	count = kgsl_query_property_list(device, NULL, 0);

	/*
	 * If the incoming user count is zero, they are querying the number of
	 * available properties. Set it and return.
	 */
	if (props.count == 0) {
		props.count = count;
		goto done;
	}

	/* Copy the lesser of the user or kernel property count */
	if (props.count < count)
		count = props.count;

	/* Create a local buffer to store the property list */
	local = kcalloc(count, sizeof(u32), GFP_KERNEL);
	if (!local)
		return -ENOMEM;

	/* Get the properties */
	props.count = kgsl_query_property_list(device, local, count);

	ret = copy_to_user(u64_to_user_ptr(props.list), local,
		props.count * sizeof(u32));

	kfree(local);

	if (ret)
		return -EFAULT;

done:
	if (copy_to_user(u64_to_user_ptr(caps->data), &props, copy))
		return -EFAULT;

	return 0;
}

static long kgsl_prop_query_capabilities(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	struct kgsl_capabilities caps;
	long ret;
	size_t copy;

	/*
	 * If sizebytes is zero, tell the user how big the capabilities struct
	 * should be
	 */
	if (param->sizebytes == 0) {
		param->sizebytes = sizeof(caps);
		return 0;
	}

	memset(&caps, 0, sizeof(caps));

	copy = min_t(size_t, param->sizebytes, sizeof(caps));

	if (copy_from_user(&caps, param->value, copy))
		return -EFAULT;

	/* querytype must be non zero */
	if (caps.querytype == 0)
		return -EINVAL;

	if (caps.querytype == KGSL_QUERY_CAPS_PROPERTIES)
		ret = kgsl_query_caps_properties(dev_priv->device, &caps);
	else {
		/* Unsupported querytypes should return a unique return value */
		return -EOPNOTSUPP;
	}

	if (copy_to_user(param->value, &caps, copy))
		return -EFAULT;

	return ret;
}
static long kgsl_get_gpu_va64_size(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	u64 va_size = KGSL_IOMMU_VA_END64 - KGSL_IOMMU_VA_BASE64;

	if (param->sizebytes != sizeof(va_size))
		return -EINVAL;

	if (copy_to_user(param->value, &va_size, sizeof(va_size)))
		return -EFAULT;

	return 0;
}

static long kgsl_get_gpu_secure_va_size(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	u64 size = KGSL_IOMMU_SECURE_SIZE(&dev_priv->device->mmu);

	if (param->sizebytes != sizeof(size))
		return -EINVAL;

	if (copy_to_user(param->value, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

static long kgsl_get_gpu_secure_va_inuse(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param)
{
	u64 val;

	if (param->sizebytes != sizeof(val))
		return -EINVAL;

	val = atomic_long_read(&kgsl_driver.stats.secure);

	if (copy_to_user(param->value, &val, sizeof(val)))
		return -EFAULT;

	return 0;
}
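/* Dispatch table mapping KGSL_PROP_* query types to their handlers */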
static const struct {
	int type;
	long (*func)(struct kgsl_device_private *dev_priv,
		struct kgsl_device_getproperty *param);
} kgsl_property_funcs[] = {
	{ KGSL_PROP_VERSION, kgsl_prop_version },
	{ KGSL_PROP_GPU_RESET_STAT, kgsl_prop_gpu_reset_stat},
	{ KGSL_PROP_SECURE_BUFFER_ALIGNMENT, kgsl_prop_secure_buf_alignment },
	{ KGSL_PROP_SECURE_CTXT_SUPPORT, kgsl_prop_secure_ctxt_support },
	{ KGSL_PROP_QUERY_CAPABILITIES, kgsl_prop_query_capabilities },
	{ KGSL_PROP_CONTEXT_PROPERTY, kgsl_get_ctxt_properties },
	{ KGSL_PROP_GPU_VA64_SIZE, kgsl_get_gpu_va64_size },
	{ KGSL_PROP_GPU_SECURE_VA_SIZE, kgsl_get_gpu_secure_va_size },
	{ KGSL_PROP_GPU_SECURE_VA_INUSE, kgsl_get_gpu_secure_va_inuse },
};

/* Call all ioctl sub functions with driver locked */
long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_device_getproperty *param = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(kgsl_property_funcs); i++) {
		if (param->type == kgsl_property_funcs[i].type)
			return kgsl_property_funcs[i].func(dev_priv, param);
	}

	if (is_compat_task())
		return device->ftbl->getproperty_compat(device, param);

	return device->ftbl->getproperty(device, param);
}

int kgsl_query_property_list(struct kgsl_device *device, u32 *list, u32 count)
{
	int num = 0;

	if (!list) {
		num = ARRAY_SIZE(kgsl_property_funcs);

		if (device->ftbl->query_property_list)
			num += device->ftbl->query_property_list(device, list,
				count);

		return num;
	}

	for (; num < count && num < ARRAY_SIZE(kgsl_property_funcs); num++)
		list[num] = kgsl_property_funcs[num].type;

	if (device->ftbl->query_property_list)
		num += device->ftbl->query_property_list(device, &list[num],
			count - num);

	return num;
}

long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	/* The getproperty struct is reused for setproperty too */
	struct kgsl_device_getproperty *param = data;

	/* Reroute to compat version if coming from compat_ioctl */
	if (is_compat_task())
		result = dev_priv->device->ftbl->setproperty_compat(
			dev_priv, param->type, param->value,
			param->sizebytes);
	else if (dev_priv->device->ftbl->setproperty)
		result = dev_priv->device->ftbl->setproperty(
			dev_priv, param->type, param->value,
			param->sizebytes);

	return result;
}
long kgsl_ioctl_device_waittimestamp_ctxtid(
		struct kgsl_device_private *dev_priv, unsigned int cmd,
		void *data)
{
	struct kgsl_device_waittimestamp_ctxtid *param = data;
	struct kgsl_device *device = dev_priv->device;
	long result = -EINVAL;
	unsigned int temp_cur_ts = 0;
	struct kgsl_context *context;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return result;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&temp_cur_ts);

	trace_kgsl_waittimestamp_entry(device, context->id, temp_cur_ts,
		param->timestamp, param->timeout);

	result = device->ftbl->waittimestamp(device, context, param->timestamp,
		param->timeout);

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED,
		&temp_cur_ts);
	trace_kgsl_waittimestamp_exit(device, temp_cur_ts, result);

	kgsl_context_put(context);

	return result;
}

long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj;
	struct kgsl_drawobj_cmd *cmdobj;
	long result = -EINVAL;

	/* The legacy functions don't support synchronization commands */
	if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
		return -EINVAL;

	/* Sanity check the number of IBs */
	if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
			(param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
		return -EINVAL;

	/* Get the context */
	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
	if (context == NULL)
		return -EINVAL;

	cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
			CMDOBJ_TYPE);
	if (IS_ERR(cmdobj)) {
		kgsl_context_put(context);
		return PTR_ERR(cmdobj);
	}

	drawobj = DRAWOBJ(cmdobj);

	if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
		result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
			(void __user *) param->ibdesc_addr,
			param->numibs);
	else {
		struct kgsl_ibdesc ibdesc;
		/* Ultra legacy path */

		ibdesc.gpuaddr = param->ibdesc_addr;
		ibdesc.sizedwords = param->numibs;
		ibdesc.ctrl = 0;

		result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
	}

	if (result == 0)
		result = kgsl_reclaim_to_pinned_state(dev_priv->process_priv);

	if (result == 0)
		result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
			&drawobj, 1, &param->timestamp);

	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		kgsl_drawobj_destroy(drawobj);

	kgsl_context_put(context);
	return result;
}
/* Returns 0 on failure. Returns command type(s) on success */
static unsigned int _process_command_input(struct kgsl_device *device,
		unsigned int flags, unsigned int numcmds,
		unsigned int numobjs, unsigned int numsyncs)
{
	if (numcmds > KGSL_MAX_NUMIBS ||
			numobjs > KGSL_MAX_NUMIBS ||
			numsyncs > KGSL_MAX_SYNCPOINTS)
		return 0;

	/*
	 * The SYNC bit is supposed to identify a dummy sync object
	 * so warn the user if they specified any IBs with it.
	 * A MARKER command can either have IBs or not but if the
	 * command has 0 IBs it is automatically assumed to be a marker.
	 */

	/* If they specify the flag, go with what they say */
	if (flags & KGSL_DRAWOBJ_MARKER)
		return MARKEROBJ_TYPE;
	else if (flags & KGSL_DRAWOBJ_SYNC)
		return SYNCOBJ_TYPE;

	/* If not, deduce what they meant */
	if (numsyncs && numcmds)
		return SYNCOBJ_TYPE | CMDOBJ_TYPE;
	else if (numsyncs)
		return SYNCOBJ_TYPE;
	else if (numcmds)
		return CMDOBJ_TYPE;
	else if (numcmds == 0)
		return MARKEROBJ_TYPE;

	return 0;
}

long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_submit_commands *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj[2];
	unsigned int type;
	long result;
	unsigned int i = 0;

	type = _process_command_input(device, param->flags, param->numcmds, 0,
			param->numsyncs);
	if (!type)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	if (type & SYNCOBJ_TYPE) {
		struct kgsl_drawobj_sync *syncobj =
				kgsl_drawobj_sync_create(device, context);

		if (IS_ERR(syncobj)) {
			result = PTR_ERR(syncobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(syncobj);

		result = kgsl_drawobj_sync_add_syncpoints(device, syncobj,
			param->synclist, param->numsyncs);
		if (result)
			goto done;

		if (!(syncobj->flags & KGSL_SYNCOBJ_SW))
			syncobj->flags |= KGSL_SYNCOBJ_HW;
	}

	if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
		struct kgsl_drawobj_cmd *cmdobj =
				kgsl_drawobj_cmd_create(device,
					context, param->flags, type);

		if (IS_ERR(cmdobj)) {
			result = PTR_ERR(cmdobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(cmdobj);

		result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
			param->cmdlist, param->numcmds);
		if (result)
			goto done;

		/* If no profiling buffer was specified, clear the flag */
		if (cmdobj->profiling_buf_entry == NULL)
			DRAWOBJ(cmdobj)->flags &=
				~(unsigned long)KGSL_DRAWOBJ_PROFILING;

		if (type & CMDOBJ_TYPE) {
			result = kgsl_reclaim_to_pinned_state(
					dev_priv->process_priv);
			if (result)
				goto done;
		}
	}

	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
			i, &param->timestamp);

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		while (i--)
			kgsl_drawobj_destroy(drawobj[i]);

	kgsl_context_put(context);
	return result;
}
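/*
 * Newer command submission path: like kgsl_ioctl_submit_commands() above,
 * but takes 64-bit user pointers for the command, object and sync lists and
 * also attaches a memory object list to the command draw object.
 */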
long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpu_command *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj *drawobj[2];
	unsigned int type;
	long result;
	unsigned int i = 0;

	type = _process_command_input(device, param->flags, param->numcmds,
			param->numobjs, param->numsyncs);
	if (!type)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	if (type & SYNCOBJ_TYPE) {
		struct kgsl_drawobj_sync *syncobj =
				kgsl_drawobj_sync_create(device, context);

		if (IS_ERR(syncobj)) {
			result = PTR_ERR(syncobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(syncobj);

		result = kgsl_drawobj_sync_add_synclist(device, syncobj,
				u64_to_user_ptr(param->synclist),
				param->syncsize, param->numsyncs);
		if (result)
			goto done;

		if (!(syncobj->flags & KGSL_SYNCOBJ_SW))
			syncobj->flags |= KGSL_SYNCOBJ_HW;
	}

	if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
		struct kgsl_drawobj_cmd *cmdobj =
				kgsl_drawobj_cmd_create(device,
					context, param->flags, type);

		if (IS_ERR(cmdobj)) {
			result = PTR_ERR(cmdobj);
			goto done;
		}

		drawobj[i++] = DRAWOBJ(cmdobj);

		result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
				u64_to_user_ptr(param->cmdlist),
				param->cmdsize, param->numcmds);
		if (result)
			goto done;

		result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
				u64_to_user_ptr(param->objlist),
				param->objsize, param->numobjs);
		if (result)
			goto done;

		/* If no profiling buffer was specified, clear the flag */
		if (cmdobj->profiling_buf_entry == NULL)
			DRAWOBJ(cmdobj)->flags &=
				~(unsigned long)KGSL_DRAWOBJ_PROFILING;

		if (type & CMDOBJ_TYPE) {
			result = kgsl_reclaim_to_pinned_state(
					dev_priv->process_priv);
			if (result)
				goto done;
		}
	}

	result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
			i, &param->timestamp);

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		while (i--)
			kgsl_drawobj_destroy(drawobj[i]);

	kgsl_context_put(context);
	return result;
}
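/*
 * Queue a single auxiliary (BIND or TIMELINE) command, optionally preceded
 * by a user supplied sync object and, for BIND, an implicit sync on the last
 * queued timestamp of the context.
 */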
long kgsl_ioctl_gpu_aux_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpu_aux_command *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	struct kgsl_drawobj **drawobjs;
	void __user *cmdlist;
	u32 count;
	int i, index = 0;
	long ret;
	struct kgsl_gpu_aux_command_generic generic;

	/* We support only one aux command */
	if (param->numcmds != 1)
		return -EINVAL;

	if (!(param->flags &
		(KGSL_GPU_AUX_COMMAND_BIND | KGSL_GPU_AUX_COMMAND_TIMELINE)))
		return -EINVAL;

	if ((param->flags & KGSL_GPU_AUX_COMMAND_SYNC) &&
		(param->numsyncs > KGSL_MAX_SYNCPOINTS))
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (!context)
		return -EINVAL;

	/*
	 * param->numcmds is always one and we have one additional drawobj
	 * for the timestamp sync if KGSL_GPU_AUX_COMMAND_SYNC flag is passed.
	 * On top of that we make an implicit sync object for the last queued
	 * timestamp on this context.
	 */
	count = (param->flags & KGSL_GPU_AUX_COMMAND_SYNC) ? 3 : 2;

	drawobjs = kvcalloc(count, sizeof(*drawobjs),
		GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

	if (!drawobjs) {
		kgsl_context_put(context);
		return -ENOMEM;
	}

	trace_kgsl_aux_command(context->id, param->numcmds, param->flags,
		param->timestamp);

	if (param->flags & KGSL_GPU_AUX_COMMAND_SYNC) {
		struct kgsl_drawobj_sync *syncobj =
			kgsl_drawobj_sync_create(device, context);

		if (IS_ERR(syncobj)) {
			ret = PTR_ERR(syncobj);
			goto err;
		}

		drawobjs[index++] = DRAWOBJ(syncobj);

		ret = kgsl_drawobj_sync_add_synclist(device, syncobj,
			u64_to_user_ptr(param->synclist),
			param->syncsize, param->numsyncs);
		if (ret)
			goto err;
	}

	cmdlist = u64_to_user_ptr(param->cmdlist);

	/*
	 * Create a draw object for KGSL_GPU_AUX_COMMAND_BIND or
	 * KGSL_GPU_AUX_COMMAND_TIMELINE.
	 */
	if (copy_struct_from_user(&generic, sizeof(generic),
		cmdlist, param->cmdsize)) {
		ret = -EFAULT;
		goto err;
	}

	if (generic.type == KGSL_GPU_AUX_COMMAND_BIND) {
		struct kgsl_drawobj_sync *tsobj;
		struct kgsl_drawobj_bind *bindobj;
		u32 queued;

		kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
			&queued);

		/*
		 * Make an implicit sync object for the last queued timestamp
		 * on this context
		 */
		tsobj = kgsl_drawobj_create_timestamp_syncobj(device,
			context, queued);
		if (IS_ERR(tsobj)) {
			ret = PTR_ERR(tsobj);
			goto err;
		}

		drawobjs[index++] = DRAWOBJ(tsobj);

		bindobj = kgsl_drawobj_bind_create(device, context);
		if (IS_ERR(bindobj)) {
			ret = PTR_ERR(bindobj);
			goto err;
		}

		drawobjs[index++] = DRAWOBJ(bindobj);

		ret = kgsl_drawobj_add_bind(dev_priv, bindobj,
			cmdlist, param->cmdsize);
		if (ret)
			goto err;
	} else if (generic.type == KGSL_GPU_AUX_COMMAND_TIMELINE) {
		struct kgsl_drawobj_timeline *timelineobj;
		struct kgsl_drawobj_cmd *markerobj;

		timelineobj = kgsl_drawobj_timeline_create(device,
			context);
		if (IS_ERR(timelineobj)) {
			ret = PTR_ERR(timelineobj);
			goto err;
		}

		drawobjs[index++] = DRAWOBJ(timelineobj);

		ret = kgsl_drawobj_add_timeline(dev_priv, timelineobj,
			cmdlist, param->cmdsize);
		if (ret)
			goto err;

		/*
		 * Userspace needs a timestamp to associate with this
		 * submission. Use a marker to keep the timestamp
		 * bookkeeping correct.
		 */
		markerobj = kgsl_drawobj_cmd_create(device, context,
			KGSL_DRAWOBJ_MARKER, MARKEROBJ_TYPE);
		if (IS_ERR(markerobj)) {
			ret = PTR_ERR(markerobj);
			goto err;
		}

		drawobjs[index++] = DRAWOBJ(markerobj);
	} else {
		ret = -EINVAL;
		goto err;
	}

	ret = device->ftbl->queue_cmds(dev_priv, context,
		drawobjs, index, &param->timestamp);

err:
	kgsl_context_put(context);

	if (ret && ret != -EPROTO) {
		for (i = 0; i < count; i++)
			kgsl_drawobj_destroy(drawobjs[i]);
	}

	kvfree(drawobjs);

	return ret;
}
/* Returns 0 on failure. Returns command type(s) on success */
static unsigned int _process_recurring_input(struct kgsl_device *device,
		unsigned int flags, unsigned int numcmds,
		unsigned int numobjs)
{
	if (numcmds > KGSL_MAX_NUMIBS ||
			numobjs > KGSL_MAX_NUMIBS)
		return 0;

	/* SYNC and MARKER objects are not allowed through recurring commands */
	if ((flags & KGSL_DRAWOBJ_MARKER) || (flags & KGSL_DRAWOBJ_SYNC))
		return 0;

	if (numcmds)
		return CMDOBJ_TYPE;

	return 0;
}

long kgsl_ioctl_recurring_command(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_recurring_command *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context = NULL;
	struct kgsl_drawobj *drawobj = NULL;
	struct kgsl_drawobj_cmd *cmdobj = NULL;
	unsigned int type;
	long result;

	if (!(param->flags & (unsigned long)(KGSL_DRAWOBJ_START_RECURRING |
			KGSL_DRAWOBJ_STOP_RECURRING)))
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	type = _process_recurring_input(device, param->flags, param->numcmds,
			param->numobjs);
	if (!type) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags, type);
	if (IS_ERR(cmdobj)) {
		result = PTR_ERR(cmdobj);
		goto done;
	}

	drawobj = DRAWOBJ(cmdobj);

	/* Clear the profiling flag for recurring command */
	drawobj->flags &= ~(unsigned long)KGSL_DRAWOBJ_PROFILING;

	result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
			u64_to_user_ptr(param->cmdlist),
			param->cmdsize, param->numcmds);
	if (result)
		goto done;

	result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
			u64_to_user_ptr(param->objlist),
			param->objsize, param->numobjs);
	if (result)
		goto done;

	if (drawobj->flags & KGSL_DRAWOBJ_STOP_RECURRING) {
		result = device->ftbl->dequeue_recurring_cmd(device, context);
		if (!result)
			kgsl_drawobj_destroy(drawobj);
	} else {
		result = device->ftbl->queue_recurring_cmd(dev_priv, context, drawobj);
	}

done:
	/*
	 * -EPROTO is a "success" error - it just tells the user that the
	 * context had previously faulted
	 */
	if (result && result != -EPROTO)
		kgsl_drawobj_destroy(drawobj);

	kgsl_context_put(context);

	return result;
}
long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	long result = -EINVAL;

	mutex_lock(&device->mutex);
	context = kgsl_context_get_owner(dev_priv, param->context_id);

	if (context) {
		result = kgsl_readtimestamp(device, context,
			param->type, &param->timestamp);

		trace_kgsl_readtimestamp(device, context->id,
			param->type, param->timestamp);
	}

	kgsl_context_put(context);
	mutex_unlock(&device->mutex);
	return result;
}

long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;
	struct kgsl_device *device = dev_priv->device;

	context = device->ftbl->drawctxt_create(dev_priv, &param->flags);
	if (IS_ERR(context)) {
		result = PTR_ERR(context);
		goto done;
	}
	trace_kgsl_context_create(dev_priv->device, context, param->flags);

	/* Commit the pointer to the context in context_idr */
	write_lock(&device->context_lock);
	idr_replace(&device->context_idr, context, context->id);
	param->drawctxt_id = context->id;
	write_unlock(&device->context_lock);

done:
	return result;
}

long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_drawctxt_destroy *param = data;
	struct kgsl_context *context;

	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
	if (context == NULL)
		return -EINVAL;

	kgsl_context_detach(context);
	kgsl_context_put(context);

	return 0;
}
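/*
 * Mark an entry pending-free and drop the process reference that userspace
 * held on it. Returns -EBUSY if another thread already started the free.
 */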
long gpumem_free_entry(struct kgsl_mem_entry *entry)
{
	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	trace_kgsl_mem_free(entry);
	kgsl_memfree_add(pid_nr(entry->priv->pid),
			entry->memdesc.pagetable ?
				entry->memdesc.pagetable->name : 0,
			entry->memdesc.gpuaddr, entry->memdesc.size,
			entry->memdesc.flags);

	kgsl_mem_entry_put(entry);

	return 0;
}

static void gpumem_free_func(struct kgsl_device *device,
		struct kgsl_event_group *group, void *priv, int ret)
{
	struct kgsl_context *context = group->context;
	struct kgsl_mem_entry *entry = priv;
	unsigned int timestamp;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &timestamp);

	/* Free the memory for all event types */
	trace_kgsl_mem_timestamp_free(device, entry, KGSL_CONTEXT_ID(context),
		timestamp, 0);
	kgsl_memfree_add(pid_nr(entry->priv->pid),
			entry->memdesc.pagetable ?
				entry->memdesc.pagetable->name : 0,
			entry->memdesc.gpuaddr, entry->memdesc.size,
			entry->memdesc.flags);

	kgsl_mem_entry_put_deferred(entry);
}

static long gpumem_free_entry_on_timestamp(struct kgsl_device *device,
		struct kgsl_mem_entry *entry,
		struct kgsl_context *context, unsigned int timestamp)
{
	int ret;
	unsigned int temp;

	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &temp);
	trace_kgsl_mem_timestamp_queue(device, entry, context->id, temp,
		timestamp);
	ret = kgsl_add_event(device, &context->events,
		timestamp, gpumem_free_func, entry);

	if (ret)
		kgsl_mem_entry_unset_pend(entry);

	return ret;
}

long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;
	long ret;

	entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
	if (entry == NULL)
		return -EINVAL;

	ret = gpumem_free_entry(entry);
	kgsl_mem_entry_put(entry);

	return ret;
}

long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpumem_free_id *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;
	long ret;

	entry = kgsl_sharedmem_find_id(private, param->id);
	if (entry == NULL)
		return -EINVAL;

	ret = gpumem_free_entry(entry);
	kgsl_mem_entry_put(entry);

	return ret;
}

static long gpuobj_free_on_timestamp(struct kgsl_device_private *dev_priv,
		struct kgsl_mem_entry *entry, struct kgsl_gpuobj_free *param)
{
	struct kgsl_gpu_event_timestamp event;
	struct kgsl_context *context;
	long ret;

	if (copy_struct_from_user(&event, sizeof(event),
		u64_to_user_ptr(param->priv), param->len))
		return -EFAULT;

	if (event.context_id == 0)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, event.context_id);
	if (context == NULL)
		return -EINVAL;

	ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry, context,
		event.timestamp);

	kgsl_context_put(context);
	return ret;
}
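/*
 * Fence callback for event-based frees: log the free and drop the deferred
 * reference once the fence that userspace supplied has signaled.
 */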
static bool gpuobj_free_fence_func(void *priv)
{
	struct kgsl_mem_entry *entry = priv;

	trace_kgsl_mem_free(entry);
	kgsl_memfree_add(pid_nr(entry->priv->pid),
			entry->memdesc.pagetable ?
				entry->memdesc.pagetable->name : 0,
			entry->memdesc.gpuaddr, entry->memdesc.size,
			entry->memdesc.flags);

	kgsl_mem_entry_put_deferred(entry);
	return true;
}

static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
		struct kgsl_mem_entry *entry, struct kgsl_gpuobj_free *param)
{
	struct kgsl_sync_fence_cb *handle;
	struct kgsl_gpu_event_fence event;

	if (!kgsl_mem_entry_set_pend(entry))
		return -EBUSY;

	if (copy_struct_from_user(&event, sizeof(event),
		u64_to_user_ptr(param->priv), param->len)) {
		kgsl_mem_entry_unset_pend(entry);
		return -EFAULT;
	}

	if (event.fd < 0) {
		kgsl_mem_entry_unset_pend(entry);
		return -EINVAL;
	}

	handle = kgsl_sync_fence_async_wait(event.fd, gpuobj_free_fence_func, entry);

	if (IS_ERR(handle)) {
		kgsl_mem_entry_unset_pend(entry);
		return PTR_ERR(handle);
	}

	/* if handle is NULL the fence has already signaled */
	if (handle == NULL)
		gpuobj_free_fence_func(entry);

	return 0;
}

long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_gpuobj_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry;
	long ret;

	entry = kgsl_sharedmem_find_id(private, param->id);
	if (entry == NULL)
		return -EINVAL;

	/* If no event is specified then free immediately */
	if (!(param->flags & KGSL_GPUOBJ_FREE_ON_EVENT))
		ret = gpumem_free_entry(entry);
	else if (param->type == KGSL_GPU_EVENT_TIMESTAMP)
		ret = gpuobj_free_on_timestamp(dev_priv, entry, param);
	else if (param->type == KGSL_GPU_EVENT_FENCE)
		ret = gpuobj_free_on_fence(dev_priv, entry, param);
	else
		ret = -EINVAL;

	kgsl_mem_entry_put(entry);
	return ret;
}

long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
		struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
	struct kgsl_context *context = NULL;
	struct kgsl_mem_entry *entry;
	long ret = -EINVAL;

	if (param->type != KGSL_TIMESTAMP_RETIRED)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	entry = kgsl_sharedmem_find(dev_priv->process_priv,
		(uint64_t) param->gpuaddr);
	if (entry == NULL) {
		kgsl_context_put(context);
		return -EINVAL;
	}

	ret = gpumem_free_entry_on_timestamp(dev_priv->device, entry,
		context, param->timestamp);

	kgsl_mem_entry_put(entry);
	kgsl_context_put(context);

	return ret;
}
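/*
 * Helpers for importing user memory: _vma_is_cached() checks the CPU cache
 * attributes of a VMA, and check_vma() validates that the whole user range
 * is backed by VMAs with consistent cacheability that kgsl does not already
 * own.
 */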
static bool _vma_is_cached(struct vm_area_struct *vma)
{
	pteval_t pgprot_val = pgprot_val(vma->vm_page_prot);

	/*
	 * An uncached cpu mapping can either be marked as writecombine or noncached. If it isn't
	 * either, then it means it is cached.
	 */
	if ((pgprot_val != pgprot_val(pgprot_writecombine((vma->vm_page_prot)))) &&
		(pgprot_val != pgprot_val(pgprot_noncached(vma->vm_page_prot))))
		return true;

	return false;
}

static bool check_vma(struct kgsl_device *device, struct kgsl_memdesc *memdesc,
		unsigned long hostptr)
{
	struct vm_area_struct *vma;
	unsigned long cur = hostptr;
	bool cached;

	vma = find_vma(current->mm, hostptr);
	if (!vma)
		return false;

	/* Don't remap memory that we already own */
	if (vma->vm_file && (vma->vm_ops == &kgsl_gpumem_vm_ops))
		return false;

	cached = _vma_is_cached(vma);
	cur = vma->vm_end;

	while (cur < (hostptr + memdesc->size)) {
		vma = find_vma(current->mm, cur);
		if (!vma)
			return false;

		/* Don't remap memory that we already own */
		if (vma->vm_file && (vma->vm_ops == &kgsl_gpumem_vm_ops))
			return false;

		/*
		 * Make sure the entire memdesc is either cached or noncached. Bail out if there is
		 * a mismatch as it can lead to coherency issues.
		 */
		if (cached != _vma_is_cached(vma))
			return false;

		cur = vma->vm_end;
	}

	/*
	 * If cpu side mapping is cached (and io-coherency is enabled), the gpu mapping should be
	 * marked io-coherent to avoid coherency issues.
	 */
	if (cached && kgsl_mmu_has_feature(device, KGSL_MMU_IO_COHERENT) &&
		IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT))
		memdesc->flags |= KGSL_MEMFLAGS_IOCOHERENT;

	return true;
}
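/*
 * Pin the user pages backing @useraddr with get_user_pages() and build a
 * scatter-gather table for them in @memdesc. On any failure the pinned
 * pages are released and the partially built sg table is freed.
 */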
static int memdesc_sg_virt(struct kgsl_device *device, struct kgsl_memdesc *memdesc,
		unsigned long useraddr)
{
	int ret = 0;
	long npages = 0, i;
	size_t sglen = (size_t) (memdesc->size / PAGE_SIZE);
	struct page **pages = NULL;
	int write = ((memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY) ? 0 :
		FOLL_WRITE);

	if (sglen == 0 || sglen >= LONG_MAX)
		return -EINVAL;

	pages = kvcalloc(sglen, sizeof(*pages), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	memdesc->sgt = kmalloc(sizeof(*memdesc->sgt), GFP_KERNEL);
	if (memdesc->sgt == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	mmap_read_lock(current->mm);
	if (!check_vma(device, memdesc, useraddr)) {
		mmap_read_unlock(current->mm);
		ret = -EFAULT;
		goto out;
	}

	npages = get_user_pages(useraddr, sglen, write, pages, NULL);
	mmap_read_unlock(current->mm);

	ret = (npages < 0) ? (int)npages : 0;
	if (ret)
		goto out;

	if ((unsigned long) npages != sglen) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table_from_pages(memdesc->sgt, pages, npages,
					0, memdesc->size, GFP_KERNEL);
	if (ret)
		goto out;

	ret = kgsl_cache_range_op(memdesc, 0, memdesc->size,
			KGSL_CACHE_OP_FLUSH);
	if (ret)
		sg_free_table(memdesc->sgt);

out:
	if (ret) {
		for (i = 0; i < npages; i++)
			put_page(pages[i]);

		kfree(memdesc->sgt);
		memdesc->sgt = NULL;
	}

	kvfree(pages);
	return ret;
}
static const struct kgsl_memdesc_ops kgsl_usermem_ops = {
	.free = kgsl_destroy_anon,
	.put_gpuaddr = kgsl_unmap_and_put_gpuaddr,
};

static int kgsl_setup_anon_useraddr(struct kgsl_device *device, struct kgsl_pagetable *pagetable,
	struct kgsl_mem_entry *entry, unsigned long hostptr, size_t offset, size_t size)
{
	/* Map an anonymous memory chunk */
	int ret;

	if (size == 0 || offset != 0 ||
		!IS_ALIGNED(size, PAGE_SIZE))
		return -EINVAL;

	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = (uint64_t) size;
	entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR;
	entry->memdesc.ops = &kgsl_usermem_ops;

	if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
		/* Register the address in the database */
		ret = kgsl_mmu_set_svm_region(pagetable,
			(uint64_t) hostptr, (uint64_t) size);

		/* if OOM, retry once after flushing lockless_workqueue */
		if (ret == -ENOMEM) {
			flush_workqueue(kgsl_driver.lockless_workqueue);
			ret = kgsl_mmu_set_svm_region(pagetable,
				(uint64_t) hostptr, (uint64_t) size);
		}

		if (ret)
			return ret;

		entry->memdesc.gpuaddr = (uint64_t) hostptr;
	}

	ret = memdesc_sg_virt(device, &entry->memdesc, hostptr);

	if (ret && kgsl_memdesc_use_cpu_map(&entry->memdesc))
		kgsl_mmu_put_gpuaddr(pagetable, &entry->memdesc);

	return ret;
}

static int kgsl_setup_useraddr(struct kgsl_device *device,
		struct kgsl_pagetable *pagetable,
		struct kgsl_mem_entry *entry,
		unsigned long hostptr, size_t offset, size_t size)
{
	if (hostptr == 0 || !IS_ALIGNED(hostptr, PAGE_SIZE))
		return -EINVAL;

	return kgsl_setup_anon_useraddr(device, pagetable, entry,
		hostptr, offset, size);
}
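/*
 * Import helper for KGSL_USER_MEM_TYPE_ADDR: sanitize the user supplied
 * flags, read the kgsl_gpuobj_import_useraddr payload and map the anonymous
 * user range. Secure mappings are rejected on this path.
 */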
static long _gpuobj_map_useraddr(struct kgsl_device *device,
		struct kgsl_pagetable *pagetable,
		struct kgsl_mem_entry *entry,
		struct kgsl_gpuobj_import *param)
{
	struct kgsl_gpuobj_import_useraddr useraddr;

	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
		| KGSL_CACHEMODE_MASK
		| KGSL_MEMFLAGS_USE_CPU_MAP
		| KGSL_MEMTYPE_MASK
		| KGSL_MEMFLAGS_FORCE_32BIT
		| KGSL_MEMFLAGS_IOCOHERENT;

	/* Specifying SECURE is an explicit error */
	if (param->flags & KGSL_MEMFLAGS_SECURE)
		return -ENOTSUPP;

	kgsl_memdesc_init(device, &entry->memdesc, param->flags);

	if (copy_from_user(&useraddr,
		u64_to_user_ptr(param->priv), sizeof(useraddr)))
		return -EINVAL;

	/* Verify that the virtaddr and len are within bounds */
	if (useraddr.virtaddr > ULONG_MAX)
		return -EINVAL;

	return kgsl_setup_useraddr(device, pagetable, entry,
		(unsigned long) useraddr.virtaddr, 0, param->priv_len);
}

static bool check_and_warn_secured(struct kgsl_device *device)
{
	if (kgsl_mmu_is_secured(&device->mmu))
		return true;

	dev_WARN_ONCE(device->dev, 1, "Secure buffers are not supported\n");

	return false;
}
  2484. #ifdef CONFIG_DMA_SHARED_BUFFER
  2485. static int kgsl_setup_dma_buf(struct kgsl_device *device,
  2486. struct kgsl_pagetable *pagetable,
  2487. struct kgsl_mem_entry *entry,
  2488. struct dma_buf *dmabuf);
  2489. static long _gpuobj_map_dma_buf(struct kgsl_device *device,
  2490. struct kgsl_pagetable *pagetable,
  2491. struct kgsl_mem_entry *entry,
  2492. struct kgsl_gpuobj_import *param,
  2493. int *fd)
  2494. {
  2495. bool iocoherent = (param->flags & KGSL_MEMFLAGS_IOCOHERENT);
  2496. struct kgsl_gpuobj_import_dma_buf buf;
  2497. struct dma_buf *dmabuf;
  2498. int ret;
  2499. param->flags &= KGSL_MEMFLAGS_GPUREADONLY |
  2500. KGSL_MEMTYPE_MASK |
  2501. KGSL_MEMALIGN_MASK |
  2502. KGSL_MEMFLAGS_SECURE |
  2503. KGSL_MEMFLAGS_FORCE_32BIT |
  2504. KGSL_MEMFLAGS_GUARD_PAGE;
  2505. kgsl_memdesc_init(device, &entry->memdesc, param->flags);
  2506. /*
2507. * If content protection is not enabled and a secure buffer
2508. * is requested to be mapped, return an error.
  2509. */
  2510. if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE) {
  2511. if (!check_and_warn_secured(device))
  2512. return -ENOTSUPP;
  2513. entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
  2514. }
  2515. if (copy_struct_from_user(&buf, sizeof(buf),
  2516. u64_to_user_ptr(param->priv), param->priv_len))
  2517. return -EFAULT;
  2518. if (buf.fd < 0)
  2519. return -EINVAL;
  2520. *fd = buf.fd;
  2521. dmabuf = dma_buf_get(buf.fd);
  2522. if (IS_ERR(dmabuf))
  2523. return PTR_ERR(dmabuf);
  2524. /*
  2525. * DMA BUFS are always cached so make sure that is reflected in
  2526. * the memdesc.
  2527. */
  2528. entry->memdesc.flags |=
  2529. FIELD_PREP(KGSL_CACHEMODE_MASK, KGSL_CACHEMODE_WRITEBACK);
  2530. /*
  2531. * Enable I/O coherency if it is 1) a thing, and either
  2532. * 2) enabled by default or 3) enabled by the caller
  2533. */
  2534. if (kgsl_mmu_has_feature(device, KGSL_MMU_IO_COHERENT) &&
  2535. (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) ||
  2536. iocoherent))
  2537. entry->memdesc.flags |= KGSL_MEMFLAGS_IOCOHERENT;
  2538. ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
  2539. if (ret)
  2540. dma_buf_put(dmabuf);
  2541. return ret;
  2542. }
  2543. #else
  2544. static long _gpuobj_map_dma_buf(struct kgsl_device *device,
  2545. struct kgsl_pagetable *pagetable,
  2546. struct kgsl_mem_entry *entry,
  2547. struct kgsl_gpuobj_import *param,
  2548. int *fd)
  2549. {
  2550. return -EINVAL;
  2551. }
  2552. #endif
  2553. static void kgsl_process_add_stats(struct kgsl_process_private *priv,
  2554. unsigned int type, uint64_t size)
  2555. {
  2556. u64 ret = atomic64_add_return(size, &priv->stats[type].cur);
  2557. if (ret > priv->stats[type].max)
  2558. priv->stats[type].max = ret;
  2559. }
  2560. u64 kgsl_get_stats(pid_t pid)
  2561. {
  2562. struct kgsl_process_private *process;
  2563. u64 ret;
  2564. if (pid < 0)
  2565. return atomic_long_read(&kgsl_driver.stats.page_alloc);
  2566. process = kgsl_process_private_find(pid);
  2567. if (!process)
  2568. return 0;
  2569. ret = atomic64_read(&process->stats[KGSL_MEM_ENTRY_KERNEL].cur);
  2570. kgsl_process_private_put(process);
  2571. return ret;
  2572. }
  2573. long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
  2574. unsigned int cmd, void *data)
  2575. {
  2576. struct kgsl_process_private *private = dev_priv->process_priv;
  2577. struct kgsl_device *device = dev_priv->device;
  2578. struct kgsl_gpuobj_import *param = data;
  2579. struct kgsl_mem_entry *entry;
  2580. int ret, fd = -1;
  2581. if (param->type != KGSL_USER_MEM_TYPE_ADDR &&
  2582. param->type != KGSL_USER_MEM_TYPE_DMABUF)
  2583. return -ENOTSUPP;
  2584. if (param->flags & KGSL_MEMFLAGS_VBO)
  2585. return -EINVAL;
  2586. entry = kgsl_mem_entry_create();
  2587. if (entry == NULL)
  2588. return -ENOMEM;
  2589. if (param->type == KGSL_USER_MEM_TYPE_ADDR)
  2590. ret = _gpuobj_map_useraddr(device, private->pagetable,
  2591. entry, param);
  2592. else
  2593. ret = _gpuobj_map_dma_buf(device, private->pagetable,
  2594. entry, param, &fd);
  2595. if (ret)
  2596. goto out;
  2597. if (entry->memdesc.size >= SZ_1M)
  2598. kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
  2599. else if (entry->memdesc.size >= SZ_64K)
  2600. kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
  2601. param->flags = entry->memdesc.flags;
  2602. ret = kgsl_mem_entry_attach_and_map(device, private, entry);
  2603. if (ret)
  2604. goto unmap;
  2605. param->id = entry->id;
  2606. KGSL_STATS_ADD(entry->memdesc.size, &kgsl_driver.stats.mapped,
  2607. &kgsl_driver.stats.mapped_max);
  2608. kgsl_process_add_stats(private,
  2609. kgsl_memdesc_usermem_type(&entry->memdesc),
  2610. entry->memdesc.size);
  2611. trace_kgsl_mem_map(entry, fd);
  2612. kgsl_mem_entry_commit_process(entry);
  2613. /* Put the extra ref from kgsl_mem_entry_create() */
  2614. kgsl_mem_entry_put(entry);
  2615. return 0;
  2616. unmap:
  2617. kgsl_sharedmem_free(&entry->memdesc);
  2618. out:
  2619. kfree(entry);
  2620. return ret;
  2621. }
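/*
 * Illustrative userspace sketch (not part of the driver): importing a
 * dma-buf through the path above. The device node name, the header and
 * the IOCTL_KGSL_GPUOBJ_IMPORT command macro are assumptions based on
 * the msm_kgsl.h uapi; error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/msm_kgsl.h>
 *
 *	int fd = open("/dev/kgsl-3d0", O_RDWR);
 *	struct kgsl_gpuobj_import_dma_buf buf = { .fd = dmabuf_fd };
 *	struct kgsl_gpuobj_import param = {
 *		.priv = (uintptr_t)&buf,
 *		.priv_len = sizeof(buf),
 *		.flags = 0,
 *		.type = KGSL_USER_MEM_TYPE_DMABUF,
 *	};
 *	if (!ioctl(fd, IOCTL_KGSL_GPUOBJ_IMPORT, &param))
 *		use_object(param.id);	/* hypothetical helper */
 */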
  2622. static long _map_usermem_addr(struct kgsl_device *device,
  2623. struct kgsl_pagetable *pagetable, struct kgsl_mem_entry *entry,
  2624. unsigned long hostptr, size_t offset, size_t size)
  2625. {
  2626. if (!kgsl_mmu_has_feature(device, KGSL_MMU_PAGED))
  2627. return -EINVAL;
  2628. /* No CPU mapped buffer could ever be secure */
  2629. if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
  2630. return -EINVAL;
  2631. return kgsl_setup_useraddr(device, pagetable, entry, hostptr,
  2632. offset, size);
  2633. }
  2634. #ifdef CONFIG_DMA_SHARED_BUFFER
  2635. static int _map_usermem_dma_buf(struct kgsl_device *device,
  2636. struct kgsl_pagetable *pagetable,
  2637. struct kgsl_mem_entry *entry,
  2638. unsigned int fd)
  2639. {
  2640. int ret;
  2641. struct dma_buf *dmabuf;
  2642. /*
2643. * If content protection is not enabled and a secure buffer
2644. * is requested to be mapped, return an error.
  2645. */
  2646. if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE) {
  2647. if (!check_and_warn_secured(device))
  2648. return -EOPNOTSUPP;
  2649. entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
  2650. }
  2651. dmabuf = dma_buf_get(fd);
  2652. if (IS_ERR_OR_NULL(dmabuf)) {
  2653. ret = PTR_ERR(dmabuf);
  2654. return ret ? ret : -EINVAL;
  2655. }
  2656. ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
  2657. if (ret)
  2658. dma_buf_put(dmabuf);
  2659. return ret;
  2660. }
  2661. #else
  2662. static int _map_usermem_dma_buf(struct kgsl_device *device,
  2663. struct kgsl_pagetable *pagetable,
  2664. struct kgsl_mem_entry *entry,
  2665. unsigned int fd)
  2666. {
  2667. return -EINVAL;
  2668. }
  2669. #endif
  2670. #ifdef CONFIG_DMA_SHARED_BUFFER
  2671. static int verify_secure_access(struct kgsl_device *device,
  2672. struct kgsl_mem_entry *entry, struct dma_buf *dmabuf)
  2673. {
  2674. bool secure = entry->memdesc.priv & KGSL_MEMDESC_SECURE;
  2675. uint32_t *vmid_list = NULL, *perms_list = NULL;
  2676. uint32_t nelems = 0;
  2677. int i;
  2678. if (mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
  2679. (int **)&perms_list, (int *)&nelems)) {
  2680. dev_info(device->dev, "Skipped access check\n");
  2681. return 0;
  2682. }
  2683. /* Check if secure buffer is accessible to CP_PIXEL */
  2684. for (i = 0; i < nelems; i++) {
  2685. if (vmid_list[i] == VMID_CP_PIXEL)
  2686. break;
  2687. }
  2688. kfree(vmid_list);
  2689. kfree(perms_list);
  2690. /*
  2691. * Do not import a buffer if it is accessible to CP_PIXEL but is being imported as
2692. * a buffer accessible to the non-secure GPU. Also, if the buffer is to be made
2693. * accessible to the secure GPU, it must be accessible to CP_PIXEL.
  2694. */
  2695. if (!(secure ^ (i == nelems)))
  2696. return -EPERM;
  2697. if (secure && mem_buf_dma_buf_exclusive_owner(dmabuf))
  2698. return -EPERM;
  2699. return 0;
  2700. }
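/*
 * Worked example of the check above (sketch derived from the XOR
 * condition): "found" means VMID_CP_PIXEL was present in the vmid list,
 * i.e. the loop broke with i < nelems.
 *
 *	secure  found  result
 *	   0      0    allowed (normal buffer on the non-secure GPU)
 *	   0      1    -EPERM  (CP_PIXEL buffer imported as non-secure)
 *	   1      0    -EPERM  (secure import of a non-CP_PIXEL buffer)
 *	   1      1    allowed, unless the dma-buf is exclusively owned
 */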
  2701. static int kgsl_setup_dma_buf(struct kgsl_device *device,
  2702. struct kgsl_pagetable *pagetable,
  2703. struct kgsl_mem_entry *entry,
  2704. struct dma_buf *dmabuf)
  2705. {
  2706. int ret = 0;
  2707. struct scatterlist *s;
  2708. struct sg_table *sg_table = NULL;
  2709. struct dma_buf_attachment *attach = NULL;
  2710. struct kgsl_dma_buf_meta *metadata;
  2711. metadata = kzalloc(sizeof(*metadata), GFP_KERNEL);
  2712. if (!metadata)
  2713. return -ENOMEM;
  2714. attach = dma_buf_attach(dmabuf, device->dev);
  2715. if (IS_ERR(attach)) {
  2716. ret = PTR_ERR(attach);
  2717. goto out;
  2718. }
  2719. /*
  2720. * If dma buffer is marked IO coherent, skip sync at attach,
  2721. * which involves flushing the buffer on CPU.
  2722. * HW manages coherency for IO coherent buffers.
  2723. */
  2724. if (entry->memdesc.flags & KGSL_MEMFLAGS_IOCOHERENT)
  2725. attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
  2726. metadata->dmabuf = dmabuf;
  2727. metadata->attach = attach;
  2728. metadata->entry = entry;
  2729. entry->priv_data = metadata;
  2730. entry->memdesc.pagetable = pagetable;
  2731. entry->memdesc.size = 0;
  2732. entry->memdesc.ops = &kgsl_dmabuf_ops;
2733. /* USE_CPU_MAP is not implemented for ION. */
  2734. entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
  2735. entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ION;
  2736. #if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
  2737. sg_table = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
  2738. #else
  2739. sg_table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
  2740. #endif
  2741. if (IS_ERR_OR_NULL(sg_table)) {
  2742. ret = PTR_ERR(sg_table);
  2743. goto out;
  2744. }
  2745. metadata->table = sg_table;
  2746. entry->priv_data = metadata;
  2747. entry->memdesc.sgt = sg_table;
  2748. ret = verify_secure_access(device, entry, dmabuf);
  2749. if (ret)
  2750. goto out;
  2751. /* Calculate the size of the memdesc from the sglist */
  2752. for (s = entry->memdesc.sgt->sgl; s != NULL; s = sg_next(s))
  2753. entry->memdesc.size += (uint64_t) s->length;
  2754. if (!entry->memdesc.size) {
  2755. ret = -EINVAL;
  2756. goto out;
  2757. }
  2758. add_dmabuf_list(metadata);
  2759. entry->memdesc.size = PAGE_ALIGN(entry->memdesc.size);
  2760. out:
  2761. if (ret) {
  2762. if (!IS_ERR_OR_NULL(sg_table))
  2763. #if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
  2764. dma_buf_unmap_attachment_unlocked(attach, sg_table, DMA_BIDIRECTIONAL);
  2765. #else
  2766. dma_buf_unmap_attachment(attach, sg_table, DMA_BIDIRECTIONAL);
  2767. #endif
  2768. if (!IS_ERR_OR_NULL(attach))
  2769. dma_buf_detach(dmabuf, attach);
  2770. kfree(metadata);
  2771. }
  2772. return ret;
  2773. }
  2774. #endif
  2775. #ifdef CONFIG_DMA_SHARED_BUFFER
  2776. void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
  2777. int *egl_surface_count, int *egl_image_count)
  2778. {
  2779. struct kgsl_dma_buf_meta *metadata = entry->priv_data;
  2780. struct dmabuf_list_entry *dle = metadata->dle;
  2781. struct kgsl_dma_buf_meta *scan_meta;
  2782. struct kgsl_mem_entry *scan_mem_entry;
  2783. if (!dle)
  2784. return;
  2785. spin_lock(&kgsl_dmabuf_lock);
  2786. list_for_each_entry(scan_meta, &dle->dmabuf_list, node) {
  2787. scan_mem_entry = scan_meta->entry;
  2788. switch (kgsl_memdesc_get_memtype(&scan_mem_entry->memdesc)) {
  2789. case KGSL_MEMTYPE_EGL_SURFACE:
  2790. (*egl_surface_count)++;
  2791. break;
  2792. case KGSL_MEMTYPE_EGL_IMAGE:
  2793. (*egl_image_count)++;
  2794. break;
  2795. }
  2796. }
  2797. spin_unlock(&kgsl_dmabuf_lock);
  2798. }
  2799. unsigned long kgsl_get_dmabuf_inode_number(struct kgsl_mem_entry *entry)
  2800. {
  2801. struct kgsl_dma_buf_meta *metadata = entry->priv_data;
  2802. return metadata ? file_inode(metadata->dmabuf->file)->i_ino : 0;
  2803. }
  2804. #else
  2805. void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
  2806. int *egl_surface_count, int *egl_image_count)
  2807. {
  2808. }
  2809. unsigned long kgsl_get_dmabuf_inode_number(struct kgsl_mem_entry *entry)
  2810. {
2811. return 0;
}
  2812. #endif
  2813. long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
  2814. unsigned int cmd, void *data)
  2815. {
  2816. int result = -EINVAL;
  2817. struct kgsl_map_user_mem *param = data;
  2818. struct kgsl_mem_entry *entry = NULL;
  2819. struct kgsl_process_private *private = dev_priv->process_priv;
  2820. struct kgsl_device *device = dev_priv->device;
  2821. unsigned int memtype;
  2822. uint64_t flags;
  2823. /*
2824. * If content protection is not enabled and a secure buffer
2825. * is requested to be mapped, return an error.
  2826. */
  2827. if (param->flags & KGSL_MEMFLAGS_SECURE) {
  2828. if (!check_and_warn_secured(device))
  2829. return -EOPNOTSUPP;
  2830. /* Can't use CPU map with secure buffers */
  2831. if (param->flags & KGSL_MEMFLAGS_USE_CPU_MAP)
  2832. return -EINVAL;
  2833. }
  2834. entry = kgsl_mem_entry_create();
  2835. if (entry == NULL)
  2836. return -ENOMEM;
  2837. /*
  2838. * Convert from enum value to KGSL_MEM_ENTRY value, so that
  2839. * we can use the latter consistently everywhere.
  2840. */
  2841. memtype = param->memtype + 1;
  2842. /*
  2843. * Mask off unknown flags from userspace. This way the caller can
  2844. * check if a flag is supported by looking at the returned flags.
  2845. * Note: CACHEMODE is ignored for this call. Caching should be
  2846. * determined by type of allocation being mapped.
  2847. */
  2848. flags = param->flags & (KGSL_MEMFLAGS_GPUREADONLY
  2849. | KGSL_MEMTYPE_MASK
  2850. | KGSL_MEMALIGN_MASK
  2851. | KGSL_MEMFLAGS_USE_CPU_MAP
  2852. | KGSL_MEMFLAGS_SECURE
  2853. | KGSL_MEMFLAGS_IOCOHERENT);
  2854. if (is_compat_task())
  2855. flags |= KGSL_MEMFLAGS_FORCE_32BIT;
  2856. kgsl_memdesc_init(device, &entry->memdesc, flags);
  2857. switch (memtype) {
  2858. case KGSL_MEM_ENTRY_USER:
  2859. result = _map_usermem_addr(device, private->pagetable,
  2860. entry, param->hostptr, param->offset, param->len);
  2861. break;
  2862. case KGSL_MEM_ENTRY_ION:
  2863. if (param->offset != 0)
  2864. result = -EINVAL;
  2865. else
  2866. result = _map_usermem_dma_buf(device,
  2867. private->pagetable, entry, param->fd);
  2868. break;
  2869. default:
  2870. result = -EOPNOTSUPP;
  2871. break;
  2872. }
  2873. if (result)
  2874. goto error;
  2875. if (entry->memdesc.size >= SZ_2M)
  2876. kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_2M));
  2877. else if (entry->memdesc.size >= SZ_1M)
  2878. kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
  2879. else if (entry->memdesc.size >= SZ_64K)
2880. kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
  2881. /* echo back flags */
  2882. param->flags = (unsigned int) entry->memdesc.flags;
  2883. result = kgsl_mem_entry_attach_and_map(device, private,
  2884. entry);
  2885. if (result)
  2886. goto error_attach;
  2887. /* Adjust the returned value for a non 4k aligned offset */
  2888. param->gpuaddr = (unsigned long)
  2889. entry->memdesc.gpuaddr + (param->offset & PAGE_MASK);
  2890. KGSL_STATS_ADD(param->len, &kgsl_driver.stats.mapped,
  2891. &kgsl_driver.stats.mapped_max);
  2892. kgsl_process_add_stats(private,
  2893. kgsl_memdesc_usermem_type(&entry->memdesc), param->len);
  2894. trace_kgsl_mem_map(entry, param->fd);
  2895. kgsl_mem_entry_commit_process(entry);
  2896. /* Put the extra ref from kgsl_mem_entry_create() */
  2897. kgsl_mem_entry_put(entry);
  2898. return result;
  2899. error_attach:
  2900. kgsl_sharedmem_free(&entry->memdesc);
  2901. error:
  2902. /* Clear gpuaddr here so userspace doesn't get any wrong ideas */
  2903. param->gpuaddr = 0;
  2904. kfree(entry);
  2905. return result;
  2906. }
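/*
 * Illustrative userspace sketch (not part of the driver): mapping an
 * existing page-aligned anonymous allocation through this ioctl. The
 * IOCTL_KGSL_MAP_USER_MEM macro name is an assumption based on the
 * msm_kgsl.h uapi; error handling is omitted.
 *
 *	struct kgsl_map_user_mem param = {
 *		.hostptr = (unsigned long)cpu_ptr,	/* page aligned */
 *		.len = size,				/* page aligned */
 *		.offset = 0,
 *		.memtype = KGSL_USER_MEM_TYPE_ADDR,
 *		.flags = KGSL_MEMFLAGS_USE_CPU_MAP,
 *	};
 *	if (!ioctl(fd, IOCTL_KGSL_MAP_USER_MEM, &param))
 *		gpuaddr = param.gpuaddr;	/* equals hostptr here */
 */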
  2907. static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry,
  2908. uint64_t offset, uint64_t length, unsigned int op)
  2909. {
  2910. int ret = 0;
  2911. int cacheop;
  2912. if (!entry)
  2913. return 0;
  2914. /* Cache ops are not allowed on secure memory */
  2915. if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
  2916. return 0;
  2917. /*
  2918. * Flush is defined as (clean | invalidate). If both bits are set, then
  2919. * do a flush, otherwise check for the individual bits and clean or inv
  2920. * as requested
  2921. */
  2922. if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH)
  2923. cacheop = KGSL_CACHE_OP_FLUSH;
  2924. else if (op & KGSL_GPUMEM_CACHE_CLEAN)
  2925. cacheop = KGSL_CACHE_OP_CLEAN;
  2926. else if (op & KGSL_GPUMEM_CACHE_INV)
  2927. cacheop = KGSL_CACHE_OP_INV;
  2928. else {
  2929. ret = -EINVAL;
  2930. goto done;
  2931. }
  2932. if (!(op & KGSL_GPUMEM_CACHE_RANGE)) {
  2933. offset = 0;
  2934. length = entry->memdesc.size;
  2935. }
  2936. if (kgsl_cachemode_is_cached(entry->memdesc.flags)) {
  2937. trace_kgsl_mem_sync_cache(entry, offset, length, op);
  2938. ret = kgsl_cache_range_op(&entry->memdesc, offset,
  2939. length, cacheop);
  2940. }
  2941. done:
  2942. return ret;
  2943. }
  2944. /* New cache sync function - supports both directions (clean and invalidate) */
  2945. long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
  2946. unsigned int cmd, void *data)
  2947. {
  2948. struct kgsl_gpumem_sync_cache *param = data;
  2949. struct kgsl_process_private *private = dev_priv->process_priv;
  2950. struct kgsl_mem_entry *entry = NULL;
  2951. long ret;
  2952. if (param->id != 0)
  2953. entry = kgsl_sharedmem_find_id(private, param->id);
  2954. else if (param->gpuaddr != 0)
  2955. entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
  2956. if (entry == NULL)
  2957. return -EINVAL;
  2958. ret = _kgsl_gpumem_sync_cache(entry, (uint64_t) param->offset,
  2959. (uint64_t) param->length, param->op);
  2960. kgsl_mem_entry_put(entry);
  2961. return ret;
  2962. }
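/*
 * Illustrative userspace sketch (not part of the driver): flushing the
 * first 4K of a cached allocation by id. Setting both CLEAN and INV is
 * treated as a FLUSH by _kgsl_gpumem_sync_cache() above. The
 * IOCTL_KGSL_GPUMEM_SYNC_CACHE macro name is an assumption based on the
 * msm_kgsl.h uapi.
 *
 *	struct kgsl_gpumem_sync_cache param = {
 *		.id = buffer_id,
 *		.op = KGSL_GPUMEM_CACHE_FLUSH | KGSL_GPUMEM_CACHE_RANGE,
 *		.offset = 0,
 *		.length = 4096,
 *	};
 *	ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &param);
 */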
  2963. static int mem_id_cmp(const void *_a, const void *_b)
  2964. {
  2965. const unsigned int *a = _a, *b = _b;
  2966. if (*a == *b)
  2967. return 0;
  2968. return (*a > *b) ? 1 : -1;
  2969. }
  2970. #ifdef CONFIG_ARM64
  2971. /* Do not support full flush on ARM64 targets */
  2972. static inline bool check_full_flush(size_t size, int op)
  2973. {
  2974. return false;
  2975. }
  2976. #else
  2977. /* Support full flush if the size is bigger than the threshold */
  2978. static inline bool check_full_flush(size_t size, int op)
  2979. {
  2980. /* If we exceed the breakeven point, flush the entire cache */
  2981. bool ret = (kgsl_driver.full_cache_threshold != 0) &&
  2982. (size >= kgsl_driver.full_cache_threshold) &&
  2983. (op == KGSL_GPUMEM_CACHE_FLUSH);
  2984. if (ret)
  2985. flush_cache_all();
  2986. return ret;
  2987. }
  2988. #endif
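/*
 * Worked example (sketch): with the default full_cache_threshold of
 * SZ_16M set in kgsl_driver below, a 32 MB KGSL_GPUMEM_CACHE_FLUSH
 * request on a non-ARM64 target takes the flush_cache_all() shortcut
 * above, while CLEAN or INV-only operations, and all requests on ARM64,
 * are always performed per range.
 */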
  2989. long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
  2990. unsigned int cmd, void *data)
  2991. {
  2992. int i;
  2993. struct kgsl_gpumem_sync_cache_bulk *param = data;
  2994. struct kgsl_process_private *private = dev_priv->process_priv;
  2995. unsigned int id, last_id = 0, *id_list = NULL, actual_count = 0;
  2996. struct kgsl_mem_entry **entries = NULL;
  2997. long ret = 0;
  2998. uint64_t op_size = 0;
  2999. bool full_flush = false;
  3000. if (param->id_list == NULL || param->count == 0
  3001. || param->count > (PAGE_SIZE / sizeof(unsigned int)))
  3002. return -EINVAL;
  3003. id_list = kcalloc(param->count, sizeof(unsigned int), GFP_KERNEL);
  3004. if (id_list == NULL)
  3005. return -ENOMEM;
  3006. entries = kcalloc(param->count, sizeof(*entries), GFP_KERNEL);
  3007. if (entries == NULL) {
  3008. ret = -ENOMEM;
  3009. goto end;
  3010. }
  3011. if (copy_from_user(id_list, param->id_list,
  3012. param->count * sizeof(unsigned int))) {
  3013. ret = -EFAULT;
  3014. goto end;
  3015. }
  3016. /* sort the ids so we can weed out duplicates */
  3017. sort(id_list, param->count, sizeof(*id_list), mem_id_cmp, NULL);
  3018. for (i = 0; i < param->count; i++) {
  3019. unsigned int cachemode;
  3020. struct kgsl_mem_entry *entry = NULL;
  3021. id = id_list[i];
  3022. /* skip 0 ids or duplicates */
  3023. if (id == last_id)
  3024. continue;
  3025. entry = kgsl_sharedmem_find_id(private, id);
  3026. if (entry == NULL)
  3027. continue;
  3028. /* skip uncached memory */
  3029. cachemode = kgsl_memdesc_get_cachemode(&entry->memdesc);
  3030. if (cachemode != KGSL_CACHEMODE_WRITETHROUGH &&
  3031. cachemode != KGSL_CACHEMODE_WRITEBACK) {
  3032. kgsl_mem_entry_put(entry);
  3033. continue;
  3034. }
  3035. op_size += entry->memdesc.size;
  3036. entries[actual_count++] = entry;
  3037. full_flush = check_full_flush(op_size, param->op);
  3038. if (full_flush) {
  3039. trace_kgsl_mem_sync_full_cache(actual_count, op_size);
  3040. break;
  3041. }
  3042. last_id = id;
  3043. }
  3044. param->op &= ~KGSL_GPUMEM_CACHE_RANGE;
  3045. for (i = 0; i < actual_count; i++) {
  3046. if (!full_flush)
  3047. _kgsl_gpumem_sync_cache(entries[i], 0,
  3048. entries[i]->memdesc.size,
  3049. param->op);
  3050. kgsl_mem_entry_put(entries[i]);
  3051. }
  3052. end:
  3053. kfree(entries);
  3054. kfree(id_list);
  3055. return ret;
  3056. }
  3057. /* Legacy cache function, does a flush (clean + invalidate) */
  3058. long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
  3059. unsigned int cmd, void *data)
  3060. {
  3061. struct kgsl_sharedmem_free *param = data;
  3062. struct kgsl_process_private *private = dev_priv->process_priv;
  3063. struct kgsl_mem_entry *entry = NULL;
  3064. long ret;
  3065. entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
  3066. if (entry == NULL)
  3067. return -EINVAL;
  3068. ret = _kgsl_gpumem_sync_cache(entry, 0, entry->memdesc.size,
  3069. KGSL_GPUMEM_CACHE_FLUSH);
  3070. kgsl_mem_entry_put(entry);
  3071. return ret;
  3072. }
  3073. long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
  3074. unsigned int cmd, void *data)
  3075. {
  3076. struct kgsl_process_private *private = dev_priv->process_priv;
  3077. struct kgsl_gpuobj_sync *param = data;
  3078. struct kgsl_gpuobj_sync_obj *objs;
  3079. struct kgsl_mem_entry **entries;
  3080. long ret = 0;
  3081. uint64_t size = 0;
  3082. int i;
  3083. void __user *ptr;
  3084. if (param->count == 0 || param->count > 128)
  3085. return -EINVAL;
  3086. objs = kcalloc(param->count, sizeof(*objs), GFP_KERNEL);
  3087. if (objs == NULL)
  3088. return -ENOMEM;
  3089. entries = kcalloc(param->count, sizeof(*entries), GFP_KERNEL);
  3090. if (entries == NULL) {
  3091. kfree(objs);
  3092. return -ENOMEM;
  3093. }
  3094. ptr = u64_to_user_ptr(param->objs);
  3095. for (i = 0; i < param->count; i++) {
  3096. ret = copy_struct_from_user(&objs[i], sizeof(*objs), ptr,
  3097. param->obj_len);
  3098. if (ret)
  3099. goto out;
  3100. entries[i] = kgsl_sharedmem_find_id(private, objs[i].id);
  3101. /* Not finding the ID is not a fatal failure - just skip it */
  3102. if (entries[i] == NULL)
  3103. continue;
  3104. if (!(objs[i].op & KGSL_GPUMEM_CACHE_RANGE))
  3105. size += entries[i]->memdesc.size;
  3106. else if (objs[i].offset < entries[i]->memdesc.size)
  3107. size += (entries[i]->memdesc.size - objs[i].offset);
  3108. if (check_full_flush(size, objs[i].op)) {
  3109. trace_kgsl_mem_sync_full_cache(i, size);
  3110. goto out;
  3111. }
  3112. ptr += sizeof(*objs);
  3113. }
  3114. for (i = 0; !ret && i < param->count; i++)
  3115. ret = _kgsl_gpumem_sync_cache(entries[i],
  3116. objs[i].offset, objs[i].length, objs[i].op);
  3117. out:
  3118. for (i = 0; i < param->count; i++)
  3119. kgsl_mem_entry_put(entries[i]);
  3120. kfree(entries);
  3121. kfree(objs);
  3122. return ret;
  3123. }
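/*
 * Illustrative userspace sketch (not part of the driver): syncing two
 * objects in a single call. The IOCTL_KGSL_GPUOBJ_SYNC macro name is an
 * assumption based on the msm_kgsl.h uapi.
 *
 *	struct kgsl_gpuobj_sync_obj objs[2] = {
 *		{ .id = id_a, .op = KGSL_GPUMEM_CACHE_CLEAN },
 *		{ .id = id_b, .op = KGSL_GPUMEM_CACHE_INV },
 *	};
 *	struct kgsl_gpuobj_sync param = {
 *		.objs = (uintptr_t)objs,
 *		.obj_len = sizeof(objs[0]),
 *		.count = 2,
 *	};
 *	ioctl(fd, IOCTL_KGSL_GPUOBJ_SYNC, &param);
 */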
  3124. static int kgsl_update_fault_details(struct kgsl_context *context,
  3125. void __user *ptr, u32 faultnents, u32 faultsize)
  3126. {
  3127. u32 size = min_t(u32, sizeof(struct kgsl_fault), faultsize);
  3128. u32 cur_idx[KGSL_FAULT_TYPE_MAX] = {0};
  3129. struct kgsl_fault_node *fault_node;
  3130. struct kgsl_fault *faults;
  3131. int i, ret = 0;
  3132. faults = kcalloc(KGSL_FAULT_TYPE_MAX, sizeof(struct kgsl_fault),
  3133. GFP_KERNEL);
  3134. if (!faults)
  3135. return -ENOMEM;
  3136. for (i = 0; i < faultnents; i++) {
  3137. struct kgsl_fault fault = {0};
  3138. if (copy_from_user(&fault, ptr + i * faultsize, size)) {
  3139. ret = -EFAULT;
  3140. goto err;
  3141. }
  3142. if (fault.type >= KGSL_FAULT_TYPE_MAX) {
  3143. ret = -EINVAL;
  3144. goto err;
  3145. }
  3146. memcpy(&faults[fault.type], &fault, sizeof(fault));
  3147. }
  3148. mutex_lock(&context->fault_lock);
  3149. list_for_each_entry(fault_node, &context->faults, node) {
  3150. u32 fault_type = fault_node->type;
  3151. if (cur_idx[fault_type] >= faults[fault_type].count)
  3152. continue;
  3153. switch (fault_type) {
  3154. case KGSL_FAULT_TYPE_PAGEFAULT:
  3155. size = sizeof(struct kgsl_pagefault_report);
  3156. }
  3157. size = min_t(u32, size, faults[fault_type].size);
  3158. if (copy_to_user(u64_to_user_ptr(faults[fault_type].fault +
  3159. cur_idx[fault_type] * faults[fault_type].size),
  3160. fault_node->priv, size)) {
  3161. ret = -EFAULT;
  3162. goto release_lock;
  3163. }
  3164. cur_idx[fault_type] += 1;
  3165. }
  3166. release_lock:
  3167. mutex_unlock(&context->fault_lock);
  3168. err:
  3169. kfree(faults);
  3170. return ret;
  3171. }
  3172. static int kgsl_update_fault_count(struct kgsl_context *context,
  3173. void __user *faults, u32 faultnents, u32 faultsize)
  3174. {
  3175. u32 size = min_t(u32, sizeof(struct kgsl_fault), faultsize);
  3176. u32 faultcount[KGSL_FAULT_TYPE_MAX] = {0};
  3177. struct kgsl_fault_node *fault_node;
  3178. int i, j;
  3179. mutex_lock(&context->fault_lock);
  3180. list_for_each_entry(fault_node, &context->faults, node)
  3181. faultcount[fault_node->type]++;
  3182. mutex_unlock(&context->fault_lock);
  3183. /* KGSL_FAULT_TYPE_NO_FAULT (i.e. 0) is not an actual fault type */
  3184. for (i = 0, j = 1; i < faultnents && j < KGSL_FAULT_TYPE_MAX; j++) {
  3185. struct kgsl_fault fault = {0};
  3186. if (!faultcount[j])
  3187. continue;
  3188. fault.type = j;
  3189. fault.count = faultcount[j];
  3190. if (copy_to_user(faults, &fault, size))
  3191. return -EFAULT;
  3192. faults += faultsize;
  3193. i++;
  3194. }
  3195. return 0;
  3196. }
  3197. long kgsl_ioctl_get_fault_report(struct kgsl_device_private *dev_priv,
  3198. unsigned int cmd, void *data)
  3199. {
  3200. struct kgsl_fault_report *param = data;
  3201. u32 size = min_t(u32, sizeof(struct kgsl_fault), param->faultsize);
  3202. void __user *ptr = u64_to_user_ptr(param->faultlist);
  3203. struct kgsl_context *context;
  3204. int i, ret = 0;
  3205. context = kgsl_context_get_owner(dev_priv, param->context_id);
  3206. if (!context)
  3207. return -EINVAL;
  3208. /* This IOCTL is valid for invalidated contexts only */
  3209. if (!(context->flags & KGSL_CONTEXT_FAULT_INFO) ||
  3210. !kgsl_context_invalid(context)) {
  3211. ret = -EINVAL;
  3212. goto err;
  3213. }
  3214. /* Return the number of fault types */
  3215. if (!param->faultlist) {
  3216. param->faultnents = KGSL_FAULT_TYPE_MAX;
  3217. kgsl_context_put(context);
  3218. return 0;
  3219. }
  3220. /* Check if it's a request to get fault counts or to fill the fault information */
  3221. for (i = 0; i < param->faultnents; i++) {
  3222. struct kgsl_fault fault = {0};
  3223. if (copy_from_user(&fault, ptr, size)) {
  3224. ret = -EFAULT;
  3225. goto err;
  3226. }
  3227. if (fault.fault)
  3228. break;
  3229. ptr += param->faultsize;
  3230. }
  3231. ptr = u64_to_user_ptr(param->faultlist);
  3232. if (i == param->faultnents)
  3233. ret = kgsl_update_fault_count(context, ptr, param->faultnents,
  3234. param->faultsize);
  3235. else
  3236. ret = kgsl_update_fault_details(context, ptr, param->faultnents,
  3237. param->faultsize);
  3238. err:
  3239. kgsl_context_put(context);
  3240. return ret;
  3241. }
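/*
 * Illustrative sketch (not part of the driver) of the three-step protocol
 * implemented above; it only succeeds on an invalidated context that was
 * created with KGSL_CONTEXT_FAULT_INFO. The IOCTL_KGSL_GET_FAULT_REPORT
 * macro name is an assumption based on the msm_kgsl.h uapi.
 *
 *	struct kgsl_fault_report param = {
 *		.context_id = ctxt_id,
 *		.faultsize = sizeof(struct kgsl_fault),
 *	};
 *
 *	1) With param.faultlist == 0, the ioctl reports the number of fault
 *	   types in param.faultnents.
 *	2) With faultlist pointing at that many zeroed struct kgsl_fault
 *	   entries, the driver fills in the type and count of each fault
 *	   type recorded on the context.
 *	3) With each entry's .fault pointing at a user buffer (and .count
 *	   and .size filled in), the driver copies out the individual
 *	   records, e.g. struct kgsl_pagefault_report for page faults.
 */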
  3242. int kgsl_add_fault(struct kgsl_context *context, u32 type, void *priv)
  3243. {
  3244. struct kgsl_fault_node *fault, *p, *tmp;
  3245. int length = 0;
  3246. ktime_t tout;
  3247. if (kgsl_context_is_bad(context))
  3248. return -EINVAL;
  3249. fault = kmalloc(sizeof(struct kgsl_fault_node), GFP_KERNEL);
  3250. if (!fault)
  3251. return -ENOMEM;
  3252. fault->type = type;
  3253. fault->priv = priv;
  3254. fault->time = ktime_get();
  3255. tout = ktime_sub_ms(ktime_get(), KGSL_MAX_FAULT_TIME_THRESHOLD);
  3256. mutex_lock(&context->fault_lock);
  3257. list_for_each_entry_safe(p, tmp, &context->faults, node) {
  3258. if (ktime_compare(p->time, tout) > 0) {
  3259. length++;
  3260. continue;
  3261. }
  3262. list_del(&p->node);
  3263. kfree(p->priv);
  3264. kfree(p);
  3265. }
  3266. if (length == KGSL_MAX_FAULT_ENTRIES) {
  3267. tmp = list_first_entry(&context->faults, struct kgsl_fault_node, node);
  3268. list_del(&tmp->node);
  3269. kfree(tmp->priv);
  3270. kfree(tmp);
  3271. }
  3272. list_add_tail(&fault->node, &context->faults);
  3273. mutex_unlock(&context->fault_lock);
  3274. return 0;
  3275. }
  3276. #ifdef CONFIG_ARM64
  3277. static uint64_t kgsl_filter_cachemode(uint64_t flags)
  3278. {
  3279. /*
  3280. * WRITETHROUGH is not supported in arm64, so we tell the user that we
  3281. * use WRITEBACK which is the default caching policy.
  3282. */
  3283. if (FIELD_GET(KGSL_CACHEMODE_MASK, flags) == KGSL_CACHEMODE_WRITETHROUGH) {
  3284. flags &= ~((uint64_t) KGSL_CACHEMODE_MASK);
  3285. flags |= FIELD_PREP(KGSL_CACHEMODE_MASK, KGSL_CACHEMODE_WRITEBACK);
  3286. }
  3287. return flags;
  3288. }
  3289. #else
  3290. static uint64_t kgsl_filter_cachemode(uint64_t flags)
  3291. {
  3292. return flags;
  3293. }
  3294. #endif
  3295. /* The largest allowable alignment for a GPU object is 32MB */
  3296. #define KGSL_MAX_ALIGN (32 * SZ_1M)
  3297. static u64 cap_alignment(struct kgsl_device *device, u64 flags)
  3298. {
  3299. u32 align = FIELD_GET(KGSL_MEMALIGN_MASK, flags);
  3300. if (align >= ilog2(KGSL_MAX_ALIGN)) {
  3301. /* Cap the alignment bits to the highest number we can handle */
  3302. dev_err(device->dev,
  3303. "Alignment too large; restricting to %dK\n",
  3304. KGSL_MAX_ALIGN >> 10);
  3305. align = ilog2(KGSL_MAX_ALIGN);
  3306. }
  3307. flags &= ~((u64) KGSL_MEMALIGN_MASK);
  3308. return flags | FIELD_PREP(KGSL_MEMALIGN_MASK, align);
  3309. }
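/*
 * Worked example (sketch): KGSL_MEMALIGN_MASK carries log2 of the
 * requested alignment. A request for 64 MB alignment arrives as
 * align = 26; since ilog2(KGSL_MAX_ALIGN) = ilog2(32 MB) = 25, the value
 * is capped to 25, the message above is printed, and the effective
 * alignment becomes 32 MB.
 */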
  3310. static u64 gpumem_max_va_size(struct kgsl_pagetable *pt, u64 flags)
  3311. {
  3312. if (flags & KGSL_MEMFLAGS_FORCE_32BIT)
  3313. return pt->compat_va_end - pt->compat_va_start;
  3314. return pt->va_end - pt->va_start;
  3315. }
  3316. static struct kgsl_mem_entry *
  3317. gpumem_alloc_vbo_entry(struct kgsl_device_private *dev_priv,
  3318. u64 size, u64 flags)
  3319. {
  3320. struct kgsl_process_private *private = dev_priv->process_priv;
  3321. struct kgsl_device *device = dev_priv->device;
  3322. struct kgsl_memdesc *memdesc;
  3323. struct kgsl_mem_entry *entry;
  3324. struct kgsl_pagetable *pt;
  3325. int ret;
  3326. /* Disallow specific flags */
  3327. if (flags & (KGSL_MEMFLAGS_GPUREADONLY | KGSL_CACHEMODE_MASK))
  3328. return ERR_PTR(-EINVAL);
  3329. if (flags & (KGSL_MEMFLAGS_USE_CPU_MAP | KGSL_MEMFLAGS_IOCOHERENT))
  3330. return ERR_PTR(-EINVAL);
  3331. /* Quietly ignore the other flags that aren't this list */
  3332. flags &= KGSL_MEMFLAGS_SECURE |
  3333. KGSL_MEMFLAGS_VBO |
  3334. KGSL_MEMFLAGS_VBO_NO_MAP_ZERO |
  3335. KGSL_MEMTYPE_MASK |
  3336. KGSL_MEMALIGN_MASK |
  3337. KGSL_MEMFLAGS_FORCE_32BIT;
  3338. if ((flags & KGSL_MEMFLAGS_SECURE) && !check_and_warn_secured(device))
  3339. return ERR_PTR(-EOPNOTSUPP);
  3340. pt = (flags & KGSL_MEMFLAGS_SECURE) ?
  3341. device->mmu.securepagetable : private->pagetable;
  3342. if (!size || (size > gpumem_max_va_size(pt, flags)))
  3343. return ERR_PTR(-EINVAL);
  3344. flags = cap_alignment(device, flags);
  3345. entry = kgsl_mem_entry_create();
  3346. if (!entry)
  3347. return ERR_PTR(-ENOMEM);
  3348. memdesc = &entry->memdesc;
  3349. ret = kgsl_sharedmem_allocate_vbo(device, memdesc, size, flags);
  3350. if (ret) {
  3351. kfree(entry);
  3352. return ERR_PTR(ret);
  3353. }
  3354. if (flags & KGSL_MEMFLAGS_SECURE)
  3355. entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
  3356. ret = kgsl_mem_entry_attach_to_process(device, private, entry);
  3357. if (ret)
  3358. goto out;
  3359. /* Map the zero page unless explicitly asked not to */
  3360. if (!(flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
  3361. ret = kgsl_mmu_map_zero_page_to_range(memdesc->pagetable,
  3362. memdesc, 0, memdesc->size);
  3363. if (!ret) {
  3364. trace_kgsl_mem_alloc(entry);
  3365. kgsl_mem_entry_commit_process(entry);
  3366. return entry;
  3367. }
  3368. out:
  3369. kgsl_sharedmem_free(memdesc);
  3370. kfree(entry);
  3371. return ERR_PTR(ret);
  3372. }
  3373. struct kgsl_mem_entry *gpumem_alloc_entry(
  3374. struct kgsl_device_private *dev_priv,
  3375. uint64_t size, uint64_t flags)
  3376. {
  3377. int ret;
  3378. struct kgsl_process_private *private = dev_priv->process_priv;
  3379. struct kgsl_mem_entry *entry;
  3380. struct kgsl_device *device = dev_priv->device;
  3381. u32 cachemode;
3382. /* On a 32-bit kernel there is nothing to do with this flag */
  3383. if (BITS_PER_LONG == 32)
  3384. flags &= ~((uint64_t) KGSL_MEMFLAGS_FORCE_32BIT);
  3385. if (flags & KGSL_MEMFLAGS_VBO)
  3386. return gpumem_alloc_vbo_entry(dev_priv, size, flags);
  3387. flags &= KGSL_MEMFLAGS_GPUREADONLY
  3388. | KGSL_CACHEMODE_MASK
  3389. | KGSL_MEMTYPE_MASK
  3390. | KGSL_MEMALIGN_MASK
  3391. | KGSL_MEMFLAGS_USE_CPU_MAP
  3392. | KGSL_MEMFLAGS_SECURE
  3393. | KGSL_MEMFLAGS_FORCE_32BIT
  3394. | KGSL_MEMFLAGS_IOCOHERENT
  3395. | KGSL_MEMFLAGS_GUARD_PAGE;
  3396. /* Return not supported error if secure memory isn't enabled */
  3397. if ((flags & KGSL_MEMFLAGS_SECURE) && !check_and_warn_secured(device))
  3398. return ERR_PTR(-EOPNOTSUPP);
  3399. flags = cap_alignment(device, flags);
  3400. /* For now only allow allocations up to 4G */
  3401. if (size == 0 || size > UINT_MAX)
  3402. return ERR_PTR(-EINVAL);
  3403. flags = kgsl_filter_cachemode(flags);
  3404. entry = kgsl_mem_entry_create();
  3405. if (entry == NULL)
  3406. return ERR_PTR(-ENOMEM);
  3407. if (IS_ENABLED(CONFIG_QCOM_KGSL_IOCOHERENCY_DEFAULT) &&
  3408. kgsl_cachemode_is_cached(flags))
  3409. flags |= KGSL_MEMFLAGS_IOCOHERENT;
  3410. ret = kgsl_allocate_user(device, &entry->memdesc,
  3411. size, flags, 0);
  3412. if (ret != 0)
  3413. goto err;
  3414. ret = kgsl_mem_entry_attach_and_map(device, private, entry);
  3415. if (ret != 0) {
  3416. kgsl_sharedmem_free(&entry->memdesc);
  3417. goto err;
  3418. }
  3419. cachemode = kgsl_memdesc_get_cachemode(&entry->memdesc);
  3420. /*
  3421. * Secure buffers cannot be reclaimed. For IO-COHERENT devices cached
3422. * buffers can be safely reclaimed. But avoid reclaiming cached buffers of
3423. * non IO-COHERENT devices, as we could get requests for cache operations
  3424. * on these buffers when they are reclaimed.
  3425. */
  3426. if (!(flags & KGSL_MEMFLAGS_SECURE) &&
  3427. (((flags & KGSL_MEMFLAGS_IOCOHERENT) &&
  3428. !(cachemode == KGSL_CACHEMODE_WRITETHROUGH)) ||
  3429. (!(flags & KGSL_MEMFLAGS_IOCOHERENT) &&
  3430. !(cachemode == KGSL_CACHEMODE_WRITEBACK) &&
  3431. !(cachemode == KGSL_CACHEMODE_WRITETHROUGH))))
  3432. entry->memdesc.priv |= KGSL_MEMDESC_CAN_RECLAIM;
  3433. kgsl_process_add_stats(private,
  3434. kgsl_memdesc_usermem_type(&entry->memdesc),
  3435. entry->memdesc.size);
  3436. trace_kgsl_mem_alloc(entry);
  3437. kgsl_mem_entry_commit_process(entry);
  3438. return entry;
  3439. err:
  3440. kfree(entry);
  3441. return ERR_PTR(ret);
  3442. }
  3443. static void copy_metadata(struct kgsl_mem_entry *entry, uint64_t metadata,
  3444. unsigned int len)
  3445. {
  3446. unsigned int i, size;
  3447. if (len == 0)
  3448. return;
  3449. size = min_t(unsigned int, len, sizeof(entry->metadata) - 1);
  3450. if (copy_from_user(entry->metadata, u64_to_user_ptr(metadata), size)) {
  3451. memset(entry->metadata, 0, sizeof(entry->metadata));
  3452. return;
  3453. }
3454. /* Clean up non-printable characters in the string */
  3455. for (i = 0; i < size && entry->metadata[i] != 0; i++) {
  3456. if (!isprint(entry->metadata[i]))
  3457. entry->metadata[i] = '?';
  3458. }
  3459. }
  3460. long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
  3461. unsigned int cmd, void *data)
  3462. {
  3463. struct kgsl_gpuobj_alloc *param = data;
  3464. struct kgsl_mem_entry *entry;
  3465. entry = gpumem_alloc_entry(dev_priv, param->size, param->flags);
  3466. if (IS_ERR(entry))
  3467. return PTR_ERR(entry);
  3468. copy_metadata(entry, param->metadata, param->metadata_len);
  3469. param->size = entry->memdesc.size;
  3470. param->flags = entry->memdesc.flags;
  3471. param->mmapsize = kgsl_memdesc_footprint(&entry->memdesc);
  3472. param->id = entry->id;
  3473. /* Put the extra ref from kgsl_mem_entry_create() */
  3474. kgsl_mem_entry_put(entry);
  3475. return 0;
  3476. }
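/*
 * Illustrative userspace sketch (not part of the driver): allocating a
 * 1 MB buffer with default flags. The IOCTL_KGSL_GPUOBJ_ALLOC macro name
 * is an assumption based on the msm_kgsl.h uapi.
 *
 *	struct kgsl_gpuobj_alloc param = {
 *		.size = 1 << 20,
 *		.flags = 0,
 *	};
 *	if (!ioctl(fd, IOCTL_KGSL_GPUOBJ_ALLOC, &param))
 *		/* param.id, param.size and param.mmapsize are now valid */;
 */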
  3477. long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
  3478. unsigned int cmd, void *data)
  3479. {
  3480. struct kgsl_gpumem_alloc *param = data;
  3481. struct kgsl_mem_entry *entry;
  3482. uint64_t flags = param->flags;
3483. /* Legacy functions don't support these advanced features */
  3484. flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
  3485. if (is_compat_task())
  3486. flags |= KGSL_MEMFLAGS_FORCE_32BIT;
  3487. entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size, flags);
  3488. if (IS_ERR(entry))
  3489. return PTR_ERR(entry);
  3490. param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
  3491. param->size = (size_t) entry->memdesc.size;
  3492. param->flags = (unsigned int) entry->memdesc.flags;
  3493. /* Put the extra ref from kgsl_mem_entry_create() */
  3494. kgsl_mem_entry_put(entry);
  3495. return 0;
  3496. }
  3497. long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
  3498. unsigned int cmd, void *data)
  3499. {
  3500. struct kgsl_gpumem_alloc_id *param = data;
  3501. struct kgsl_mem_entry *entry;
  3502. uint64_t flags = param->flags;
  3503. if (is_compat_task())
  3504. flags |= KGSL_MEMFLAGS_FORCE_32BIT;
  3505. entry = gpumem_alloc_entry(dev_priv, (uint64_t) param->size, flags);
  3506. if (IS_ERR(entry))
  3507. return PTR_ERR(entry);
  3508. param->id = entry->id;
  3509. param->flags = (unsigned int) entry->memdesc.flags;
  3510. param->size = (size_t) entry->memdesc.size;
  3511. param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
  3512. param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
  3513. /* Put the extra ref from kgsl_mem_entry_create() */
  3514. kgsl_mem_entry_put(entry);
  3515. return 0;
  3516. }
  3517. long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
  3518. unsigned int cmd, void *data)
  3519. {
  3520. struct kgsl_process_private *private = dev_priv->process_priv;
  3521. struct kgsl_gpumem_get_info *param = data;
  3522. struct kgsl_mem_entry *entry = NULL;
  3523. int result = 0;
  3524. if (param->id != 0)
  3525. entry = kgsl_sharedmem_find_id(private, param->id);
  3526. else if (param->gpuaddr != 0)
  3527. entry = kgsl_sharedmem_find(private, (uint64_t) param->gpuaddr);
  3528. if (entry == NULL)
  3529. return -EINVAL;
  3530. /*
  3531. * If any of the 64 bit address / sizes would end up being
  3532. * truncated, return -ERANGE. That will signal the user that they
  3533. * should use a more modern API
  3534. */
  3535. if (entry->memdesc.gpuaddr > ULONG_MAX)
  3536. result = -ERANGE;
  3537. param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
  3538. param->id = entry->id;
  3539. param->flags = (unsigned int) entry->memdesc.flags;
  3540. param->size = (size_t) entry->memdesc.size;
  3541. param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
  3542. /*
3543. * Entries can have multiple user mappings so there isn't any one address
  3544. * we can report. Plus, the user should already know their mappings, so
  3545. * there isn't any value in reporting it back to them.
  3546. */
  3547. param->useraddr = 0;
  3548. kgsl_mem_entry_put(entry);
  3549. return result;
  3550. }
  3551. long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
  3552. unsigned int cmd, void *data)
  3553. {
  3554. struct kgsl_process_private *private = dev_priv->process_priv;
  3555. struct kgsl_gpuobj_info *param = data;
  3556. struct kgsl_mem_entry *entry;
  3557. if (param->id == 0)
  3558. return -EINVAL;
  3559. entry = kgsl_sharedmem_find_id(private, param->id);
  3560. if (entry == NULL)
  3561. return -EINVAL;
  3562. param->id = entry->id;
  3563. param->gpuaddr = entry->memdesc.gpuaddr;
  3564. param->flags = entry->memdesc.flags;
  3565. param->size = entry->memdesc.size;
  3566. /* VBOs cannot be mapped, so don't report a va_len */
  3567. if (entry->memdesc.flags & KGSL_MEMFLAGS_VBO)
  3568. param->va_len = 0;
  3569. else
  3570. param->va_len = kgsl_memdesc_footprint(&entry->memdesc);
  3571. /*
3572. * Entries can have multiple user mappings so there isn't any one address
  3573. * we can report. Plus, the user should already know their mappings, so
  3574. * there isn't any value in reporting it back to them.
  3575. */
  3576. param->va_addr = 0;
  3577. kgsl_mem_entry_put(entry);
  3578. return 0;
  3579. }
  3580. long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
  3581. unsigned int cmd, void *data)
  3582. {
  3583. struct kgsl_process_private *private = dev_priv->process_priv;
  3584. struct kgsl_gpuobj_set_info *param = data;
  3585. struct kgsl_mem_entry *entry;
  3586. int ret = 0;
  3587. if (param->id == 0)
  3588. return -EINVAL;
  3589. entry = kgsl_sharedmem_find_id(private, param->id);
  3590. if (entry == NULL)
  3591. return -EINVAL;
  3592. if (param->flags & KGSL_GPUOBJ_SET_INFO_METADATA)
  3593. copy_metadata(entry, param->metadata, param->metadata_len);
  3594. if (param->flags & KGSL_GPUOBJ_SET_INFO_TYPE) {
  3595. if (FIELD_FIT(KGSL_MEMTYPE_MASK, param->type)) {
  3596. entry->memdesc.flags &= ~((uint64_t) KGSL_MEMTYPE_MASK);
  3597. entry->memdesc.flags |=
  3598. FIELD_PREP(KGSL_MEMTYPE_MASK, param->type);
  3599. } else
  3600. ret = -EINVAL;
  3601. }
  3602. kgsl_mem_entry_put(entry);
  3603. return ret;
  3604. }
  3605. /**
  3606. * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
  3607. * @dev_priv - pointer to the private device structure
  3608. * @cmd - the ioctl cmd passed from kgsl_ioctl
  3609. * @data - the user data buffer from kgsl_ioctl
  3610. * @returns 0 on success or error code on failure
  3611. */
  3612. long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
  3613. unsigned int cmd, void *data)
  3614. {
  3615. struct kgsl_timestamp_event *param = data;
  3616. int ret;
  3617. switch (param->type) {
  3618. case KGSL_TIMESTAMP_EVENT_FENCE:
  3619. ret = kgsl_add_fence_event(dev_priv->device,
  3620. param->context_id, param->timestamp, param->priv,
  3621. param->len, dev_priv);
  3622. break;
  3623. default:
  3624. ret = -EINVAL;
  3625. }
  3626. return ret;
  3627. }
  3628. static vm_fault_t
  3629. kgsl_memstore_vm_fault(struct vm_fault *vmf)
  3630. {
  3631. struct kgsl_memdesc *memdesc = vmf->vma->vm_private_data;
  3632. return memdesc->ops->vmfault(memdesc, vmf->vma, vmf);
  3633. }
  3634. static const struct vm_operations_struct kgsl_memstore_vm_ops = {
  3635. .fault = kgsl_memstore_vm_fault,
  3636. };
  3637. static inline void kgsl_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
  3638. {
  3639. #if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE)
  3640. vm_flags_clear(vma, flags);
  3641. #else
  3642. vma->vm_flags &= ~flags;
  3643. #endif
  3644. }
  3645. static inline void kgsl_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
  3646. {
  3647. #if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE)
  3648. vm_flags_set(vma, flags);
  3649. #else
  3650. vma->vm_flags |= flags;
  3651. #endif
  3652. }
  3653. static int
  3654. kgsl_mmap_memstore(struct file *file, struct kgsl_device *device,
  3655. struct vm_area_struct *vma)
  3656. {
  3657. struct kgsl_memdesc *memdesc = device->memstore;
  3658. unsigned int vma_size = vma->vm_end - vma->vm_start;
  3659. /* The memstore can only be mapped as read only */
  3660. if (vma->vm_flags & VM_WRITE)
  3661. return -EPERM;
  3662. kgsl_vm_flags_clear(vma, VM_MAYWRITE);
  3663. if (memdesc->size != vma_size) {
  3664. dev_err(device->dev, "Cannot partially map the memstore\n");
  3665. return -EINVAL;
  3666. }
  3667. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  3668. vma->vm_private_data = memdesc;
  3669. kgsl_vm_flags_set(vma, memdesc->ops->vmflags);
  3670. vma->vm_ops = &kgsl_memstore_vm_ops;
  3671. vma->vm_file = file;
  3672. return 0;
  3673. }
  3674. /*
  3675. * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
  3676. * Increase the refcount to make sure that the accounting stays correct
  3677. */
  3678. static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
  3679. {
  3680. struct kgsl_mem_entry *entry = vma->vm_private_data;
  3681. if (!kgsl_mem_entry_get(entry))
  3682. vma->vm_private_data = NULL;
  3683. atomic_inc(&entry->map_count);
  3684. }
  3685. static vm_fault_t
  3686. kgsl_gpumem_vm_fault(struct vm_fault *vmf)
  3687. {
  3688. struct kgsl_mem_entry *entry = vmf->vma->vm_private_data;
  3689. if (!entry)
  3690. return VM_FAULT_SIGBUS;
  3691. if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
  3692. return VM_FAULT_SIGBUS;
  3693. return entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
  3694. }
  3695. static void
  3696. kgsl_gpumem_vm_close(struct vm_area_struct *vma)
  3697. {
  3698. struct kgsl_mem_entry *entry = vma->vm_private_data;
  3699. if (!entry)
  3700. return;
  3701. /*
  3702. * Remove the memdesc from the mapped stat once all the mappings have
  3703. * gone away
  3704. */
  3705. if (!atomic_dec_return(&entry->map_count))
  3706. atomic64_sub(entry->memdesc.size, &entry->priv->gpumem_mapped);
  3707. kgsl_mem_entry_put(entry);
  3708. }
  3709. static const struct vm_operations_struct kgsl_gpumem_vm_ops = {
  3710. .open = kgsl_gpumem_vm_open,
  3711. .fault = kgsl_gpumem_vm_fault,
  3712. .close = kgsl_gpumem_vm_close,
  3713. };
  3714. static int
  3715. get_mmap_entry(struct kgsl_process_private *private,
  3716. struct kgsl_mem_entry **out_entry, unsigned long pgoff,
  3717. unsigned long len)
  3718. {
  3719. int ret = 0;
  3720. struct kgsl_mem_entry *entry;
  3721. entry = kgsl_sharedmem_find_id(private, pgoff);
  3722. if (entry == NULL)
  3723. entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
  3724. if (!entry)
  3725. return -EINVAL;
  3726. if (!entry->memdesc.ops ||
  3727. !entry->memdesc.ops->vmflags ||
  3728. !entry->memdesc.ops->vmfault) {
  3729. ret = -EINVAL;
  3730. goto err_put;
  3731. }
  3732. /* Don't allow ourselves to remap user memory */
  3733. if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_ADDR) {
  3734. ret = -EBUSY;
  3735. goto err_put;
  3736. }
  3737. if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
  3738. if (len != kgsl_memdesc_footprint(&entry->memdesc)) {
  3739. ret = -ERANGE;
  3740. goto err_put;
  3741. }
  3742. } else if (len != kgsl_memdesc_footprint(&entry->memdesc) &&
  3743. len != entry->memdesc.size) {
  3744. /*
  3745. * If cpu_map != gpumap then user can map either the
  3746. * footprint or the entry size
  3747. */
  3748. ret = -ERANGE;
  3749. goto err_put;
  3750. }
  3751. *out_entry = entry;
  3752. return 0;
  3753. err_put:
  3754. kgsl_mem_entry_put(entry);
  3755. return ret;
  3756. }
  3757. static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
  3758. struct kgsl_mem_entry *entry, unsigned long addr,
  3759. unsigned long size)
  3760. {
  3761. int ret;
  3762. /*
  3763. * Protect access to the gpuaddr here to prevent multiple vmas from
  3764. * trying to map a SVM region at the same time
  3765. */
  3766. spin_lock(&entry->memdesc.lock);
  3767. if (entry->memdesc.gpuaddr) {
  3768. spin_unlock(&entry->memdesc.lock);
  3769. return (unsigned long) -EBUSY;
  3770. }
  3771. ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
  3772. (uint64_t) size);
  3773. if (ret != 0) {
  3774. spin_unlock(&entry->memdesc.lock);
  3775. return (unsigned long) ret;
  3776. }
  3777. entry->memdesc.gpuaddr = (uint64_t) addr;
  3778. spin_unlock(&entry->memdesc.lock);
  3779. entry->memdesc.pagetable = private->pagetable;
  3780. ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
  3781. if (ret) {
  3782. kgsl_mmu_put_gpuaddr(private->pagetable, &entry->memdesc);
  3783. return (unsigned long) ret;
  3784. }
  3785. kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
  3786. entry->memdesc.size);
  3787. return addr;
  3788. }
  3789. static unsigned long get_align(struct kgsl_mem_entry *entry)
  3790. {
  3791. int bit = kgsl_memdesc_get_align(&entry->memdesc);
  3792. if (bit >= ilog2(SZ_2M))
  3793. return SZ_2M;
  3794. else if (bit >= ilog2(SZ_1M))
  3795. return SZ_1M;
  3796. else if (bit >= ilog2(SZ_64K))
  3797. return SZ_64K;
  3798. return SZ_4K;
  3799. }
  3800. static unsigned long set_svm_area(struct file *file,
  3801. struct kgsl_mem_entry *entry,
  3802. unsigned long addr, unsigned long len,
  3803. unsigned long flags)
  3804. {
  3805. struct kgsl_device_private *dev_priv = file->private_data;
  3806. struct kgsl_process_private *private = dev_priv->process_priv;
  3807. unsigned long ret;
  3808. /*
3809. * Do additional constraint checking on the address. Passing MAP_FIXED
  3810. * ensures that the address we want gets checked
  3811. */
  3812. ret = current->mm->get_unmapped_area(file, addr, len, 0,
  3813. flags & MAP_FIXED);
  3814. /* If it passes, attempt to set the region in the SVM */
  3815. if (!IS_ERR_VALUE(ret))
  3816. return _gpu_set_svm_region(private, entry, addr, len);
  3817. return ret;
  3818. }
  3819. static unsigned long get_svm_unmapped_area(struct file *file,
  3820. struct kgsl_mem_entry *entry,
  3821. unsigned long addr, unsigned long len,
  3822. unsigned long flags)
  3823. {
  3824. struct kgsl_device_private *dev_priv = file->private_data;
  3825. struct kgsl_process_private *private = dev_priv->process_priv;
  3826. unsigned long align = get_align(entry);
  3827. unsigned long ret, iova;
  3828. u64 start = 0, end = 0;
  3829. struct vm_area_struct *vma;
  3830. if (flags & MAP_FIXED) {
  3831. /* Even fixed addresses need to obey alignment */
  3832. if (!IS_ALIGNED(addr, align))
  3833. return -EINVAL;
  3834. return set_svm_area(file, entry, addr, len, flags);
  3835. }
  3836. /* If a hint was provided, try to use that first */
  3837. if (addr) {
  3838. if (IS_ALIGNED(addr, align)) {
  3839. ret = set_svm_area(file, entry, addr, len, flags);
  3840. if (!IS_ERR_VALUE(ret))
  3841. return ret;
  3842. }
  3843. }
  3844. /* Get the SVM range for the current process */
  3845. if (kgsl_mmu_svm_range(private->pagetable, &start, &end,
  3846. entry->memdesc.flags))
  3847. return -ERANGE;
  3848. /* Find the first gap in the iova map */
  3849. iova = kgsl_mmu_find_svm_region(private->pagetable, start, end,
  3850. len, align);
  3851. while (!IS_ERR_VALUE(iova)) {
  3852. vma = find_vma_intersection(current->mm, iova, iova + len - 1);
  3853. if (vma) {
  3854. iova = vma->vm_start;
  3855. } else {
  3856. ret = set_svm_area(file, entry, iova, len, flags);
  3857. if (!IS_ERR_VALUE(ret))
  3858. return ret;
  3859. /*
  3860. * set_svm_area will return -EBUSY if we tried to set up
  3861. * SVM on an object that already has a GPU address. If
  3862. * that happens don't bother walking the rest of the
  3863. * region
  3864. */
  3865. if ((long) ret == -EBUSY)
  3866. return -EBUSY;
  3867. }
  3868. iova = kgsl_mmu_find_svm_region(private->pagetable,
  3869. start, iova - 1, len, align);
  3870. }
  3871. return -ENOMEM;
  3872. }
  3873. static unsigned long
  3874. kgsl_get_unmapped_area(struct file *file, unsigned long addr,
  3875. unsigned long len, unsigned long pgoff,
  3876. unsigned long flags)
  3877. {
  3878. unsigned long val;
  3879. unsigned long vma_offset = pgoff << PAGE_SHIFT;
  3880. struct kgsl_device_private *dev_priv = file->private_data;
  3881. struct kgsl_process_private *private = dev_priv->process_priv;
  3882. struct kgsl_device *device = dev_priv->device;
  3883. struct kgsl_mem_entry *entry = NULL;
  3884. if (vma_offset == (unsigned long) KGSL_MEMSTORE_TOKEN_ADDRESS)
  3885. return get_unmapped_area(NULL, addr, len, pgoff, flags);
  3886. val = get_mmap_entry(private, &entry, pgoff, len);
  3887. if (val)
  3888. return val;
  3889. /* Do not allow CPU mappings for secure buffers */
  3890. if (kgsl_memdesc_is_secured(&entry->memdesc)) {
  3891. kgsl_mem_entry_put(entry);
  3892. return (unsigned long) -EPERM;
  3893. }
  3894. if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
  3895. val = current->mm->get_unmapped_area(file, addr, len, 0, flags);
  3896. if (IS_ERR_VALUE(val))
  3897. dev_err_ratelimited(device->dev,
  3898. "get_unmapped_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n",
  3899. pid_nr(private->pid), addr, pgoff, len,
  3900. (int) val);
  3901. } else {
  3902. val = get_svm_unmapped_area(file, entry, addr, len, flags);
  3903. /* if OOM, retry once after flushing lockless_workqueue */
  3904. if (val == -ENOMEM) {
  3905. flush_workqueue(kgsl_driver.lockless_workqueue);
  3906. val = get_svm_unmapped_area(file, entry, addr, len, flags);
  3907. }
  3908. if (IS_ERR_VALUE(val))
  3909. dev_err_ratelimited(device->dev,
  3910. "_get_svm_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n",
  3911. pid_nr(private->pid), addr, pgoff, len,
  3912. (int) val);
  3913. }
  3914. kgsl_mem_entry_put(entry);
  3915. return val;
  3916. }
  3917. static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
  3918. {
  3919. unsigned int cache;
  3920. unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
  3921. struct kgsl_device_private *dev_priv = file->private_data;
  3922. struct kgsl_process_private *private = dev_priv->process_priv;
  3923. struct kgsl_mem_entry *entry = NULL;
  3924. struct kgsl_device *device = dev_priv->device;
  3925. uint64_t flags;
  3926. int ret;
3927. /* Handle legacy behavior for the memstore */
  3928. if (vma_offset == (unsigned long) KGSL_MEMSTORE_TOKEN_ADDRESS)
  3929. return kgsl_mmap_memstore(file, device, vma);
  3930. /*
  3931. * The reference count on the entry that we get from
  3932. * get_mmap_entry() will be held until kgsl_gpumem_vm_close().
  3933. */
  3934. ret = get_mmap_entry(private, &entry, vma->vm_pgoff,
  3935. vma->vm_end - vma->vm_start);
  3936. if (ret)
  3937. return ret;
  3938. kgsl_vm_flags_set(vma, entry->memdesc.ops->vmflags);
  3939. vma->vm_private_data = entry;
  3940. /* Determine user-side caching policy */
  3941. cache = kgsl_memdesc_get_cachemode(&entry->memdesc);
  3942. switch (cache) {
  3943. case KGSL_CACHEMODE_WRITETHROUGH:
  3944. vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
  3945. if (pgprot_val(vma->vm_page_prot) ==
  3946. pgprot_val(pgprot_writebackcache(vma->vm_page_prot)))
  3947. WARN_ONCE(1, "WRITETHROUGH is deprecated for arm64");
  3948. break;
  3949. case KGSL_CACHEMODE_WRITEBACK:
  3950. vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
  3951. break;
  3952. case KGSL_CACHEMODE_UNCACHED:
  3953. case KGSL_CACHEMODE_WRITECOMBINE:
  3954. default:
  3955. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  3956. break;
  3957. }
  3958. vma->vm_ops = &kgsl_gpumem_vm_ops;
  3959. flags = entry->memdesc.flags;
  3960. if (!(flags & KGSL_MEMFLAGS_IOCOHERENT) &&
  3961. (cache == KGSL_CACHEMODE_WRITEBACK ||
  3962. cache == KGSL_CACHEMODE_WRITETHROUGH)) {
  3963. int i;
  3964. unsigned long addr = vma->vm_start;
  3965. struct kgsl_memdesc *m = &entry->memdesc;
  3966. for (i = 0; i < m->page_count; i++) {
  3967. struct page *page = m->pages[i];
  3968. vm_insert_page(vma, addr, page);
  3969. addr += PAGE_SIZE;
  3970. }
  3971. }
  3972. if (entry->memdesc.shmem_filp) {
  3973. fput(vma->vm_file);
  3974. vma->vm_file = get_file(entry->memdesc.shmem_filp);
  3975. }
  3976. /*
  3977. * kgsl gets the entry id or the gpu address through vm_pgoff.
  3978. * It is used during mmap and never needed again. But this vm_pgoff
3979. * has a different meaning in other parts of the kernel. Leaving it
3980. * non-zero can lead to wrong assumptions when a page is unmapped
3981. * from this vma.
  3982. */
	vma->vm_pgoff = 0;

	if (atomic_inc_return(&entry->map_count) == 1)
		atomic64_add(entry->memdesc.size, &entry->priv->gpumem_mapped);

	trace_kgsl_mem_mmap(entry, vma->vm_start);

	return 0;
}
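
/*
 * Illustrative sketch (editorial, not part of the driver): from userspace a
 * GPU buffer is typically mapped by handing the allocation's id (or its GPU
 * address) back through the mmap offset, which arrives above in
 * vma->vm_pgoff. The ioctl and field names below follow the msm_kgsl UAPI as
 * best recalled and should be treated as assumptions:
 *
 *	struct kgsl_gpuobj_alloc alloc = { .size = len, .flags = 0 };
 *
 *	ioctl(fd, IOCTL_KGSL_GPUOBJ_ALLOC, &alloc);
 *	ptr = mmap(NULL, alloc.mmapsize, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		fd, (off_t) alloc.id << PAGE_SHIFT);
 *
 * get_mmap_entry() then resolves that offset back to the kgsl_mem_entry that
 * kgsl_mmap() maps here.
 */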

#define KGSL_READ_MESSAGE "OH HAI GPU\n"

static ssize_t kgsl_read(struct file *filep, char __user *buf, size_t count,
		loff_t *pos)
{
	return simple_read_from_buffer(buf, count, pos,
			KGSL_READ_MESSAGE, strlen(KGSL_READ_MESSAGE) + 1);
}

static const struct file_operations kgsl_fops = {
	.owner = THIS_MODULE,
	.release = kgsl_release,
	.open = kgsl_open,
	.mmap = kgsl_mmap,
	.read = kgsl_read,
	.get_unmapped_area = kgsl_get_unmapped_area,
	.unlocked_ioctl = kgsl_ioctl,
	.compat_ioctl = kgsl_compat_ioctl,
};

struct kgsl_driver kgsl_driver = {
	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
	.proclist_lock = __RW_LOCK_UNLOCKED(kgsl_driver.proclist_lock),
	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
	.wp_list_lock = __SPIN_LOCK_UNLOCKED(kgsl_driver.wp_list_lock),
	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
	/*
	 * Full cache flushes are faster than line by line on at least
	 * 8064 and 8974 once the region to be flushed is > 16 MB.
	 */
	.full_cache_threshold = SZ_16M,
	.stats.vmalloc = ATOMIC_LONG_INIT(0),
	.stats.vmalloc_max = ATOMIC_LONG_INIT(0),
	.stats.page_alloc = ATOMIC_LONG_INIT(0),
	.stats.page_alloc_max = ATOMIC_LONG_INIT(0),
	.stats.coherent = ATOMIC_LONG_INIT(0),
	.stats.coherent_max = ATOMIC_LONG_INIT(0),
	.stats.secure = ATOMIC_LONG_INIT(0),
	.stats.secure_max = ATOMIC_LONG_INIT(0),
	.stats.mapped = ATOMIC_LONG_INIT(0),
	.stats.mapped_max = ATOMIC_LONG_INIT(0),
};

static void _unregister_device(struct kgsl_device *device)
{
	int minor;

	if (device->gpu_sysfs_kobj.state_initialized)
		kobject_put(&device->gpu_sysfs_kobj);

	mutex_lock(&kgsl_driver.devlock);
	for (minor = 0; minor < ARRAY_SIZE(kgsl_driver.devp); minor++) {
		if (device == kgsl_driver.devp[minor]) {
			device_destroy(kgsl_driver.class,
				MKDEV(MAJOR(kgsl_driver.major), minor));
			kgsl_driver.devp[minor] = NULL;
			break;
		}
	}
	mutex_unlock(&kgsl_driver.devlock);
}

/* sysfs_ops for the /sys/kernel/gpu kobject */
static ssize_t kgsl_gpu_sysfs_attr_show(struct kobject *kobj,
		struct attribute *__attr, char *buf)
{
	struct kgsl_gpu_sysfs_attr *attr = container_of(__attr,
			struct kgsl_gpu_sysfs_attr, attr);
	struct kgsl_device *device = container_of(kobj,
			struct kgsl_device, gpu_sysfs_kobj);

	if (attr->show)
		return attr->show(device, buf);

	return -EIO;
}

static ssize_t kgsl_gpu_sysfs_attr_store(struct kobject *kobj,
		struct attribute *__attr, const char *buf, size_t count)
{
	struct kgsl_gpu_sysfs_attr *attr = container_of(__attr,
			struct kgsl_gpu_sysfs_attr, attr);
	struct kgsl_device *device = container_of(kobj,
			struct kgsl_device, gpu_sysfs_kobj);

	if (attr->store)
		return attr->store(device, buf, count);

	return -EIO;
}

/* Dummy release function - we have nothing to do here */
static void kgsl_gpu_sysfs_release(struct kobject *kobj)
{
}

static const struct sysfs_ops kgsl_gpu_sysfs_ops = {
	.show = kgsl_gpu_sysfs_attr_show,
	.store = kgsl_gpu_sysfs_attr_store,
};

static struct kobj_type kgsl_gpu_sysfs_ktype = {
	.sysfs_ops = &kgsl_gpu_sysfs_ops,
	.release = kgsl_gpu_sysfs_release,
};
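
/*
 * Illustrative sketch (editorial): the wrappers above forward to per-attribute
 * show/store callbacks that take the kgsl_device directly. Assuming
 * kgsl_gpu_sysfs_attr wraps a struct attribute plus those two callbacks, a
 * file under /sys/kernel/gpu would be declared roughly as below; the names
 * and the printed field are hypothetical:
 *
 *	static ssize_t example_show(struct kgsl_device *device, char *buf)
 *	{
 *		return scnprintf(buf, PAGE_SIZE, "%d\n", device->state);
 *	}
 *
 *	static struct kgsl_gpu_sysfs_attr gpu_attr_example = {
 *		.attr = { .name = "example", .mode = 0444 },
 *		.show = example_show,
 *	};
 *
 * and then created against the kobject registered in _register_device() with
 * sysfs_create_file(&device->gpu_sysfs_kobj, &gpu_attr_example.attr).
 */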

static int _register_device(struct kgsl_device *device)
{
	static u64 dma_mask = DMA_BIT_MASK(64);
	static struct device_dma_parameters dma_parms;
	int minor, ret;
	dev_t dev;

	/* Find a minor for the device */
	mutex_lock(&kgsl_driver.devlock);
	for (minor = 0; minor < ARRAY_SIZE(kgsl_driver.devp); minor++) {
		if (kgsl_driver.devp[minor] == NULL) {
			kgsl_driver.devp[minor] = device;
			break;
		}
	}
	mutex_unlock(&kgsl_driver.devlock);

	if (minor == ARRAY_SIZE(kgsl_driver.devp)) {
		pr_err("kgsl: minor devices exhausted\n");
		return -ENODEV;
	}

	/* Create the device */
	dev = MKDEV(MAJOR(kgsl_driver.major), minor);
	device->dev = device_create(kgsl_driver.class,
		&device->pdev->dev,
		dev, device,
		device->name);

	if (IS_ERR(device->dev)) {
		mutex_lock(&kgsl_driver.devlock);
		kgsl_driver.devp[minor] = NULL;
		mutex_unlock(&kgsl_driver.devlock);
		ret = PTR_ERR(device->dev);
		pr_err("kgsl: device_create(%s): %d\n", device->name, ret);
		return ret;
	}

	device->dev->dma_mask = &dma_mask;
	device->dev->dma_parms = &dma_parms;

	dma_set_max_seg_size(device->dev, DMA_BIT_MASK(32));

	set_dma_ops(device->dev, NULL);

	WARN_ON(kobject_init_and_add(&device->gpu_sysfs_kobj, &kgsl_gpu_sysfs_ktype,
		kernel_kobj, "gpu"));

	return 0;
}

int kgsl_request_irq(struct platform_device *pdev, const char *name,
		irq_handler_t handler, void *data)
{
	int ret, num = platform_get_irq_byname(pdev, name);

	if (num < 0)
		return num;

	ret = devm_request_irq(&pdev->dev, num, handler, IRQF_TRIGGER_HIGH,
		name, data);

	if (ret) {
		dev_err(&pdev->dev, "Unable to get interrupt %s: %d\n",
			name, ret);
		return ret;
	}

	disable_irq(num);
	return num;
}

int kgsl_request_irq_optional(struct platform_device *pdev, const char *name,
		irq_handler_t handler, void *data)
{
	int ret, num = platform_get_irq_byname_optional(pdev, name);

	if (num < 0)
		return num;

	ret = devm_request_irq(&pdev->dev, num, handler, IRQF_TRIGGER_HIGH,
		name, data);

	if (ret) {
		dev_err(&pdev->dev, "Unable to get interrupt %s: %d\n",
			name, ret);
		return ret;
	}

	disable_irq(num);
	return num;
}
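
/*
 * Illustrative sketch (editorial): both helpers return the Linux IRQ number
 * with the line requested but left disabled, so a caller is expected to stash
 * the number and enable it once the hardware is ready. The interrupt name and
 * the field it is stored in are assumptions used only for illustration:
 *
 *	int num = kgsl_request_irq(pdev, "kgsl_3d0_irq", handler, device);
 *
 *	if (num < 0)
 *		return num;
 *	device->pwrctrl.interrupt_num = num;
 *	...
 *	enable_irq(device->pwrctrl.interrupt_num);
 */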

int kgsl_of_property_read_ddrtype(struct device_node *node, const char *base,
		u32 *ptr)
{
	char str[32];
	int ddr = of_fdt_get_ddrtype();

	/* of_fdt_get_ddrtype returns error if the DDR type isn't determined */
	if (ddr >= 0) {
		int ret;

		/* Construct expanded string for the DDR type */
		ret = snprintf(str, sizeof(str), "%s-ddr%d", base, ddr);

		/* WARN_ON() if the array size was too small for the string */
		if (WARN_ON(ret > sizeof(str)))
			return -ENOMEM;

		/* Read the expanded string */
		if (!of_property_read_u32(node, str, ptr))
			return 0;
	}

	/* Read the default string */
	return of_property_read_u32(node, base, ptr);
}
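
/*
 * Illustrative sketch (editorial): this lets a devicetree carry a DDR-specific
 * override next to a generic default. With a detected DDR type of 8, a lookup
 * of base "qcom,bus-freq" first tries "qcom,bus-freq-ddr8" and then falls back
 * to "qcom,bus-freq". The property names and values are hypothetical:
 *
 *	qcom,bus-freq = <150000000>;
 *	qcom,bus-freq-ddr8 = <200000000>;
 *
 *	u32 freq;
 *	kgsl_of_property_read_ddrtype(node, "qcom,bus-freq", &freq);
 */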

int kgsl_device_platform_probe(struct kgsl_device *device)
{
	struct platform_device *pdev = device->pdev;
	int status = -EINVAL;

	status = _register_device(device);
	if (status)
		return status;

	/* Can return -EPROBE_DEFER */
	status = kgsl_pwrctrl_init(device);
	if (status)
		goto error;

	device->events_worker = kthread_create_worker(0, "kgsl-events");

	if (IS_ERR(device->events_worker)) {
		status = PTR_ERR(device->events_worker);
		dev_err(device->dev, "Failed to create events worker ret=%d\n", status);
		goto error_pwrctrl_close;
	}

	sched_set_fifo(device->events_worker->task);

	status = kgsl_reclaim_init();
	if (status)
		goto error_pwrctrl_close;

	rwlock_init(&device->context_lock);
	spin_lock_init(&device->submit_lock);

	idr_init(&device->timelines);
	spin_lock_init(&device->timelines_lock);

	kgsl_device_debugfs_init(device);

	dma_set_coherent_mask(&pdev->dev, KGSL_DMA_BIT_MASK);

	/* Set up the GPU events for the device */
	kgsl_device_events_probe(device);

	/* Initialize common sysfs entries */
	kgsl_pwrctrl_init_sysfs(device);

	timer_setup(&device->work_period_timer, kgsl_work_period_timer, 0);
	spin_lock_init(&device->work_period_lock);
	INIT_WORK(&device->work_period_ws, _log_gpu_work_events);

	return 0;

error_pwrctrl_close:
	if (!IS_ERR(device->events_worker))
		kthread_destroy_worker(device->events_worker);

	kgsl_pwrctrl_close(device);
error:
	_unregister_device(device);
	return status;
}

void kgsl_device_platform_remove(struct kgsl_device *device)
{
	del_timer(&device->work_period_timer);
	kthread_destroy_worker(device->events_worker);
	kgsl_device_snapshot_close(device);
	idr_destroy(&device->context_idr);
	idr_destroy(&device->timelines);
	kgsl_device_events_remove(device);
	kgsl_free_globals(device);
	kgsl_pwrctrl_close(device);
	kgsl_device_debugfs_close(device);
	_unregister_device(device);
}

void kgsl_core_exit(void)
{
	kgsl_exit_page_pools();
	kgsl_eventlog_exit();

	if (kgsl_driver.workqueue) {
		destroy_workqueue(kgsl_driver.workqueue);
		kgsl_driver.workqueue = NULL;
	}

	if (kgsl_driver.lockless_workqueue) {
		destroy_workqueue(kgsl_driver.lockless_workqueue);
		kgsl_driver.lockless_workqueue = NULL;
	}

	kgsl_events_exit();
	kgsl_core_debugfs_close();
	kgsl_reclaim_close();

	/*
	 * Only call device_unregister() if kgsl_driver.virtdev was actually
	 * registered; checking its class pointer tells us whether it has
	 * been populated.
	 */
	if (kgsl_driver.virtdev.class)
		device_unregister(&kgsl_driver.virtdev);

	if (kgsl_driver.class) {
		class_destroy(kgsl_driver.class);
		kgsl_driver.class = NULL;
	}

	kgsl_drawobjs_cache_exit();

	kfree(memfree.list);
	memset(&memfree, 0, sizeof(memfree));

	unregister_chrdev_region(kgsl_driver.major,
		ARRAY_SIZE(kgsl_driver.devp));

	sysstats_unregister_kgsl_stats_cb();
}

int __init kgsl_core_init(void)
{
	int result = 0;

	KGSL_BOOT_MARKER("KGSL Init");

	/* alloc major and minor device numbers */
	result = alloc_chrdev_region(&kgsl_driver.major, 0,
		ARRAY_SIZE(kgsl_driver.devp), "kgsl");

	if (result < 0) {
		pr_err("kgsl: alloc_chrdev_region failed err = %d\n", result);
		goto err;
	}

	cdev_init(&kgsl_driver.cdev, &kgsl_fops);
	kgsl_driver.cdev.owner = THIS_MODULE;
	kgsl_driver.cdev.ops = &kgsl_fops;
	result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
		ARRAY_SIZE(kgsl_driver.devp));

	if (result) {
		pr_err("kgsl: cdev_add() failed, dev_num= %d,result= %d\n",
			kgsl_driver.major, result);
		goto err;
	}

	kgsl_driver.class = class_create(THIS_MODULE, "kgsl");

	if (IS_ERR(kgsl_driver.class)) {
		result = PTR_ERR(kgsl_driver.class);
		pr_err("kgsl: failed to create class for kgsl\n");
		goto err;
	}

	/*
	 * Make a virtual device for managing core related things
	 * in sysfs
	 */
	kgsl_driver.virtdev.class = kgsl_driver.class;
	dev_set_name(&kgsl_driver.virtdev, "kgsl");
	result = device_register(&kgsl_driver.virtdev);
	if (result) {
		put_device(&kgsl_driver.virtdev);
		pr_err("kgsl: device_register failed\n");
		goto err;
	}

	/* Make kobjects in the virtual device for storing statistics */
	kgsl_driver.ptkobj =
		kobject_create_and_add("pagetables",
			&kgsl_driver.virtdev.kobj);

	kgsl_driver.prockobj =
		kobject_create_and_add("proc",
			&kgsl_driver.virtdev.kobj);

	kgsl_core_debugfs_init();
	kgsl_sharedmem_init_sysfs();

	/* Initialize the memory pools */
	kgsl_probe_page_pools();

	kgsl_register_shmem_callback();

	INIT_LIST_HEAD(&kgsl_driver.process_list);
	INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
	INIT_LIST_HEAD(&kgsl_driver.wp_list);

	kgsl_driver.workqueue = alloc_workqueue("kgsl-workqueue",
		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!kgsl_driver.workqueue) {
		pr_err("kgsl: Failed to allocate kgsl workqueue\n");
		result = -ENOMEM;
		goto err;
	}

	/*
	 * The lockless workqueue is used to perform work which doesn't need to
	 * take the device mutex
	 */
	kgsl_driver.lockless_workqueue = alloc_workqueue("kgsl-lockless-work",
		WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!kgsl_driver.lockless_workqueue) {
		pr_err("kgsl: Failed to allocate lockless workqueue\n");
		result = -ENOMEM;
		goto err;
	}
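
	/*
	 * Illustrative sketch (editorial): work that must not wait on the
	 * device mutex is queued on this workqueue with the stock API, e.g.
	 *
	 *	queue_work(kgsl_driver.lockless_workqueue, &entry->work);
	 *
	 * which is also why the OOM retry in _get_svm_area() flushes this
	 * queue before trying again. The &entry->work field name here is an
	 * assumption used only for illustration.
	 */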

	kgsl_eventlog_init();

	kgsl_events_init();

	result = kgsl_drawobjs_cache_init();
	if (result)
		goto err;

	memfree.list = kcalloc(MEMFREE_ENTRIES, sizeof(struct memfree_entry),
		GFP_KERNEL);

	sysstats_register_kgsl_stats_cb(kgsl_get_stats);

	KGSL_BOOT_MARKER("KGSL Ready");

	return 0;

err:
	kgsl_core_exit();
	return result;
}