// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"
/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message: Transmit message
 * @rx_len:     Receive message length
 * @xfer_buf:   Preallocated buffer to store receive message
 *              Since we work with request-ACK protocol, we can
 *              reuse the same buffer for the rx path as we
 *              use for the tx path.
 * @done:       completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:     Counting Semaphore for managing max simultaneous
 *                      Messages.
 * @xfer_block:         Preallocated Message array
 * @xfer_alloc_table:   Bitmap table for allocated messages.
 *                      Index of this bitmap table is also used for message
 *                      sequence identifier.
 * @xfer_lock:          Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:    Host identifier representing the compute entity
 * @max_rx_timeout_ms:  Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:           Maximum number of messages that can be pending
 *                      simultaneously in the system
 * @max_msg_size:       Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:        Device pointer
 * @desc:       SoC description for this instance
 * @nb: Reboot Notifier block
 * @d:          Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:     Instance of TI SCI handle to send to clients.
 * @cl:         Mailbox Client
 * @chan_tx:    Transmit mailbox channel
 * @chan_rx:    Receive mailbox channel
 * @minfo:      Message info
 * @node:       list head
 * @host_id:    Host ID
 * @users:      Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:      sequence file pointer
 * @unused: unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to leave NULL terminated last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:   platform device pointer
 * @info:   Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50] = "ti_sci_debug@";

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Setup NULL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
					      sizeof(debug_name) -
					      sizeof("ti_sci_debug@")),
				      0444, NULL, info, &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */
/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:    Device pointer corresponding to the SCI entity
 * @hdr:    pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl: client pointer
 * @m:  mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}
/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: A valid &struct ti_sci_xfer on success, else an ERR_PTR-encoded
 * error.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->tx_message.chan_rx = info->chan_rx;
	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}
/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all went well, the corresponding error if the transmit
 * fails, or -ETIMEDOUT if no response arrived in time.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;
	bool done_state = true;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	if (system_state <= SYSTEM_RUNNING) {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout))
			ret = -ETIMEDOUT;
	} else {
		/*
		 * If we are !running, we cannot use wait_for_completion_timeout
		 * during noirq phase, so we must manually poll the completion.
		 */
		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
					       done_state, 1,
					       info->desc->max_rx_timeout_ms * 1000,
					       false, &xfer->done);
	}

	if (ret == -ETIMEDOUT)
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}
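
/*
 * The helpers above define the canonical command flow that every
 * ti_sci_cmd_*() implementation below follows: get a transfer slot,
 * fill the request, do the transfer, check the generic ACK, put the
 * slot back. A minimal sketch of that flow (the function name
 * ti_sci_example_cmd and the reuse of TI_SCI_MSG_VERSION here are
 * purely illustrative, not part of the driver):
 */
static int __maybe_unused ti_sci_example_cmd(struct ti_sci_info *info)
{
	struct ti_sci_msg_resp_version *resp;
	struct ti_sci_xfer *xfer;
	int ret;

	/* Reserve a transfer slot; the sequence ID lands in the header */
	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*resp));
	if (IS_ERR(xfer))
		return PTR_ERR(xfer);

	/* Request payload (if any) would be filled in xfer->xfer_buf here */

	/* Send and wait for the firmware response */
	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		goto fail;

	/* Check the generic ACK flag before trusting the payload */
	resp = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;

fail:
	/* Release the slot so the sequence ID can be reused */
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}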
/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Release the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}
/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}
/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}
/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
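
/*
 * Every clock request below encodes the clock identifier the same way:
 * IDs below 255 travel in the legacy u8 @clk_id field, while larger IDs
 * set @clk_id to 255 and carry the full value in the u32 @clk_id_32
 * field. A sketch of that rule factored out as a macro (hypothetical,
 * not part of the driver; the commands in this file open-code it
 * instead):
 */
#define ti_sci_encode_clk_id(req, id)			\
	do {						\
		if ((id) < 255) {			\
			(req)->clk_id = (id);		\
		} else {				\
			(req)->clk_id = 255;		\
			(req)->clk_id_32 = (id);	\
		}					\
	} while (0)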
/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state: State requested for clock to move to
 * @current_state: State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}
/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);

	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);

	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);

	return 0;
}
/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
  1200. /**
  1201. * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
  1202. * @handle: pointer to TI SCI handle
  1203. * @dev_id: Device identifier this request is for
  1204. * @clk_id: Clock identifier for the device for this request.
  1205. * Each device has it's own set of clock inputs. This indexes
  1206. * which clock input to modify.
  1207. * @num_parents: Returns he number of parents to the current clock.
  1208. *
  1209. * Return: 0 if all went well, else returns appropriate error value.
  1210. */
  1211. static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
  1212. u32 dev_id, u32 clk_id,
  1213. u32 *num_parents)
  1214. {
  1215. struct ti_sci_info *info;
  1216. struct ti_sci_msg_req_get_clock_num_parents *req;
  1217. struct ti_sci_msg_resp_get_clock_num_parents *resp;
  1218. struct ti_sci_xfer *xfer;
  1219. struct device *dev;
  1220. int ret = 0;
  1221. if (IS_ERR(handle))
  1222. return PTR_ERR(handle);
  1223. if (!handle || !num_parents)
  1224. return -EINVAL;
  1225. info = handle_to_ti_sci_info(handle);
  1226. dev = info->dev;
  1227. xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
  1228. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1229. sizeof(*req), sizeof(*resp));
  1230. if (IS_ERR(xfer)) {
  1231. ret = PTR_ERR(xfer);
  1232. dev_err(dev, "Message alloc failed(%d)\n", ret);
  1233. return ret;
  1234. }
  1235. req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
  1236. req->dev_id = dev_id;
  1237. if (clk_id < 255) {
  1238. req->clk_id = clk_id;
  1239. } else {
  1240. req->clk_id = 255;
  1241. req->clk_id_32 = clk_id;
  1242. }
  1243. ret = ti_sci_do_xfer(info, xfer);
  1244. if (ret) {
  1245. dev_err(dev, "Mbox send fail %d\n", ret);
  1246. goto fail;
  1247. }
  1248. resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
  1249. if (!ti_sci_is_response_ack(resp)) {
  1250. ret = -ENODEV;
  1251. } else {
  1252. if (resp->num_parents < 255)
  1253. *num_parents = resp->num_parents;
  1254. else
  1255. *num_parents = resp->num_parents_32;
  1256. }
  1257. fail:
  1258. ti_sci_put_one_xfer(&info->minfo, xfer);
  1259. return ret;
  1260. }
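
/*
 * Editor's usage sketch (illustrative only, not part of this driver): a
 * client holding a TI SCI handle can combine the three parent ops above
 * to reparent a device clock only when an alternative input exists. The
 * device/clock/parent IDs (82, 2, 5) are hypothetical.
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *	u32 cur_parent, num_parents;
 *	int ret;
 *
 *	ret = cops->get_num_parents(handle, 82, 2, &num_parents);
 *	if (!ret && num_parents > 1) {
 *		ret = cops->get_parent(handle, 82, 2, &cur_parent);
 *		if (!ret && cur_parent != 5)
 *			ret = cops->set_parent(handle, 82, 2, 5);
 *	}
 */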
/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
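
/*
 * Editor's usage sketch (illustrative only, not part of this driver): the
 * three frequency ops are typically combined — query the best match
 * within a window, then program it. The device/clock IDs, the 100 MHz
 * target and the 10% window below are all hypothetical.
 *
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *	u64 target = 100000000, match;
 *	int ret;
 *
 *	ret = cops->get_best_match_freq(handle, 82, 2,
 *					target - target / 10, target,
 *					target + target / 10, &match);
 *	if (!ret)
 *		ret = cops->set_freq(handle, 82, 2, match, match, match);
 */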
/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_reboot *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		ret = 0;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
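
/*
 * Editor's usage sketch (illustrative only, not part of this driver):
 * system reset is exposed to clients through core_ops, typically wired
 * into a platform restart handler.
 *
 *	ret = handle->ops.core_ops.reboot_device(handle);
 *	if (ret)
 *		dev_err(dev, "reboot request failed (%d)\n", ret);
 */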
/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:	Pointer to TISCI handle.
 * @dev_id:	TISCI device ID.
 * @subtype:	Resource assignment subtype that is being requested
 *		from the given device.
 * @s_host:	Host processor ID to which the resources are allocated
 * @desc:	Pointer to ti_sci_resource_desc to be updated with the
 *		resource range start index and number of resources
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     struct ti_sci_resource_desc *desc)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range *req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !desc)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
	req->secondary_host = s_host;
	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else if (!resp->range_num && !resp->range_num_sec) {
		/* Neither of the two resource ranges is valid */
		ret = -ENODEV;
	} else {
		desc->start = resp->range_start;
		desc->num = resp->range_num;
		desc->start_sec = resp->range_start_sec;
		desc->num_sec = resp->range_num_sec;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
 *				   host that is the same as the TI SCI
 *				   interface host.
 * @handle:	Pointer to TISCI handle.
 * @dev_id:	TISCI device ID.
 * @subtype:	Resource assignment subtype that is being requested
 *		from the given device.
 * @desc:	Pointer to ti_sci_resource_desc to be updated with the
 *		resource range start index and number of resources
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 subtype,
					 struct ti_sci_resource_desc *desc)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype,
					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					 desc);
}

/**
 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
 *					      assigned to a specified host.
 * @handle:	Pointer to TISCI handle.
 * @dev_id:	TISCI device ID.
 * @subtype:	Resource assignment subtype that is being requested
 *		from the given device.
 * @s_host:	Host processor ID to which the resources are allocated
 * @desc:	Pointer to ti_sci_resource_desc to be updated with the
 *		resource range start index and number of resources
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
					     u32 dev_id, u8 subtype, u8 s_host,
					     struct ti_sci_resource_desc *desc)
{
	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
}
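
/*
 * Editor's usage sketch (illustrative only, not part of this driver): a
 * client asks for the range of a resource type/subtype assigned to its
 * own host and reads back the primary range. The device ID (235) and
 * subtype (1) are hypothetical.
 *
 *	struct ti_sci_resource_desc desc = { 0 };
 *	int ret;
 *
 *	ret = handle->ops.rm_core_ops.get_range(handle, 235, 1, &desc);
 *	if (!ret)
 *		pr_info("resources [%u..%u)\n", desc.start,
 *			desc.start + desc.num);
 */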
/**
 * ti_sci_manage_irq() - Helper api to configure/release the irq route between
 *			 the requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID to which the irq/event is being
 *			requested for.
 * @type:		Request type irq set or release.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
			     u32 valid_params, u16 src_id, u16 src_index,
			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
			     u16 global_event, u8 vint_status_bit, u8 s_host,
			     u16 type)
{
	struct ti_sci_msg_req_manage_irq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
	req->valid_params = valid_params;
	req->src_id = src_id;
	req->src_index = src_index;
	req->dst_id = dst_id;
	req->dst_host_irq = dst_host_irq;
	req->ia_id = ia_id;
	req->vint = vint;
	req->global_event = global_event;
	req->vint_status_bit = vint_status_bit;
	req->secondary_host = s_host;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_set_irq() - Helper api to configure the irq route between the
 *		      requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID to which the irq/event is being
 *			requested for.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
			  u16 src_id, u16 src_index, u16 dst_id,
			  u16 dst_host_irq, u16 ia_id, u16 vint,
			  u16 global_event, u8 vint_status_bit, u8 s_host)
{
	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
		 __func__, valid_params, src_id, src_index,
		 dst_id, dst_host_irq, ia_id, vint, global_event,
		 vint_status_bit);

	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
				 dst_id, dst_host_irq, ia_id, vint,
				 global_event, vint_status_bit, s_host,
				 TI_SCI_MSG_SET_IRQ);
}

/**
 * ti_sci_free_irq() - Helper api to free the irq route between the
 *		       requested source and destination
 * @handle:		Pointer to TISCI handle.
 * @valid_params:	Bit fields defining the validity of certain params
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID to which the irq/event is being
 *			requested for.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
			   u16 src_id, u16 src_index, u16 dst_id,
			   u16 dst_host_irq, u16 ia_id, u16 vint,
			   u16 global_event, u8 vint_status_bit, u8 s_host)
{
	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
		 __func__, valid_params, src_id, src_index,
		 dst_id, dst_host_irq, ia_id, vint, global_event,
		 vint_status_bit);

	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
				 dst_id, dst_host_irq, ia_id, vint,
				 global_event, vint_status_bit, s_host,
				 TI_SCI_MSG_FREE_IRQ);
}

/**
 * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
 *			  source and destination.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
			      u16 src_index, u16 dst_id, u16 dst_host_irq)
{
	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;

	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
			      dst_host_irq, 0, 0, 0, 0, 0);
}

/**
 * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
 *				requested source and Interrupt Aggregator.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
				    u16 src_id, u16 src_index, u16 ia_id,
				    u16 vint, u16 global_event,
				    u8 vint_status_bit)
{
	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
			   MSG_FLAG_GLB_EVNT_VALID |
			   MSG_FLAG_VINT_STS_BIT_VALID;

	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
			      ia_id, vint, global_event, vint_status_bit, 0);
}

/**
 * ti_sci_cmd_free_irq() - Free a host irq route between the requested
 *			   source and destination.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @dst_id:		Device ID of the IRQ destination
 * @dst_host_irq:	IRQ number of the destination device
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
			       u16 src_index, u16 dst_id, u16 dst_host_irq)
{
	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;

	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
			       dst_host_irq, 0, 0, 0, 0, 0);
}

/**
 * ti_sci_cmd_free_event_map() - Free an event map between the requested source
 *				 and Interrupt Aggregator.
 * @handle:		Pointer to TISCI handle.
 * @src_id:		Device ID of the IRQ source
 * @src_index:		IRQ source index within the source device
 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
 * @vint:		Virtual interrupt to be used within the IA
 * @global_event:	Global event number to be used for the requesting event
 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
				     u16 src_id, u16 src_index, u16 ia_id,
				     u16 vint, u16 global_event,
				     u8 vint_status_bit)
{
	u32 valid_params = MSG_FLAG_IA_ID_VALID |
			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
			   MSG_FLAG_VINT_STS_BIT_VALID;

	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
			       ia_id, vint, global_event, vint_status_bit, 0);
}
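
/*
 * Editor's usage sketch (illustrative only, not part of this driver): a
 * direct host IRQ route is configured and later released with the same
 * arguments; the rm_irq_ops table hides the valid_params bookkeeping.
 * All IDs below (src 100/index 4, dst 56, host irq 172) are hypothetical.
 *
 *	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
 *	int ret;
 *
 *	ret = iops->set_irq(handle, 100, 4, 56, 172);
 *	if (!ret) {
 *		// ... use the interrupt, then tear the route down ...
 *		iops->free_irq(handle, 100, 4, 56, 172);
 *	}
 */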
/**
 * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_ring_cfg ring config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
				  const struct ti_sci_msg_rm_ring_cfg *params)
{
	struct ti_sci_msg_rm_ring_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->addr_lo = params->addr_lo;
	req->addr_hi = params->addr_hi;
	req->count = params->count;
	req->mode = params->mode;
	req->size = params->size;
	req->order_id = params->order_id;
	req->virtid = params->virtid;
	req->asel = params->asel;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
	return ret;
}
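
/*
 * Editor's usage sketch (illustrative only, not part of this driver):
 * only the fields flagged in valid_params are consumed by firmware, so
 * callers can leave the rest zero-initialized. The nav_id/index/count
 * values are hypothetical, and the flag macro names are assumed from the
 * TISCI protocol header (TI_SCI_MSG_VALUE_RM_RING_*).
 *
 *	struct ti_sci_msg_rm_ring_cfg ring = {
 *		.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID |
 *				TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
 *		.nav_id = 235,
 *		.index = 10,
 *		.count = 128,
 *		.mode = 1,
 *	};
 *	int ret = handle->ops.rm_ring_ops.set_cfg(handle, &ring);
 */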
/**
 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
 * @handle:	Pointer to TI SCI handle.
 * @nav_id:	Device ID of Navigator Subsystem which should be used for
 *		pairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread:	Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
				   u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_pair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
 * @handle:	Pointer to TI SCI handle.
 * @nav_id:	Device ID of Navigator Subsystem which should be used for
 *		unpairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread:	Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
				     u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_unpair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
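
/*
 * Editor's usage sketch (illustrative only, not part of this driver):
 * PSI-L pairing is symmetric — an unpair with the same nav_id/thread
 * triplet undoes the pair. On K3 parts destination threads commonly
 * carry an offset bit, shown here as a hypothetical 0x8000; the nav_id
 * and thread bases are also made up.
 *
 *	u32 src = 0x4000 + chan_id, dst = 0x8000 + flow_id;
 *	int ret;
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, 235, src, dst);
 *	if (!ret) {
 *		// ... DMA traffic flows while paired ...
 *		handle->ops.rm_psil_ops.unpair(handle, 235, src, dst);
 *	}
 */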
/**
 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
 *		structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
					 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->tx_pause_on_err = params->tx_pause_on_err;
	req->tx_filt_einfo = params->tx_filt_einfo;
	req->tx_filt_pswords = params->tx_filt_pswords;
	req->tx_atype = params->tx_atype;
	req->tx_chan_type = params->tx_chan_type;
	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
	req->tx_fetch_size = params->tx_fetch_size;
	req->tx_credit_count = params->tx_credit_count;
	req->txcq_qnum = params->txcq_qnum;
	req->tx_priority = params->tx_priority;
	req->tx_qos = params->tx_qos;
	req->tx_orderid = params->tx_orderid;
	req->fdepth = params->fdepth;
	req->tx_sched_priority = params->tx_sched_priority;
	req->tx_burst_size = params->tx_burst_size;
	req->tx_tdtype = params->tx_tdtype;
	req->extended_ch_type = params->extended_ch_type;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}

/**
 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
 *		structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
					 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->rx_fetch_size = params->rx_fetch_size;
	req->rxcq_qnum = params->rxcq_qnum;
	req->rx_priority = params->rx_priority;
	req->rx_qos = params->rx_qos;
	req->rx_orderid = params->rx_orderid;
	req->rx_sched_priority = params->rx_sched_priority;
	req->flowid_start = params->flowid_start;
	req->flowid_cnt = params->flowid_cnt;
	req->rx_pause_on_err = params->rx_pause_on_err;
	req->rx_atype = params->rx_atype;
	req->rx_chan_type = params->rx_chan_type;
	req->rx_ignore_short = params->rx_ignore_short;
	req->rx_ignore_long = params->rx_ignore_long;
	req->rx_burst_size = params->rx_burst_size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}

/**
 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
 *		structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
					   const struct ti_sci_msg_rm_udmap_flow_cfg *params)
{
	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->flow_index = params->flow_index;
	req->rx_einfo_present = params->rx_einfo_present;
	req->rx_psinfo_present = params->rx_psinfo_present;
	req->rx_error_handling = params->rx_error_handling;
	req->rx_desc_type = params->rx_desc_type;
	req->rx_sop_offset = params->rx_sop_offset;
	req->rx_dest_qnum = params->rx_dest_qnum;
	req->rx_src_tag_hi = params->rx_src_tag_hi;
	req->rx_src_tag_lo = params->rx_src_tag_lo;
	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
	req->rx_ps_location = params->rx_ps_location;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
	return ret;
}

/**
 * ti_sci_cmd_proc_request() - Command to request a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_request *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_release() - Command to release a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_release *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
 *				control to a host in the processor's access
 *				control list.
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @host_id:	Host ID to get the control of the processor
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
				    u8 proc_id, u8 host_id)
{
	struct ti_sci_msg_req_proc_handover *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->host_id = host_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
 *				  configuration flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor Boot vector (start address)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 bootvector,
				      u32 config_flags_set,
				      u32 config_flags_clear)
{
	struct ti_sci_msg_req_set_config *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
				TI_SCI_ADDR_HIGH_SHIFT;
	req->config_flags_set = config_flags_set;
	req->config_flags_clear = config_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
 *				   control flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @control_flags_set:	Control flags to be set
 * @control_flags_clear: Control flags to be cleared
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
				       u8 proc_id, u32 control_flags_set,
				       u32 control_flags_clear)
{
	struct ti_sci_msg_req_set_ctrl *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->control_flags_set = control_flags_set;
	req->control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Processor Boot vector (start address)
 * @cfg_flags:	Processor specific configuration flags
 * @ctrl_flags:	Processor specific control flags
 * @sts_flags:	Processor specific status flags
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 *bv, u32 *cfg_flags,
				      u32 *ctrl_flags, u32 *sts_flags)
{
	struct ti_sci_msg_resp_get_status *resp;
	struct ti_sci_msg_req_get_status *req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
		       TI_SCI_ADDR_HIGH_MASK);
		*cfg_flags = resp->config_flags;
		*ctrl_flags = resp->control_flags;
		*sts_flags = resp->status_flags;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
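
/*
 * Editor's usage sketch (illustrative only, not part of this driver): a
 * remote processor loader typically uses the proc ops as a
 * request/configure/verify sequence, releasing control on failure. The
 * processor ID (6) and the boot_vector/cfg_set values are hypothetical.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *	u64 bv;
 *	u32 cfg, ctrl, sts;
 *	int ret;
 *
 *	ret = pops->request(handle, 6);
 *	if (!ret)
 *		ret = pops->set_config(handle, 6, boot_vector, cfg_set, 0);
 *	if (!ret)
 *		ret = pops->get_status(handle, 6, &bv, &cfg, &ctrl, &sts);
 *	if (ret)
 *		pops->release(handle, 6);
 */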
  2441. /*
  2442. * ti_sci_setup_ops() - Setup the operations structures
  2443. * @info: pointer to TISCI pointer
  2444. */
  2445. static void ti_sci_setup_ops(struct ti_sci_info *info)
  2446. {
  2447. struct ti_sci_ops *ops = &info->handle.ops;
  2448. struct ti_sci_core_ops *core_ops = &ops->core_ops;
  2449. struct ti_sci_dev_ops *dops = &ops->dev_ops;
  2450. struct ti_sci_clk_ops *cops = &ops->clk_ops;
  2451. struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
  2452. struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
  2453. struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
  2454. struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
  2455. struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
  2456. struct ti_sci_proc_ops *pops = &ops->proc_ops;
  2457. core_ops->reboot_device = ti_sci_cmd_core_reboot;
  2458. dops->get_device = ti_sci_cmd_get_device;
  2459. dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
  2460. dops->idle_device = ti_sci_cmd_idle_device;
  2461. dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
  2462. dops->put_device = ti_sci_cmd_put_device;
  2463. dops->is_valid = ti_sci_cmd_dev_is_valid;
  2464. dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
  2465. dops->is_idle = ti_sci_cmd_dev_is_idle;
  2466. dops->is_stop = ti_sci_cmd_dev_is_stop;
  2467. dops->is_on = ti_sci_cmd_dev_is_on;
  2468. dops->is_transitioning = ti_sci_cmd_dev_is_trans;
  2469. dops->set_device_resets = ti_sci_cmd_set_device_resets;
  2470. dops->get_device_resets = ti_sci_cmd_get_device_resets;
  2471. cops->get_clock = ti_sci_cmd_get_clock;
  2472. cops->idle_clock = ti_sci_cmd_idle_clock;
  2473. cops->put_clock = ti_sci_cmd_put_clock;
  2474. cops->is_auto = ti_sci_cmd_clk_is_auto;
  2475. cops->is_on = ti_sci_cmd_clk_is_on;
  2476. cops->is_off = ti_sci_cmd_clk_is_off;
  2477. cops->set_parent = ti_sci_cmd_clk_set_parent;
  2478. cops->get_parent = ti_sci_cmd_clk_get_parent;
  2479. cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
  2480. cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
  2481. cops->set_freq = ti_sci_cmd_clk_set_freq;
  2482. cops->get_freq = ti_sci_cmd_clk_get_freq;
  2483. rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
  2484. rm_core_ops->get_range_from_shost =
  2485. ti_sci_cmd_get_resource_range_from_shost;
  2486. iops->set_irq = ti_sci_cmd_set_irq;
  2487. iops->set_event_map = ti_sci_cmd_set_event_map;
  2488. iops->free_irq = ti_sci_cmd_free_irq;
  2489. iops->free_event_map = ti_sci_cmd_free_event_map;
  2490. rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
  2491. psilops->pair = ti_sci_cmd_rm_psil_pair;
  2492. psilops->unpair = ti_sci_cmd_rm_psil_unpair;
  2493. udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
  2494. udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
  2495. udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
  2496. pops->request = ti_sci_cmd_proc_request;
  2497. pops->release = ti_sci_cmd_proc_release;
  2498. pops->handover = ti_sci_cmd_proc_handover;
  2499. pops->set_config = ti_sci_cmd_proc_set_config;
  2500. pops->set_control = ti_sci_cmd_proc_set_control;
  2501. pops->get_status = ti_sci_cmd_proc_get_status;
  2502. }
  2503. /**
  2504. * ti_sci_get_handle() - Get the TI SCI handle for a device
  2505. * @dev: Pointer to device for which we want SCI handle
  2506. *
  2507. * NOTE: The function does not track individual clients of the framework
  2508. * and is expected to be maintained by caller of TI SCI protocol library.
  2509. * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
  2510. * Return: pointer to handle if successful, else:
  2511. * -EPROBE_DEFER if the instance is not ready
  2512. * -ENODEV if the required node handler is missing
  2513. * -EINVAL if invalid conditions are encountered.
  2514. */
  2515. const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
  2516. {
  2517. struct device_node *ti_sci_np;
  2518. struct list_head *p;
  2519. struct ti_sci_handle *handle = NULL;
  2520. struct ti_sci_info *info;
  2521. if (!dev) {
  2522. pr_err("I need a device pointer\n");
  2523. return ERR_PTR(-EINVAL);
  2524. }
  2525. ti_sci_np = of_get_parent(dev->of_node);
  2526. if (!ti_sci_np) {
  2527. dev_err(dev, "No OF information\n");
  2528. return ERR_PTR(-EINVAL);
  2529. }
  2530. mutex_lock(&ti_sci_list_mutex);
  2531. list_for_each(p, &ti_sci_list) {
  2532. info = list_entry(p, struct ti_sci_info, node);
  2533. if (ti_sci_np == info->dev->of_node) {
  2534. handle = &info->handle;
  2535. info->users++;
  2536. break;
  2537. }
  2538. }
  2539. mutex_unlock(&ti_sci_list_mutex);
  2540. of_node_put(ti_sci_np);
  2541. if (!handle)
  2542. return ERR_PTR(-EPROBE_DEFER);
  2543. return handle;
  2544. }
  2545. EXPORT_SYMBOL_GPL(ti_sci_get_handle);
  2546. /**
  2547. * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
  2548. * @handle: Handle acquired by ti_sci_get_handle
  2549. *
  2550. * NOTE: The function does not track individual clients of the framework
  2551. * and is expected to be maintained by caller of TI SCI protocol library.
  2552. * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
  2553. *
  2554. * Return: 0 is successfully released
  2555. * if an error pointer was passed, it returns the error value back,
  2556. * if null was passed, it returns -EINVAL;
  2557. */
  2558. int ti_sci_put_handle(const struct ti_sci_handle *handle)
  2559. {
  2560. struct ti_sci_info *info;
  2561. if (IS_ERR(handle))
  2562. return PTR_ERR(handle);
  2563. if (!handle)
  2564. return -EINVAL;
  2565. info = handle_to_ti_sci_info(handle);
  2566. mutex_lock(&ti_sci_list_mutex);
  2567. if (!WARN_ON(!info->users))
  2568. info->users--;
  2569. mutex_unlock(&ti_sci_list_mutex);
  2570. return 0;
  2571. }
  2572. EXPORT_SYMBOL_GPL(ti_sci_put_handle);
  2573. static void devm_ti_sci_release(struct device *dev, void *res)
  2574. {
  2575. const struct ti_sci_handle **ptr = res;
  2576. const struct ti_sci_handle *handle = *ptr;
  2577. int ret;
  2578. ret = ti_sci_put_handle(handle);
  2579. if (ret)
  2580. dev_err(dev, "failed to put handle %d\n", ret);
  2581. }

/**
 * devm_ti_sci_get_handle() - Managed get handle
 * @dev: device for which we want the SCI handle
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework
 * and usage is expected to be managed by the caller of the TI SCI
 * protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error
 * pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
{
	const struct ti_sci_handle **ptr;
	const struct ti_sci_handle *handle;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_handle(dev);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
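
/*
 * Usage sketch (illustrative; my_probe() is a hypothetical client
 * function): the managed variant ties the handle's lifetime to the
 * client device, so no explicit ti_sci_put_handle() is needed (and it
 * must not be called):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		const struct ti_sci_handle *sci;
 *
 *		sci = devm_ti_sci_get_handle(&pdev->dev);
 *		if (IS_ERR(sci))
 *			return PTR_ERR(sci);
 *		return 0;
 *	}
 */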

/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
 * @np: device node
 * @property: property name containing phandle on TISCI node
 *
 * NOTE: The function does not track individual clients of the framework
 * and usage is expected to be managed by the caller of the TI SCI
 * protocol library. Each successful ti_sci_get_by_phandle() must be
 * balanced by a ti_sci_put_handle().
 *
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready
 * -ENODEV if the required node handler is missing
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
						  const char *property)
{
	struct ti_sci_handle *handle = NULL;
	struct device_node *ti_sci_np;
	struct ti_sci_info *info;
	struct list_head *p;

	if (!np) {
		pr_err("I need a device node pointer\n");
		return ERR_PTR(-EINVAL);
	}

	ti_sci_np = of_parse_phandle(np, property, 0);
	if (!ti_sci_np)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);

/**
 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
 * @dev: Device pointer requesting TISCI handle
 * @property: property name containing phandle on TISCI node
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework
 * and usage is expected to be managed by the caller of the TI SCI
 * protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error
 * pointer.
 */
const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
						       const char *property)
{
	const struct ti_sci_handle *handle;
	const struct ti_sci_handle **ptr;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);

	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
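
/*
 * Usage sketch (illustrative; node names and the "ti,sci" property are
 * examples, not mandated by this driver): the phandle variants resolve
 * a property on the client node pointing at the TISCI node, e.g. in DT:
 *
 *	dmsc: system-controller {
 *		compatible = "ti,am654-sci";
 *		...
 *	};
 *
 *	my_client: client {
 *		ti,sci = <&dmsc>;
 *	};
 *
 * and in the client driver:
 *
 *	sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(sci))
 *		return PTR_ERR(sci);
 */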

/**
 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
 * @res: Pointer to the TISCI resource
 *
 * Return: resource number if one is free, else TI_SCI_RESOURCE_NULL.
 */
u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
{
	unsigned long flags;
	u16 set, free_bit;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		struct ti_sci_resource_desc *desc = &res->desc[set];
		int res_count = desc->num + desc->num_sec;

		free_bit = find_first_zero_bit(desc->res_map, res_count);
		if (free_bit != res_count) {
			set_bit(free_bit, desc->res_map);
			raw_spin_unlock_irqrestore(&res->lock, flags);

			if (desc->num && free_bit < desc->num)
				return desc->start + free_bit;
			else
				return desc->start_sec + free_bit;
		}
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);

	return TI_SCI_RESOURCE_NULL;
}
EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);

/**
 * ti_sci_release_resource() - Release a resource from TISCI resource.
 * @res: Pointer to the TISCI resource
 * @id: Resource id to be released.
 */
void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
{
	unsigned long flags;
	u16 set;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		struct ti_sci_resource_desc *desc = &res->desc[set];

		if (desc->num && desc->start <= id &&
		    (desc->start + desc->num) > id)
			clear_bit(id - desc->start, desc->res_map);
		else if (desc->num_sec && desc->start_sec <= id &&
			 (desc->start_sec + desc->num_sec) > id)
			clear_bit(id - desc->start_sec, desc->res_map);
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);
}
EXPORT_SYMBOL_GPL(ti_sci_release_resource);
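
/*
 * Usage sketch (illustrative): allocation and release of individual
 * resource indices are paired, with TI_SCI_RESOURCE_NULL signalling
 * that all sets are exhausted:
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOENT;
 *	...use the resource id, then when done:
 *	ti_sci_release_resource(res, id);
 */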

/**
 * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
 * @res: Pointer to the TISCI resource
 *
 * Return: Total number of available resources.
 */
u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
{
	u32 set, count = 0;

	for (set = 0; set < res->sets; set++)
		count += res->desc[set].num + res->desc[set].num_sec;

	return count;
}
EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);

/**
 * devm_ti_sci_get_resource_sets() - Get TISCI resource sets assigned to a
 *				     device
 * @handle: TISCI handle
 * @dev: Device pointer to which the resource is assigned
 * @dev_id: TISCI device id to which the resource is assigned
 * @sub_types: Array of sub_types assigned corresponding to device
 * @sets: Number of sub_types
 *
 * Return: Pointer to ti_sci_resource if all went well else appropriate
 * error pointer.
 */
static struct ti_sci_resource *
devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
			      struct device *dev, u32 dev_id, u32 *sub_types,
			      u32 sets)
{
	struct ti_sci_resource *res;
	bool valid_set = false;
	int i, ret, res_count;

	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->sets = sets;
	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
				 GFP_KERNEL);
	if (!res->desc)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < res->sets; i++) {
		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
							sub_types[i],
							&res->desc[i]);
		if (ret) {
			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
				dev_id, sub_types[i]);
			memset(&res->desc[i], 0, sizeof(res->desc[i]));
			continue;
		}

		dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
			dev_id, sub_types[i], res->desc[i].start,
			res->desc[i].num, res->desc[i].start_sec,
			res->desc[i].num_sec);

		valid_set = true;
		res_count = res->desc[i].num + res->desc[i].num_sec;
		res->desc[i].res_map =
			devm_kzalloc(dev, BITS_TO_LONGS(res_count) *
				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
		if (!res->desc[i].res_map)
			return ERR_PTR(-ENOMEM);
	}
	raw_spin_lock_init(&res->lock);

	if (valid_set)
		return res;

	return ERR_PTR(-EINVAL);
}

/**
 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
 * @handle: TISCI handle
 * @dev: Device pointer to which the resource is assigned
 * @dev_id: TISCI device id to which the resource is assigned
 * @of_prop: property name by which the resources are represented
 *
 * Return: Pointer to ti_sci_resource if all went well else appropriate
 * error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
			    struct device *dev, u32 dev_id, char *of_prop)
{
	struct ti_sci_resource *res;
	u32 *sub_types;
	int sets;

	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
					       sizeof(u32));
	if (sets < 0) {
		dev_err(dev, "%s resource type ids not available\n", of_prop);
		return ERR_PTR(sets);
	}

	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
	if (!sub_types)
		return ERR_PTR(-ENOMEM);

	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
					    sets);

	kfree(sub_types);

	return res;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
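
/*
 * Usage sketch (illustrative; the property name follows the pattern
 * used by TISCI clients but is an example here): of_prop is expected to
 * be an array of u32 resource sub-type values on the client node, one
 * per set:
 *
 *	ti,sci-rm-range-vint = <0x0a>;
 *
 * consumed as:
 *
 *	res = devm_ti_sci_get_of_resource(sci, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */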

/**
 * devm_ti_sci_get_resource() - Get a resource range assigned to the device
 * @handle: TISCI handle
 * @dev: Device pointer to which the resource is assigned
 * @dev_id: TISCI device id to which the resource is assigned
 * @sub_type: TISCI resource subtype representing the resource.
 *
 * Return: Pointer to ti_sci_resource if all went well else appropriate
 * error pointer.
 */
struct ti_sci_resource *
devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
			 u32 dev_id, u32 sub_type)
{
	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
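
/*
 * Usage sketch (illustrative; the dev_id and sub_type values come from
 * the SoC-specific TISCI resource assignment data): the single
 * sub-type wrapper is the common path for clients needing one range:
 *
 *	res = devm_ti_sci_get_resource(sci, dev, dev_id, sub_type);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	id = ti_sci_get_free_resource(res);
 */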

static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
				void *cmd)
{
	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
	const struct ti_sci_handle *handle = &info->handle;

	ti_sci_cmd_core_reboot(handle);

	/* Whether the call fails or passes, we should no longer be running */
	return NOTIFY_BAD;
}

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 1000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
};

/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. AM654 can handle up to 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 60,
};

static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
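
/*
 * Bindings sketch (illustrative; the unit address, mailbox phandles and
 * cell values are examples): the probe below expects a node shaped like:
 *
 *	dmsc: system-controller@44083000 {
 *		compatible = "ti,am654-sci";
 *		ti,host-id = <12>;
 *		mbox-names = "rx", "tx";
 *		mboxes = <&secure_proxy_main 11>,
 *			 <&secure_proxy_main 13>;
 *		ti,system-reboot-controller;
 *	};
 */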

static int ti_sci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	const struct ti_sci_desc *desc;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info = NULL;
	struct ti_sci_xfers_info *minfo;
	struct mbox_client *cl;
	int ret = -EINVAL;
	int i;
	int reboot = 0;
	u32 h_id;

	of_id = of_match_device(ti_sci_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
	/* if the property is not present in DT, use a default from desc */
	if (ret < 0) {
		info->host_id = info->desc->default_host_id;
	} else {
		if (!h_id) {
			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
			info->host_id = info->desc->default_host_id;
		} else {
			info->host_id = h_id;
		}
	}

	reboot = of_property_read_bool(dev->of_node,
				       "ti,system-reboot-controller");
	INIT_LIST_HEAD(&info->node);
	minfo = &info->minfo;

	/*
	 * Pre-allocate messages: never allocate more than we can index
	 * in hdr.seq; if the data description has a bug, force a fix.
	 */
	if (WARN_ON(desc->max_msgs >=
		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
		return -EINVAL;

	minfo->xfer_block = devm_kcalloc(dev,
					 desc->max_msgs,
					 sizeof(*minfo->xfer_block),
					 GFP_KERNEL);
	if (!minfo->xfer_block)
		return -ENOMEM;

	minfo->xfer_alloc_table = devm_kcalloc(dev,
					       BITS_TO_LONGS(desc->max_msgs),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!minfo->xfer_alloc_table)
		return -ENOMEM;
	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
					      GFP_KERNEL);
		if (!xfer->xfer_buf)
			return -ENOMEM;

		xfer->tx_message.buf = xfer->xfer_buf;
		init_completion(&xfer->done);
	}

	ret = ti_sci_debugfs_create(pdev, info);
	if (ret)
		dev_warn(dev, "Failed to create debug file\n");

	platform_set_drvdata(pdev, info);

	cl = &info->cl;
	cl->dev = dev;
	cl->tx_block = false;
	cl->rx_callback = ti_sci_rx_callback;
	cl->knows_txdone = true;

	spin_lock_init(&minfo->xfer_lock);
	sema_init(&minfo->sem_xfer_count, desc->max_msgs);

	info->chan_rx = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(info->chan_rx)) {
		ret = PTR_ERR(info->chan_rx);
		goto out;
	}

	info->chan_tx = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(info->chan_tx)) {
		ret = PTR_ERR(info->chan_tx);
		goto out;
	}
	ret = ti_sci_cmd_get_revision(info);
	if (ret) {
		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
		goto out;
	}

	ti_sci_setup_ops(info);

	if (reboot) {
		info->nb.notifier_call = tisci_reboot_handler;
		info->nb.priority = 128;

		ret = register_restart_handler(&info->nb);
		if (ret) {
			dev_err(dev, "reboot registration fail(%d)\n", ret);
			goto out;
		}
	}

	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
		 info->handle.version.abi_major, info->handle.version.abi_minor,
		 info->handle.version.firmware_revision,
		 info->handle.version.firmware_description);

	mutex_lock(&ti_sci_list_mutex);
	list_add_tail(&info->node, &ti_sci_list);
	mutex_unlock(&ti_sci_list_mutex);

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
	if (!IS_ERR(info->chan_tx))
		mbox_free_channel(info->chan_tx);
	if (!IS_ERR(info->chan_rx))
		mbox_free_channel(info->chan_rx);
	debugfs_remove(info->d);
	return ret;
}

static struct platform_driver ti_sci_driver = {
	.probe = ti_sci_probe,
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
		   .suppress_bind_attrs = true,
	},
};
module_platform_driver(ti_sci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");