driver.c 75 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * System Control and Management Interface (SCMI) Message Protocol driver
  4. *
  5. * SCMI Message Protocol is used between the System Control Processor(SCP)
  6. * and the Application Processors(AP). The Message Handling Unit(MHU)
  7. * provides a mechanism for inter-processor communication between SCP's
  8. * Cortex M3 and AP.
  9. *
  10. * SCP offers control and management of the core/cluster power states,
  11. * various power domain DVFS including the core/cluster, certain system
  12. * clocks configuration, thermal sensors and many others.
  13. *
  14. * Copyright (C) 2018-2021 ARM Ltd.
  15. */
  16. #include <linux/bitmap.h>
  17. #include <linux/device.h>
  18. #include <linux/export.h>
  19. #include <linux/idr.h>
  20. #include <linux/io.h>
  21. #include <linux/io-64-nonatomic-hi-lo.h>
  22. #include <linux/kernel.h>
  23. #include <linux/ktime.h>
  24. #include <linux/hashtable.h>
  25. #include <linux/list.h>
  26. #include <linux/module.h>
  27. #include <linux/of_address.h>
  28. #include <linux/of_device.h>
  29. #include <linux/processor.h>
  30. #include <linux/refcount.h>
  31. #include <linux/slab.h>
  32. #include "common.h"
  33. #include "notify.h"
  34. #define CREATE_TRACE_POINTS
  35. #include <trace/events/scmi.h>
  36. enum scmi_error_codes {
  37. SCMI_SUCCESS = 0, /* Success */
  38. SCMI_ERR_SUPPORT = -1, /* Not supported */
  39. SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
  40. SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
  41. SCMI_ERR_ENTRY = -4, /* Not found */
  42. SCMI_ERR_RANGE = -5, /* Value out of range */
  43. SCMI_ERR_BUSY = -6, /* Device busy */
  44. SCMI_ERR_COMMS = -7, /* Communication Error */
  45. SCMI_ERR_GENERIC = -8, /* Generic Error */
  46. SCMI_ERR_HARDWARE = -9, /* Hardware Error */
  47. SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
  48. };
  49. /* List of all SCMI devices active in system */
  50. static LIST_HEAD(scmi_list);
  51. /* Protection for the entire list */
  52. static DEFINE_MUTEX(scmi_list_mutex);
  53. /* Track the unique id for the transfers for debug & profiling purpose */
  54. static atomic_t transfer_last_id;
  55. static DEFINE_IDR(scmi_requested_devices);
  56. static DEFINE_MUTEX(scmi_requested_devices_mtx);
  57. /* Track globally the creation of SCMI SystemPower related devices */
  58. static bool scmi_syspower_registered;
  59. /* Protect access to scmi_syspower_registered */
  60. static DEFINE_MUTEX(scmi_syspower_mtx);
  61. struct scmi_requested_dev {
  62. const struct scmi_device_id *id_table;
  63. struct list_head node;
  64. };
  65. /**
  66. * struct scmi_xfers_info - Structure to manage transfer information
  67. *
  68. * @xfer_alloc_table: Bitmap table for allocated messages.
  69. * Index of this bitmap table is also used for message
  70. * sequence identifier.
  71. * @xfer_lock: Protection for message allocation
  72. * @max_msg: Maximum number of messages that can be pending
  73. * @free_xfers: A free list for available to use xfers. It is initialized with
  74. * a number of xfers equal to the maximum allowed in-flight
  75. * messages.
  76. * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the
  77. * currently in-flight messages.
  78. */
  79. struct scmi_xfers_info {
  80. unsigned long *xfer_alloc_table;
  81. spinlock_t xfer_lock;
  82. int max_msg;
  83. struct hlist_head free_xfers;
  84. DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
  85. };
  86. /**
  87. * struct scmi_protocol_instance - Describe an initialized protocol instance.
  88. * @handle: Reference to the SCMI handle associated to this protocol instance.
  89. * @proto: A reference to the protocol descriptor.
  90. * @gid: A reference for per-protocol devres management.
  91. * @users: A refcount to track effective users of this protocol.
  92. * @priv: Reference for optional protocol private data.
  93. * @ph: An embedded protocol handle that will be passed down to protocol
  94. * initialization code to identify this instance.
  95. *
  96. * Each protocol is initialized independently once for each SCMI platform in
  97. * which is defined by DT and implemented by the SCMI server fw.
  98. */
  99. struct scmi_protocol_instance {
  100. const struct scmi_handle *handle;
  101. const struct scmi_protocol *proto;
  102. void *gid;
  103. refcount_t users;
  104. void *priv;
  105. struct scmi_protocol_handle ph;
  106. };
  107. #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
  108. /**
  109. * struct scmi_info - Structure representing a SCMI instance
  110. *
  111. * @dev: Device pointer
  112. * @desc: SoC description for this instance
  113. * @version: SCMI revision information containing protocol version,
  114. * implementation version and (sub-)vendor identification.
  115. * @handle: Instance of SCMI handle to send to clients
  116. * @tx_minfo: Universal Transmit Message management info
  117. * @rx_minfo: Universal Receive Message management info
  118. * @tx_idr: IDR object to map protocol id to Tx channel info pointer
  119. * @rx_idr: IDR object to map protocol id to Rx channel info pointer
  120. * @protocols: IDR for protocols' instance descriptors initialized for
  121. * this SCMI instance: populated on protocol's first attempted
  122. * usage.
  123. * @protocols_mtx: A mutex to protect protocols instances initialization.
  124. * @protocols_imp: List of protocols implemented, currently maximum of
  125. * scmi_revision_info.num_protocols elements allocated by the
  126. * base protocol
  127. * @active_protocols: IDR storing device_nodes for protocols actually defined
  128. * in the DT and confirmed as implemented by fw.
  129. * @atomic_threshold: Optional system wide DT-configured threshold, expressed
  130. * in microseconds, for atomic operations.
  131. * Only SCMI synchronous commands reported by the platform
  132. * to have an execution latency lesser-equal to the threshold
  133. * should be considered for atomic mode operation: such
  134. * decision is finally left up to the SCMI drivers.
  135. * @notify_priv: Pointer to private data structure specific to notifications.
  136. * @node: List head
  137. * @users: Number of users of this instance
  138. */
  139. struct scmi_info {
  140. struct device *dev;
  141. const struct scmi_desc *desc;
  142. struct scmi_revision_info version;
  143. struct scmi_handle handle;
  144. struct scmi_xfers_info tx_minfo;
  145. struct scmi_xfers_info rx_minfo;
  146. struct idr tx_idr;
  147. struct idr rx_idr;
  148. struct idr protocols;
  149. /* Ensure mutual exclusive access to protocols instance array */
  150. struct mutex protocols_mtx;
  151. u8 *protocols_imp;
  152. struct idr active_protocols;
  153. unsigned int atomic_threshold;
  154. void *notify_priv;
  155. struct list_head node;
  156. int users;
  157. };
  158. #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
  159. static const int scmi_linux_errmap[] = {
  160. /* better than switch case as long as return value is continuous */
  161. 0, /* SCMI_SUCCESS */
  162. -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
  163. -EINVAL, /* SCMI_ERR_PARAM */
  164. -EACCES, /* SCMI_ERR_ACCESS */
  165. -ENOENT, /* SCMI_ERR_ENTRY */
  166. -ERANGE, /* SCMI_ERR_RANGE */
  167. -EBUSY, /* SCMI_ERR_BUSY */
  168. -ECOMM, /* SCMI_ERR_COMMS */
  169. -EIO, /* SCMI_ERR_GENERIC */
  170. -EREMOTEIO, /* SCMI_ERR_HARDWARE */
  171. -EPROTO, /* SCMI_ERR_PROTOCOL */
  172. };
  173. static inline int scmi_to_linux_errno(int errno)
  174. {
  175. int err_idx = -errno;
  176. if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
  177. return scmi_linux_errmap[err_idx];
  178. return -EIO;
  179. }
  180. void scmi_notification_instance_data_set(const struct scmi_handle *handle,
  181. void *priv)
  182. {
  183. struct scmi_info *info = handle_to_scmi_info(handle);
  184. info->notify_priv = priv;
  185. /* Ensure updated protocol private date are visible */
  186. smp_wmb();
  187. }
  188. void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
  189. {
  190. struct scmi_info *info = handle_to_scmi_info(handle);
  191. /* Ensure protocols_private_data has been updated */
  192. smp_rmb();
  193. return info->notify_priv;
  194. }
  195. /**
  196. * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
  197. *
  198. * @minfo: Pointer to Tx/Rx Message management info based on channel type
  199. * @xfer: The xfer to act upon
  200. *
  201. * Pick the next unused monotonically increasing token and set it into
  202. * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
  203. * reuse of freshly completed or timed-out xfers, thus mitigating the risk
  204. * of incorrect association of a late and expired xfer with a live in-flight
  205. * transaction, both happening to re-use the same token identifier.
  206. *
  207. * Since platform is NOT required to answer our request in-order we should
  208. * account for a few rare but possible scenarios:
  209. *
  210. * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
  211. * using find_next_zero_bit() starting from candidate next_token bit
  212. *
  213. * - all tokens ahead upto (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
  214. * are plenty of free tokens at start, so try a second pass using
  215. * find_next_zero_bit() and starting from 0.
  216. *
  217. * X = used in-flight
  218. *
  219. * Normal
  220. * ------
  221. *
  222. * |- xfer_id picked
  223. * -----------+----------------------------------------------------------
  224. * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
  225. * ----------------------------------------------------------------------
  226. * ^
  227. * |- next_token
  228. *
  229. * Out-of-order pending at start
  230. * -----------------------------
  231. *
  232. * |- xfer_id picked, last_token fixed
  233. * -----+----------------------------------------------------------------
  234. * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
  235. * ----------------------------------------------------------------------
  236. * ^
  237. * |- next_token
  238. *
  239. *
  240. * Out-of-order pending at end
  241. * ---------------------------
  242. *
  243. * |- xfer_id picked, last_token fixed
  244. * -----+----------------------------------------------------------------
  245. * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
  246. * ----------------------------------------------------------------------
  247. * ^
  248. * |- next_token
  249. *
  250. * Context: Assumes to be called with @xfer_lock already acquired.
  251. *
  252. * Return: 0 on Success or error
  253. */
  254. static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
  255. struct scmi_xfer *xfer)
  256. {
  257. unsigned long xfer_id, next_token;
  258. /*
  259. * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
  260. * using the pre-allocated transfer_id as a base.
  261. * Note that the global transfer_id is shared across all message types
  262. * so there could be holes in the allocated set of monotonic sequence
  263. * numbers, but that is going to limit the effectiveness of the
  264. * mitigation only in very rare limit conditions.
  265. */
  266. next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
  267. /* Pick the next available xfer_id >= next_token */
  268. xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
  269. MSG_TOKEN_MAX, next_token);
  270. if (xfer_id == MSG_TOKEN_MAX) {
  271. /*
  272. * After heavily out-of-order responses, there are no free
  273. * tokens ahead, but only at start of xfer_alloc_table so
  274. * try again from the beginning.
  275. */
  276. xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
  277. MSG_TOKEN_MAX, 0);
  278. /*
  279. * Something is wrong if we got here since there can be a
  280. * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
  281. * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
  282. */
  283. if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
  284. return -ENOMEM;
  285. }
  286. /* Update +/- last_token accordingly if we skipped some hole */
  287. if (xfer_id != next_token)
  288. atomic_add((int)(xfer_id - next_token), &transfer_last_id);
  289. /* Set in-flight */
  290. set_bit(xfer_id, minfo->xfer_alloc_table);
  291. xfer->hdr.seq = (u16)xfer_id;
  292. return 0;
  293. }
  294. /**
  295. * scmi_xfer_token_clear - Release the token
  296. *
  297. * @minfo: Pointer to Tx/Rx Message management info based on channel type
  298. * @xfer: The xfer to act upon
  299. */
  300. static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
  301. struct scmi_xfer *xfer)
  302. {
  303. clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
  304. }
  305. /**
  306. * scmi_xfer_get() - Allocate one message
  307. *
  308. * @handle: Pointer to SCMI entity handle
  309. * @minfo: Pointer to Tx/Rx Message management info based on channel type
  310. * @set_pending: If true a monotonic token is picked and the xfer is added to
  311. * the pending hash table.
  312. *
  313. * Helper function which is used by various message functions that are
  314. * exposed to clients of this driver for allocating a message traffic event.
  315. *
  316. * Picks an xfer from the free list @free_xfers (if any available) and, if
  317. * required, sets a monotonically increasing token and stores the inflight xfer
  318. * into the @pending_xfers hashtable for later retrieval.
  319. *
  320. * The successfully initialized xfer is refcounted.
  321. *
  322. * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
  323. * @free_xfers.
  324. *
  325. * Return: 0 if all went fine, else corresponding error.
  326. */
  327. static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
  328. struct scmi_xfers_info *minfo,
  329. bool set_pending)
  330. {
  331. int ret;
  332. unsigned long flags;
  333. struct scmi_xfer *xfer;
  334. spin_lock_irqsave(&minfo->xfer_lock, flags);
  335. if (hlist_empty(&minfo->free_xfers)) {
  336. spin_unlock_irqrestore(&minfo->xfer_lock, flags);
  337. return ERR_PTR(-ENOMEM);
  338. }
  339. /* grab an xfer from the free_list */
  340. xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
  341. hlist_del_init(&xfer->node);
  342. /*
  343. * Allocate transfer_id early so that can be used also as base for
  344. * monotonic sequence number generation if needed.
  345. */
  346. xfer->transfer_id = atomic_inc_return(&transfer_last_id);
  347. if (set_pending) {
  348. /* Pick and set monotonic token */
  349. ret = scmi_xfer_token_set(minfo, xfer);
  350. if (!ret) {
  351. hash_add(minfo->pending_xfers, &xfer->node,
  352. xfer->hdr.seq);
  353. xfer->pending = true;
  354. } else {
  355. dev_err(handle->dev,
  356. "Failed to get monotonic token %d\n", ret);
  357. hlist_add_head(&xfer->node, &minfo->free_xfers);
  358. xfer = ERR_PTR(ret);
  359. }
  360. }
  361. if (!IS_ERR(xfer)) {
  362. refcount_set(&xfer->users, 1);
  363. atomic_set(&xfer->busy, SCMI_XFER_FREE);
  364. }
  365. spin_unlock_irqrestore(&minfo->xfer_lock, flags);
  366. return xfer;
  367. }
  368. /**
  369. * __scmi_xfer_put() - Release a message
  370. *
  371. * @minfo: Pointer to Tx/Rx Message management info based on channel type
  372. * @xfer: message that was reserved by scmi_xfer_get
  373. *
  374. * After refcount check, possibly release an xfer, clearing the token slot,
  375. * removing xfer from @pending_xfers and putting it back into free_xfers.
  376. *
  377. * This holds a spinlock to maintain integrity of internal data structures.
  378. */
  379. static void
  380. __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
  381. {
  382. unsigned long flags;
  383. spin_lock_irqsave(&minfo->xfer_lock, flags);
  384. if (refcount_dec_and_test(&xfer->users)) {
  385. if (xfer->pending) {
  386. scmi_xfer_token_clear(minfo, xfer);
  387. hash_del(&xfer->node);
  388. xfer->pending = false;
  389. }
  390. hlist_add_head(&xfer->node, &minfo->free_xfers);
  391. }
  392. spin_unlock_irqrestore(&minfo->xfer_lock, flags);
  393. }
  394. /**
  395. * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
  396. *
  397. * @minfo: Pointer to Tx/Rx Message management info based on channel type
  398. * @xfer_id: Token ID to lookup in @pending_xfers
  399. *
  400. * Refcounting is untouched.
  401. *
  402. * Context: Assumes to be called with @xfer_lock already acquired.
  403. *
  404. * Return: A valid xfer on Success or error otherwise
  405. */
  406. static struct scmi_xfer *
  407. scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
  408. {
  409. struct scmi_xfer *xfer = NULL;
  410. if (test_bit(xfer_id, minfo->xfer_alloc_table))
  411. xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
  412. return xfer ?: ERR_PTR(-EINVAL);
  413. }
  414. /**
  415. * scmi_msg_response_validate - Validate message type against state of related
  416. * xfer
  417. *
  418. * @cinfo: A reference to the channel descriptor.
  419. * @msg_type: Message type to check
  420. * @xfer: A reference to the xfer to validate against @msg_type
  421. *
  422. * This function checks if @msg_type is congruent with the current state of
  423. * a pending @xfer; if an asynchronous delayed response is received before the
  424. * related synchronous response (Out-of-Order Delayed Response) the missing
  425. * synchronous response is assumed to be OK and completed, carrying on with the
  426. * Delayed Response: this is done to address the case in which the underlying
  427. * SCMI transport can deliver such out-of-order responses.
  428. *
  429. * Context: Assumes to be called with xfer->lock already acquired.
  430. *
  431. * Return: 0 on Success, error otherwise
  432. */
  433. static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
  434. u8 msg_type,
  435. struct scmi_xfer *xfer)
  436. {
  437. /*
  438. * Even if a response was indeed expected on this slot at this point,
  439. * a buggy platform could wrongly reply feeding us an unexpected
  440. * delayed response we're not prepared to handle: bail-out safely
  441. * blaming firmware.
  442. */
  443. if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
  444. dev_err(cinfo->dev,
  445. "Delayed Response for %d not expected! Buggy F/W ?\n",
  446. xfer->hdr.seq);
  447. return -EINVAL;
  448. }
  449. switch (xfer->state) {
  450. case SCMI_XFER_SENT_OK:
  451. if (msg_type == MSG_TYPE_DELAYED_RESP) {
  452. /*
  453. * Delayed Response expected but delivered earlier.
  454. * Assume message RESPONSE was OK and skip state.
  455. */
  456. xfer->hdr.status = SCMI_SUCCESS;
  457. xfer->state = SCMI_XFER_RESP_OK;
  458. complete(&xfer->done);
  459. dev_warn(cinfo->dev,
  460. "Received valid OoO Delayed Response for %d\n",
  461. xfer->hdr.seq);
  462. }
  463. break;
  464. case SCMI_XFER_RESP_OK:
  465. if (msg_type != MSG_TYPE_DELAYED_RESP)
  466. return -EINVAL;
  467. break;
  468. case SCMI_XFER_DRESP_OK:
  469. /* No further message expected once in SCMI_XFER_DRESP_OK */
  470. return -EINVAL;
  471. }
  472. return 0;
  473. }
  474. /**
  475. * scmi_xfer_state_update - Update xfer state
  476. *
  477. * @xfer: A reference to the xfer to update
  478. * @msg_type: Type of message being processed.
  479. *
  480. * Note that this message is assumed to have been already successfully validated
  481. * by @scmi_msg_response_validate(), so here we just update the state.
  482. *
  483. * Context: Assumes to be called on an xfer exclusively acquired using the
  484. * busy flag.
  485. */
  486. static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
  487. {
  488. xfer->hdr.type = msg_type;
  489. /* Unknown command types were already discarded earlier */
  490. if (xfer->hdr.type == MSG_TYPE_COMMAND)
  491. xfer->state = SCMI_XFER_RESP_OK;
  492. else
  493. xfer->state = SCMI_XFER_DRESP_OK;
  494. }
  495. static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
  496. {
  497. int ret;
  498. ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
  499. return ret == SCMI_XFER_FREE;
  500. }
  501. /**
  502. * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
  503. *
  504. * @cinfo: A reference to the channel descriptor.
  505. * @msg_hdr: A message header to use as lookup key
  506. *
  507. * When a valid xfer is found for the sequence number embedded in the provided
  508. * msg_hdr, reference counting is properly updated and exclusive access to this
  509. * xfer is granted till released with @scmi_xfer_command_release.
  510. *
  511. * Return: A valid @xfer on Success or error otherwise.
  512. */
  513. static inline struct scmi_xfer *
  514. scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
  515. {
  516. int ret;
  517. unsigned long flags;
  518. struct scmi_xfer *xfer;
  519. struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
  520. struct scmi_xfers_info *minfo = &info->tx_minfo;
  521. u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
  522. u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
  523. /* Are we even expecting this? */
  524. spin_lock_irqsave(&minfo->xfer_lock, flags);
  525. xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
  526. if (IS_ERR(xfer)) {
  527. dev_err(cinfo->dev,
  528. "Message for %d type %d is not expected!\n",
  529. xfer_id, msg_type);
  530. spin_unlock_irqrestore(&minfo->xfer_lock, flags);
  531. return xfer;
  532. }
  533. refcount_inc(&xfer->users);
  534. spin_unlock_irqrestore(&minfo->xfer_lock, flags);
  535. spin_lock_irqsave(&xfer->lock, flags);
  536. ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
  537. /*
  538. * If a pending xfer was found which was also in a congruent state with
  539. * the received message, acquire exclusive access to it setting the busy
  540. * flag.
  541. * Spins only on the rare limit condition of concurrent reception of
  542. * RESP and DRESP for the same xfer.
  543. */
  544. if (!ret) {
  545. spin_until_cond(scmi_xfer_acquired(xfer));
  546. scmi_xfer_state_update(xfer, msg_type);
  547. }
  548. spin_unlock_irqrestore(&xfer->lock, flags);
  549. if (ret) {
  550. dev_err(cinfo->dev,
  551. "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
  552. msg_type, xfer_id, msg_hdr, xfer->state);
  553. /* On error the refcount incremented above has to be dropped */
  554. __scmi_xfer_put(minfo, xfer);
  555. xfer = ERR_PTR(-EINVAL);
  556. }
  557. return xfer;
  558. }
  559. static inline void scmi_xfer_command_release(struct scmi_info *info,
  560. struct scmi_xfer *xfer)
  561. {
  562. atomic_set(&xfer->busy, SCMI_XFER_FREE);
  563. __scmi_xfer_put(&info->tx_minfo, xfer);
  564. }
  565. static inline void scmi_clear_channel(struct scmi_info *info,
  566. struct scmi_chan_info *cinfo)
  567. {
  568. if (info->desc->ops->clear_channel)
  569. info->desc->ops->clear_channel(cinfo);
  570. }
  571. static inline bool is_polling_required(struct scmi_chan_info *cinfo,
  572. struct scmi_info *info)
  573. {
  574. return cinfo->no_completion_irq || info->desc->force_polling;
  575. }
  576. static inline bool is_transport_polling_capable(struct scmi_info *info)
  577. {
  578. return info->desc->ops->poll_done ||
  579. info->desc->sync_cmds_completed_on_ret;
  580. }
  581. static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
  582. struct scmi_info *info)
  583. {
  584. return is_polling_required(cinfo, info) &&
  585. is_transport_polling_capable(info);
  586. }
  587. static void scmi_handle_notification(struct scmi_chan_info *cinfo,
  588. u32 msg_hdr, void *priv)
  589. {
  590. struct scmi_xfer *xfer;
  591. struct device *dev = cinfo->dev;
  592. struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
  593. struct scmi_xfers_info *minfo = &info->rx_minfo;
  594. ktime_t ts;
  595. ts = ktime_get_boottime();
  596. xfer = scmi_xfer_get(cinfo->handle, minfo, false);
  597. if (IS_ERR(xfer)) {
  598. dev_err(dev, "failed to get free message slot (%ld)\n",
  599. PTR_ERR(xfer));
  600. scmi_clear_channel(info, cinfo);
  601. return;
  602. }
  603. unpack_scmi_header(msg_hdr, &xfer->hdr);
  604. if (priv)
  605. /* Ensure order between xfer->priv store and following ops */
  606. smp_store_mb(xfer->priv, priv);
  607. info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
  608. xfer);
  609. trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI",
  610. xfer->hdr.seq, xfer->hdr.status,
  611. xfer->rx.buf, xfer->rx.len);
  612. scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
  613. xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
  614. trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
  615. xfer->hdr.protocol_id, xfer->hdr.seq,
  616. MSG_TYPE_NOTIFICATION);
  617. __scmi_xfer_put(minfo, xfer);
  618. scmi_clear_channel(info, cinfo);
  619. }
  620. static void scmi_handle_response(struct scmi_chan_info *cinfo,
  621. u32 msg_hdr, void *priv)
  622. {
  623. struct scmi_xfer *xfer;
  624. struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
  625. xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
  626. if (IS_ERR(xfer)) {
  627. if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
  628. scmi_clear_channel(info, cinfo);
  629. return;
  630. }
  631. /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
  632. if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
  633. xfer->rx.len = info->desc->max_msg_size;
  634. if (priv)
  635. /* Ensure order between xfer->priv store and following ops */
  636. smp_store_mb(xfer->priv, priv);
  637. info->desc->ops->fetch_response(cinfo, xfer);
  638. trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
  639. xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
  640. "DLYD" : "RESP",
  641. xfer->hdr.seq, xfer->hdr.status,
  642. xfer->rx.buf, xfer->rx.len);
  643. trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
  644. xfer->hdr.protocol_id, xfer->hdr.seq,
  645. xfer->hdr.type);
  646. if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
  647. scmi_clear_channel(info, cinfo);
  648. complete(xfer->async_done);
  649. } else {
  650. complete(&xfer->done);
  651. }
  652. scmi_xfer_command_release(info, xfer);
  653. }
  654. /**
  655. * scmi_rx_callback() - callback for receiving messages
  656. *
  657. * @cinfo: SCMI channel info
  658. * @msg_hdr: Message header
  659. * @priv: Transport specific private data.
  660. *
  661. * Processes one received message to appropriate transfer information and
  662. * signals completion of the transfer.
  663. *
  664. * NOTE: This function will be invoked in IRQ context, hence should be
  665. * as optimal as possible.
  666. */
  667. void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
  668. {
  669. u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
  670. switch (msg_type) {
  671. case MSG_TYPE_NOTIFICATION:
  672. scmi_handle_notification(cinfo, msg_hdr, priv);
  673. break;
  674. case MSG_TYPE_COMMAND:
  675. case MSG_TYPE_DELAYED_RESP:
  676. scmi_handle_response(cinfo, msg_hdr, priv);
  677. break;
  678. default:
  679. WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
  680. break;
  681. }
  682. }
  683. /**
  684. * xfer_put() - Release a transmit message
  685. *
  686. * @ph: Pointer to SCMI protocol handle
  687. * @xfer: message that was reserved by xfer_get_init
  688. */
  689. static void xfer_put(const struct scmi_protocol_handle *ph,
  690. struct scmi_xfer *xfer)
  691. {
  692. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  693. struct scmi_info *info = handle_to_scmi_info(pi->handle);
  694. __scmi_xfer_put(&info->tx_minfo, xfer);
  695. }
  696. static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
  697. struct scmi_xfer *xfer, ktime_t stop)
  698. {
  699. struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
  700. /*
  701. * Poll also on xfer->done so that polling can be forcibly terminated
  702. * in case of out-of-order receptions of delayed responses
  703. */
  704. return info->desc->ops->poll_done(cinfo, xfer) ||
  705. try_wait_for_completion(&xfer->done) ||
  706. ktime_after(ktime_get(), stop);
  707. }
  708. /**
  709. * scmi_wait_for_message_response - An helper to group all the possible ways of
  710. * waiting for a synchronous message response.
  711. *
  712. * @cinfo: SCMI channel info
  713. * @xfer: Reference to the transfer being waited for.
  714. *
  715. * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
  716. * configuration flags like xfer->hdr.poll_completion.
  717. *
  718. * Return: 0 on Success, error otherwise.
  719. */
  720. static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
  721. struct scmi_xfer *xfer)
  722. {
  723. struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
  724. struct device *dev = info->dev;
  725. int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;
  726. trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
  727. xfer->hdr.protocol_id, xfer->hdr.seq,
  728. timeout_ms,
  729. xfer->hdr.poll_completion);
  730. if (xfer->hdr.poll_completion) {
  731. /*
  732. * Real polling is needed only if transport has NOT declared
  733. * itself to support synchronous commands replies.
  734. */
  735. if (!info->desc->sync_cmds_completed_on_ret) {
  736. /*
  737. * Poll on xfer using transport provided .poll_done();
  738. * assumes no completion interrupt was available.
  739. */
  740. ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
  741. spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
  742. xfer, stop));
  743. if (ktime_after(ktime_get(), stop)) {
  744. dev_err(dev,
  745. "timed out in resp(caller: %pS) - polling\n",
  746. (void *)_RET_IP_);
  747. ret = -ETIMEDOUT;
  748. }
  749. }
  750. if (!ret) {
  751. unsigned long flags;
  752. /*
  753. * Do not fetch_response if an out-of-order delayed
  754. * response is being processed.
  755. */
  756. spin_lock_irqsave(&xfer->lock, flags);
  757. if (xfer->state == SCMI_XFER_SENT_OK) {
  758. info->desc->ops->fetch_response(cinfo, xfer);
  759. xfer->state = SCMI_XFER_RESP_OK;
  760. }
  761. spin_unlock_irqrestore(&xfer->lock, flags);
  762. /* Trace polled replies. */
  763. trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id,
  764. "RESP",
  765. xfer->hdr.seq, xfer->hdr.status,
  766. xfer->rx.buf, xfer->rx.len);
  767. }
  768. } else {
  769. /* And we wait for the response. */
  770. if (!wait_for_completion_timeout(&xfer->done,
  771. msecs_to_jiffies(timeout_ms))) {
  772. dev_err(dev, "timed out in resp(caller: %pS)\n",
  773. (void *)_RET_IP_);
  774. ret = -ETIMEDOUT;
  775. }
  776. }
  777. return ret;
  778. }
  779. /**
  780. * do_xfer() - Do one transfer
  781. *
  782. * @ph: Pointer to SCMI protocol handle
  783. * @xfer: Transfer to initiate and wait for response
  784. *
  785. * Return: -ETIMEDOUT in case of no response, if transmit error,
  786. * return corresponding error, else if all goes well,
  787. * return 0.
  788. */
  789. static int do_xfer(const struct scmi_protocol_handle *ph,
  790. struct scmi_xfer *xfer)
  791. {
  792. int ret;
  793. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  794. struct scmi_info *info = handle_to_scmi_info(pi->handle);
  795. struct device *dev = info->dev;
  796. struct scmi_chan_info *cinfo;
  797. /* Check for polling request on custom command xfers at first */
  798. if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
  799. dev_warn_once(dev,
  800. "Polling mode is not supported by transport.\n");
  801. return -EINVAL;
  802. }
  803. cinfo = idr_find(&info->tx_idr, pi->proto->id);
  804. if (unlikely(!cinfo))
  805. return -EINVAL;
  806. /* True ONLY if also supported by transport. */
  807. if (is_polling_enabled(cinfo, info))
  808. xfer->hdr.poll_completion = true;
  809. /*
  810. * Initialise protocol id now from protocol handle to avoid it being
  811. * overridden by mistake (or malice) by the protocol code mangling with
  812. * the scmi_xfer structure prior to this.
  813. */
  814. xfer->hdr.protocol_id = pi->proto->id;
  815. reinit_completion(&xfer->done);
  816. trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
  817. xfer->hdr.protocol_id, xfer->hdr.seq,
  818. xfer->hdr.poll_completion);
  819. /* Clear any stale status */
  820. xfer->hdr.status = SCMI_SUCCESS;
  821. xfer->state = SCMI_XFER_SENT_OK;
  822. /*
  823. * Even though spinlocking is not needed here since no race is possible
  824. * on xfer->state due to the monotonically increasing tokens allocation,
  825. * we must anyway ensure xfer->state initialization is not re-ordered
  826. * after the .send_message() to be sure that on the RX path an early
  827. * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
  828. */
  829. smp_mb();
  830. ret = info->desc->ops->send_message(cinfo, xfer);
  831. if (ret < 0) {
  832. dev_dbg(dev, "Failed to send message %d\n", ret);
  833. return ret;
  834. }
  835. trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND",
  836. xfer->hdr.seq, xfer->hdr.status,
  837. xfer->tx.buf, xfer->tx.len);
  838. ret = scmi_wait_for_message_response(cinfo, xfer);
  839. if (!ret && xfer->hdr.status)
  840. ret = scmi_to_linux_errno(xfer->hdr.status);
  841. if (info->desc->ops->mark_txdone)
  842. info->desc->ops->mark_txdone(cinfo, ret, xfer);
  843. trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
  844. xfer->hdr.protocol_id, xfer->hdr.seq, ret);
  845. return ret;
  846. }
  847. static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
  848. struct scmi_xfer *xfer)
  849. {
  850. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  851. struct scmi_info *info = handle_to_scmi_info(pi->handle);
  852. xfer->rx.len = info->desc->max_msg_size;
  853. }
  854. #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
  855. /**
  856. * do_xfer_with_response() - Do one transfer and wait until the delayed
  857. * response is received
  858. *
  859. * @ph: Pointer to SCMI protocol handle
  860. * @xfer: Transfer to initiate and wait for response
  861. *
  862. * Using asynchronous commands in atomic/polling mode should be avoided since
  863. * it could cause long busy-waiting here, so ignore polling for the delayed
  864. * response and WARN if it was requested for this command transaction since
  865. * upper layers should refrain from issuing such kind of requests.
  866. *
  867. * The only other option would have been to refrain from using any asynchronous
  868. * command even if made available, when an atomic transport is detected, and
  869. * instead forcibly use the synchronous version (thing that can be easily
  870. * attained at the protocol layer), but this would also have led to longer
  871. * stalls of the channel for synchronous commands and possibly timeouts.
  872. * (in other words there is usually a good reason if a platform provides an
  873. * asynchronous version of a command and we should prefer to use it...just not
  874. * when using atomic/polling mode)
  875. *
  876. * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
  877. * return corresponding error, else if all goes well, return 0.
  878. */
  879. static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
  880. struct scmi_xfer *xfer)
  881. {
  882. int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
  883. DECLARE_COMPLETION_ONSTACK(async_response);
  884. xfer->async_done = &async_response;
  885. /*
  886. * Delayed responses should not be polled, so an async command should
  887. * not have been used when requiring an atomic/poll context; WARN and
  888. * perform instead a sleeping wait.
  889. * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
  890. */
  891. WARN_ON_ONCE(xfer->hdr.poll_completion);
  892. ret = do_xfer(ph, xfer);
  893. if (!ret) {
  894. if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
  895. dev_err(ph->dev,
  896. "timed out in delayed resp(caller: %pS)\n",
  897. (void *)_RET_IP_);
  898. ret = -ETIMEDOUT;
  899. } else if (xfer->hdr.status) {
  900. ret = scmi_to_linux_errno(xfer->hdr.status);
  901. }
  902. }
  903. xfer->async_done = NULL;
  904. return ret;
  905. }
  906. /**
  907. * xfer_get_init() - Allocate and initialise one message for transmit
  908. *
  909. * @ph: Pointer to SCMI protocol handle
  910. * @msg_id: Message identifier
  911. * @tx_size: transmit message size
  912. * @rx_size: receive message size
  913. * @p: pointer to the allocated and initialised message
  914. *
  915. * This function allocates the message using @scmi_xfer_get and
  916. * initialise the header.
  917. *
  918. * Return: 0 if all went fine with @p pointing to message, else
  919. * corresponding error.
  920. */
  921. static int xfer_get_init(const struct scmi_protocol_handle *ph,
  922. u8 msg_id, size_t tx_size, size_t rx_size,
  923. struct scmi_xfer **p)
  924. {
  925. int ret;
  926. struct scmi_xfer *xfer;
  927. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  928. struct scmi_info *info = handle_to_scmi_info(pi->handle);
  929. struct scmi_xfers_info *minfo = &info->tx_minfo;
  930. struct device *dev = info->dev;
  931. /* Ensure we have sane transfer sizes */
  932. if (rx_size > info->desc->max_msg_size ||
  933. tx_size > info->desc->max_msg_size)
  934. return -ERANGE;
  935. xfer = scmi_xfer_get(pi->handle, minfo, true);
  936. if (IS_ERR(xfer)) {
  937. ret = PTR_ERR(xfer);
  938. dev_err(dev, "failed to get free message slot(%d)\n", ret);
  939. return ret;
  940. }
  941. xfer->tx.len = tx_size;
  942. xfer->rx.len = rx_size ? : info->desc->max_msg_size;
  943. xfer->hdr.type = MSG_TYPE_COMMAND;
  944. xfer->hdr.id = msg_id;
  945. xfer->hdr.poll_completion = false;
  946. *p = xfer;
  947. return 0;
  948. }
  949. /**
  950. * version_get() - command to get the revision of the SCMI entity
  951. *
  952. * @ph: Pointer to SCMI protocol handle
  953. * @version: Holds returned version of protocol.
  954. *
  955. * Updates the SCMI information in the internal data structure.
  956. *
  957. * Return: 0 if all went fine, else return appropriate error.
  958. */
  959. static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
  960. {
  961. int ret;
  962. __le32 *rev_info;
  963. struct scmi_xfer *t;
  964. ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
  965. if (ret)
  966. return ret;
  967. ret = do_xfer(ph, t);
  968. if (!ret) {
  969. rev_info = t->rx.buf;
  970. *version = le32_to_cpu(*rev_info);
  971. }
  972. xfer_put(ph, t);
  973. return ret;
  974. }
  975. /**
  976. * scmi_set_protocol_priv - Set protocol specific data at init time
  977. *
  978. * @ph: A reference to the protocol handle.
  979. * @priv: The private data to set.
  980. *
  981. * Return: 0 on Success
  982. */
  983. static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
  984. void *priv)
  985. {
  986. struct scmi_protocol_instance *pi = ph_to_pi(ph);
  987. pi->priv = priv;
  988. return 0;
  989. }
  990. /**
  991. * scmi_get_protocol_priv - Set protocol specific data at init time
  992. *
  993. * @ph: A reference to the protocol handle.
  994. *
  995. * Return: Protocol private data if any was set.
  996. */
  997. static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
  998. {
  999. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  1000. return pi->priv;
  1001. }
  1002. static const struct scmi_xfer_ops xfer_ops = {
  1003. .version_get = version_get,
  1004. .xfer_get_init = xfer_get_init,
  1005. .reset_rx_to_maxsz = reset_rx_to_maxsz,
  1006. .do_xfer = do_xfer,
  1007. .do_xfer_with_response = do_xfer_with_response,
  1008. .xfer_put = xfer_put,
  1009. };
  1010. struct scmi_msg_resp_domain_name_get {
  1011. __le32 flags;
  1012. u8 name[SCMI_MAX_STR_SIZE];
  1013. };
  1014. /**
  1015. * scmi_common_extended_name_get - Common helper to get extended resources name
  1016. * @ph: A protocol handle reference.
  1017. * @cmd_id: The specific command ID to use.
  1018. * @res_id: The specific resource ID to use.
  1019. * @name: A pointer to the preallocated area where the retrieved name will be
  1020. * stored as a NULL terminated string.
  1021. * @len: The len in bytes of the @name char array.
  1022. *
  1023. * Return: 0 on Succcess
  1024. */
  1025. static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
  1026. u8 cmd_id, u32 res_id, char *name,
  1027. size_t len)
  1028. {
  1029. int ret;
  1030. struct scmi_xfer *t;
  1031. struct scmi_msg_resp_domain_name_get *resp;
  1032. ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
  1033. sizeof(*resp), &t);
  1034. if (ret)
  1035. goto out;
  1036. put_unaligned_le32(res_id, t->tx.buf);
  1037. resp = t->rx.buf;
  1038. ret = ph->xops->do_xfer(ph, t);
  1039. if (!ret)
  1040. strscpy(name, resp->name, len);
  1041. ph->xops->xfer_put(ph, t);
  1042. out:
  1043. if (ret)
  1044. dev_warn(ph->dev,
  1045. "Failed to get extended name - id:%u (ret:%d). Using %s\n",
  1046. res_id, ret, name);
  1047. return ret;
  1048. }
  1049. /**
  1050. * struct scmi_iterator - Iterator descriptor
  1051. * @msg: A reference to the message TX buffer; filled by @prepare_message with
  1052. * a proper custom command payload for each multi-part command request.
  1053. * @resp: A reference to the response RX buffer; used by @update_state and
  1054. * @process_response to parse the multi-part replies.
  1055. * @t: A reference to the underlying xfer initialized and used transparently by
  1056. * the iterator internal routines.
  1057. * @ph: A reference to the associated protocol handle to be used.
  1058. * @ops: A reference to the custom provided iterator operations.
  1059. * @state: The current iterator state; used and updated in turn by the iterators
  1060. * internal routines and by the caller-provided @scmi_iterator_ops.
  1061. * @priv: A reference to optional private data as provided by the caller and
  1062. * passed back to the @@scmi_iterator_ops.
  1063. */
  1064. struct scmi_iterator {
  1065. void *msg;
  1066. void *resp;
  1067. struct scmi_xfer *t;
  1068. const struct scmi_protocol_handle *ph;
  1069. struct scmi_iterator_ops *ops;
  1070. struct scmi_iterator_state state;
  1071. void *priv;
  1072. };
  1073. static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
  1074. struct scmi_iterator_ops *ops,
  1075. unsigned int max_resources, u8 msg_id,
  1076. size_t tx_size, void *priv)
  1077. {
  1078. int ret;
  1079. struct scmi_iterator *i;
  1080. i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
  1081. if (!i)
  1082. return ERR_PTR(-ENOMEM);
  1083. i->ph = ph;
  1084. i->ops = ops;
  1085. i->priv = priv;
  1086. ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
  1087. if (ret) {
  1088. devm_kfree(ph->dev, i);
  1089. return ERR_PTR(ret);
  1090. }
  1091. i->state.max_resources = max_resources;
  1092. i->msg = i->t->tx.buf;
  1093. i->resp = i->t->rx.buf;
  1094. return i;
  1095. }
  1096. static int scmi_iterator_run(void *iter)
  1097. {
  1098. int ret = -EINVAL;
  1099. struct scmi_iterator_ops *iops;
  1100. const struct scmi_protocol_handle *ph;
  1101. struct scmi_iterator_state *st;
  1102. struct scmi_iterator *i = iter;
  1103. if (!i || !i->ops || !i->ph)
  1104. return ret;
  1105. iops = i->ops;
  1106. ph = i->ph;
  1107. st = &i->state;
  1108. do {
  1109. iops->prepare_message(i->msg, st->desc_index, i->priv);
  1110. ret = ph->xops->do_xfer(ph, i->t);
  1111. if (ret)
  1112. break;
  1113. st->rx_len = i->t->rx.len;
  1114. ret = iops->update_state(st, i->resp, i->priv);
  1115. if (ret)
  1116. break;
  1117. if (st->num_returned > st->max_resources - st->desc_index) {
  1118. dev_err(ph->dev,
  1119. "No. of resources can't exceed %d\n",
  1120. st->max_resources);
  1121. ret = -EINVAL;
  1122. break;
  1123. }
  1124. for (st->loop_idx = 0; st->loop_idx < st->num_returned;
  1125. st->loop_idx++) {
  1126. ret = iops->process_response(ph, i->resp, st, i->priv);
  1127. if (ret)
  1128. goto out;
  1129. }
  1130. st->desc_index += st->num_returned;
  1131. ph->xops->reset_rx_to_maxsz(ph, i->t);
  1132. /*
  1133. * check for both returned and remaining to avoid infinite
  1134. * loop due to buggy firmware
  1135. */
  1136. } while (st->num_returned && st->num_remaining);
  1137. out:
  1138. /* Finalize and destroy iterator */
  1139. ph->xops->xfer_put(ph, i->t);
  1140. devm_kfree(ph->dev, i);
  1141. return ret;
  1142. }
  1143. struct scmi_msg_get_fc_info {
  1144. __le32 domain;
  1145. __le32 message_id;
  1146. };
  1147. struct scmi_msg_resp_desc_fc {
  1148. __le32 attr;
  1149. #define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
  1150. #define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
  1151. __le32 rate_limit;
  1152. __le32 chan_addr_low;
  1153. __le32 chan_addr_high;
  1154. __le32 chan_size;
  1155. __le32 db_addr_low;
  1156. __le32 db_addr_high;
  1157. __le32 db_set_lmask;
  1158. __le32 db_set_hmask;
  1159. __le32 db_preserve_lmask;
  1160. __le32 db_preserve_hmask;
  1161. };
  1162. static void
  1163. scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
  1164. u8 describe_id, u32 message_id, u32 valid_size,
  1165. u32 domain, void __iomem **p_addr,
  1166. struct scmi_fc_db_info **p_db)
  1167. {
  1168. int ret;
  1169. u32 flags;
  1170. u64 phys_addr;
  1171. u8 size;
  1172. void __iomem *addr;
  1173. struct scmi_xfer *t;
  1174. struct scmi_fc_db_info *db = NULL;
  1175. struct scmi_msg_get_fc_info *info;
  1176. struct scmi_msg_resp_desc_fc *resp;
  1177. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  1178. if (!p_addr) {
  1179. ret = -EINVAL;
  1180. goto err_out;
  1181. }
  1182. ret = ph->xops->xfer_get_init(ph, describe_id,
  1183. sizeof(*info), sizeof(*resp), &t);
  1184. if (ret)
  1185. goto err_out;
  1186. info = t->tx.buf;
  1187. info->domain = cpu_to_le32(domain);
  1188. info->message_id = cpu_to_le32(message_id);
  1189. /*
  1190. * Bail out on error leaving fc_info addresses zeroed; this includes
  1191. * the case in which the requested domain/message_id does NOT support
  1192. * fastchannels at all.
  1193. */
  1194. ret = ph->xops->do_xfer(ph, t);
  1195. if (ret)
  1196. goto err_xfer;
  1197. resp = t->rx.buf;
  1198. flags = le32_to_cpu(resp->attr);
  1199. size = le32_to_cpu(resp->chan_size);
  1200. if (size != valid_size) {
  1201. ret = -EINVAL;
  1202. goto err_xfer;
  1203. }
  1204. phys_addr = le32_to_cpu(resp->chan_addr_low);
  1205. phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
  1206. addr = devm_ioremap(ph->dev, phys_addr, size);
  1207. if (!addr) {
  1208. ret = -EADDRNOTAVAIL;
  1209. goto err_xfer;
  1210. }
  1211. *p_addr = addr;
  1212. if (p_db && SUPPORTS_DOORBELL(flags)) {
  1213. db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
  1214. if (!db) {
  1215. ret = -ENOMEM;
  1216. goto err_db;
  1217. }
  1218. size = 1 << DOORBELL_REG_WIDTH(flags);
  1219. phys_addr = le32_to_cpu(resp->db_addr_low);
  1220. phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
  1221. addr = devm_ioremap(ph->dev, phys_addr, size);
  1222. if (!addr) {
  1223. ret = -EADDRNOTAVAIL;
  1224. goto err_db_mem;
  1225. }
  1226. db->addr = addr;
  1227. db->width = size;
  1228. db->set = le32_to_cpu(resp->db_set_lmask);
  1229. db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
  1230. db->mask = le32_to_cpu(resp->db_preserve_lmask);
  1231. db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
  1232. *p_db = db;
  1233. }
  1234. ph->xops->xfer_put(ph, t);
  1235. dev_dbg(ph->dev,
  1236. "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
  1237. pi->proto->id, message_id, domain);
  1238. return;
  1239. err_db_mem:
  1240. devm_kfree(ph->dev, db);
  1241. err_db:
  1242. *p_addr = NULL;
  1243. err_xfer:
  1244. ph->xops->xfer_put(ph, t);
  1245. err_out:
  1246. dev_warn(ph->dev,
  1247. "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
  1248. pi->proto->id, message_id, domain, ret);
  1249. }
  1250. #define SCMI_PROTO_FC_RING_DB(w) \
  1251. do { \
  1252. u##w val = 0; \
  1253. \
  1254. if (db->mask) \
  1255. val = ioread##w(db->addr) & db->mask; \
  1256. iowrite##w((u##w)db->set | val, db->addr); \
  1257. } while (0)
  1258. static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
  1259. {
  1260. if (!db || !db->addr)
  1261. return;
  1262. if (db->width == 1)
  1263. SCMI_PROTO_FC_RING_DB(8);
  1264. else if (db->width == 2)
  1265. SCMI_PROTO_FC_RING_DB(16);
  1266. else if (db->width == 4)
  1267. SCMI_PROTO_FC_RING_DB(32);
  1268. else /* db->width == 8 */
  1269. #ifdef CONFIG_64BIT
  1270. SCMI_PROTO_FC_RING_DB(64);
  1271. #else
  1272. {
  1273. u64 val = 0;
  1274. if (db->mask)
  1275. val = ioread64_hi_lo(db->addr) & db->mask;
  1276. iowrite64_hi_lo(db->set | val, db->addr);
  1277. }
  1278. #endif
  1279. }
  1280. static const struct scmi_proto_helpers_ops helpers_ops = {
  1281. .extended_name_get = scmi_common_extended_name_get,
  1282. .iter_response_init = scmi_iterator_init,
  1283. .iter_response_run = scmi_iterator_run,
  1284. .fastchannel_init = scmi_common_fastchannel_init,
  1285. .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
  1286. };
  1287. /**
  1288. * scmi_revision_area_get - Retrieve version memory area.
  1289. *
  1290. * @ph: A reference to the protocol handle.
  1291. *
  1292. * A helper to grab the version memory area reference during SCMI Base protocol
  1293. * initialization.
  1294. *
  1295. * Return: A reference to the version memory area associated to the SCMI
  1296. * instance underlying this protocol handle.
  1297. */
  1298. struct scmi_revision_info *
  1299. scmi_revision_area_get(const struct scmi_protocol_handle *ph)
  1300. {
  1301. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  1302. return pi->handle->version;
  1303. }
  1304. /**
  1305. * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
  1306. * instance descriptor.
  1307. * @info: The reference to the related SCMI instance.
  1308. * @proto: The protocol descriptor.
  1309. *
  1310. * Allocate a new protocol instance descriptor, using the provided @proto
  1311. * description, against the specified SCMI instance @info, and initialize it;
  1312. * all resources management is handled via a dedicated per-protocol devres
  1313. * group.
  1314. *
  1315. * Context: Assumes to be called with @protocols_mtx already acquired.
  1316. * Return: A reference to a freshly allocated and initialized protocol instance
  1317. * or ERR_PTR on failure. On failure the @proto reference is at first
  1318. * put using @scmi_protocol_put() before releasing all the devres group.
  1319. */
  1320. static struct scmi_protocol_instance *
  1321. scmi_alloc_init_protocol_instance(struct scmi_info *info,
  1322. const struct scmi_protocol *proto)
  1323. {
  1324. int ret = -ENOMEM;
  1325. void *gid;
  1326. struct scmi_protocol_instance *pi;
  1327. const struct scmi_handle *handle = &info->handle;
  1328. /* Protocol specific devres group */
  1329. gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
  1330. if (!gid) {
  1331. scmi_protocol_put(proto->id);
  1332. goto out;
  1333. }
  1334. pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
  1335. if (!pi)
  1336. goto clean;
  1337. pi->gid = gid;
  1338. pi->proto = proto;
  1339. pi->handle = handle;
  1340. pi->ph.dev = handle->dev;
  1341. pi->ph.xops = &xfer_ops;
  1342. pi->ph.hops = &helpers_ops;
  1343. pi->ph.set_priv = scmi_set_protocol_priv;
  1344. pi->ph.get_priv = scmi_get_protocol_priv;
  1345. refcount_set(&pi->users, 1);
  1346. /* proto->init is assured NON NULL by scmi_protocol_register */
  1347. ret = pi->proto->instance_init(&pi->ph);
  1348. if (ret)
  1349. goto clean;
  1350. ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
  1351. GFP_KERNEL);
  1352. if (ret != proto->id)
  1353. goto clean;
  1354. /*
  1355. * Warn but ignore events registration errors since we do not want
  1356. * to skip whole protocols if their notifications are messed up.
  1357. */
  1358. if (pi->proto->events) {
  1359. ret = scmi_register_protocol_events(handle, pi->proto->id,
  1360. &pi->ph,
  1361. pi->proto->events);
  1362. if (ret)
  1363. dev_warn(handle->dev,
  1364. "Protocol:%X - Events Registration Failed - err:%d\n",
  1365. pi->proto->id, ret);
  1366. }
  1367. devres_close_group(handle->dev, pi->gid);
  1368. dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
  1369. return pi;
  1370. clean:
  1371. /* Take care to put the protocol module's owner before releasing all */
  1372. scmi_protocol_put(proto->id);
  1373. devres_release_group(handle->dev, gid);
  1374. out:
  1375. return ERR_PTR(ret);
  1376. }
  1377. /**
  1378. * scmi_get_protocol_instance - Protocol initialization helper.
  1379. * @handle: A reference to the SCMI platform instance.
  1380. * @protocol_id: The protocol being requested.
  1381. *
  1382. * In case the required protocol has never been requested before for this
  1383. * instance, allocate and initialize all the needed structures while handling
  1384. * resource allocation with a dedicated per-protocol devres subgroup.
  1385. *
  1386. * Return: A reference to an initialized protocol instance or error on failure:
  1387. * in particular returns -EPROBE_DEFER when the desired protocol could
  1388. * NOT be found.
  1389. */
  1390. static struct scmi_protocol_instance * __must_check
  1391. scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
  1392. {
  1393. struct scmi_protocol_instance *pi;
  1394. struct scmi_info *info = handle_to_scmi_info(handle);
  1395. mutex_lock(&info->protocols_mtx);
  1396. pi = idr_find(&info->protocols, protocol_id);
  1397. if (pi) {
  1398. refcount_inc(&pi->users);
  1399. } else {
  1400. const struct scmi_protocol *proto;
  1401. /* Fails if protocol not registered on bus */
  1402. proto = scmi_protocol_get(protocol_id);
  1403. if (proto)
  1404. pi = scmi_alloc_init_protocol_instance(info, proto);
  1405. else
  1406. pi = ERR_PTR(-EPROBE_DEFER);
  1407. }
  1408. mutex_unlock(&info->protocols_mtx);
  1409. return pi;
  1410. }
  1411. /**
  1412. * scmi_protocol_acquire - Protocol acquire
  1413. * @handle: A reference to the SCMI platform instance.
  1414. * @protocol_id: The protocol being requested.
  1415. *
  1416. * Register a new user for the requested protocol on the specified SCMI
  1417. * platform instance, possibly triggering its initialization on first user.
  1418. *
  1419. * Return: 0 if protocol was acquired successfully.
  1420. */
  1421. int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
  1422. {
  1423. return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
  1424. }
  1425. /**
  1426. * scmi_protocol_release - Protocol de-initialization helper.
  1427. * @handle: A reference to the SCMI platform instance.
  1428. * @protocol_id: The protocol being requested.
  1429. *
  1430. * Remove one user for the specified protocol and triggers de-initialization
  1431. * and resources de-allocation once the last user has gone.
  1432. */
  1433. void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
  1434. {
  1435. struct scmi_info *info = handle_to_scmi_info(handle);
  1436. struct scmi_protocol_instance *pi;
  1437. mutex_lock(&info->protocols_mtx);
  1438. pi = idr_find(&info->protocols, protocol_id);
  1439. if (WARN_ON(!pi))
  1440. goto out;
  1441. if (refcount_dec_and_test(&pi->users)) {
  1442. void *gid = pi->gid;
  1443. if (pi->proto->events)
  1444. scmi_deregister_protocol_events(handle, protocol_id);
  1445. if (pi->proto->instance_deinit)
  1446. pi->proto->instance_deinit(&pi->ph);
  1447. idr_remove(&info->protocols, protocol_id);
  1448. scmi_protocol_put(protocol_id);
  1449. devres_release_group(handle->dev, gid);
  1450. dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
  1451. protocol_id);
  1452. }
  1453. out:
  1454. mutex_unlock(&info->protocols_mtx);
  1455. }
  1456. void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
  1457. u8 *prot_imp)
  1458. {
  1459. const struct scmi_protocol_instance *pi = ph_to_pi(ph);
  1460. struct scmi_info *info = handle_to_scmi_info(pi->handle);
  1461. info->protocols_imp = prot_imp;
  1462. }
  1463. static bool
  1464. scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
  1465. {
  1466. int i;
  1467. struct scmi_info *info = handle_to_scmi_info(handle);
  1468. struct scmi_revision_info *rev = handle->version;
  1469. if (!info->protocols_imp)
  1470. return false;
  1471. for (i = 0; i < rev->num_protocols; i++)
  1472. if (info->protocols_imp[i] == prot_id)
  1473. return true;
  1474. return false;
  1475. }
  1476. struct scmi_protocol_devres {
  1477. const struct scmi_handle *handle;
  1478. u8 protocol_id;
  1479. };
  1480. static void scmi_devm_release_protocol(struct device *dev, void *res)
  1481. {
  1482. struct scmi_protocol_devres *dres = res;
  1483. scmi_protocol_release(dres->handle, dres->protocol_id);
  1484. }
  1485. static struct scmi_protocol_instance __must_check *
  1486. scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
  1487. {
  1488. struct scmi_protocol_instance *pi;
  1489. struct scmi_protocol_devres *dres;
  1490. dres = devres_alloc(scmi_devm_release_protocol,
  1491. sizeof(*dres), GFP_KERNEL);
  1492. if (!dres)
  1493. return ERR_PTR(-ENOMEM);
  1494. pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
  1495. if (IS_ERR(pi)) {
  1496. devres_free(dres);
  1497. return pi;
  1498. }
  1499. dres->handle = sdev->handle;
  1500. dres->protocol_id = protocol_id;
  1501. devres_add(&sdev->dev, dres);
  1502. return pi;
  1503. }
  1504. /**
  1505. * scmi_devm_protocol_get - Devres managed get protocol operations and handle
  1506. * @sdev: A reference to an scmi_device whose embedded struct device is to
  1507. * be used for devres accounting.
  1508. * @protocol_id: The protocol being requested.
  1509. * @ph: A pointer reference used to pass back the associated protocol handle.
  1510. *
  1511. * Get hold of a protocol accounting for its usage, eventually triggering its
  1512. * initialization, and returning the protocol specific operations and related
  1513. * protocol handle which will be used as first argument in most of the
  1514. * protocols operations methods.
  1515. * Being a devres based managed method, protocol hold will be automatically
  1516. * released, and possibly de-initialized on last user, once the SCMI driver
  1517. * owning the scmi_device is unbound from it.
  1518. *
  1519. * Return: A reference to the requested protocol operations or error.
  1520. * Must be checked for errors by caller.
  1521. */
  1522. static const void __must_check *
  1523. scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
  1524. struct scmi_protocol_handle **ph)
  1525. {
  1526. struct scmi_protocol_instance *pi;
  1527. if (!ph)
  1528. return ERR_PTR(-EINVAL);
  1529. pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
  1530. if (IS_ERR(pi))
  1531. return pi;
  1532. *ph = &pi->ph;
  1533. return pi->proto->ops;
  1534. }
  1535. /**
  1536. * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
  1537. * @sdev: A reference to an scmi_device whose embedded struct device is to
  1538. * be used for devres accounting.
  1539. * @protocol_id: The protocol being requested.
  1540. *
  1541. * Get hold of a protocol accounting for its usage, possibly triggering its
  1542. * initialization but without getting access to its protocol specific operations
  1543. * and handle.
  1544. *
  1545. * Being a devres based managed method, protocol hold will be automatically
  1546. * released, and possibly de-initialized on last user, once the SCMI driver
  1547. * owning the scmi_device is unbound from it.
  1548. *
  1549. * Return: 0 on SUCCESS
  1550. */
  1551. static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
  1552. u8 protocol_id)
  1553. {
  1554. struct scmi_protocol_instance *pi;
  1555. pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
  1556. if (IS_ERR(pi))
  1557. return PTR_ERR(pi);
  1558. return 0;
  1559. }
  1560. static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
  1561. {
  1562. struct scmi_protocol_devres *dres = res;
  1563. if (WARN_ON(!dres || !data))
  1564. return 0;
  1565. return dres->protocol_id == *((u8 *)data);
  1566. }
  1567. /**
  1568. * scmi_devm_protocol_put - Devres managed put protocol operations and handle
  1569. * @sdev: A reference to an scmi_device whose embedded struct device is to
  1570. * be used for devres accounting.
  1571. * @protocol_id: The protocol being requested.
  1572. *
  1573. * Explicitly release a protocol hold previously obtained calling the above
  1574. * @scmi_devm_protocol_get.
  1575. */
  1576. static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
  1577. {
  1578. int ret;
  1579. ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
  1580. scmi_devm_protocol_match, &protocol_id);
  1581. WARN_ON(ret);
  1582. }
  1583. /**
  1584. * scmi_is_transport_atomic - Method to check if underlying transport for an
  1585. * SCMI instance is configured as atomic.
  1586. *
  1587. * @handle: A reference to the SCMI platform instance.
  1588. * @atomic_threshold: An optional return value for the system wide currently
  1589. * configured threshold for atomic operations.
  1590. *
  1591. * Return: True if transport is configured as atomic
  1592. */
  1593. static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
  1594. unsigned int *atomic_threshold)
  1595. {
  1596. bool ret;
  1597. struct scmi_info *info = handle_to_scmi_info(handle);
  1598. ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
  1599. if (ret && atomic_threshold)
  1600. *atomic_threshold = info->atomic_threshold;
  1601. return ret;
  1602. }
  1603. static inline
  1604. struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
  1605. {
  1606. info->users++;
  1607. return &info->handle;
  1608. }
  1609. /**
  1610. * scmi_handle_get() - Get the SCMI handle for a device
  1611. *
  1612. * @dev: pointer to device for which we want SCMI handle
  1613. *
  1614. * NOTE: The function does not track individual clients of the framework
  1615. * and is expected to be maintained by caller of SCMI protocol library.
  1616. * scmi_handle_put must be balanced with successful scmi_handle_get
  1617. *
  1618. * Return: pointer to handle if successful, NULL on error
  1619. */
  1620. struct scmi_handle *scmi_handle_get(struct device *dev)
  1621. {
  1622. struct list_head *p;
  1623. struct scmi_info *info;
  1624. struct scmi_handle *handle = NULL;
  1625. mutex_lock(&scmi_list_mutex);
  1626. list_for_each(p, &scmi_list) {
  1627. info = list_entry(p, struct scmi_info, node);
  1628. if (dev->parent == info->dev) {
  1629. handle = scmi_handle_get_from_info_unlocked(info);
  1630. break;
  1631. }
  1632. }
  1633. mutex_unlock(&scmi_list_mutex);
  1634. return handle;
  1635. }
  1636. /**
  1637. * scmi_handle_put() - Release the handle acquired by scmi_handle_get
  1638. *
  1639. * @handle: handle acquired by scmi_handle_get
  1640. *
  1641. * NOTE: The function does not track individual clients of the framework
  1642. * and is expected to be maintained by caller of SCMI protocol library.
  1643. * scmi_handle_put must be balanced with successful scmi_handle_get
  1644. *
  1645. * Return: 0 is successfully released
  1646. * if null was passed, it returns -EINVAL;
  1647. */
  1648. int scmi_handle_put(const struct scmi_handle *handle)
  1649. {
  1650. struct scmi_info *info;
  1651. if (!handle)
  1652. return -EINVAL;
  1653. info = handle_to_scmi_info(handle);
  1654. mutex_lock(&scmi_list_mutex);
  1655. if (!WARN_ON(!info->users))
  1656. info->users--;
  1657. mutex_unlock(&scmi_list_mutex);
  1658. return 0;
  1659. }
  1660. static int __scmi_xfer_info_init(struct scmi_info *sinfo,
  1661. struct scmi_xfers_info *info)
  1662. {
  1663. int i;
  1664. struct scmi_xfer *xfer;
  1665. struct device *dev = sinfo->dev;
  1666. const struct scmi_desc *desc = sinfo->desc;
  1667. /* Pre-allocated messages, no more than what hdr.seq can support */
  1668. if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
  1669. dev_err(dev,
  1670. "Invalid maximum messages %d, not in range [1 - %lu]\n",
  1671. info->max_msg, MSG_TOKEN_MAX);
  1672. return -EINVAL;
  1673. }
  1674. hash_init(info->pending_xfers);
  1675. /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
  1676. info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
  1677. sizeof(long), GFP_KERNEL);
  1678. if (!info->xfer_alloc_table)
  1679. return -ENOMEM;
  1680. /*
  1681. * Preallocate a number of xfers equal to max inflight messages,
  1682. * pre-initialize the buffer pointer to pre-allocated buffers and
  1683. * attach all of them to the free list
  1684. */
  1685. INIT_HLIST_HEAD(&info->free_xfers);
  1686. for (i = 0; i < info->max_msg; i++) {
  1687. xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
  1688. if (!xfer)
  1689. return -ENOMEM;
  1690. xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
  1691. GFP_KERNEL);
  1692. if (!xfer->rx.buf)
  1693. return -ENOMEM;
  1694. xfer->tx.buf = xfer->rx.buf;
  1695. init_completion(&xfer->done);
  1696. spin_lock_init(&xfer->lock);
  1697. /* Add initialized xfer to the free list */
  1698. hlist_add_head(&xfer->node, &info->free_xfers);
  1699. }
  1700. spin_lock_init(&info->xfer_lock);
  1701. return 0;
  1702. }
  1703. static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
  1704. {
  1705. const struct scmi_desc *desc = sinfo->desc;
  1706. if (!desc->ops->get_max_msg) {
  1707. sinfo->tx_minfo.max_msg = desc->max_msg;
  1708. sinfo->rx_minfo.max_msg = desc->max_msg;
  1709. } else {
  1710. struct scmi_chan_info *base_cinfo;
  1711. base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
  1712. if (!base_cinfo)
  1713. return -EINVAL;
  1714. sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
  1715. /* RX channel is optional so can be skipped */
  1716. base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
  1717. if (base_cinfo)
  1718. sinfo->rx_minfo.max_msg =
  1719. desc->ops->get_max_msg(base_cinfo);
  1720. }
  1721. return 0;
  1722. }
  1723. static int scmi_xfer_info_init(struct scmi_info *sinfo)
  1724. {
  1725. int ret;
  1726. ret = scmi_channels_max_msg_configure(sinfo);
  1727. if (ret)
  1728. return ret;
  1729. ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
  1730. if (!ret && !idr_is_empty(&sinfo->rx_idr))
  1731. ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
  1732. return ret;
  1733. }
  1734. static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
  1735. int prot_id, bool tx)
  1736. {
  1737. int ret, idx;
  1738. struct scmi_chan_info *cinfo;
  1739. struct idr *idr;
  1740. /* Transmit channel is first entry i.e. index 0 */
  1741. idx = tx ? 0 : 1;
  1742. idr = tx ? &info->tx_idr : &info->rx_idr;
  1743. /* check if already allocated, used for multiple device per protocol */
  1744. cinfo = idr_find(idr, prot_id);
  1745. if (cinfo)
  1746. return 0;
  1747. if (!info->desc->ops->chan_available(dev, idx)) {
  1748. cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
  1749. if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
  1750. return -EINVAL;
  1751. goto idr_alloc;
  1752. }
  1753. cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
  1754. if (!cinfo)
  1755. return -ENOMEM;
  1756. cinfo->dev = dev;
  1757. cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
  1758. ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
  1759. if (ret)
  1760. return ret;
  1761. if (tx && is_polling_required(cinfo, info)) {
  1762. if (is_transport_polling_capable(info))
  1763. dev_info(dev,
  1764. "Enabled polling mode TX channel - prot_id:%d\n",
  1765. prot_id);
  1766. else
  1767. dev_warn(dev,
  1768. "Polling mode NOT supported by transport.\n");
  1769. }
  1770. idr_alloc:
  1771. ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
  1772. if (ret != prot_id) {
  1773. dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
  1774. return ret;
  1775. }
  1776. cinfo->handle = &info->handle;
  1777. return 0;
  1778. }
  1779. static inline int
  1780. scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
  1781. {
  1782. int ret = scmi_chan_setup(info, dev, prot_id, true);
  1783. if (!ret) {
  1784. /* Rx is optional, report only memory errors */
  1785. ret = scmi_chan_setup(info, dev, prot_id, false);
  1786. if (ret && ret != -ENOMEM)
  1787. ret = 0;
  1788. }
  1789. return ret;
  1790. }
  1791. /**
  1792. * scmi_get_protocol_device - Helper to get/create an SCMI device.
  1793. *
  1794. * @np: A device node representing a valid active protocols for the referred
  1795. * SCMI instance.
  1796. * @info: The referred SCMI instance for which we are getting/creating this
  1797. * device.
  1798. * @prot_id: The protocol ID.
  1799. * @name: The device name.
  1800. *
  1801. * Referring to the specific SCMI instance identified by @info, this helper
  1802. * takes care to return a properly initialized device matching the requested
  1803. * @proto_id and @name: if device was still not existent it is created as a
  1804. * child of the specified SCMI instance @info and its transport properly
  1805. * initialized as usual.
  1806. *
  1807. * Return: A properly initialized scmi device, NULL otherwise.
  1808. */
  1809. static inline struct scmi_device *
  1810. scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
  1811. int prot_id, const char *name)
  1812. {
  1813. struct scmi_device *sdev;
  1814. /* Already created for this parent SCMI instance ? */
  1815. sdev = scmi_child_dev_find(info->dev, prot_id, name);
  1816. if (sdev)
  1817. return sdev;
  1818. mutex_lock(&scmi_syspower_mtx);
  1819. if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) {
  1820. dev_warn(info->dev,
  1821. "SCMI SystemPower protocol device must be unique !\n");
  1822. mutex_unlock(&scmi_syspower_mtx);
  1823. return NULL;
  1824. }
  1825. pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
  1826. sdev = scmi_device_create(np, info->dev, prot_id, name);
  1827. if (!sdev) {
  1828. dev_err(info->dev, "failed to create %d protocol device\n",
  1829. prot_id);
  1830. mutex_unlock(&scmi_syspower_mtx);
  1831. return NULL;
  1832. }
  1833. if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
  1834. dev_err(&sdev->dev, "failed to setup transport\n");
  1835. scmi_device_destroy(sdev);
  1836. mutex_unlock(&scmi_syspower_mtx);
  1837. return NULL;
  1838. }
  1839. if (prot_id == SCMI_PROTOCOL_SYSTEM)
  1840. scmi_syspower_registered = true;
  1841. mutex_unlock(&scmi_syspower_mtx);
  1842. return sdev;
  1843. }
  1844. static inline void
  1845. scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
  1846. int prot_id, const char *name)
  1847. {
  1848. struct scmi_device *sdev;
  1849. sdev = scmi_get_protocol_device(np, info, prot_id, name);
  1850. if (!sdev)
  1851. return;
  1852. /* setup handle now as the transport is ready */
  1853. scmi_set_handle(sdev);
  1854. }
  1855. /**
  1856. * scmi_create_protocol_devices - Create devices for all pending requests for
  1857. * this SCMI instance.
  1858. *
  1859. * @np: The device node describing the protocol
  1860. * @info: The SCMI instance descriptor
  1861. * @prot_id: The protocol ID
  1862. *
  1863. * All devices previously requested for this instance (if any) are found and
  1864. * created by scanning the proper @&scmi_requested_devices entry.
  1865. */
  1866. static void scmi_create_protocol_devices(struct device_node *np,
  1867. struct scmi_info *info, int prot_id)
  1868. {
  1869. struct list_head *phead;
  1870. mutex_lock(&scmi_requested_devices_mtx);
  1871. phead = idr_find(&scmi_requested_devices, prot_id);
  1872. if (phead) {
  1873. struct scmi_requested_dev *rdev;
  1874. list_for_each_entry(rdev, phead, node)
  1875. scmi_create_protocol_device(np, info, prot_id,
  1876. rdev->id_table->name);
  1877. }
  1878. mutex_unlock(&scmi_requested_devices_mtx);
  1879. }
  1880. /**
  1881. * scmi_protocol_device_request - Helper to request a device
  1882. *
  1883. * @id_table: A protocol/name pair descriptor for the device to be created.
  1884. *
  1885. * This helper let an SCMI driver request specific devices identified by the
  1886. * @id_table to be created for each active SCMI instance.
  1887. *
  1888. * The requested device name MUST NOT be already existent for any protocol;
  1889. * at first the freshly requested @id_table is annotated in the IDR table
  1890. * @scmi_requested_devices, then a matching device is created for each already
  1891. * active SCMI instance. (if any)
  1892. *
  1893. * This way the requested device is created straight-away for all the already
  1894. * initialized(probed) SCMI instances (handles) and it remains also annotated
  1895. * as pending creation if the requesting SCMI driver was loaded before some
  1896. * SCMI instance and related transports were available: when such late instance
  1897. * is probed, its probe will take care to scan the list of pending requested
  1898. * devices and create those on its own (see @scmi_create_protocol_devices and
  1899. * its enclosing loop)
  1900. *
  1901. * Return: 0 on Success
  1902. */
  1903. int scmi_protocol_device_request(const struct scmi_device_id *id_table)
  1904. {
  1905. int ret = 0;
  1906. unsigned int id = 0;
  1907. struct list_head *head, *phead = NULL;
  1908. struct scmi_requested_dev *rdev;
  1909. struct scmi_info *info;
  1910. pr_debug("Requesting SCMI device (%s) for protocol %x\n",
  1911. id_table->name, id_table->protocol_id);
  1912. /*
  1913. * Search for the matching protocol rdev list and then search
  1914. * of any existent equally named device...fails if any duplicate found.
  1915. */
  1916. mutex_lock(&scmi_requested_devices_mtx);
  1917. idr_for_each_entry(&scmi_requested_devices, head, id) {
  1918. if (!phead) {
  1919. /* A list found registered in the IDR is never empty */
  1920. rdev = list_first_entry(head, struct scmi_requested_dev,
  1921. node);
  1922. if (rdev->id_table->protocol_id ==
  1923. id_table->protocol_id)
  1924. phead = head;
  1925. }
  1926. list_for_each_entry(rdev, head, node) {
  1927. if (!strcmp(rdev->id_table->name, id_table->name)) {
  1928. pr_err("Ignoring duplicate request [%d] %s\n",
  1929. rdev->id_table->protocol_id,
  1930. rdev->id_table->name);
  1931. ret = -EINVAL;
  1932. goto out;
  1933. }
  1934. }
  1935. }
  1936. /*
  1937. * No duplicate found for requested id_table, so let's create a new
  1938. * requested device entry for this new valid request.
  1939. */
  1940. rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
  1941. if (!rdev) {
  1942. ret = -ENOMEM;
  1943. goto out;
  1944. }
  1945. rdev->id_table = id_table;
  1946. /*
  1947. * Append the new requested device table descriptor to the head of the
  1948. * related protocol list, eventually creating such head if not already
  1949. * there.
  1950. */
  1951. if (!phead) {
  1952. phead = kzalloc(sizeof(*phead), GFP_KERNEL);
  1953. if (!phead) {
  1954. kfree(rdev);
  1955. ret = -ENOMEM;
  1956. goto out;
  1957. }
  1958. INIT_LIST_HEAD(phead);
  1959. ret = idr_alloc(&scmi_requested_devices, (void *)phead,
  1960. id_table->protocol_id,
  1961. id_table->protocol_id + 1, GFP_KERNEL);
  1962. if (ret != id_table->protocol_id) {
  1963. pr_err("Failed to save SCMI device - ret:%d\n", ret);
  1964. kfree(rdev);
  1965. kfree(phead);
  1966. ret = -EINVAL;
  1967. goto out;
  1968. }
  1969. ret = 0;
  1970. }
  1971. list_add(&rdev->node, phead);
  1972. /*
  1973. * Now effectively create and initialize the requested device for every
  1974. * already initialized SCMI instance which has registered the requested
  1975. * protocol as a valid active one: i.e. defined in DT and supported by
  1976. * current platform FW.
  1977. */
  1978. mutex_lock(&scmi_list_mutex);
  1979. list_for_each_entry(info, &scmi_list, node) {
  1980. struct device_node *child;
  1981. child = idr_find(&info->active_protocols,
  1982. id_table->protocol_id);
  1983. if (child) {
  1984. struct scmi_device *sdev;
  1985. sdev = scmi_get_protocol_device(child, info,
  1986. id_table->protocol_id,
  1987. id_table->name);
  1988. if (sdev) {
  1989. /* Set handle if not already set: device existed */
  1990. if (!sdev->handle)
  1991. sdev->handle =
  1992. scmi_handle_get_from_info_unlocked(info);
  1993. /* Relink consumer and suppliers */
  1994. if (sdev->handle)
  1995. scmi_device_link_add(&sdev->dev,
  1996. sdev->handle->dev);
  1997. }
  1998. } else {
  1999. dev_err(info->dev,
  2000. "Failed. SCMI protocol %d not active.\n",
  2001. id_table->protocol_id);
  2002. }
  2003. }
  2004. mutex_unlock(&scmi_list_mutex);
  2005. out:
  2006. mutex_unlock(&scmi_requested_devices_mtx);
  2007. return ret;
  2008. }
  2009. /**
  2010. * scmi_protocol_device_unrequest - Helper to unrequest a device
  2011. *
  2012. * @id_table: A protocol/name pair descriptor for the device to be unrequested.
  2013. *
  2014. * An helper to let an SCMI driver release its request about devices; note that
  2015. * devices are created and initialized once the first SCMI driver request them
  2016. * but they destroyed only on SCMI core unloading/unbinding.
  2017. *
  2018. * The current SCMI transport layer uses such devices as internal references and
  2019. * as such they could be shared as same transport between multiple drivers so
  2020. * that cannot be safely destroyed till the whole SCMI stack is removed.
  2021. * (unless adding further burden of refcounting.)
  2022. */
  2023. void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
  2024. {
  2025. struct list_head *phead;
  2026. pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
  2027. id_table->name, id_table->protocol_id);
  2028. mutex_lock(&scmi_requested_devices_mtx);
  2029. phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
  2030. if (phead) {
  2031. struct scmi_requested_dev *victim, *tmp;
  2032. list_for_each_entry_safe(victim, tmp, phead, node) {
  2033. if (!strcmp(victim->id_table->name, id_table->name)) {
  2034. list_del(&victim->node);
  2035. kfree(victim);
  2036. break;
  2037. }
  2038. }
  2039. if (list_empty(phead)) {
  2040. idr_remove(&scmi_requested_devices,
  2041. id_table->protocol_id);
  2042. kfree(phead);
  2043. }
  2044. }
  2045. mutex_unlock(&scmi_requested_devices_mtx);
  2046. }
  2047. static int scmi_cleanup_txrx_channels(struct scmi_info *info)
  2048. {
  2049. int ret;
  2050. struct idr *idr = &info->tx_idr;
  2051. ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
  2052. idr_destroy(&info->tx_idr);
  2053. idr = &info->rx_idr;
  2054. ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
  2055. idr_destroy(&info->rx_idr);
  2056. return ret;
  2057. }
  2058. static int scmi_probe(struct platform_device *pdev)
  2059. {
  2060. int ret;
  2061. struct scmi_handle *handle;
  2062. const struct scmi_desc *desc;
  2063. struct scmi_info *info;
  2064. struct device *dev = &pdev->dev;
  2065. struct device_node *child, *np = dev->of_node;
  2066. desc = of_device_get_match_data(dev);
  2067. if (!desc)
  2068. return -EINVAL;
  2069. info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
  2070. if (!info)
  2071. return -ENOMEM;
  2072. info->dev = dev;
  2073. info->desc = desc;
  2074. INIT_LIST_HEAD(&info->node);
  2075. idr_init(&info->protocols);
  2076. mutex_init(&info->protocols_mtx);
  2077. idr_init(&info->active_protocols);
  2078. platform_set_drvdata(pdev, info);
  2079. idr_init(&info->tx_idr);
  2080. idr_init(&info->rx_idr);
  2081. handle = &info->handle;
  2082. handle->dev = info->dev;
  2083. handle->version = &info->version;
  2084. handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
  2085. handle->devm_protocol_get = scmi_devm_protocol_get;
  2086. handle->devm_protocol_put = scmi_devm_protocol_put;
  2087. /* System wide atomic threshold for atomic ops .. if any */
  2088. if (!of_property_read_u32(np, "atomic-threshold-us",
  2089. &info->atomic_threshold))
  2090. dev_info(dev,
  2091. "SCMI System wide atomic threshold set to %d us\n",
  2092. info->atomic_threshold);
  2093. handle->is_transport_atomic = scmi_is_transport_atomic;
  2094. if (desc->ops->link_supplier) {
  2095. ret = desc->ops->link_supplier(dev);
  2096. if (ret)
  2097. return ret;
  2098. }
  2099. ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
  2100. if (ret)
  2101. return ret;
  2102. ret = scmi_xfer_info_init(info);
  2103. if (ret)
  2104. goto clear_txrx_setup;
  2105. if (scmi_notification_init(handle))
  2106. dev_err(dev, "SCMI Notifications NOT available.\n");
  2107. if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
  2108. dev_err(dev,
  2109. "Transport is not polling capable. Atomic mode not supported.\n");
  2110. /*
  2111. * Trigger SCMI Base protocol initialization.
  2112. * It's mandatory and won't be ever released/deinit until the
  2113. * SCMI stack is shutdown/unloaded as a whole.
  2114. */
  2115. ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
  2116. if (ret) {
  2117. dev_err(dev, "unable to communicate with SCMI\n");
  2118. goto notification_exit;
  2119. }
  2120. mutex_lock(&scmi_list_mutex);
  2121. list_add_tail(&info->node, &scmi_list);
  2122. mutex_unlock(&scmi_list_mutex);
  2123. for_each_available_child_of_node(np, child) {
  2124. u32 prot_id;
  2125. if (of_property_read_u32(child, "reg", &prot_id))
  2126. continue;
  2127. if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
  2128. dev_err(dev, "Out of range protocol %d\n", prot_id);
  2129. if (!scmi_is_protocol_implemented(handle, prot_id)) {
  2130. dev_err(dev, "SCMI protocol %d not implemented\n",
  2131. prot_id);
  2132. continue;
  2133. }
  2134. /*
  2135. * Save this valid DT protocol descriptor amongst
  2136. * @active_protocols for this SCMI instance/
  2137. */
  2138. ret = idr_alloc(&info->active_protocols, child,
  2139. prot_id, prot_id + 1, GFP_KERNEL);
  2140. if (ret != prot_id) {
  2141. dev_err(dev, "SCMI protocol %d already activated. Skip\n",
  2142. prot_id);
  2143. continue;
  2144. }
  2145. of_node_get(child);
  2146. scmi_create_protocol_devices(child, info, prot_id);
  2147. }
  2148. return 0;
  2149. notification_exit:
  2150. scmi_notification_exit(&info->handle);
  2151. clear_txrx_setup:
  2152. scmi_cleanup_txrx_channels(info);
  2153. return ret;
  2154. }
  2155. void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
  2156. {
  2157. idr_remove(idr, id);
  2158. }
  2159. static int scmi_remove(struct platform_device *pdev)
  2160. {
  2161. int ret, id;
  2162. struct scmi_info *info = platform_get_drvdata(pdev);
  2163. struct device_node *child;
  2164. mutex_lock(&scmi_list_mutex);
  2165. if (info->users)
  2166. dev_warn(&pdev->dev,
  2167. "Still active SCMI users will be forcibly unbound.\n");
  2168. list_del(&info->node);
  2169. mutex_unlock(&scmi_list_mutex);
  2170. scmi_notification_exit(&info->handle);
  2171. mutex_lock(&info->protocols_mtx);
  2172. idr_destroy(&info->protocols);
  2173. mutex_unlock(&info->protocols_mtx);
  2174. idr_for_each_entry(&info->active_protocols, child, id)
  2175. of_node_put(child);
  2176. idr_destroy(&info->active_protocols);
  2177. /* Safe to free channels since no more users */
  2178. ret = scmi_cleanup_txrx_channels(info);
  2179. if (ret)
  2180. dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n");
  2181. return 0;
  2182. }
  2183. static ssize_t protocol_version_show(struct device *dev,
  2184. struct device_attribute *attr, char *buf)
  2185. {
  2186. struct scmi_info *info = dev_get_drvdata(dev);
  2187. return sprintf(buf, "%u.%u\n", info->version.major_ver,
  2188. info->version.minor_ver);
  2189. }
  2190. static DEVICE_ATTR_RO(protocol_version);
  2191. static ssize_t firmware_version_show(struct device *dev,
  2192. struct device_attribute *attr, char *buf)
  2193. {
  2194. struct scmi_info *info = dev_get_drvdata(dev);
  2195. return sprintf(buf, "0x%x\n", info->version.impl_ver);
  2196. }
  2197. static DEVICE_ATTR_RO(firmware_version);
  2198. static ssize_t vendor_id_show(struct device *dev,
  2199. struct device_attribute *attr, char *buf)
  2200. {
  2201. struct scmi_info *info = dev_get_drvdata(dev);
  2202. return sprintf(buf, "%s\n", info->version.vendor_id);
  2203. }
  2204. static DEVICE_ATTR_RO(vendor_id);
  2205. static ssize_t sub_vendor_id_show(struct device *dev,
  2206. struct device_attribute *attr, char *buf)
  2207. {
  2208. struct scmi_info *info = dev_get_drvdata(dev);
  2209. return sprintf(buf, "%s\n", info->version.sub_vendor_id);
  2210. }
  2211. static DEVICE_ATTR_RO(sub_vendor_id);
  2212. static struct attribute *versions_attrs[] = {
  2213. &dev_attr_firmware_version.attr,
  2214. &dev_attr_protocol_version.attr,
  2215. &dev_attr_vendor_id.attr,
  2216. &dev_attr_sub_vendor_id.attr,
  2217. NULL,
  2218. };
  2219. ATTRIBUTE_GROUPS(versions);
  2220. /* Each compatible listed below must have descriptor associated with it */
  2221. static const struct of_device_id scmi_of_match[] = {
  2222. #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
  2223. { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
  2224. #endif
  2225. #ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
  2226. { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
  2227. #endif
  2228. #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
  2229. { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
  2230. #endif
  2231. #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
  2232. { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
  2233. #endif
  2234. { /* Sentinel */ },
  2235. };
  2236. MODULE_DEVICE_TABLE(of, scmi_of_match);
  2237. static struct platform_driver scmi_driver = {
  2238. .driver = {
  2239. .name = "arm-scmi",
  2240. .suppress_bind_attrs = true,
  2241. .of_match_table = scmi_of_match,
  2242. .dev_groups = versions_groups,
  2243. },
  2244. .probe = scmi_probe,
  2245. .remove = scmi_remove,
  2246. };
  2247. /**
  2248. * __scmi_transports_setup - Common helper to call transport-specific
  2249. * .init/.exit code if provided.
  2250. *
  2251. * @init: A flag to distinguish between init and exit.
  2252. *
  2253. * Note that, if provided, we invoke .init/.exit functions for all the
  2254. * transports currently compiled in.
  2255. *
  2256. * Return: 0 on Success.
  2257. */
  2258. static inline int __scmi_transports_setup(bool init)
  2259. {
  2260. int ret = 0;
  2261. const struct of_device_id *trans;
  2262. for (trans = scmi_of_match; trans->data; trans++) {
  2263. const struct scmi_desc *tdesc = trans->data;
  2264. if ((init && !tdesc->transport_init) ||
  2265. (!init && !tdesc->transport_exit))
  2266. continue;
  2267. if (init)
  2268. ret = tdesc->transport_init();
  2269. else
  2270. tdesc->transport_exit();
  2271. if (ret) {
  2272. pr_err("SCMI transport %s FAILED initialization!\n",
  2273. trans->compatible);
  2274. break;
  2275. }
  2276. }
  2277. return ret;
  2278. }
  2279. static int __init scmi_transports_init(void)
  2280. {
  2281. return __scmi_transports_setup(true);
  2282. }
  2283. static void __exit scmi_transports_exit(void)
  2284. {
  2285. __scmi_transports_setup(false);
  2286. }
  2287. static int __init scmi_driver_init(void)
  2288. {
  2289. int ret;
  2290. /* Bail out if no SCMI transport was configured */
  2291. if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
  2292. return -EINVAL;
  2293. scmi_bus_init();
  2294. /* Initialize any compiled-in transport which provided an init/exit */
  2295. ret = scmi_transports_init();
  2296. if (ret)
  2297. return ret;
  2298. scmi_base_register();
  2299. scmi_clock_register();
  2300. scmi_perf_register();
  2301. scmi_power_register();
  2302. scmi_reset_register();
  2303. scmi_sensors_register();
  2304. scmi_voltage_register();
  2305. scmi_system_register();
  2306. scmi_powercap_register();
  2307. return platform_driver_register(&scmi_driver);
  2308. }
  2309. subsys_initcall(scmi_driver_init);
  2310. static void __exit scmi_driver_exit(void)
  2311. {
  2312. scmi_base_unregister();
  2313. scmi_clock_unregister();
  2314. scmi_perf_unregister();
  2315. scmi_power_unregister();
  2316. scmi_reset_unregister();
  2317. scmi_sensors_unregister();
  2318. scmi_voltage_unregister();
  2319. scmi_system_unregister();
  2320. scmi_powercap_unregister();
  2321. scmi_bus_exit();
  2322. scmi_transports_exit();
  2323. platform_driver_unregister(&scmi_driver);
  2324. }
  2325. module_exit(scmi_driver_exit);
  2326. MODULE_ALIAS("platform:arm-scmi");
  2327. MODULE_AUTHOR("Sudeep Holla <[email protected]>");
  2328. MODULE_DESCRIPTION("ARM SCMI protocol driver");
  2329. MODULE_LICENSE("GPL v2");