  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * System Control and Management Interface (SCMI) Notification support
  4. *
  5. * Copyright (C) 2020-2021 ARM Ltd.
  6. */
  7. /**
  8. * DOC: Theory of operation
  9. *
  10. * SCMI Protocol specification allows the platform to signal events to
  11. * interested agents via notification messages: this is an implementation
  12. * of the dispatch and delivery of such notifications to the interested users
  13. * inside the Linux kernel.
  14. *
  15. * An SCMI Notification core instance is initialized for each active platform
  16. * instance identified by the means of the usual &struct scmi_handle.
  17. *
  18. * Each SCMI Protocol implementation, during its initialization, registers with
  19. * this core its set of supported events using scmi_register_protocol_events():
  20. * all the needed descriptors are stored in the &struct registered_protocols and
  21. * &struct registered_events arrays.
  22. *
  23. * Kernel users interested in some specific event can register their callbacks
  24. * providing the usual notifier_block descriptor, since this core implements
  25. * events' delivery using the standard Kernel notification chains machinery.
  26. *
  27. * Given the number of possible events defined by SCMI and the extensibility
  28. * of the SCMI Protocol itself, the underlying notification chains are created
  29. * and destroyed dynamically on demand depending on the number of users
  30. * effectively registered for an event, so that no support structures or chains
  31. * are allocated until at least one user has registered a notifier_block for
  32. * such event. Similarly, events' generation itself is enabled at the platform
  33. * level only after at least one user has registered, and it is shutdown after
  34. * the last user for that event has gone.
  35. *
  36. * All users provided callbacks and allocated notification-chains are stored in
  37. * the @registered_events_handlers hashtable. Callbacks' registration requests
  38. * for still to be registered events are instead kept in the dedicated common
  39. * hashtable @pending_events_handlers.
  40. *
  41. * An event is identified univocally by the tuple (proto_id, evt_id, src_id)
  42. * and is served by its own dedicated notification chain; information contained
  43. * in such tuples is used, in a few different ways, to generate the needed
  44. * hash-keys.
  45. *
  46. * Here proto_id and evt_id are simply the protocol_id and message_id numbers
  47. * as described in the SCMI Protocol specification, while src_id represents an
  48. * optional, protocol dependent, source identifier (like domain_id, perf_id
  49. * or sensor_id and so forth).
  50. *
  51. * Upon reception of a notification message from the platform the SCMI RX ISR
  52. * passes the received message payload and some ancillary information (including
  53. * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
  54. * pushes the event-data itself on a protocol-dedicated kfifo queue for further
  55. * deferred processing as specified in @scmi_events_dispatcher().
  56. *
 * Each protocol has its own dedicated work_struct and worker which, once
 * kicked by the ISR, takes care to empty its own dedicated queue, delivering the
  59. * queued items into the proper notification-chain: notifications processing can
  60. * proceed concurrently on distinct workers only between events belonging to
  61. * different protocols while delivery of events within the same protocol is
  62. * still strictly sequentially ordered by time of arrival.
  63. *
  64. * Events' information is then extracted from the SCMI Notification messages and
  65. * conveyed, converted into a custom per-event report struct, as the void *data
  66. * param to the user callback provided by the registered notifier_block, so that
 * from the user perspective their callback will be invoked as:
  68. *
  69. * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
  70. *
  71. */
  72. #define dev_fmt(fmt) "SCMI Notifications - " fmt
  73. #define pr_fmt(fmt) "SCMI Notifications - " fmt
  74. #include <linux/bitfield.h>
  75. #include <linux/bug.h>
  76. #include <linux/compiler.h>
  77. #include <linux/device.h>
  78. #include <linux/err.h>
  79. #include <linux/hashtable.h>
  80. #include <linux/kernel.h>
  81. #include <linux/ktime.h>
  82. #include <linux/kfifo.h>
  83. #include <linux/list.h>
  84. #include <linux/mutex.h>
  85. #include <linux/notifier.h>
  86. #include <linux/refcount.h>
  87. #include <linux/scmi_protocol.h>
  88. #include <linux/slab.h>
  89. #include <linux/types.h>
  90. #include <linux/workqueue.h>
  91. #include "common.h"
  92. #include "notify.h"
#define SCMI_MAX_PROTO		256

/* Layout of the 32bit hash-key: | proto_id (8) | evt_id (8) | src_id (16) | */
#define PROTO_ID_MASK		GENMASK(31, 24)
#define EVT_ID_MASK		GENMASK(23, 16)
#define SRC_ID_MASK		GENMASK(15, 0)

/*
 * Builds an unsigned 32bit key from the given input tuple to be used
 * as a key in hashtables.
 */
#define MAKE_HASH_KEY(p, e, s)			\
	(FIELD_PREP(PROTO_ID_MASK, (p)) |	\
	 FIELD_PREP(EVT_ID_MASK, (e)) |		\
	 FIELD_PREP(SRC_ID_MASK, (s)))

/*
 * A catch-all key for a (proto, evt) pair: src_id saturated to SRC_ID_MASK,
 * a value no real source identifier can collide with.
 */
#define MAKE_ALL_SRCS_KEY(p, e)		MAKE_HASH_KEY((p), (e), SRC_ID_MASK)

/*
 * Assumes that the stored obj includes its own hash-key in a field named 'key':
 * with this simplification this macro can be equally used for all the objects'
 * types hashed by this implementation.
 *
 * @__ht: The hashtable name
 * @__obj: A pointer to the object type to be retrieved from the hashtable;
 *	   it will be used as a cursor while scanning the hashtable and it will
 *	   be possibly left as NULL when @__k is not found
 * @__k: The key to search for
 */
#define KEY_FIND(__ht, __obj, __k)			\
({							\
	typeof(__k) k_ = __k;				\
	typeof(__obj) obj_;				\
							\
	hash_for_each_possible((__ht), obj_, hash, k_)	\
		if (obj_->key == k_)			\
			break;				\
	__obj = obj_;					\
})

/* Extract the single tuple components back out of a hash-key */
#define KEY_XTRACT_PROTO_ID(key)	FIELD_GET(PROTO_ID_MASK, (key))
#define KEY_XTRACT_EVT_ID(key)		FIELD_GET(EVT_ID_MASK, (key))
#define KEY_XTRACT_SRC_ID(key)		FIELD_GET(SRC_ID_MASK, (key))
/*
 * A set of macros used to access safely @registered_protocols and
 * @registered_events arrays; these are fixed in size and each entry is possibly
 * populated at protocols' registration time and then only read but NEVER
 * modified or removed.
 *
 * NOTE(review): the READ_ONCE() below presumably pairs with the publishing
 * store performed at protocol registration (see the smp_rmb() in
 * scmi_allocate_registered_events_desc()) — confirm against the writer side.
 */
#define SCMI_GET_PROTO(__ni, __pid)					\
({									\
	typeof(__ni) ni_ = __ni;					\
	struct scmi_registered_events_desc *__pd = NULL;		\
									\
	if (ni_)							\
		__pd = READ_ONCE(ni_->registered_protocols[(__pid)]);	\
	__pd;								\
})

#define SCMI_GET_REVT_FROM_PD(__pd, __eid)				\
({									\
	typeof(__pd) pd_ = __pd;					\
	typeof(__eid) eid_ = __eid;					\
	struct scmi_registered_event *__revt = NULL;			\
									\
	if (pd_ && eid_ < pd_->num_events)				\
		__revt = READ_ONCE(pd_->registered_events[eid_]);	\
	__revt;								\
})

#define SCMI_GET_REVT(__ni, __pid, __eid)				\
({									\
	struct scmi_registered_event *__revt;				\
	struct scmi_registered_events_desc *__pd;			\
									\
	__pd = SCMI_GET_PROTO((__ni), (__pid));				\
	__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid));			\
	__revt;								\
})

/* A couple of utility macros to limit cruft when calling protocols' helpers */
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state)			\
({									\
	typeof(revt) r = revt;						\
	r->proto->ops->set_notify_enabled(r->proto->ph,			\
					  (eid), (sid), (state));	\
})

#define REVT_NOTIFY_ENABLE(revt, eid, sid)				\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)

#define REVT_NOTIFY_DISABLE(revt, eid, sid)				\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)

#define REVT_FILL_REPORT(revt, ...)					\
({									\
	typeof(revt) r = revt;						\
	r->proto->ops->fill_custom_report(r->proto->ph,			\
					  __VA_ARGS__);			\
})
/* Hashtable bucket-count exponents (2^N buckets) */
#define SCMI_PENDING_HASH_SZ		4
#define SCMI_REGISTERED_HASH_SZ		6

struct scmi_registered_events_desc;

/**
 * struct scmi_notify_instance  - Represents an instance of the notification
 * core
 * @gid: GroupID used for devres
 * @handle: A reference to the platform instance
 * @init_work: A work item to perform final initializations of pending handlers
 * @notify_wq: A reference to the allocated Kernel cmwq
 * @pending_mtx: A mutex to protect @pending_events_handlers
 * @registered_protocols: A statically allocated array containing pointers to
 *			  all the registered protocol-level specific information
 *			  related to events' handling
 * @pending_events_handlers: An hashtable containing all pending events'
 *			     handlers descriptors
 *
 * Each platform instance, represented by a handle, has its own instance of
 * the notification subsystem represented by this structure.
 */
struct scmi_notify_instance {
	void			*gid;
	struct scmi_handle	*handle;
	struct work_struct	init_work;
	struct workqueue_struct	*notify_wq;
	/* lock to protect pending_events_handlers */
	struct mutex		pending_mtx;
	struct scmi_registered_events_desc **registered_protocols;
	DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
};
/**
 * struct events_queue  - Describes a queue and its associated worker
 * @sz: Size in bytes of the related kfifo
 * @kfifo: A dedicated Kernel kfifo descriptor
 * @notify_work: A custom work item bound to this queue
 * @wq: A reference to the associated workqueue
 *
 * Each protocol has its own dedicated events_queue descriptor.
 */
struct events_queue {
	size_t			sz;
	struct kfifo		kfifo;
	struct work_struct	notify_work;
	struct workqueue_struct	*wq;
};
/**
 * struct scmi_event_header  - A utility header
 * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
 *	       to this event as soon as it entered the SCMI RX ISR
 * @payld_sz: Effective size of the embedded message payload which follows
 * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
 * @payld: A reference to the embedded event payload
 *
 * This header is prepended to each received event message payload before
 * queueing it on the related &struct events_queue.
 */
struct scmi_event_header {
	ktime_t timestamp;
	size_t payld_sz;
	unsigned char evt_id;
	unsigned char payld[];	/* flexible array member: payload follows inline */
};
struct scmi_registered_event;

/**
 * struct scmi_registered_events_desc  - Protocol Specific information
 * @id: Protocol ID
 * @ops: Protocol specific and event-related operations
 * @equeue: The embedded per-protocol events_queue
 * @ni: A reference to the initialized instance descriptor
 * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
 *	deferred worker when fetching data from the kfifo
 * @eh_sz: Size of the pre-allocated buffer @eh
 * @in_flight: A reference to an in flight &struct scmi_registered_event
 * @num_events: Number of events in @registered_events
 * @registered_events: A dynamically allocated array holding all the registered
 *		       events' descriptors, whose fixed-size is determined at
 *		       registration time
 * @registered_mtx: A mutex to protect @registered_events_handlers
 * @ph: SCMI protocol handle reference
 * @registered_events_handlers: An hashtable containing all events' handlers
 *				descriptors registered for this protocol
 *
 * All protocols that register at least one event have their protocol-specific
 * information stored here, together with the embedded allocated events_queue.
 * These descriptors are stored in the @registered_protocols array at protocol
 * registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that, once
 * we safely grab a NON-NULL reference from the array we can keep it and use it.
 */
struct scmi_registered_events_desc {
	u8 id;
	const struct scmi_event_ops *ops;
	struct events_queue equeue;
	struct scmi_notify_instance *ni;
	struct scmi_event_header *eh;
	size_t eh_sz;
	void *in_flight;
	int num_events;
	struct scmi_registered_event **registered_events;
	/* mutex to protect registered_events_handlers */
	struct mutex registered_mtx;
	const struct scmi_protocol_handle *ph;
	DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};
/**
 * struct scmi_registered_event  - Event Specific Information
 * @proto: A reference to the associated protocol descriptor
 * @evt: A reference to the associated event descriptor (as provided at
 *	 registration time)
 * @report: A pre-allocated buffer used by the deferred worker to fill a
 *	    customized event report
 * @num_sources: The number of possible sources for this event as stated at
 *		 events' registration time
 * @sources: A reference to a dynamically allocated array used to refcount the
 *	     events' enable requests for all the existing sources
 * @sources_mtx: A mutex to serialize the access to @sources
 *
 * All registered events are represented by one of these structures that are
 * stored in the @registered_events array at protocol registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that once we
 * safely grab a NON-NULL reference from the table we can keep it and use it.
 */
struct scmi_registered_event {
	struct scmi_registered_events_desc *proto;
	const struct scmi_event	*evt;
	void		*report;
	u32		num_sources;
	refcount_t	*sources;
	/* locking to serialize the access to sources */
	struct mutex	sources_mtx;
};
/**
 * struct scmi_event_handler  - Event handler information
 * @key: The used hashkey
 * @users: A reference count for number of active users for this handler
 * @r_evt: A reference to the associated registered event; when this is NULL
 *	   this handler is pending: it identifies a set of callbacks intended
 *	   to be attached to an event which is still not known nor registered
 *	   by any protocol at that point in time
 * @chain: The notification chain dedicated to this specific event tuple
 * @hash: The hlist_node used for collision handling
 * @enabled: A boolean which records if event's generation has been already
 *	     enabled for this handler as a whole
 *
 * This structure collects all the information needed to process a received
 * event identified by the tuple (proto_id, evt_id, src_id).
 * These descriptors are stored in a per-protocol @registered_events_handlers
 * table using as a key a value derived from that tuple.
 */
struct scmi_event_handler {
	u32 key;
	refcount_t users;
	struct scmi_registered_event *r_evt;
	struct blocking_notifier_head chain;
	struct hlist_node hash;
	bool enabled;
};

/* A handler with no registered event backing it yet is "pending" */
#define IS_HNDL_PENDING(hndl)	(!(hndl)->r_evt)

/* Forward declarations for helpers defined later in this file */
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl);
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl);
  349. /**
  350. * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
  351. * @ni: A reference to the notification instance to use
  352. * @evt_key: The key to use to lookup the related notification chain
  353. * @report: The customized event-specific report to pass down to the callbacks
  354. * as their *data parameter.
  355. */
  356. static inline void
  357. scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
  358. u32 evt_key, void *report)
  359. {
  360. int ret;
  361. struct scmi_event_handler *hndl;
  362. /*
  363. * Here ensure the event handler cannot vanish while using it.
  364. * It is legitimate, though, for an handler not to be found at all here,
  365. * e.g. when it has been unregistered by the user after some events had
  366. * already been queued.
  367. */
  368. hndl = scmi_get_active_handler(ni, evt_key);
  369. if (!hndl)
  370. return;
  371. ret = blocking_notifier_call_chain(&hndl->chain,
  372. KEY_XTRACT_EVT_ID(evt_key),
  373. report);
  374. /* Notifiers are NOT supposed to cut the chain ... */
  375. WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
  376. scmi_put_active_handler(ni, hndl);
  377. }
  378. /**
  379. * scmi_process_event_header() - Dequeue and process an event header
  380. * @eq: The queue to use
  381. * @pd: The protocol descriptor to use
  382. *
  383. * Read an event header from the protocol queue into the dedicated scratch
  384. * buffer and looks for a matching registered event; in case an anomalously
  385. * sized read is detected just flush the queue.
  386. *
  387. * Return:
  388. * * a reference to the matching registered event when found
  389. * * ERR_PTR(-EINVAL) when NO registered event could be found
  390. * * NULL when the queue is empty
  391. */
  392. static inline struct scmi_registered_event *
  393. scmi_process_event_header(struct events_queue *eq,
  394. struct scmi_registered_events_desc *pd)
  395. {
  396. unsigned int outs;
  397. struct scmi_registered_event *r_evt;
  398. outs = kfifo_out(&eq->kfifo, pd->eh,
  399. sizeof(struct scmi_event_header));
  400. if (!outs)
  401. return NULL;
  402. if (outs != sizeof(struct scmi_event_header)) {
  403. dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
  404. kfifo_reset_out(&eq->kfifo);
  405. return NULL;
  406. }
  407. r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
  408. if (!r_evt)
  409. r_evt = ERR_PTR(-EINVAL);
  410. return r_evt;
  411. }
/**
 * scmi_process_event_payload()  - Dequeue and process an event payload
 * @eq: The queue to use
 * @pd: The protocol descriptor to use
 * @r_evt: The registered event descriptor to use
 *
 * Read an event payload from the protocol queue into the dedicated scratch
 * buffer, fills a custom report and then look for matching event handlers and
 * call them; skip any unknown event (as marked by scmi_process_event_header())
 * and in case an anomalously sized read is detected just flush the queue.
 *
 * Return: False when the queue is empty
 */
static inline bool
scmi_process_event_payload(struct events_queue *eq,
			   struct scmi_registered_events_desc *pd,
			   struct scmi_registered_event *r_evt)
{
	u32 src_id, key;
	unsigned int outs;
	void *report = NULL;

	outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
	if (!outs)
		return false;

	/* Any in-flight event has now been officially processed */
	pd->in_flight = NULL;

	if (outs != pd->eh->payld_sz) {
		/* Short read: queue content cannot be trusted, flush it all */
		dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return false;
	}

	/* Header marked as unknown event: payload already consumed, skip it */
	if (IS_ERR(r_evt)) {
		dev_warn(pd->ni->handle->dev,
			 "SKIP UNKNOWN EVT - proto:%X evt:%d\n",
			 pd->id, pd->eh->evt_id);
		return true;
	}

	/* fill_custom_report() is expected to also extract src_id from payld */
	report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
				  pd->eh->payld, pd->eh->payld_sz,
				  r_evt->report, &src_id);
	if (!report) {
		dev_err(pd->ni->handle->dev,
			"report not available - proto:%X evt:%d\n",
			pd->id, pd->eh->evt_id);
		return true;
	}

	/* At first search for a generic ALL src_ids handler... */
	key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	/* ...then search for any specific src_id */
	key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	return true;
}
  466. /**
  467. * scmi_events_dispatcher() - Common worker logic for all work items.
  468. * @work: The work item to use, which is associated to a dedicated events_queue
  469. *
  470. * Logic:
  471. * 1. dequeue one pending RX notification (queued in SCMI RX ISR context)
  472. * 2. generate a custom event report from the received event message
  473. * 3. lookup for any registered ALL_SRC_IDs handler:
  474. * - > call the related notification chain passing in the report
  475. * 4. lookup for any registered specific SRC_ID handler:
  476. * - > call the related notification chain passing in the report
  477. *
  478. * Note that:
  479. * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
  480. * flood of events cannot saturate other protocols' queues.
  481. * * each per-protocol queue is associated to a distinct work_item, which
  482. * means, in turn, that:
  483. * + all protocols can process their dedicated queues concurrently
  484. * (since notify_wq:max_active != 1)
  485. * + anyway at most one worker instance is allowed to run on the same queue
  486. * concurrently: this ensures that we can have only one concurrent
  487. * reader/writer on the associated kfifo, so that we can use it lock-less
  488. *
  489. * Context: Process context.
  490. */
  491. static void scmi_events_dispatcher(struct work_struct *work)
  492. {
  493. struct events_queue *eq;
  494. struct scmi_registered_events_desc *pd;
  495. struct scmi_registered_event *r_evt;
  496. eq = container_of(work, struct events_queue, notify_work);
  497. pd = container_of(eq, struct scmi_registered_events_desc, equeue);
  498. /*
  499. * In order to keep the queue lock-less and the number of memcopies
  500. * to the bare minimum needed, the dispatcher accounts for the
  501. * possibility of per-protocol in-flight events: i.e. an event whose
  502. * reception could end up being split across two subsequent runs of this
  503. * worker, first the header, then the payload.
  504. */
  505. do {
  506. if (!pd->in_flight) {
  507. r_evt = scmi_process_event_header(eq, pd);
  508. if (!r_evt)
  509. break;
  510. pd->in_flight = r_evt;
  511. } else {
  512. r_evt = pd->in_flight;
  513. }
  514. } while (scmi_process_event_payload(eq, pd, r_evt));
  515. }
/**
 * scmi_notify()  - Queues a notification for further deferred processing
 * @handle: The handle identifying the platform instance from which the
 *	    dispatched event is generated
 * @proto_id: Protocol ID
 * @evt_id: Event ID (msgID)
 * @buf: Event Message Payload (without the header)
 * @len: Event Message Payload size
 * @ts: RX Timestamp in nanoseconds (boottime)
 *
 * Context: Called in interrupt context to queue a received event for
 * deferred processing.
 *
 * Return: 0 on Success
 */
int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
		const void *buf, size_t len, ktime_t ts)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_header eh;
	struct scmi_notify_instance *ni;

	/* Notification core not initialized for this platform: silently drop */
	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return 0;

	r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
	if (!r_evt)
		return -EINVAL;

	if (len > r_evt->evt->max_payld_sz) {
		dev_err(handle->dev, "discard badly sized message\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): kfifo_avail() followed by two kfifo_in() is safe only
	 * with a single producer per protocol queue (this RX ISR path) —
	 * confirm no other producer exists.
	 */
	if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
		dev_warn(handle->dev,
			 "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
			 proto_id, evt_id, ktime_to_ns(ts));
		return -ENOMEM;
	}
	eh.timestamp = ts;
	eh.evt_id = evt_id;
	eh.payld_sz = len;
	/*
	 * Header and payload are enqueued with two distinct kfifo_in() (so non
	 * atomic), but this situation is handled properly on the consumer side
	 * with in-flight events tracking.
	 */
	kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
	kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
	/*
	 * Don't care about return value here since we just want to ensure that
	 * a work is queued all the times whenever some items have been pushed
	 * on the kfifo:
	 * - if work was already queued it will simply fail to queue a new one
	 *   since it is not needed
	 * - if work was not queued already it will be now, even in case work
	 *   was in fact already running: this behavior avoids any possible race
	 *   when this function pushes new items onto the kfifos after the
	 *   related executing worker had already determined the kfifo to be
	 *   empty and it was terminating.
	 */
	queue_work(r_evt->proto->equeue.wq,
		   &r_evt->proto->equeue.notify_work);

	return 0;
}
  579. /**
  580. * scmi_kfifo_free() - Devres action helper to free the kfifo
  581. * @kfifo: The kfifo to free
  582. */
  583. static void scmi_kfifo_free(void *kfifo)
  584. {
  585. kfifo_free((struct kfifo *)kfifo);
  586. }
/**
 * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
 * @ni: A reference to the notification instance to use
 * @equeue: The events_queue to initialize
 * @sz: Size of the kfifo buffer to allocate
 *
 * Allocate a buffer for the kfifo and initialize it.
 *
 * Return: 0 on Success
 */
static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
					struct events_queue *equeue, size_t sz)
{
	int ret;

	if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
		return -ENOMEM;
	/* Size could have been roundup to power-of-two */
	equeue->sz = kfifo_size(&equeue->kfifo);

	/* Tie the kfifo buffer lifetime to the device via a devres action */
	ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
				       &equeue->kfifo);
	if (ret)
		return ret;

	INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
	/* All queues dispatch on the instance-wide notification workqueue */
	equeue->wq = ni->notify_wq;

	return ret;
}
/**
 * scmi_allocate_registered_events_desc() - Allocate a registered events'
 * descriptor
 * @ni: A reference to the &struct scmi_notify_instance notification instance
 *	to use
 * @proto_id: Protocol ID
 * @queue_sz: Size of the associated queue to allocate
 * @eh_sz: Size of the event header scratch area to pre-allocate
 * @num_events: Number of events to support (size of @registered_events)
 * @ops: Pointer to a struct holding references to protocol specific helpers
 *	 needed during events handling
 *
 * It is supposed to be called only once for each protocol at protocol
 * initialization time, so it warns if the requested protocol is found already
 * registered.
 *
 * Return: The allocated and registered descriptor on Success
 */
static struct scmi_registered_events_desc *
scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
				     u8 proto_id, size_t queue_sz, size_t eh_sz,
				     int num_events,
				     const struct scmi_event_ops *ops)
{
	int ret;
	struct scmi_registered_events_desc *pd;

	/* Ensure protocols are up to date */
	smp_rmb();
	if (WARN_ON(ni->registered_protocols[proto_id]))
		return ERR_PTR(-EINVAL);

	/* All allocations are device-managed: freed on device teardown */
	pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->id = proto_id;
	pd->ops = ops;
	pd->ni = ni;

	ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
	if (ret)
		return ERR_PTR(ret);

	/* Scratch area sized to hold one event header + largest payload */
	pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
	if (!pd->eh)
		return ERR_PTR(-ENOMEM);
	pd->eh_sz = eh_sz;

	/* Array of pointers, one slot per supported event */
	pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
					     sizeof(char *), GFP_KERNEL);
	if (!pd->registered_events)
		return ERR_PTR(-ENOMEM);
	pd->num_events = num_events;

	/* Initialize per protocol handlers table */
	mutex_init(&pd->registered_mtx);
	hash_init(pd->registered_events_handlers);

	return pd;
}
/**
 * scmi_register_protocol_events() - Register Protocol Events with the core
 * @handle: The handle identifying the platform instance against which the
 *	    protocol's events are registered
 * @proto_id: Protocol ID
 * @ph: SCMI protocol handle.
 * @ee: A structure describing the events supported by this protocol.
 *
 * Used by SCMI Protocols initialization code to register with the notification
 * core the list of supported events and their descriptors: takes care to
 * pre-allocate and store all needed descriptors, scratch buffers and event
 * queues.
 *
 * Return: 0 on Success
 */
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
				  const struct scmi_protocol_handle *ph,
				  const struct scmi_protocol_events *ee)
{
	int i;
	unsigned int num_sources;
	size_t payld_sz = 0;
	struct scmi_registered_events_desc *pd;
	struct scmi_notify_instance *ni;
	const struct scmi_event *evt;

	if (!ee || !ee->ops || !ee->evts || !ph ||
	    (!ee->num_sources && !ee->ops->get_num_sources))
		return -EINVAL;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENOMEM;

	/* num_sources cannot be <= 0 */
	if (ee->num_sources) {
		num_sources = ee->num_sources;
	} else {
		/* Source count not statically known: query the protocol */
		int nsrc = ee->ops->get_num_sources(ph);

		if (nsrc <= 0)
			return -EINVAL;
		num_sources = nsrc;
	}

	evt = ee->evts;
	/* Size the shared scratch area for the largest event payload */
	for (i = 0; i < ee->num_events; i++)
		payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
	payld_sz += sizeof(struct scmi_event_header);

	pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
						  payld_sz, ee->num_events,
						  ee->ops);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	pd->ph = ph;
	for (i = 0; i < ee->num_events; i++, evt++) {
		struct scmi_registered_event *r_evt;

		/* devm-managed: no unwind needed on intermediate failures */
		r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
				     GFP_KERNEL);
		if (!r_evt)
			return -ENOMEM;
		r_evt->proto = pd;
		r_evt->evt = evt;

		/* One refcount per source tracks per-source enable requests */
		r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
					      sizeof(refcount_t), GFP_KERNEL);
		if (!r_evt->sources)
			return -ENOMEM;
		r_evt->num_sources = num_sources;
		mutex_init(&r_evt->sources_mtx);

		r_evt->report = devm_kzalloc(ni->handle->dev,
					     evt->max_report_sz, GFP_KERNEL);
		if (!r_evt->report)
			return -ENOMEM;

		pd->registered_events[i] = r_evt;
		/* Ensure events are updated */
		smp_wmb();
		dev_dbg(handle->dev, "registered event - %lX\n",
			MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
	}

	/* Register protocol and events...it will never be removed */
	ni->registered_protocols[proto_id] = pd;
	/* Ensure protocols are updated */
	smp_wmb();

	/*
	 * Finalize any pending events' handler which could have been waiting
	 * for this protocol's events registration.
	 */
	schedule_work(&ni->init_work);

	return 0;
}
/**
 * scmi_deregister_protocol_events - Deregister protocol events with the core
 * @handle: The handle identifying the platform instance against which the
 *	    protocol's events are registered
 * @proto_id: Protocol ID
 */
void scmi_deregister_protocol_events(const struct scmi_handle *handle,
				     u8 proto_id)
{
	struct scmi_notify_instance *ni;
	struct scmi_registered_events_desc *pd;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;

	pd = ni->registered_protocols[proto_id];
	if (!pd)
		return;

	/* Unpublish the descriptor before tearing down its dispatch work */
	ni->registered_protocols[proto_id] = NULL;
	/* Ensure protocols are updated */
	smp_wmb();

	/* Wait for any in-flight dispatch still referencing this descriptor */
	cancel_work_sync(&pd->equeue.notify_work);
}
  773. /**
  774. * scmi_allocate_event_handler() - Allocate Event handler
  775. * @ni: A reference to the notification instance to use
  776. * @evt_key: 32bit key uniquely bind to the event identified by the tuple
  777. * (proto_id, evt_id, src_id)
  778. *
  779. * Allocate an event handler and related notification chain associated with
  780. * the provided event handler key.
  781. * Note that, at this point, a related registered_event is still to be
  782. * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
  783. * is initialized as pending.
  784. *
  785. * Context: Assumes to be called with @pending_mtx already acquired.
  786. * Return: the freshly allocated structure on Success
  787. */
  788. static struct scmi_event_handler *
  789. scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
  790. {
  791. struct scmi_event_handler *hndl;
  792. hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
  793. if (!hndl)
  794. return NULL;
  795. hndl->key = evt_key;
  796. BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
  797. refcount_set(&hndl->users, 1);
  798. /* New handlers are created pending */
  799. hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
  800. return hndl;
  801. }
/**
 * scmi_free_event_handler() - Free the provided Event handler
 * @hndl: The event handler structure to free
 *
 * Unlinks the handler from whichever hashtable currently holds it (pending or
 * registered) and releases its memory.
 *
 * Context: Assumes to be called with proper locking acquired depending
 *	    on the situation.
 */
static void scmi_free_event_handler(struct scmi_event_handler *hndl)
{
	hash_del(&hndl->hash);
	kfree(hndl);
}
/**
 * scmi_bind_event_handler() - Helper to attempt binding an handler to an event
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to bind
 *
 * If an associated registered event is found, move the handler from the pending
 * into the registered table.
 *
 * Context: Assumes to be called with @pending_mtx already acquired.
 *
 * Return: 0 on Success
 */
static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
					  struct scmi_event_handler *hndl)
{
	struct scmi_registered_event *r_evt;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
			      KEY_XTRACT_EVT_ID(hndl->key));
	if (!r_evt)
		return -EINVAL;

	/*
	 * Remove from pending and insert into registered while getting hold
	 * of protocol instance.
	 */
	hash_del(&hndl->hash);

	/*
	 * Acquire protocols only for NON pending handlers, so as NOT to trigger
	 * protocol initialization when a notifier is registered against a still
	 * not registered protocol, since it would make little sense to force init
	 * protocols for which still no SCMI driver user exists: they wouldn't
	 * emit any event anyway till some SCMI driver starts using it.
	 */
	scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
	hndl->r_evt = r_evt;

	/* Lock order: pending_mtx (held by caller) then registered_mtx */
	mutex_lock(&r_evt->proto->registered_mtx);
	hash_add(r_evt->proto->registered_events_handlers,
		 &hndl->hash, hndl->key);
	mutex_unlock(&r_evt->proto->registered_mtx);

	return 0;
}
  854. /**
  855. * scmi_valid_pending_handler() - Helper to check pending status of handlers
  856. * @ni: A reference to the notification instance to use
  857. * @hndl: The event handler to check
  858. *
  859. * An handler is considered pending when its r_evt == NULL, because the related
  860. * event was still unknown at handler's registration time; anyway, since all
  861. * protocols register their supported events once for all at protocols'
  862. * initialization time, a pending handler cannot be considered valid anymore if
  863. * the underlying event (which it is waiting for), belongs to an already
  864. * initialized and registered protocol.
  865. *
  866. * Return: 0 on Success
  867. */
  868. static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
  869. struct scmi_event_handler *hndl)
  870. {
  871. struct scmi_registered_events_desc *pd;
  872. if (!IS_HNDL_PENDING(hndl))
  873. return -EINVAL;
  874. pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
  875. if (pd)
  876. return -EINVAL;
  877. return 0;
  878. }
  879. /**
  880. * scmi_register_event_handler() - Register whenever possible an Event handler
  881. * @ni: A reference to the notification instance to use
  882. * @hndl: The event handler to register
  883. *
  884. * At first try to bind an event handler to its associated event, then check if
  885. * it was at least a valid pending handler: if it was not bound nor valid return
  886. * false.
  887. *
  888. * Valid pending incomplete bindings will be periodically retried by a dedicated
  889. * worker which is kicked each time a new protocol completes its own
  890. * registration phase.
  891. *
  892. * Context: Assumes to be called with @pending_mtx acquired.
  893. *
  894. * Return: 0 on Success
  895. */
  896. static int scmi_register_event_handler(struct scmi_notify_instance *ni,
  897. struct scmi_event_handler *hndl)
  898. {
  899. int ret;
  900. ret = scmi_bind_event_handler(ni, hndl);
  901. if (!ret) {
  902. dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
  903. hndl->key);
  904. } else {
  905. ret = scmi_valid_pending_handler(ni, hndl);
  906. if (!ret)
  907. dev_dbg(ni->handle->dev,
  908. "registered PENDING handler - key:%X\n",
  909. hndl->key);
  910. }
  911. return ret;
  912. }
/**
 * __scmi_event_handler_get_ops() - Utility to get or create an event handler
 * @ni: A reference to the notification instance to use
 * @evt_key: The event key to use
 * @create: A boolean flag to specify if a handler must be created when
 *	    not already existent
 *
 * Search for the desired handler matching the key in both the per-protocol
 * registered table and the common pending table:
 * * if found adjust users refcount
 * * if not found and @create is true, create and register the new handler:
 *   handler could end up being registered as pending if no matching event
 *   could be found.
 *
 * An handler is guaranteed to reside in one and only one of the tables at
 * any one time; to ensure this the whole search and create is performed
 * holding the @pending_mtx lock, with @registered_mtx additionally acquired
 * if needed.
 *
 * Note that when a nested acquisition of these mutexes is needed the locking
 * order is always (same as in @init_work):
 * 1. pending_mtx
 * 2. registered_mtx
 *
 * Events generation is NOT enabled right after creation within this routine
 * since at creation time we usually want to have all setup and ready before
 * events really start flowing.
 *
 * Return: A properly refcounted handler on Success, NULL on Failure
 */
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
			     u32 evt_key, bool create)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));

	mutex_lock(&ni->pending_mtx);
	/* Search registered events at first ... if possible at all */
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	/* ...then amongst pending. */
	if (!hndl) {
		hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
	}

	/* Create if still not found and required */
	if (!hndl && create) {
		hndl = scmi_allocate_event_handler(ni, evt_key);
		if (hndl && scmi_register_event_handler(ni, hndl)) {
			dev_dbg(ni->handle->dev,
				"purging UNKNOWN handler - key:%X\n",
				hndl->key);
			/* this hndl can be only a pending one */
			scmi_put_handler_unlocked(ni, hndl);
			hndl = NULL;
		}
	}
	mutex_unlock(&ni->pending_mtx);

	return hndl;
}
/* Lookup-only variant: get an existing handler (refcounted), never create. */
static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, false);
}
/* Get an existing handler (refcounted) or create a new one when missing. */
static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, true);
}
/**
 * scmi_get_active_handler() - Helper to get active handlers only
 * @ni: A reference to the notification instance to use
 * @evt_key: The event key to use
 *
 * Search for the desired handler matching the key only in the per-protocol
 * table of registered handlers: this is called only from the dispatching path
 * so want to be as quick as possible and do not care about pending.
 *
 * Return: A properly refcounted active handler
 */
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));
	if (r_evt) {
		/* Refcount is taken under registered_mtx to pin the handler */
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	return hndl;
}
/**
 * __scmi_enable_evt() - Enable/disable events generation
 * @r_evt: The registered event to act upon
 * @src_id: The src_id to act upon
 * @enable: The action to perform: true->Enable, false->Disable
 *
 * Takes care of proper refcounting while performing enable/disable: handles
 * the special case of ALL sources requests by itself.
 * Returns successfully if at least one of the required src_id has been
 * successfully enabled/disabled.
 *
 * Return: 0 on Success
 */
static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
				    u32 src_id, bool enable)
{
	int retvals = 0;
	u32 num_sources;
	refcount_t *sid;

	if (src_id == SRC_ID_MASK) {
		/* Wildcard request: walk every source starting from 0 */
		src_id = 0;
		num_sources = r_evt->num_sources;
	} else if (src_id < r_evt->num_sources) {
		num_sources = 1;
	} else {
		return -EINVAL;
	}

	mutex_lock(&r_evt->sources_mtx);
	if (enable) {
		for (; num_sources; src_id++, num_sources--) {
			int ret = 0;

			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == 0) {
				/* First user: ask the platform to enable */
				ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
							 src_id);
				if (!ret)
					refcount_set(sid, 1);
			} else {
				/* Already enabled: just track the extra user */
				refcount_inc(sid);
			}
			/* Count sources that ended up effectively enabled */
			retvals += !ret;
		}
	} else {
		for (; num_sources; src_id++, num_sources--) {
			sid = &r_evt->sources[src_id];
			/* Last user gone: ask the platform to disable */
			if (refcount_dec_and_test(sid))
				REVT_NOTIFY_DISABLE(r_evt,
						    r_evt->evt->id, src_id);
		}
		/* Disable requests are always considered successful */
		retvals = 1;
	}
	mutex_unlock(&r_evt->sources_mtx);

	return retvals ? 0 : -EINVAL;
}
  1074. static int scmi_enable_events(struct scmi_event_handler *hndl)
  1075. {
  1076. int ret = 0;
  1077. if (!hndl->enabled) {
  1078. ret = __scmi_enable_evt(hndl->r_evt,
  1079. KEY_XTRACT_SRC_ID(hndl->key), true);
  1080. if (!ret)
  1081. hndl->enabled = true;
  1082. }
  1083. return ret;
  1084. }
  1085. static int scmi_disable_events(struct scmi_event_handler *hndl)
  1086. {
  1087. int ret = 0;
  1088. if (hndl->enabled) {
  1089. ret = __scmi_enable_evt(hndl->r_evt,
  1090. KEY_XTRACT_SRC_ID(hndl->key), false);
  1091. if (!ret)
  1092. hndl->enabled = false;
  1093. }
  1094. return ret;
  1095. }
  1096. /**
  1097. * scmi_put_handler_unlocked() - Put an event handler
  1098. * @ni: A reference to the notification instance to use
  1099. * @hndl: The event handler to act upon
  1100. *
  1101. * After having got exclusive access to the registered handlers hashtable,
  1102. * update the refcount and if @hndl is no more in use by anyone:
  1103. * * ask for events' generation disabling
  1104. * * unregister and free the handler itself
  1105. *
  1106. * Context: Assumes all the proper locking has been managed by the caller.
  1107. *
  1108. * Return: True if handler was freed (users dropped to zero)
  1109. */
  1110. static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
  1111. struct scmi_event_handler *hndl)
  1112. {
  1113. bool freed = false;
  1114. if (refcount_dec_and_test(&hndl->users)) {
  1115. if (!IS_HNDL_PENDING(hndl))
  1116. scmi_disable_events(hndl);
  1117. scmi_free_event_handler(hndl);
  1118. freed = true;
  1119. }
  1120. return freed;
  1121. }
/*
 * Put a handler of unknown state (pending or registered), taking whatever
 * locks its current state requires and releasing the protocol if the final
 * reference is dropped.
 */
static void scmi_put_handler(struct scmi_notify_instance *ni,
			     struct scmi_event_handler *hndl)
{
	bool freed;
	u8 protocol_id;
	struct scmi_registered_event *r_evt = hndl->r_evt;

	mutex_lock(&ni->pending_mtx);
	if (r_evt) {
		/* Stash the id now: r_evt is gone once the handler is freed */
		protocol_id = r_evt->proto->id;
		mutex_lock(&r_evt->proto->registered_mtx);
	}

	freed = scmi_put_handler_unlocked(ni, hndl);

	if (r_evt) {
		mutex_unlock(&r_evt->proto->registered_mtx);
		/*
		 * Only registered handler acquired protocol; must be here
		 * released only AFTER unlocking registered_mtx, since
		 * releasing a protocol can trigger its de-initialization
		 * (ie. including r_evt and registered_mtx)
		 */
		if (freed)
			scmi_protocol_release(ni->handle, protocol_id);
	}
	mutex_unlock(&ni->pending_mtx);
}
/*
 * Put a handler known to be bound to a registered event (hndl->r_evt is
 * dereferenced unconditionally): only @registered_mtx is needed, and the
 * underlying protocol is released once the last reference is dropped.
 */
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl)
{
	bool freed;
	struct scmi_registered_event *r_evt = hndl->r_evt;
	/* Stash the id now: r_evt may be gone once the handler is freed */
	u8 protocol_id = r_evt->proto->id;

	mutex_lock(&r_evt->proto->registered_mtx);
	freed = scmi_put_handler_unlocked(ni, hndl);
	mutex_unlock(&r_evt->proto->registered_mtx);
	/* Release AFTER unlocking: release can tear down registered_mtx */
	if (freed)
		scmi_protocol_release(ni->handle, protocol_id);
}
  1159. /**
  1160. * scmi_event_handler_enable_events() - Enable events associated to an handler
  1161. * @hndl: The Event handler to act upon
  1162. *
  1163. * Return: 0 on Success
  1164. */
  1165. static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
  1166. {
  1167. if (scmi_enable_events(hndl)) {
  1168. pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
  1169. return -EINVAL;
  1170. }
  1171. return 0;
  1172. }
/**
 * scmi_notifier_register() - Register a notifier_block for an event
 * @handle: The handle identifying the platform instance against which the
 *	    callback is registered
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming form ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic helper to register a notifier_block against a protocol event.
 *
 * A notifier_block @nb will be registered for each distinct event identified
 * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
 * so that:
 *
 *	(proto_X, evt_Y, src_Z) --> chain_X_Y_Z
 *
 * @src_id meaning is protocol specific and identifies the origin of the event
 * (like domain_id, sensor_id and so forth).
 *
 * @src_id can be NULL to signify that the caller is interested in receiving
 * notifications from ALL the available sources for that protocol OR simply that
 * the protocol does not support distinct sources.
 *
 * As soon as one user for the specified tuple appears, an handler is created,
 * and that specific event's generation is enabled at the platform level, unless
 * an associated registered event is found missing, meaning that the needed
 * protocol is still to be initialized and the handler has just been registered
 * as still pending.
 *
 * Return: 0 on Success
 */
static int scmi_notifier_register(const struct scmi_handle *handle,
				  u8 proto_id, u8 evt_id, const u32 *src_id,
				  struct notifier_block *nb)
{
	int ret = 0;
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	/* NULL src_id maps to the wildcard SRC_ID_MASK key */
	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_or_create_handler(ni, evt_key);
	if (!hndl)
		return -EINVAL;

	blocking_notifier_chain_register(&hndl->chain, nb);

	/* Enable events for not pending handlers */
	if (!IS_HNDL_PENDING(hndl)) {
		ret = scmi_event_handler_enable_events(hndl);
		/* Drop the reference taken above if enabling failed */
		if (ret)
			scmi_put_handler(ni, hndl);
	}

	return ret;
}
/**
 * scmi_notifier_unregister() - Unregister a notifier_block for an event
 * @handle: The handle identifying the platform instance against which the
 *	    callback is unregistered
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID
 * @nb: The notifier_block to unregister
 *
 * Takes care to unregister the provided @nb from the notification chain
 * associated to the specified event and, if there are no more users for the
 * event handler, frees also the associated event handler structures.
 * (this could possibly cause disabling of event's generation at platform level)
 *
 * Return: 0 on Success
 */
static int scmi_notifier_unregister(const struct scmi_handle *handle,
				    u8 proto_id, u8 evt_id, const u32 *src_id,
				    struct notifier_block *nb)
{
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_handler(ni, evt_key);
	if (!hndl)
		return -EINVAL;

	/*
	 * Note that this chain unregistration call is safe on its own
	 * being internally protected by an rwsem.
	 */
	blocking_notifier_chain_unregister(&hndl->chain, nb);
	/* This put balances the get from scmi_get_handler() just above */
	scmi_put_handler(ni, hndl);

	/*
	 * This balances the initial get issued in @scmi_notifier_register.
	 * If this notifier_block happened to be the last known user callback
	 * for this event, the handler is here freed and the event's generation
	 * stopped.
	 *
	 * Note that, an ongoing concurrent lookup on the delivery workqueue
	 * path could still hold the refcount to 1 even after this routine
	 * completes: in such a case it will be the final put on the delivery
	 * path which will finally free this unused handler.
	 */
	scmi_put_handler(ni, hndl);

	return 0;
}
/*
 * Devres payload describing one managed notifier registration: carries
 * everything needed to replay the matching unregister on device teardown.
 */
struct scmi_notifier_devres {
	const struct scmi_handle *handle;
	u8 proto_id;
	u8 evt_id;
	u32 __src_id;		/* local storage backing src_id when provided */
	u32 *src_id;		/* NULL when registered against ALL sources */
	struct notifier_block *nb;
};
/* Devres release: unregister the notifier recorded in the devres payload. */
static void scmi_devm_release_notifier(struct device *dev, void *res)
{
	struct scmi_notifier_devres *dres = res;

	scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
				 dres->src_id, dres->nb);
}
/**
 * scmi_devm_notifier_register() - Managed registration of a notifier_block
 * for an event
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming form ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic devres managed helper to register a notifier_block against a
 * protocol event.
 *
 * Return: 0 on Success
 */
static int scmi_devm_notifier_register(struct scmi_device *sdev,
				       u8 proto_id, u8 evt_id,
				       const u32 *src_id,
				       struct notifier_block *nb)
{
	int ret;
	struct scmi_notifier_devres *dres;

	dres = devres_alloc(scmi_devm_release_notifier,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return -ENOMEM;

	ret = scmi_notifier_register(sdev->handle, proto_id,
				     evt_id, src_id, nb);
	if (ret) {
		/* Registration failed: the devres entry is never added */
		devres_free(dres);
		return ret;
	}

	dres->handle = sdev->handle;
	dres->proto_id = proto_id;
	dres->evt_id = evt_id;
	dres->nb = nb;
	if (src_id) {
		/* Copy locally: the caller's pointer may not outlive us */
		dres->__src_id = *src_id;
		dres->src_id = &dres->__src_id;
	} else {
		dres->src_id = NULL;
	}
	devres_add(&sdev->dev, dres);

	return ret;
}
  1342. static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
  1343. {
  1344. struct scmi_notifier_devres *dres = res;
  1345. struct scmi_notifier_devres *xres = data;
  1346. if (WARN_ON(!dres || !xres))
  1347. return 0;
  1348. return dres->proto_id == xres->proto_id &&
  1349. dres->evt_id == xres->evt_id &&
  1350. dres->nb == xres->nb &&
  1351. ((!dres->src_id && !xres->src_id) ||
  1352. (dres->src_id && xres->src_id &&
  1353. dres->__src_id == xres->__src_id));
  1354. }
/**
 * scmi_devm_notifier_unregister() - Managed un-registration of a
 * notifier_block for an event
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming form ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic devres managed helper to explicitly un-register a notifier_block
 * against a protocol event, which was previously registered using the above
 * @scmi_devm_notifier_register.
 *
 * Return: 0 on Success
 */
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
					 u8 proto_id, u8 evt_id,
					 const u32 *src_id,
					 struct notifier_block *nb)
{
	int ret;
	/* Stack-local descriptor used purely as a match key for devres */
	struct scmi_notifier_devres dres;

	dres.handle = sdev->handle;
	dres.proto_id = proto_id;
	dres.evt_id = evt_id;
	if (src_id) {
		dres.__src_id = *src_id;
		dres.src_id = &dres.__src_id;
	} else {
		dres.src_id = NULL;
	}

	ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
			     scmi_devm_notifier_match, &dres);
	/* Flag callers trying to release a registration that never existed */
	WARN_ON(ret);

	return ret;
}
/**
 * scmi_protocols_late_init() - Worker for late initialization
 * @work: The work item to use associated to the proper SCMI instance
 *
 * This kicks in whenever a new protocol has completed its own registration via
 * scmi_register_protocol_events(): it is in charge of scanning the table of
 * pending handlers (registered by users while the related protocol was still
 * not initialized) and finalizing their initialization whenever possible;
 * invalid pending handlers are purged at this point in time.
 */
static void scmi_protocols_late_init(struct work_struct *work)
{
	int bkt;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;
	struct hlist_node *tmp;

	ni = container_of(work, struct scmi_notify_instance, init_work);

	/* Ensure protocols and events are up to date */
	smp_rmb();

	/* Same lock order as elsewhere: pending_mtx, then registered_mtx */
	mutex_lock(&ni->pending_mtx);
	/* _safe variant: handlers may be unhashed/freed while iterating */
	hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
		int ret;

		ret = scmi_bind_event_handler(ni, hndl);
		if (!ret) {
			dev_dbg(ni->handle->dev,
				"finalized PENDING handler - key:%X\n",
				hndl->key);
			ret = scmi_event_handler_enable_events(hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging INVALID handler - key:%X\n",
					hndl->key);
				/* Now bound: use the active-handler put path */
				scmi_put_active_handler(ni, hndl);
			}
		} else {
			ret = scmi_valid_pending_handler(ni, hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging PENDING handler - key:%X\n",
					hndl->key);
				/* this hndl can be only a pending one */
				scmi_put_handler_unlocked(ni, hndl);
			}
		}
	}
	mutex_unlock(&ni->pending_mtx);
}
  1440. /*
  1441. * notify_ops are attached to the handle so that can be accessed
  1442. * directly from an scmi_driver to register its own notifiers.
  1443. */
  1444. static const struct scmi_notify_ops notify_ops = {
  1445. .devm_event_notifier_register = scmi_devm_notifier_register,
  1446. .devm_event_notifier_unregister = scmi_devm_notifier_unregister,
  1447. .event_notifier_register = scmi_notifier_register,
  1448. .event_notifier_unregister = scmi_notifier_unregister,
  1449. };
  1450. /**
  1451. * scmi_notification_init() - Initializes Notification Core Support
  1452. * @handle: The handle identifying the platform instance to initialize
  1453. *
  1454. * This function lays out all the basic resources needed by the notification
  1455. * core instance identified by the provided handle: once done, all of the
  1456. * SCMI Protocols can register their events with the core during their own
  1457. * initializations.
  1458. *
  1459. * Note that failing to initialize the core notifications support does not
  1460. * cause the whole SCMI Protocols stack to fail its initialization.
  1461. *
  1462. * SCMI Notification Initialization happens in 2 steps:
  1463. * * initialization: basic common allocations (this function)
  1464. * * registration: protocols asynchronously come into life and registers their
  1465. * own supported list of events with the core; this causes
  1466. * further per-protocol allocations
  1467. *
  1468. * Any user's callback registration attempt, referring a still not registered
  1469. * event, will be registered as pending and finalized later (if possible)
  1470. * by scmi_protocols_late_init() work.
  1471. * This allows for lazy initialization of SCMI Protocols due to late (or
  1472. * missing) SCMI drivers' modules loading.
  1473. *
  1474. * Return: 0 on Success
  1475. */
  1476. int scmi_notification_init(struct scmi_handle *handle)
  1477. {
  1478. void *gid;
  1479. struct scmi_notify_instance *ni;
  1480. gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
  1481. if (!gid)
  1482. return -ENOMEM;
  1483. ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
  1484. if (!ni)
  1485. goto err;
  1486. ni->gid = gid;
  1487. ni->handle = handle;
  1488. ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
  1489. sizeof(char *), GFP_KERNEL);
  1490. if (!ni->registered_protocols)
  1491. goto err;
  1492. ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
  1493. WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
  1494. 0);
  1495. if (!ni->notify_wq)
  1496. goto err;
  1497. mutex_init(&ni->pending_mtx);
  1498. hash_init(ni->pending_events_handlers);
  1499. INIT_WORK(&ni->init_work, scmi_protocols_late_init);
  1500. scmi_notification_instance_data_set(handle, ni);
  1501. handle->notify_ops = &notify_ops;
  1502. /* Ensure handle is up to date */
  1503. smp_wmb();
  1504. dev_info(handle->dev, "Core Enabled.\n");
  1505. devres_close_group(handle->dev, ni->gid);
  1506. return 0;
  1507. err:
  1508. dev_warn(handle->dev, "Initialization Failed.\n");
  1509. devres_release_group(handle->dev, gid);
  1510. return -ENOMEM;
  1511. }
  1512. /**
  1513. * scmi_notification_exit() - Shutdown and clean Notification core
  1514. * @handle: The handle identifying the platform instance to shutdown
  1515. */
  1516. void scmi_notification_exit(struct scmi_handle *handle)
  1517. {
  1518. struct scmi_notify_instance *ni;
  1519. ni = scmi_notification_instance_data_get(handle);
  1520. if (!ni)
  1521. return;
  1522. scmi_notification_instance_data_set(handle, NULL);
  1523. /* Destroy while letting pending work complete */
  1524. destroy_workqueue(ni->notify_wq);
  1525. devres_release_group(ni->handle->dev, ni->gid);
  1526. }