  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Xilinx Event Management Driver
  4. *
  5. * Copyright (C) 2021 Xilinx, Inc.
  6. *
  7. * Abhyuday Godhasara <[email protected]>
  8. */
  9. #include <linux/cpuhotplug.h>
  10. #include <linux/firmware/xlnx-event-manager.h>
  11. #include <linux/firmware/xlnx-zynqmp.h>
  12. #include <linux/hashtable.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/irq.h>
  15. #include <linux/irqdomain.h>
  16. #include <linux/module.h>
  17. #include <linux/of_irq.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/slab.h>
  20. static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
  21. static int virq_sgi;
  22. static int event_manager_availability = -EACCES;
  23. /* SGI number used for Event management driver */
  24. #define XLNX_EVENT_SGI_NUM (15)
  25. /* Max number of driver can register for same event */
  26. #define MAX_DRIVER_PER_EVENT (10U)
  27. /* Max HashMap Order for PM API feature check (1<<7 = 128) */
  28. #define REGISTERED_DRIVER_MAX_ORDER (7)
  29. #define MAX_BITS (32U) /* Number of bits available for error mask */
  30. #define FIRMWARE_VERSION_MASK (0xFFFFU)
  31. #define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)
  32. static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
  33. static int sgi_num = XLNX_EVENT_SGI_NUM;
  34. static bool is_need_to_unregister;
  35. /**
  36. * struct agent_cb - Registered callback function and private data.
  37. * @agent_data: Data passed back to handler function.
  38. * @eve_cb: Function pointer to store the callback function.
  39. * @list: member to create list.
  40. */
  41. struct agent_cb {
  42. void *agent_data;
  43. event_cb_func_t eve_cb;
  44. struct list_head list;
  45. };
  46. /**
  47. * struct registered_event_data - Registered Event Data.
  48. * @key: key is the combine id(Node-Id | Event-Id) of type u64
  49. * where upper u32 for Node-Id and lower u32 for Event-Id,
  50. * And this used as key to index into hashmap.
  51. * @cb_type: Type of Api callback, like PM_NOTIFY_CB, etc.
  52. * @wake: If this flag set, firmware will wake up processor if is
  53. * in sleep or power down state.
  54. * @cb_list_head: Head of call back data list which contain the information
  55. * about registered handler and private data.
  56. * @hentry: hlist_node that hooks this entry into hashtable.
  57. */
  58. struct registered_event_data {
  59. u64 key;
  60. enum pm_api_cb_id cb_type;
  61. bool wake;
  62. struct list_head cb_list_head;
  63. struct hlist_node hentry;
  64. };
  65. static bool xlnx_is_error_event(const u32 node_id)
  66. {
  67. if (node_id == EVENT_ERROR_PMC_ERR1 ||
  68. node_id == EVENT_ERROR_PMC_ERR2 ||
  69. node_id == EVENT_ERROR_PSM_ERR1 ||
  70. node_id == EVENT_ERROR_PSM_ERR2)
  71. return true;
  72. return false;
  73. }
  74. static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
  75. event_cb_func_t cb_fun, void *data)
  76. {
  77. u64 key = 0;
  78. bool present_in_hash = false;
  79. struct registered_event_data *eve_data;
  80. struct agent_cb *cb_data;
  81. struct agent_cb *cb_pos;
  82. struct agent_cb *cb_next;
  83. key = ((u64)node_id << 32U) | (u64)event;
  84. /* Check for existing entry in hash table for given key id */
  85. hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
  86. if (eve_data->key == key) {
  87. present_in_hash = true;
  88. break;
  89. }
  90. }
  91. if (!present_in_hash) {
  92. /* Add new entry if not present in HASH table */
  93. eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
  94. if (!eve_data)
  95. return -ENOMEM;
  96. eve_data->key = key;
  97. eve_data->cb_type = PM_NOTIFY_CB;
  98. eve_data->wake = wake;
  99. INIT_LIST_HEAD(&eve_data->cb_list_head);
  100. cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
  101. if (!cb_data) {
  102. kfree(eve_data);
  103. return -ENOMEM;
  104. }
  105. cb_data->eve_cb = cb_fun;
  106. cb_data->agent_data = data;
  107. /* Add into callback list */
  108. list_add(&cb_data->list, &eve_data->cb_list_head);
  109. /* Add into HASH table */
  110. hash_add(reg_driver_map, &eve_data->hentry, key);
  111. } else {
  112. /* Search for callback function and private data in list */
  113. list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
  114. if (cb_pos->eve_cb == cb_fun &&
  115. cb_pos->agent_data == data) {
  116. return 0;
  117. }
  118. }
  119. /* Add multiple handler and private data in list */
  120. cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
  121. if (!cb_data)
  122. return -ENOMEM;
  123. cb_data->eve_cb = cb_fun;
  124. cb_data->agent_data = data;
  125. list_add(&cb_data->list, &eve_data->cb_list_head);
  126. }
  127. return 0;
  128. }
  129. static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
  130. {
  131. struct registered_event_data *eve_data;
  132. struct agent_cb *cb_data;
  133. /* Check for existing entry in hash table for given cb_type */
  134. hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
  135. if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
  136. pr_err("Found as already registered\n");
  137. return -EINVAL;
  138. }
  139. }
  140. /* Add new entry if not present */
  141. eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
  142. if (!eve_data)
  143. return -ENOMEM;
  144. eve_data->key = 0;
  145. eve_data->cb_type = PM_INIT_SUSPEND_CB;
  146. INIT_LIST_HEAD(&eve_data->cb_list_head);
  147. cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
  148. if (!cb_data)
  149. return -ENOMEM;
  150. cb_data->eve_cb = cb_fun;
  151. cb_data->agent_data = data;
  152. /* Add into callback list */
  153. list_add(&cb_data->list, &eve_data->cb_list_head);
  154. hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
  155. return 0;
  156. }
  157. static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
  158. {
  159. bool is_callback_found = false;
  160. struct registered_event_data *eve_data;
  161. struct agent_cb *cb_pos;
  162. struct agent_cb *cb_next;
  163. struct hlist_node *tmp;
  164. is_need_to_unregister = false;
  165. /* Check for existing entry in hash table for given cb_type */
  166. hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
  167. if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
  168. /* Delete the list of callback */
  169. list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
  170. if (cb_pos->eve_cb == cb_fun) {
  171. is_callback_found = true;
  172. list_del_init(&cb_pos->list);
  173. kfree(cb_pos);
  174. }
  175. }
  176. /* remove an object from a hashtable */
  177. hash_del(&eve_data->hentry);
  178. kfree(eve_data);
  179. is_need_to_unregister = true;
  180. }
  181. }
  182. if (!is_callback_found) {
  183. pr_warn("Didn't find any registered callback for suspend event\n");
  184. return -EINVAL;
  185. }
  186. return 0;
  187. }
  188. static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
  189. event_cb_func_t cb_fun, void *data)
  190. {
  191. bool is_callback_found = false;
  192. struct registered_event_data *eve_data;
  193. u64 key = ((u64)node_id << 32U) | (u64)event;
  194. struct agent_cb *cb_pos;
  195. struct agent_cb *cb_next;
  196. struct hlist_node *tmp;
  197. is_need_to_unregister = false;
  198. /* Check for existing entry in hash table for given key id */
  199. hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
  200. if (eve_data->key == key) {
  201. /* Delete the list of callback */
  202. list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
  203. if (cb_pos->eve_cb == cb_fun &&
  204. cb_pos->agent_data == data) {
  205. is_callback_found = true;
  206. list_del_init(&cb_pos->list);
  207. kfree(cb_pos);
  208. }
  209. }
  210. /* Remove HASH table if callback list is empty */
  211. if (list_empty(&eve_data->cb_list_head)) {
  212. /* remove an object from a HASH table */
  213. hash_del(&eve_data->hentry);
  214. kfree(eve_data);
  215. is_need_to_unregister = true;
  216. }
  217. }
  218. }
  219. if (!is_callback_found) {
  220. pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
  221. node_id, event);
  222. return -EINVAL;
  223. }
  224. return 0;
  225. }
  226. /**
  227. * xlnx_register_event() - Register for the event.
  228. * @cb_type: Type of callback from pm_api_cb_id,
  229. * PM_NOTIFY_CB - for Error Events,
  230. * PM_INIT_SUSPEND_CB - for suspend callback.
  231. * @node_id: Node-Id related to event.
  232. * @event: Event Mask for the Error Event.
  233. * @wake: Flag specifying whether the subsystem should be woken upon
  234. * event notification.
  235. * @cb_fun: Function pointer to store the callback function.
  236. * @data: Pointer for the driver instance.
  237. *
  238. * Return: Returns 0 on successful registration else error code.
  239. */
  240. int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
  241. const bool wake, event_cb_func_t cb_fun, void *data)
  242. {
  243. int ret = 0;
  244. u32 eve;
  245. int pos;
  246. if (event_manager_availability)
  247. return event_manager_availability;
  248. if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
  249. pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
  250. return -EINVAL;
  251. }
  252. if (!cb_fun)
  253. return -EFAULT;
  254. if (cb_type == PM_INIT_SUSPEND_CB) {
  255. ret = xlnx_add_cb_for_suspend(cb_fun, data);
  256. } else {
  257. if (!xlnx_is_error_event(node_id)) {
  258. /* Add entry for Node-Id/Event in hash table */
  259. ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
  260. } else {
  261. /* Add into Hash table */
  262. for (pos = 0; pos < MAX_BITS; pos++) {
  263. eve = event & (1 << pos);
  264. if (!eve)
  265. continue;
  266. /* Add entry for Node-Id/Eve in hash table */
  267. ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
  268. data);
  269. /* Break the loop if got error */
  270. if (ret)
  271. break;
  272. }
  273. if (ret) {
  274. /* Skip the Event for which got the error */
  275. pos--;
  276. /* Remove registered(during this call) event from hash table */
  277. for ( ; pos >= 0; pos--) {
  278. eve = event & (1 << pos);
  279. if (!eve)
  280. continue;
  281. xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
  282. }
  283. }
  284. }
  285. if (ret) {
  286. pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
  287. event, ret);
  288. return ret;
  289. }
  290. /* Register for Node-Id/Event combination in firmware */
  291. ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
  292. if (ret) {
  293. pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
  294. event, ret);
  295. /* Remove already registered event from hash table */
  296. if (xlnx_is_error_event(node_id)) {
  297. for (pos = 0; pos < MAX_BITS; pos++) {
  298. eve = event & (1 << pos);
  299. if (!eve)
  300. continue;
  301. xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
  302. }
  303. } else {
  304. xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
  305. }
  306. return ret;
  307. }
  308. }
  309. return ret;
  310. }
  311. EXPORT_SYMBOL_GPL(xlnx_register_event);
  312. /**
  313. * xlnx_unregister_event() - Unregister for the event.
  314. * @cb_type: Type of callback from pm_api_cb_id,
  315. * PM_NOTIFY_CB - for Error Events,
  316. * PM_INIT_SUSPEND_CB - for suspend callback.
  317. * @node_id: Node-Id related to event.
  318. * @event: Event Mask for the Error Event.
  319. * @cb_fun: Function pointer of callback function.
  320. * @data: Pointer of agent's private data.
  321. *
  322. * Return: Returns 0 on successful unregistration else error code.
  323. */
  324. int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
  325. event_cb_func_t cb_fun, void *data)
  326. {
  327. int ret = 0;
  328. u32 eve, pos;
  329. is_need_to_unregister = false;
  330. if (event_manager_availability)
  331. return event_manager_availability;
  332. if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
  333. pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
  334. return -EINVAL;
  335. }
  336. if (!cb_fun)
  337. return -EFAULT;
  338. if (cb_type == PM_INIT_SUSPEND_CB) {
  339. ret = xlnx_remove_cb_for_suspend(cb_fun);
  340. } else {
  341. /* Remove Node-Id/Event from hash table */
  342. if (!xlnx_is_error_event(node_id)) {
  343. xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
  344. } else {
  345. for (pos = 0; pos < MAX_BITS; pos++) {
  346. eve = event & (1 << pos);
  347. if (!eve)
  348. continue;
  349. xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
  350. }
  351. }
  352. /* Un-register if list is empty */
  353. if (is_need_to_unregister) {
  354. /* Un-register for Node-Id/Event combination */
  355. ret = zynqmp_pm_register_notifier(node_id, event, false, false);
  356. if (ret) {
  357. pr_err("%s() failed for 0x%x and 0x%x: %d\n",
  358. __func__, node_id, event, ret);
  359. return ret;
  360. }
  361. }
  362. }
  363. return ret;
  364. }
  365. EXPORT_SYMBOL_GPL(xlnx_unregister_event);
  366. static void xlnx_call_suspend_cb_handler(const u32 *payload)
  367. {
  368. bool is_callback_found = false;
  369. struct registered_event_data *eve_data;
  370. u32 cb_type = payload[0];
  371. struct agent_cb *cb_pos;
  372. struct agent_cb *cb_next;
  373. /* Check for existing entry in hash table for given cb_type */
  374. hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
  375. if (eve_data->cb_type == cb_type) {
  376. list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
  377. cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
  378. is_callback_found = true;
  379. }
  380. }
  381. }
  382. if (!is_callback_found)
  383. pr_warn("Didn't find any registered callback for suspend event\n");
  384. }
  385. static void xlnx_call_notify_cb_handler(const u32 *payload)
  386. {
  387. bool is_callback_found = false;
  388. struct registered_event_data *eve_data;
  389. u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
  390. int ret;
  391. struct agent_cb *cb_pos;
  392. struct agent_cb *cb_next;
  393. /* Check for existing entry in hash table for given key id */
  394. hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
  395. if (eve_data->key == key) {
  396. list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
  397. cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
  398. is_callback_found = true;
  399. }
  400. /* re register with firmware to get future events */
  401. ret = zynqmp_pm_register_notifier(payload[1], payload[2],
  402. eve_data->wake, true);
  403. if (ret) {
  404. pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
  405. payload[1], payload[2], ret);
  406. list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
  407. list) {
  408. /* Remove already registered event from hash table */
  409. xlnx_remove_cb_for_notify_event(payload[1], payload[2],
  410. cb_pos->eve_cb,
  411. cb_pos->agent_data);
  412. }
  413. }
  414. }
  415. }
  416. if (!is_callback_found)
  417. pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
  418. payload[1], payload[2]);
  419. }
  420. static void xlnx_get_event_callback_data(u32 *buf)
  421. {
  422. zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
  423. }
  424. static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
  425. {
  426. u32 cb_type, node_id, event, pos;
  427. u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
  428. u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
  429. /* Get event data */
  430. xlnx_get_event_callback_data(payload);
  431. /* First element is callback type, others are callback arguments */
  432. cb_type = payload[0];
  433. if (cb_type == PM_NOTIFY_CB) {
  434. node_id = payload[1];
  435. event = payload[2];
  436. if (!xlnx_is_error_event(node_id)) {
  437. xlnx_call_notify_cb_handler(payload);
  438. } else {
  439. /*
  440. * Each call back function expecting payload as an input arguments.
  441. * We can get multiple error events as in one call back through error
  442. * mask. So payload[2] may can contain multiple error events.
  443. * In reg_driver_map database we store data in the combination of single
  444. * node_id-error combination.
  445. * So coping the payload message into event_data and update the
  446. * event_data[2] with Error Mask for single error event and use
  447. * event_data as input argument for registered call back function.
  448. *
  449. */
  450. memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
  451. /* Support Multiple Error Event */
  452. for (pos = 0; pos < MAX_BITS; pos++) {
  453. if ((0 == (event & (1 << pos))))
  454. continue;
  455. event_data[2] = (event & (1 << pos));
  456. xlnx_call_notify_cb_handler(event_data);
  457. }
  458. }
  459. } else if (cb_type == PM_INIT_SUSPEND_CB) {
  460. xlnx_call_suspend_cb_handler(payload);
  461. } else {
  462. pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
  463. }
  464. return IRQ_HANDLED;
  465. }
  466. static int xlnx_event_cpuhp_start(unsigned int cpu)
  467. {
  468. enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);
  469. return 0;
  470. }
  471. static int xlnx_event_cpuhp_down(unsigned int cpu)
  472. {
  473. disable_percpu_irq(virq_sgi);
  474. return 0;
  475. }
  476. static void xlnx_disable_percpu_irq(void *data)
  477. {
  478. disable_percpu_irq(virq_sgi);
  479. }
  480. static int xlnx_event_init_sgi(struct platform_device *pdev)
  481. {
  482. int ret = 0;
  483. int cpu = smp_processor_id();
  484. /*
  485. * IRQ related structures are used for the following:
  486. * for each SGI interrupt ensure its mapped by GIC IRQ domain
  487. * and that each corresponding linux IRQ for the HW IRQ has
  488. * a handler for when receiving an interrupt from the remote
  489. * processor.
  490. */
  491. struct irq_domain *domain;
  492. struct irq_fwspec sgi_fwspec;
  493. struct device_node *interrupt_parent = NULL;
  494. struct device *parent = pdev->dev.parent;
  495. /* Find GIC controller to map SGIs. */
  496. interrupt_parent = of_irq_find_parent(parent->of_node);
  497. if (!interrupt_parent) {
  498. dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
  499. return -EINVAL;
  500. }
  501. /* Each SGI needs to be associated with GIC's IRQ domain. */
  502. domain = irq_find_host(interrupt_parent);
  503. of_node_put(interrupt_parent);
  504. /* Each mapping needs GIC domain when finding IRQ mapping. */
  505. sgi_fwspec.fwnode = domain->fwnode;
  506. /*
  507. * When irq domain looks at mapping each arg is as follows:
  508. * 3 args for: interrupt type (SGI), interrupt # (set later), type
  509. */
  510. sgi_fwspec.param_count = 1;
  511. /* Set SGI's hwirq */
  512. sgi_fwspec.param[0] = sgi_num;
  513. virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
  514. per_cpu(cpu_number1, cpu) = cpu;
  515. ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
  516. &cpu_number1);
  517. WARN_ON(ret);
  518. if (ret) {
  519. irq_dispose_mapping(virq_sgi);
  520. return ret;
  521. }
  522. irq_to_desc(virq_sgi);
  523. irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
  524. return ret;
  525. }
  526. static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
  527. {
  528. int cpu = smp_processor_id();
  529. per_cpu(cpu_number1, cpu) = cpu;
  530. cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
  531. on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
  532. irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
  533. free_percpu_irq(virq_sgi, &cpu_number1);
  534. irq_dispose_mapping(virq_sgi);
  535. }
  536. static int xlnx_event_manager_probe(struct platform_device *pdev)
  537. {
  538. int ret;
  539. ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
  540. if (ret < 0) {
  541. dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
  542. return ret;
  543. }
  544. if ((ret & FIRMWARE_VERSION_MASK) <
  545. REGISTER_NOTIFIER_FIRMWARE_VERSION) {
  546. dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
  547. REGISTER_NOTIFIER_FIRMWARE_VERSION,
  548. ret & FIRMWARE_VERSION_MASK);
  549. return -EOPNOTSUPP;
  550. }
  551. /* Initialize the SGI */
  552. ret = xlnx_event_init_sgi(pdev);
  553. if (ret) {
  554. dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret);
  555. return ret;
  556. }
  557. /* Setup function for the CPU hot-plug cases */
  558. cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
  559. xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);
  560. ret = zynqmp_pm_register_sgi(sgi_num, 0);
  561. if (ret) {
  562. dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
  563. xlnx_event_cleanup_sgi(pdev);
  564. return ret;
  565. }
  566. event_manager_availability = 0;
  567. dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
  568. dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");
  569. return ret;
  570. }
  571. static int xlnx_event_manager_remove(struct platform_device *pdev)
  572. {
  573. int i;
  574. struct registered_event_data *eve_data;
  575. struct hlist_node *tmp;
  576. int ret;
  577. struct agent_cb *cb_pos;
  578. struct agent_cb *cb_next;
  579. hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
  580. list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
  581. list_del_init(&cb_pos->list);
  582. kfree(cb_pos);
  583. }
  584. hash_del(&eve_data->hentry);
  585. kfree(eve_data);
  586. }
  587. ret = zynqmp_pm_register_sgi(0, 1);
  588. if (ret)
  589. dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);
  590. xlnx_event_cleanup_sgi(pdev);
  591. event_manager_availability = -EACCES;
  592. return ret;
  593. }
  594. static struct platform_driver xlnx_event_manager_driver = {
  595. .probe = xlnx_event_manager_probe,
  596. .remove = xlnx_event_manager_remove,
  597. .driver = {
  598. .name = "xlnx_event_manager",
  599. },
  600. };
  601. module_param(sgi_num, uint, 0);
  602. module_platform_driver(xlnx_event_manager_driver);