  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * PowerNV OPAL asynchronous completion interfaces
  4. *
  5. * Copyright 2013-2017 IBM Corp.
  6. */
  7. #undef DEBUG
  8. #include <linux/kernel.h>
  9. #include <linux/init.h>
  10. #include <linux/slab.h>
  11. #include <linux/sched.h>
  12. #include <linux/semaphore.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/wait.h>
  15. #include <linux/gfp.h>
  16. #include <linux/of.h>
  17. #include <asm/machdep.h>
  18. #include <asm/opal.h>
/*
 * Lifecycle of an async token:
 *
 *   UNALLOCATED -> ALLOCATED    (__opal_async_get_token)
 *   ALLOCATED   -> DISPATCHED   (first interruptible wait after an OPAL call)
 *   DISPATCHED  -> ABANDONED    (caller released before OPAL responded)
 *   any         -> COMPLETED    (OPAL completion notifier fired)
 *   COMPLETED/ALLOCATED -> UNALLOCATED (release)
 */
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,	/* free slot, may be handed out */
	ASYNC_TOKEN_ALLOCATED,		/* owned by a caller, not yet dispatched */
	ASYNC_TOKEN_DISPATCHED,		/* OPAL call in flight, waiter may exist */
	ASYNC_TOKEN_ABANDONED,		/* waiter gave up; notifier must free it */
	ASYNC_TOKEN_COMPLETED		/* response stored, ready to be consumed */
};
/*
 * One per-token record. @state is protected by opal_async_comp_lock;
 * @response is only valid once @state == ASYNC_TOKEN_COMPLETED.
 */
struct opal_async_token {
	enum opal_async_token_state state;
	struct opal_msg response;	/* completion message copied from OPAL */
};
/* Waiters blocked until their token reaches ASYNC_TOKEN_COMPLETED. */
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
/* Protects every opal_async_tokens[].state transition. */
static DEFINE_SPINLOCK(opal_async_comp_lock);
/* Counts free tokens; callers sleep here when all are in use. */
static struct semaphore opal_async_sem;
/* Number of tokens advertised by firmware ("opal-msg-async-num"). */
static unsigned int opal_max_async_tokens;
/* Token table, allocated at init; indexed by the token number. */
static struct opal_async_token *opal_async_tokens;
  35. static int __opal_async_get_token(void)
  36. {
  37. unsigned long flags;
  38. int i, token = -EBUSY;
  39. spin_lock_irqsave(&opal_async_comp_lock, flags);
  40. for (i = 0; i < opal_max_async_tokens; i++) {
  41. if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
  42. opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
  43. token = i;
  44. break;
  45. }
  46. }
  47. spin_unlock_irqrestore(&opal_async_comp_lock, flags);
  48. return token;
  49. }
  50. /*
  51. * Note: If the returned token is used in an opal call and opal returns
  52. * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
  53. * opal_async_wait_response_interruptible() at least once before calling another
  54. * opal_async_* function
  55. */
  56. int opal_async_get_token_interruptible(void)
  57. {
  58. int token;
  59. /* Wait until a token is available */
  60. if (down_interruptible(&opal_async_sem))
  61. return -ERESTARTSYS;
  62. token = __opal_async_get_token();
  63. if (token < 0)
  64. up(&opal_async_sem);
  65. return token;
  66. }
  67. EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
  68. static int __opal_async_release_token(int token)
  69. {
  70. unsigned long flags;
  71. int rc;
  72. if (token < 0 || token >= opal_max_async_tokens) {
  73. pr_err("%s: Passed token is out of range, token %d\n",
  74. __func__, token);
  75. return -EINVAL;
  76. }
  77. spin_lock_irqsave(&opal_async_comp_lock, flags);
  78. switch (opal_async_tokens[token].state) {
  79. case ASYNC_TOKEN_COMPLETED:
  80. case ASYNC_TOKEN_ALLOCATED:
  81. opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
  82. rc = 0;
  83. break;
  84. /*
  85. * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
  86. * Mark a DISPATCHED token as ABANDONED so that the response handling
  87. * code knows no one cares and that it can free it then.
  88. */
  89. case ASYNC_TOKEN_DISPATCHED:
  90. opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
  91. fallthrough;
  92. default:
  93. rc = 1;
  94. }
  95. spin_unlock_irqrestore(&opal_async_comp_lock, flags);
  96. return rc;
  97. }
  98. int opal_async_release_token(int token)
  99. {
  100. int ret;
  101. ret = __opal_async_release_token(token);
  102. if (!ret)
  103. up(&opal_async_sem);
  104. return ret;
  105. }
  106. EXPORT_SYMBOL_GPL(opal_async_release_token);
/*
 * Block (uninterruptibly) until @token completes, then copy the OPAL
 * response message into @msg.
 *
 * Returns 0 on success, -EINVAL for a bad token or NULL @msg.
 */
int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}
	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}
	/*
	 * There is no need to mark the token as dispatched, wait_event()
	 * will block until the token completes.
	 *
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait, opal_async_tokens[token].state
			== ASYNC_TOKEN_COMPLETED);
	memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));
	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);
/*
 * Interruptible variant of opal_async_wait_response().
 *
 * Returns 0 with the response copied into @msg, -ERESTARTSYS (via
 * wait_event_interruptible()) if interrupted by a signal, or -EINVAL
 * for a bad token or NULL @msg. On a signal the caller may release the
 * token; the ALLOCATED->DISPATCHED transition below makes that safe.
 */
int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg)
{
	unsigned long flags;
	int ret;

	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}
	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}
	/*
	 * The first time this gets called we mark the token as DISPATCHED
	 * so that if wait_event_interruptible() returns not zero and the
	 * caller frees the token, we know not to actually free the token
	 * until the response comes.
	 *
	 * Only change if the token is ALLOCATED - it may have been
	 * completed even before the caller gets around to calling this
	 * the first time.
	 *
	 * There is also a dirty great comment at the token allocation
	 * function that if the opal call returns OPAL_ASYNC_COMPLETION to
	 * the caller then the caller *must* call this or the not
	 * interruptible version before doing anything else with the
	 * token.
	 */
	/* Unlocked pre-check avoids taking the lock on every re-entry. */
	if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
		spin_lock_irqsave(&opal_async_comp_lock, flags);
		/* Re-check under the lock: completion may have raced us. */
		if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
			opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
		spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	}
	/*
	 * Wakeup the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	ret = wait_event_interruptible(opal_async_wait,
			opal_async_tokens[token].state ==
			ASYNC_TOKEN_COMPLETED);
	if (!ret)
		memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));
	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);
  180. /* Called from interrupt context */
  181. static int opal_async_comp_event(struct notifier_block *nb,
  182. unsigned long msg_type, void *msg)
  183. {
  184. struct opal_msg *comp_msg = msg;
  185. enum opal_async_token_state state;
  186. unsigned long flags;
  187. uint64_t token;
  188. if (msg_type != OPAL_MSG_ASYNC_COMP)
  189. return 0;
  190. token = be64_to_cpu(comp_msg->params[0]);
  191. spin_lock_irqsave(&opal_async_comp_lock, flags);
  192. state = opal_async_tokens[token].state;
  193. opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
  194. spin_unlock_irqrestore(&opal_async_comp_lock, flags);
  195. if (state == ASYNC_TOKEN_ABANDONED) {
  196. /* Free the token, no one else will */
  197. opal_async_release_token(token);
  198. return 0;
  199. }
  200. memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
  201. wake_up(&opal_async_wait);
  202. return 0;
  203. }
  204. static struct notifier_block opal_async_comp_nb = {
  205. .notifier_call = opal_async_comp_event,
  206. .next = NULL,
  207. .priority = 0,
  208. };
  209. int __init opal_async_comp_init(void)
  210. {
  211. struct device_node *opal_node;
  212. const __be32 *async;
  213. int err;
  214. opal_node = of_find_node_by_path("/ibm,opal");
  215. if (!opal_node) {
  216. pr_err("%s: Opal node not found\n", __func__);
  217. err = -ENOENT;
  218. goto out;
  219. }
  220. async = of_get_property(opal_node, "opal-msg-async-num", NULL);
  221. if (!async) {
  222. pr_err("%s: %pOF has no opal-msg-async-num\n",
  223. __func__, opal_node);
  224. err = -ENOENT;
  225. goto out_opal_node;
  226. }
  227. opal_max_async_tokens = be32_to_cpup(async);
  228. opal_async_tokens = kcalloc(opal_max_async_tokens,
  229. sizeof(*opal_async_tokens), GFP_KERNEL);
  230. if (!opal_async_tokens) {
  231. err = -ENOMEM;
  232. goto out_opal_node;
  233. }
  234. err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
  235. &opal_async_comp_nb);
  236. if (err) {
  237. pr_err("%s: Can't register OPAL event notifier (%d)\n",
  238. __func__, err);
  239. kfree(opal_async_tokens);
  240. goto out_opal_node;
  241. }
  242. sema_init(&opal_async_sem, opal_max_async_tokens);
  243. out_opal_node:
  244. of_node_put(opal_node);
  245. out:
  246. return err;
  247. }