ipmi_powernv.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * PowerNV OPAL IPMI driver
 *
 * Copyright 2014 IBM Corp.
 */

#define pr_fmt(fmt) "ipmi-powernv: " fmt

#include <linux/ipmi_smi.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>

#include <asm/opal.h>
struct ipmi_smi_powernv {
	u64			interface_id;
	struct ipmi_smi		*intf;
	unsigned int		irq;

	/*
	 * We assume there can only be one outstanding request, so we keep
	 * the pending message in cur_msg. msg_lock protects cur_msg (and
	 * consequently opal_msg, which is in use whenever cur_msg is set)
	 * from concurrent updates through the send & recv paths.
	 */
	spinlock_t		msg_lock;
	struct ipmi_smi_msg	*cur_msg;
	struct opal_ipmi_msg	*opal_msg;
};
static int ipmi_powernv_start_processing(void *send_info, struct ipmi_smi *intf)
{
	struct ipmi_smi_powernv *smi = send_info;

	smi->intf = intf;
	return 0;
}
/*
 * Build a minimal local response for @msg: the netfn/LUN byte with the
 * response bit set, the original command, and the completion code, then
 * hand it back to the IPMI core.
 */
static void send_error_reply(struct ipmi_smi_powernv *smi,
		struct ipmi_smi_msg *msg, u8 completion_code)
{
	msg->rsp[0] = msg->data[0] | 0x4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = completion_code;
	msg->rsp_size = 3;
	ipmi_smi_msg_received(smi->intf, msg);
}
static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
{
	struct ipmi_smi_powernv *smi = send_info;
	struct opal_ipmi_msg *opal_msg;
	unsigned long flags;
	int comp, rc;
	size_t size;

	/* ensure data_len will fit in the opal_ipmi_msg buffer... */
	if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
		comp = IPMI_REQ_LEN_EXCEEDED_ERR;
		goto err;
	}

	/* ... and that we at least have netfn and cmd bytes */
	if (msg->data_size < 2) {
		comp = IPMI_REQ_LEN_INVALID_ERR;
		goto err;
	}

	spin_lock_irqsave(&smi->msg_lock, flags);

	if (smi->cur_msg) {
		comp = IPMI_NODE_BUSY_ERR;
		goto err_unlock;
	}

	/* format our data for the OPAL API */
	opal_msg = smi->opal_msg;
	opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
	opal_msg->netfn = msg->data[0];
	opal_msg->cmd = msg->data[1];
	if (msg->data_size > 2)
		memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);

	/* data_size already includes the netfn and cmd bytes */
	size = sizeof(*opal_msg) + msg->data_size - 2;

	pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
			smi->interface_id, opal_msg, size);
	rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
	pr_devel("%s: -> %d\n", __func__, rc);

	if (!rc) {
		smi->cur_msg = msg;
		spin_unlock_irqrestore(&smi->msg_lock, flags);
		return;
	}

	comp = IPMI_ERR_UNSPECIFIED;
err_unlock:
	spin_unlock_irqrestore(&smi->msg_lock, flags);
err:
	send_error_reply(smi, msg, comp);
}
/*
 * Retrieve the response for the in-flight request from OPAL and deliver
 * it to the IPMI core. Called from both the event interrupt handler and
 * the ->poll() callback; OPAL_EMPTY simply means no response is ready yet.
 */
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
{
	struct opal_ipmi_msg *opal_msg;
	struct ipmi_smi_msg *msg;
	unsigned long flags;
	uint64_t size;
	int rc;

	pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
			smi->interface_id);

	spin_lock_irqsave(&smi->msg_lock, flags);

	if (!smi->cur_msg) {
		spin_unlock_irqrestore(&smi->msg_lock, flags);
		pr_warn("no current message?\n");
		return 0;
	}

	msg = smi->cur_msg;
	opal_msg = smi->opal_msg;

	size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);

	rc = opal_ipmi_recv(smi->interface_id,
			opal_msg,
			&size);
	size = be64_to_cpu(size);
	pr_devel("%s: -> %d (size %lld)\n", __func__,
			rc, rc == 0 ? size : 0);
	if (rc) {
		/* If we came in via poll, the response may not be ready yet */
		if (rc == OPAL_EMPTY) {
			spin_unlock_irqrestore(&smi->msg_lock, flags);
			return 0;
		}

		smi->cur_msg = NULL;
		spin_unlock_irqrestore(&smi->msg_lock, flags);
		send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED);
		return 0;
	}

	if (size < sizeof(*opal_msg)) {
		spin_unlock_irqrestore(&smi->msg_lock, flags);
		pr_warn("unexpected IPMI message size %lld\n", size);
		return 0;
	}

	if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
		spin_unlock_irqrestore(&smi->msg_lock, flags);
		pr_warn("unexpected IPMI message format (version %d)\n",
				opal_msg->version);
		return 0;
	}

	msg->rsp[0] = opal_msg->netfn;
	msg->rsp[1] = opal_msg->cmd;
	if (size > sizeof(*opal_msg))
		memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
	msg->rsp_size = 2 + size - sizeof(*opal_msg);

	smi->cur_msg = NULL;
	spin_unlock_irqrestore(&smi->msg_lock, flags);
	ipmi_smi_msg_received(smi->intf, msg);
	return 0;
}
static void ipmi_powernv_request_events(void *send_info)
{
}

static void ipmi_powernv_set_run_to_completion(void *send_info,
		bool run_to_completion)
{
}

static void ipmi_powernv_poll(void *send_info)
{
	struct ipmi_smi_powernv *smi = send_info;

	ipmi_powernv_recv(smi);
}
static const struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
	.owner			= THIS_MODULE,
	.start_processing	= ipmi_powernv_start_processing,
	.sender			= ipmi_powernv_send,
	.request_events		= ipmi_powernv_request_events,
	.set_run_to_completion	= ipmi_powernv_set_run_to_completion,
	.poll			= ipmi_powernv_poll,
};
static irqreturn_t ipmi_opal_event(int irq, void *data)
{
	struct ipmi_smi_powernv *smi = data;

	ipmi_powernv_recv(smi);
	return IRQ_HANDLED;
}
static int ipmi_powernv_probe(struct platform_device *pdev)
{
	struct ipmi_smi_powernv *ipmi;
	struct device *dev;
	u32 prop;
	int rc;

	if (!pdev || !pdev->dev.of_node)
		return -ENODEV;

	dev = &pdev->dev;

	ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
	if (!ipmi)
		return -ENOMEM;

	spin_lock_init(&ipmi->msg_lock);

	rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
			&prop);
	if (rc) {
		dev_warn(dev, "No interface ID property\n");
		goto err_free;
	}
	ipmi->interface_id = prop;

	rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
	if (rc) {
		dev_warn(dev, "No interrupts property\n");
		goto err_free;
	}

	ipmi->irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!ipmi->irq) {
		dev_info(dev, "Unable to map irq from device tree\n");
		ipmi->irq = opal_event_request(prop);
	}

	rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
			"opal-ipmi", ipmi);
	if (rc) {
		dev_warn(dev, "Unable to request irq\n");
		goto err_dispose;
	}

	ipmi->opal_msg = devm_kmalloc(dev,
			sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
			GFP_KERNEL);
	if (!ipmi->opal_msg) {
		rc = -ENOMEM;
		goto err_unregister;
	}

	rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
	if (rc) {
		dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
		goto err_free_msg;
	}

	dev_set_drvdata(dev, ipmi);
	return 0;

err_free_msg:
	devm_kfree(dev, ipmi->opal_msg);
err_unregister:
	free_irq(ipmi->irq, ipmi);
err_dispose:
	irq_dispose_mapping(ipmi->irq);
err_free:
	devm_kfree(dev, ipmi);
	return rc;
}
static int ipmi_powernv_remove(struct platform_device *pdev)
{
	struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);

	ipmi_unregister_smi(smi->intf);
	free_irq(smi->irq, smi);
	irq_dispose_mapping(smi->irq);

	return 0;
}
static const struct of_device_id ipmi_powernv_match[] = {
	{ .compatible = "ibm,opal-ipmi" },
	{ },
};

static struct platform_driver powernv_ipmi_driver = {
	.driver = {
		.name		= "ipmi-powernv",
		.of_match_table	= ipmi_powernv_match,
	},
	.probe	= ipmi_powernv_probe,
	.remove	= ipmi_powernv_remove,
};
module_platform_driver(powernv_ipmi_driver);

MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
MODULE_DESCRIPTION("powernv IPMI driver");
MODULE_AUTHOR("Jeremy Kerr <[email protected]>");
MODULE_LICENSE("GPL");