hd.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Host Device
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);

static struct ida gb_hd_bus_id_map;

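/*
 * Pass a host-driver specific request through to the driver's optional
 * output callback, synchronously or asynchronously depending on the
 * async flag.
 */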
int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		 bool async)
{
	if (!hd || !hd->driver || !hd->driver->output)
		return -EINVAL;

	return hd->driver->output(hd, req, size, cmd, async);
}
EXPORT_SYMBOL_GPL(gb_hd_output);

static ssize_t bus_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	return sprintf(buf, "%d\n", hd->bus_id);
}
static DEVICE_ATTR_RO(bus_id);

static struct attribute *bus_attrs[] = {
	&dev_attr_bus_id.attr,
	NULL
};
ATTRIBUTE_GROUPS(bus);

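/* Reserve a specific CPort id so it cannot be handed out by the allocator. */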
int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
{
	struct ida *id_map = &hd->cport_id_map;
	int ret;

	ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);

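/* Release a CPort id previously reserved with gb_hd_cport_reserve(). */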
void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
	struct ida *id_map = &hd->cport_id_map;

	ida_simple_remove(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);

/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
			 unsigned long flags)
{
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	if (hd->driver->cport_allocate)
		return hd->driver->cport_allocate(hd, cport_id, flags);

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

/* Locking: Caller guarantees serialisation */
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	if (hd->driver->cport_release) {
		hd->driver->cport_release(hd, cport_id);
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}

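/*
 * Device release callback: drops the SVC reference, frees the bus id and
 * the cport id map, and frees the host device itself.
 */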
static void gb_hd_release(struct device *dev)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	trace_gb_hd_release(hd);

	if (hd->svc)
		gb_svc_put(hd->svc);
	ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
	ida_destroy(&hd->cport_id_map);
	kfree(hd);
}

struct device_type greybus_hd_type = {
	.name = "greybus_host_device",
	.release = gb_hd_release,
};

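/*
 * Allocate and initialise a host device and its SVC; registration with the
 * driver core happens later in gb_hd_add().
 */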
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
				    struct device *parent,
				    size_t buffer_size_max,
				    size_t num_cports)
{
	struct gb_host_device *hd;
	int ret;

	/*
	 * Validate that the driver implements all of the mandatory callbacks
	 * so that we don't have to check for them on every call.
	 */
	if ((!driver->message_send) || (!driver->message_cancel)) {
		dev_err(parent, "mandatory hd-callbacks missing\n");
		return ERR_PTR(-EINVAL);
	}

	if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
		dev_err(parent, "greybus host-device buffers too small\n");
		return ERR_PTR(-EINVAL);
	}

	if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
		dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Make sure to never allocate messages larger than what the Greybus
	 * protocol supports.
	 */
	if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
		dev_warn(parent, "limiting buffer size to %u\n",
			 GB_OPERATION_MESSAGE_SIZE_MAX);
		buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
	}

	hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
	if (!hd)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(hd);
		return ERR_PTR(ret);
	}
	hd->bus_id = ret;

	hd->driver = driver;
	INIT_LIST_HEAD(&hd->modules);
	INIT_LIST_HEAD(&hd->connections);
	ida_init(&hd->cport_id_map);
	hd->buffer_size_max = buffer_size_max;
	hd->num_cports = num_cports;

	hd->dev.parent = parent;
	hd->dev.bus = &greybus_bus_type;
	hd->dev.type = &greybus_hd_type;
	hd->dev.groups = bus_groups;
	hd->dev.dma_mask = hd->dev.parent->dma_mask;
	device_initialize(&hd->dev);
	dev_set_name(&hd->dev, "greybus%d", hd->bus_id);

	trace_gb_hd_create(hd);

	hd->svc = gb_svc_create(hd);
	if (!hd->svc) {
		dev_err(&hd->dev, "failed to create svc\n");
		put_device(&hd->dev);
		return ERR_PTR(-ENOMEM);
	}

	return hd;
}
EXPORT_SYMBOL_GPL(gb_hd_create);

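/* Register the host device and its SVC with the driver core. */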
int gb_hd_add(struct gb_host_device *hd)
{
	int ret;

	ret = device_add(&hd->dev);
	if (ret)
		return ret;

	ret = gb_svc_add(hd->svc);
	if (ret) {
		device_del(&hd->dev);
		return ret;
	}

	trace_gb_hd_add(hd);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_add);

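/* Unregister the host device: the SVC is torn down first, then the device is removed. */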
void gb_hd_del(struct gb_host_device *hd)
{
	trace_gb_hd_del(hd);

	/*
	 * Tear down the svc and flush any ongoing hotplug processing before
	 * removing the remaining interfaces.
	 */
	gb_svc_del(hd->svc);

	device_del(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_del);

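/* Quiesce the host device on shutdown: tear down the SVC without unregistering the device. */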
void gb_hd_shutdown(struct gb_host_device *hd)
{
	gb_svc_del(hd->svc);
}
EXPORT_SYMBOL_GPL(gb_hd_shutdown);

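/* Drop a reference to the host device; gb_hd_release() runs when the last one is gone. */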
void gb_hd_put(struct gb_host_device *hd)
{
	put_device(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_put);

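/* Set up and tear down the bus id map used to number host devices. */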
int __init gb_hd_init(void)
{
	ida_init(&gb_hd_bus_id_map);

	return 0;
}

void gb_hd_exit(void)
{
	ida_destroy(&gb_hd_bus_id_map);
}