remoteproc_virtio.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <[email protected]>
 * Brian Swetland <[email protected]>
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

static int copy_dma_range_map(struct device *to, struct device *from)
{
        const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
        int num_ranges = 0;

        if (!map)
                return 0;

        for (r = map; r->size; r++)
                num_ranges++;

        new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
                          GFP_KERNEL);
        if (!new_map)
                return -ENOMEM;
        to->dma_range_map = new_map;
        return 0;
}
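
/*
 * Editorial sketch (hypothetical addresses): the dma_range_map copied
 * above is a table of CPU <-> DMA address translations, usually built
 * from the parent's "dma-ranges" DT property, terminated by a zero-size
 * entry. That sentinel is why kmemdup() copies num_ranges + 1 elements:
 *
 *      static const struct bus_dma_region example_map[] = {
 *              { .cpu_start = 0x98000000, .dma_start = 0x0, .size = SZ_1M },
 *              { },    // zero-size sentinel ends the table walk
 *      };
 */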

static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
        struct platform_device *pdev;

        pdev = container_of(vdev->dev.parent, struct platform_device, dev);

        return platform_get_drvdata(pdev);
}

static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

        return rvdev->rproc;
}

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
        struct rproc_vring *rvring = vq->priv;
        struct rproc *rproc = rvring->rvdev->rproc;
        int notifyid = rvring->notifyid;

        dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

        rproc->ops->kick(rproc, notifyid);
        return true;
}
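
/*
 * Illustrative sketch, not part of this file: ops->kick is supplied by
 * the platform driver and typically rings a mailbox or IPI toward the
 * remote core. Assuming the generic mailbox framework, it could look
 * like this (names hypothetical):
 *
 *      static void my_rproc_kick(struct rproc *rproc, int vqid)
 *      {
 *              struct my_rproc_priv *priv = rproc->priv;
 *
 *              mbox_send_message(priv->mbox_chan, &vqid);
 *      }
 */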

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
        struct rproc_vring *rvring;

        dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

        rvring = idr_find(&rproc->notifyids, notifyid);
        if (!rvring || !rvring->vq)
                return IRQ_NONE;

        return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
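
/*
 * Usage sketch (hypothetical platform-driver code): the interrupt or
 * mailbox callback that receives the remote processor's signal decodes
 * the notifyid and forwards it here, so the return value can complete
 * the IRQ:
 *
 *      static irqreturn_t my_rproc_isr(int irq, void *data)
 *      {
 *              struct rproc *rproc = data;
 *              int notifyid = my_read_doorbell();      // hypothetical helper
 *
 *              return rproc_vq_interrupt(rproc, notifyid);
 *      }
 */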

static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
                                    unsigned int id,
                                    void (*callback)(struct virtqueue *vq),
                                    const char *name, bool ctx)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct rproc *rproc = vdev_to_rproc(vdev);
        struct device *dev = &rproc->dev;
        struct rproc_mem_entry *mem;
        struct rproc_vring *rvring;
        struct fw_rsc_vdev *rsc;
        struct virtqueue *vq;
        void *addr;
        int num, size;

        /* we're temporarily limited to two virtqueues per rvdev */
        if (id >= ARRAY_SIZE(rvdev->vring))
                return ERR_PTR(-EINVAL);

        if (!name)
                return NULL;

        /* Search allocated memory region by name */
        mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
                                          id);
        if (!mem || !mem->va)
                return ERR_PTR(-ENOMEM);

        rvring = &rvdev->vring[id];
        addr = mem->va;
        num = rvring->num;

        /* zero vring */
        size = vring_size(num, rvring->align);
        memset(addr, 0, size);

        dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
                id, addr, num, rvring->notifyid);

        /*
         * Create the new vq, and tell virtio we're not interested in
         * the 'weak' smp barriers, since we're talking with a real device.
         */
        vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
                                 addr, rproc_virtio_notify, callback, name);
        if (!vq) {
                dev_err(dev, "vring_new_virtqueue %s failed\n", name);
                rproc_free_vring(rvring);
                return ERR_PTR(-ENOMEM);
        }

        vq->num_max = num;

        rvring->vq = vq;
        vq->priv = rvring;

        /* Update vring in resource table */
        rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
        rsc->vring[id].da = mem->da;

        return vq;
}
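
/*
 * Note on the lookup above: the vring carveouts are registered by the
 * remoteproc core under names derived from the vdev index and vring id,
 * so for vring 1 of vdev 0 the call
 *
 *      rproc_find_carveout_by_name(rproc, "vdev%dvring%d", 0, 1);
 *
 * matches a carveout named "vdev0vring1" whose mem->va/mem->da were
 * filled in when the carveout was allocated.
 */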

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vq, *n;
        struct rproc_vring *rvring;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                rvring = vq->priv;
                rvring->vq = NULL;
                vring_del_virtqueue(vq);
        }
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
        __rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                                 struct virtqueue *vqs[],
                                 vq_callback_t *callbacks[],
                                 const char * const names[],
                                 const bool * ctx,
                                 struct irq_affinity *desc)
{
        int i, ret, queue_idx = 0;

        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
                                    ctx ? ctx[i] : false);
                if (IS_ERR(vqs[i])) {
                        ret = PTR_ERR(vqs[i]);
                        goto error;
                }
        }

        return 0;

error:
        __rproc_virtio_del_vqs(vdev);
        return ret;
}
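
/*
 * Caller-side sketch: a virtio driver sitting on this transport (for
 * example virtio_rpmsg_bus) reaches rproc_virtio_find_vqs() through the
 * generic virtio helper. Simplified, with error handling elided and
 * callback names hypothetical:
 *
 *      vq_callback_t *cbs[] = { rx_done_cb, tx_done_cb };
 *      static const char * const names[] = { "input", "output" };
 *      struct virtqueue *vqs[2];
 *
 *      err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 *
 * Each non-NULL name is handed to rp_find_vq() with a consecutive
 * queue index.
 */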

static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct fw_rsc_vdev *rsc;

        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

        return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct fw_rsc_vdev *rsc;

        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

        rsc->status = status;
        dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct fw_rsc_vdev *rsc;

        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

        rsc->status = 0;
        dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct fw_rsc_vdev *rsc;

        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

        return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
        /*
         * Packed ring isn't enabled on remoteproc for now,
         * because remoteproc uses vring_new_virtqueue() which
         * creates virtio rings on preallocated memory.
         */
        __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct fw_rsc_vdev *rsc;

        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

        /* Give virtio_ring a chance to accept features */
        vring_transport_features(vdev);

        /* Give virtio_rproc a chance to accept features. */
        rproc_transport_features(vdev);

        /* Make sure we don't have any features > 32 bits! */
        BUG_ON((u32)vdev->features != vdev->features);

        /*
         * Remember the finalized features of our vdev, and provide it
         * to the remote processor once it is powered on.
         */
        rsc->gfeatures = vdev->features;

        return 0;
}
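
/*
 * Negotiation sketch: the firmware advertises its feature bits in
 * rsc->dfeatures (for rpmsg, e.g. bit 0 = VIRTIO_RPMSG_F_NS), the
 * virtio core intersects them with the driver's features, and the
 * agreed subset is written back above for the firmware to re-read:
 *
 *      rsc->dfeatures = BIT(0);                // offered by firmware
 *      ...driver/core negotiation...
 *      rsc->gfeatures = vdev->features;        // accepted subset
 *
 * Since gfeatures is a 32-bit field in the resource table, features
 * above bit 31 cannot be negotiated, hence the BUG_ON() above.
 */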

static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
                             void *buf, unsigned int len)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct fw_rsc_vdev *rsc;
        void *cfg;

        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
        cfg = &rsc->vring[rsc->num_of_vrings];

        if (offset + len > rsc->config_len || offset + len < len) {
                dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
                return;
        }

        memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
                             const void *buf, unsigned int len)
{
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
        struct fw_rsc_vdev *rsc;
        void *cfg;

        rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
        cfg = &rsc->vring[rsc->num_of_vrings];

        if (offset + len > rsc->config_len || offset + len < len) {
                dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
                return;
        }

        memcpy(cfg + offset, buf, len);
}
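
/*
 * Layout assumed by the two accessors above: in the fw_rsc_vdev
 * resource-table entry, the device-specific config space immediately
 * follows the vring descriptors, which is what
 * &rsc->vring[rsc->num_of_vrings] points at:
 *
 *      struct fw_rsc_vdev {
 *              u32 id, notifyid, dfeatures, gfeatures, config_len;
 *              u8 status, num_of_vrings, reserved[2];
 *              struct fw_rsc_vdev_vring vring[];  // num_of_vrings entries
 *      };  // <config_len> bytes of config space follow the vrings
 */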

static const struct virtio_config_ops rproc_virtio_config_ops = {
        .get_features = rproc_virtio_get_features,
        .finalize_features = rproc_virtio_finalize_features,
        .find_vqs = rproc_virtio_find_vqs,
        .del_vqs = rproc_virtio_del_vqs,
        .reset = rproc_virtio_reset,
        .set_status = rproc_virtio_set_status,
        .get_status = rproc_virtio_get_status,
        .get = rproc_virtio_get,
        .set = rproc_virtio_set,
};

/*
 * This function is called whenever vdev is released, and is responsible
 * for decrementing the remote processor's refcount, which was taken when
 * vdev was added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
        struct virtio_device *vdev = dev_to_virtio(dev);
        struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

        kfree(vdev);

        put_device(&rvdev->pdev->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
        struct rproc *rproc = rvdev->rproc;
        struct device *dev = &rvdev->pdev->dev;
        struct virtio_device *vdev;
        struct rproc_mem_entry *mem;
        int ret;

        if (rproc->ops->kick == NULL) {
                ret = -EINVAL;
                dev_err(dev, ".kick method not defined for %s\n", rproc->name);
                goto out;
        }

        /* Try to find dedicated vdev buffer carveout */
        mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
        if (mem) {
                phys_addr_t pa;

                if (mem->of_resm_idx != -1) {
                        struct device_node *np = rproc->dev.parent->of_node;

                        /* Associate reserved memory to vdev device */
                        ret = of_reserved_mem_device_init_by_idx(dev, np,
                                                                 mem->of_resm_idx);
                        if (ret) {
                                dev_err(dev, "Can't associate reserved memory\n");
                                goto out;
                        }
                } else {
                        if (mem->va) {
                                dev_warn(dev, "vdev %d buffer already mapped\n",
                                         rvdev->index);
                                pa = rproc_va_to_pa(mem->va);
                        } else {
                                /* Use the dma address as the carveout is not mapped yet */
                                pa = (phys_addr_t)mem->dma;
                        }

                        /* Associate vdev buffer memory pool to vdev subdev */
                        ret = dma_declare_coherent_memory(dev, pa,
                                                          mem->da,
                                                          mem->len);
                        if (ret < 0) {
                                dev_err(dev, "Failed to associate buffer\n");
                                goto out;
                        }
                }
        } else {
                struct device_node *np = rproc->dev.parent->of_node;

                /*
                 * If we don't have a dedicated buffer, just attempt to
                 * re-assign the reserved memory from our parent. A default
                 * memory-region at index 0 from the parent's memory-regions
                 * is assigned for the rvdev dev to allocate from. Failure is
                 * non-critical and the allocations will fall back to global
                 * pools, so don't check the return value either.
                 */
                of_reserved_mem_device_init_by_idx(dev, np, 0);
        }

        /* Allocate virtio device */
        vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
        if (!vdev) {
                ret = -ENOMEM;
                goto out;
        }
        vdev->id.device = id;
        vdev->config = &rproc_virtio_config_ops;
        vdev->dev.parent = dev;
        vdev->dev.release = rproc_virtio_dev_release;

        /* Reference the vdev and vring allocations */
        get_device(dev);

        ret = register_virtio_device(vdev);
        if (ret) {
                put_device(&vdev->dev);
                dev_err(dev, "failed to register vdev: %d\n", ret);
                goto out;
        }

        dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
        return ret;
}

/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
        struct virtio_device *vdev = dev_to_virtio(dev);

        unregister_virtio_device(vdev);

        return 0;
}

static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
        struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

        return rproc_add_virtio_dev(rvdev, rvdev->id);
}

static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
        struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
        struct device *dev = &rvdev->pdev->dev;
        int ret;

        ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
        if (ret)
                dev_warn(dev, "can't remove vdev child device: %d\n", ret);
}

static int rproc_virtio_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rproc_vdev_data *rvdev_data = dev->platform_data;
        struct rproc_vdev *rvdev;
        struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
        struct fw_rsc_vdev *rsc;
        int i, ret;

        if (!rvdev_data)
                return -EINVAL;

        rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
        if (!rvdev)
                return -ENOMEM;

        rvdev->id = rvdev_data->id;
        rvdev->rproc = rproc;
        rvdev->index = rvdev_data->index;

        ret = copy_dma_range_map(dev, rproc->dev.parent);
        if (ret)
                return ret;

        /* Make the device DMA-capable by inheriting the parent's capabilities */
        set_dma_ops(dev, get_dma_ops(rproc->dev.parent));

        ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
        if (ret) {
                dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
                         dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
        }

        platform_set_drvdata(pdev, rvdev);
        rvdev->pdev = pdev;

        rsc = rvdev_data->rsc;

        /* parse the vrings */
        for (i = 0; i < rsc->num_of_vrings; i++) {
                ret = rproc_parse_vring(rvdev, rsc, i);
                if (ret)
                        return ret;
        }

        /* remember the resource offset */
        rvdev->rsc_offset = rvdev_data->rsc_offset;

        /* allocate the vring resources */
        for (i = 0; i < rsc->num_of_vrings; i++) {
                ret = rproc_alloc_vring(rvdev, i);
                if (ret)
                        goto unwind_vring_allocations;
        }

        rproc_add_rvdev(rproc, rvdev);

        rvdev->subdev.start = rproc_vdev_do_start;
        rvdev->subdev.stop = rproc_vdev_do_stop;

        rproc_add_subdev(rproc, &rvdev->subdev);

        /*
         * We're indirectly making a non-temporary copy of the rproc pointer
         * here, because the platform device or the vdev device will
         * indirectly access the wrapping rproc.
         *
         * Therefore we must increment the rproc refcount here, and decrement
         * it _only_ on platform remove.
         */
        get_device(&rproc->dev);

        return 0;

unwind_vring_allocations:
        for (i--; i >= 0; i--)
                rproc_free_vring(&rvdev->vring[i]);

        return ret;
}
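
/*
 * Instantiation sketch: this driver is not bound via DT matching; the
 * remoteproc core registers a "rproc-virtio" platform device while
 * parsing a vdev entry of the firmware resource table, roughly
 * (simplified from rproc_handle_vdev() in remoteproc_core.c):
 *
 *      struct rproc_vdev_data rvdev_data = {
 *              .id = rsc->id,
 *              .index = rproc->nb_vdev++,
 *              .rsc_offset = offset,
 *              .rsc = rsc,
 *      };
 *
 *      pdev = platform_device_register_data(&rproc->dev, "rproc-virtio",
 *                                           rvdev_data.index, &rvdev_data,
 *                                           sizeof(rvdev_data));
 */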

static int rproc_virtio_remove(struct platform_device *pdev)
{
        struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
        struct rproc *rproc = rvdev->rproc;
        struct rproc_vring *rvring;
        int id;

        for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
                rvring = &rvdev->vring[id];
                rproc_free_vring(rvring);
        }

        rproc_remove_subdev(rproc, &rvdev->subdev);
        rproc_remove_rvdev(rvdev);

        of_reserved_mem_device_release(&pdev->dev);
        dma_release_coherent_memory(&pdev->dev);

        put_device(&rproc->dev);

        return 0;
}

/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
        .probe = rproc_virtio_probe,
        .remove = rproc_virtio_remove,
        .driver = {
                .name = "rproc-virtio",
        },
};
builtin_platform_driver(rproc_virtio_driver);