industrialio-buffer-dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <[email protected]>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * allows achieving much higher sample rates than what can be sustained with
 * the one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment. But
 *    once they are done processing it, instead of going to either the incoming
 *    or outgoing queue, the block will be freed.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
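
/*
 * Example: the shape of a driver-side submit() implementation and its
 * completion path, following the contract described above. This is a sketch
 * only: mydrv_hw_start_transfer() and mydrv_transfer_done() are hypothetical
 * driver helpers, not part of this module.
 */
static int mydrv_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	/*
	 * Program the DMA controller with the block's bus address and size.
	 * The block is owned by the DMA controller until the completion
	 * handler below hands it back.
	 */
	return mydrv_hw_start_transfer(queue->dev, block->phys_addr,
				       block->size);
}

/* Called from the driver's DMA completion path, e.g. an interrupt handler. */
static void mydrv_transfer_done(struct iio_dma_buffer_block *block,
	size_t bytes_transferred)
{
	/*
	 * bytes_used must be a multiple of the bytes per datum, or 0 if no
	 * transfer was performed for this block.
	 */
	block->bytes_used = bytes_transferred;
	iio_dma_buffer_block_done(block);
}
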
static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
			  block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * The buffer has already been freed by the application, just drop the
	 * reference.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
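
/*
 * Example: one way a driver might implement the abort() callback on top of
 * iio_dma_buffer_block_list_abort(). This is a sketch only: struct
 * mydrv_state, its "active" list and mydrv_hw_stop_transfers() are
 * hypothetical stand-ins for the driver's own bookkeeping of blocks that are
 * currently handed to the hardware.
 */
struct mydrv_state {
	struct iio_dma_buffer_queue queue;
	struct list_head active;	/* blocks currently owned by the DMA */
};

static void mydrv_abort(struct iio_dma_buffer_queue *queue)
{
	struct mydrv_state *st = container_of(queue, struct mydrv_state, queue);

	/* Stop the controller first so no further completions come in. */
	mydrv_hw_stop_transfers(st);

	/* Hand every in-flight block back; bytes_used is set to 0 for each. */
	iio_dma_buffer_block_list_abort(queue, &st->active);
}
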
static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}
/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_funcs enable callback.
 *
 * This will allocate the DMA buffers and start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_funcs disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}
/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
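
/*
 * Example: how a driver might wire the default callbacks from this module into
 * its iio_buffer_access_funcs and set up the queue. A sketch only: the mydrv_*
 * ops and mydrv_buffer_release() (sketched after iio_dma_buffer_release()
 * below) are hypothetical, and a real driver would embed the queue in its own
 * buffer structure.
 */
static const struct iio_dma_buffer_ops mydrv_dma_buffer_ops = {
	.submit = mydrv_submit_block,
	.abort = mydrv_abort,
};

static const struct iio_buffer_access_funcs mydrv_buffer_access_funcs = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = mydrv_buffer_release,
	.modes = INDIO_BUFFER_HARDWARE,
};

static int mydrv_setup_buffer(struct mydrv_state *st, struct device *dma_dev)
{
	int ret;

	/* The DMA device is used for the coherent allocation of the blocks. */
	ret = iio_dma_buffer_init(&st->queue, dma_dev, &mydrv_dma_buffer_ops);
	if (ret)
		return ret;

	st->queue.buffer.access = &mydrv_buffer_access_funcs;

	return 0;
}
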
/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);
	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;
	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
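
/*
 * Example: a buffer release callback that calls iio_dma_buffer_release() right
 * before freeing the memory holding the queue, as described above. A sketch
 * only; struct mydrv_state with its embedded queue is hypothetical.
 */
static void mydrv_buffer_release(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue =
		container_of(buf, struct iio_dma_buffer_queue, buffer);
	struct mydrv_state *st = container_of(queue, struct mydrv_state, queue);

	iio_dma_buffer_release(queue);
	kfree(st);
}
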
MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");