uvc_queue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *      Laurent Pinchart ([email protected])
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"
/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video queue is initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */
  31. static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
  32. {
  33. return container_of(buf, struct uvc_buffer, buf);
  34. }
  35. /*
  36. * Return all queued buffers to videobuf2 in the requested state.
  37. *
  38. * This function must be called with the queue spinlock held.
  39. */
  40. static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
  41. enum uvc_buffer_state state)
  42. {
  43. enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
  44. ? VB2_BUF_STATE_ERROR
  45. : VB2_BUF_STATE_QUEUED;
  46. while (!list_empty(&queue->irqqueue)) {
  47. struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
  48. struct uvc_buffer,
  49. queue);
  50. list_del(&buf->queue);
  51. buf->state = state;
  52. vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
  53. }
  54. }
  55. /* -----------------------------------------------------------------------------
  56. * videobuf2 queue operations
  57. */
  58. static int uvc_queue_setup(struct vb2_queue *vq,
  59. unsigned int *nbuffers, unsigned int *nplanes,
  60. unsigned int sizes[], struct device *alloc_devs[])
  61. {
  62. struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
  63. struct uvc_streaming *stream;
  64. unsigned int size;
  65. switch (vq->type) {
  66. case V4L2_BUF_TYPE_META_CAPTURE:
  67. size = UVC_METADATA_BUF_SIZE;
  68. break;
  69. default:
  70. stream = uvc_queue_to_stream(queue);
  71. size = stream->ctrl.dwMaxVideoFrameSize;
  72. break;
  73. }
  74. /*
  75. * When called with plane sizes, validate them. The driver supports
  76. * single planar formats only, and requires buffers to be large enough
  77. * to store a complete frame.
  78. */
  79. if (*nplanes)
  80. return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;
  81. *nplanes = 1;
  82. sizes[0] = size;
  83. return 0;
  84. }
  85. static int uvc_buffer_prepare(struct vb2_buffer *vb)
  86. {
  87. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  88. struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
  89. struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
  90. if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
  91. vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
  92. uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
  93. "[E] Bytes used out of bounds\n");
  94. return -EINVAL;
  95. }
  96. if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
  97. return -ENODEV;
  98. buf->state = UVC_BUF_STATE_QUEUED;
  99. buf->error = 0;
  100. buf->mem = vb2_plane_vaddr(vb, 0);
  101. buf->length = vb2_plane_size(vb, 0);
  102. if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
  103. buf->bytesused = 0;
  104. else
  105. buf->bytesused = vb2_get_plane_payload(vb, 0);
  106. return 0;
  107. }
  108. static void uvc_buffer_queue(struct vb2_buffer *vb)
  109. {
  110. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  111. struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
  112. struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
  113. unsigned long flags;
  114. spin_lock_irqsave(&queue->irqlock, flags);
  115. if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
  116. kref_init(&buf->ref);
  117. list_add_tail(&buf->queue, &queue->irqqueue);
  118. } else {
  119. /*
  120. * If the device is disconnected return the buffer to userspace
  121. * directly. The next QBUF call will fail with -ENODEV.
  122. */
  123. buf->state = UVC_BUF_STATE_ERROR;
  124. vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
  125. }
  126. spin_unlock_irqrestore(&queue->irqlock, flags);
  127. }
  128. static void uvc_buffer_finish(struct vb2_buffer *vb)
  129. {
  130. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  131. struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
  132. struct uvc_streaming *stream = uvc_queue_to_stream(queue);
  133. struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
  134. if (vb->state == VB2_BUF_STATE_DONE)
  135. uvc_video_clock_update(stream, vbuf, buf);
  136. }
  137. static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
  138. {
  139. struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
  140. struct uvc_streaming *stream = uvc_queue_to_stream(queue);
  141. int ret;
  142. lockdep_assert_irqs_enabled();
  143. queue->buf_used = 0;
  144. ret = uvc_video_start_streaming(stream);
  145. if (ret == 0)
  146. return 0;
  147. spin_lock_irq(&queue->irqlock);
  148. uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
  149. spin_unlock_irq(&queue->irqlock);
  150. return ret;
  151. }
  152. static void uvc_stop_streaming(struct vb2_queue *vq)
  153. {
  154. struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
  155. lockdep_assert_irqs_enabled();
  156. if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
  157. uvc_video_stop_streaming(uvc_queue_to_stream(queue));
  158. spin_lock_irq(&queue->irqlock);
  159. uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
  160. spin_unlock_irq(&queue->irqlock);
  161. }
  162. static const struct vb2_ops uvc_queue_qops = {
  163. .queue_setup = uvc_queue_setup,
  164. .buf_prepare = uvc_buffer_prepare,
  165. .buf_queue = uvc_buffer_queue,
  166. .buf_finish = uvc_buffer_finish,
  167. .wait_prepare = vb2_ops_wait_prepare,
  168. .wait_finish = vb2_ops_wait_finish,
  169. .start_streaming = uvc_start_streaming,
  170. .stop_streaming = uvc_stop_streaming,
  171. };
  172. static const struct vb2_ops uvc_meta_queue_qops = {
  173. .queue_setup = uvc_queue_setup,
  174. .buf_prepare = uvc_buffer_prepare,
  175. .buf_queue = uvc_buffer_queue,
  176. .wait_prepare = vb2_ops_wait_prepare,
  177. .wait_finish = vb2_ops_wait_finish,
  178. .stop_streaming = uvc_stop_streaming,
  179. };
  180. int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
  181. int drop_corrupted)
  182. {
  183. int ret;
  184. queue->queue.type = type;
  185. queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
  186. queue->queue.drv_priv = queue;
  187. queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
  188. queue->queue.mem_ops = &vb2_vmalloc_memops;
  189. queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
  190. | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
  191. queue->queue.lock = &queue->mutex;
  192. switch (type) {
  193. case V4L2_BUF_TYPE_META_CAPTURE:
  194. queue->queue.ops = &uvc_meta_queue_qops;
  195. break;
  196. default:
  197. queue->queue.io_modes |= VB2_DMABUF;
  198. queue->queue.ops = &uvc_queue_qops;
  199. break;
  200. }
  201. ret = vb2_queue_init(&queue->queue);
  202. if (ret)
  203. return ret;
  204. mutex_init(&queue->mutex);
  205. spin_lock_init(&queue->irqlock);
  206. INIT_LIST_HEAD(&queue->irqqueue);
  207. queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
  208. return 0;
  209. }
  210. void uvc_queue_release(struct uvc_video_queue *queue)
  211. {
  212. mutex_lock(&queue->mutex);
  213. vb2_queue_release(&queue->queue);
  214. mutex_unlock(&queue->mutex);
  215. }
/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */
  219. int uvc_request_buffers(struct uvc_video_queue *queue,
  220. struct v4l2_requestbuffers *rb)
  221. {
  222. int ret;
  223. mutex_lock(&queue->mutex);
  224. ret = vb2_reqbufs(&queue->queue, rb);
  225. mutex_unlock(&queue->mutex);
  226. return ret ? ret : rb->count;
  227. }
  228. int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
  229. {
  230. int ret;
  231. mutex_lock(&queue->mutex);
  232. ret = vb2_querybuf(&queue->queue, buf);
  233. mutex_unlock(&queue->mutex);
  234. return ret;
  235. }
  236. int uvc_create_buffers(struct uvc_video_queue *queue,
  237. struct v4l2_create_buffers *cb)
  238. {
  239. int ret;
  240. mutex_lock(&queue->mutex);
  241. ret = vb2_create_bufs(&queue->queue, cb);
  242. mutex_unlock(&queue->mutex);
  243. return ret;
  244. }
  245. int uvc_queue_buffer(struct uvc_video_queue *queue,
  246. struct media_device *mdev, struct v4l2_buffer *buf)
  247. {
  248. int ret;
  249. mutex_lock(&queue->mutex);
  250. ret = vb2_qbuf(&queue->queue, mdev, buf);
  251. mutex_unlock(&queue->mutex);
  252. return ret;
  253. }
  254. int uvc_export_buffer(struct uvc_video_queue *queue,
  255. struct v4l2_exportbuffer *exp)
  256. {
  257. int ret;
  258. mutex_lock(&queue->mutex);
  259. ret = vb2_expbuf(&queue->queue, exp);
  260. mutex_unlock(&queue->mutex);
  261. return ret;
  262. }
  263. int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
  264. int nonblocking)
  265. {
  266. int ret;
  267. mutex_lock(&queue->mutex);
  268. ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
  269. mutex_unlock(&queue->mutex);
  270. return ret;
  271. }
  272. int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
  273. {
  274. int ret;
  275. mutex_lock(&queue->mutex);
  276. ret = vb2_streamon(&queue->queue, type);
  277. mutex_unlock(&queue->mutex);
  278. return ret;
  279. }
  280. int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
  281. {
  282. int ret;
  283. mutex_lock(&queue->mutex);
  284. ret = vb2_streamoff(&queue->queue, type);
  285. mutex_unlock(&queue->mutex);
  286. return ret;
  287. }
  288. int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
  289. {
  290. return vb2_mmap(&queue->queue, vma);
  291. }
  292. #ifndef CONFIG_MMU
  293. unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
  294. unsigned long pgoff)
  295. {
  296. return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
  297. }
  298. #endif
  299. __poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
  300. poll_table *wait)
  301. {
  302. __poll_t ret;
  303. mutex_lock(&queue->mutex);
  304. ret = vb2_poll(&queue->queue, file, wait);
  305. mutex_unlock(&queue->mutex);
  306. return ret;
  307. }
/* -----------------------------------------------------------------------------
 *
 */
  311. /*
  312. * Check if buffers have been allocated.
  313. */
  314. int uvc_queue_allocated(struct uvc_video_queue *queue)
  315. {
  316. int allocated;
  317. mutex_lock(&queue->mutex);
  318. allocated = vb2_is_busy(&queue->queue);
  319. mutex_unlock(&queue->mutex);
  320. return allocated;
  321. }
  322. /*
  323. * Cancel the video buffers queue.
  324. *
  325. * Cancelling the queue marks all buffers on the irq queue as erroneous,
  326. * wakes them up and removes them from the queue.
  327. *
  328. * If the disconnect parameter is set, further calls to uvc_queue_buffer will
  329. * fail with -ENODEV.
  330. *
  331. * This function acquires the irq spinlock and can be called from interrupt
  332. * context.
  333. */
  334. void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
  335. {
  336. unsigned long flags;
  337. spin_lock_irqsave(&queue->irqlock, flags);
  338. uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
  339. /*
  340. * This must be protected by the irqlock spinlock to avoid race
  341. * conditions between uvc_buffer_queue and the disconnection event that
  342. * could result in an interruptible wait in uvc_dequeue_buffer. Do not
  343. * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
  344. * state outside the queue code.
  345. */
  346. if (disconnect)
  347. queue->flags |= UVC_QUEUE_DISCONNECTED;
  348. spin_unlock_irqrestore(&queue->irqlock, flags);
  349. }
  350. /*
  351. * uvc_queue_get_current_buffer: Obtain the current working output buffer
  352. *
  353. * Buffers may span multiple packets, and even URBs, therefore the active buffer
  354. * remains on the queue until the EOF marker.
  355. */
  356. static struct uvc_buffer *
  357. __uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
  358. {
  359. if (list_empty(&queue->irqqueue))
  360. return NULL;
  361. return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
  362. }
  363. struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
  364. {
  365. struct uvc_buffer *nextbuf;
  366. unsigned long flags;
  367. spin_lock_irqsave(&queue->irqlock, flags);
  368. nextbuf = __uvc_queue_get_current_buffer(queue);
  369. spin_unlock_irqrestore(&queue->irqlock, flags);
  370. return nextbuf;
  371. }
  372. /*
  373. * uvc_queue_buffer_requeue: Requeue a buffer on our internal irqqueue
  374. *
  375. * Reuse a buffer through our internal queue without the need to 'prepare'.
  376. * The buffer will be returned to userspace through the uvc_buffer_queue call if
  377. * the device has been disconnected.
  378. */
  379. static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
  380. struct uvc_buffer *buf)
  381. {
  382. buf->error = 0;
  383. buf->state = UVC_BUF_STATE_QUEUED;
  384. buf->bytesused = 0;
  385. vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
  386. uvc_buffer_queue(&buf->buf.vb2_buf);
  387. }
  388. static void uvc_queue_buffer_complete(struct kref *ref)
  389. {
  390. struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
  391. struct vb2_buffer *vb = &buf->buf.vb2_buf;
  392. struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
  393. if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
  394. uvc_queue_buffer_requeue(queue, buf);
  395. return;
  396. }
  397. buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
  398. vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
  399. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
  400. }
  401. /*
  402. * Release a reference on the buffer. Complete the buffer when the last
  403. * reference is released.
  404. */
  405. void uvc_queue_buffer_release(struct uvc_buffer *buf)
  406. {
  407. kref_put(&buf->ref, uvc_queue_buffer_complete);
  408. }
  409. /*
  410. * Remove this buffer from the queue. Lifetime will persist while async actions
  411. * are still running (if any), and uvc_queue_buffer_release will give the buffer
  412. * back to VB2 when all users have completed.
  413. */
  414. struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
  415. struct uvc_buffer *buf)
  416. {
  417. struct uvc_buffer *nextbuf;
  418. unsigned long flags;
  419. spin_lock_irqsave(&queue->irqlock, flags);
  420. list_del(&buf->queue);
  421. nextbuf = __uvc_queue_get_current_buffer(queue);
  422. spin_unlock_irqrestore(&queue->irqlock, flags);
  423. uvc_queue_buffer_release(buf);
  424. return nextbuf;
  425. }