/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <[email protected]>
 *	   Marek Szyprowski <[email protected]>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <[email protected]> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("vb2-v4l2: [%s] %s: " fmt,		\
				(q)->name, __func__, ## arg);		\
	} while (0)

/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
				? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In vb2 we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;

		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;
	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if queue does not support user
		 * space hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->skip_cache_sync_on_finish = 1;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->skip_cache_sync_on_prepare = 1;
}

static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by the userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important to correctly
	 * validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
{
	unsigned int i;

	for (i = 0; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return vb2_get_buffer(q, i);
	return NULL;
}
EXPORT_SYMBOL_GPL(vb2_find_buffer);
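
/*
 * Usage sketch (illustrative, not part of the original file): a stateless
 * codec driver can use vb2_find_buffer() to look up the reference frame
 * that userspace identified by timestamp in its controls. "cap_q" and
 * "ref_ts" are assumed names for the capture queue and the reference
 * timestamp.
 *
 *	struct vb2_buffer *ref;
 *
 *	ref = vb2_find_buffer(cap_q, ref_ts);
 *	if (!ref)
 *		return -EINVAL;
 */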

/*
 * vb2_querybuf() - query video buffer information
 * @q:		vb2 queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);
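
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * vidioc_querybuf handler in a hypothetical "foo" driver. All foo_* names
 * are assumptions. Drivers that keep the queue in struct video_device can
 * simply use vb2_ioctl_querybuf() further below instead.
 *
 *	static int foo_querybuf(struct file *file, void *priv,
 *				struct v4l2_buffer *b)
 *	{
 *		struct foo_dev *foo = video_drvdata(file);
 *
 *		return vb2_querybuf(&foo->queue, b);
 *	}
 */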

static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}

static void validate_memory_flags(struct vb2_queue *q,
				  int memory,
				  u32 *flags)
{
	if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
		/*
		 * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
		 * but in order to avoid bugs we zero out all bits.
		 */
		*flags = 0;
	} else {
		/* Clear all unknown flags. */
		*flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
	}
}

int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);
	u32 flags = req->flags;

	fill_buf_caps(q, &req->capabilities);
	validate_memory_flags(q, req->memory, &flags);
	req->flags = flags;
	return ret ? ret : vb2_core_reqbufs(q, req->memory,
					    req->flags, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	fill_buf_caps(q, &create->capabilities);
	validate_memory_flags(q, create->memory, &create->flags);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						create->flags,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
	 * cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
			       eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
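
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * hypothetical "foo" capture driver might fill in its vb2_queue before
 * calling vb2_queue_init(). The foo_* names, the vmalloc mem_ops and the
 * field values are assumptions picked for the example, not requirements.
 *
 *	q = &foo->queue;
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = foo;
 *	q->buf_struct_size = sizeof(struct foo_buffer);
 *	q->ops = &foo_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->min_buffers_needed = 2;
 *	q->lock = &foo->mutex;
 *
 *	ret = vb2_queue_init(q);
 *	if (ret)
 *		return ret;
 */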

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
{
	if (type == q->type)
		return 0;

	if (vb2_is_busy(q))
		return -EBUSY;

	q->type = type;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_change_type);

__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops. They contain boilerplate code that most if not all
 * drivers have to do and so they simplify the driver code.
 */

/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
		      struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
	u32 flags = p->flags;

	fill_buf_caps(vdev->queue, &p->capabilities);
	validate_memory_flags(vdev->queue, p->memory, &flags);
	p->flags = flags;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and he
	 * is no longer owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
					 p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	validate_memory_flags(vdev->queue, p->memory, &p->flags);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
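
/*
 * Usage sketch (illustrative, not part of the original file): wiring the
 * vb2_ioctl_* helpers above into a hypothetical "foo" driver's
 * v4l2_ioctl_ops. Only the vb2-related entries are shown; foo_* names are
 * assumptions.
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		.vidioc_querycap	= foo_querycap,
 *		...
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 */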

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		      size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev->queue, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
			file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		     size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev->queue, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif
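
/*
 * Usage sketch (illustrative, not part of the original file): the same
 * hypothetical "foo" driver plugging the vb2_fop_* helpers above into its
 * v4l2_file_operations. v4l2_fh_open() and video_ioctl2() are the usual
 * V4L2 core entry points.
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */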

void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue && vdev->queue->owner) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * everything.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
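
/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * sets vb2_queue->lock can use the two helpers above directly in its
 * vb2_ops, next to its own queue callbacks. All foo_* names are assumptions.
 *
 *	static const struct vb2_ops foo_qops = {
 *		.queue_setup		= foo_queue_setup,
 *		.buf_queue		= foo_buf_queue,
 *		.start_streaming	= foo_start_streaming,
 *		.stop_streaming		= foo_stop_streaming,
 *		.wait_prepare		= vb2_ops_wait_prepare,
 *		.wait_finish		= vb2_ops_wait_finish,
 *	};
 */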

/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);
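
/*
 * Usage sketch (illustrative, not part of the original file): request-aware
 * drivers typically point their media_device ops at the two helpers above.
 * The foo_* names are assumptions.
 *
 *	static const struct media_device_ops foo_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= vb2_request_queue,
 *	};
 *
 *	...
 *	foo->mdev.ops = &foo_media_ops;
 */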

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <[email protected]>, Marek Szyprowski");
MODULE_LICENSE("GPL");