  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Xilinx Video DMA
  4. *
  5. * Copyright (C) 2013-2015 Ideas on Board
  6. * Copyright (C) 2013-2015 Xilinx, Inc.
  7. *
  8. * Contacts: Hyun Kwon <[email protected]>
  9. * Laurent Pinchart <[email protected]>
  10. */
  11. #include <linux/dma/xilinx_dma.h>
  12. #include <linux/lcm.h>
  13. #include <linux/list.h>
  14. #include <linux/module.h>
  15. #include <linux/of.h>
  16. #include <linux/slab.h>
  17. #include <media/v4l2-dev.h>
  18. #include <media/v4l2-fh.h>
  19. #include <media/v4l2-ioctl.h>
  20. #include <media/videobuf2-v4l2.h>
  21. #include <media/videobuf2-dma-contig.h>
  22. #include "xilinx-dma.h"
  23. #include "xilinx-vip.h"
  24. #include "xilinx-vipp.h"
  25. #define XVIP_DMA_DEF_WIDTH 1920
  26. #define XVIP_DMA_DEF_HEIGHT 1080
  27. /* Minimum and maximum widths are expressed in bytes */
  28. #define XVIP_DMA_MIN_WIDTH 1U
  29. #define XVIP_DMA_MAX_WIDTH 65535U
  30. #define XVIP_DMA_MIN_HEIGHT 1U
  31. #define XVIP_DMA_MAX_HEIGHT 8191U
  32. /* -----------------------------------------------------------------------------
  33. * Helper functions
  34. */
  35. static struct v4l2_subdev *
  36. xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
  37. {
  38. struct media_pad *remote;
  39. remote = media_pad_remote_pad_first(local);
  40. if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
  41. return NULL;
  42. if (pad)
  43. *pad = remote->index;
  44. return media_entity_to_v4l2_subdev(remote->entity);
  45. }
  46. static int xvip_dma_verify_format(struct xvip_dma *dma)
  47. {
  48. struct v4l2_subdev_format fmt;
  49. struct v4l2_subdev *subdev;
  50. int ret;
  51. subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
  52. if (subdev == NULL)
  53. return -EPIPE;
  54. fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  55. ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  56. if (ret < 0)
  57. return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  58. if (dma->fmtinfo->code != fmt.format.code ||
  59. dma->format.height != fmt.format.height ||
  60. dma->format.width != fmt.format.width ||
  61. dma->format.colorspace != fmt.format.colorspace)
  62. return -EINVAL;
  63. return 0;
  64. }
  65. /* -----------------------------------------------------------------------------
  66. * Pipeline Stream Management
  67. */
  68. /**
  69. * xvip_pipeline_start_stop - Start ot stop streaming on a pipeline
  70. * @pipe: The pipeline
  71. * @start: Start (when true) or stop (when false) the pipeline
  72. *
  73. * Walk the entities chain starting at the pipeline output video node and start
  74. * or stop all of them.
  75. *
  76. * Return: 0 if successful, or the return value of the failed video::s_stream
  77. * operation otherwise.
  78. */
  79. static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
  80. {
  81. struct xvip_dma *dma = pipe->output;
  82. struct media_entity *entity;
  83. struct media_pad *pad;
  84. struct v4l2_subdev *subdev;
  85. int ret;
  86. entity = &dma->video.entity;
  87. while (1) {
  88. pad = &entity->pads[0];
  89. if (!(pad->flags & MEDIA_PAD_FL_SINK))
  90. break;
  91. pad = media_pad_remote_pad_first(pad);
  92. if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
  93. break;
  94. entity = pad->entity;
  95. subdev = media_entity_to_v4l2_subdev(entity);
  96. ret = v4l2_subdev_call(subdev, video, s_stream, start);
  97. if (start && ret < 0 && ret != -ENOIOCTLCMD)
  98. return ret;
  99. }
  100. return 0;
  101. }
  102. /**
  103. * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
  104. * @pipe: The pipeline
  105. * @on: Turn the stream on when true or off when false
  106. *
  107. * The pipeline is shared between all DMA engines connect at its input and
  108. * output. While the stream state of DMA engines can be controlled
  109. * independently, pipelines have a shared stream state that enable or disable
  110. * all entities in the pipeline. For this reason the pipeline uses a streaming
  111. * counter that tracks the number of DMA engines that have requested the stream
  112. * to be enabled.
  113. *
  114. * When called with the @on argument set to true, this function will increment
  115. * the pipeline streaming count. If the streaming count reaches the number of
  116. * DMA engines in the pipeline it will enable all entities that belong to the
  117. * pipeline.
  118. *
  119. * Similarly, when called with the @on argument set to false, this function will
  120. * decrement the pipeline streaming count and disable all entities in the
  121. * pipeline when the streaming count reaches zero.
  122. *
  123. * Return: 0 if successful, or the return value of the failed video::s_stream
  124. * operation otherwise. Stopping the pipeline never fails. The pipeline state is
  125. * not updated when the operation fails.
  126. */
  127. static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
  128. {
  129. int ret = 0;
  130. mutex_lock(&pipe->lock);
  131. if (on) {
  132. if (pipe->stream_count == pipe->num_dmas - 1) {
  133. ret = xvip_pipeline_start_stop(pipe, true);
  134. if (ret < 0)
  135. goto done;
  136. }
  137. pipe->stream_count++;
  138. } else {
  139. if (--pipe->stream_count == 0)
  140. xvip_pipeline_start_stop(pipe, false);
  141. }
  142. done:
  143. mutex_unlock(&pipe->lock);
  144. return ret;
  145. }
  146. static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
  147. struct xvip_dma *start)
  148. {
  149. struct media_graph graph;
  150. struct media_entity *entity = &start->video.entity;
  151. struct media_device *mdev = entity->graph_obj.mdev;
  152. unsigned int num_inputs = 0;
  153. unsigned int num_outputs = 0;
  154. int ret;
  155. mutex_lock(&mdev->graph_mutex);
  156. /* Walk the graph to locate the video nodes. */
  157. ret = media_graph_walk_init(&graph, mdev);
  158. if (ret) {
  159. mutex_unlock(&mdev->graph_mutex);
  160. return ret;
  161. }
  162. media_graph_walk_start(&graph, entity);
  163. while ((entity = media_graph_walk_next(&graph))) {
  164. struct xvip_dma *dma;
  165. if (entity->function != MEDIA_ENT_F_IO_V4L)
  166. continue;
  167. dma = to_xvip_dma(media_entity_to_video_device(entity));
  168. if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
  169. pipe->output = dma;
  170. num_outputs++;
  171. } else {
  172. num_inputs++;
  173. }
  174. }
  175. mutex_unlock(&mdev->graph_mutex);
  176. media_graph_walk_cleanup(&graph);
  177. /* We need exactly one output and zero or one input. */
  178. if (num_outputs != 1 || num_inputs > 1)
  179. return -EPIPE;
  180. pipe->num_dmas = num_inputs + num_outputs;
  181. return 0;
  182. }
  183. static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
  184. {
  185. pipe->num_dmas = 0;
  186. pipe->output = NULL;
  187. }
  188. /**
  189. * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
  190. * @pipe: the pipeline
  191. *
  192. * Decrease the pipeline use count and clean it up if we were the last user.
  193. */
  194. static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
  195. {
  196. mutex_lock(&pipe->lock);
  197. /* If we're the last user clean up the pipeline. */
  198. if (--pipe->use_count == 0)
  199. __xvip_pipeline_cleanup(pipe);
  200. mutex_unlock(&pipe->lock);
  201. }
  202. /**
  203. * xvip_pipeline_prepare - Prepare the pipeline for streaming
  204. * @pipe: the pipeline
  205. * @dma: DMA engine at one end of the pipeline
  206. *
  207. * Validate the pipeline if no user exists yet, otherwise just increase the use
  208. * count.
  209. *
  210. * Return: 0 if successful or -EPIPE if the pipeline is not valid.
  211. */
  212. static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
  213. struct xvip_dma *dma)
  214. {
  215. int ret;
  216. mutex_lock(&pipe->lock);
  217. /* If we're the first user validate and initialize the pipeline. */
  218. if (pipe->use_count == 0) {
  219. ret = xvip_pipeline_validate(pipe, dma);
  220. if (ret < 0) {
  221. __xvip_pipeline_cleanup(pipe);
  222. goto done;
  223. }
  224. }
  225. pipe->use_count++;
  226. ret = 0;
  227. done:
  228. mutex_unlock(&pipe->lock);
  229. return ret;
  230. }
  231. /* -----------------------------------------------------------------------------
  232. * videobuf2 queue operations
  233. */
/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

/* Convert a vb2_v4l2_buffer pointer to the enclosing xvip_dma_buffer. */
#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)
/*
 * xvip_dma_complete - DMA transfer completion callback
 * @param: the completed &struct xvip_dma_buffer
 *
 * Invoked by the dmaengine when a frame transfer finishes. Removes the buffer
 * from the queued list, fills in the vb2 metadata (field, sequence number,
 * timestamp, payload) and hands the buffer back to videobuf2.
 */
static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	/* Drop the buffer from the queued list before completing it. */
	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	/* A completed transfer always carries a full frame. */
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}
  259. static int
  260. xvip_dma_queue_setup(struct vb2_queue *vq,
  261. unsigned int *nbuffers, unsigned int *nplanes,
  262. unsigned int sizes[], struct device *alloc_devs[])
  263. {
  264. struct xvip_dma *dma = vb2_get_drv_priv(vq);
  265. /* Make sure the image size is large enough. */
  266. if (*nplanes)
  267. return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
  268. *nplanes = 1;
  269. sizes[0] = dma->format.sizeimage;
  270. return 0;
  271. }
  272. static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
  273. {
  274. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  275. struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
  276. struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
  277. buf->dma = dma;
  278. return 0;
  279. }
  280. static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
  281. {
  282. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  283. struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
  284. struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
  285. struct dma_async_tx_descriptor *desc;
  286. dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
  287. u32 flags;
  288. if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
  289. flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
  290. dma->xt.dir = DMA_DEV_TO_MEM;
  291. dma->xt.src_sgl = false;
  292. dma->xt.dst_sgl = true;
  293. dma->xt.dst_start = addr;
  294. } else {
  295. flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
  296. dma->xt.dir = DMA_MEM_TO_DEV;
  297. dma->xt.src_sgl = true;
  298. dma->xt.dst_sgl = false;
  299. dma->xt.src_start = addr;
  300. }
  301. dma->xt.frame_size = 1;
  302. dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
  303. dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
  304. dma->xt.numf = dma->format.height;
  305. desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
  306. if (!desc) {
  307. dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
  308. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
  309. return;
  310. }
  311. desc->callback = xvip_dma_complete;
  312. desc->callback_param = buf;
  313. spin_lock_irq(&dma->queued_lock);
  314. list_add_tail(&buf->queue, &dma->queued_bufs);
  315. spin_unlock_irq(&dma->queued_lock);
  316. dmaengine_submit(desc);
  317. if (vb2_is_streaming(&dma->queue))
  318. dma_async_issue_pending(dma->dma);
  319. }
  320. static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
  321. {
  322. struct xvip_dma *dma = vb2_get_drv_priv(vq);
  323. struct xvip_dma_buffer *buf, *nbuf;
  324. struct xvip_pipeline *pipe;
  325. int ret;
  326. dma->sequence = 0;
  327. /*
  328. * Start streaming on the pipeline. No link touching an entity in the
  329. * pipeline can be activated or deactivated once streaming is started.
  330. *
  331. * Use the pipeline object embedded in the first DMA object that starts
  332. * streaming.
  333. */
  334. pipe = to_xvip_pipeline(&dma->video) ? : &dma->pipe;
  335. ret = video_device_pipeline_start(&dma->video, &pipe->pipe);
  336. if (ret < 0)
  337. goto error;
  338. /* Verify that the configured format matches the output of the
  339. * connected subdev.
  340. */
  341. ret = xvip_dma_verify_format(dma);
  342. if (ret < 0)
  343. goto error_stop;
  344. ret = xvip_pipeline_prepare(pipe, dma);
  345. if (ret < 0)
  346. goto error_stop;
  347. /* Start the DMA engine. This must be done before starting the blocks
  348. * in the pipeline to avoid DMA synchronization issues.
  349. */
  350. dma_async_issue_pending(dma->dma);
  351. /* Start the pipeline. */
  352. xvip_pipeline_set_stream(pipe, true);
  353. return 0;
  354. error_stop:
  355. video_device_pipeline_stop(&dma->video);
  356. error:
  357. /* Give back all queued buffers to videobuf2. */
  358. spin_lock_irq(&dma->queued_lock);
  359. list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
  360. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
  361. list_del(&buf->queue);
  362. }
  363. spin_unlock_irq(&dma->queued_lock);
  364. return ret;
  365. }
  366. static void xvip_dma_stop_streaming(struct vb2_queue *vq)
  367. {
  368. struct xvip_dma *dma = vb2_get_drv_priv(vq);
  369. struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video);
  370. struct xvip_dma_buffer *buf, *nbuf;
  371. /* Stop the pipeline. */
  372. xvip_pipeline_set_stream(pipe, false);
  373. /* Stop and reset the DMA engine. */
  374. dmaengine_terminate_all(dma->dma);
  375. /* Cleanup the pipeline and mark it as being stopped. */
  376. xvip_pipeline_cleanup(pipe);
  377. video_device_pipeline_stop(&dma->video);
  378. /* Give back all queued buffers to videobuf2. */
  379. spin_lock_irq(&dma->queued_lock);
  380. list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
  381. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
  382. list_del(&buf->queue);
  383. }
  384. spin_unlock_irq(&dma->queued_lock);
  385. }
/* videobuf2 queue operations for the DMA engine video nodes. */
static const struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	/* Stock helpers that drop/retake the queue lock around waits. */
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};
  395. /* -----------------------------------------------------------------------------
  396. * V4L2 ioctls
  397. */
  398. static int
  399. xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
  400. {
  401. struct v4l2_fh *vfh = file->private_data;
  402. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  403. cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
  404. V4L2_CAP_DEVICE_CAPS;
  405. strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
  406. strscpy(cap->card, dma->video.name, sizeof(cap->card));
  407. snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%pOFn:%u",
  408. dma->xdev->dev->of_node, dma->port);
  409. return 0;
  410. }
  411. /* FIXME: without this callback function, some applications are not configured
  412. * with correct formats, and it results in frames in wrong format. Whether this
  413. * callback needs to be required is not clearly defined, so it should be
  414. * clarified through the mailing list.
  415. */
  416. static int
  417. xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
  418. {
  419. struct v4l2_fh *vfh = file->private_data;
  420. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  421. if (f->index > 0)
  422. return -EINVAL;
  423. f->pixelformat = dma->format.pixelformat;
  424. return 0;
  425. }
  426. static int
  427. xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
  428. {
  429. struct v4l2_fh *vfh = file->private_data;
  430. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  431. format->fmt.pix = dma->format;
  432. return 0;
  433. }
  434. static void
  435. __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
  436. const struct xvip_video_format **fmtinfo)
  437. {
  438. const struct xvip_video_format *info;
  439. unsigned int min_width;
  440. unsigned int max_width;
  441. unsigned int min_bpl;
  442. unsigned int max_bpl;
  443. unsigned int width;
  444. unsigned int align;
  445. unsigned int bpl;
  446. /* Retrieve format information and select the default format if the
  447. * requested format isn't supported.
  448. */
  449. info = xvip_get_format_by_fourcc(pix->pixelformat);
  450. pix->pixelformat = info->fourcc;
  451. pix->field = V4L2_FIELD_NONE;
  452. /* The transfer alignment requirements are expressed in bytes. Compute
  453. * the minimum and maximum values, clamp the requested width and convert
  454. * it back to pixels.
  455. */
  456. align = lcm(dma->align, info->bpp);
  457. min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
  458. max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
  459. width = rounddown(pix->width * info->bpp, align);
  460. pix->width = clamp(width, min_width, max_width) / info->bpp;
  461. pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
  462. XVIP_DMA_MAX_HEIGHT);
  463. /* Clamp the requested bytes per line value. If the maximum bytes per
  464. * line value is zero, the module doesn't support user configurable line
  465. * sizes. Override the requested value with the minimum in that case.
  466. */
  467. min_bpl = pix->width * info->bpp;
  468. max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
  469. bpl = rounddown(pix->bytesperline, dma->align);
  470. pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
  471. pix->sizeimage = pix->bytesperline * pix->height;
  472. if (fmtinfo)
  473. *fmtinfo = info;
  474. }
  475. static int
  476. xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
  477. {
  478. struct v4l2_fh *vfh = file->private_data;
  479. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  480. __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
  481. return 0;
  482. }
  483. static int
  484. xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
  485. {
  486. struct v4l2_fh *vfh = file->private_data;
  487. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  488. const struct xvip_video_format *info;
  489. __xvip_dma_try_format(dma, &format->fmt.pix, &info);
  490. if (vb2_is_busy(&dma->queue))
  491. return -EBUSY;
  492. dma->format = format->fmt.pix;
  493. dma->fmtinfo = info;
  494. return 0;
  495. }
/* V4L2 ioctl operations shared by capture and output video nodes. */
static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap = xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
	/* Format handlers are direction-agnostic and serve both node types. */
	.vidioc_g_fmt_vid_cap = xvip_dma_get_format,
	.vidioc_g_fmt_vid_out = xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap = xvip_dma_set_format,
	.vidioc_s_fmt_vid_out = xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap = xvip_dma_try_format,
	.vidioc_try_fmt_vid_out = xvip_dma_try_format,
	/* Buffer management is delegated to the stock vb2 ioctl helpers. */
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};
  514. /* -----------------------------------------------------------------------------
  515. * V4L2 file operations
  516. */
/* File operations: all buffer I/O paths are handled by the vb2 helpers. */
static const struct v4l2_file_operations xvip_dma_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};
  525. /* -----------------------------------------------------------------------------
  526. * Xilinx Video DMA Core
  527. */
/**
 * xvip_dma_init - Initialize a DMA engine video node
 * @xdev: the composite video device the engine belongs to
 * @dma: the DMA engine to initialize
 * @type: V4L2 buffer type, selects capture (sink pad) or output (source pad)
 * @port: port number, used to name the video node and the DMA channel
 *
 * Set up the default format, the media pad, the video device, the vb2 queue
 * and the dmaengine channel for one DMA engine. On failure, all partially
 * acquired resources are released through xvip_dma_cleanup().
 *
 * Return: 0 on success or a negative error code otherwise.
 */
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
{
	char name[16];
	int ret;

	dma->xdev = xdev;
	dma->port = port;
	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

	/* Default to 1080p YUYV until userspace sets a format. */
	dma->fmtinfo = xvip_get_format_by_fourcc(V4L2_PIX_FMT_YUYV);
	dma->format.pixelformat = dma->fmtinfo->fourcc;
	dma->format.colorspace = V4L2_COLORSPACE_SRGB;
	dma->format.field = V4L2_FIELD_NONE;
	dma->format.width = XVIP_DMA_DEF_WIDTH;
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	/* A capture node consumes video data, hence the sink pad. */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
		 xdev->dev->of_node,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_VIDEO;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;
	dma->video.device_caps = V4L2_CAP_STREAMING;
	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	else
		dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with a
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * anyway requires a test tool to setup the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	dma->queue.dev = dma->xdev->dev;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_chan(dma->xdev->dev, name);
	if (IS_ERR(dma->dma)) {
		ret = PTR_ERR(dma->dma);
		/* Deferred probing is expected; don't log it as an error. */
		if (ret != -EPROBE_DEFER)
			dev_err(dma->xdev->dev, "no VDMA channel found\n");
		goto error;
	}

	/* Byte alignment imposed by the DMA engine on transfer addresses. */
	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	/* xvip_dma_cleanup() copes with partially initialized state. */
	xvip_dma_cleanup(dma);
	return ret;
}
/**
 * xvip_dma_cleanup - Release the resources held by a DMA engine
 * @dma: the DMA engine to clean up
 *
 * Safe to call on a partially initialized engine: every teardown step first
 * checks whether the corresponding resource was actually acquired.
 */
void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	/* dma->dma holds an ERR_PTR when dma_request_chan() failed. */
	if (!IS_ERR_OR_NULL(dma->dma))
		dma_release_channel(dma->dma);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}