cx23885-vbi.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
 */

#include "cx23885.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");

static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
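
/*
 * Usage note (a sketch, not part of the original file): with the 0644
 * permissions above, both parameters can be set at module load time, e.g.
 *
 *	modprobe cx23885 vbibufs=8 vbi_debug=1
 *
 * or changed at runtime through /sys/module/cx23885/parameters/.
 */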

#define dprintk(level, fmt, arg...)\
	do { if (vbi_debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
			__func__, ##arg); \
	} while (0)

/* ------------------------------------------------------------------ */

#define VBI_LINE_LENGTH 1440
#define VBI_NTSC_LINE_COUNT 12
#define VBI_PAL_LINE_COUNT 18

int cx23885_vbi_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct cx23885_dev *dev = video_drvdata(file);

	f->fmt.vbi.sampling_rate = 27000000;
	f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
	f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
	f->fmt.vbi.offset = 0;
	f->fmt.vbi.flags = 0;
	if (dev->tvnorm & V4L2_STD_525_60) {
		/* ntsc */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
		f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
	} else if (dev->tvnorm & V4L2_STD_625_50) {
		/* pal */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
		f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
	}
	return 0;
}
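
/*
 * Worked numbers (derived from the constants above, assuming the usual
 * videodev2.h values V4L2_VBI_ITU_525_F1_START = 1, F2_START = 264,
 * V4L2_VBI_ITU_625_F1_START = 1 and F2_START = 314):
 *
 *	NTSC: fields start at lines 10 and 273, 12 lines each,
 *	      so a buffer holds 2 * 12 * 1440 = 34560 bytes.
 *	PAL:  fields start at lines 6 and 319, 18 lines each,
 *	      so a buffer holds 2 * 18 * 1440 = 51840 bytes.
 *
 * These sizes match what queue_setup() below reports in sizes[0].
 */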

/* We're given the Video Interrupt status register.
 * The cx23885_video_irq() function has already validated
 * the potential error bits; we just need to deal with
 * the vbi payload and return an indication of whether
 * we actually processed any payload.
 */
int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
{
	u32 count;
	int handled = 0;

	if (status & VID_BC_MSK_VBI_RISCI1) {
		dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
		spin_lock(&dev->slock);
		count = cx_read(VBI_A_GPCNT);
		cx23885_video_wakeup(dev, &dev->vbiq, count);
		spin_unlock(&dev->slock);
		handled++;
	}
	return handled;
}
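
/*
 * Caller sketch (hypothetical shape; the real dispatch lives in the
 * video interrupt path in cx23885-video.c): the return value is
 * accumulated so the top-level handler knows whether anything was
 * serviced, roughly:
 *
 *	if (status & VID_BC_MSK_VBI_RISCI1)
 *		handled += cx23885_vbi_irq(dev, status);
 */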

static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
			 struct cx23885_dmaqueue *q,
			 struct cx23885_buffer *buf)
{
	dprintk(1, "%s()\n", __func__);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
			 VBI_LINE_LENGTH, buf->risc.dma);

	/* reset counter */
	cx_write(VID_A_VBI_CTRL, 3);
	cx_write(VBI_A_GPCNT_CTL, 3);
	q->count = 0;

	/* enable irq */
	cx23885_irq_add_enable(dev, 0x01);
	cx_set(VID_A_INT_MSK, 0x000022);

	/* start dma */
	cx_set(DEV_CNTRL2, (1<<5));
	cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */

	return 0;
}
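
/*
 * Note on the magic number: 0x22 is (1 << 5) | (1 << 1), the FIFO and
 * RISC enable bits named in the comment above; cx23885_stop_streaming()
 * below clears exactly the same pattern from VID_A_DMA_CTL to tear the
 * DMA back down.
 */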

/* ------------------------------------------------------------------ */

static int queue_setup(struct vb2_queue *q,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx23885_dev *dev = q->drv_priv;
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;
	*num_planes = 1;
	sizes[0] = lines * VBI_LINE_LENGTH * 2;
	return 0;
}

static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;
	if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);

	cx23885_risc_vbibuffer(dev->pci, &buf->risc,
			 sgt->sgl,
			 0, VBI_LINE_LENGTH * lines,
			 VBI_LINE_LENGTH, 0,
			 lines);
	return 0;
}
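
/*
 * Parameter note (an assumption based on the cx23885_risc_vbibuffer()
 * signature in cx23885-core.c): the 0 and VBI_LINE_LENGTH * lines
 * arguments are the top- and bottom-field offsets into the buffer, so
 * both fields land back to back, which is why the plane payload set
 * above is lines * VBI_LINE_LENGTH * 2.
 */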

static void buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);

	cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
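
/*
 * Layout sketch (inferred from the writes in buffer_queue() below; each
 * RISC word is a 32-bit little-endian value, so the 64-bit-address JUMP
 * occupies three words = 12 bytes):
 *
 *	risc.dma + 0:	cpu[0]	JUMP opcode (RISC_IRQ1 or'ed in once the
 *				buffer is chained behind another one)
 *	risc.dma + 4:	cpu[1]	jump target, low bits  = risc.dma + 12
 *	risc.dma + 8:	cpu[2]	jump target, high bits
 *	risc.dma + 12:	first real DMA instruction
 *	...
 *	tail:		jmp[0..2] JUMP back to risc.dma + 12
 *
 * Re-pointing prev->risc.jmp[1] at the next buffer's risc.dma is the
 * single atomic u32 write described above.
 */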

static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
			struct cx23885_buffer, vb);
	struct cx23885_buffer *prev;
	struct cx23885_dmaqueue *q = &dev->vbiq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&q->active)) {
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx23885_buffer,
				  queue);
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}

static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	struct cx23885_buffer *buf = list_entry(dmaq->active.next,
			struct cx23885_buffer, queue);

	cx23885_start_vbi_dma(dev, dmaq, buf);
	return 0;
}

static void cx23885_stop_streaming(struct vb2_queue *q)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx23885_buffer *buf = list_entry(dmaq->active.next,
			struct cx23885_buffer, queue);

		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

const struct vb2_ops cx23885_vbi_qops = {
	.queue_setup		= queue_setup,
	.buf_prepare		= buffer_prepare,
	.buf_finish		= buffer_finish,
	.buf_queue		= buffer_queue,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
	.start_streaming	= cx23885_start_streaming,
	.stop_streaming		= cx23885_stop_streaming,
};
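
/*
 * Hookup sketch (based on the VBI queue init in cx23885-video.c; the
 * exact field values here are illustrative assumptions): the ops table
 * is plugged into the VBI vb2_queue during device setup, roughly:
 *
 *	struct vb2_queue *q = &dev->vb2_vbiq;
 *
 *	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->gfp_flags = GFP_DMA32;
 *	q->min_buffers_needed = 2;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct cx23885_buffer);
 *	q->ops = &cx23885_vbi_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &dev->lock;
 *	q->dev = &dev->pci->dev;
 *	err = vb2_queue_init(q);
 */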