// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"

/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame:
 *
 *                       +---------+-----+
 *   +-----+---+         | A       | B   |
 *   | A   | B |         |         |     |
 *   +-----+---+   -->   +---------+-----+
 *   | C   | D |         | C       | D   |
 *   +-----+---+         |         |     |
 *                       +---------+-----+
 *
 * Clockwise 90° rotations are handled by first rescaling into a
 * reusable temporary tile buffer and then rotating with the 8x8
 * block rotator, writing to the correct destination:
 *
 *                                         +-----+-----+
 *                                         |     |     |
 *   +-----+---+         +---------+       | C   | A   |
 *   | A   | B |         | A,B, |  |       |     |     |
 *   +-----+---+   -->   | C,D  |  |  -->  |     |     |
 *   | C   | D |         +---------+       +-----+-----+
 *   +-----+---+                           | D   | B   |
 *                                         |     |     |
 *                                         +-----+-----+
 *
 * If the 8x8 block rotator is used, horizontal or vertical flipping
 * is done during the rotation step, otherwise flipping is done
 * during the scaling step.
 * With rotation or flipping, tile order changes between input and
 * output image. Tiles are numbered row major from top left to bottom
 * right for both input and output image.
 */
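
/*
 * For example, a 1920x1080 output frame exceeds the 1024 pixel limit in
 * both dimensions, so the conversion is carried out as at least
 * 2 x 2 = 4 tiles, while a 1280x720 output needs at least 2 columns but
 * only 1 row (the downsized input may require more stripes). The frame
 * sizes here are illustrative only.
 */
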
#define MAX_STRIPES_W 4
#define MAX_STRIPES_H 4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W 16
#define MIN_H 8
#define MAX_W 4096
#define MAX_H 4096

enum ipu_image_convert_type {
        IMAGE_CONVERT_IN = 0,
        IMAGE_CONVERT_OUT,
};

struct ipu_image_convert_dma_buf {
        void *virt;
        dma_addr_t phys;
        unsigned long len;
};

struct ipu_image_convert_dma_chan {
        int in;
        int out;
        int rot_in;
        int rot_out;
        int vdi_in_p;
        int vdi_in;
        int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
        u32 width;
        u32 height;
        u32 left;
        u32 top;
        /* size and strides are in bytes */
        u32 size;
        u32 stride;
        u32 rot_stride;
        /* start Y or packed offset of this tile */
        u32 offset;
        /* offset from start to tile in U plane, for planar formats */
        u32 u_off;
        /* offset from start to tile in V plane, for planar formats */
        u32 v_off;
};

struct ipu_image_convert_image {
        struct ipu_image base;
        enum ipu_image_convert_type type;
        const struct ipu_image_pixfmt *fmt;
        unsigned int stride;
        /* # of rows (horizontal stripes) if dest height is > 1024 */
        unsigned int num_rows;
        /* # of columns (vertical stripes) if dest width is > 1024 */
        unsigned int num_cols;
        struct ipu_image_tile tile[MAX_TILES];
};

struct ipu_image_pixfmt {
        u32 fourcc;        /* V4L2 fourcc */
        int bpp;           /* total bpp */
        int uv_width_dec;  /* decimation in width for U/V planes */
        int uv_height_dec; /* decimation in height for U/V planes */
        bool planar;       /* planar format */
        bool uv_swapped;   /* U and V planes are swapped */
        bool uv_packed;    /* partial planar (U and V in same plane) */
};

struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

enum eof_irq_mask {
        EOF_IRQ_IN      = BIT(0),
        EOF_IRQ_ROT_IN  = BIT(1),
        EOF_IRQ_OUT     = BIT(2),
        EOF_IRQ_ROT_OUT = BIT(3),
};

#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT |        \
                              EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)

struct ipu_image_convert_ctx {
        struct ipu_image_convert_chan *chan;
        ipu_image_convert_cb_t complete;
        void *complete_context;
        /* Source/destination image data and rotation mode */
        struct ipu_image_convert_image in;
        struct ipu_image_convert_image out;
        struct ipu_ic_csc csc;
        enum ipu_rotate_mode rot_mode;
        u32 downsize_coeff_h;
        u32 downsize_coeff_v;
        u32 image_resize_coeff_h;
        u32 image_resize_coeff_v;
        u32 resize_coeffs_h[MAX_STRIPES_W];
        u32 resize_coeffs_v[MAX_STRIPES_H];
        /* intermediate buffer for rotation */
        struct ipu_image_convert_dma_buf rot_intermediate[2];
        /* current buffer number for double buffering */
        int cur_buf_num;
        bool aborting;
        struct completion aborted;
        /* can we use double-buffering for this conversion operation? */
        bool double_buffering;
        /* num_rows * num_cols */
        unsigned int num_tiles;
        /* next tile to process */
        unsigned int next_tile;
        /* where to place converted tile in dest image */
        unsigned int out_tile_map[MAX_TILES];
        /* mask of completed EOF irqs at every tile conversion */
        enum eof_irq_mask eof_mask;
        struct list_head list;
};

struct ipu_image_convert_chan {
        struct ipu_image_convert_priv *priv;
        enum ipu_ic_task ic_task;
        const struct ipu_image_convert_dma_chan *dma_ch;
        struct ipu_ic *ic;
        struct ipuv3_channel *in_chan;
        struct ipuv3_channel *out_chan;
        struct ipuv3_channel *rotation_in_chan;
        struct ipuv3_channel *rotation_out_chan;
        /* the IPU end-of-frame irqs */
        int in_eof_irq;
        int rot_in_eof_irq;
        int out_eof_irq;
        int rot_out_eof_irq;
        spinlock_t irqlock;
        /* list of convert contexts */
        struct list_head ctx_list;
        /* queue of conversion runs */
        struct list_head pending_q;
        /* queue of completed runs */
        struct list_head done_q;
        /* the current conversion run */
        struct ipu_image_convert_run *current_run;
};

struct ipu_image_convert_priv {
        struct ipu_image_convert_chan chan[IC_NUM_TASKS];
        struct ipu_soc *ipu;
};

static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
        [IC_TASK_VIEWFINDER] = {
                .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
                .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
                .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
                .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
                .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
                .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
                .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
        },
        [IC_TASK_POST_PROCESSOR] = {
                .in = IPUV3_CHANNEL_MEM_IC_PP,
                .out = IPUV3_CHANNEL_IC_PP_MEM,
                .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
                .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
        },
};

static const struct ipu_image_pixfmt image_convert_formats[] = {
        {
                .fourcc = V4L2_PIX_FMT_RGB565,
                .bpp = 16,
        }, {
                .fourcc = V4L2_PIX_FMT_RGB24,
                .bpp = 24,
        }, {
                .fourcc = V4L2_PIX_FMT_BGR24,
                .bpp = 24,
        }, {
                .fourcc = V4L2_PIX_FMT_RGB32,
                .bpp = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_BGR32,
                .bpp = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_XRGB32,
                .bpp = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_XBGR32,
                .bpp = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_BGRX32,
                .bpp = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_RGBX32,
                .bpp = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_YUYV,
                .bpp = 16,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_UYVY,
                .bpp = 16,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_YUV420,
                .bpp = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
        }, {
                .fourcc = V4L2_PIX_FMT_YVU420,
                .bpp = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
                .uv_swapped = true,
        }, {
                .fourcc = V4L2_PIX_FMT_NV12,
                .bpp = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
                .uv_packed = true,
        }, {
                .fourcc = V4L2_PIX_FMT_YUV422P,
                .bpp = 16,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_NV16,
                .bpp = 16,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
                .uv_packed = true,
        },
};

static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
        const struct ipu_image_pixfmt *ret = NULL;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
                if (image_convert_formats[i].fourcc == fourcc) {
                        ret = &image_convert_formats[i];
                        break;
                }
        }

        return ret;
}

static void dump_format(struct ipu_image_convert_ctx *ctx,
                        struct ipu_image_convert_image *ic_image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;

        dev_dbg(priv->ipu->dev,
                "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
                chan->ic_task, ctx,
                ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
                ic_image->base.pix.width, ic_image->base.pix.height,
                ic_image->num_cols, ic_image->num_rows,
                ic_image->fmt->fourcc & 0xff,
                (ic_image->fmt->fourcc >> 8) & 0xff,
                (ic_image->fmt->fourcc >> 16) & 0xff,
                (ic_image->fmt->fourcc >> 24) & 0xff);
}

int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
        const struct ipu_image_pixfmt *fmt;

        if (index >= (int)ARRAY_SIZE(image_convert_formats))
                return -EINVAL;

        /* Format found */
        fmt = &image_convert_formats[index];
        *fourcc = fmt->fourcc;
        return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);

static void free_dma_buf(struct ipu_image_convert_priv *priv,
                         struct ipu_image_convert_dma_buf *buf)
{
        if (buf->virt)
                dma_free_coherent(priv->ipu->dev,
                                  buf->len, buf->virt, buf->phys);
        buf->virt = NULL;
        buf->phys = 0;
}

static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
                         struct ipu_image_convert_dma_buf *buf,
                         int size)
{
        buf->len = PAGE_ALIGN(size);
        buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
                                       GFP_DMA | GFP_KERNEL);
        if (!buf->virt) {
                dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
                return -ENOMEM;
        }

        return 0;
}
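
/*
 * Smallest number of 1024-pixel (or less) stripes that covers a
 * dimension, e.g. num_stripes(1024) = 1, num_stripes(1025) = 2 and
 * num_stripes(2500) = 3.
 */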
static inline int num_stripes(int dim)
{
        return (dim - 1) / 1024 + 1;
}

/*
 * Calculate downsizing coefficients, which are the same for all tiles,
 * and initial bilinear resizing coefficients, which are used to find the
 * best seam positions.
 * Also determine the number of tiles necessary to guarantee that no tile
 * is larger than 1024 pixels in either dimension at the output and between
 * IC downsizing and main processing sections.
 */
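/*
 * As an illustration (sizes chosen here): downscaling a 3840 pixel wide
 * input to 960 pixels takes two /2 steps in the downsizing section
 * (downsize_coeff_h = 2, 3840 -> 960), which leaves the bilinear resizer
 * with a 1:1 ratio, i.e. image_resize_coeff_h = 8192 * 959 / 959 = 8192.
 */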
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
                                          struct ipu_image *in,
                                          struct ipu_image *out)
{
        u32 downsized_width = in->rect.width;
        u32 downsized_height = in->rect.height;
        u32 downsize_coeff_v = 0;
        u32 downsize_coeff_h = 0;
        u32 resized_width = out->rect.width;
        u32 resized_height = out->rect.height;
        u32 resize_coeff_h;
        u32 resize_coeff_v;
        u32 cols;
        u32 rows;

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                resized_width = out->rect.height;
                resized_height = out->rect.width;
        }

        /* Do not let invalid input lead to an endless loop below */
        if (WARN_ON(resized_width == 0 || resized_height == 0))
                return -EINVAL;

        while (downsized_width >= resized_width * 2) {
                downsized_width >>= 1;
                downsize_coeff_h++;
        }

        while (downsized_height >= resized_height * 2) {
                downsized_height >>= 1;
                downsize_coeff_v++;
        }

        /*
         * Calculate the bilinear resizing coefficients that could be used if
         * we were converting with a single tile. The bottom right output pixel
         * should sample as close as possible to the bottom right input pixel
         * out of the decimator, but not overshoot it:
         */
        resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
        resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);

        /*
         * Both the output of the IC downsizing section before being passed to
         * the IC main processing section and the final output of the IC main
         * processing section must be <= 1024 pixels in both dimensions.
         */
        cols = num_stripes(max_t(u32, downsized_width, resized_width));
        rows = num_stripes(max_t(u32, downsized_height, resized_height));

        dev_dbg(ctx->chan->priv->ipu->dev,
                "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
                __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
                resize_coeff_v, cols, rows);

        if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
            resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
                return -EINVAL;

        ctx->downsize_coeff_h = downsize_coeff_h;
        ctx->downsize_coeff_v = downsize_coeff_v;
        ctx->image_resize_coeff_h = resize_coeff_h;
        ctx->image_resize_coeff_v = resize_coeff_v;
        ctx->in.num_cols = cols;
        ctx->in.num_rows = rows;

        return 0;
}

#define round_closest(x, y) round_down((x) + (y)/2, (y))
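
/*
 * round_closest() rounds to the nearest multiple of a power-of-two
 * alignment, e.g. round_closest(13, 8) = round_down(13 + 4, 8) = 16 and
 * round_closest(11, 8) = 8.
 */
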
/*
 * Find the best aligned seam position for the given column / row index.
 * Rotation and image offsets are out of scope.
 *
 * @index: column / row index, used to calculate valid interval
 * @in_edge: input right / bottom edge
 * @out_edge: output right / bottom edge
 * @in_align: input alignment, either horizontal 8-byte line start address
 *            alignment, or pixel alignment due to image format
 * @out_align: output alignment, either horizontal 8-byte line start address
 *             alignment, or pixel alignment due to image format or rotator
 *             block size
 * @in_burst: horizontal input burst size in case of horizontal flip
 * @out_burst: horizontal output burst size or rotator block size
 * @downsize_coeff: downsizing section coefficient
 * @resize_coeff: main processing section resizing coefficient
 * @_in_seam: aligned input seam position return value
 * @_out_seam: aligned output seam position return value
 */
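/*
 * Input positions are evaluated in 19.13 fixed point: output position
 * out_pos maps to (out_pos * resize_coeff) << downsize_coeff. As an
 * illustration, with resize_coeff = 4096 (2x upscale) and
 * downsize_coeff = 0, out_pos = 512 maps to 512 * 4096 = 2097152, i.e.
 * input pixel 256.0 (2097152 / 8192).
 */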
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
                           unsigned int index,
                           unsigned int in_edge,
                           unsigned int out_edge,
                           unsigned int in_align,
                           unsigned int out_align,
                           unsigned int in_burst,
                           unsigned int out_burst,
                           unsigned int downsize_coeff,
                           unsigned int resize_coeff,
                           u32 *_in_seam,
                           u32 *_out_seam)
{
        struct device *dev = ctx->chan->priv->ipu->dev;
        unsigned int out_pos;
        /* Input / output seam position candidates */
        unsigned int out_seam = 0;
        unsigned int in_seam = 0;
        unsigned int min_diff = UINT_MAX;
        unsigned int out_start;
        unsigned int out_end;
        unsigned int in_start;
        unsigned int in_end;

        /* Start within 1024 pixels of the right / bottom edge */
        out_start = max_t(int, index * out_align, out_edge - 1024);
        /* End before having to add more columns to the left / rows above */
        out_end = min_t(unsigned int, out_edge, index * 1024 + 1);

        /*
         * Limit input seam position to make sure that the downsized input tile
         * to the right or bottom does not exceed 1024 pixels.
         */
        in_start = max_t(int, index * in_align,
                         in_edge - (1024 << downsize_coeff));
        in_end = min_t(unsigned int, in_edge,
                       index * (1024 << downsize_coeff) + 1);

        /*
         * Output tiles must start at a multiple of 8 bytes horizontally and
         * possibly at an even line horizontally depending on the pixel format.
         * Only consider output aligned positions for the seam.
         */
        out_start = round_up(out_start, out_align);
        for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
                unsigned int in_pos;
                unsigned int in_pos_aligned;
                unsigned int in_pos_rounded;
                unsigned int abs_diff;

                /*
                 * Tiles in the right row / bottom column may not be allowed to
                 * overshoot horizontally / vertically. out_burst may be the
                 * actual DMA burst size, or the rotator block size.
                 */
                if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
                        continue;

                /*
                 * Input sample position, corresponding to out_pos, 19.13 fixed
                 * point.
                 */
                in_pos = (out_pos * resize_coeff) << downsize_coeff;
                /*
                 * The closest input sample position that we could actually
                 * start the input tile at, 19.13 fixed point.
                 */
                in_pos_aligned = round_closest(in_pos, 8192U * in_align);
                /* Convert 19.13 fixed point to integer */
                in_pos_rounded = in_pos_aligned / 8192U;

                if (in_pos_rounded < in_start)
                        continue;
                if (in_pos_rounded >= in_end)
                        break;

                if ((in_burst > 1) &&
                    (in_edge - in_pos_rounded) % in_burst)
                        continue;

                if (in_pos < in_pos_aligned)
                        abs_diff = in_pos_aligned - in_pos;
                else
                        abs_diff = in_pos - in_pos_aligned;

                if (abs_diff < min_diff) {
                        in_seam = in_pos_rounded;
                        out_seam = out_pos;
                        min_diff = abs_diff;
                }
        }

        *_out_seam = out_seam;
        *_in_seam = in_seam;

        dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
                __func__, out_seam, out_align, out_start, out_end,
                in_seam, in_align, in_start, in_end, min_diff / 8192,
                DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
}

/*
 * Tile left edges are required to be aligned to multiples of 8 bytes
 * by the IDMAC.
 */
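/*
 * For example: 32-bit RGB formats need only 2-pixel alignment (8 bytes),
 * RGB565/YUYV/UYVY need 4 pixels, NV12/NV16 need 8 pixels, and fully
 * planar YUV420/YVU420 need 16 pixels so that the half-width chroma
 * lines also start at multiples of 8 bytes.
 */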
static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
{
        if (fmt->planar)
                return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
        else
                return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
}

/*
 * Tile top edge alignment is only limited by chroma subsampling.
 */
static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
{
        return fmt->uv_height_dec > 1 ? 2 : 1;
}
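
/*
 * For the rotator paths handled below, e.g. a planar YUV420 output with
 * 90° rotation must be tiled in multiples of 16 pixels in both
 * directions, while an RGB565 output only needs the 8x8 IRT block
 * alignment.
 */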
static inline u32 tile_width_align(enum ipu_image_convert_type type,
                                   const struct ipu_image_pixfmt *fmt,
                                   enum ipu_rotate_mode rot_mode)
{
        if (type == IMAGE_CONVERT_IN) {
                /*
                 * The IC burst reads 8 pixels at a time. Reading beyond the
                 * end of the line is usually acceptable. Those pixels are
                 * ignored, unless the IC has to write the scaled line in
                 * reverse.
                 */
                return (!ipu_rot_mode_is_irt(rot_mode) &&
                        (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
        }

        /*
         * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
         * formats to guarantee 8-byte aligned line start addresses in the
         * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
         * for all other formats.
         */
        return (ipu_rot_mode_is_irt(rot_mode) &&
                fmt->planar && !fmt->uv_packed) ?
                8 * fmt->uv_width_dec : 8;
}

static inline u32 tile_height_align(enum ipu_image_convert_type type,
                                    const struct ipu_image_pixfmt *fmt,
                                    enum ipu_rotate_mode rot_mode)
{
        if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
                return 2;

        /*
         * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
         * formats to guarantee 8-byte aligned line start addresses in the
         * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
         * for all other formats.
         */
        return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
}

/*
 * Fill in left position and width for all tiles in an input column, and
 * for all corresponding output tiles. If the 90° rotator is used, the
 * output tiles are in a row, and output tile top position and height are
 * set.
 */
static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
                             unsigned int col,
                             struct ipu_image_convert_image *in,
                             unsigned int in_left, unsigned int in_width,
                             struct ipu_image_convert_image *out,
                             unsigned int out_left, unsigned int out_width)
{
        unsigned int row, tile_idx;
        struct ipu_image_tile *in_tile, *out_tile;

        for (row = 0; row < in->num_rows; row++) {
                tile_idx = in->num_cols * row + col;
                in_tile = &in->tile[tile_idx];
                out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

                in_tile->left = in_left;
                in_tile->width = in_width;

                if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                        out_tile->top = out_left;
                        out_tile->height = out_width;
                } else {
                        out_tile->left = out_left;
                        out_tile->width = out_width;
                }
        }
}

/*
 * Fill in top position and height for all tiles in an input row, and
 * for all corresponding output tiles. If the 90° rotator is used, the
 * output tiles are in a column, and output tile left position and width
 * are set.
 */
static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
                          struct ipu_image_convert_image *in,
                          unsigned int in_top, unsigned int in_height,
                          struct ipu_image_convert_image *out,
                          unsigned int out_top, unsigned int out_height)
{
        unsigned int col, tile_idx;
        struct ipu_image_tile *in_tile, *out_tile;

        for (col = 0; col < in->num_cols; col++) {
                tile_idx = in->num_cols * row + col;
                in_tile = &in->tile[tile_idx];
                out_tile = &out->tile[ctx->out_tile_map[tile_idx]];

                in_tile->top = in_top;
                in_tile->height = in_height;

                if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                        out_tile->left = out_top;
                        out_tile->width = out_height;
                } else {
                        out_tile->top = out_top;
                        out_tile->height = out_height;
                }
        }
}

/*
 * Find the best horizontal and vertical seam positions to split into tiles.
 * Minimize the fractional part of the input sampling position for the
 * top / left pixels of each tile.
 */
static void find_seams(struct ipu_image_convert_ctx *ctx,
                       struct ipu_image_convert_image *in,
                       struct ipu_image_convert_image *out)
{
        struct device *dev = ctx->chan->priv->ipu->dev;
        unsigned int resized_width = out->base.rect.width;
        unsigned int resized_height = out->base.rect.height;
        unsigned int col;
        unsigned int row;
        unsigned int in_left_align = tile_left_align(in->fmt);
        unsigned int in_top_align = tile_top_align(in->fmt);
        unsigned int out_left_align = tile_left_align(out->fmt);
        unsigned int out_top_align = tile_top_align(out->fmt);
        unsigned int out_width_align = tile_width_align(out->type, out->fmt,
                                                        ctx->rot_mode);
        unsigned int out_height_align = tile_height_align(out->type, out->fmt,
                                                          ctx->rot_mode);
        unsigned int in_right = in->base.rect.width;
        unsigned int in_bottom = in->base.rect.height;
        unsigned int out_right = out->base.rect.width;
        unsigned int out_bottom = out->base.rect.height;
        unsigned int flipped_out_left;
        unsigned int flipped_out_top;

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* Switch width/height and align top left to IRT block size */
                resized_width = out->base.rect.height;
                resized_height = out->base.rect.width;
                out_left_align = out_height_align;
                out_top_align = out_width_align;
                out_width_align = out_left_align;
                out_height_align = out_top_align;
                out_right = out->base.rect.height;
                out_bottom = out->base.rect.width;
        }

        for (col = in->num_cols - 1; col > 0; col--) {
                bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
                                          !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
                bool allow_out_overshoot = (col < in->num_cols - 1) &&
                                           !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
                unsigned int in_left;
                unsigned int out_left;

                /*
                 * Align input width to burst length if the scaling step flips
                 * horizontally.
                 */
                find_best_seam(ctx, col,
                               in_right, out_right,
                               in_left_align, out_left_align,
                               allow_in_overshoot ? 1 : 8 /* burst length */,
                               allow_out_overshoot ? 1 : out_width_align,
                               ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
                               &in_left, &out_left);

                if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
                        flipped_out_left = resized_width - out_right;
                else
                        flipped_out_left = out_left;

                fill_tile_column(ctx, col, in, in_left, in_right - in_left,
                                 out, flipped_out_left, out_right - out_left);

                dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
                        in_left, in_right - in_left,
                        flipped_out_left, out_right - out_left);

                in_right = in_left;
                out_right = out_left;
        }

        flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
                           resized_width - out_right : 0;

        fill_tile_column(ctx, 0, in, 0, in_right,
                         out, flipped_out_left, out_right);

        dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
                in_right, flipped_out_left, out_right);

        for (row = in->num_rows - 1; row > 0; row--) {
                bool allow_overshoot = row < in->num_rows - 1;
                unsigned int in_top;
                unsigned int out_top;

                find_best_seam(ctx, row,
                               in_bottom, out_bottom,
                               in_top_align, out_top_align,
                               1, allow_overshoot ? 1 : out_height_align,
                               ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
                               &in_top, &out_top);

                if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
                    ipu_rot_mode_is_irt(ctx->rot_mode))
                        flipped_out_top = resized_height - out_bottom;
                else
                        flipped_out_top = out_top;

                fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
                              out, flipped_out_top, out_bottom - out_top);

                dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
                        in_top, in_bottom - in_top,
                        flipped_out_top, out_bottom - out_top);

                in_bottom = in_top;
                out_bottom = out_top;
        }

        if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
            ipu_rot_mode_is_irt(ctx->rot_mode))
                flipped_out_top = resized_height - out_bottom;
        else
                flipped_out_top = 0;

        fill_tile_row(ctx, 0, in, 0, in_bottom,
                      out, flipped_out_top, out_bottom);

        dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
                in_bottom, flipped_out_top, out_bottom);
}

static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
                                struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        unsigned int max_width = 1024;
        unsigned int max_height = 1024;
        unsigned int i;

        if (image->type == IMAGE_CONVERT_IN) {
                /* Up to 4096x4096 input tile size */
                max_width <<= ctx->downsize_coeff_h;
                max_height <<= ctx->downsize_coeff_v;
        }

        for (i = 0; i < ctx->num_tiles; i++) {
                struct ipu_image_tile *tile;
                const unsigned int row = i / image->num_cols;
                const unsigned int col = i % image->num_cols;

                if (image->type == IMAGE_CONVERT_OUT)
                        tile = &image->tile[ctx->out_tile_map[i]];
                else
                        tile = &image->tile[i];

                tile->size = ((tile->height * image->fmt->bpp) >> 3) *
                        tile->width;

                if (image->fmt->planar) {
                        tile->stride = tile->width;
                        tile->rot_stride = tile->height;
                } else {
                        tile->stride =
                                (image->fmt->bpp * tile->width) >> 3;
                        tile->rot_stride =
                                (image->fmt->bpp * tile->height) >> 3;
                }

                dev_dbg(priv->ipu->dev,
                        "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
                        chan->ic_task, ctx,
                        image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
                        row, col,
                        tile->width, tile->height, tile->left, tile->top);

                if (!tile->width || tile->width > max_width ||
                    !tile->height || tile->height > max_height) {
                        dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
                                image->type == IMAGE_CONVERT_IN ? "input" :
                                "output", tile->width, tile->height);
                        return -EINVAL;
                }
        }

        return 0;
}

/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
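/*
 * For example, with a 2x2 grid and a plain 90° rotation (IPU_ROT_BIT_90
 * set, no flips), source tile [0,0] maps to destination tile [0,1],
 * i.e. tile A ends up top right as in the rotation diagram at the top
 * of this file.
 */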
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
                                int src_row, int src_col)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_image *s_image = &ctx->in;
        struct ipu_image_convert_image *d_image = &ctx->out;
        int dst_row, dst_col;

        /* with no rotation it's a 1:1 mapping */
        if (ctx->rot_mode == IPU_ROTATE_NONE)
                return src_row * s_image->num_cols + src_col;

        /*
         * before doing the transform, first we have to translate
         * source row,col for an origin in the center of s_image
         */
        src_row = src_row * 2 - (s_image->num_rows - 1);
        src_col = src_col * 2 - (s_image->num_cols - 1);

        /* do the rotation transform */
        if (ctx->rot_mode & IPU_ROT_BIT_90) {
                dst_col = -src_row;
                dst_row = src_col;
        } else {
                dst_col = src_col;
                dst_row = src_row;
        }

        /* apply flip */
        if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
                dst_col = -dst_col;
        if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
                dst_row = -dst_row;

        dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
                chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

        /*
         * finally translate dest row,col using an origin in upper
         * left of d_image
         */
        dst_row += d_image->num_rows - 1;
        dst_col += d_image->num_cols - 1;
        dst_row /= 2;
        dst_col /= 2;

        return dst_row * d_image->num_cols + dst_col;
}

/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
        struct ipu_image_convert_image *s_image = &ctx->in;
        unsigned int row, col, tile = 0;

        for (row = 0; row < s_image->num_rows; row++) {
                for (col = 0; col < s_image->num_cols; col++) {
                        ctx->out_tile_map[tile] =
                                transform_tile_index(ctx, row, col);
                        tile++;
                }
        }
}

static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
                                    struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        const struct ipu_image_pixfmt *fmt = image->fmt;
        unsigned int row, col, tile = 0;
        u32 H, top, y_stride, uv_stride;
        u32 uv_row_off, uv_col_off, uv_off, u_off, v_off;
        u32 y_row_off, y_col_off, y_off;
        u32 y_size, uv_size;

        /* setup some convenience vars */
        H = image->base.pix.height;

        y_stride = image->stride;
        uv_stride = y_stride / fmt->uv_width_dec;
        if (fmt->uv_packed)
                uv_stride *= 2;

        y_size = H * y_stride;
        uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

        for (row = 0; row < image->num_rows; row++) {
                top = image->tile[tile].top;
                y_row_off = top * y_stride;
                uv_row_off = (top * uv_stride) / fmt->uv_height_dec;

                for (col = 0; col < image->num_cols; col++) {
                        y_col_off = image->tile[tile].left;
                        uv_col_off = y_col_off / fmt->uv_width_dec;
                        if (fmt->uv_packed)
                                uv_col_off *= 2;

                        y_off = y_row_off + y_col_off;
                        uv_off = uv_row_off + uv_col_off;

                        u_off = y_size - y_off + uv_off;
                        v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
                        if (fmt->uv_swapped)
                                swap(u_off, v_off);

                        image->tile[tile].offset = y_off;
                        image->tile[tile].u_off = u_off;
                        image->tile[tile++].v_off = v_off;

                        if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
                                dev_err(priv->ipu->dev,
                                        "task %u: ctx %p: %s@[%d,%d]: "
                                        "y_off %08x, u_off %08x, v_off %08x\n",
                                        chan->ic_task, ctx,
                                        image->type == IMAGE_CONVERT_IN ?
                                        "Input" : "Output", row, col,
                                        y_off, u_off, v_off);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
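
/*
 * For the packed case below, e.g. (illustrative numbers) a tile at
 * left = 640, top = 512 in a 1280 pixel wide RGB565 image has
 * stride = 2560 bytes and offset = 512 * 2560 + 640 * 2 = 1312000
 * bytes, which satisfies the 8-byte alignment check.
 */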
static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
                                    struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        const struct ipu_image_pixfmt *fmt = image->fmt;
        unsigned int row, col, tile = 0;
        u32 bpp, stride, offset;
        u32 row_off, col_off;

        /* setup some convenience vars */
        stride = image->stride;
        bpp = fmt->bpp;

        for (row = 0; row < image->num_rows; row++) {
                row_off = image->tile[tile].top * stride;

                for (col = 0; col < image->num_cols; col++) {
                        col_off = (image->tile[tile].left * bpp) >> 3;

                        offset = row_off + col_off;

                        image->tile[tile].offset = offset;
                        image->tile[tile].u_off = 0;
                        image->tile[tile++].v_off = 0;

                        if (offset & 0x7) {
                                dev_err(priv->ipu->dev,
                                        "task %u: ctx %p: %s@[%d,%d]: "
                                        "phys %08x\n",
                                        chan->ic_task, ctx,
                                        image->type == IMAGE_CONVERT_IN ?
                                        "Input" : "Output", row, col,
                                        row_off + col_off);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
                             struct ipu_image_convert_image *image)
{
        if (image->fmt->planar)
                return calc_tile_offsets_planar(ctx, image);

        return calc_tile_offsets_packed(ctx, image);
}

/*
 * Calculate the resizing ratio for the IC main processing section given input
 * size, fixed downsizing coefficient, and output size.
 * Either round to closest for the next tile's first pixel to minimize seams
 * and distortion (for all but right column / bottom row), or round down to
 * avoid sampling beyond the edges of the input image for this tile's last
 * pixel.
 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
 */
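/*
 * For example (illustrative sizes), scaling a 1024 pixel wide tile to
 * 1000 pixels with no downsizing gives
 * DIV_ROUND_CLOSEST(8192 * 1024, 1000) = 8389 when rounding to closest,
 * but 8192 * 1023 / 999 = 8388 when rounding down for the right column /
 * bottom row.
 */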
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
                             u32 output_size, bool allow_overshoot)
{
        u32 downsized = input_size >> downsize_coeff;

        if (allow_overshoot)
                return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
        else
                return 8192 * (downsized - 1) / (output_size - 1);
}

/*
 * Slightly modify resize coefficients per tile to hide the bilinear
 * interpolator reset at tile borders, shifting the right / bottom edge
 * by up to a half input pixel. This removes noticeable seams between
 * tiles at higher upscaling factors.
 */
static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_tile *in_tile, *out_tile;
        unsigned int col, row, tile_idx;
        unsigned int last_output;

        for (col = 0; col < ctx->in.num_cols; col++) {
                bool closest = (col < ctx->in.num_cols - 1) &&
                               !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
                u32 resized_width;
                u32 resize_coeff_h;
                u32 in_width;

                tile_idx = col;
                in_tile = &ctx->in.tile[tile_idx];
                out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

                if (ipu_rot_mode_is_irt(ctx->rot_mode))
                        resized_width = out_tile->height;
                else
                        resized_width = out_tile->width;

                resize_coeff_h = calc_resize_coeff(in_tile->width,
                                                   ctx->downsize_coeff_h,
                                                   resized_width, closest);

                dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
                        __func__, col, resize_coeff_h);

                /*
                 * With the horizontal scaling factor known, round up resized
                 * width (output width or height) to burst size.
                 */
                resized_width = round_up(resized_width, 8);

                /*
                 * Calculate input width from the last accessed input pixel
                 * given resized width and scaling coefficients. Round up to
                 * burst size.
                 */
                last_output = resized_width - 1;
                if (closest && ((last_output * resize_coeff_h) % 8192))
                        last_output++;
                in_width = round_up(
                        (DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
                        << ctx->downsize_coeff_h, 8);

                for (row = 0; row < ctx->in.num_rows; row++) {
                        tile_idx = row * ctx->in.num_cols + col;
                        in_tile = &ctx->in.tile[tile_idx];
                        out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

                        if (ipu_rot_mode_is_irt(ctx->rot_mode))
                                out_tile->height = resized_width;
                        else
                                out_tile->width = resized_width;
                        in_tile->width = in_width;
                }

                ctx->resize_coeffs_h[col] = resize_coeff_h;
        }

        for (row = 0; row < ctx->in.num_rows; row++) {
                bool closest = (row < ctx->in.num_rows - 1) &&
                               !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
                u32 resized_height;
                u32 resize_coeff_v;
                u32 in_height;

                tile_idx = row * ctx->in.num_cols;
                in_tile = &ctx->in.tile[tile_idx];
                out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

                if (ipu_rot_mode_is_irt(ctx->rot_mode))
                        resized_height = out_tile->width;
                else
                        resized_height = out_tile->height;

                resize_coeff_v = calc_resize_coeff(in_tile->height,
                                                   ctx->downsize_coeff_v,
                                                   resized_height, closest);

                dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
                        __func__, row, resize_coeff_v);

                /*
                 * With the vertical scaling factor known, round up resized
                 * height (output width or height) to IDMAC limitations.
                 */
                resized_height = round_up(resized_height, 2);

                /*
                 * Calculate input height from the last accessed input pixel
                 * given resized height and scaling coefficients. Align to
                 * IDMAC restrictions.
                 */
                last_output = resized_height - 1;
                if (closest && ((last_output * resize_coeff_v) % 8192))
                        last_output++;
                in_height = round_up(
                        (DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
                        << ctx->downsize_coeff_v, 2);

                for (col = 0; col < ctx->in.num_cols; col++) {
                        tile_idx = row * ctx->in.num_cols + col;
                        in_tile = &ctx->in.tile[tile_idx];
                        out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

                        if (ipu_rot_mode_is_irt(ctx->rot_mode))
                                out_tile->width = resized_height;
                        else
                                out_tile->height = resized_height;
                        in_tile->height = in_height;
                }

                ctx->resize_coeffs_v[row] = resize_coeff_v;
        }
}

/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
                         struct list_head *q)
{
        struct ipu_image_convert_run *run;
        int count = 0;

        lockdep_assert_held(&ctx->chan->irqlock);

        list_for_each_entry(run, q, list) {
                if (run->ctx == ctx)
                        count++;
        }

        return count;
}

static void convert_stop(struct ipu_image_convert_run *run)
{
        struct ipu_image_convert_ctx *ctx = run->ctx;
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;

        dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
                __func__, chan->ic_task, ctx, run);

        /* disable IC tasks and the channels */
        ipu_ic_task_disable(chan->ic);
        ipu_idmac_disable_channel(chan->in_chan);
        ipu_idmac_disable_channel(chan->out_chan);

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                ipu_idmac_disable_channel(chan->rotation_in_chan);
                ipu_idmac_disable_channel(chan->rotation_out_chan);
                ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
        }

        ipu_ic_disable(chan->ic);
}

static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
                               struct ipuv3_channel *channel,
                               struct ipu_image_convert_image *image,
                               enum ipu_rotate_mode rot_mode,
                               bool rot_swap_width_height,
                               unsigned int tile)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        unsigned int burst_size;
        u32 width, height, stride;
        dma_addr_t addr0, addr1 = 0;
        struct ipu_image tile_image;
        unsigned int tile_idx[2];

        if (image->type == IMAGE_CONVERT_OUT) {
                tile_idx[0] = ctx->out_tile_map[tile];
                tile_idx[1] = ctx->out_tile_map[1];
        } else {
                tile_idx[0] = tile;
                tile_idx[1] = 1;
        }

        if (rot_swap_width_height) {
                width = image->tile[tile_idx[0]].height;
                height = image->tile[tile_idx[0]].width;
                stride = image->tile[tile_idx[0]].rot_stride;
                addr0 = ctx->rot_intermediate[0].phys;
                if (ctx->double_buffering)
                        addr1 = ctx->rot_intermediate[1].phys;
        } else {
                width = image->tile[tile_idx[0]].width;
                height = image->tile[tile_idx[0]].height;
                stride = image->stride;
                addr0 = image->base.phys0 +
                        image->tile[tile_idx[0]].offset;
                if (ctx->double_buffering)
                        addr1 = image->base.phys0 +
                                image->tile[tile_idx[1]].offset;
        }

        ipu_cpmem_zero(channel);

        memset(&tile_image, 0, sizeof(tile_image));
        tile_image.pix.width = tile_image.rect.width = width;
        tile_image.pix.height = tile_image.rect.height = height;
        tile_image.pix.bytesperline = stride;
        tile_image.pix.pixelformat = image->fmt->fourcc;
        tile_image.phys0 = addr0;
        tile_image.phys1 = addr1;
        if (image->fmt->planar && !rot_swap_width_height) {
                tile_image.u_offset = image->tile[tile_idx[0]].u_off;
                tile_image.v_offset = image->tile[tile_idx[0]].v_off;
        }

        ipu_cpmem_set_image(channel, &tile_image);

        if (rot_mode)
                ipu_cpmem_set_rotation(channel, rot_mode);

        /*
         * Skip writing U and V components to odd rows in the output
         * channels for planar 4:2:0.
         */
        if ((channel == chan->out_chan ||
             channel == chan->rotation_out_chan) &&
            image->fmt->planar && image->fmt->uv_height_dec == 2)
                ipu_cpmem_skip_odd_chroma_rows(channel);

        if (channel == chan->rotation_in_chan ||
            channel == chan->rotation_out_chan) {
                burst_size = 8;
                ipu_cpmem_set_block_mode(channel);
        } else
                burst_size = (width % 16) ? 8 : 16;

        ipu_cpmem_set_burstsize(channel, burst_size);

        ipu_ic_task_idma_init(chan->ic, channel, width, height,
                              burst_size, rot_mode);

        /*
         * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
         * only do this when there is no PRG present.
         */
        if (!channel->ipu->prg_priv)
                ipu_cpmem_set_axi_id(channel, 1);

        ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}

static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
        struct ipu_image_convert_ctx *ctx = run->ctx;
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_image *s_image = &ctx->in;
        struct ipu_image_convert_image *d_image = &ctx->out;
        unsigned int dst_tile = ctx->out_tile_map[tile];
        unsigned int dest_width, dest_height;
        unsigned int col, row;
        u32 rsc;
        int ret;

        dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
                __func__, chan->ic_task, ctx, run, tile, dst_tile);

        /* clear EOF irq mask */
        ctx->eof_mask = 0;

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* swap width/height for resizer */
                dest_width = d_image->tile[dst_tile].height;
                dest_height = d_image->tile[dst_tile].width;
        } else {
                dest_width = d_image->tile[dst_tile].width;
                dest_height = d_image->tile[dst_tile].height;
        }

        row = tile / s_image->num_cols;
        col = tile % s_image->num_cols;

        rsc = (ctx->downsize_coeff_v << 30) |
              (ctx->resize_coeffs_v[row] << 16) |
              (ctx->downsize_coeff_h << 14) |
              (ctx->resize_coeffs_h[col]);
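
        /*
         * The packed rsc word holds both downsizing coefficients and the
         * per-tile bilinear coefficients, e.g. (illustrative values)
         * downsize_coeff_v/h = 1 and resize_coeffs_v/h = 8192 (1:1) give
         * rsc = (1 << 30) | (8192 << 16) | (1 << 14) | 8192 = 0x60006000.
         */
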
  1216. dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
  1217. __func__, s_image->tile[tile].width,
  1218. s_image->tile[tile].height, dest_width, dest_height, rsc);
  1219. /* setup the IC resizer and CSC */
  1220. ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
  1221. s_image->tile[tile].width,
  1222. s_image->tile[tile].height,
  1223. dest_width,
  1224. dest_height,
  1225. rsc);
  1226. if (ret) {
  1227. dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
  1228. return ret;
  1229. }
  1230. /* init the source MEM-->IC PP IDMAC channel */
  1231. init_idmac_channel(ctx, chan->in_chan, s_image,
  1232. IPU_ROTATE_NONE, false, tile);
  1233. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1234. /* init the IC PP-->MEM IDMAC channel */
  1235. init_idmac_channel(ctx, chan->out_chan, d_image,
  1236. IPU_ROTATE_NONE, true, tile);
  1237. /* init the MEM-->IC PP ROT IDMAC channel */
  1238. init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
  1239. ctx->rot_mode, true, tile);
  1240. /* init the destination IC PP ROT-->MEM IDMAC channel */
  1241. init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
  1242. IPU_ROTATE_NONE, false, tile);
  1243. /* now link IC PP-->MEM to MEM-->IC PP ROT */
  1244. ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
  1245. } else {
  1246. /* init the destination IC PP-->MEM IDMAC channel */
  1247. init_idmac_channel(ctx, chan->out_chan, d_image,
  1248. ctx->rot_mode, false, tile);
  1249. }
  1250. /* enable the IC */
  1251. ipu_ic_enable(chan->ic);
  1252. /* set buffers ready */
  1253. ipu_idmac_select_buffer(chan->in_chan, 0);
  1254. ipu_idmac_select_buffer(chan->out_chan, 0);
  1255. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  1256. ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
  1257. if (ctx->double_buffering) {
  1258. ipu_idmac_select_buffer(chan->in_chan, 1);
  1259. ipu_idmac_select_buffer(chan->out_chan, 1);
  1260. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  1261. ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
  1262. }
  1263. /* enable the channels! */
  1264. ipu_idmac_enable_channel(chan->in_chan);
  1265. ipu_idmac_enable_channel(chan->out_chan);
  1266. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1267. ipu_idmac_enable_channel(chan->rotation_in_chan);
  1268. ipu_idmac_enable_channel(chan->rotation_out_chan);
  1269. }
  1270. ipu_ic_task_enable(chan->ic);
  1271. ipu_cpmem_dump(chan->in_chan);
  1272. ipu_cpmem_dump(chan->out_chan);
  1273. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1274. ipu_cpmem_dump(chan->rotation_in_chan);
  1275. ipu_cpmem_dump(chan->rotation_out_chan);
  1276. }
  1277. ipu_dump(priv->ipu);
  1278. return 0;
  1279. }
/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	return convert_start(run, 0);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}

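/*
 * Drain the done_q, calling the completion callback for each finished
 * run. The irqlock is dropped around each callback, since a completion
 * handler may call back into this API (for example to queue another
 * run).
 */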
static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}

/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete_all(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}

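/*
 * Return true if the IC has to be reprogrammed between the current tile
 * and the next one, i.e. when the horizontal or vertical resize
 * coefficients or the input/output tile dimensions differ between the
 * two tiles.
 */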
static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
{
	unsigned int cur_tile = ctx->next_tile - 1;
	unsigned int next_tile = ctx->next_tile;

	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
		return true;

	return false;
}

/* hold irqlock when calling */
static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {
		if (ic_settings_changed(ctx)) {
			convert_stop(run);
			convert_start(run, ctx->next_tile);
		} else {
			src_tile = &s_image->tile[ctx->next_tile];
			dst_idx = ctx->out_tile_map[ctx->next_tile];
			dst_tile = &d_image->tile[dst_idx];

			ipu_cpmem_set_buffer(chan->in_chan, 0,
					     s_image->base.phys0 +
					     src_tile->offset);
			ipu_cpmem_set_buffer(outch, 0,
					     d_image->base.phys0 +
					     dst_tile->offset);
			if (s_image->fmt->planar)
				ipu_cpmem_set_uv_offset(chan->in_chan,
							src_tile->u_off,
							src_tile->v_off);
			if (d_image->fmt->planar)
				ipu_cpmem_set_uv_offset(outch,
							dst_tile->u_off,
							dst_tile->v_off);

			ipu_idmac_select_buffer(chan->in_chan, 0);
			ipu_idmac_select_buffer(outch, 0);
		}
	} else if (ctx->next_tile < ctx->num_tiles - 1) {
		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->eof_mask = 0;	/* clear EOF irq mask for next tile */
	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	return IRQ_WAKE_THREAD;
}

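/*
 * Hard-irq half of the EOF handling, shared by all IDMAC channels of a
 * conversion chan (see get_eof_irq()). Accumulates the per-channel EOF
 * bits for the current run in ctx->eof_mask and, once all EOFs expected
 * for this tile have arrived, hands off to do_tile_complete(). Returns
 * IRQ_WAKE_THREAD when the threaded handler (do_bh) needs to drain the
 * done_q.
 */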
static irqreturn_t eof_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	irqreturn_t ret = IRQ_HANDLED;
	bool tile_complete = false;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (irq == chan->in_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_IN;
	} else if (irq == chan->out_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_OUT;
	} else if (irq == chan->rot_in_eof_irq ||
		   irq == chan->rot_out_eof_irq) {
		if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
			/* this was NOT a rotation op, shouldn't happen */
			dev_err(priv->ipu->dev,
				"Unexpected rotation interrupt\n");
			goto out;
		}
		ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
			EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
	} else {
		dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
		ret = IRQ_NONE;
		goto out;
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
	else
		tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);

	if (tile_complete)
		ret = do_tile_complete(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

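/*
 * Undo get_ipu_resources(): free the EOF interrupts and put the IDMAC
 * channels and the IC task. Resources that were never acquired (irq
 * numbers < 0, NULL or error pointers) are skipped, so this is safe to
 * call on a partially set up chan.
 */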
static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->in_eof_irq >= 0)
		free_irq(chan->in_eof_irq, chan);
	if (chan->rot_in_eof_irq >= 0)
		free_irq(chan->rot_in_eof_irq, chan);
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->in_eof_irq = -1;
	chan->rot_in_eof_irq = -1;
	chan->out_eof_irq = -1;
	chan->rot_out_eof_irq = -1;
}

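/*
 * Look up the EOF interrupt of the given IDMAC channel and request it
 * with eof_irq() as the hard-irq handler and do_bh() as the threaded
 * handler. Returns the irq number on success or a negative error code.
 */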
static int get_eof_irq(struct ipu_image_convert_chan *chan,
		       struct ipuv3_channel *channel)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret, irq;

	irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);

	ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
		return ret;
	}

	return irq;
}

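/*
 * Acquire the IC task, the four IDMAC channels and their EOF interrupts
 * for this conversion chan. Called from ipu_image_convert_prepare()
 * when the first context is added to the chan; on any failure
 * everything acquired so far is released again.
 */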
static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	ret = get_eof_irq(chan, chan->in_chan);
	if (ret < 0) {
		chan->in_eof_irq = -1;
		goto err;
	}
	chan->in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_in_chan);
	if (ret < 0) {
		chan->rot_in_eof_irq = -1;
		goto err;
	}
	chan->rot_in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->out_chan);
	if (ret < 0) {
		chan->out_eof_irq = -1;
		goto err;
	}
	chan->out_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_out_chan);
	if (ret < 0) {
		chan->rot_out_eof_irq = -1;
		goto err;
	}
	chan->rot_out_eof_irq = ret;

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}

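/*
 * Fill in the internal image descriptor from the client-supplied
 * ipu_image: copy the base image, look up the pixel format and derive
 * the line stride (in pixels for planar formats, in bytes otherwise).
 */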
static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride = ic_image->base.pix.bytesperline;

	return 0;
}

/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}

/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
			      enum ipu_rotate_mode rot_mode)
{
	const struct ipu_image_pixfmt *infmt, *outfmt;
	u32 w_align_out, h_align_out;
	u32 w_align_in, h_align_in;

	infmt = get_format(in->pix.pixelformat);
	outfmt = get_format(out->pix.pixelformat);

	/* set some default pixel formats if needed */
	if (!infmt) {
		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		infmt = get_format(V4L2_PIX_FMT_RGB24);
	}
	if (!outfmt) {
		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		outfmt = get_format(V4L2_PIX_FMT_RGB24);
	}

	/* image converter does not handle fields */
	in->pix.field = out->pix.field = V4L2_FIELD_NONE;

	/* resizer cannot downsize more than 4:1 */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.width / 4);
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.height / 4);
	} else {
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.width / 4);
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.height / 4);
	}

	/* align input width/height */
	w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
					    rot_mode));
	h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
					     rot_mode));
	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
				    w_align_in);
	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
				     h_align_in);

	/* align output width/height */
	w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
					     rot_mode));
	h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
					      rot_mode));
	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
				     w_align_out);
	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
				      h_align_out);

	/* set input/output strides and image sizes */
	in->pix.bytesperline = infmt->planar ?
		clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
			    w_align_in) :
		clamp_align((in->pix.width * infmt->bpp) >> 3,
			    ((2 << w_align_in) * infmt->bpp) >> 3,
			    (MAX_W * infmt->bpp) >> 3,
			    w_align_in);
	in->pix.sizeimage = infmt->planar ?
		(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
		in->pix.height * in->pix.bytesperline;
	out->pix.bytesperline = outfmt->planar ? out->pix.width :
		(out->pix.width * outfmt->bpp) >> 3;
	out->pix.sizeimage = outfmt->planar ?
		(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
		out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);

/*
 * This is used by ipu_image_convert_prepare() to verify that the given
 * input and output images are valid before starting the conversion.
 * Clients can also call it before calling ipu_image_convert_prepare().
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
			     enum ipu_rotate_mode rot_mode)
{
	struct ipu_image testin, testout;

	testin = *in;
	testout = *out;

	ipu_image_convert_adjust(&testin, &testout, rot_mode);

	if (testin.pix.width != in->pix.width ||
	    testin.pix.height != in->pix.height ||
	    testout.pix.width != out->pix.width ||
	    testout.pix.height != out->pix.height)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);

/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	unsigned int i;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	ctx->rot_mode = rot_mode;

	/* Sets ctx->in.num_rows/cols as well */
	ret = calc_image_resize_coefficients(ctx, in, out);
	if (ret)
		goto out_free;

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		d_image->num_rows = s_image->num_cols;
		d_image->num_cols = s_image->num_rows;
	} else {
		d_image->num_rows = s_image->num_rows;
		d_image->num_cols = s_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);

	find_seams(ctx, s_image, d_image);

	ret = calc_tile_dimensions(ctx, s_image);
	if (ret)
		goto out_free;

	ret = calc_tile_offsets(ctx, s_image);
	if (ret)
		goto out_free;

	calc_tile_dimensions(ctx, d_image);

	ret = calc_tile_offsets(ctx, d_image);
	if (ret)
		goto out_free;

	calc_tile_resize_coefficients(ctx);

	ret = ipu_ic_calc_csc(&ctx->csc,
			s_image->base.pix.ycbcr_enc,
			s_image->base.pix.quantization,
			ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
			d_image->base.pix.ycbcr_enc,
			d_image->base.pix.quantization,
			ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
	if (ret)
		goto out_free;

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer which is not possible. So double-buffering is
	 * impossible when either the source or destination images are
	 * a planar format (YUV420, YUV422P, etc.). Further, differently
	 * sized tiles or different resizing coefficients per tile
	 * prevent double-buffering as well.
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);
	for (i = 1; i < ctx->num_tiles; i++) {
		if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
		    ctx->in.tile[i].height != ctx->in.tile[0].height ||
		    ctx->out.tile[i].width != ctx->out.tile[0].width ||
		    ctx->out.tile[i].height != ctx->out.tile[0].height) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_cols; i++) {
		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_rows; i++) {
		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
			ctx->double_buffering = false;
			break;
		}
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		unsigned long intermediate_size = d_image->tile[0].size;

		for (i = 1; i < ctx->num_tiles; i++) {
			if (d_image->tile[i].size > intermediate_size)
				intermediate_size = d_image->tile[i].size;
		}

		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    intermediate_size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    intermediate_size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);

/*
 * Carry out a single image conversion run. Only the physaddr's of the input
 * and output image buffers are needed. The conversion context must have
 * been created previously with ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);

/* Abort any active or pending conversions for this context */
static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *active_run, *tmp;
	unsigned long flags;
	int run_count, ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* move all remaining pending runs in this context to done_q */
	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		if (run->ctx != ctx)
			continue;
		run->status = -EIO;
		list_move_tail(&run->list, &chan->done_q);
	}

	run_count = get_run_count(ctx, &chan->done_q);
	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
		chan->current_run : NULL;

	if (active_run)
		reinit_completion(&ctx->aborted);

	ctx->aborting = true;

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (!run_count && !active_run) {
		dev_dbg(priv->ipu->dev,
			"%s: task %u: no abort needed for ctx %p\n",
			__func__, chan->ic_task, ctx);
		return;
	}

	if (!active_run) {
		empty_done_q(chan);
		return;
	}

	dev_dbg(priv->ipu->dev,
		"%s: task %u: wait for completion: %d runs\n",
		__func__, chan->ic_task, run_count);

	ret = wait_for_completion_timeout(&ctx->aborted,
					  msecs_to_jiffies(10000));
	if (ret == 0) {
		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
		force_abort(ctx);
	}
}

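/*
 * Abort any active or pending conversions for this context. Unlike
 * __ipu_image_convert_abort(), the aborting flag is cleared again
 * afterwards so that the context can accept new runs.
 */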
void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	__ipu_image_convert_abort(ctx);
	ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);

/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned long flags;
	bool put_res;

	/* make sure no runs are hanging around */
	__ipu_image_convert_abort(ctx);

	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
		chan->ic_task, ctx);

	spin_lock_irqsave(&chan->irqlock, flags);

	list_del(&ctx->list);

	put_res = list_empty(&chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (put_res)
		release_ipu_resources(chan);

	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	free_dma_buf(priv, &ctx->rot_intermediate[0]);

	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);

/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
		  struct ipu_image *in, struct ipu_image *out,
		  enum ipu_rotate_mode rot_mode,
		  ipu_image_convert_cb_t complete,
		  void *complete_context)
{
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	int ret;

	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
					complete, complete_context);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run) {
		ipu_image_convert_unprepare(ctx);
		return ERR_PTR(-ENOMEM);
	}

	run->ctx = ctx;
	run->in_phys = in->phys0;
	run->out_phys = out->phys0;

	ret = ipu_image_convert_queue(run);
	if (ret) {
		ipu_image_convert_unprepare(ctx);
		kfree(run);
		return ERR_PTR(ret);
	}

	return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);

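/*
 * Minimal usage sketch for the "canned" asynchronous API above. This is
 * illustrative only: my_complete(), my_comp, my_in and my_out are
 * hypothetical caller code, not part of this driver.
 *
 *	static void my_complete(struct ipu_image_convert_run *run, void *arg)
 *	{
 *		complete((struct completion *)arg);
 *	}
 *
 *	run = ipu_image_convert(ipu, IC_TASK_POST_PROCESSOR,
 *				&my_in, &my_out, IPU_ROTATE_NONE,
 *				my_complete, &my_comp);
 *	if (IS_ERR(run))
 *		return PTR_ERR(run);
 *	wait_for_completion(&my_comp);
 *	ipu_image_convert_unprepare(run->ctx);
 *	kfree(run);
 *
 * ipu_image_convert_sync() below implements essentially this pattern.
 */
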
  2065. /* "Canned" synchronous single image conversion */
  2066. static void image_convert_sync_complete(struct ipu_image_convert_run *run,
  2067. void *data)
  2068. {
  2069. struct completion *comp = data;
  2070. complete(comp);
  2071. }
  2072. int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
  2073. struct ipu_image *in, struct ipu_image *out,
  2074. enum ipu_rotate_mode rot_mode)
  2075. {
  2076. struct ipu_image_convert_run *run;
  2077. struct completion comp;
  2078. int ret;
  2079. init_completion(&comp);
  2080. run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
  2081. image_convert_sync_complete, &comp);
  2082. if (IS_ERR(run))
  2083. return PTR_ERR(run);
  2084. ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
  2085. ret = (ret == 0) ? -ETIMEDOUT : 0;
  2086. ipu_image_convert_unprepare(run->ctx);
  2087. kfree(run);
  2088. return ret;
  2089. }
  2090. EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
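/*
 * Called at IPU probe time: allocate the driver-private data and
 * initialize the per-IC-task conversion channels (locks, queues and
 * interrupt numbers). The hardware resources themselves are only
 * acquired later, when the first context is prepared.
 */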
int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
	struct ipu_image_convert_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ipu->image_convert_priv = priv;
	priv->ipu = ipu;

	for (i = 0; i < IC_NUM_TASKS; i++) {
		struct ipu_image_convert_chan *chan = &priv->chan[i];

		chan->ic_task = i;
		chan->priv = priv;
		chan->dma_ch = &image_convert_dma_chan[i];
		chan->in_eof_irq = -1;
		chan->rot_in_eof_irq = -1;
		chan->out_eof_irq = -1;
		chan->rot_out_eof_irq = -1;

		spin_lock_init(&chan->irqlock);
		INIT_LIST_HEAD(&chan->ctx_list);
		INIT_LIST_HEAD(&chan->pending_q);
		INIT_LIST_HEAD(&chan->done_q);
	}

	return 0;
}

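/*
 * Nothing to do on exit: the private data is devm-allocated and the
 * hardware resources are released when the last context is unprepared.
 */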
void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}