ipu-image-convert.c

  1. /*
  2. * Copyright (C) 2012-2016 Mentor Graphics Inc.
  3. *
  4. * Queued image conversion support, with tiling and rotation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  13. * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  14. * for more details.
  15. */
  16. #include <linux/interrupt.h>
  17. #include <linux/dma-mapping.h>
  18. #include <video/imx-ipu-image-convert.h>
  19. #include "ipu-prv.h"
  20. /*
  21. * The IC Resizer has a restriction that the output frame from the
  22. * resizer must be 1024 or less in both width (pixels) and height
  23. * (lines).
  24. *
  25. * The image converter attempts to split up a conversion when
  26. * the desired output (converted) frame resolution exceeds the
  27. * IC resizer limit of 1024 in either dimension.
  28. *
  29. * If either dimension of the output frame exceeds the limit, the
  30. * dimension is split into 1, 2, or 4 equal stripes, for a maximum
  31. * of 4*4 or 16 tiles. A conversion is then carried out for each
  32. * tile (but taking care to pass the full frame stride length to
  33. * the DMA channel's parameter memory!). IDMA double-buffering is used
  34. * to convert each tile back-to-back when possible (see note below
  35. * where the double_buffering boolean is set).
  36. *
  37. * Note that the input frame must be split up into the same number
  38. * of tiles as the output frame.
  39. *
  40. * FIXME: at this point there is no attempt to deal with visible seams
  41. * at the tile boundaries when upscaling. The seams are caused by a reset
  42. * of the bilinear upscale interpolation when starting a new tile. The
  43. * seams are barely visible for small upscale factors, but become
  44. * increasingly visible as the upscale factor gets larger, since more
  45. * interpolated pixels get thrown out at the tile boundaries. A possible
  46. * fix might be to overlap tiles of different sizes, but this must be done
  47. * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
  48. * alignment restrictions of each tile.
  49. */
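/*
 * Worked example (illustrative only): a 1920x1080 output frame exceeds
 * the 1024-pixel resizer limit in both dimensions, so num_stripes()
 * below yields 2 stripes in each direction and the frame is converted
 * as a 2x2 grid of 960x540 tiles. A 1280x720 output only exceeds the
 * limit in width, giving two side-by-side 640x720 tiles, while a
 * 1024x768 output needs no tiling at all.
 */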
  50. #define MAX_STRIPES_W 4
  51. #define MAX_STRIPES_H 4
  52. #define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
  53. #define MIN_W 16
  54. #define MIN_H 8
  55. #define MAX_W 4096
  56. #define MAX_H 4096
  57. enum ipu_image_convert_type {
  58. IMAGE_CONVERT_IN = 0,
  59. IMAGE_CONVERT_OUT,
  60. };
  61. struct ipu_image_convert_dma_buf {
  62. void *virt;
  63. dma_addr_t phys;
  64. unsigned long len;
  65. };
  66. struct ipu_image_convert_dma_chan {
  67. int in;
  68. int out;
  69. int rot_in;
  70. int rot_out;
  71. int vdi_in_p;
  72. int vdi_in;
  73. int vdi_in_n;
  74. };
  75. /* dimensions of one tile */
  76. struct ipu_image_tile {
  77. u32 width;
  78. u32 height;
  79. /* size and strides are in bytes */
  80. u32 size;
  81. u32 stride;
  82. u32 rot_stride;
  83. /* start Y or packed offset of this tile */
  84. u32 offset;
  85. /* offset from start to tile in U plane, for planar formats */
  86. u32 u_off;
  87. /* offset from start to tile in V plane, for planar formats */
  88. u32 v_off;
  89. };
  90. struct ipu_image_convert_image {
  91. struct ipu_image base;
  92. enum ipu_image_convert_type type;
  93. const struct ipu_image_pixfmt *fmt;
  94. unsigned int stride;
  95. /* # of rows (horizontal stripes) if dest height is > 1024 */
  96. unsigned int num_rows;
  97. /* # of columns (vertical stripes) if dest width is > 1024 */
  98. unsigned int num_cols;
  99. struct ipu_image_tile tile[MAX_TILES];
  100. };
  101. struct ipu_image_pixfmt {
  102. u32 fourcc; /* V4L2 fourcc */
  103. int bpp; /* total bpp */
  104. int uv_width_dec; /* decimation in width for U/V planes */
  105. int uv_height_dec; /* decimation in height for U/V planes */
  106. bool planar; /* planar format */
  107. bool uv_swapped; /* U and V planes are swapped */
  108. bool uv_packed; /* partial planar (U and V in same plane) */
  109. };
  110. struct ipu_image_convert_ctx;
  111. struct ipu_image_convert_chan;
  112. struct ipu_image_convert_priv;
  113. struct ipu_image_convert_ctx {
  114. struct ipu_image_convert_chan *chan;
  115. ipu_image_convert_cb_t complete;
  116. void *complete_context;
  117. /* Source/destination image data and rotation mode */
  118. struct ipu_image_convert_image in;
  119. struct ipu_image_convert_image out;
  120. enum ipu_rotate_mode rot_mode;
  121. /* intermediate buffer for rotation */
  122. struct ipu_image_convert_dma_buf rot_intermediate[2];
  123. /* current buffer number for double buffering */
  124. int cur_buf_num;
  125. bool aborting;
  126. struct completion aborted;
  127. /* can we use double-buffering for this conversion operation? */
  128. bool double_buffering;
  129. /* num_rows * num_cols */
  130. unsigned int num_tiles;
  131. /* next tile to process */
  132. unsigned int next_tile;
  133. /* where to place converted tile in dest image */
  134. unsigned int out_tile_map[MAX_TILES];
  135. struct list_head list;
  136. };
  137. struct ipu_image_convert_chan {
  138. struct ipu_image_convert_priv *priv;
  139. enum ipu_ic_task ic_task;
  140. const struct ipu_image_convert_dma_chan *dma_ch;
  141. struct ipu_ic *ic;
  142. struct ipuv3_channel *in_chan;
  143. struct ipuv3_channel *out_chan;
  144. struct ipuv3_channel *rotation_in_chan;
  145. struct ipuv3_channel *rotation_out_chan;
  146. /* the IPU end-of-frame irqs */
  147. int out_eof_irq;
  148. int rot_out_eof_irq;
  149. spinlock_t irqlock;
  150. /* list of convert contexts */
  151. struct list_head ctx_list;
  152. /* queue of conversion runs */
  153. struct list_head pending_q;
  154. /* queue of completed runs */
  155. struct list_head done_q;
  156. /* the current conversion run */
  157. struct ipu_image_convert_run *current_run;
  158. };
  159. struct ipu_image_convert_priv {
  160. struct ipu_image_convert_chan chan[IC_NUM_TASKS];
  161. struct ipu_soc *ipu;
  162. };
  163. static const struct ipu_image_convert_dma_chan
  164. image_convert_dma_chan[IC_NUM_TASKS] = {
  165. [IC_TASK_VIEWFINDER] = {
  166. .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
  167. .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
  168. .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
  169. .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
  170. .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
  171. .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
  172. .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
  173. },
  174. [IC_TASK_POST_PROCESSOR] = {
  175. .in = IPUV3_CHANNEL_MEM_IC_PP,
  176. .out = IPUV3_CHANNEL_IC_PP_MEM,
  177. .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
  178. .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
  179. },
  180. };
  181. static const struct ipu_image_pixfmt image_convert_formats[] = {
  182. {
  183. .fourcc = V4L2_PIX_FMT_RGB565,
  184. .bpp = 16,
  185. }, {
  186. .fourcc = V4L2_PIX_FMT_RGB24,
  187. .bpp = 24,
  188. }, {
  189. .fourcc = V4L2_PIX_FMT_BGR24,
  190. .bpp = 24,
  191. }, {
  192. .fourcc = V4L2_PIX_FMT_RGB32,
  193. .bpp = 32,
  194. }, {
  195. .fourcc = V4L2_PIX_FMT_BGR32,
  196. .bpp = 32,
  197. }, {
  198. .fourcc = V4L2_PIX_FMT_YUYV,
  199. .bpp = 16,
  200. .uv_width_dec = 2,
  201. .uv_height_dec = 1,
  202. }, {
  203. .fourcc = V4L2_PIX_FMT_UYVY,
  204. .bpp = 16,
  205. .uv_width_dec = 2,
  206. .uv_height_dec = 1,
  207. }, {
  208. .fourcc = V4L2_PIX_FMT_YUV420,
  209. .bpp = 12,
  210. .planar = true,
  211. .uv_width_dec = 2,
  212. .uv_height_dec = 2,
  213. }, {
  214. .fourcc = V4L2_PIX_FMT_YVU420,
  215. .bpp = 12,
  216. .planar = true,
  217. .uv_width_dec = 2,
  218. .uv_height_dec = 2,
  219. .uv_swapped = true,
  220. }, {
  221. .fourcc = V4L2_PIX_FMT_NV12,
  222. .bpp = 12,
  223. .planar = true,
  224. .uv_width_dec = 2,
  225. .uv_height_dec = 2,
  226. .uv_packed = true,
  227. }, {
  228. .fourcc = V4L2_PIX_FMT_YUV422P,
  229. .bpp = 16,
  230. .planar = true,
  231. .uv_width_dec = 2,
  232. .uv_height_dec = 1,
  233. }, {
  234. .fourcc = V4L2_PIX_FMT_NV16,
  235. .bpp = 16,
  236. .planar = true,
  237. .uv_width_dec = 2,
  238. .uv_height_dec = 1,
  239. .uv_packed = true,
  240. },
  241. };
  242. static const struct ipu_image_pixfmt *get_format(u32 fourcc)
  243. {
  244. const struct ipu_image_pixfmt *ret = NULL;
  245. unsigned int i;
  246. for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
  247. if (image_convert_formats[i].fourcc == fourcc) {
  248. ret = &image_convert_formats[i];
  249. break;
  250. }
  251. }
  252. return ret;
  253. }
  254. static void dump_format(struct ipu_image_convert_ctx *ctx,
  255. struct ipu_image_convert_image *ic_image)
  256. {
  257. struct ipu_image_convert_chan *chan = ctx->chan;
  258. struct ipu_image_convert_priv *priv = chan->priv;
  259. dev_dbg(priv->ipu->dev,
  260. "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
  261. chan->ic_task, ctx,
  262. ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
  263. ic_image->base.pix.width, ic_image->base.pix.height,
  264. ic_image->num_cols, ic_image->num_rows,
  265. ic_image->tile[0].width, ic_image->tile[0].height,
  266. ic_image->fmt->fourcc & 0xff,
  267. (ic_image->fmt->fourcc >> 8) & 0xff,
  268. (ic_image->fmt->fourcc >> 16) & 0xff,
  269. (ic_image->fmt->fourcc >> 24) & 0xff);
  270. }
  271. int ipu_image_convert_enum_format(int index, u32 *fourcc)
  272. {
  273. const struct ipu_image_pixfmt *fmt;
  274. if (index >= (int)ARRAY_SIZE(image_convert_formats))
  275. return -EINVAL;
  276. /* Format found */
  277. fmt = &image_convert_formats[index];
  278. *fourcc = fmt->fourcc;
  279. return 0;
  280. }
  281. EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
  282. static void free_dma_buf(struct ipu_image_convert_priv *priv,
  283. struct ipu_image_convert_dma_buf *buf)
  284. {
  285. if (buf->virt)
  286. dma_free_coherent(priv->ipu->dev,
  287. buf->len, buf->virt, buf->phys);
  288. buf->virt = NULL;
  289. buf->phys = 0;
  290. }
  291. static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
  292. struct ipu_image_convert_dma_buf *buf,
  293. int size)
  294. {
  295. buf->len = PAGE_ALIGN(size);
  296. buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
  297. GFP_DMA | GFP_KERNEL);
  298. if (!buf->virt) {
  299. dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
  300. return -ENOMEM;
  301. }
  302. return 0;
  303. }
  304. static inline int num_stripes(int dim)
  305. {
  306. if (dim <= 1024)
  307. return 1;
  308. else if (dim <= 2048)
  309. return 2;
  310. else
  311. return 4;
  312. }
  313. static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
  314. struct ipu_image_convert_image *image)
  315. {
  316. int i;
  317. for (i = 0; i < ctx->num_tiles; i++) {
  318. struct ipu_image_tile *tile = &image->tile[i];
  319. tile->height = image->base.pix.height / image->num_rows;
  320. tile->width = image->base.pix.width / image->num_cols;
  321. tile->size = ((tile->height * image->fmt->bpp) >> 3) *
  322. tile->width;
  323. if (image->fmt->planar) {
  324. tile->stride = tile->width;
  325. tile->rot_stride = tile->height;
  326. } else {
  327. tile->stride =
  328. (image->fmt->bpp * tile->width) >> 3;
  329. tile->rot_stride =
  330. (image->fmt->bpp * tile->height) >> 3;
  331. }
  332. }
  333. }
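/*
 * Worked example (illustrative): a 960x540 RGB565 tile gets
 * size = ((540 * 16) >> 3) * 960 = 1036800 bytes, stride = 1920 and
 * rot_stride = 1080 bytes, while planar formats use the tile width and
 * height in pixels directly as the Y-plane stride and rot_stride.
 */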
  334. /*
  335. * Use the rotation transformation to find the tile coordinates
  336. * (row, col) of a tile in the destination frame that corresponds
  337. * to the given tile coordinates of a source frame. The destination
  338. * coordinate is then converted to a tile index.
  339. */
  340. static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
  341. int src_row, int src_col)
  342. {
  343. struct ipu_image_convert_chan *chan = ctx->chan;
  344. struct ipu_image_convert_priv *priv = chan->priv;
  345. struct ipu_image_convert_image *s_image = &ctx->in;
  346. struct ipu_image_convert_image *d_image = &ctx->out;
  347. int dst_row, dst_col;
  348. /* with no rotation it's a 1:1 mapping */
  349. if (ctx->rot_mode == IPU_ROTATE_NONE)
  350. return src_row * s_image->num_cols + src_col;
  351. /*
  352. * before doing the transform, first we have to translate
  353. * source row,col for an origin in the center of s_image
  354. */
  355. src_row = src_row * 2 - (s_image->num_rows - 1);
  356. src_col = src_col * 2 - (s_image->num_cols - 1);
  357. /* do the rotation transform */
  358. if (ctx->rot_mode & IPU_ROT_BIT_90) {
  359. dst_col = -src_row;
  360. dst_row = src_col;
  361. } else {
  362. dst_col = src_col;
  363. dst_row = src_row;
  364. }
  365. /* apply flip */
  366. if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
  367. dst_col = -dst_col;
  368. if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
  369. dst_row = -dst_row;
  370. dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
  371. chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
  372. /*
  373. * finally translate dest row,col using an origin in upper
  374. * left of d_image
  375. */
  376. dst_row += d_image->num_rows - 1;
  377. dst_col += d_image->num_cols - 1;
  378. dst_row /= 2;
  379. dst_col /= 2;
  380. return dst_row * d_image->num_cols + dst_col;
  381. }
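/*
 * Worked example (illustrative): for a 2x2 tile grid and
 * IPU_ROTATE_90_RIGHT (IPU_ROT_BIT_90 set, no flips), source tile
 * [row 0, col 1] first becomes (row, col) = (-1, +1) relative to the
 * frame center, the 90 degree transform maps it to (+1, +1), and
 * translating back gives destination tile [row 1, col 1], i.e.
 * out_tile_map[1] = 3. The top-right source tile therefore lands in
 * the bottom-right of the destination frame, as expected for a
 * clockwise rotation.
 */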
  382. /*
  383. * Fill the out_tile_map[] with transformed destination tile indices.
  384. */
  385. static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
  386. {
  387. struct ipu_image_convert_image *s_image = &ctx->in;
  388. unsigned int row, col, tile = 0;
  389. for (row = 0; row < s_image->num_rows; row++) {
  390. for (col = 0; col < s_image->num_cols; col++) {
  391. ctx->out_tile_map[tile] =
  392. transform_tile_index(ctx, row, col);
  393. tile++;
  394. }
  395. }
  396. }
  397. static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
  398. struct ipu_image_convert_image *image)
  399. {
  400. struct ipu_image_convert_chan *chan = ctx->chan;
  401. struct ipu_image_convert_priv *priv = chan->priv;
  402. const struct ipu_image_pixfmt *fmt = image->fmt;
  403. unsigned int row, col, tile = 0;
  404. u32 H, w, h, y_stride, uv_stride;
  405. u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
  406. u32 y_row_off, y_col_off, y_off;
  407. u32 y_size, uv_size;
  408. /* setup some convenience vars */
  409. H = image->base.pix.height;
  410. y_stride = image->stride;
  411. uv_stride = y_stride / fmt->uv_width_dec;
  412. if (fmt->uv_packed)
  413. uv_stride *= 2;
  414. y_size = H * y_stride;
  415. uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
  416. for (row = 0; row < image->num_rows; row++) {
  417. w = image->tile[tile].width;
  418. h = image->tile[tile].height;
  419. y_row_off = row * h * y_stride;
  420. uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
  421. for (col = 0; col < image->num_cols; col++) {
  422. y_col_off = col * w;
  423. uv_col_off = y_col_off / fmt->uv_width_dec;
  424. if (fmt->uv_packed)
  425. uv_col_off *= 2;
  426. y_off = y_row_off + y_col_off;
  427. uv_off = uv_row_off + uv_col_off;
  428. u_off = y_size - y_off + uv_off;
  429. v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
  430. if (fmt->uv_swapped) {
  431. tmp = u_off;
  432. u_off = v_off;
  433. v_off = tmp;
  434. }
  435. image->tile[tile].offset = y_off;
  436. image->tile[tile].u_off = u_off;
  437. image->tile[tile++].v_off = v_off;
  438. dev_dbg(priv->ipu->dev,
  439. "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
  440. chan->ic_task, ctx,
  441. image->type == IMAGE_CONVERT_IN ?
  442. "Input" : "Output", row, col,
  443. y_off, u_off, v_off);
  444. }
  445. }
  446. }
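/*
 * Worked example (illustrative): a 1920x1088 YUV420 frame split into
 * 2x2 tiles has y_stride 1920, uv_stride 960, y_size 2088960 and
 * uv_size 522240 bytes. Tile [0,1] starts at y_off = 960, and its U
 * samples begin at absolute offset y_size + 480 in the buffer. Since
 * the IDMAC channel base address is programmed to phys0 + y_off, u_off
 * must be expressed relative to the tile start, which is why the code
 * computes u_off = y_size - y_off + uv_off = 2088480 (and
 * v_off = u_off + uv_size for the fully planar case).
 */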
  447. static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
  448. struct ipu_image_convert_image *image)
  449. {
  450. struct ipu_image_convert_chan *chan = ctx->chan;
  451. struct ipu_image_convert_priv *priv = chan->priv;
  452. const struct ipu_image_pixfmt *fmt = image->fmt;
  453. unsigned int row, col, tile = 0;
  454. u32 w, h, bpp, stride;
  455. u32 row_off, col_off;
  456. /* setup some convenience vars */
  457. stride = image->stride;
  458. bpp = fmt->bpp;
  459. for (row = 0; row < image->num_rows; row++) {
  460. w = image->tile[tile].width;
  461. h = image->tile[tile].height;
  462. row_off = row * h * stride;
  463. for (col = 0; col < image->num_cols; col++) {
  464. col_off = (col * w * bpp) >> 3;
  465. image->tile[tile].offset = row_off + col_off;
  466. image->tile[tile].u_off = 0;
  467. image->tile[tile++].v_off = 0;
  468. dev_dbg(priv->ipu->dev,
  469. "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
  470. chan->ic_task, ctx,
  471. image->type == IMAGE_CONVERT_IN ?
  472. "Input" : "Output", row, col,
  473. row_off + col_off);
  474. }
  475. }
  476. }
  477. static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
  478. struct ipu_image_convert_image *image)
  479. {
  480. if (image->fmt->planar)
  481. calc_tile_offsets_planar(ctx, image);
  482. else
  483. calc_tile_offsets_packed(ctx, image);
  484. }
  485. /*
  486. * return the number of runs in the given queue (pending_q or done_q)
  487. * for this context. hold irqlock when calling.
  488. */
  489. static int get_run_count(struct ipu_image_convert_ctx *ctx,
  490. struct list_head *q)
  491. {
  492. struct ipu_image_convert_run *run;
  493. int count = 0;
  494. lockdep_assert_held(&ctx->chan->irqlock);
  495. list_for_each_entry(run, q, list) {
  496. if (run->ctx == ctx)
  497. count++;
  498. }
  499. return count;
  500. }
  501. static void convert_stop(struct ipu_image_convert_run *run)
  502. {
  503. struct ipu_image_convert_ctx *ctx = run->ctx;
  504. struct ipu_image_convert_chan *chan = ctx->chan;
  505. struct ipu_image_convert_priv *priv = chan->priv;
  506. dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
  507. __func__, chan->ic_task, ctx, run);
  508. /* disable IC tasks and the channels */
  509. ipu_ic_task_disable(chan->ic);
  510. ipu_idmac_disable_channel(chan->in_chan);
  511. ipu_idmac_disable_channel(chan->out_chan);
  512. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  513. ipu_idmac_disable_channel(chan->rotation_in_chan);
  514. ipu_idmac_disable_channel(chan->rotation_out_chan);
  515. ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
  516. }
  517. ipu_ic_disable(chan->ic);
  518. }
  519. static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
  520. struct ipuv3_channel *channel,
  521. struct ipu_image_convert_image *image,
  522. enum ipu_rotate_mode rot_mode,
  523. bool rot_swap_width_height)
  524. {
  525. struct ipu_image_convert_chan *chan = ctx->chan;
  526. unsigned int burst_size;
  527. u32 width, height, stride;
  528. dma_addr_t addr0, addr1 = 0;
  529. struct ipu_image tile_image;
  530. unsigned int tile_idx[2];
  531. if (image->type == IMAGE_CONVERT_OUT) {
  532. tile_idx[0] = ctx->out_tile_map[0];
  533. tile_idx[1] = ctx->out_tile_map[1];
  534. } else {
  535. tile_idx[0] = 0;
  536. tile_idx[1] = 1;
  537. }
  538. if (rot_swap_width_height) {
  539. width = image->tile[0].height;
  540. height = image->tile[0].width;
  541. stride = image->tile[0].rot_stride;
  542. addr0 = ctx->rot_intermediate[0].phys;
  543. if (ctx->double_buffering)
  544. addr1 = ctx->rot_intermediate[1].phys;
  545. } else {
  546. width = image->tile[0].width;
  547. height = image->tile[0].height;
  548. stride = image->stride;
  549. addr0 = image->base.phys0 +
  550. image->tile[tile_idx[0]].offset;
  551. if (ctx->double_buffering)
  552. addr1 = image->base.phys0 +
  553. image->tile[tile_idx[1]].offset;
  554. }
  555. ipu_cpmem_zero(channel);
  556. memset(&tile_image, 0, sizeof(tile_image));
  557. tile_image.pix.width = tile_image.rect.width = width;
  558. tile_image.pix.height = tile_image.rect.height = height;
  559. tile_image.pix.bytesperline = stride;
  560. tile_image.pix.pixelformat = image->fmt->fourcc;
  561. tile_image.phys0 = addr0;
  562. tile_image.phys1 = addr1;
  563. ipu_cpmem_set_image(channel, &tile_image);
  564. if (image->fmt->planar && !rot_swap_width_height)
  565. ipu_cpmem_set_uv_offset(channel,
  566. image->tile[tile_idx[0]].u_off,
  567. image->tile[tile_idx[0]].v_off);
  568. if (rot_mode)
  569. ipu_cpmem_set_rotation(channel, rot_mode);
  570. if (channel == chan->rotation_in_chan ||
  571. channel == chan->rotation_out_chan) {
  572. burst_size = 8;
  573. ipu_cpmem_set_block_mode(channel);
  574. } else
  575. burst_size = (width % 16) ? 8 : 16;
  576. ipu_cpmem_set_burstsize(channel, burst_size);
  577. ipu_ic_task_idma_init(chan->ic, channel, width, height,
  578. burst_size, rot_mode);
  579. ipu_cpmem_set_axi_id(channel, 1);
  580. ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
  581. }
  582. static int convert_start(struct ipu_image_convert_run *run)
  583. {
  584. struct ipu_image_convert_ctx *ctx = run->ctx;
  585. struct ipu_image_convert_chan *chan = ctx->chan;
  586. struct ipu_image_convert_priv *priv = chan->priv;
  587. struct ipu_image_convert_image *s_image = &ctx->in;
  588. struct ipu_image_convert_image *d_image = &ctx->out;
  589. enum ipu_color_space src_cs, dest_cs;
  590. unsigned int dest_width, dest_height;
  591. int ret;
  592. dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
  593. __func__, chan->ic_task, ctx, run);
  594. src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
  595. dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
  596. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  597. /* swap width/height for resizer */
  598. dest_width = d_image->tile[0].height;
  599. dest_height = d_image->tile[0].width;
  600. } else {
  601. dest_width = d_image->tile[0].width;
  602. dest_height = d_image->tile[0].height;
  603. }
  604. /* setup the IC resizer and CSC */
  605. ret = ipu_ic_task_init(chan->ic,
  606. s_image->tile[0].width,
  607. s_image->tile[0].height,
  608. dest_width,
  609. dest_height,
  610. src_cs, dest_cs);
  611. if (ret) {
  612. dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
  613. return ret;
  614. }
  615. /* init the source MEM-->IC PP IDMAC channel */
  616. init_idmac_channel(ctx, chan->in_chan, s_image,
  617. IPU_ROTATE_NONE, false);
  618. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  619. /* init the IC PP-->MEM IDMAC channel */
  620. init_idmac_channel(ctx, chan->out_chan, d_image,
  621. IPU_ROTATE_NONE, true);
  622. /* init the MEM-->IC PP ROT IDMAC channel */
  623. init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
  624. ctx->rot_mode, true);
  625. /* init the destination IC PP ROT-->MEM IDMAC channel */
  626. init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
  627. IPU_ROTATE_NONE, false);
  628. /* now link IC PP-->MEM to MEM-->IC PP ROT */
  629. ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
  630. } else {
  631. /* init the destination IC PP-->MEM IDMAC channel */
  632. init_idmac_channel(ctx, chan->out_chan, d_image,
  633. ctx->rot_mode, false);
  634. }
  635. /* enable the IC */
  636. ipu_ic_enable(chan->ic);
  637. /* set buffers ready */
  638. ipu_idmac_select_buffer(chan->in_chan, 0);
  639. ipu_idmac_select_buffer(chan->out_chan, 0);
  640. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  641. ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
  642. if (ctx->double_buffering) {
  643. ipu_idmac_select_buffer(chan->in_chan, 1);
  644. ipu_idmac_select_buffer(chan->out_chan, 1);
  645. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  646. ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
  647. }
  648. /* enable the channels! */
  649. ipu_idmac_enable_channel(chan->in_chan);
  650. ipu_idmac_enable_channel(chan->out_chan);
  651. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  652. ipu_idmac_enable_channel(chan->rotation_in_chan);
  653. ipu_idmac_enable_channel(chan->rotation_out_chan);
  654. }
  655. ipu_ic_task_enable(chan->ic);
  656. ipu_cpmem_dump(chan->in_chan);
  657. ipu_cpmem_dump(chan->out_chan);
  658. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  659. ipu_cpmem_dump(chan->rotation_in_chan);
  660. ipu_cpmem_dump(chan->rotation_out_chan);
  661. }
  662. ipu_dump(priv->ipu);
  663. return 0;
  664. }
  665. /* hold irqlock when calling */
  666. static int do_run(struct ipu_image_convert_run *run)
  667. {
  668. struct ipu_image_convert_ctx *ctx = run->ctx;
  669. struct ipu_image_convert_chan *chan = ctx->chan;
  670. lockdep_assert_held(&chan->irqlock);
  671. ctx->in.base.phys0 = run->in_phys;
  672. ctx->out.base.phys0 = run->out_phys;
  673. ctx->cur_buf_num = 0;
  674. ctx->next_tile = 1;
  675. /* remove run from pending_q and set as current */
  676. list_del(&run->list);
  677. chan->current_run = run;
  678. return convert_start(run);
  679. }
  680. /* hold irqlock when calling */
  681. static void run_next(struct ipu_image_convert_chan *chan)
  682. {
  683. struct ipu_image_convert_priv *priv = chan->priv;
  684. struct ipu_image_convert_run *run, *tmp;
  685. int ret;
  686. lockdep_assert_held(&chan->irqlock);
  687. list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
  688. /* skip contexts that are aborting */
  689. if (run->ctx->aborting) {
  690. dev_dbg(priv->ipu->dev,
  691. "%s: task %u: skipping aborting ctx %p run %p\n",
  692. __func__, chan->ic_task, run->ctx, run);
  693. continue;
  694. }
  695. ret = do_run(run);
  696. if (!ret)
  697. break;
  698. /*
  699. * something went wrong with start, add the run
  700. * to done q and continue to the next run in the
  701. * pending q.
  702. */
  703. run->status = ret;
  704. list_add_tail(&run->list, &chan->done_q);
  705. chan->current_run = NULL;
  706. }
  707. }
  708. static void empty_done_q(struct ipu_image_convert_chan *chan)
  709. {
  710. struct ipu_image_convert_priv *priv = chan->priv;
  711. struct ipu_image_convert_run *run;
  712. unsigned long flags;
  713. spin_lock_irqsave(&chan->irqlock, flags);
  714. while (!list_empty(&chan->done_q)) {
  715. run = list_entry(chan->done_q.next,
  716. struct ipu_image_convert_run,
  717. list);
  718. list_del(&run->list);
  719. dev_dbg(priv->ipu->dev,
  720. "%s: task %u: completing ctx %p run %p with %d\n",
  721. __func__, chan->ic_task, run->ctx, run, run->status);
  722. /* call the completion callback and free the run */
  723. spin_unlock_irqrestore(&chan->irqlock, flags);
  724. run->ctx->complete(run, run->ctx->complete_context);
  725. spin_lock_irqsave(&chan->irqlock, flags);
  726. }
  727. spin_unlock_irqrestore(&chan->irqlock, flags);
  728. }
  729. /*
  730. * the bottom half thread clears out the done_q, calling the
  731. * completion handler for each.
  732. */
  733. static irqreturn_t do_bh(int irq, void *dev_id)
  734. {
  735. struct ipu_image_convert_chan *chan = dev_id;
  736. struct ipu_image_convert_priv *priv = chan->priv;
  737. struct ipu_image_convert_ctx *ctx;
  738. unsigned long flags;
  739. dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
  740. chan->ic_task);
  741. empty_done_q(chan);
  742. spin_lock_irqsave(&chan->irqlock, flags);
  743. /*
  744. * the done_q is now cleared out, so signal any aborting
  745. * contexts that their abort can complete.
  746. */
  747. list_for_each_entry(ctx, &chan->ctx_list, list) {
  748. if (ctx->aborting) {
  749. dev_dbg(priv->ipu->dev,
  750. "%s: task %u: signaling abort for ctx %p\n",
  751. __func__, chan->ic_task, ctx);
  752. complete(&ctx->aborted);
  753. }
  754. }
  755. spin_unlock_irqrestore(&chan->irqlock, flags);
  756. dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
  757. chan->ic_task);
  758. return IRQ_HANDLED;
  759. }
  760. /* hold irqlock when calling */
  761. static irqreturn_t do_irq(struct ipu_image_convert_run *run)
  762. {
  763. struct ipu_image_convert_ctx *ctx = run->ctx;
  764. struct ipu_image_convert_chan *chan = ctx->chan;
  765. struct ipu_image_tile *src_tile, *dst_tile;
  766. struct ipu_image_convert_image *s_image = &ctx->in;
  767. struct ipu_image_convert_image *d_image = &ctx->out;
  768. struct ipuv3_channel *outch;
  769. unsigned int dst_idx;
  770. lockdep_assert_held(&chan->irqlock);
  771. outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
  772. chan->rotation_out_chan : chan->out_chan;
  773. /*
  774. * It is difficult to stop the channel DMA before the channels
  775. * enter the paused state. Without double-buffering the channels
  776. * are always in a paused state when the EOF irq occurs, so it
  777. * is safe to stop the channels now. For double-buffering we
  778. * just ignore the abort until the operation completes, when it
  779. * is safe to shut down.
  780. */
  781. if (ctx->aborting && !ctx->double_buffering) {
  782. convert_stop(run);
  783. run->status = -EIO;
  784. goto done;
  785. }
  786. if (ctx->next_tile == ctx->num_tiles) {
  787. /*
  788. * the conversion is complete
  789. */
  790. convert_stop(run);
  791. run->status = 0;
  792. goto done;
  793. }
  794. /*
  795. * not done, place the next tile buffers.
  796. */
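/*
 * Without double-buffering, buffer 0 is simply reprogrammed with tile
 * 'next_tile' before being selected again. With double-buffering, tile
 * 'next_tile' is already queued in the other buffer, so the buffer that
 * has just completed (cur_buf_num) is reloaded with the tile after it,
 * i.e. 'next_tile + 1'.
 */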
  797. if (!ctx->double_buffering) {
  798. src_tile = &s_image->tile[ctx->next_tile];
  799. dst_idx = ctx->out_tile_map[ctx->next_tile];
  800. dst_tile = &d_image->tile[dst_idx];
  801. ipu_cpmem_set_buffer(chan->in_chan, 0,
  802. s_image->base.phys0 + src_tile->offset);
  803. ipu_cpmem_set_buffer(outch, 0,
  804. d_image->base.phys0 + dst_tile->offset);
  805. if (s_image->fmt->planar)
  806. ipu_cpmem_set_uv_offset(chan->in_chan,
  807. src_tile->u_off,
  808. src_tile->v_off);
  809. if (d_image->fmt->planar)
  810. ipu_cpmem_set_uv_offset(outch,
  811. dst_tile->u_off,
  812. dst_tile->v_off);
  813. ipu_idmac_select_buffer(chan->in_chan, 0);
  814. ipu_idmac_select_buffer(outch, 0);
  815. } else if (ctx->next_tile < ctx->num_tiles - 1) {
  816. src_tile = &s_image->tile[ctx->next_tile + 1];
  817. dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
  818. dst_tile = &d_image->tile[dst_idx];
  819. ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
  820. s_image->base.phys0 + src_tile->offset);
  821. ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
  822. d_image->base.phys0 + dst_tile->offset);
  823. ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
  824. ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
  825. ctx->cur_buf_num ^= 1;
  826. }
  827. ctx->next_tile++;
  828. return IRQ_HANDLED;
  829. done:
  830. list_add_tail(&run->list, &chan->done_q);
  831. chan->current_run = NULL;
  832. run_next(chan);
  833. return IRQ_WAKE_THREAD;
  834. }
  835. static irqreturn_t norotate_irq(int irq, void *data)
  836. {
  837. struct ipu_image_convert_chan *chan = data;
  838. struct ipu_image_convert_ctx *ctx;
  839. struct ipu_image_convert_run *run;
  840. unsigned long flags;
  841. irqreturn_t ret;
  842. spin_lock_irqsave(&chan->irqlock, flags);
  843. /* get current run and its context */
  844. run = chan->current_run;
  845. if (!run) {
  846. ret = IRQ_NONE;
  847. goto out;
  848. }
  849. ctx = run->ctx;
  850. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  851. /* this is a rotation operation, just ignore */
  852. spin_unlock_irqrestore(&chan->irqlock, flags);
  853. return IRQ_HANDLED;
  854. }
  855. ret = do_irq(run);
  856. out:
  857. spin_unlock_irqrestore(&chan->irqlock, flags);
  858. return ret;
  859. }
  860. static irqreturn_t rotate_irq(int irq, void *data)
  861. {
  862. struct ipu_image_convert_chan *chan = data;
  863. struct ipu_image_convert_priv *priv = chan->priv;
  864. struct ipu_image_convert_ctx *ctx;
  865. struct ipu_image_convert_run *run;
  866. unsigned long flags;
  867. irqreturn_t ret;
  868. spin_lock_irqsave(&chan->irqlock, flags);
  869. /* get current run and its context */
  870. run = chan->current_run;
  871. if (!run) {
  872. ret = IRQ_NONE;
  873. goto out;
  874. }
  875. ctx = run->ctx;
  876. if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
  877. /* this was NOT a rotation operation, shouldn't happen */
  878. dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
  879. spin_unlock_irqrestore(&chan->irqlock, flags);
  880. return IRQ_HANDLED;
  881. }
  882. ret = do_irq(run);
  883. out:
  884. spin_unlock_irqrestore(&chan->irqlock, flags);
  885. return ret;
  886. }
  887. /*
  888. * try to force the completion of runs for this ctx. Called when
  889. * abort wait times out in ipu_image_convert_abort().
  890. */
  891. static void force_abort(struct ipu_image_convert_ctx *ctx)
  892. {
  893. struct ipu_image_convert_chan *chan = ctx->chan;
  894. struct ipu_image_convert_run *run;
  895. unsigned long flags;
  896. spin_lock_irqsave(&chan->irqlock, flags);
  897. run = chan->current_run;
  898. if (run && run->ctx == ctx) {
  899. convert_stop(run);
  900. run->status = -EIO;
  901. list_add_tail(&run->list, &chan->done_q);
  902. chan->current_run = NULL;
  903. run_next(chan);
  904. }
  905. spin_unlock_irqrestore(&chan->irqlock, flags);
  906. empty_done_q(chan);
  907. }
  908. static void release_ipu_resources(struct ipu_image_convert_chan *chan)
  909. {
  910. if (chan->out_eof_irq >= 0)
  911. free_irq(chan->out_eof_irq, chan);
  912. if (chan->rot_out_eof_irq >= 0)
  913. free_irq(chan->rot_out_eof_irq, chan);
  914. if (!IS_ERR_OR_NULL(chan->in_chan))
  915. ipu_idmac_put(chan->in_chan);
  916. if (!IS_ERR_OR_NULL(chan->out_chan))
  917. ipu_idmac_put(chan->out_chan);
  918. if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
  919. ipu_idmac_put(chan->rotation_in_chan);
  920. if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
  921. ipu_idmac_put(chan->rotation_out_chan);
  922. if (!IS_ERR_OR_NULL(chan->ic))
  923. ipu_ic_put(chan->ic);
  924. chan->in_chan = chan->out_chan = chan->rotation_in_chan =
  925. chan->rotation_out_chan = NULL;
  926. chan->out_eof_irq = chan->rot_out_eof_irq = -1;
  927. }
  928. static int get_ipu_resources(struct ipu_image_convert_chan *chan)
  929. {
  930. const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
  931. struct ipu_image_convert_priv *priv = chan->priv;
  932. int ret;
  933. /* get IC */
  934. chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
  935. if (IS_ERR(chan->ic)) {
  936. dev_err(priv->ipu->dev, "could not acquire IC\n");
  937. ret = PTR_ERR(chan->ic);
  938. goto err;
  939. }
  940. /* get IDMAC channels */
  941. chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
  942. chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
  943. if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
  944. dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
  945. ret = -EBUSY;
  946. goto err;
  947. }
  948. chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
  949. chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
  950. if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
  951. dev_err(priv->ipu->dev,
  952. "could not acquire idmac rotation channels\n");
  953. ret = -EBUSY;
  954. goto err;
  955. }
  956. /* acquire the EOF interrupts */
  957. chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
  958. chan->out_chan,
  959. IPU_IRQ_EOF);
  960. ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
  961. 0, "ipu-ic", chan);
  962. if (ret < 0) {
  963. dev_err(priv->ipu->dev, "could not acquire irq %d\n",
  964. chan->out_eof_irq);
  965. chan->out_eof_irq = -1;
  966. goto err;
  967. }
  968. chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
  969. chan->rotation_out_chan,
  970. IPU_IRQ_EOF);
  971. ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
  972. 0, "ipu-ic", chan);
  973. if (ret < 0) {
  974. dev_err(priv->ipu->dev, "could not acquire irq %d\n",
  975. chan->rot_out_eof_irq);
  976. chan->rot_out_eof_irq = -1;
  977. goto err;
  978. }
  979. return 0;
  980. err:
  981. release_ipu_resources(chan);
  982. return ret;
  983. }
  984. static int fill_image(struct ipu_image_convert_ctx *ctx,
  985. struct ipu_image_convert_image *ic_image,
  986. struct ipu_image *image,
  987. enum ipu_image_convert_type type)
  988. {
  989. struct ipu_image_convert_priv *priv = ctx->chan->priv;
  990. ic_image->base = *image;
  991. ic_image->type = type;
  992. ic_image->fmt = get_format(image->pix.pixelformat);
  993. if (!ic_image->fmt) {
  994. dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
  995. type == IMAGE_CONVERT_OUT ? "Output" : "Input");
  996. return -EINVAL;
  997. }
  998. if (ic_image->fmt->planar)
  999. ic_image->stride = ic_image->base.pix.width;
  1000. else
  1001. ic_image->stride = ic_image->base.pix.bytesperline;
  1002. calc_tile_dimensions(ctx, ic_image);
  1003. calc_tile_offsets(ctx, ic_image);
  1004. return 0;
  1005. }
  1006. /* borrowed from drivers/media/v4l2-core/v4l2-common.c */
  1007. static unsigned int clamp_align(unsigned int x, unsigned int min,
  1008. unsigned int max, unsigned int align)
  1009. {
  1010. /* Bits that must be zero to be aligned */
  1011. unsigned int mask = ~((1 << align) - 1);
  1012. /* Clamp to aligned min and max */
  1013. x = clamp(x, (min + ~mask) & mask, max & mask);
  1014. /* Round to nearest aligned value */
  1015. if (align)
  1016. x = (x + (1 << (align - 1))) & mask;
  1017. return x;
  1018. }
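/*
 * Worked example (illustrative): clamp_align(1366, MIN_W, MAX_W, 4)
 * uses mask = ~15, clamps 1366 to the aligned range [16, 4096], and
 * rounds it to the nearest multiple of 16, giving 1360. With align = 0
 * the value is only clamped, not rounded.
 */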
  1019. /*
  1020. * We have to adjust the tile width such that the tile physaddrs and
  1021. * U and V plane offsets are multiples of 8 bytes as required by
  1022. * the IPU DMA Controller. For the planar formats, this corresponds
  1023. * to a pixel alignment of 16 (but use a more formal equation since
  1024. * the variables are available). For all the packed formats, 8 is
  1025. * good enough.
  1026. */
  1027. static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
  1028. {
  1029. return fmt->planar ? 8 * fmt->uv_width_dec : 8;
  1030. }
  1031. /*
  1032. * For tile height alignment, we have to ensure that the output tile
  1033. * heights are multiples of 8 lines if the IRT is required by the
  1034. * given rotation mode (the IRT performs rotations on 8x8 blocks
  1035. * at a time). If the IRT is not used, or for input image tiles,
  1036. * 2 lines are good enough.
  1037. */
  1038. static inline u32 tile_height_align(enum ipu_image_convert_type type,
  1039. enum ipu_rotate_mode rot_mode)
  1040. {
  1041. return (type == IMAGE_CONVERT_OUT &&
  1042. ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
  1043. }
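/*
 * Worked example (illustrative): for a packed format such as RGB565,
 * tile_width_align() returns 8 pixels, while for YUV420
 * (uv_width_dec = 2) it returns 16 so that the U/V tile offsets stay
 * 8-byte aligned. tile_height_align() returns 8 for an output image
 * when the IRT is used (e.g. IPU_ROTATE_90_RIGHT) and 2 otherwise.
 * ipu_image_convert_adjust() below multiplies these by the number of
 * tile columns/rows before aligning the full frame dimensions.
 */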
  1044. /* Adjusts input/output images to IPU restrictions */
  1045. void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
  1046. enum ipu_rotate_mode rot_mode)
  1047. {
  1048. const struct ipu_image_pixfmt *infmt, *outfmt;
  1049. unsigned int num_in_rows, num_in_cols;
  1050. unsigned int num_out_rows, num_out_cols;
  1051. u32 w_align, h_align;
  1052. infmt = get_format(in->pix.pixelformat);
  1053. outfmt = get_format(out->pix.pixelformat);
  1054. /* set some default pixel formats if needed */
  1055. if (!infmt) {
  1056. in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
  1057. infmt = get_format(V4L2_PIX_FMT_RGB24);
  1058. }
  1059. if (!outfmt) {
  1060. out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
  1061. outfmt = get_format(V4L2_PIX_FMT_RGB24);
  1062. }
  1063. /* image converter does not handle fields */
  1064. in->pix.field = out->pix.field = V4L2_FIELD_NONE;
  1065. /* resizer cannot downsize more than 4:1 */
  1066. if (ipu_rot_mode_is_irt(rot_mode)) {
  1067. out->pix.height = max_t(__u32, out->pix.height,
  1068. in->pix.width / 4);
  1069. out->pix.width = max_t(__u32, out->pix.width,
  1070. in->pix.height / 4);
  1071. } else {
  1072. out->pix.width = max_t(__u32, out->pix.width,
  1073. in->pix.width / 4);
  1074. out->pix.height = max_t(__u32, out->pix.height,
  1075. in->pix.height / 4);
  1076. }
  1077. /* get tiling rows/cols from output format */
  1078. num_out_rows = num_stripes(out->pix.height);
  1079. num_out_cols = num_stripes(out->pix.width);
  1080. if (ipu_rot_mode_is_irt(rot_mode)) {
  1081. num_in_rows = num_out_cols;
  1082. num_in_cols = num_out_rows;
  1083. } else {
  1084. num_in_rows = num_out_rows;
  1085. num_in_cols = num_out_cols;
  1086. }
  1087. /* align input width/height */
  1088. w_align = ilog2(tile_width_align(infmt) * num_in_cols);
  1089. h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
  1090. num_in_rows);
  1091. in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
  1092. in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
  1093. /* align output width/height */
  1094. w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
  1095. h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
  1096. num_out_rows);
  1097. out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
  1098. out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
  1099. /* set input/output strides and image sizes */
  1100. in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
  1101. in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
  1102. out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
  1103. out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
  1104. }
  1105. EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
  1106. /*
  1107. * this is used by ipu_image_convert_prepare() to verify that the given input and
  1108. * output images are valid before starting the conversion. Clients can
  1109. * also call it before calling ipu_image_convert_prepare().
  1110. */
  1111. int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
  1112. enum ipu_rotate_mode rot_mode)
  1113. {
  1114. struct ipu_image testin, testout;
  1115. testin = *in;
  1116. testout = *out;
  1117. ipu_image_convert_adjust(&testin, &testout, rot_mode);
  1118. if (testin.pix.width != in->pix.width ||
  1119. testin.pix.height != in->pix.height ||
  1120. testout.pix.width != out->pix.width ||
  1121. testout.pix.height != out->pix.height)
  1122. return -EINVAL;
  1123. return 0;
  1124. }
  1125. EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
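/*
 * A minimal sketch of how a caller might use the adjust/verify pair
 * before preparing a conversion (hypothetical client code, error
 * handling trimmed):
 *
 *	struct ipu_image in, out;
 *
 *	// fill in in.pix and out.pix with the requested formats ...
 *	ipu_image_convert_adjust(&in, &out, IPU_ROTATE_90_RIGHT);
 *	// in/out now hold dimensions the converter can actually handle
 *	if (ipu_image_convert_verify(&in, &out, IPU_ROTATE_90_RIGHT))
 *		return -EINVAL;	// should not happen after adjust
 */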
  1126. /*
  1127. * Call ipu_image_convert_prepare() to prepare for the conversion of
  1128. * given images and rotation mode. Returns a new conversion context.
  1129. */
  1130. struct ipu_image_convert_ctx *
  1131. ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
  1132. struct ipu_image *in, struct ipu_image *out,
  1133. enum ipu_rotate_mode rot_mode,
  1134. ipu_image_convert_cb_t complete,
  1135. void *complete_context)
  1136. {
  1137. struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
  1138. struct ipu_image_convert_image *s_image, *d_image;
  1139. struct ipu_image_convert_chan *chan;
  1140. struct ipu_image_convert_ctx *ctx;
  1141. unsigned long flags;
  1142. bool get_res;
  1143. int ret;
  1144. if (!in || !out || !complete ||
  1145. (ic_task != IC_TASK_VIEWFINDER &&
  1146. ic_task != IC_TASK_POST_PROCESSOR))
  1147. return ERR_PTR(-EINVAL);
  1148. /* verify the in/out images before continuing */
  1149. ret = ipu_image_convert_verify(in, out, rot_mode);
  1150. if (ret) {
  1151. dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
  1152. __func__);
  1153. return ERR_PTR(ret);
  1154. }
  1155. chan = &priv->chan[ic_task];
  1156. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  1157. if (!ctx)
  1158. return ERR_PTR(-ENOMEM);
  1159. dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
  1160. chan->ic_task, ctx);
  1161. ctx->chan = chan;
  1162. init_completion(&ctx->aborted);
  1163. s_image = &ctx->in;
  1164. d_image = &ctx->out;
  1165. /* set tiling and rotation */
  1166. d_image->num_rows = num_stripes(out->pix.height);
  1167. d_image->num_cols = num_stripes(out->pix.width);
  1168. if (ipu_rot_mode_is_irt(rot_mode)) {
  1169. s_image->num_rows = d_image->num_cols;
  1170. s_image->num_cols = d_image->num_rows;
  1171. } else {
  1172. s_image->num_rows = d_image->num_rows;
  1173. s_image->num_cols = d_image->num_cols;
  1174. }
  1175. ctx->num_tiles = d_image->num_cols * d_image->num_rows;
  1176. ctx->rot_mode = rot_mode;
  1177. ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
  1178. if (ret)
  1179. goto out_free;
  1180. ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
  1181. if (ret)
  1182. goto out_free;
  1183. calc_out_tile_map(ctx);
  1184. dump_format(ctx, s_image);
  1185. dump_format(ctx, d_image);
  1186. ctx->complete = complete;
  1187. ctx->complete_context = complete_context;
  1188. /*
  1189. * Can we use double-buffering for this operation? If there is
  1190. * only one tile (the whole image can be converted in a single
  1191. * operation) there's no point in using double-buffering. Also,
  1192. * the IPU's IDMAC channels allow only a single U and V plane
  1193. * offset shared between both buffers, but these offsets change
  1194. * for every tile, and therefore would have to be updated for
  1195. * each buffer, which is not possible. So double-buffering is
  1196. * impossible when either the source or destination image is
  1197. * a planar format (YUV420, YUV422P, etc.).
  1198. */
  1199. ctx->double_buffering = (ctx->num_tiles > 1 &&
  1200. !s_image->fmt->planar &&
  1201. !d_image->fmt->planar);
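/*
 * For example, a tiled RGB565-to-RGB565 conversion can ping-pong tiles
 * through both IDMAC buffers, whereas any conversion with an NV12 or
 * YUV420 input or output falls back to reprogramming a single buffer
 * per tile in the EOF handler.
 */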
  1202. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1203. ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
  1204. d_image->tile[0].size);
  1205. if (ret)
  1206. goto out_free;
  1207. if (ctx->double_buffering) {
  1208. ret = alloc_dma_buf(priv,
  1209. &ctx->rot_intermediate[1],
  1210. d_image->tile[0].size);
  1211. if (ret)
  1212. goto out_free_dmabuf0;
  1213. }
  1214. }
  1215. spin_lock_irqsave(&chan->irqlock, flags);
  1216. get_res = list_empty(&chan->ctx_list);
  1217. list_add_tail(&ctx->list, &chan->ctx_list);
  1218. spin_unlock_irqrestore(&chan->irqlock, flags);
  1219. if (get_res) {
  1220. ret = get_ipu_resources(chan);
  1221. if (ret)
  1222. goto out_free_dmabuf1;
  1223. }
  1224. return ctx;
  1225. out_free_dmabuf1:
  1226. free_dma_buf(priv, &ctx->rot_intermediate[1]);
  1227. spin_lock_irqsave(&chan->irqlock, flags);
  1228. list_del(&ctx->list);
  1229. spin_unlock_irqrestore(&chan->irqlock, flags);
  1230. out_free_dmabuf0:
  1231. free_dma_buf(priv, &ctx->rot_intermediate[0]);
  1232. out_free:
  1233. kfree(ctx);
  1234. return ERR_PTR(ret);
  1235. }
  1236. EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
  1237. /*
  1238. * Carry out a single image conversion run. Only the physaddr's of the input
  1239. * and output image buffers are needed. The conversion context must have
  1240. * been created previously with ipu_image_convert_prepare().
  1241. */
  1242. int ipu_image_convert_queue(struct ipu_image_convert_run *run)
  1243. {
  1244. struct ipu_image_convert_chan *chan;
  1245. struct ipu_image_convert_priv *priv;
  1246. struct ipu_image_convert_ctx *ctx;
  1247. unsigned long flags;
  1248. int ret = 0;
  1249. if (!run || !run->ctx || !run->in_phys || !run->out_phys)
  1250. return -EINVAL;
  1251. ctx = run->ctx;
  1252. chan = ctx->chan;
  1253. priv = chan->priv;
  1254. dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
  1255. chan->ic_task, ctx, run);
  1256. INIT_LIST_HEAD(&run->list);
  1257. spin_lock_irqsave(&chan->irqlock, flags);
  1258. if (ctx->aborting) {
  1259. ret = -EIO;
  1260. goto unlock;
  1261. }
  1262. list_add_tail(&run->list, &chan->pending_q);
  1263. if (!chan->current_run) {
  1264. ret = do_run(run);
  1265. if (ret)
  1266. chan->current_run = NULL;
  1267. }
  1268. unlock:
  1269. spin_unlock_irqrestore(&chan->irqlock, flags);
  1270. return ret;
  1271. }
  1272. EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
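/*
 * A minimal sketch of the run lifecycle from a client's point of view
 * (hypothetical code; the completion callback is normally invoked from
 * the threaded EOF interrupt handler):
 *
 *	static void my_done(struct ipu_image_convert_run *run, void *arg)
 *	{
 *		// conversion finished, run->status holds the result;
 *		// the client owns 'run' again and may free or requeue it
 *	}
 *
 *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
 *					&in, &out, rot_mode, my_done, NULL);
 *	run->ctx = ctx;
 *	run->in_phys = in_buffer_dma_addr;
 *	run->out_phys = out_buffer_dma_addr;
 *	ipu_image_convert_queue(run);
 *	// ... later, once all queued runs have completed:
 *	ipu_image_convert_unprepare(ctx);
 */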
  1273. /* Abort any active or pending conversions for this context */
  1274. void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
  1275. {
  1276. struct ipu_image_convert_chan *chan = ctx->chan;
  1277. struct ipu_image_convert_priv *priv = chan->priv;
  1278. struct ipu_image_convert_run *run, *active_run, *tmp;
  1279. unsigned long flags;
  1280. int run_count, ret;
  1281. bool need_abort;
  1282. reinit_completion(&ctx->aborted);
  1283. spin_lock_irqsave(&chan->irqlock, flags);
  1284. /* move all remaining pending runs in this context to done_q */
  1285. list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
  1286. if (run->ctx != ctx)
  1287. continue;
  1288. run->status = -EIO;
  1289. list_move_tail(&run->list, &chan->done_q);
  1290. }
  1291. run_count = get_run_count(ctx, &chan->done_q);
  1292. active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
  1293. chan->current_run : NULL;
  1294. need_abort = (run_count || active_run);
  1295. ctx->aborting = need_abort;
  1296. spin_unlock_irqrestore(&chan->irqlock, flags);
  1297. if (!need_abort) {
  1298. dev_dbg(priv->ipu->dev,
  1299. "%s: task %u: no abort needed for ctx %p\n",
  1300. __func__, chan->ic_task, ctx);
  1301. return;
  1302. }
  1303. dev_dbg(priv->ipu->dev,
  1304. "%s: task %u: wait for completion: %d runs, active run %p\n",
  1305. __func__, chan->ic_task, run_count, active_run);
  1306. ret = wait_for_completion_timeout(&ctx->aborted,
  1307. msecs_to_jiffies(10000));
  1308. if (ret == 0) {
  1309. dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
  1310. force_abort(ctx);
  1311. }
  1312. ctx->aborting = false;
  1313. }
  1314. EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
  1315. /* Unprepare image conversion context */
  1316. void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
  1317. {
  1318. struct ipu_image_convert_chan *chan = ctx->chan;
  1319. struct ipu_image_convert_priv *priv = chan->priv;
  1320. unsigned long flags;
  1321. bool put_res;
  1322. /* make sure no runs are hanging around */
  1323. ipu_image_convert_abort(ctx);
  1324. dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
  1325. chan->ic_task, ctx);
  1326. spin_lock_irqsave(&chan->irqlock, flags);
  1327. list_del(&ctx->list);
  1328. put_res = list_empty(&chan->ctx_list);
  1329. spin_unlock_irqrestore(&chan->irqlock, flags);
  1330. if (put_res)
  1331. release_ipu_resources(chan);
  1332. free_dma_buf(priv, &ctx->rot_intermediate[1]);
  1333. free_dma_buf(priv, &ctx->rot_intermediate[0]);
  1334. kfree(ctx);
  1335. }
  1336. EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
  1337. /*
  1338. * "Canned" asynchronous single image conversion. Allocates and returns
  1339. * a new conversion run. On successful return the caller must free the
  1340. * run and call ipu_image_convert_unprepare() after conversion completes.
  1341. */
  1342. struct ipu_image_convert_run *
  1343. ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
  1344. struct ipu_image *in, struct ipu_image *out,
  1345. enum ipu_rotate_mode rot_mode,
  1346. ipu_image_convert_cb_t complete,
  1347. void *complete_context)
  1348. {
  1349. struct ipu_image_convert_ctx *ctx;
  1350. struct ipu_image_convert_run *run;
  1351. int ret;
  1352. ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
  1353. complete, complete_context);
  1354. if (IS_ERR(ctx))
  1355. return ERR_CAST(ctx);
  1356. run = kzalloc(sizeof(*run), GFP_KERNEL);
  1357. if (!run) {
  1358. ipu_image_convert_unprepare(ctx);
  1359. return ERR_PTR(-ENOMEM);
  1360. }
  1361. run->ctx = ctx;
  1362. run->in_phys = in->phys0;
  1363. run->out_phys = out->phys0;
  1364. ret = ipu_image_convert_queue(run);
  1365. if (ret) {
  1366. ipu_image_convert_unprepare(ctx);
  1367. kfree(run);
  1368. return ERR_PTR(ret);
  1369. }
  1370. return run;
  1371. }
  1372. EXPORT_SYMBOL_GPL(ipu_image_convert);
  1373. /* "Canned" synchronous single image conversion */
  1374. static void image_convert_sync_complete(struct ipu_image_convert_run *run,
  1375. void *data)
  1376. {
  1377. struct completion *comp = data;
  1378. complete(comp);
  1379. }
  1380. int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
  1381. struct ipu_image *in, struct ipu_image *out,
  1382. enum ipu_rotate_mode rot_mode)
  1383. {
  1384. struct ipu_image_convert_run *run;
  1385. struct completion comp;
  1386. int ret;
  1387. init_completion(&comp);
  1388. run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
  1389. image_convert_sync_complete, &comp);
  1390. if (IS_ERR(run))
  1391. return PTR_ERR(run);
  1392. ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
  1393. ret = (ret == 0) ? -ETIMEDOUT : 0;
  1394. ipu_image_convert_unprepare(run->ctx);
  1395. kfree(run);
  1396. return ret;
  1397. }
  1398. EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
  1399. int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
  1400. {
  1401. struct ipu_image_convert_priv *priv;
  1402. int i;
  1403. priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  1404. if (!priv)
  1405. return -ENOMEM;
  1406. ipu->image_convert_priv = priv;
  1407. priv->ipu = ipu;
  1408. for (i = 0; i < IC_NUM_TASKS; i++) {
  1409. struct ipu_image_convert_chan *chan = &priv->chan[i];
  1410. chan->ic_task = i;
  1411. chan->priv = priv;
  1412. chan->dma_ch = &image_convert_dma_chan[i];
  1413. chan->out_eof_irq = -1;
  1414. chan->rot_out_eof_irq = -1;
  1415. spin_lock_init(&chan->irqlock);
  1416. INIT_LIST_HEAD(&chan->ctx_list);
  1417. INIT_LIST_HEAD(&chan->pending_q);
  1418. INIT_LIST_HEAD(&chan->done_q);
  1419. }
  1420. return 0;
  1421. }
  1422. void ipu_image_convert_exit(struct ipu_soc *ipu)
  1423. {
  1424. }