/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"

/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame.
 *
 * FIXME: at this point there is no attempt to deal with visible seams
 * at the tile boundaries when upscaling. The seams are caused by a reset
 * of the bilinear upscale interpolation when starting a new tile. The
 * seams are barely visible for small upscale factors, but become
 * increasingly visible as the upscale factor gets larger, since more
 * interpolated pixels get thrown out at the tile boundaries. A possible
 * fix might be to overlap tiles of different sizes, but this must be done
 * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
 * alignment restrictions of each tile.
 */
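
/*
 * For example, with the stripe selection below a 1920x1080 output frame
 * is split into 2 columns and 2 rows (both dimensions exceed 1024 but not
 * 2048), giving 4 tiles of 960x540 each.
 */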

#define MAX_STRIPES_W    4
#define MAX_STRIPES_H    4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W     16
#define MIN_H     8
#define MAX_W     4096
#define MAX_H     4096

enum ipu_image_convert_type {
	IMAGE_CONVERT_IN = 0,
	IMAGE_CONVERT_OUT,
};

struct ipu_image_convert_dma_buf {
	void *virt;
	dma_addr_t phys;
	unsigned long len;
};

struct ipu_image_convert_dma_chan {
	int in;
	int out;
	int rot_in;
	int rot_out;
	int vdi_in_p;
	int vdi_in;
	int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
	u32 width;
	u32 height;
	u32 left;
	u32 top;
	/* size and strides are in bytes */
	u32 size;
	u32 stride;
	u32 rot_stride;
	/* start Y or packed offset of this tile */
	u32 offset;
	/* offset from start to tile in U plane, for planar formats */
	u32 u_off;
	/* offset from start to tile in V plane, for planar formats */
	u32 v_off;
};

struct ipu_image_convert_image {
	struct ipu_image base;
	enum ipu_image_convert_type type;

	const struct ipu_image_pixfmt *fmt;
	unsigned int stride;

	/* # of rows (horizontal stripes) if dest height is > 1024 */
	unsigned int num_rows;
	/* # of columns (vertical stripes) if dest width is > 1024 */
	unsigned int num_cols;

	struct ipu_image_tile tile[MAX_TILES];
};

struct ipu_image_pixfmt {
	u32 fourcc;        /* V4L2 fourcc */
	int bpp;           /* total bpp */
	int uv_width_dec;  /* decimation in width for U/V planes */
	int uv_height_dec; /* decimation in height for U/V planes */
	bool planar;       /* planar format */
	bool uv_swapped;   /* U and V planes are swapped */
	bool uv_packed;    /* partial planar (U and V in same plane) */
};

struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

struct ipu_image_convert_ctx {
	struct ipu_image_convert_chan *chan;

	ipu_image_convert_cb_t complete;
	void *complete_context;

	/* Source/destination image data and rotation mode */
	struct ipu_image_convert_image in;
	struct ipu_image_convert_image out;
	enum ipu_rotate_mode rot_mode;
	u32 downsize_coeff_h;
	u32 downsize_coeff_v;
	u32 image_resize_coeff_h;
	u32 image_resize_coeff_v;
	u32 resize_coeffs_h[MAX_STRIPES_W];
	u32 resize_coeffs_v[MAX_STRIPES_H];

	/* intermediate buffer for rotation */
	struct ipu_image_convert_dma_buf rot_intermediate[2];

	/* current buffer number for double buffering */
	int cur_buf_num;

	bool aborting;
	struct completion aborted;

	/* can we use double-buffering for this conversion operation? */
	bool double_buffering;
	/* num_rows * num_cols */
	unsigned int num_tiles;
	/* next tile to process */
	unsigned int next_tile;
	/* where to place converted tile in dest image */
	unsigned int out_tile_map[MAX_TILES];

	struct list_head list;
};

struct ipu_image_convert_chan {
	struct ipu_image_convert_priv *priv;

	enum ipu_ic_task ic_task;
	const struct ipu_image_convert_dma_chan *dma_ch;

	struct ipu_ic *ic;
	struct ipuv3_channel *in_chan;
	struct ipuv3_channel *out_chan;
	struct ipuv3_channel *rotation_in_chan;
	struct ipuv3_channel *rotation_out_chan;

	/* the IPU end-of-frame irqs */
	int out_eof_irq;
	int rot_out_eof_irq;

	spinlock_t irqlock;

	/* list of convert contexts */
	struct list_head ctx_list;
	/* queue of conversion runs */
	struct list_head pending_q;
	/* queue of completed runs */
	struct list_head done_q;

	/* the current conversion run */
	struct ipu_image_convert_run *current_run;
};

struct ipu_image_convert_priv {
	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
	struct ipu_soc *ipu;
};

static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
	[IC_TASK_VIEWFINDER] = {
		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
	},
	[IC_TASK_POST_PROCESSOR] = {
		.in = IPUV3_CHANNEL_MEM_IC_PP,
		.out = IPUV3_CHANNEL_IC_PP_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
	},
};

static const struct ipu_image_pixfmt image_convert_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.bpp    = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB24,
		.bpp    = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR24,
		.bpp    = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_XRGB32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_XBGR32,
		.bpp    = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.bpp    = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YVU420,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_swapped = true,
	}, {
		.fourcc = V4L2_PIX_FMT_NV12,
		.bpp    = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_packed = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV422P,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_NV16,
		.bpp    = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
		.uv_packed = true,
	},
};

static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
	const struct ipu_image_pixfmt *ret = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
		if (image_convert_formats[i].fourcc == fourcc) {
			ret = &image_convert_formats[i];
			break;
		}
	}

	return ret;
}

static void dump_format(struct ipu_image_convert_ctx *ctx,
			struct ipu_image_convert_image *ic_image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev,
		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
		chan->ic_task, ctx,
		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
		ic_image->base.pix.width, ic_image->base.pix.height,
		ic_image->num_cols, ic_image->num_rows,
		ic_image->tile[0].width, ic_image->tile[0].height,
		ic_image->fmt->fourcc & 0xff,
		(ic_image->fmt->fourcc >> 8) & 0xff,
		(ic_image->fmt->fourcc >> 16) & 0xff,
		(ic_image->fmt->fourcc >> 24) & 0xff);
}

int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
	const struct ipu_image_pixfmt *fmt;

	if (index >= (int)ARRAY_SIZE(image_convert_formats))
		return -EINVAL;

	/* Format found */
	fmt = &image_convert_formats[index];
	*fourcc = fmt->fourcc;
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);

static void free_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf)
{
	if (buf->virt)
		dma_free_coherent(priv->ipu->dev,
				  buf->len, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
}

static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf,
			 int size)
{
	buf->len = PAGE_ALIGN(size);
	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
				       GFP_DMA | GFP_KERNEL);
	if (!buf->virt) {
		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
		return -ENOMEM;
	}

	return 0;
}

static inline int num_stripes(int dim)
{
	if (dim <= 1024)
		return 1;
	else if (dim <= 2048)
		return 2;
	else
		return 4;
}

/*
 * Calculate downsizing coefficients, which are the same for all tiles,
 * and bilinear resizing coefficients, which are used to find the best
 * seam positions.
 */
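/*
 * Illustrative example (not from the hardware documentation): downscaling
 * a 1280 pixel wide rectangle to 640 pixels takes one /2 decimation pass
 * (downsize_coeff_h = 1, downsized_width = 640), after which the bilinear
 * step is 8192 * (640 - 1) / (640 - 1) = 8192, i.e. a 1:1 ratio.
 */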
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
					  struct ipu_image *in,
					  struct ipu_image *out)
{
	u32 downsized_width = in->rect.width;
	u32 downsized_height = in->rect.height;
	u32 downsize_coeff_v = 0;
	u32 downsize_coeff_h = 0;
	u32 resized_width = out->rect.width;
	u32 resized_height = out->rect.height;
	u32 resize_coeff_h;
	u32 resize_coeff_v;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		resized_width = out->rect.height;
		resized_height = out->rect.width;
	}

	/* Do not let invalid input lead to an endless loop below */
	if (WARN_ON(resized_width == 0 || resized_height == 0))
		return -EINVAL;

	while (downsized_width >= resized_width * 2) {
		downsized_width >>= 1;
		downsize_coeff_h++;
	}

	while (downsized_height >= resized_height * 2) {
		downsized_height >>= 1;
		downsize_coeff_v++;
	}

	/*
	 * Calculate the bilinear resizing coefficients that could be used if
	 * we were converting with a single tile. The bottom right output pixel
	 * should sample as close as possible to the bottom right input pixel
	 * out of the decimator, but not overshoot it:
	 */
	resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
	resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);

	dev_dbg(ctx->chan->priv->ipu->dev,
		"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
		__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
		resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);

	if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
	    resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
		return -EINVAL;

	ctx->downsize_coeff_h = downsize_coeff_h;
	ctx->downsize_coeff_v = downsize_coeff_v;
	ctx->image_resize_coeff_h = resize_coeff_h;
	ctx->image_resize_coeff_v = resize_coeff_v;

	return 0;
}

static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
				 struct ipu_image_convert_image *image)
{
	unsigned int i;

	for (i = 0; i < ctx->num_tiles; i++) {
		struct ipu_image_tile *tile = &image->tile[i];
		const unsigned int row = i / image->num_cols;
		const unsigned int col = i % image->num_cols;

		tile->height = image->base.pix.height / image->num_rows;
		tile->width = image->base.pix.width / image->num_cols;
		tile->left = col * tile->width;
		tile->top = row * tile->height;
		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
			tile->width;

		if (image->fmt->planar) {
			tile->stride = tile->width;
			tile->rot_stride = tile->height;
		} else {
			tile->stride =
				(image->fmt->bpp * tile->width) >> 3;
			tile->rot_stride =
				(image->fmt->bpp * tile->height) >> 3;
		}
	}
}

/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
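/*
 * Illustrative example: for a 2x2 grid the doubling below re-centres the
 * source coordinates about the frame middle, so (row, col) = (0,0) becomes
 * (-1,-1) and (0,1) becomes (-1,1), which turns the 90 degree rotation and
 * the flips into simple sign/swap operations about the origin.
 */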
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
				int src_row, int src_col)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	int dst_row, dst_col;

	/* with no rotation it's a 1:1 mapping */
	if (ctx->rot_mode == IPU_ROTATE_NONE)
		return src_row * s_image->num_cols + src_col;

	/*
	 * before doing the transform, first we have to translate
	 * source row,col for an origin in the center of s_image
	 */
	src_row = src_row * 2 - (s_image->num_rows - 1);
	src_col = src_col * 2 - (s_image->num_cols - 1);

	/* do the rotation transform */
	if (ctx->rot_mode & IPU_ROT_BIT_90) {
		dst_col = -src_row;
		dst_row = src_col;
	} else {
		dst_col = src_col;
		dst_row = src_row;
	}

	/* apply flip */
	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
		dst_col = -dst_col;
	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
		dst_row = -dst_row;

	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

	/*
	 * finally translate dest row,col using an origin in upper
	 * left of d_image
	 */
	dst_row += d_image->num_rows - 1;
	dst_col += d_image->num_cols - 1;
	dst_row /= 2;
	dst_col /= 2;

	return dst_row * d_image->num_cols + dst_col;
}

/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
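/*
 * With IPU_ROTATE_NONE this map is simply the identity
 * (out_tile_map[i] == i); flips and 90 degree rotations permute it.
 */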
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_image *s_image = &ctx->in;
	unsigned int row, col, tile = 0;

	for (row = 0; row < s_image->num_rows; row++) {
		for (col = 0; col < s_image->num_cols; col++) {
			ctx->out_tile_map[tile] =
				transform_tile_index(ctx, row, col);
			tile++;
		}
	}
}

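/*
 * Illustrative example (assuming a 640x480 V4L2_PIX_FMT_YUV420 frame with
 * no padding): for the top-left tile the loop below yields offset = 0,
 * u_off = y_size = 640 * 480 = 307200 and v_off = u_off + uv_size =
 * 307200 + 76800 = 384000, i.e. the usual contiguous Y/U/V plane layout.
 * u_off and v_off are stored relative to the tile's own Y/packed offset.
 */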
static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 H, top, y_stride, uv_stride;
	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
	u32 y_row_off, y_col_off, y_off;
	u32 y_size, uv_size;

	/* setup some convenience vars */
	H = image->base.pix.height;

	y_stride = image->stride;
	uv_stride = y_stride / fmt->uv_width_dec;
	if (fmt->uv_packed)
		uv_stride *= 2;

	y_size = H * y_stride;
	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

	for (row = 0; row < image->num_rows; row++) {
		top = image->tile[tile].top;
		y_row_off = top * y_stride;
		uv_row_off = (top * uv_stride) / fmt->uv_height_dec;

		for (col = 0; col < image->num_cols; col++) {
			y_col_off = image->tile[tile].left;
			uv_col_off = y_col_off / fmt->uv_width_dec;
			if (fmt->uv_packed)
				uv_col_off *= 2;

			y_off = y_row_off + y_col_off;
			uv_off = uv_row_off + uv_col_off;

			u_off = y_size - y_off + uv_off;
			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
			if (fmt->uv_swapped) {
				tmp = u_off;
				u_off = v_off;
				v_off = tmp;
			}

			image->tile[tile].offset = y_off;
			image->tile[tile].u_off = u_off;
			image->tile[tile++].v_off = v_off;

			if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"y_off %08x, u_off %08x, v_off %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					y_off, u_off, v_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
				    struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 bpp, stride, offset;
	u32 row_off, col_off;

	/* setup some convenience vars */
	stride = image->stride;
	bpp = fmt->bpp;

	for (row = 0; row < image->num_rows; row++) {
		row_off = image->tile[tile].top * stride;

		for (col = 0; col < image->num_cols; col++) {
			col_off = (image->tile[tile].left * bpp) >> 3;

			offset = row_off + col_off;

			image->tile[tile].offset = offset;
			image->tile[tile].u_off = 0;
			image->tile[tile++].v_off = 0;

			if (offset & 0x7) {
				dev_err(priv->ipu->dev,
					"task %u: ctx %p: %s@[%d,%d]: "
					"phys %08x\n",
					chan->ic_task, ctx,
					image->type == IMAGE_CONVERT_IN ?
					"Input" : "Output", row, col,
					row_off + col_off);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
			     struct ipu_image_convert_image *image)
{
	if (image->fmt->planar)
		return calc_tile_offsets_planar(ctx, image);

	return calc_tile_offsets_packed(ctx, image);
}

/*
 * Calculate the resizing ratio for the IC main processing section given input
 * size, fixed downsizing coefficient, and output size.
 * Either round to closest for the next tile's first pixel to minimize seams
 * and distortion (for all but right column / bottom row), or round down to
 * avoid sampling beyond the edges of the input image for this tile's last
 * pixel.
 * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
 */
static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
			     u32 output_size, bool allow_overshoot)
{
	u32 downsized = input_size >> downsize_coeff;

	if (allow_overshoot)
		return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
	else
		return 8192 * (downsized - 1) / (output_size - 1);
}

/*
 * Slightly modify resize coefficients per tile to hide the bilinear
 * interpolator reset at tile borders, shifting the right / bottom edge
 * by up to a half input pixel. This removes noticeable seams between
 * tiles at higher upscaling factors.
 */
static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_tile *in_tile, *out_tile;
	unsigned int col, row, tile_idx;
	unsigned int last_output;

	for (col = 0; col < ctx->in.num_cols; col++) {
		bool closest = (col < ctx->in.num_cols - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
		u32 resized_width;
		u32 resize_coeff_h;

		tile_idx = col;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_width = out_tile->height;
		else
			resized_width = out_tile->width;

		resize_coeff_h = calc_resize_coeff(in_tile->width,
						   ctx->downsize_coeff_h,
						   resized_width, closest);

		dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
			__func__, col, resize_coeff_h);

		for (row = 0; row < ctx->in.num_rows; row++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			/*
			 * With the horizontal scaling factor known, round up
			 * resized width (output width or height) to burst size.
			 */
			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->height = round_up(resized_width, 8);
			else
				out_tile->width = round_up(resized_width, 8);

			/*
			 * Calculate input width from the last accessed input
			 * pixel given resized width and scaling coefficients.
			 * Round up to burst size.
			 */
			last_output = round_up(resized_width, 8) - 1;
			if (closest)
				last_output++;
			in_tile->width = round_up(
				(DIV_ROUND_UP(last_output * resize_coeff_h,
					      8192) + 1)
				<< ctx->downsize_coeff_h, 8);
		}

		ctx->resize_coeffs_h[col] = resize_coeff_h;
	}

	for (row = 0; row < ctx->in.num_rows; row++) {
		bool closest = (row < ctx->in.num_rows - 1) &&
			       !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
		u32 resized_height;
		u32 resize_coeff_v;

		tile_idx = row * ctx->in.num_cols;
		in_tile = &ctx->in.tile[tile_idx];
		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			resized_height = out_tile->width;
		else
			resized_height = out_tile->height;

		resize_coeff_v = calc_resize_coeff(in_tile->height,
						   ctx->downsize_coeff_v,
						   resized_height, closest);

		dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
			__func__, row, resize_coeff_v);

		for (col = 0; col < ctx->in.num_cols; col++) {
			tile_idx = row * ctx->in.num_cols + col;
			in_tile = &ctx->in.tile[tile_idx];
			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];

			/*
			 * With the vertical scaling factor known, round up
			 * resized height (output width or height) to IDMAC
			 * limitations.
			 */
			if (ipu_rot_mode_is_irt(ctx->rot_mode))
				out_tile->width = round_up(resized_height, 2);
			else
				out_tile->height = round_up(resized_height, 2);

			/*
			 * Calculate input width from the last accessed input
			 * pixel given resized height and scaling coefficients.
			 * Align to IDMAC restrictions.
			 */
			last_output = round_up(resized_height, 2) - 1;
			if (closest)
				last_output++;
			in_tile->height = round_up(
				(DIV_ROUND_UP(last_output * resize_coeff_v,
					      8192) + 1)
				<< ctx->downsize_coeff_v, 2);
		}

		ctx->resize_coeffs_v[row] = resize_coeff_v;
	}
}

/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
			 struct list_head *q)
{
	struct ipu_image_convert_run *run;
	int count = 0;

	lockdep_assert_held(&ctx->chan->irqlock);

	list_for_each_entry(run, q, list) {
		if (run->ctx == ctx)
			count++;
	}

	return count;
}

static void convert_stop(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	/* disable IC tasks and the channels */
	ipu_ic_task_disable(chan->ic);
	ipu_idmac_disable_channel(chan->in_chan);
	ipu_idmac_disable_channel(chan->out_chan);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_disable_channel(chan->rotation_in_chan);
		ipu_idmac_disable_channel(chan->rotation_out_chan);
		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
	}

	ipu_ic_disable(chan->ic);
}

static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
			       struct ipuv3_channel *channel,
			       struct ipu_image_convert_image *image,
			       enum ipu_rotate_mode rot_mode,
			       bool rot_swap_width_height,
			       unsigned int tile)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	unsigned int burst_size;
	u32 width, height, stride;
	dma_addr_t addr0, addr1 = 0;
	struct ipu_image tile_image;
	unsigned int tile_idx[2];

	if (image->type == IMAGE_CONVERT_OUT) {
		tile_idx[0] = ctx->out_tile_map[tile];
		tile_idx[1] = ctx->out_tile_map[1];
	} else {
		tile_idx[0] = tile;
		tile_idx[1] = 1;
	}

	if (rot_swap_width_height) {
		width = image->tile[tile_idx[0]].height;
		height = image->tile[tile_idx[0]].width;
		stride = image->tile[tile_idx[0]].rot_stride;
		addr0 = ctx->rot_intermediate[0].phys;
		if (ctx->double_buffering)
			addr1 = ctx->rot_intermediate[1].phys;
	} else {
		width = image->tile[tile_idx[0]].width;
		height = image->tile[tile_idx[0]].height;
		stride = image->stride;
		addr0 = image->base.phys0 +
			image->tile[tile_idx[0]].offset;
		if (ctx->double_buffering)
			addr1 = image->base.phys0 +
				image->tile[tile_idx[1]].offset;
	}

	ipu_cpmem_zero(channel);

	memset(&tile_image, 0, sizeof(tile_image));
	tile_image.pix.width = tile_image.rect.width = width;
	tile_image.pix.height = tile_image.rect.height = height;
	tile_image.pix.bytesperline = stride;
	tile_image.pix.pixelformat = image->fmt->fourcc;
	tile_image.phys0 = addr0;
	tile_image.phys1 = addr1;
	if (image->fmt->planar && !rot_swap_width_height) {
		tile_image.u_offset = image->tile[tile_idx[0]].u_off;
		tile_image.v_offset = image->tile[tile_idx[0]].v_off;
	}

	ipu_cpmem_set_image(channel, &tile_image);

	if (rot_mode)
		ipu_cpmem_set_rotation(channel, rot_mode);

	if (channel == chan->rotation_in_chan ||
	    channel == chan->rotation_out_chan) {
		burst_size = 8;
		ipu_cpmem_set_block_mode(channel);
	} else
		burst_size = (width % 16) ? 8 : 16;

	ipu_cpmem_set_burstsize(channel, burst_size);

	ipu_ic_task_idma_init(chan->ic, channel, width, height,
			      burst_size, rot_mode);

	/*
	 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
	 * only do this when there is no PRG present.
	 */
	if (!channel->ipu->prg_priv)
		ipu_cpmem_set_axi_id(channel, 1);

	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}

static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	enum ipu_color_space src_cs, dest_cs;
	unsigned int dst_tile = ctx->out_tile_map[tile];
	unsigned int dest_width, dest_height;
	unsigned int col, row;
	u32 rsc;
	int ret;

	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
		__func__, chan->ic_task, ctx, run, tile, dst_tile);

	src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
	dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* swap width/height for resizer */
		dest_width = d_image->tile[dst_tile].height;
		dest_height = d_image->tile[dst_tile].width;
	} else {
		dest_width = d_image->tile[dst_tile].width;
		dest_height = d_image->tile[dst_tile].height;
	}

	row = tile / s_image->num_cols;
	col = tile % s_image->num_cols;

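	/*
	 * Pack the per-tile vertical and horizontal downsizing and bilinear
	 * resizing coefficients into the single RSC word handed to
	 * ipu_ic_task_init_rsc() below: vertical fields in the upper half,
	 * horizontal fields in the lower half.
	 */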
	rsc = (ctx->downsize_coeff_v << 30) |
	      (ctx->resize_coeffs_v[row] << 16) |
	      (ctx->downsize_coeff_h << 14) |
	      (ctx->resize_coeffs_h[col]);

	dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
		__func__, s_image->tile[tile].width,
		s_image->tile[tile].height, dest_width, dest_height, rsc);

	/* setup the IC resizer and CSC */
	ret = ipu_ic_task_init_rsc(chan->ic,
				   s_image->tile[tile].width,
				   s_image->tile[tile].height,
				   dest_width,
				   dest_height,
				   src_cs, dest_cs,
				   rsc);
	if (ret) {
		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
		return ret;
	}

	/* init the source MEM-->IC PP IDMAC channel */
	init_idmac_channel(ctx, chan->in_chan, s_image,
			   IPU_ROTATE_NONE, false, tile);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* init the IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   IPU_ROTATE_NONE, true, tile);

		/* init the MEM-->IC PP ROT IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
				   ctx->rot_mode, true, tile);

		/* init the destination IC PP ROT-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
				   IPU_ROTATE_NONE, false, tile);

		/* now link IC PP-->MEM to MEM-->IC PP ROT */
		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
	} else {
		/* init the destination IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   ctx->rot_mode, false, tile);
	}

	/* enable the IC */
	ipu_ic_enable(chan->ic);

	/* set buffers ready */
	ipu_idmac_select_buffer(chan->in_chan, 0);
	ipu_idmac_select_buffer(chan->out_chan, 0);
	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
	if (ctx->double_buffering) {
		ipu_idmac_select_buffer(chan->in_chan, 1);
		ipu_idmac_select_buffer(chan->out_chan, 1);
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
	}

	/* enable the channels! */
	ipu_idmac_enable_channel(chan->in_chan);
	ipu_idmac_enable_channel(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_enable_channel(chan->rotation_in_chan);
		ipu_idmac_enable_channel(chan->rotation_out_chan);
	}

	ipu_ic_task_enable(chan->ic);

	ipu_cpmem_dump(chan->in_chan);
	ipu_cpmem_dump(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_cpmem_dump(chan->rotation_in_chan);
		ipu_cpmem_dump(chan->rotation_out_chan);
	}

	ipu_dump(priv->ipu);

	return 0;
}

/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	return convert_start(run, 0);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}

static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}

/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete_all(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}

static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
{
	unsigned int cur_tile = ctx->next_tile - 1;
	unsigned int next_tile = ctx->next_tile;

	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
		return true;

	return false;
}

/* hold irqlock when calling */
static irqreturn_t do_irq(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {
		if (ic_settings_changed(ctx)) {
			convert_stop(run);
			convert_start(run, ctx->next_tile);
		} else {
			src_tile = &s_image->tile[ctx->next_tile];
			dst_idx = ctx->out_tile_map[ctx->next_tile];
			dst_tile = &d_image->tile[dst_idx];

			ipu_cpmem_set_buffer(chan->in_chan, 0,
					     s_image->base.phys0 +
					     src_tile->offset);
			ipu_cpmem_set_buffer(outch, 0,
					     d_image->base.phys0 +
					     dst_tile->offset);
			if (s_image->fmt->planar)
				ipu_cpmem_set_uv_offset(chan->in_chan,
							src_tile->u_off,
							src_tile->v_off);
			if (d_image->fmt->planar)
				ipu_cpmem_set_uv_offset(outch,
							dst_tile->u_off,
							dst_tile->v_off);

			ipu_idmac_select_buffer(chan->in_chan, 0);
			ipu_idmac_select_buffer(outch, 0);
		}
	} else if (ctx->next_tile < ctx->num_tiles - 1) {

		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t norotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this is a rotation operation, just ignore */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

static irqreturn_t rotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this was NOT a rotation operation, shouldn't happen */
		dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->out_eof_irq = chan->rot_out_eof_irq = -1;
}

static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						  chan->out_chan,
						  IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->out_eof_irq);
		chan->out_eof_irq = -1;
		goto err;
	}

	chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						      chan->rotation_out_chan,
						      IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->rot_out_eof_irq);
		chan->rot_out_eof_irq = -1;
		goto err;
	}

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}

static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride = ic_image->base.pix.bytesperline;

	return 0;
}

/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
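/*
 * Illustrative example: with align = 4 (a 16-pixel alignment),
 * clamp_align(1913, MIN_W, MAX_W, 4) clamps to [16, 4096] and then rounds
 * to the nearest multiple of 16, giving 1920.
 */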
1474static unsigned int clamp_align(unsigned int x, unsigned int min,
1475 unsigned int max, unsigned int align)
1476{
1477 /* Bits that must be zero to be aligned */
1478 unsigned int mask = ~((1 << align) - 1);
1479
1480 /* Clamp to aligned min and max */
1481 x = clamp(x, (min + ~mask) & mask, max & mask);
1482
1483 /* Round to nearest aligned value */
1484 if (align)
1485 x = (x + (1 << (align - 1))) & mask;
1486
1487 return x;
1488}
1489
1490/*
1491 * We have to adjust the tile width such that the tile physaddrs and
1492 * U and V plane offsets are multiples of 8 bytes as required by
1493 * the IPU DMA Controller. For the planar formats, this corresponds
1494 * to a pixel alignment of 16 (but use a more formal equation since
1495 * the variables are available). For all the packed formats, 8 is
1496 * good enough.
1497 */
1498static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
1499{
1500 return fmt->planar ? 8 * fmt->uv_width_dec : 8;
1501}
1502
1503/*
1504 * For tile height alignment, we have to ensure that the output tile
1505 * heights are multiples of 8 lines if the IRT is required by the
1506 * given rotation mode (the IRT performs rotations on 8x8 blocks
1507 * at a time). If the IRT is not used, or for input image tiles,
1508 * 2 lines are good enough.
1509 */
1510static inline u32 tile_height_align(enum ipu_image_convert_type type,
1511 enum ipu_rotate_mode rot_mode)
1512{
1513 return (type == IMAGE_CONVERT_OUT &&
1514 ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
1515}
1516
1517/* Adjusts input/output images to IPU restrictions */
1518void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
1519 enum ipu_rotate_mode rot_mode)
1520{
1521 const struct ipu_image_pixfmt *infmt, *outfmt;
1522 unsigned int num_in_rows, num_in_cols;
1523 unsigned int num_out_rows, num_out_cols;
1524 u32 w_align, h_align;
1525
1526 infmt = get_format(in->pix.pixelformat);
1527 outfmt = get_format(out->pix.pixelformat);
1528
1529 /* set some default pixel formats if needed */
1530 if (!infmt) {
1531 in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1532 infmt = get_format(V4L2_PIX_FMT_RGB24);
1533 }
1534 if (!outfmt) {
1535 out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1536 outfmt = get_format(V4L2_PIX_FMT_RGB24);
1537 }
1538
1539 /* image converter does not handle fields */
1540 in->pix.field = out->pix.field = V4L2_FIELD_NONE;
1541
1542 /* resizer cannot downsize more than 4:1 */
1543 if (ipu_rot_mode_is_irt(rot_mode)) {
1544 out->pix.height = max_t(__u32, out->pix.height,
1545 in->pix.width / 4);
1546 out->pix.width = max_t(__u32, out->pix.width,
1547 in->pix.height / 4);
1548 } else {
1549 out->pix.width = max_t(__u32, out->pix.width,
1550 in->pix.width / 4);
1551 out->pix.height = max_t(__u32, out->pix.height,
1552 in->pix.height / 4);
1553 }
1554
1555 /* get tiling rows/cols from output format */
1556 num_out_rows = num_stripes(out->pix.height);
1557 num_out_cols = num_stripes(out->pix.width);
1558 if (ipu_rot_mode_is_irt(rot_mode)) {
1559 num_in_rows = num_out_cols;
1560 num_in_cols = num_out_rows;
1561 } else {
1562 num_in_rows = num_out_rows;
1563 num_in_cols = num_out_cols;
1564 }
1565
1566 /* align input width/height */
1567 w_align = ilog2(tile_width_align(infmt) * num_in_cols);
1568 h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
1569 num_in_rows);
1570 in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
1571 in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
1572
1573 /* align output width/height */
1574 w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
1575 h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
1576 num_out_rows);
1577 out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
1578 out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
1579
1580 /* set input/output strides and image sizes */
1581 in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
1582 in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
1583 out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
1584 out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
1585}
1586EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
1587
1588/*
1589 * this is used by ipu_image_convert_prepare() to verify set input and
1590 * output images are valid before starting the conversion. Clients can
1591 * also call it before calling ipu_image_convert_prepare().
1592 */
1593int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
1594 enum ipu_rotate_mode rot_mode)
1595{
1596 struct ipu_image testin, testout;
1597
1598 testin = *in;
1599 testout = *out;
1600
1601 ipu_image_convert_adjust(&testin, &testout, rot_mode);
1602
1603 if (testin.pix.width != in->pix.width ||
1604 testin.pix.height != in->pix.height ||
1605 testout.pix.width != out->pix.width ||
1606 testout.pix.height != out->pix.height)
1607 return -EINVAL;
1608
1609 return 0;
1610}
1611EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
1612
1613/*
1614 * Call ipu_image_convert_prepare() to prepare for the conversion of
1615 * given images and rotation mode. Returns a new conversion context.
1616 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	d_image->num_rows = num_stripes(out->pix.height);
	d_image->num_cols = num_stripes(out->pix.width);
	if (ipu_rot_mode_is_irt(rot_mode)) {
		s_image->num_rows = d_image->num_cols;
		s_image->num_cols = d_image->num_rows;
	} else {
		s_image->num_rows = d_image->num_rows;
		s_image->num_cols = d_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;
	ctx->rot_mode = rot_mode;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	ret = calc_image_resize_coefficients(ctx, in, out);
	if (ret)
		goto out_free;

	calc_tile_dimensions(ctx, s_image);
	ret = calc_tile_offsets(ctx, s_image);
	if (ret)
		goto out_free;

	calc_tile_dimensions(ctx, d_image);
	ret = calc_tile_offsets(ctx, d_image);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);
	calc_tile_resize_coefficients(ctx);

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer, which is not possible. So double-buffering is
	 * impossible when either the source or destination image is
	 * in a planar format (YUV420, YUV422P, etc.).
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		unsigned long intermediate_size = d_image->tile[0].size;
		unsigned int i;

		for (i = 1; i < ctx->num_tiles; i++) {
			if (d_image->tile[i].size > intermediate_size)
				intermediate_size = d_image->tile[i].size;
		}

		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    intermediate_size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    intermediate_size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
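
/*
 * Usage sketch (illustrative only, not part of this driver): preparing
 * a context for a 90-degree rotation on the post-processing task. The
 * completion callback runs when each queued run finishes, possibly in
 * interrupt context. This assumes struct ipu_image wraps a
 * v4l2_pix_format in its 'pix' member, as declared in
 * video/imx-ipu-image-convert.h; the callback and helper names are
 * hypothetical.
 */
static void example_convert_done(struct ipu_image_convert_run *run,
				 void *data)
{
	struct completion *done = data;

	complete(done);
}

static struct ipu_image_convert_ctx *
example_prepare_rotation(struct ipu_soc *ipu, struct completion *done)
{
	struct ipu_image in = { }, out = { };

	in.pix.width = 1920;
	in.pix.height = 1080;
	in.pix.pixelformat = V4L2_PIX_FMT_UYVY;
	in.pix.bytesperline = in.pix.width * 2;
	in.pix.sizeimage = in.pix.bytesperline * in.pix.height;

	/* a 90-degree rotation swaps the output dimensions */
	out.pix.width = 1080;
	out.pix.height = 1920;
	out.pix.pixelformat = V4L2_PIX_FMT_UYVY;
	out.pix.bytesperline = out.pix.width * 2;
	out.pix.sizeimage = out.pix.bytesperline * out.pix.height;

	/* the context copies both image descriptors, so locals are fine */
	return ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
					 &in, &out, IPU_ROTATE_90_RIGHT,
					 example_convert_done, done);
}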

/*
 * Carry out a single image conversion run. Only the physical addresses
 * of the input and output image buffers are needed. The conversion
 * context must have been created previously with
 * ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
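
/*
 * Usage sketch (illustrative only, not part of this driver): queueing
 * one run on a previously prepared context. Only the DMA addresses of
 * the two frame buffers change from run to run, and the run object must
 * stay allocated until its completion callback has been called. The
 * helper name is hypothetical; it mirrors what ipu_image_convert()
 * below does internally.
 */
static int example_queue_one_frame(struct ipu_image_convert_ctx *ctx,
				   dma_addr_t in_phys, dma_addr_t out_phys)
{
	struct ipu_image_convert_run *run;
	int ret;

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run)
		return -ENOMEM;

	run->ctx = ctx;
	run->in_phys = in_phys;
	run->out_phys = out_phys;

	ret = ipu_image_convert_queue(run);
	if (ret)
		kfree(run);

	return ret;
}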

/* Abort any active or pending conversions for this context */
static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *active_run, *tmp;
	unsigned long flags;
	int run_count, ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* move all remaining pending runs in this context to done_q */
	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		if (run->ctx != ctx)
			continue;
		run->status = -EIO;
		list_move_tail(&run->list, &chan->done_q);
	}

	run_count = get_run_count(ctx, &chan->done_q);
	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
			chan->current_run : NULL;

	if (active_run)
		reinit_completion(&ctx->aborted);

	ctx->aborting = true;

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (!run_count && !active_run) {
		dev_dbg(priv->ipu->dev,
			"%s: task %u: no abort needed for ctx %p\n",
			__func__, chan->ic_task, ctx);
		return;
	}

	if (!active_run) {
		empty_done_q(chan);
		return;
	}

	dev_dbg(priv->ipu->dev,
		"%s: task %u: wait for completion: %d runs\n",
		__func__, chan->ic_task, run_count);

	ret = wait_for_completion_timeout(&ctx->aborted,
					  msecs_to_jiffies(10000));
	if (ret == 0) {
		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
		force_abort(ctx);
	}
}

void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	__ipu_image_convert_abort(ctx);
	ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
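
/*
 * Usage sketch (illustrative only, not part of this driver): cancelling
 * outstanding work while keeping the context for later runs. Note that
 * ipu_image_convert_unprepare() below already aborts internally, so an
 * explicit abort is only needed when the context will be reused. The
 * helper name is hypothetical.
 */
static void example_cancel_pending(struct ipu_image_convert_ctx *ctx)
{
	/* blocks until the active run (if any) has stopped */
	ipu_image_convert_abort(ctx);

	/* ctx remains valid; new runs may be queued on it afterwards */
}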

/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned long flags;
	bool put_res;

	/* make sure no runs are hanging around */
	__ipu_image_convert_abort(ctx);

	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
		chan->ic_task, ctx);

	spin_lock_irqsave(&chan->irqlock, flags);

	list_del(&ctx->list);

	put_res = list_empty(&chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (put_res)
		release_ipu_resources(chan);

	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	free_dma_buf(priv, &ctx->rot_intermediate[0]);

	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);

/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
		  struct ipu_image *in, struct ipu_image *out,
		  enum ipu_rotate_mode rot_mode,
		  ipu_image_convert_cb_t complete,
		  void *complete_context)
{
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	int ret;

	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
					complete, complete_context);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run) {
		ipu_image_convert_unprepare(ctx);
		return ERR_PTR(-ENOMEM);
	}

	run->ctx = ctx;
	run->in_phys = in->phys0;
	run->out_phys = out->phys0;

	ret = ipu_image_convert_queue(run);
	if (ret) {
		ipu_image_convert_unprepare(ctx);
		kfree(run);
		return ERR_PTR(ret);
	}

	return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);
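
/*
 * Usage sketch (illustrative only, not part of this driver): the
 * "canned" asynchronous API, with cleanup done from process context
 * after the callback has fired. Because the completion callback may be
 * invoked from interrupt context, the run is freed and the context
 * unprepared only after waiting here; this is essentially what
 * ipu_image_convert_sync() below does. example_convert_done() is the
 * hypothetical callback sketched earlier.
 */
static int example_convert_async(struct ipu_soc *ipu, struct ipu_image *in,
				 struct ipu_image *out)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct ipu_image_convert_run *run;
	int ret = 0;

	run = ipu_image_convert(ipu, IC_TASK_POST_PROCESSOR, in, out,
				IPU_ROTATE_NONE, example_convert_done,
				&done);
	if (IS_ERR(run))
		return PTR_ERR(run);

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(10000)))
		ret = -ETIMEDOUT;

	ipu_image_convert_unprepare(run->ctx);
	kfree(run);

	return ret;
}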

/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
					void *data)
{
	struct completion *comp = data;

	complete(comp);
}

int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			   struct ipu_image *in, struct ipu_image *out,
			   enum ipu_rotate_mode rot_mode)
{
	struct ipu_image_convert_run *run;
	struct completion comp;
	int ret;

	init_completion(&comp);

	run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
				image_convert_sync_complete, &comp);
	if (IS_ERR(run))
		return PTR_ERR(run);

	ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
	ret = (ret == 0) ? -ETIMEDOUT : 0;

	ipu_image_convert_unprepare(run->ctx);
	kfree(run);

	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
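
/*
 * Usage sketch (illustrative only, not part of this driver): a one-shot
 * blocking conversion. The caller fills in both struct ipu_image
 * descriptors, including the phys0 DMA addresses of the frame buffers,
 * before calling. The helper name is hypothetical.
 */
static int example_convert_blocking(struct ipu_soc *ipu,
				    struct ipu_image *in,
				    struct ipu_image *out)
{
	/* blocks for up to 10 seconds, returns -ETIMEDOUT on timeout */
	return ipu_image_convert_sync(ipu, IC_TASK_VIEWFINDER, in, out,
				      IPU_ROTATE_NONE);
}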

int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
	struct ipu_image_convert_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ipu->image_convert_priv = priv;
	priv->ipu = ipu;

	for (i = 0; i < IC_NUM_TASKS; i++) {
		struct ipu_image_convert_chan *chan = &priv->chan[i];

		chan->ic_task = i;
		chan->priv = priv;
		chan->dma_ch = &image_convert_dma_chan[i];
		chan->out_eof_irq = -1;
		chan->rot_out_eof_irq = -1;

		spin_lock_init(&chan->irqlock);
		INIT_LIST_HEAD(&chan->ctx_list);
		INIT_LIST_HEAD(&chan->pending_q);
		INIT_LIST_HEAD(&chan->done_q);
	}

	return 0;
}

void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}