From 3594788b713e76449eda0bc9d64b38258c86a594 Mon Sep 17 00:00:00 2001
From: Paul B Mahol
Date: Mon, 26 Jun 2017 11:29:02 +0200
Subject: avcodec/utvideodec: decode to GBR(A)P

This is actually the internal utvideo format. It allows making use of
SIMD for median prediction for RGB(A) formats, thus speeding up
decoding, and it simplifies the code, easing further development and
maintenance.

Update FATE because of the pixel format switch.

Signed-off-by: Paul B Mahol
---
 libavcodec/utvideo.h    |   3 -
 libavcodec/utvideodec.c | 283 +++++-------------------------------------
 2 files changed, 29 insertions(+), 257 deletions(-)

(limited to 'libavcodec')

diff --git a/libavcodec/utvideo.h b/libavcodec/utvideo.h
index 1d70865..9559c83 100644
--- a/libavcodec/utvideo.h
+++ b/libavcodec/utvideo.h
@@ -63,9 +63,6 @@ enum {
 /* Mapping of libavcodec prediction modes to Ut Video's */
 extern const int ff_ut_pred_order[5];
 
-/* Order of RGB(A) planes in Ut Video */
-extern const int ff_ut_rgb_order[4];
-
 typedef struct UtvideoContext {
     const AVClass *class;
     AVCodecContext *avctx;
diff --git a/libavcodec/utvideodec.c b/libavcodec/utvideodec.c
index 7979618..0c6f89e 100644
--- a/libavcodec/utvideodec.c
+++ b/libavcodec/utvideodec.c
@@ -333,21 +333,25 @@ fail:
     return AVERROR_INVALIDDATA;
 }
 
-static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
-                               int width, int height)
+static void restore_rgb_planes(AVFrame *frame, int width, int height)
 {
-    int i, j;
+    uint8_t *src_r = (uint8_t *)frame->data[2];
+    uint8_t *src_g = (uint8_t *)frame->data[0];
+    uint8_t *src_b = (uint8_t *)frame->data[1];
     uint8_t r, g, b;
+    int i, j;
 
     for (j = 0; j < height; j++) {
-        for (i = 0; i < width * step; i += step) {
-            r = src[i];
-            g = src[i + 1];
-            b = src[i + 2];
-            src[i]     = r + g - 0x80;
-            src[i + 2] = b + g - 0x80;
+        for (i = 0; i < width; i++) {
+            r = src_r[i];
+            g = src_g[i];
+            b = src_b[i];
+            src_r[i] = r + g - 0x80;
+            src_b[i] = b + g - 0x80;
         }
-        src += stride;
+        src_r += frame->linesize[2];
+        src_g += frame->linesize[0];
+        src_b += frame->linesize[1];
     }
 }
 
@@ -476,132 +480,6 @@ static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t
     }
 }
 
-static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
-                                  int width, int height, int slices, int rmode)
-{
-    int i, j, slice;
-    int A, B, C;
-    uint8_t *bsrc;
-    int slice_start, slice_height;
-    const int cmask = ~rmode;
-
-    for (slice = 0; slice < slices; slice++) {
-        slice_start  = ((slice * height) / slices) & cmask;
-        slice_height = ((((slice + 1) * height) / slices) & cmask) -
-                       slice_start;
-
-        if (!slice_height)
-            continue;
-        bsrc = src + slice_start * stride;
-
-        // first line - left neighbour prediction
-        bsrc[0] += 0x80;
-        A = bsrc[0];
-        for (i = step; i < width * step; i += step) {
-            bsrc[i] += A;
-            A = bsrc[i];
-        }
-        bsrc += stride;
-        if (slice_height <= 1)
-            continue;
-        // second line - first element has top prediction, the rest uses median
-        C = bsrc[-stride];
-        bsrc[0] += C;
-        A = bsrc[0];
-        for (i = step; i < width * step; i += step) {
-            B = bsrc[i - stride];
-            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
-            C = B;
-            A = bsrc[i];
-        }
-        bsrc += stride;
-        // the rest of lines use continuous median prediction
-        for (j = 2; j < slice_height; j++) {
-            for (i = 0; i < width * step; i += step) {
-                B = bsrc[i - stride];
-                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
-                C = B;
-                A = bsrc[i];
-            }
-            bsrc += stride;
-        }
-    }
-}
-
-/* UtVideo interlaced mode treats every two lines as a single one,
- * so restoring function should take care of possible padding between
- * two parts of the same "line".
- */
-static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
-                                     int width, int height, int slices, int rmode)
-{
-    int i, j, slice;
-    int A, B, C;
-    uint8_t *bsrc;
-    int slice_start, slice_height;
-    const int cmask = ~(rmode ? 3 : 1);
-    const ptrdiff_t stride2 = stride << 1;
-
-    for (slice = 0; slice < slices; slice++) {
-        slice_start  = ((slice * height) / slices) & cmask;
-        slice_height = ((((slice + 1) * height) / slices) & cmask) -
-                       slice_start;
-        slice_height >>= 1;
-        if (!slice_height)
-            continue;
-
-        bsrc = src + slice_start * stride;
-
-        // first line - left neighbour prediction
-        bsrc[0] += 0x80;
-        A = bsrc[0];
-        for (i = step; i < width * step; i += step) {
-            bsrc[i] += A;
-            A = bsrc[i];
-        }
-        for (i = 0; i < width * step; i += step) {
-            bsrc[stride + i] += A;
-            A = bsrc[stride + i];
-        }
-        bsrc += stride2;
-        if (slice_height <= 1)
-            continue;
-        // second line - first element has top prediction, the rest uses median
-        C = bsrc[-stride2];
-        bsrc[0] += C;
-        A = bsrc[0];
-        for (i = step; i < width * step; i += step) {
-            B = bsrc[i - stride2];
-            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
-            C = B;
-            A = bsrc[i];
-        }
-        for (i = 0; i < width * step; i += step) {
-            B = bsrc[i - stride];
-            bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
-            C = B;
-            A = bsrc[stride + i];
-        }
-        bsrc += stride2;
-        // the rest of lines use continuous median prediction
-        for (j = 2; j < slice_height; j++) {
-            for (i = 0; i < width * step; i += step) {
-                B = bsrc[i - stride2];
-                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
-                C = B;
-                A = bsrc[i];
-            }
-            for (i = 0; i < width * step; i += step) {
-                B = bsrc[i - stride];
-                bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
-                C = B;
-                A = bsrc[i + stride];
-            }
-            bsrc += stride2;
-        }
-    }
-}
-
 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
 {
@@ -691,108 +569,6 @@ static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_
     }
 }
 
-static void restore_gradient_packed(uint8_t *src, int step, ptrdiff_t stride,
-                                    int width, int height, int slices, int rmode)
-{
-    int i, j, slice;
-    int A, B, C;
-    uint8_t *bsrc;
-    int slice_start, slice_height;
-    const int cmask = ~rmode;
-
-    for (slice = 0; slice < slices; slice++) {
-        slice_start  = ((slice * height) / slices) & cmask;
-        slice_height = ((((slice + 1) * height) / slices) & cmask) -
-                       slice_start;
-
-        if (!slice_height)
-            continue;
-        bsrc = src + slice_start * stride;
-
-        // first line - left neighbour prediction
-        bsrc[0] += 0x80;
-        A = bsrc[0];
-        for (i = step; i < width * step; i += step) {
-            bsrc[i] += A;
-            A = bsrc[i];
-        }
-        bsrc += stride;
-        if (slice_height <= 1)
-            continue;
-        for (j = 1; j < slice_height; j++) {
-            // second line - first element has top prediction, the rest uses gradient
-            C = bsrc[-stride];
-            bsrc[0] += C;
-            for (i = step; i < width * step; i += step) {
-                A = bsrc[i - stride];
-                B = bsrc[i - (stride + step)];
-                C = bsrc[i - step];
-                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
-            }
-            bsrc += stride;
-        }
-    }
-}
-
-static void restore_gradient_packed_il(uint8_t *src, int step, ptrdiff_t stride,
-                                       int width, int height, int slices, int rmode)
-{
-    int i, j, slice;
-    int A, B, C;
-    uint8_t *bsrc;
-    int slice_start, slice_height;
-    const int cmask = ~(rmode ? 3 : 1);
-    const ptrdiff_t stride2 = stride << 1;
-
-    for (slice = 0; slice < slices; slice++) {
-        slice_start  = ((slice * height) / slices) & cmask;
-        slice_height = ((((slice + 1) * height) / slices) & cmask) -
-                       slice_start;
-        slice_height >>= 1;
-        if (!slice_height)
-            continue;
-
-        bsrc = src + slice_start * stride;
-
-        // first line - left neighbour prediction
-        bsrc[0] += 0x80;
-        A = bsrc[0];
-        for (i = step; i < width * step; i += step) {
-            bsrc[i] += A;
-            A = bsrc[i];
-        }
-        for (i = 0; i < width * step; i += step) {
-            bsrc[stride + i] += A;
-            A = bsrc[stride + i];
-        }
-        bsrc += stride2;
-        if (slice_height <= 1)
-            continue;
-        for (j = 1; j < slice_height; j++) {
-            // second line - first element has top prediction, the rest uses gradient
-            C = bsrc[-stride2];
-            bsrc[0] += C;
-            for (i = step; i < width * step; i += step) {
-                A = bsrc[i - stride2];
-                B = bsrc[i - (stride2 + step)];
-                C = bsrc[i - step];
-                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
-            }
-            A = bsrc[-stride];
-            B = bsrc[-(step + stride + stride - width * step)];
-            C = bsrc[width * step - step];
-            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
-            for (i = step; i < width * step; i += step) {
-                A = bsrc[i - stride];
-                B = bsrc[i - (step + stride)];
-                C = bsrc[i - step + stride];
-                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
-            }
-            bsrc += stride2;
-        }
-    }
-}
-
 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *avpkt)
 {
@@ -887,41 +663,40 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     }
 
     switch (c->avctx->pix_fmt) {
-    case AV_PIX_FMT_RGB24:
-    case AV_PIX_FMT_RGBA:
+    case AV_PIX_FMT_GBRP:
+    case AV_PIX_FMT_GBRAP:
         for (i = 0; i < c->planes; i++) {
-            ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
-                               c->planes, frame.f->linesize[0], avctx->width,
+            ret = decode_plane(c, i, frame.f->data[i], 1,
+                               frame.f->linesize[i], avctx->width,
                                avctx->height, plane_start[i],
                                c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median_packed(frame.f->data[0] + ff_ut_rgb_order[i],
-                                          c->planes, frame.f->linesize[0], avctx->width,
+                    restore_median_planar(c, frame.f->data[i],
                                          frame.f->linesize[i], avctx->width,
                                           avctx->height, c->slices, 0);
                 } else {
-                    restore_median_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
-                                             c->planes, frame.f->linesize[0],
+                    restore_median_planar_il(c, frame.f->data[i],
+                                             frame.f->linesize[i],
                                              avctx->width, avctx->height, c->slices,
                                              0);
                 }
             } else if (c->frame_pred == PRED_GRADIENT) {
                 if (!c->interlaced) {
-                    restore_gradient_packed(frame.f->data[0] + ff_ut_rgb_order[i],
-                                            c->planes, frame.f->linesize[0], avctx->width,
+                    restore_gradient_planar(c, frame.f->data[i],
+                                            frame.f->linesize[i], avctx->width,
                                             avctx->height, c->slices, 0);
                 } else {
-                    restore_gradient_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
-                                               c->planes, frame.f->linesize[0],
+                    restore_gradient_planar_il(c, frame.f->data[i],
+                                               frame.f->linesize[i],
                                                avctx->width, avctx->height, c->slices,
                                                0);
                 }
            }
        }
-        restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
-                           avctx->width, avctx->height);
+        restore_rgb_planes(frame.f, avctx->width, avctx->height);
         break;
     case AV_PIX_FMT_GBRAP10:
     case AV_PIX_FMT_GBRP10:
@@ -1094,11 +869,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
     switch (avctx->codec_tag) {
     case MKTAG('U', 'L', 'R', 'G'):
         c->planes = 3;
-        avctx->pix_fmt = AV_PIX_FMT_RGB24;
+        avctx->pix_fmt = AV_PIX_FMT_GBRP;
         break;
     case MKTAG('U', 'L', 'R', 'A'):
         c->planes = 4;
-        avctx->pix_fmt = AV_PIX_FMT_RGBA;
+        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
         break;
     case MKTAG('U', 'L', 'Y', '0'):
         c->planes = 3;
--
cgit v1.1
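
A minimal standalone sketch (not FFmpeg code) of the per-pixel arithmetic the
new planar restore_rgb_planes() applies after prediction has been undone,
assuming the GBRP/GBRAP plane order used above (frame->data[0] = G,
data[1] = B, data[2] = R); the helper name, strides and sample values below
are invented for illustration only:

/* Standalone sketch: same per-pixel math as the patched restore_rgb_planes().
 * R and B are stored decorrelated against G with a 0x80 bias, so the decoder
 * restores them as R += G - 0x80 and B += G - 0x80 in 8-bit modular
 * arithmetic. Plane contents and strides here are toy values. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void restore_rgb_planes_sketch(uint8_t *g, uint8_t *b, uint8_t *r,
                                      int width, int height,
                                      ptrdiff_t g_stride, ptrdiff_t b_stride,
                                      ptrdiff_t r_stride)
{
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            uint8_t rv = r[i], gv = g[i], bv = b[i];
            r[i] = rv + gv - 0x80;  /* wraps modulo 256, like uint8_t in the patch */
            b[i] = bv + gv - 0x80;
        }
        g += g_stride;
        b += b_stride;
        r += r_stride;
    }
}

int main(void)
{
    /* One 4x1 "frame": G samples plus biased R-G and B-G differences. */
    uint8_t g[4] = { 0x10, 0x20, 0x30, 0x40 };
    uint8_t b[4] = { 0x80, 0x85, 0x7b, 0x80 };  /* stores B - G + 0x80 */
    uint8_t r[4] = { 0x90, 0x70, 0x80, 0x81 };  /* stores R - G + 0x80 */

    restore_rgb_planes_sketch(g, b, r, 4, 1, 4, 4, 4);

    for (int i = 0; i < 4; i++)
        printf("pixel %d: R=0x%02x G=0x%02x B=0x%02x\n", i, r[i], g[i], b[i]);
    return 0;
}

Decoding straight into GBR(A)P keeps each colour component in its own
contiguous plane, which is what lets the existing planar median/gradient
restore paths (and their SIMD) be reused for RGB(A) content instead of the
removed packed variants.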