diff options
author | Zhong Li <zhong.li@intel.com> | 2018-05-21 17:58:52 +0800 |
---|---|---|
committer | Zhong Li <zhong.li@intel.com> | 2018-10-11 13:26:59 +0800 |
commit | a5e1cb9e96bca091ed7103d8be72a99e7dc31582 (patch) | |
tree | 609539b41ae8560ac589236e9fb37ed0d1c5db4f /libavutil | |
parent | 7a6d88ee6269666e5676a37a75bde231a6508e28 (diff) | |
download | ffmpeg-streaming-a5e1cb9e96bca091ed7103d8be72a99e7dc31582.zip ffmpeg-streaming-a5e1cb9e96bca091ed7103d8be72a99e7dc31582.tar.gz |
lavu/hwcontext_qsv: Add support for AV_PIX_FMT_BGRA.
RGB32 (AV_PIX_FMT_BGRA on Intel platforms) format may be used as an overlay with alpha blending.
So add AV_PIX_FMT_BGRA format support.
One example of alpha blending overlay: ffmpeg -hwaccel qsv -c:v h264_qsv -i BA1_Sony_D.jsv
-filter_complex 'movie=lena-rgba.png,hwupload=extra_hw_frames=16[a];[0:v][a]overlay_qsv=x=10:y=10'
-c:v h264_qsv -y out.mp4
Rename RGB32 to BGRA to make it clearer, as per Mark Thompson's suggestion.
V2: Add P010 format support; otherwise it would introduce an HEVC 10-bit encoding regression.
Thanks for LinJie's discovery.
Signed-off-by: Zhong Li <zhong.li@intel.com>
Verified-by: Fu, Linjie <linjie.fu@intel.com>
Diffstat (limited to 'libavutil')
-rw-r--r-- | libavutil/hwcontext_qsv.c | 44 |
1 file changed, 34 insertions, 10 deletions
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c index a581d2a..33e121b 100644 --- a/libavutil/hwcontext_qsv.c +++ b/libavutil/hwcontext_qsv.c @@ -100,6 +100,7 @@ static const struct { uint32_t fourcc; } supported_pixel_formats[] = { { AV_PIX_FMT_NV12, MFX_FOURCC_NV12 }, + { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4 }, { AV_PIX_FMT_P010, MFX_FOURCC_P010 }, { AV_PIX_FMT_PAL8, MFX_FOURCC_P8 }, }; @@ -751,6 +752,37 @@ static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst, return ret; } +static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface) +{ + switch (frame->format) { + case AV_PIX_FMT_NV12: + case AV_PIX_FMT_P010: + surface->Data.Y = frame->data[0]; + surface->Data.UV = frame->data[1]; + break; + + case AV_PIX_FMT_YUV420P: + surface->Data.Y = frame->data[0]; + surface->Data.U = frame->data[1]; + surface->Data.V = frame->data[2]; + break; + + case AV_PIX_FMT_BGRA: + surface->Data.B = frame->data[0]; + surface->Data.G = frame->data[0] + 1; + surface->Data.R = frame->data[0] + 2; + surface->Data.A = frame->data[0] + 3; + break; + + default: + return MFX_ERR_UNSUPPORTED; + } + surface->Data.Pitch = frame->linesize[0]; + surface->Data.TimeStamp = frame->pts; + + return 0; +} + static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src) { @@ -796,11 +828,7 @@ static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst, } out.Info = in->Info; - out.Data.PitchLow = dst->linesize[0]; - out.Data.Y = dst->data[0]; - out.Data.U = dst->data[1]; - out.Data.V = dst->data[2]; - out.Data.A = dst->data[3]; + map_frame_to_surface(dst, &out); do { err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync); @@ -868,11 +896,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst, } in.Info = out->Info; - in.Data.PitchLow = src->linesize[0]; - in.Data.Y = src->data[0]; - in.Data.U = src->data[1]; - in.Data.V = src->data[2]; - in.Data.A = src->data[3]; + 
map_frame_to_surface(src, &in); do { err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);