summary refs log tree commit diff stats
path: root/doc/examples
diff options
context:
space:
mode:
author    Clément Bœsch <u@pkh.me>    2017-04-03 21:07:59 +0200
committer Clément Bœsch <u@pkh.me>    2017-04-03 21:07:59 +0200
commit4069394fd4027a4052c6cf247870aac51b67fda7 (patch)
treec99eb02e6428039cd587dc052e61710b72b2d500 /doc/examples
parentd1c2c210bfeeda209e11efcc980b7c9781000c40 (diff)
parent1dd2b6c91ca5f26207805720d4f5564de60b241b (diff)
downloadffmpeg-streaming-4069394fd4027a4052c6cf247870aac51b67fda7.zip
ffmpeg-streaming-4069394fd4027a4052c6cf247870aac51b67fda7.tar.gz
Merge commit '1dd2b6c91ca5f26207805720d4f5564de60b241b'
* commit '1dd2b6c91ca5f26207805720d4f5564de60b241b':
  examples/qsvdec: switch to the hwcontext API

Merged-by: Clément Bœsch <u@pkh.me>
Diffstat (limited to 'doc/examples')
-rw-r--r--  doc/examples/qsvdec.c  317
1 files changed, 51 insertions, 266 deletions
diff --git a/doc/examples/qsvdec.c b/doc/examples/qsvdec.c
index aaecd81..141c581 100644
--- a/doc/examples/qsvdec.c
+++ b/doc/examples/qsvdec.c
@@ -26,185 +26,55 @@
*
* @example qsvdec.c
* This example shows how to do QSV-accelerated H.264 decoding with output
- * frames in the VA-API video surfaces.
+ * frames in the GPU video surfaces.
*/
#include "config.h"
#include <stdio.h>
-#include <mfx/mfxvideo.h>
-
-#include <va/va.h>
-#include <va/va_x11.h>
-#include <X11/Xlib.h>
-
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
-#include "libavcodec/qsv.h"
+#include "libavutil/buffer.h"
#include "libavutil/error.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"
typedef struct DecodeContext {
- mfxSession mfx_session;
- VADisplay va_dpy;
-
- VASurfaceID *surfaces;
- mfxMemId *surface_ids;
- int *surface_used;
- int nb_surfaces;
-
- mfxFrameInfo frame_info;
+ AVBufferRef *hw_device_ref;
} DecodeContext;
-static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
- mfxFrameAllocResponse *resp)
-{
- DecodeContext *decode = pthis;
- int err, i;
-
- if (decode->surfaces) {
- fprintf(stderr, "Multiple allocation requests.\n");
- return MFX_ERR_MEMORY_ALLOC;
- }
- if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
- fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
- return MFX_ERR_UNSUPPORTED;
- }
- if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
- req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
- req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
- fprintf(stderr, "Unsupported surface properties.\n");
- return MFX_ERR_UNSUPPORTED;
- }
-
- decode->surfaces = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
- decode->surface_ids = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
- decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
- if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
- goto fail;
-
- err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
- req->Info.Width, req->Info.Height,
- decode->surfaces, req->NumFrameSuggested,
- NULL, 0);
- if (err != VA_STATUS_SUCCESS) {
- fprintf(stderr, "Error allocating VA surfaces\n");
- goto fail;
- }
- decode->nb_surfaces = req->NumFrameSuggested;
-
- for (i = 0; i < decode->nb_surfaces; i++)
- decode->surface_ids[i] = &decode->surfaces[i];
-
- resp->mids = decode->surface_ids;
- resp->NumFrameActual = decode->nb_surfaces;
-
- decode->frame_info = req->Info;
-
- return MFX_ERR_NONE;
-fail:
- av_freep(&decode->surfaces);
- av_freep(&decode->surface_ids);
- av_freep(&decode->surface_used);
-
- return MFX_ERR_MEMORY_ALLOC;
-}
-
-static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
-{
- return MFX_ERR_NONE;
-}
-
-static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
-{
- return MFX_ERR_UNSUPPORTED;
-}
-
-static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
-{
- return MFX_ERR_UNSUPPORTED;
-}
-
-static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
-{
- *hdl = mid;
- return MFX_ERR_NONE;
-}
-
-static void free_surfaces(DecodeContext *decode)
-{
- if (decode->surfaces)
- vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
- av_freep(&decode->surfaces);
- av_freep(&decode->surface_ids);
- av_freep(&decode->surface_used);
- decode->nb_surfaces = 0;
-}
-
-static void free_buffer(void *opaque, uint8_t *data)
-{
- int *used = opaque;
- *used = 0;
- av_freep(&data);
-}
-
-static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
-{
- DecodeContext *decode = avctx->opaque;
-
- mfxFrameSurface1 *surf;
- AVBufferRef *surf_buf;
- int idx;
-
- for (idx = 0; idx < decode->nb_surfaces; idx++) {
- if (!decode->surface_used[idx])
- break;
- }
- if (idx == decode->nb_surfaces) {
- fprintf(stderr, "No free surfaces\n");
- return AVERROR(ENOMEM);
- }
-
- surf = av_mallocz(sizeof(*surf));
- if (!surf)
- return AVERROR(ENOMEM);
- surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
- &decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
- if (!surf_buf) {
- av_freep(&surf);
- return AVERROR(ENOMEM);
- }
-
- surf->Info = decode->frame_info;
- surf->Data.MemId = &decode->surfaces[idx];
-
- frame->buf[0] = surf_buf;
- frame->data[3] = (uint8_t*)surf;
-
- decode->surface_used[idx] = 1;
-
- return 0;
-}
-
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
while (*pix_fmts != AV_PIX_FMT_NONE) {
if (*pix_fmts == AV_PIX_FMT_QSV) {
- if (!avctx->hwaccel_context) {
- DecodeContext *decode = avctx->opaque;
- AVQSVContext *qsv = av_qsv_alloc_context();
- if (!qsv)
- return AV_PIX_FMT_NONE;
-
- qsv->session = decode->mfx_session;
- qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
-
- avctx->hwaccel_context = qsv;
- }
+ DecodeContext *decode = avctx->opaque;
+ AVHWFramesContext *frames_ctx;
+ AVQSVFramesContext *frames_hwctx;
+ int ret;
+
+ /* create a pool of surfaces to be used by the decoder */
+ avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
+ if (!avctx->hw_frames_ctx)
+ return AV_PIX_FMT_NONE;
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ frames_hwctx = frames_ctx->hwctx;
+
+ frames_ctx->format = AV_PIX_FMT_QSV;
+ frames_ctx->sw_format = avctx->sw_pix_fmt;
+ frames_ctx->width = FFALIGN(avctx->coded_width, 32);
+ frames_ctx->height = FFALIGN(avctx->coded_height, 32);
+ frames_ctx->initial_pool_size = 32;
+
+ frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
+
+ ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+ if (ret < 0)
+ return AV_PIX_FMT_NONE;
return AV_PIX_FMT_QSV;
}
@@ -218,8 +88,8 @@ static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
}
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
- AVFrame *frame, AVPacket *pkt,
- AVIOContext *output_ctx)
+ AVFrame *frame, AVFrame *sw_frame,
+ AVPacket *pkt, AVIOContext *output_ctx)
{
int ret = 0;
int got_frame = 1;
@@ -238,61 +108,20 @@ static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
* We just retrieve the raw data and write it to a file, which is rather
* useless but pedagogic. */
if (got_frame) {
- mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
- VASurfaceID surface = *(VASurfaceID*)surf->Data.MemId;
-
- VAImageFormat img_fmt = {
- .fourcc = VA_FOURCC_NV12,
- .byte_order = VA_LSB_FIRST,
- .bits_per_pixel = 8,
- .depth = 8,
- };
-
- VAImage img;
-
- VAStatus err;
- uint8_t *data;
int i, j;
- img.buf = VA_INVALID_ID;
- img.image_id = VA_INVALID_ID;
-
- err = vaCreateImage(decode->va_dpy, &img_fmt,
- frame->width, frame->height, &img);
- if (err != VA_STATUS_SUCCESS) {
- fprintf(stderr, "Error creating an image: %s\n",
- vaErrorStr(err));
- ret = AVERROR_UNKNOWN;
+ ret = av_hwframe_transfer_data(sw_frame, frame, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Error transferring the data to system memory\n");
goto fail;
}
- err = vaGetImage(decode->va_dpy, surface, 0, 0,
- frame->width, frame->height,
- img.image_id);
- if (err != VA_STATUS_SUCCESS) {
- fprintf(stderr, "Error getting an image: %s\n",
- vaErrorStr(err));
- ret = AVERROR_UNKNOWN;
- goto fail;
- }
-
- err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
- if (err != VA_STATUS_SUCCESS) {
- fprintf(stderr, "Error mapping the image buffer: %s\n",
- vaErrorStr(err));
- ret = AVERROR_UNKNOWN;
- goto fail;
- }
-
- for (i = 0; i < img.num_planes; i++)
- for (j = 0; j < (img.height >> (i > 0)); j++)
- avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);
+ for (i = 0; i < FF_ARRAY_ELEMS(sw_frame->data) && sw_frame->data[i]; i++)
+ for (j = 0; j < (sw_frame->height >> (i > 0)); j++)
+ avio_write(output_ctx, sw_frame->data[i] + j * sw_frame->linesize[i], sw_frame->width);
fail:
- if (img.buf != VA_INVALID_ID)
- vaUnmapBuffer(decode->va_dpy, img.buf);
- if (img.image_id != VA_INVALID_ID)
- vaDestroyImage(decode->va_dpy, img.image_id);
+ av_frame_unref(sw_frame);
av_frame_unref(frame);
if (ret < 0)
@@ -311,28 +140,13 @@ int main(int argc, char **argv)
const AVCodec *decoder;
AVPacket pkt = { 0 };
- AVFrame *frame = NULL;
+ AVFrame *frame = NULL, *sw_frame = NULL;
DecodeContext decode = { NULL };
- Display *dpy = NULL;
- int va_ver_major, va_ver_minor;
-
- mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
- mfxVersion mfx_ver = { { 1, 1 } };
-
- mfxFrameAllocator frame_allocator = {
- .pthis = &decode,
- .Alloc = frame_alloc,
- .Lock = frame_lock,
- .Unlock = frame_unlock,
- .GetHDL = frame_get_hdl,
- .Free = frame_free,
- };
-
AVIOContext *output_ctx = NULL;
- int ret, i, err;
+ int ret, i;
av_register_all();
@@ -362,35 +176,14 @@ int main(int argc, char **argv)
goto finish;
}
- /* initialize VA-API */
- dpy = XOpenDisplay(NULL);
- if (!dpy) {
- fprintf(stderr, "Cannot open the X display\n");
- goto finish;
- }
- decode.va_dpy = vaGetDisplay(dpy);
- if (!decode.va_dpy) {
- fprintf(stderr, "Cannot open the VA display\n");
- goto finish;
- }
-
- err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
- if (err != VA_STATUS_SUCCESS) {
- fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
- goto finish;
- }
- fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);
-
- /* initialize an MFX session */
- err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
- if (err != MFX_ERR_NONE) {
- fprintf(stderr, "Error initializing an MFX session\n");
+ /* open the hardware device */
+ ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
+ "auto", NULL, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Cannot open the hardware device\n");
goto finish;
}
- MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
- MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);
-
/* initialize the decoder */
decoder = avcodec_find_decoder_by_name("h264_qsv");
if (!decoder) {
@@ -418,7 +211,6 @@ int main(int argc, char **argv)
decoder_ctx->refcounted_frames = 1;
decoder_ctx->opaque = &decode;
- decoder_ctx->get_buffer2 = get_buffer;
decoder_ctx->get_format = get_format;
ret = avcodec_open2(decoder_ctx, NULL, NULL);
@@ -434,8 +226,9 @@ int main(int argc, char **argv)
goto finish;
}
- frame = av_frame_alloc();
- if (!frame) {
+ frame = av_frame_alloc();
+ sw_frame = av_frame_alloc();
+ if (!frame || !sw_frame) {
ret = AVERROR(ENOMEM);
goto finish;
}
@@ -447,7 +240,7 @@ int main(int argc, char **argv)
break;
if (pkt.stream_index == video_st->index)
- ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
+ ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
av_packet_unref(&pkt);
}
@@ -455,7 +248,7 @@ int main(int argc, char **argv)
/* flush the decoder */
pkt.data = NULL;
pkt.size = 0;
- ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
+ ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
finish:
if (ret < 0) {
@@ -467,19 +260,11 @@ finish:
avformat_close_input(&input_ctx);
av_frame_free(&frame);
+ av_frame_free(&sw_frame);
- if (decoder_ctx)
- av_freep(&decoder_ctx->hwaccel_context);
avcodec_free_context(&decoder_ctx);
- free_surfaces(&decode);
-
- if (decode.mfx_session)
- MFXClose(decode.mfx_session);
- if (decode.va_dpy)
- vaTerminate(decode.va_dpy);
- if (dpy)
- XCloseDisplay(dpy);
+ av_buffer_unref(&decode.hw_device_ref);
avio_close(output_ctx);
OpenPOWER on IntegriCloud