author     Anton Khirnov <anton@khirnov.net>    2016-05-27 12:14:33 +0200
committer  wm4 <nfxjfg@googlemail.com>           2017-03-03 08:45:43 +0100
commit     af1761f7b5b1b72197dc40934953b775c2d951cc
tree       46941e97c792c8331d288639e4f39231bcb0f017 /ffmpeg_filter.c
parent     4ee5aed122ba7d289c1686eca6eba161d5d62304
ffmpeg: init filtergraphs only after we have a frame on each input
This makes sure the actual stream parameters are used, which is important mainly for hardware decoding+filtering cases. Previously those required various weird workarounds to handle the fact that a fake software graph had to be constructed, but never used. This should also improve behaviour in rare cases where avformat_find_stream_info() does not provide accurate information.

This merges Libav commit a3a0230, which was previously skipped. The code in flush_encoders() which sets up a "fake" format wasn't in Libav. I'm not sure it is a good idea, but it tends to give behavior closer to the old one in certain corner cases.

The vp8-size-change test gives a different result, because the size of the first frame is now used; libavformat reported the size of the largest frame for some reason.

The exr tests now use the sample aspect ratio of the first frame. For some reason libavformat determines 0/1 as the aspect ratio, while the decoder returns the correct one.

The ffm and mxf tests change their field_order values; I'm assuming this is another libavformat/decoder mismatch.

Signed-off-by: wm4 <nfxjfg@googlemail.com>
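For illustration, the buffering scheme this change relies on can be sketched roughly as below. The struct and helper names (InputFilterSketch, queue_or_filter_frame, drain_queue, filter_frame) are hypothetical stand-ins, not the actual ffmpeg.c symbols; only the AVFifoBuffer usage mirrors the frame_queue of AVFrame pointers allocated in this patch. The idea: until the filtergraph has been configured from real frame parameters, decoded frames are cloned into the per-input FIFO; once configuration happens, the queue is drained in order.

#include <libavutil/fifo.h>
#include <libavutil/frame.h>
#include <libavutil/error.h>

/* Simplified stand-in for the per-input state; the real InputFilter in
 * ffmpeg.h carries much more.  frame_queue is created as in the patch:
 * av_fifo_alloc(8 * sizeof(AVFrame*)). */
typedef struct InputFilterSketch {
    AVFifoBuffer *frame_queue;       /* holds AVFrame* pointers */
    int           graph_is_configured;
} InputFilterSketch;

static int queue_or_filter_frame(InputFilterSketch *ifp, AVFrame *frame,
                                 int (*filter_frame)(AVFrame *f))
{
    if (!ifp->graph_is_configured) {
        /* graph not set up yet: keep a private copy of the frame */
        AVFrame *tmp = av_frame_clone(frame);
        if (!tmp)
            return AVERROR(ENOMEM);

        /* grow the FIFO when the initial 8 pointer slots are exhausted */
        if (!av_fifo_space(ifp->frame_queue)) {
            int ret = av_fifo_realloc2(ifp->frame_queue,
                                       2 * av_fifo_size(ifp->frame_queue));
            if (ret < 0) {
                av_frame_free(&tmp);
                return ret;
            }
        }
        av_fifo_generic_write(ifp->frame_queue, &tmp, sizeof(tmp), NULL);
        return 0;
    }

    return filter_frame(frame);
}

static int drain_queue(InputFilterSketch *ifp, int (*filter_frame)(AVFrame *f))
{
    /* called after the graph has been configured from the first frame's
     * parameters: feed the buffered frames through in arrival order */
    while (av_fifo_size(ifp->frame_queue)) {
        AVFrame *tmp;
        int ret;
        av_fifo_generic_read(ifp->frame_queue, &tmp, sizeof(tmp), NULL);
        ret = filter_frame(tmp);
        av_frame_free(&tmp);
        if (ret < 0)
            return ret;
    }
    return 0;
}

The FIFO stores bare AVFrame* pointers (sizeof(AVFrame*) per entry), so queuing is cheap; the cloned frames themselves stay alive until they are filtered or freed.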
Diffstat (limited to 'ffmpeg_filter.c')
-rw-r--r--   ffmpeg_filter.c   |   48
1 file changed, 15 insertions, 33 deletions
diff --git a/ffmpeg_filter.c b/ffmpeg_filter.c
index f13f523..8490f4a 100644
--- a/ffmpeg_filter.c
+++ b/ffmpeg_filter.c
@@ -217,6 +217,10 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
fg->inputs[0]->graph = fg;
fg->inputs[0]->format = -1;
+ fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
+ if (!fg->inputs[0]->frame_queue)
+ exit_program(1);
+
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = fg->inputs[0];
@@ -295,6 +299,11 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
fg->inputs[fg->nb_inputs - 1]->ist = ist;
fg->inputs[fg->nb_inputs - 1]->graph = fg;
fg->inputs[fg->nb_inputs - 1]->format = -1;
+ fg->inputs[fg->nb_inputs - 1]->type = ist->st->codecpar->codec_type;
+
+ fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
+ if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
+ exit_program(1);
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
@@ -691,12 +700,15 @@ static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
}
av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
}
- ist->sub2video.w = ist->resample_width = ifilter->width = w;
- ist->sub2video.h = ist->resample_height = ifilter->height = h;
+ ist->sub2video.w = ifilter->width = w;
+ ist->sub2video.h = ifilter->height = h;
+
+ ifilter->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
+ ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
/* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
palettes for all rectangles are identical or compatible */
- ist->resample_pix_fmt = ifilter->format = AV_PIX_FMT_RGB32;
+ ifilter->format = AV_PIX_FMT_RGB32;
ist->sub2video.frame = av_frame_alloc();
if (!ist->sub2video.frame)
@@ -1133,36 +1145,6 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
return 0;
}
-int ifilter_parameters_from_decoder(InputFilter *ifilter, const AVCodecContext *avctx)
-{
- av_buffer_unref(&ifilter->hw_frames_ctx);
-
- if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
- ifilter->format = avctx->pix_fmt;
- else
- ifilter->format = avctx->sample_fmt;
-
- ifilter->width = avctx->width;
- ifilter->height = avctx->height;
- if (ifilter->ist && ifilter->ist->st && ifilter->ist->st->sample_aspect_ratio.num)
- ifilter->sample_aspect_ratio = ifilter->ist->st->sample_aspect_ratio;
- else
- ifilter->sample_aspect_ratio = avctx->sample_aspect_ratio;
-
- ifilter->sample_rate = avctx->sample_rate;
- ifilter->channels = avctx->channels;
- ifilter->channel_layout = avctx->channel_layout;
-
- if (ifilter->ist && ifilter->ist->hw_frames_ctx) {
- ifilter->format = ifilter->ist->resample_pix_fmt;
- ifilter->hw_frames_ctx = av_buffer_ref(ifilter->ist->hw_frames_ctx);
- if (!ifilter->hw_frames_ctx)
- return AVERROR(ENOMEM);
- }
-
- return 0;
-}
-
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
{
int i;