diff options
Diffstat (limited to 'libavfilter/setpts.c')
-rw-r--r-- | libavfilter/setpts.c | 163 |
1 file changed, 100 insertions, 63 deletions
diff --git a/libavfilter/setpts.c b/libavfilter/setpts.c index ff0016d..0db0218 100644 --- a/libavfilter/setpts.c +++ b/libavfilter/setpts.c @@ -2,20 +2,20 @@ * Copyright (c) 2010 Stefano Sabatini * Copyright (c) 2008 Victor Paesa * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -29,24 +29,27 @@ #include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/time.h" - #include "audio.h" #include "avfilter.h" #include "internal.h" #include "video.h" -#include "config.h" - static const char *const var_names[] = { - "E", ///< Euler number + "FRAME_RATE", ///< defined only for constant frame-rate video "INTERLACED", ///< tell if the current frame is interlaced "N", ///< frame / sample number (starting at zero) - "PHI", ///< golden ratio - "PI", ///< greek pi + "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio) + "NB_SAMPLES", ///< number of samples in the current frame (only audio) + "POS", ///< original position in the file of the frame "PREV_INPTS", ///< previous input PTS + "PREV_INT", ///< previous input time 
in seconds "PREV_OUTPTS", ///< previous output PTS + "PREV_OUTT", ///< previous output time in seconds "PTS", ///< original pts in the file of the frame + "SAMPLE_RATE", ///< sample rate (only audio) "STARTPTS", ///< PTS at start of movie + "STARTT", ///< time at start of movie + "T", ///< original time in the file of the frame "TB", ///< timebase "RTCTIME", ///< wallclock (RTC) time in micro seconds "RTCSTART", ///< wallclock (RTC) time at the start of the movie in micro seconds @@ -56,15 +59,21 @@ static const char *const var_names[] = { }; enum var_name { - VAR_E, + VAR_FRAME_RATE, VAR_INTERLACED, VAR_N, - VAR_PHI, - VAR_PI, + VAR_NB_CONSUMED_SAMPLES, + VAR_NB_SAMPLES, + VAR_POS, VAR_PREV_INPTS, + VAR_PREV_INT, VAR_PREV_OUTPTS, + VAR_PREV_OUTT, VAR_PTS, + VAR_SAMPLE_RATE, VAR_STARTPTS, + VAR_STARTT, + VAR_T, VAR_TB, VAR_RTCTIME, VAR_RTCSTART, @@ -78,6 +87,7 @@ typedef struct SetPTSContext { char *expr_str; AVExpr *expr; double var_values[VAR_VARS_NB]; + enum AVMediaType type; } SetPTSContext; static av_cold int init(AVFilterContext *ctx) @@ -91,34 +101,54 @@ static av_cold int init(AVFilterContext *ctx) return ret; } - setpts->var_values[VAR_E] = M_E; setpts->var_values[VAR_N] = 0.0; setpts->var_values[VAR_S] = 0.0; - setpts->var_values[VAR_PHI] = M_PHI; - setpts->var_values[VAR_PI] = M_PI; setpts->var_values[VAR_PREV_INPTS] = NAN; + setpts->var_values[VAR_PREV_INT] = NAN; setpts->var_values[VAR_PREV_OUTPTS] = NAN; + setpts->var_values[VAR_PREV_OUTT] = NAN; setpts->var_values[VAR_STARTPTS] = NAN; + setpts->var_values[VAR_STARTT] = NAN; return 0; } static int config_input(AVFilterLink *inlink) { - SetPTSContext *setpts = inlink->dst->priv; + AVFilterContext *ctx = inlink->dst; + SetPTSContext *setpts = ctx->priv; + setpts->type = inlink->type; setpts->var_values[VAR_TB] = av_q2d(inlink->time_base); setpts->var_values[VAR_RTCSTART] = av_gettime(); - if (inlink->type == AVMEDIA_TYPE_AUDIO) { - setpts->var_values[VAR_SR] = inlink->sample_rate; - } + 
setpts->var_values[VAR_SR] = + setpts->var_values[VAR_SAMPLE_RATE] = + setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN; + + setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ? + av_q2d(inlink->frame_rate) : NAN; - av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f\n", setpts->var_values[VAR_TB]); + av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n", + setpts->var_values[VAR_TB], + setpts->var_values[VAR_FRAME_RATE], + setpts->var_values[VAR_SAMPLE_RATE]); return 0; } #define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d)) #define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) +#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb)) + +#define BUF_SIZE 64 + +static inline char *double2int64str(char *buf, double v) +{ + if (isnan(v)) snprintf(buf, BUF_SIZE, "nan"); + else snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v); + return buf; +} + +#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v) static int filter_frame(AVFilterLink *inlink, AVFrame *frame) { @@ -126,30 +156,43 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) int64_t in_pts = frame->pts; double d; - if (isnan(setpts->var_values[VAR_STARTPTS])) + if (isnan(setpts->var_values[VAR_STARTPTS])) { setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts); - + setpts->var_values[VAR_STARTT ] = TS2T(frame->pts, inlink->time_base); + } setpts->var_values[VAR_PTS ] = TS2D(frame->pts); + setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base); + setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? 
NAN : av_frame_get_pkt_pos(frame); setpts->var_values[VAR_RTCTIME ] = av_gettime(); if (inlink->type == AVMEDIA_TYPE_VIDEO) { setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame; - } else { + } else if (inlink->type == AVMEDIA_TYPE_AUDIO) { setpts->var_values[VAR_S] = frame->nb_samples; + setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples; } d = av_expr_eval(setpts->expr, setpts->var_values, NULL); frame->pts = D2TS(d); -#ifdef DEBUG av_log(inlink->dst, AV_LOG_DEBUG, - "n:%"PRId64" interlaced:%d pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n", + "N:%"PRId64" PTS:%s T:%f POS:%s", (int64_t)setpts->var_values[VAR_N], - (int)setpts->var_values[VAR_INTERLACED], - in_pts, in_pts * av_q2d(inlink->time_base), - frame->pts, frame->pts * av_q2d(inlink->time_base)); -#endif - + d2istr(setpts->var_values[VAR_PTS]), + setpts->var_values[VAR_T], + d2istr(setpts->var_values[VAR_POS])); + switch (inlink->type) { + case AVMEDIA_TYPE_VIDEO: + av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64, + (int64_t)setpts->var_values[VAR_INTERLACED]); + break; + case AVMEDIA_TYPE_AUDIO: + av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64, + (int64_t)setpts->var_values[VAR_NB_SAMPLES], + (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]); + break; + } + av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base)); if (inlink->type == AVMEDIA_TYPE_VIDEO) { setpts->var_values[VAR_N] += 1.0; @@ -158,7 +201,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) } setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts); + setpts->var_values[VAR_PREV_INT ] = TS2T(in_pts, inlink->time_base); setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts); + setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base); + if (setpts->type == AVMEDIA_TYPE_AUDIO) { + setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples; + } return ff_filter_frame(inlink->dst->outputs[0], frame); } @@ -170,27 +218,22 
@@ static av_cold void uninit(AVFilterContext *ctx) } #define OFFSET(x) offsetof(SetPTSContext, x) -#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM static const AVOption options[] = { { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS }, - { NULL }, + { NULL } }; #if CONFIG_SETPTS_FILTER -static const AVClass setpts_class = { - .class_name = "setpts", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; +#define setpts_options options +AVFILTER_DEFINE_CLASS(setpts); static const AVFilterPad avfilter_vf_setpts_inputs[] = { { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .get_video_buffer = ff_null_get_video_buffer, - .config_props = config_input, - .filter_frame = filter_frame, + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + .filter_frame = filter_frame, }, { NULL } }; @@ -215,23 +258,19 @@ AVFilter ff_vf_setpts = { .inputs = avfilter_vf_setpts_inputs, .outputs = avfilter_vf_setpts_outputs, }; -#endif +#endif /* CONFIG_SETPTS_FILTER */ #if CONFIG_ASETPTS_FILTER -static const AVClass asetpts_class = { - .class_name = "asetpts", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; + +#define asetpts_options options +AVFILTER_DEFINE_CLASS(asetpts); static const AVFilterPad asetpts_inputs[] = { { - .name = "default", - .type = AVMEDIA_TYPE_AUDIO, - .get_audio_buffer = ff_null_get_audio_buffer, - .config_props = config_input, - .filter_frame = filter_frame, + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_input, + .filter_frame = filter_frame, }, { NULL } }; @@ -249,11 +288,9 @@ AVFilter ff_af_asetpts = { .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."), .init = init, .uninit = uninit, - - .priv_size = 
sizeof(SetPTSContext), - .priv_class = &asetpts_class, - - .inputs = asetpts_inputs, - .outputs = asetpts_outputs, + .priv_size = sizeof(SetPTSContext), + .priv_class = &asetpts_class, + .inputs = asetpts_inputs, + .outputs = asetpts_outputs, }; -#endif +#endif /* CONFIG_ASETPTS_FILTER */ |