diff options
Diffstat (limited to 'libavfilter')
203 files changed, 44855 insertions, 3374 deletions
diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 4a3331a..938b183 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -1,11 +1,28 @@ +include $(SUBDIR)../config.mak + NAME = avfilter FFLIBS = avutil -FFLIBS-$(CONFIG_ASYNCTS_FILTER) += avresample -FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec -FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample -FFLIBS-$(CONFIG_SCALE_FILTER) += swscale +FFLIBS-$(CONFIG_ACONVERT_FILTER) += swresample +FFLIBS-$(CONFIG_AMOVIE_FILTER) += avformat avcodec +FFLIBS-$(CONFIG_ARESAMPLE_FILTER) += swresample +FFLIBS-$(CONFIG_ASYNCTS_FILTER) += avresample +FFLIBS-$(CONFIG_ATEMPO_FILTER) += avcodec +FFLIBS-$(CONFIG_DECIMATE_FILTER) += avcodec +FFLIBS-$(CONFIG_DESHAKE_FILTER) += avcodec +FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec +FFLIBS-$(CONFIG_MP_FILTER) += avcodec +FFLIBS-$(CONFIG_PAN_FILTER) += swresample +FFLIBS-$(CONFIG_PP_FILTER) += postproc +FFLIBS-$(CONFIG_REMOVELOGO_FILTER) += avformat avcodec swscale +FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample +FFLIBS-$(CONFIG_SCALE_FILTER) += swscale +FFLIBS-$(CONFIG_SHOWSPECTRUM_FILTER) += avcodec +FFLIBS-$(CONFIG_SMARTBLUR_FILTER) += swscale +FFLIBS-$(CONFIG_SUBTITLES_FILTER) += avformat avcodec -HEADERS = avfilter.h \ +HEADERS = asrc_abuffer.h \ + avcodec.h \ + avfilter.h \ avfiltergraph.h \ buffersink.h \ buffersrc.h \ @@ -21,44 +38,98 @@ OBJS = allfilters.o \ drawutils.o \ fifo.o \ formats.o \ + graphdump.o \ graphparser.o \ + sink_buffer.o \ + src_buffer.o \ + transform.o \ video.o \ + +OBJS-$(CONFIG_AVCODEC) += avcodec.o +OBJS-$(CONFIG_AVFORMAT) += lavfutils.o +OBJS-$(CONFIG_SWSCALE) += lswsutils.o + +OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o +OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o +OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o +OBJS-$(CONFIG_APAD_FILTER) += af_apad.o 
+OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o +OBJS-$(CONFIG_ASELECT_FILTER) += f_select.o +OBJS-$(CONFIG_ASENDCMD_FILTER) += f_sendcmd.o +OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o +OBJS-$(CONFIG_ASETPTS_FILTER) += f_setpts.o +OBJS-$(CONFIG_ASETTB_FILTER) += f_settb.o OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o OBJS-$(CONFIG_ASPLIT_FILTER) += split.o +OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o +OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o +OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o +OBJS-$(CONFIG_BASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_BIQUAD_FILTER) += af_biquads.o OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o +OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o +OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o +OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o +OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o OBJS-$(CONFIG_JOIN_FILTER) += af_join.o +OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_PAN_FILTER) += af_pan.o OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o +OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o +OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o +OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o +OBJS-$(CONFIG_AEVALSRC_FILTER) += asrc_aevalsrc.o OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o +OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o +OBJS-$(CONFIG_ASS_FILTER) += vf_ass.o +OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_alphaextract.o +OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o +OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o +OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o +OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o 
OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o +OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o +OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o +OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o +OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o +OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o +OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o +OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o +OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o +OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o +OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o +OBJS-$(CONFIG_MP_FILTER) += vf_mp.o OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o OBJS-$(CONFIG_NULL_FILTER) += vf_null.o @@ -66,27 +137,90 @@ OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o +OBJS-$(CONFIG_PP_FILTER) += vf_pp.o +OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o -OBJS-$(CONFIG_SELECT_FILTER) += vf_select.o +OBJS-$(CONFIG_SELECT_FILTER) += f_select.o +OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o -OBJS-$(CONFIG_SETPTS_FILTER) += vf_setpts.o +OBJS-$(CONFIG_SETFIELD_FILTER) += 
vf_setfield.o +OBJS-$(CONFIG_SETPTS_FILTER) += f_setpts.o OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o -OBJS-$(CONFIG_SETTB_FILTER) += vf_settb.o +OBJS-$(CONFIG_SETTB_FILTER) += f_settb.o OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o +OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o OBJS-$(CONFIG_SPLIT_FILTER) += split.o +OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_ass.o +OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o +OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o +OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o +OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o +OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o -OBJS-$(CONFIG_COLOR_FILTER) += vsrc_color.o +OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o +OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o -OBJS-$(CONFIG_MOVIE_FILTER) += vsrc_movie.o -OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_nullsrc.o +OBJS-$(CONFIG_LIFE_FILTER) += vsrc_life.o +OBJS-$(CONFIG_MANDELBROT_FILTER) += vsrc_mandelbrot.o +OBJS-$(CONFIG_MPTESTSRC_FILTER) += vsrc_mptestsrc.o +OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_testsrc.o OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o +OBJS-$(CONFIG_SMPTEBARS_FILTER) += vsrc_testsrc.o OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/mp_image.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/img_format.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_detc.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_dint.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_divtc.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_down3dright.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_dsize.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq2.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fil.o 
+#OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_filmdint.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fspp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_harddup.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_il.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ilpack.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ivtc.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_kerndeint.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_mcdeint.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_noise.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ow.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_perspective.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_phase.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pp7.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pullup.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_qp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_sab.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_softpulldown.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_softskip.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_spp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_stereo3d.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_telecine.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_tinterlace.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_unsharp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_uspp.o +OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/pullup.o + +# multimedia filters +OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o +OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o +OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o + +# multimedia sources +OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o +OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o + TOOLS = graph2dot -TESTPROGS = filtfmts +TESTPROGS = drawutils filtfmts formats + +clean:: + $(RM) $(CLEANSUFFIXES:%=libavfilter/libmpcodecs/%) diff --git a/libavfilter/af_aconvert.c b/libavfilter/af_aconvert.c new file mode 100644 index 0000000..e41095f --- /dev/null +++ b/libavfilter/af_aconvert.c @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2010 S.N. 
Hemanth Meenakshisundaram <smeenaks@ucsd.edu> + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2011 Mina Nagy Zaki + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * sample format and channel layout conversion audio filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libswresample/swresample.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +typedef struct { + enum AVSampleFormat out_sample_fmt; + int64_t out_chlayout; + struct SwrContext *swr; +} AConvertContext; + +static av_cold int init(AVFilterContext *ctx, const char *args0) +{ + AConvertContext *aconvert = ctx->priv; + char *arg, *ptr = NULL; + int ret = 0; + char *args = av_strdup(args0); + + aconvert->out_sample_fmt = AV_SAMPLE_FMT_NONE; + aconvert->out_chlayout = 0; + + if ((arg = av_strtok(args, ":", &ptr)) && strcmp(arg, "auto")) { + if ((ret = ff_parse_sample_format(&aconvert->out_sample_fmt, arg, ctx)) < 0) + goto end; + } + if ((arg = av_strtok(NULL, ":", &ptr)) && strcmp(arg, "auto")) { + if ((ret = ff_parse_channel_layout(&aconvert->out_chlayout, arg, ctx)) < 0) + goto end; + } + +end: + av_freep(&args); + return ret; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + 
AConvertContext *aconvert = ctx->priv; + swr_free(&aconvert->swr); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AConvertContext *aconvert = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterChannelLayouts *layouts; + + ff_formats_ref(ff_all_formats(AVMEDIA_TYPE_AUDIO), + &inlink->out_formats); + if (aconvert->out_sample_fmt != AV_SAMPLE_FMT_NONE) { + formats = NULL; + ff_add_format(&formats, aconvert->out_sample_fmt); + ff_formats_ref(formats, &outlink->in_formats); + } else + ff_formats_ref(ff_all_formats(AVMEDIA_TYPE_AUDIO), + &outlink->in_formats); + + ff_channel_layouts_ref(ff_all_channel_layouts(), + &inlink->out_channel_layouts); + if (aconvert->out_chlayout != 0) { + layouts = NULL; + ff_add_channel_layout(&layouts, aconvert->out_chlayout); + ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts); + } else + ff_channel_layouts_ref(ff_all_channel_layouts(), + &outlink->in_channel_layouts); + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + int ret; + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + AConvertContext *aconvert = ctx->priv; + char buf1[64], buf2[64]; + + /* if not specified in args, use the format and layout of the output */ + if (aconvert->out_sample_fmt == AV_SAMPLE_FMT_NONE) + aconvert->out_sample_fmt = outlink->format; + if (aconvert->out_chlayout == 0) + aconvert->out_chlayout = outlink->channel_layout; + + aconvert->swr = swr_alloc_set_opts(aconvert->swr, + aconvert->out_chlayout, aconvert->out_sample_fmt, inlink->sample_rate, + inlink->channel_layout, inlink->format, inlink->sample_rate, + 0, ctx); + if (!aconvert->swr) + return AVERROR(ENOMEM); + ret = swr_init(aconvert->swr); + if (ret < 0) + return ret; + + av_get_channel_layout_string(buf1, sizeof(buf1), + -1, inlink ->channel_layout); + av_get_channel_layout_string(buf2, sizeof(buf2), + -1, outlink->channel_layout); + 
av_log(ctx, AV_LOG_VERBOSE, + "fmt:%s cl:%s -> fmt:%s cl:%s\n", + av_get_sample_fmt_name(inlink ->format), buf1, + av_get_sample_fmt_name(outlink->format), buf2); + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref) +{ + AConvertContext *aconvert = inlink->dst->priv; + const int n = insamplesref->audio->nb_samples; + AVFilterLink *const outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n); + int ret; + + swr_convert(aconvert->swr, outsamplesref->data, n, + (void *)insamplesref->data, n); + + avfilter_copy_buffer_ref_props(outsamplesref, insamplesref); + outsamplesref->audio->channels = outlink->channels; + outsamplesref->audio->channel_layout = outlink->channel_layout; + + ret = ff_filter_frame(outlink, outsamplesref); + avfilter_unref_buffer(insamplesref); + return ret; +} + +static const AVFilterPad aconvert_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad aconvert_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter avfilter_af_aconvert = { + .name = "aconvert", + .description = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout."), + .priv_size = sizeof(AConvertContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = aconvert_inputs, + .outputs = aconvert_outputs, +}; diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c new file mode 100644 index 0000000..00a05e2 --- /dev/null +++ b/libavfilter/af_afade.c @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2013 Paul B Mahol + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * fade audio filter + */ + +#include "libavutil/opt.h" +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + int type; + int curve; + int nb_samples; + int64_t start_sample; + double duration; + double start_time; + + void (*fade_samples)(uint8_t **dst, uint8_t * const *src, + int nb_samples, int channels, int direction, + int64_t start, int range, int curve); +} AudioFadeContext; + +enum CurveType { TRI, QSIN, ESIN, HSIN, LOG, PAR, QUA, CUB, SQU, CBR }; + +#define OFFSET(x) offsetof(AudioFadeContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption afade_options[] = { + { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" }, + { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" }, + { "in", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "type" }, + { "out", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "type" }, + { "start_sample", "set expression of sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS }, + { "ss", "set expression of sample to start fading", 
OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS }, + { "nb_samples", "set expression for fade duration in samples", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS }, + { "ns", "set expression for fade duration in samples", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS }, + { "start_time", "set expression of second to start fading", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 7*24*60*60,FLAGS }, + { "st", "set expression of second to start fading", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 7*24*60*60,FLAGS }, + { "duration", "set expression for fade duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 24*60*60, FLAGS }, + { "d", "set expression for fade duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl = 0. }, 0, 24*60*60, FLAGS }, + { "curve", "set expression for fade curve", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" }, + { "c", "set expression for fade curve", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" }, + { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" }, + { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" }, + { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" }, + { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" }, + { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" }, + { "par", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" }, + { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" }, + { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" }, + { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" }, + { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR 
}, 0, 0, FLAGS, "curve" }, + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(afade); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + AudioFadeContext *afade = ctx->priv; + int ret; + + afade->class = &afade_class; + av_opt_set_defaults(afade); + + if ((ret = av_set_options_string(afade, args, "=", ":")) < 0) + return ret; + + if (INT64_MAX - afade->nb_samples < afade->start_sample) + return AVERROR(EINVAL); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats; + AVFilterChannelLayouts *layouts; + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP, + AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP, + AV_SAMPLE_FMT_NONE + }; + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + return 0; +} + +static double fade_gain(int curve, int64_t index, int range) +{ + double gain; + + gain = FFMAX(0.0, FFMIN(1.0, 1.0 * index / range)); + + switch (curve) { + case QSIN: + gain = sin(gain * M_PI / 2.0); + break; + case ESIN: + gain = 1.0 - cos(M_PI / 4.0 * (pow(2.0*gain - 1, 3) + 1)); + break; + case HSIN: + gain = (1.0 - cos(gain * M_PI)) / 2.0; + break; + case LOG: + gain = pow(0.1, (1 - gain) * 5.0); + break; + case PAR: + gain = (1 - (1 - gain) * (1 - gain)); + break; + case QUA: + gain *= gain; + break; + case CUB: + gain = gain * gain * gain; + break; + case SQU: + gain = sqrt(gain); + break; + case CBR: + gain = cbrt(gain); + break; + } + + return gain; +} + +#define FADE_PLANAR(name, type) \ +static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \ + int 
nb_samples, int channels, int dir, \ + int64_t start, int range, int curve) \ +{ \ + int i, c; \ + \ + for (i = 0; i < nb_samples; i++) { \ + double gain = fade_gain(curve, start + i * dir, range); \ + for (c = 0; c < channels; c++) { \ + type *d = (type *)dst[c]; \ + const type *s = (type *)src[c]; \ + \ + d[i] = s[i] * gain; \ + } \ + } \ +} + +#define FADE(name, type) \ +static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \ + int nb_samples, int channels, int dir, \ + int64_t start, int range, int curve) \ +{ \ + type *d = (type *)dst[0]; \ + const type *s = (type *)src[0]; \ + int i, c, k = 0; \ + \ + for (i = 0; i < nb_samples; i++) { \ + double gain = fade_gain(curve, start + i * dir, range); \ + for (c = 0; c < channels; c++, k++) \ + d[k] = s[k] * gain; \ + } \ +} + +FADE_PLANAR(dbl, double) +FADE_PLANAR(flt, float) +FADE_PLANAR(s16, int16_t) +FADE_PLANAR(s32, int32_t) + +FADE(dbl, double) +FADE(flt, float) +FADE(s16, int16_t) +FADE(s32, int32_t) + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AudioFadeContext *afade = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + + switch (inlink->format) { + case AV_SAMPLE_FMT_DBL: afade->fade_samples = fade_samples_dbl; break; + case AV_SAMPLE_FMT_DBLP: afade->fade_samples = fade_samples_dblp; break; + case AV_SAMPLE_FMT_FLT: afade->fade_samples = fade_samples_flt; break; + case AV_SAMPLE_FMT_FLTP: afade->fade_samples = fade_samples_fltp; break; + case AV_SAMPLE_FMT_S16: afade->fade_samples = fade_samples_s16; break; + case AV_SAMPLE_FMT_S16P: afade->fade_samples = fade_samples_s16p; break; + case AV_SAMPLE_FMT_S32: afade->fade_samples = fade_samples_s32; break; + case AV_SAMPLE_FMT_S32P: afade->fade_samples = fade_samples_s32p; break; + } + + if (afade->duration) + afade->nb_samples = afade->duration * inlink->sample_rate; + if (afade->start_time) + afade->start_sample = afade->start_time * inlink->sample_rate; + + return 0; +} + +static int 
filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) +{ + AudioFadeContext *afade = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + int nb_samples = buf->audio->nb_samples; + AVFilterBufferRef *out_buf; + int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base); + + if ((!afade->type && (afade->start_sample + afade->nb_samples < cur_sample)) || + ( afade->type && (cur_sample + afade->nb_samples < afade->start_sample))) + return ff_filter_frame(outlink, buf); + + if (buf->perms & AV_PERM_WRITE) { + out_buf = buf; + } else { + out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples); + if (!out_buf) + return AVERROR(ENOMEM); + out_buf->pts = buf->pts; + } + + if ((!afade->type && (cur_sample + nb_samples < afade->start_sample)) || + ( afade->type && (afade->start_sample + afade->nb_samples < cur_sample))) { + av_samples_set_silence(out_buf->extended_data, 0, nb_samples, + out_buf->audio->channels, out_buf->format); + } else { + int64_t start; + + if (!afade->type) + start = cur_sample - afade->start_sample; + else + start = afade->start_sample + afade->nb_samples - cur_sample; + + afade->fade_samples(out_buf->extended_data, buf->extended_data, + nb_samples, buf->audio->channels, + afade->type ? 
-1 : 1, start, + afade->nb_samples, afade->curve); + } + + if (buf != out_buf) + avfilter_unref_buffer(buf); + + return ff_filter_frame(outlink, out_buf); +} + +static const AVFilterPad avfilter_af_afade_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_afade_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter avfilter_af_afade = { + .name = "afade", + .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."), + .query_formats = query_formats, + .priv_size = sizeof(AudioFadeContext), + .init = init, + .inputs = avfilter_af_afade_inputs, + .outputs = avfilter_af_afade_outputs, + .priv_class = &afade_class, +}; diff --git a/libavfilter/af_aformat.c b/libavfilter/af_aformat.c index 2059cf2..9ac381f 100644 --- a/libavfilter/af_aformat.c +++ b/libavfilter/af_aformat.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2011 Mina Nagy Zaki * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -47,19 +47,15 @@ typedef struct AFormatContext { #define OFFSET(x) offsetof(AFormatContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM -static const AVOption options[] = { - { "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A }, - { "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A }, - { "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A }, +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption aformat_options[] = { + { "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F }, { NULL }, }; -static const AVClass aformat_class = { - .class_name = "aformat filter", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; +AVFILTER_DEFINE_CLASS(aformat); #define PARSE_FORMATS(str, type, list, add_to_list, get_fmt, none, desc) \ do { \ @@ -100,10 +96,8 @@ static av_cold int init(AVFilterContext *ctx, const char *args) s->class = &aformat_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args); + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) return ret; - } PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats, 
ff_add_format, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format"); @@ -127,7 +121,7 @@ static int query_formats(AVFilterContext *ctx) ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates : ff_all_samplerates()); ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts : - ff_all_channel_layouts()); + ff_all_channel_counts()); return 0; } @@ -157,4 +151,5 @@ AVFilter avfilter_af_aformat = { .inputs = avfilter_af_aformat_inputs, .outputs = avfilter_af_aformat_outputs, + .priv_class = &aformat_class, }; diff --git a/libavfilter/af_amerge.c b/libavfilter/af_amerge.c new file mode 100644 index 0000000..f67a7a8 --- /dev/null +++ b/libavfilter/af_amerge.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio merging filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/bprint.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libswresample/swresample.h" // only for SWR_CH_MAX +#include "avfilter.h" +#include "audio.h" +#include "bufferqueue.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + int nb_inputs; + int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */ + int bps; + struct amerge_input { + struct FFBufQueue queue; + int nb_ch; /**< number of channels for the input */ + int nb_samples; + int pos; + } *in; +} AMergeContext; + +#define OFFSET(x) offsetof(AMergeContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption amerge_options[] = { + { "inputs", "specify the number of inputs", OFFSET(nb_inputs), + AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS }, + {0} +}; + +AVFILTER_DEFINE_CLASS(amerge); + +static av_cold void uninit(AVFilterContext *ctx) +{ + AMergeContext *am = ctx->priv; + int i; + + for (i = 0; i < am->nb_inputs; i++) { + ff_bufqueue_discard_all(&am->in[i].queue); + av_freep(&ctx->input_pads[i].name); + } + av_freep(&am->in); +} + +static int query_formats(AVFilterContext *ctx) +{ + AMergeContext *am = ctx->priv; + int64_t inlayout[SWR_CH_MAX], outlayout = 0; + AVFilterFormats *formats; + AVFilterChannelLayouts *layouts; + int i, overlap = 0, nb_ch = 0; + + for (i = 0; i < am->nb_inputs; i++) { + if (!ctx->inputs[i]->in_channel_layouts || + !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) { + av_log(ctx, AV_LOG_ERROR, + "No channel layout for input %d\n", i + 1); + return AVERROR(EINVAL); + } + inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0]; + if 
(ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) { + char buf[256]; + av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]); + av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1); + } + am->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]); + if (outlayout & inlayout[i]) + overlap++; + outlayout |= inlayout[i]; + nb_ch += am->in[i].nb_ch; + } + if (nb_ch > SWR_CH_MAX) { + av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX); + return AVERROR(EINVAL); + } + if (overlap) { + av_log(ctx, AV_LOG_WARNING, + "Input channel layouts overlap: " + "output layout will be determined by the number of distinct input channels\n"); + for (i = 0; i < nb_ch; i++) + am->route[i] = i; + outlayout = av_get_default_channel_layout(nb_ch); + if (!outlayout) + outlayout = ((int64_t)1 << nb_ch) - 1; + } else { + int *route[SWR_CH_MAX]; + int c, out_ch_number = 0; + + route[0] = am->route; + for (i = 1; i < am->nb_inputs; i++) + route[i] = route[i - 1] + am->in[i - 1].nb_ch; + for (c = 0; c < 64; c++) + for (i = 0; i < am->nb_inputs; i++) + if ((inlayout[i] >> c) & 1) + *(route[i]++) = out_ch_number++; + } + formats = ff_make_format_list(ff_packed_sample_fmts_array); + ff_set_common_formats(ctx, formats); + for (i = 0; i < am->nb_inputs; i++) { + layouts = NULL; + ff_add_channel_layout(&layouts, inlayout[i]); + ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts); + } + layouts = NULL; + ff_add_channel_layout(&layouts, outlayout); + ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts); + ff_set_common_samplerates(ctx, ff_all_samplerates()); + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AMergeContext *am = ctx->priv; + AVBPrint bp; + int i; + + for (i = 1; i < am->nb_inputs; i++) { + if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) { + av_log(ctx, AV_LOG_ERROR, + "Inputs must have the same sample rate " + "%d for in%d 
vs %d\n", + ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate); + return AVERROR(EINVAL); + } + } + am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format); + outlink->sample_rate = ctx->inputs[0]->sample_rate; + outlink->time_base = ctx->inputs[0]->time_base; + + av_bprint_init(&bp, 0, 1); + for (i = 0; i < am->nb_inputs; i++) { + av_bprintf(&bp, "%sin%d:", i ? " + " : "", i); + av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout); + } + av_bprintf(&bp, " -> out:"); + av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout); + av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AMergeContext *am = ctx->priv; + int i, ret; + + for (i = 0; i < am->nb_inputs; i++) + if (!am->in[i].nb_samples) + if ((ret = ff_request_frame(ctx->inputs[i])) < 0) + return ret; + return 0; +} + +/** + * Copy samples from several input streams to one output stream. + * @param nb_inputs number of inputs + * @param in inputs; used only for the nb_ch field; + * @param route routing values; + * input channel i goes to output channel route[i]; + * i < in[0].nb_ch are the channels from the first input; + * i >= in[0].nb_ch are the channels from the second input + * @param ins pointer to the samples of each input, in packed format; + * will be left at the end of the copied samples + * @param outs pointer to the samples of the output, in packed format; + * must point to a buffer big enough; + * will be left at the end of the copied samples + * @param ns number of samples to copy + * @param bps bytes per sample + */ +static inline void copy_samples(int nb_inputs, struct amerge_input in[], + int *route, uint8_t *ins[], + uint8_t **outs, int ns, int bps) +{ + int *route_cur; + int i, c, nb_ch = 0; + + for (i = 0; i < nb_inputs; i++) + nb_ch += in[i].nb_ch; + while (ns--) { + route_cur = route; + for (i = 0; i < nb_inputs; i++) { + for (c = 0; c <
in[i].nb_ch; c++) { + memcpy((*outs) + bps * *(route_cur++), ins[i], bps); + ins[i] += bps; + } + } + *outs += nb_ch * bps; + } +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + AVFilterContext *ctx = inlink->dst; + AMergeContext *am = ctx->priv; + AVFilterLink *const outlink = ctx->outputs[0]; + int input_number; + int nb_samples, ns, i; + AVFilterBufferRef *outbuf, *inbuf[SWR_CH_MAX]; + uint8_t *ins[SWR_CH_MAX], *outs; + + for (input_number = 0; input_number < am->nb_inputs; input_number++) + if (inlink == ctx->inputs[input_number]) + break; + av_assert1(input_number < am->nb_inputs); + if (ff_bufqueue_is_full(&am->in[input_number].queue)) { + av_log(ctx, AV_LOG_ERROR, "Buffer queue overflow\n"); + avfilter_unref_buffer(insamples); + return AVERROR(ENOMEM); + } + ff_bufqueue_add(ctx, &am->in[input_number].queue, insamples); + am->in[input_number].nb_samples += insamples->audio->nb_samples; + nb_samples = am->in[0].nb_samples; + for (i = 1; i < am->nb_inputs; i++) + nb_samples = FFMIN(nb_samples, am->in[i].nb_samples); + if (!nb_samples) + return 0; + + outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE, nb_samples); + outs = outbuf->data[0]; + for (i = 0; i < am->nb_inputs; i++) { + inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0); + ins[i] = inbuf[i]->data[0] + + am->in[i].pos * am->in[i].nb_ch * am->bps; + } + avfilter_copy_buffer_ref_props(outbuf, inbuf[0]); + outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? 
AV_NOPTS_VALUE : + inbuf[0]->pts + + av_rescale_q(am->in[0].pos, + (AVRational){ 1, ctx->inputs[0]->sample_rate }, + ctx->outputs[0]->time_base); + + outbuf->audio->nb_samples = nb_samples; + outbuf->audio->channel_layout = outlink->channel_layout; + outbuf->audio->channels = outlink->channels; + + while (nb_samples) { + ns = nb_samples; + for (i = 0; i < am->nb_inputs; i++) + ns = FFMIN(ns, inbuf[i]->audio->nb_samples - am->in[i].pos); + /* Unroll the most common sample formats: speed +~350% for the loop, + +~13% overall (including two common decoders) */ + switch (am->bps) { + case 1: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 1); + break; + case 2: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 2); + break; + case 4: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 4); + break; + default: + copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, am->bps); + break; + } + + nb_samples -= ns; + for (i = 0; i < am->nb_inputs; i++) { + am->in[i].nb_samples -= ns; + am->in[i].pos += ns; + if (am->in[i].pos == inbuf[i]->audio->nb_samples) { + am->in[i].pos = 0; + avfilter_unref_buffer(inbuf[i]); + ff_bufqueue_get(&am->in[i].queue); + inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0); + ins[i] = inbuf[i] ? 
inbuf[i]->data[0] : NULL; + } + } + } + return ff_filter_frame(ctx->outputs[0], outbuf); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + AMergeContext *am = ctx->priv; + int ret, i; + + am->class = &amerge_class; + av_opt_set_defaults(am); + ret = av_set_options_string(am, args, "=", ":"); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Error parsing options: '%s'\n", args); + return ret; + } + am->in = av_calloc(am->nb_inputs, sizeof(*am->in)); + if (!am->in) + return AVERROR(ENOMEM); + for (i = 0; i < am->nb_inputs; i++) { + char *name = av_asprintf("in%d", i); + AVFilterPad pad = { + .name = name, + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }; + if (!name) + return AVERROR(ENOMEM); + ff_insert_inpad(ctx, i, &pad); + } + return 0; +} + +static const AVFilterPad amerge_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_af_amerge = { + .name = "amerge", + .description = NULL_IF_CONFIG_SMALL("Merge two audio streams into " + "a single multi-channel stream."), + .priv_size = sizeof(AMergeContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = NULL, + .outputs = amerge_outputs, + .priv_class = &amerge_class, +}; diff --git a/libavfilter/af_amix.c b/libavfilter/af_amix.c index c2fb158..aeefff8 100644 --- a/libavfilter/af_amix.c +++ b/libavfilter/af_amix.c @@ -174,27 +174,22 @@ typedef struct MixContext { #define OFFSET(x) offsetof(MixContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM -static const AVOption options[] = { +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption amix_options[] = { { "inputs", "Number of inputs.", - OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A }, + OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A|F }, { "duration", "How to determine the end-of-stream.", - 
OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A, "duration" }, - { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A, "duration" }, - { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A, "duration" }, - { "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A, "duration" }, + OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" }, + { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A|F, "duration" }, + { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A|F, "duration" }, + { "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A|F, "duration" }, { "dropout_transition", "Transition time, in seconds, for volume " "renormalization when an input stream ends.", - OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A }, + OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F }, { NULL }, }; -static const AVClass amix_class = { - .class_name = "amix filter", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - +AVFILTER_DEFINE_CLASS(amix); /** * Update the scaling factors to apply to each input during mixing. 
@@ -496,10 +491,8 @@ static int init(AVFilterContext *ctx, const char *args) s->class = &amix_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args); + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) return ret; - } av_opt_free(s); for (i = 0; i < s->nb_inputs; i++) { @@ -570,4 +563,5 @@ AVFilter avfilter_af_amix = { .inputs = NULL, .outputs = avfilter_af_amix_outputs, + .priv_class = &amix_class, }; diff --git a/libavfilter/af_anull.c b/libavfilter/af_anull.c index a791064..c61da3b 100644 --- a/libavfilter/af_anull.c +++ b/libavfilter/af_anull.c @@ -1,18 +1,19 @@ /* - * This file is part of Libav. + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu> + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -49,6 +50,8 @@ AVFilter avfilter_af_anull = { .priv_size = 0, + .query_formats = ff_query_formats_all, + .inputs = avfilter_af_anull_inputs, .outputs = avfilter_af_anull_outputs, diff --git a/libavfilter/af_apad.c b/libavfilter/af_apad.c new file mode 100644 index 0000000..18a0170 --- /dev/null +++ b/libavfilter/af_apad.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/** + * @file + * audio pad filter. 
+ * + * Based on af_aresample.c + */ + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" +#include "libavutil/avassert.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + int64_t next_pts; + + int packet_size; + int64_t pad_len; + int64_t whole_len; +} APadContext; + +#define OFFSET(x) offsetof(APadContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption apad_options[] = { + { "packet_size", "set silence packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, { .i64 = 4096 }, 0, INT_MAX, A }, + { "pad_len", "number of samples of silence to add", OFFSET(pad_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A }, + { "whole_len", "target number of samples in the audio stream", OFFSET(whole_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(apad); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + APadContext *apad = ctx->priv; + + apad->class = &apad_class; + apad->next_pts = AV_NOPTS_VALUE; + + av_opt_set_defaults(apad); + + if ((ret = av_opt_set_from_string(apad, args, NULL, "=", ":")) < 0) + return ret; + + if (apad->whole_len && apad->pad_len) { + av_log(ctx, AV_LOG_ERROR, "Both whole and pad length are set, this is not possible\n"); + return AVERROR(EINVAL); + } + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) +{ + AVFilterContext *ctx = inlink->dst; + APadContext *apad = ctx->priv; + + if (apad->whole_len) + apad->whole_len -= frame->audio->nb_samples; + + apad->next_pts = frame->pts + av_rescale_q(frame->audio->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base); + return ff_filter_frame(ctx->outputs[0], frame); +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + APadContext *apad = ctx->priv; 
+ int ret; + + ret = ff_request_frame(ctx->inputs[0]); + + if (ret == AVERROR_EOF) { + int n_out = apad->packet_size; + AVFilterBufferRef *outsamplesref; + + if (apad->whole_len > 0) { + apad->pad_len = apad->whole_len; + apad->whole_len = 0; + } + if (apad->pad_len > 0) { + n_out = FFMIN(n_out, apad->pad_len); + apad->pad_len -= n_out; + } + + if(!n_out) + return AVERROR_EOF; + + outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out); + if (!outsamplesref) + return AVERROR(ENOMEM); + + av_assert0(outsamplesref->audio->sample_rate == outlink->sample_rate); + av_assert0(outsamplesref->audio->nb_samples == n_out); + + av_samples_set_silence(outsamplesref->extended_data, 0, + n_out, + outsamplesref->audio->channels, + outsamplesref->format); + + outsamplesref->pts = apad->next_pts; + if (apad->next_pts != AV_NOPTS_VALUE) + apad->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base); + + return ff_filter_frame(outlink, outsamplesref); + } + return ret; +} + +static const AVFilterPad apad_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL }, +}; + +static const AVFilterPad apad_outputs[] = { + { + .name = "default", + .request_frame = request_frame, + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL }, +}; + +AVFilter avfilter_af_apad = { + .name = "apad", + .description = NULL_IF_CONFIG_SMALL("Pad audio with silence."), + .init = init, + .priv_size = sizeof(APadContext), + .inputs = apad_inputs, + .outputs = apad_outputs, + .priv_class = &apad_class, +}; diff --git a/libavfilter/af_aresample.c b/libavfilter/af_aresample.c new file mode 100644 index 0000000..2e3867e --- /dev/null +++ b/libavfilter/af_aresample.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2011 Mina Nagy Zaki + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * resampling audio filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" +#include "libavutil/avassert.h" +#include "libswresample/swresample.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +typedef struct { + double ratio; + struct SwrContext *swr; + int64_t next_pts; + int req_fullfilled; +} AResampleContext; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + AResampleContext *aresample = ctx->priv; + int ret = 0; + char *argd = av_strdup(args); + + aresample->next_pts = AV_NOPTS_VALUE; + aresample->swr = swr_alloc(); + if (!aresample->swr) { + ret = AVERROR(ENOMEM); + goto end; + } + + if (args) { + char *ptr = argd, *token; + + while (token = av_strtok(ptr, ":", &ptr)) { + char *value; + av_strtok(token, "=", &value); + + if (value) { + if ((ret = av_opt_set(aresample->swr, token, value, 0)) < 0) + goto end; + } else { + int out_rate; + if ((ret = ff_parse_sample_rate(&out_rate, token, ctx)) < 0) + goto end; + if ((ret = av_opt_set_int(aresample->swr, "osr", out_rate, 0)) < 0) + goto end; + } + } + } +end: + av_free(argd); + return ret; +} + +static av_cold void 
uninit(AVFilterContext *ctx) +{ + AResampleContext *aresample = ctx->priv; + swr_free(&aresample->swr); +} + +static int query_formats(AVFilterContext *ctx) +{ + AResampleContext *aresample = ctx->priv; + int out_rate = av_get_int(aresample->swr, "osr", NULL); + uint64_t out_layout = av_get_int(aresample->swr, "ocl", NULL); + enum AVSampleFormat out_format = av_get_int(aresample->swr, "osf", NULL); + + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + + AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); + AVFilterFormats *out_formats; + AVFilterFormats *in_samplerates = ff_all_samplerates(); + AVFilterFormats *out_samplerates; + AVFilterChannelLayouts *in_layouts = ff_all_channel_counts(); + AVFilterChannelLayouts *out_layouts; + + ff_formats_ref (in_formats, &inlink->out_formats); + ff_formats_ref (in_samplerates, &inlink->out_samplerates); + ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts); + + if(out_rate > 0) { + out_samplerates = ff_make_format_list((int[]){ out_rate, -1 }); + } else { + out_samplerates = ff_all_samplerates(); + } + ff_formats_ref(out_samplerates, &outlink->in_samplerates); + + if(out_format != AV_SAMPLE_FMT_NONE) { + out_formats = ff_make_format_list((int[]){ out_format, -1 }); + } else + out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO); + ff_formats_ref(out_formats, &outlink->in_formats); + + if(out_layout) { + out_layouts = avfilter_make_format64_list((int64_t[]){ out_layout, -1 }); + } else + out_layouts = ff_all_channel_counts(); + ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts); + + return 0; +} + + +static int config_output(AVFilterLink *outlink) +{ + int ret; + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + AResampleContext *aresample = ctx->priv; + int out_rate; + uint64_t out_layout; + enum AVSampleFormat out_format; + char inchl_buf[128], outchl_buf[128]; + + aresample->swr = swr_alloc_set_opts(aresample->swr, + 
outlink->channel_layout, outlink->format, outlink->sample_rate, + inlink->channel_layout, inlink->format, inlink->sample_rate, + 0, ctx); + if (!aresample->swr) + return AVERROR(ENOMEM); + if (!inlink->channel_layout) + av_opt_set_int(aresample->swr, "ich", inlink->channels, 0); + if (!outlink->channel_layout) + av_opt_set_int(aresample->swr, "och", outlink->channels, 0); + + ret = swr_init(aresample->swr); + if (ret < 0) + return ret; + + out_rate = av_get_int(aresample->swr, "osr", NULL); + out_layout = av_get_int(aresample->swr, "ocl", NULL); + out_format = av_get_int(aresample->swr, "osf", NULL); + outlink->time_base = (AVRational) {1, out_rate}; + + av_assert0(outlink->sample_rate == out_rate); + av_assert0(outlink->channel_layout == out_layout); + av_assert0(outlink->format == out_format); + + aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate; + + av_get_channel_layout_string(inchl_buf, sizeof(inchl_buf), -1, inlink ->channel_layout); + av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), -1, outlink->channel_layout); + + av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n", + inlink ->channels, inchl_buf, av_get_sample_fmt_name(inlink->format), inlink->sample_rate, + outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate); + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref) +{ + AResampleContext *aresample = inlink->dst->priv; + const int n_in = insamplesref->audio->nb_samples; + int n_out = n_in * aresample->ratio * 2 + 256; + AVFilterLink *const outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out); + int ret; + + if(!outsamplesref) + return AVERROR(ENOMEM); + + avfilter_copy_buffer_ref_props(outsamplesref, insamplesref); + outsamplesref->format = outlink->format; + outsamplesref->audio->channels = outlink->channels; + 
outsamplesref->audio->channel_layout = outlink->channel_layout; + outsamplesref->audio->sample_rate = outlink->sample_rate; + + if(insamplesref->pts != AV_NOPTS_VALUE) { + int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den); + int64_t outpts= swr_next_pts(aresample->swr, inpts); + aresample->next_pts = + outsamplesref->pts = ROUNDED_DIV(outpts, inlink->sample_rate); + } else { + outsamplesref->pts = AV_NOPTS_VALUE; + } + n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, + (void *)insamplesref->extended_data, n_in); + if (n_out <= 0) { + avfilter_unref_buffer(outsamplesref); + avfilter_unref_buffer(insamplesref); + return 0; + } + + outsamplesref->audio->nb_samples = n_out; + + ret = ff_filter_frame(outlink, outsamplesref); + aresample->req_fullfilled= 1; + avfilter_unref_buffer(insamplesref); + return ret; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AResampleContext *aresample = ctx->priv; + AVFilterLink *const inlink = outlink->src->inputs[0]; + int ret; + + aresample->req_fullfilled = 0; + do{ + ret = ff_request_frame(ctx->inputs[0]); + }while(!aresample->req_fullfilled && ret>=0); + + if (ret == AVERROR_EOF) { + AVFilterBufferRef *outsamplesref; + int n_out = 4096; + + outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out); + if (!outsamplesref) + return AVERROR(ENOMEM); + n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0); + if (n_out <= 0) { + avfilter_unref_buffer(outsamplesref); + return (n_out == 0) ? 
AVERROR_EOF : n_out; + } + + outsamplesref->audio->sample_rate = outlink->sample_rate; + outsamplesref->audio->nb_samples = n_out; +#if 0 + outsamplesref->pts = aresample->next_pts; + if(aresample->next_pts != AV_NOPTS_VALUE) + aresample->next_pts += av_rescale_q(n_out, (AVRational){1 ,outlink->sample_rate}, outlink->time_base); +#else + outsamplesref->pts = swr_next_pts(aresample->swr, INT64_MIN); + outsamplesref->pts = ROUNDED_DIV(outsamplesref->pts, inlink->sample_rate); +#endif + + ff_filter_frame(outlink, outsamplesref); + return 0; + } + return ret; +} + +static const AVFilterPad aresample_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL }, +}; + +static const AVFilterPad aresample_outputs[] = { + { + .name = "default", + .config_props = config_output, + .request_frame = request_frame, + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL }, +}; + +AVFilter avfilter_af_aresample = { + .name = "aresample", + .description = NULL_IF_CONFIG_SMALL("Resample audio data."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = sizeof(AResampleContext), + .inputs = aresample_inputs, + .outputs = aresample_outputs, +}; diff --git a/libavfilter/af_asetnsamples.c b/libavfilter/af_asetnsamples.c new file mode 100644 index 0000000..ee80c1c --- /dev/null +++ b/libavfilter/af_asetnsamples.c @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2012 Andrey Utkin + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Filter that changes number of samples on single output operation + */ + +#include "libavutil/audio_fifo.h" +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" +#include "formats.h" + +typedef struct { + const AVClass *class; + int nb_out_samples; ///< how many samples to output + AVAudioFifo *fifo; ///< samples are queued here + int64_t next_out_pts; + int req_fullfilled; + int pad; +} ASNSContext; + +#define OFFSET(x) offsetof(ASNSContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption asetnsamples_options[] = { +{ "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS }, +{ "p", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS }, +{ "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS }, +{ "n", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS }, +{ NULL } +}; + +AVFILTER_DEFINE_CLASS(asetnsamples); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ASNSContext *asns = ctx->priv; + int err; + + asns->class = &asetnsamples_class; + av_opt_set_defaults(asns); + + if ((err = av_set_options_string(asns, args, "=", ":")) < 0) + return err; + + asns->next_out_pts = AV_NOPTS_VALUE; + 
av_log(ctx, AV_LOG_VERBOSE, "nb_out_samples:%d pad:%d\n", asns->nb_out_samples, asns->pad); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ASNSContext *asns = ctx->priv; + av_audio_fifo_free(asns->fifo); +} + +static int config_props_output(AVFilterLink *outlink) +{ + ASNSContext *asns = outlink->src->priv; + int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); + + asns->fifo = av_audio_fifo_alloc(outlink->format, nb_channels, asns->nb_out_samples); + if (!asns->fifo) + return AVERROR(ENOMEM); + + return 0; +} + +static int push_samples(AVFilterLink *outlink) +{ + ASNSContext *asns = outlink->src->priv; + AVFilterBufferRef *outsamples = NULL; + int nb_out_samples, nb_pad_samples; + + if (asns->pad) { + nb_out_samples = av_audio_fifo_size(asns->fifo) ? asns->nb_out_samples : 0; + nb_pad_samples = nb_out_samples - FFMIN(nb_out_samples, av_audio_fifo_size(asns->fifo)); + } else { + nb_out_samples = FFMIN(asns->nb_out_samples, av_audio_fifo_size(asns->fifo)); + nb_pad_samples = 0; + } + + if (!nb_out_samples) + return 0; + + outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_out_samples); + av_assert0(outsamples); + + av_audio_fifo_read(asns->fifo, + (void **)outsamples->extended_data, nb_out_samples); + + if (nb_pad_samples) + av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples, + nb_pad_samples, av_get_channel_layout_nb_channels(outlink->channel_layout), + outlink->format); + outsamples->audio->nb_samples = nb_out_samples; + outsamples->audio->channel_layout = outlink->channel_layout; + outsamples->audio->sample_rate = outlink->sample_rate; + outsamples->pts = asns->next_out_pts; + + if (asns->next_out_pts != AV_NOPTS_VALUE) + asns->next_out_pts += nb_out_samples; + + ff_filter_frame(outlink, outsamples); + asns->req_fullfilled = 1; + return nb_out_samples; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + AVFilterContext *ctx = inlink->dst; 
+ ASNSContext *asns = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + int ret; + int nb_samples = insamples->audio->nb_samples; + + if (av_audio_fifo_space(asns->fifo) < nb_samples) { + av_log(ctx, AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples); + ret = av_audio_fifo_realloc(asns->fifo, av_audio_fifo_size(asns->fifo) + nb_samples); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, + "Stretching audio fifo failed, discarded %d samples\n", nb_samples); + return -1; + } + } + av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples); + if (asns->next_out_pts == AV_NOPTS_VALUE) + asns->next_out_pts = insamples->pts; + avfilter_unref_buffer(insamples); + + while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples) + push_samples(outlink); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + ASNSContext *asns = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + asns->req_fullfilled = 0; + do { + ret = ff_request_frame(inlink); + } while (!asns->req_fullfilled && ret >= 0); + + if (ret == AVERROR_EOF) + while (push_samples(outlink)) + ; + + return ret; +} + +static const AVFilterPad asetnsamples_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_WRITE, + }, + { NULL } +}; + +static const AVFilterPad asetnsamples_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .request_frame = request_frame, + .config_props = config_props_output, + }, + { NULL } +}; + +AVFilter avfilter_af_asetnsamples = { + .name = "asetnsamples", + .description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."), + .priv_size = sizeof(ASNSContext), + .init = init, + .uninit = uninit, + .inputs = asetnsamples_inputs, + .outputs = asetnsamples_outputs, + .priv_class = &asetnsamples_class, +}; diff --git a/libavfilter/af_ashowinfo.c b/libavfilter/af_ashowinfo.c 
index c8e830e..7e7543f 100644 --- a/libavfilter/af_ashowinfo.c +++ b/libavfilter/af_ashowinfo.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2011 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -30,6 +30,7 @@ #include "libavutil/channel_layout.h" #include "libavutil/common.h" #include "libavutil/mem.h" +#include "libavutil/timestamp.h" #include "libavutil/samplefmt.h" #include "audio.h" @@ -48,17 +49,6 @@ typedef struct AShowInfoContext { uint64_t frame; } AShowInfoContext; -static int config_input(AVFilterLink *inlink) -{ - AShowInfoContext *s = inlink->dst->priv; - int channels = av_get_channel_layout_nb_channels(inlink->channel_layout); - s->plane_checksums = av_malloc(channels * sizeof(*s->plane_checksums)); - if (!s->plane_checksums) - return AVERROR(ENOMEM); - - return 0; -} - static void uninit(AVFilterContext *ctx) { AShowInfoContext *s = ctx->priv; @@ -77,6 +67,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) int data_size = buf->audio->nb_samples * block_align; int planes = planar ? 
channels : 1; int i; + void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums)); + + if (!tmp_ptr) + return AVERROR(ENOMEM); + s->plane_checksums = tmp_ptr; for (i = 0; i < planes; i++) { uint8_t *data = buf->extended_data[i]; @@ -90,11 +85,13 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) buf->audio->channel_layout); av_log(ctx, AV_LOG_INFO, - "n:%"PRIu64" pts:%"PRId64" pts_time:%f " - "fmt:%s chlayout:%s rate:%d nb_samples:%d " + "n:%"PRIu64" pts:%s pts_time:%s pos:%"PRId64" " + "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d " "checksum:%08X ", - s->frame, buf->pts, buf->pts * av_q2d(inlink->time_base), - av_get_sample_fmt_name(buf->format), chlayout_str, + s->frame, + av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base), + buf->pos, + av_get_sample_fmt_name(buf->format), buf->audio->channels, chlayout_str, buf->audio->sample_rate, buf->audio->nb_samples, checksum); @@ -112,7 +109,6 @@ static const AVFilterPad inputs[] = { .name = "default", .type = AVMEDIA_TYPE_AUDIO, .get_audio_buffer = ff_null_get_audio_buffer, - .config_props = config_input, .filter_frame = filter_frame, .min_perms = AV_PERM_READ, }, diff --git a/libavfilter/af_astreamsync.c b/libavfilter/af_astreamsync.c new file mode 100644 index 0000000..269ffc1 --- /dev/null +++ b/libavfilter/af_astreamsync.c @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Stream (de)synchronization filter + */ + +#include "libavutil/eval.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +#define QUEUE_SIZE 16 + +static const char * const var_names[] = { + "b1", "b2", + "s1", "s2", + "t1", "t2", + NULL +}; + +enum var_name { + VAR_B1, VAR_B2, + VAR_S1, VAR_S2, + VAR_T1, VAR_T2, + VAR_NB +}; + +typedef struct { + AVExpr *expr; + double var_values[VAR_NB]; + struct buf_queue { + AVFilterBufferRef *buf[QUEUE_SIZE]; + unsigned tail, nb; + /* buf[tail] is the oldest, + buf[(tail + nb) % QUEUE_SIZE] is where the next is added */ + } queue[2]; + int req[2]; + int next_out; + int eof; /* bitmask, one bit for each stream */ +} AStreamSyncContext; + +static const char *default_expr = "t1-t2"; + +static av_cold int init(AVFilterContext *ctx, const char *args0) +{ + AStreamSyncContext *as = ctx->priv; + const char *expr = args0 ? 
args0 : default_expr; + int r, i; + + r = av_expr_parse(&as->expr, expr, var_names, + NULL, NULL, NULL, NULL, 0, ctx); + if (r < 0) { + av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", expr); + return r; + } + for (i = 0; i < 42; i++) + av_expr_eval(as->expr, as->var_values, NULL); /* exercize prng */ + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + int i; + AVFilterFormats *formats, *rates; + AVFilterChannelLayouts *layouts; + + for (i = 0; i < 2; i++) { + formats = ctx->inputs[i]->in_formats; + ff_formats_ref(formats, &ctx->inputs[i]->out_formats); + ff_formats_ref(formats, &ctx->outputs[i]->in_formats); + rates = ff_all_samplerates(); + ff_formats_ref(rates, &ctx->inputs[i]->out_samplerates); + ff_formats_ref(rates, &ctx->outputs[i]->in_samplerates); + layouts = ctx->inputs[i]->in_channel_layouts; + ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts); + ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts); + } + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + int id = outlink == ctx->outputs[1]; + + outlink->sample_rate = ctx->inputs[id]->sample_rate; + outlink->time_base = ctx->inputs[id]->time_base; + return 0; +} + +static int send_out(AVFilterContext *ctx, int out_id) +{ + AStreamSyncContext *as = ctx->priv; + struct buf_queue *queue = &as->queue[out_id]; + AVFilterBufferRef *buf = queue->buf[queue->tail]; + int ret; + + queue->buf[queue->tail] = NULL; + as->var_values[VAR_B1 + out_id]++; + as->var_values[VAR_S1 + out_id] += buf->audio->nb_samples; + if (buf->pts != AV_NOPTS_VALUE) + as->var_values[VAR_T1 + out_id] = + av_q2d(ctx->outputs[out_id]->time_base) * buf->pts; + as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples / + (double)ctx->inputs[out_id]->sample_rate; + ret = ff_filter_frame(ctx->outputs[out_id], buf); + queue->nb--; + queue->tail = (queue->tail + 1) % QUEUE_SIZE; + if (as->req[out_id]) + as->req[out_id]--; + 
return ret; +} + +static void send_next(AVFilterContext *ctx) +{ + AStreamSyncContext *as = ctx->priv; + int i; + + while (1) { + if (!as->queue[as->next_out].nb) + break; + send_out(ctx, as->next_out); + if (!as->eof) + as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0; + } + for (i = 0; i < 2; i++) + if (as->queue[i].nb == QUEUE_SIZE) + send_out(ctx, i); +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AStreamSyncContext *as = ctx->priv; + int id = outlink == ctx->outputs[1]; + + as->req[id]++; + while (as->req[id] && !(as->eof & (1 << id))) { + if (as->queue[as->next_out].nb) { + send_next(ctx); + } else { + as->eof |= 1 << as->next_out; + ff_request_frame(ctx->inputs[as->next_out]); + if (as->eof & (1 << as->next_out)) + as->next_out = !as->next_out; + } + } + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + AVFilterContext *ctx = inlink->dst; + AStreamSyncContext *as = ctx->priv; + int id = inlink == ctx->inputs[1]; + + as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] = + insamples; + as->eof &= ~(1 << id); + send_next(ctx); + return 0; +} + +static const AVFilterPad astreamsync_inputs[] = { + { + .name = "in1", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + },{ + .name = "in2", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }, + { NULL } +}; + +static const AVFilterPad astreamsync_outputs[] = { + { + .name = "out1", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame, + },{ + .name = "out2", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_af_astreamsync = { + .name = "astreamsync", + .description = NULL_IF_CONFIG_SMALL("Copy two streams of audio data " + "in a 
configurable order."), + .priv_size = sizeof(AStreamSyncContext), + .init = init, + .query_formats = query_formats, + .inputs = astreamsync_inputs, + .outputs = astreamsync_outputs, +}; diff --git a/libavfilter/af_asyncts.c b/libavfilter/af_asyncts.c index 40680c8..500be0f 100644 --- a/libavfilter/af_asyncts.c +++ b/libavfilter/af_asyncts.c @@ -47,34 +47,28 @@ typedef struct ASyncContext { #define OFFSET(x) offsetof(ASyncContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM -static const AVOption options[] = { - { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A }, +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption asyncts_options[] = { + { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A|F }, { "min_delta", "Minimum difference between timestamps and audio data " - "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A }, - { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A }, - { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A }, + "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F }, + { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F }, + { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F }, { NULL }, }; -static const AVClass async_class = { - .class_name = "asyncts filter", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; +AVFILTER_DEFINE_CLASS(asyncts); 
static int init(AVFilterContext *ctx, const char *args) { ASyncContext *s = ctx->priv; int ret; - s->class = &async_class; + s->class = &asyncts_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args); + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) return ret; - } av_opt_free(s); s->pts = AV_NOPTS_VALUE; @@ -285,7 +279,7 @@ static const AVFilterPad avfilter_af_asyncts_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, - .filter_frame = filter_frame, + .filter_frame = filter_frame }, { NULL } }; @@ -311,4 +305,5 @@ AVFilter avfilter_af_asyncts = { .inputs = avfilter_af_asyncts_inputs, .outputs = avfilter_af_asyncts_outputs, + .priv_class = &asyncts_class, }; diff --git a/libavfilter/af_atempo.c b/libavfilter/af_atempo.c new file mode 100644 index 0000000..d186aaf --- /dev/null +++ b/libavfilter/af_atempo.c @@ -0,0 +1,1169 @@ +/* + * Copyright (c) 2012 Pavel Koshevoy <pkoshevoy at gmail dot com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * tempo scaling audio filter -- an implementation of WSOLA algorithm + * + * Based on MIT licensed yaeAudioTempoFilter.h and yaeAudioFragment.h + * from Apprentice Video player by Pavel Koshevoy. + * https://sourceforge.net/projects/apprenticevideo/ + * + * An explanation of SOLA algorithm is available at + * http://www.surina.net/article/time-and-pitch-scaling.html + * + * WSOLA is very similar to SOLA, only one major difference exists between + * these algorithms. SOLA shifts audio fragments along the output stream, + * where as WSOLA shifts audio fragments along the input stream. + * + * The advantage of WSOLA algorithm is that the overlap region size is + * always the same, therefore the blending function is constant and + * can be precomputed. 
+ */ + +#include <float.h> +#include "libavcodec/avfft.h" +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/eval.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +/** + * A fragment of audio waveform + */ +typedef struct { + // index of the first sample of this fragment in the overall waveform; + // 0: input sample position + // 1: output sample position + int64_t position[2]; + + // original packed multi-channel samples: + uint8_t *data; + + // number of samples in this fragment: + int nsamples; + + // rDFT transform of the down-mixed mono fragment, used for + // fast waveform alignment via correlation in frequency domain: + FFTSample *xdat; +} AudioFragment; + +/** + * Filter state machine states + */ +typedef enum { + YAE_LOAD_FRAGMENT, + YAE_ADJUST_POSITION, + YAE_RELOAD_FRAGMENT, + YAE_OUTPUT_OVERLAP_ADD, + YAE_FLUSH_OUTPUT, +} FilterState; + +/** + * Filter state machine + */ +typedef struct { + // ring-buffer of input samples, necessary because some times + // input fragment position may be adjusted backwards: + uint8_t *buffer; + + // ring-buffer maximum capacity, expressed in sample rate time base: + int ring; + + // ring-buffer house keeping: + int size; + int head; + int tail; + + // 0: input sample position corresponding to the ring buffer tail + // 1: output sample position + int64_t position[2]; + + // sample format: + enum AVSampleFormat format; + + // number of channels: + int channels; + + // row of bytes to skip from one sample to next, across multple channels; + // stride = (number-of-channels * bits-per-sample-per-channel) / 8 + int stride; + + // fragment window size, power-of-two integer: + int window; + + // Hann window coefficients, for feathering + // (blending) the overlapping fragment region: + float *hann; + + // tempo scaling factor: + double tempo; + + // cumulative alignment 
drift: + int drift; + + // current/previous fragment ring-buffer: + AudioFragment frag[2]; + + // current fragment index: + uint64_t nfrag; + + // current state: + FilterState state; + + // for fast correlation calculation in frequency domain: + RDFTContext *real_to_complex; + RDFTContext *complex_to_real; + FFTSample *correlation; + + // for managing AVFilterPad.request_frame and AVFilterPad.filter_frame + int request_fulfilled; + AVFilterBufferRef *dst_buffer; + uint8_t *dst; + uint8_t *dst_end; + uint64_t nsamples_in; + uint64_t nsamples_out; +} ATempoContext; + +/** + * Reset filter to initial state, do not deallocate existing local buffers. + */ +static void yae_clear(ATempoContext *atempo) +{ + atempo->size = 0; + atempo->head = 0; + atempo->tail = 0; + + atempo->drift = 0; + atempo->nfrag = 0; + atempo->state = YAE_LOAD_FRAGMENT; + + atempo->position[0] = 0; + atempo->position[1] = 0; + + atempo->frag[0].position[0] = 0; + atempo->frag[0].position[1] = 0; + atempo->frag[0].nsamples = 0; + + atempo->frag[1].position[0] = 0; + atempo->frag[1].position[1] = 0; + atempo->frag[1].nsamples = 0; + + // shift left position of 1st fragment by half a window + // so that no re-normalization would be required for + // the left half of the 1st fragment: + atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2); + atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2); + + avfilter_unref_bufferp(&atempo->dst_buffer); + atempo->dst = NULL; + atempo->dst_end = NULL; + + atempo->request_fulfilled = 0; + atempo->nsamples_in = 0; + atempo->nsamples_out = 0; +} + +/** + * Reset filter to initial state and deallocate all buffers. 
+ */ +static void yae_release_buffers(ATempoContext *atempo) +{ + yae_clear(atempo); + + av_freep(&atempo->frag[0].data); + av_freep(&atempo->frag[1].data); + av_freep(&atempo->frag[0].xdat); + av_freep(&atempo->frag[1].xdat); + + av_freep(&atempo->buffer); + av_freep(&atempo->hann); + av_freep(&atempo->correlation); + + av_rdft_end(atempo->real_to_complex); + atempo->real_to_complex = NULL; + + av_rdft_end(atempo->complex_to_real); + atempo->complex_to_real = NULL; +} + +/* av_realloc is not aligned enough; fortunately, the data does not need to + * be preserved */ +#define RE_MALLOC_OR_FAIL(field, field_size) \ + do { \ + av_freep(&field); \ + field = av_malloc(field_size); \ + if (!field) { \ + yae_release_buffers(atempo); \ + return AVERROR(ENOMEM); \ + } \ + } while (0) + +/** + * Prepare filter for processing audio data of given format, + * sample rate and number of channels. + */ +static int yae_reset(ATempoContext *atempo, + enum AVSampleFormat format, + int sample_rate, + int channels) +{ + const int sample_size = av_get_bytes_per_sample(format); + uint32_t nlevels = 0; + uint32_t pot; + int i; + + atempo->format = format; + atempo->channels = channels; + atempo->stride = sample_size * channels; + + // pick a segment window size: + atempo->window = sample_rate / 24; + + // adjust window size to be a power-of-two integer: + nlevels = av_log2(atempo->window); + pot = 1 << nlevels; + av_assert0(pot <= atempo->window); + + if (pot < atempo->window) { + atempo->window = pot * 2; + nlevels++; + } + + // initialize audio fragment buffers: + RE_MALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride); + RE_MALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride); + RE_MALLOC_OR_FAIL(atempo->frag[0].xdat, atempo->window * sizeof(FFTComplex)); + RE_MALLOC_OR_FAIL(atempo->frag[1].xdat, atempo->window * sizeof(FFTComplex)); + + // initialize rDFT contexts: + av_rdft_end(atempo->real_to_complex); + atempo->real_to_complex = NULL; + + 
av_rdft_end(atempo->complex_to_real); + atempo->complex_to_real = NULL; + + atempo->real_to_complex = av_rdft_init(nlevels + 1, DFT_R2C); + if (!atempo->real_to_complex) { + yae_release_buffers(atempo); + return AVERROR(ENOMEM); + } + + atempo->complex_to_real = av_rdft_init(nlevels + 1, IDFT_C2R); + if (!atempo->complex_to_real) { + yae_release_buffers(atempo); + return AVERROR(ENOMEM); + } + + RE_MALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(FFTComplex)); + + atempo->ring = atempo->window * 3; + RE_MALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride); + + // initialize the Hann window function: + RE_MALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float)); + + for (i = 0; i < atempo->window; i++) { + double t = (double)i / (double)(atempo->window - 1); + double h = 0.5 * (1.0 - cos(2.0 * M_PI * t)); + atempo->hann[i] = (float)h; + } + + yae_clear(atempo); + return 0; +} + +static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo) +{ + ATempoContext *atempo = ctx->priv; + char *tail = NULL; + double tempo = av_strtod(arg_tempo, &tail); + + if (tail && *tail) { + av_log(ctx, AV_LOG_ERROR, "Invalid tempo value '%s'\n", arg_tempo); + return AVERROR(EINVAL); + } + + if (tempo < 0.5 || tempo > 2.0) { + av_log(ctx, AV_LOG_ERROR, "Tempo value %f exceeds [0.5, 2.0] range\n", + tempo); + return AVERROR(EINVAL); + } + + atempo->tempo = tempo; + return 0; +} + +inline static AudioFragment *yae_curr_frag(ATempoContext *atempo) +{ + return &atempo->frag[atempo->nfrag % 2]; +} + +inline static AudioFragment *yae_prev_frag(ATempoContext *atempo) +{ + return &atempo->frag[(atempo->nfrag + 1) % 2]; +} + +/** + * A helper macro for initializing complex data buffer with scalar data + * of a given type. 
+ */ +#define yae_init_xdat(scalar_type, scalar_max) \ + do { \ + const uint8_t *src_end = src + \ + frag->nsamples * atempo->channels * sizeof(scalar_type); \ + \ + FFTSample *xdat = frag->xdat; \ + scalar_type tmp; \ + \ + if (atempo->channels == 1) { \ + for (; src < src_end; xdat++) { \ + tmp = *(const scalar_type *)src; \ + src += sizeof(scalar_type); \ + \ + *xdat = (FFTSample)tmp; \ + } \ + } else { \ + FFTSample s, max, ti, si; \ + int i; \ + \ + for (; src < src_end; xdat++) { \ + tmp = *(const scalar_type *)src; \ + src += sizeof(scalar_type); \ + \ + max = (FFTSample)tmp; \ + s = FFMIN((FFTSample)scalar_max, \ + (FFTSample)fabsf(max)); \ + \ + for (i = 1; i < atempo->channels; i++) { \ + tmp = *(const scalar_type *)src; \ + src += sizeof(scalar_type); \ + \ + ti = (FFTSample)tmp; \ + si = FFMIN((FFTSample)scalar_max, \ + (FFTSample)fabsf(ti)); \ + \ + if (s < si) { \ + s = si; \ + max = ti; \ + } \ + } \ + \ + *xdat = max; \ + } \ + } \ + } while (0) + +/** + * Initialize complex data buffer of a given audio fragment + * with down-mixed mono data of appropriate scalar type. + */ +static void yae_downmix(ATempoContext *atempo, AudioFragment *frag) +{ + // shortcuts: + const uint8_t *src = frag->data; + + // init complex data buffer used for FFT and Correlation: + memset(frag->xdat, 0, sizeof(FFTComplex) * atempo->window); + + if (atempo->format == AV_SAMPLE_FMT_U8) { + yae_init_xdat(uint8_t, 127); + } else if (atempo->format == AV_SAMPLE_FMT_S16) { + yae_init_xdat(int16_t, 32767); + } else if (atempo->format == AV_SAMPLE_FMT_S32) { + yae_init_xdat(int, 2147483647); + } else if (atempo->format == AV_SAMPLE_FMT_FLT) { + yae_init_xdat(float, 1); + } else if (atempo->format == AV_SAMPLE_FMT_DBL) { + yae_init_xdat(double, 1); + } +} + +/** + * Populate the internal data buffer on as-needed basis. + * + * @return + * 0 if requested data was already available or was successfully loaded, + * AVERROR(EAGAIN) if more input data is required. 
+ */ +static int yae_load_data(ATempoContext *atempo, + const uint8_t **src_ref, + const uint8_t *src_end, + int64_t stop_here) +{ + // shortcut: + const uint8_t *src = *src_ref; + const int read_size = stop_here - atempo->position[0]; + + if (stop_here <= atempo->position[0]) { + return 0; + } + + // samples are not expected to be skipped: + av_assert0(read_size <= atempo->ring); + + while (atempo->position[0] < stop_here && src < src_end) { + int src_samples = (src_end - src) / atempo->stride; + + // load data piece-wise, in order to avoid complicating the logic: + int nsamples = FFMIN(read_size, src_samples); + int na; + int nb; + + nsamples = FFMIN(nsamples, atempo->ring); + na = FFMIN(nsamples, atempo->ring - atempo->tail); + nb = FFMIN(nsamples - na, atempo->ring); + + if (na) { + uint8_t *a = atempo->buffer + atempo->tail * atempo->stride; + memcpy(a, src, na * atempo->stride); + + src += na * atempo->stride; + atempo->position[0] += na; + + atempo->size = FFMIN(atempo->size + na, atempo->ring); + atempo->tail = (atempo->tail + na) % atempo->ring; + atempo->head = + atempo->size < atempo->ring ? + atempo->tail - atempo->size : + atempo->tail; + } + + if (nb) { + uint8_t *b = atempo->buffer; + memcpy(b, src, nb * atempo->stride); + + src += nb * atempo->stride; + atempo->position[0] += nb; + + atempo->size = FFMIN(atempo->size + nb, atempo->ring); + atempo->tail = (atempo->tail + nb) % atempo->ring; + atempo->head = + atempo->size < atempo->ring ? + atempo->tail - atempo->size : + atempo->tail; + } + } + + // pass back the updated source buffer pointer: + *src_ref = src; + + // sanity check: + av_assert0(atempo->position[0] <= stop_here); + + return atempo->position[0] == stop_here ? 0 : AVERROR(EAGAIN); +} + +/** + * Populate current audio fragment data buffer. + * + * @return + * 0 when the fragment is ready, + * AVERROR(EAGAIN) if more input data is required. 
+ */ +static int yae_load_frag(ATempoContext *atempo, + const uint8_t **src_ref, + const uint8_t *src_end) +{ + // shortcuts: + AudioFragment *frag = yae_curr_frag(atempo); + uint8_t *dst; + int64_t missing, start, zeros; + uint32_t nsamples; + const uint8_t *a, *b; + int i0, i1, n0, n1, na, nb; + + int64_t stop_here = frag->position[0] + atempo->window; + if (src_ref && yae_load_data(atempo, src_ref, src_end, stop_here) != 0) { + return AVERROR(EAGAIN); + } + + // calculate the number of samples we don't have: + missing = + stop_here > atempo->position[0] ? + stop_here - atempo->position[0] : 0; + + nsamples = + missing < (int64_t)atempo->window ? + (uint32_t)(atempo->window - missing) : 0; + + // setup the output buffer: + frag->nsamples = nsamples; + dst = frag->data; + + start = atempo->position[0] - atempo->size; + zeros = 0; + + if (frag->position[0] < start) { + // what we don't have we substitute with zeros: + zeros = FFMIN(start - frag->position[0], (int64_t)nsamples); + av_assert0(zeros != nsamples); + + memset(dst, 0, zeros * atempo->stride); + dst += zeros * atempo->stride; + } + + if (zeros == nsamples) { + return 0; + } + + // get the remaining data from the ring buffer: + na = (atempo->head < atempo->tail ? + atempo->tail - atempo->head : + atempo->ring - atempo->head); + + nb = atempo->head < atempo->tail ? 0 : atempo->tail; + + // sanity check: + av_assert0(nsamples <= zeros + na + nb); + + a = atempo->buffer + atempo->head * atempo->stride; + b = atempo->buffer; + + i0 = frag->position[0] + zeros - start; + i1 = i0 < na ? 0 : i0 - na; + + n0 = i0 < na ? FFMIN(na - i0, (int)(nsamples - zeros)) : 0; + n1 = nsamples - zeros - n0; + + if (n0) { + memcpy(dst, a + i0 * atempo->stride, n0 * atempo->stride); + dst += n0 * atempo->stride; + } + + if (n1) { + memcpy(dst, b + i1 * atempo->stride, n1 * atempo->stride); + } + + return 0; +} + +/** + * Prepare for loading next audio fragment. 
+ */ +static void yae_advance_to_next_frag(ATempoContext *atempo) +{ + const double fragment_step = atempo->tempo * (double)(atempo->window / 2); + + const AudioFragment *prev; + AudioFragment *frag; + + atempo->nfrag++; + prev = yae_prev_frag(atempo); + frag = yae_curr_frag(atempo); + + frag->position[0] = prev->position[0] + (int64_t)fragment_step; + frag->position[1] = prev->position[1] + atempo->window / 2; + frag->nsamples = 0; +} + +/** + * Calculate cross-correlation via rDFT. + * + * Multiply two vectors of complex numbers (result of real_to_complex rDFT) + * and transform back via complex_to_real rDFT. + */ +static void yae_xcorr_via_rdft(FFTSample *xcorr, + RDFTContext *complex_to_real, + const FFTComplex *xa, + const FFTComplex *xb, + const int window) +{ + FFTComplex *xc = (FFTComplex *)xcorr; + int i; + + // NOTE: first element requires special care -- Given Y = rDFT(X), + // Im(Y[0]) and Im(Y[N/2]) are always zero, therefore av_rdft_calc + // stores Re(Y[N/2]) in place of Im(Y[0]). + + xc->re = xa->re * xb->re; + xc->im = xa->im * xb->im; + xa++; + xb++; + xc++; + + for (i = 1; i < window; i++, xa++, xb++, xc++) { + xc->re = (xa->re * xb->re + xa->im * xb->im); + xc->im = (xa->im * xb->re - xa->re * xb->im); + } + + // apply inverse rDFT: + av_rdft_calc(complex_to_real, xcorr); +} + +/** + * Calculate alignment offset for given fragment + * relative to the previous fragment. + * + * @return alignment offset of current fragment relative to previous. 
+ */ +static int yae_align(AudioFragment *frag, + const AudioFragment *prev, + const int window, + const int delta_max, + const int drift, + FFTSample *correlation, + RDFTContext *complex_to_real) +{ + int best_offset = -drift; + FFTSample best_metric = -FLT_MAX; + FFTSample *xcorr; + + int i0; + int i1; + int i; + + yae_xcorr_via_rdft(correlation, + complex_to_real, + (const FFTComplex *)prev->xdat, + (const FFTComplex *)frag->xdat, + window); + + // identify search window boundaries: + i0 = FFMAX(window / 2 - delta_max - drift, 0); + i0 = FFMIN(i0, window); + + i1 = FFMIN(window / 2 + delta_max - drift, window - window / 16); + i1 = FFMAX(i1, 0); + + // identify cross-correlation peaks within search window: + xcorr = correlation + i0; + + for (i = i0; i < i1; i++, xcorr++) { + FFTSample metric = *xcorr; + + // normalize: + FFTSample drifti = (FFTSample)(drift + i); + metric *= drifti * (FFTSample)(i - i0) * (FFTSample)(i1 - i); + + if (metric > best_metric) { + best_metric = metric; + best_offset = i - window / 2; + } + } + + return best_offset; +} + +/** + * Adjust current fragment position for better alignment + * with previous fragment. + * + * @return alignment correction. + */ +static int yae_adjust_position(ATempoContext *atempo) +{ + const AudioFragment *prev = yae_prev_frag(atempo); + AudioFragment *frag = yae_curr_frag(atempo); + + const int delta_max = atempo->window / 2; + const int correction = yae_align(frag, + prev, + atempo->window, + delta_max, + atempo->drift, + atempo->correlation, + atempo->complex_to_real); + + if (correction) { + // adjust fragment position: + frag->position[0] -= correction; + + // clear so that the fragment can be reloaded: + frag->nsamples = 0; + + // update cumulative correction drift counter: + atempo->drift += correction; + } + + return correction; +} + +/** + * A helper macro for blending the overlap region of previous + * and current audio fragment. 
+ */ +#define yae_blend(scalar_type) \ + do { \ + const scalar_type *aaa = (const scalar_type *)a; \ + const scalar_type *bbb = (const scalar_type *)b; \ + \ + scalar_type *out = (scalar_type *)dst; \ + scalar_type *out_end = (scalar_type *)dst_end; \ + int64_t i; \ + \ + for (i = 0; i < overlap && out < out_end; \ + i++, atempo->position[1]++, wa++, wb++) { \ + float w0 = *wa; \ + float w1 = *wb; \ + int j; \ + \ + for (j = 0; j < atempo->channels; \ + j++, aaa++, bbb++, out++) { \ + float t0 = (float)*aaa; \ + float t1 = (float)*bbb; \ + \ + *out = \ + frag->position[0] + i < 0 ? \ + *aaa : \ + (scalar_type)(t0 * w0 + t1 * w1); \ + } \ + } \ + dst = (uint8_t *)out; \ + } while (0) + +/** + * Blend the overlap region of previous and current audio fragment + * and output the results to the given destination buffer. + * + * @return + * 0 if the overlap region was completely stored in the dst buffer, + * AVERROR(EAGAIN) if more destination buffer space is required. + */ +static int yae_overlap_add(ATempoContext *atempo, + uint8_t **dst_ref, + uint8_t *dst_end) +{ + // shortcuts: + const AudioFragment *prev = yae_prev_frag(atempo); + const AudioFragment *frag = yae_curr_frag(atempo); + + const int64_t start_here = FFMAX(atempo->position[1], + frag->position[1]); + + const int64_t stop_here = FFMIN(prev->position[1] + prev->nsamples, + frag->position[1] + frag->nsamples); + + const int64_t overlap = stop_here - start_here; + + const int64_t ia = start_here - prev->position[1]; + const int64_t ib = start_here - frag->position[1]; + + const float *wa = atempo->hann + ia; + const float *wb = atempo->hann + ib; + + const uint8_t *a = prev->data + ia * atempo->stride; + const uint8_t *b = frag->data + ib * atempo->stride; + + uint8_t *dst = *dst_ref; + + av_assert0(start_here <= stop_here && + frag->position[1] <= start_here && + overlap <= frag->nsamples); + + if (atempo->format == AV_SAMPLE_FMT_U8) { + yae_blend(uint8_t); + } else if (atempo->format == AV_SAMPLE_FMT_S16) 
{ + yae_blend(int16_t); + } else if (atempo->format == AV_SAMPLE_FMT_S32) { + yae_blend(int); + } else if (atempo->format == AV_SAMPLE_FMT_FLT) { + yae_blend(float); + } else if (atempo->format == AV_SAMPLE_FMT_DBL) { + yae_blend(double); + } + + // pass-back the updated destination buffer pointer: + *dst_ref = dst; + + return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN); +} + +/** + * Feed as much data to the filter as it is able to consume + * and receive as much processed data in the destination buffer + * as it is able to produce or store. + */ +static void +yae_apply(ATempoContext *atempo, + const uint8_t **src_ref, + const uint8_t *src_end, + uint8_t **dst_ref, + uint8_t *dst_end) +{ + while (1) { + if (atempo->state == YAE_LOAD_FRAGMENT) { + // load additional data for the current fragment: + if (yae_load_frag(atempo, src_ref, src_end) != 0) { + break; + } + + // down-mix to mono: + yae_downmix(atempo, yae_curr_frag(atempo)); + + // apply rDFT: + av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat); + + // must load the second fragment before alignment can start: + if (!atempo->nfrag) { + yae_advance_to_next_frag(atempo); + continue; + } + + atempo->state = YAE_ADJUST_POSITION; + } + + if (atempo->state == YAE_ADJUST_POSITION) { + // adjust position for better alignment: + if (yae_adjust_position(atempo)) { + // reload the fragment at the corrected position, so that the + // Hann window blending would not require normalization: + atempo->state = YAE_RELOAD_FRAGMENT; + } else { + atempo->state = YAE_OUTPUT_OVERLAP_ADD; + } + } + + if (atempo->state == YAE_RELOAD_FRAGMENT) { + // load additional data if necessary due to position adjustment: + if (yae_load_frag(atempo, src_ref, src_end) != 0) { + break; + } + + // down-mix to mono: + yae_downmix(atempo, yae_curr_frag(atempo)); + + // apply rDFT: + av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat); + + atempo->state = YAE_OUTPUT_OVERLAP_ADD; + } + + if (atempo->state == 
YAE_OUTPUT_OVERLAP_ADD) { + // overlap-add and output the result: + if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) { + break; + } + + // advance to the next fragment, repeat: + yae_advance_to_next_frag(atempo); + atempo->state = YAE_LOAD_FRAGMENT; + } + } +} + +/** + * Flush any buffered data from the filter. + * + * @return + * 0 if all data was completely stored in the dst buffer, + * AVERROR(EAGAIN) if more destination buffer space is required. + */ +static int yae_flush(ATempoContext *atempo, + uint8_t **dst_ref, + uint8_t *dst_end) +{ + AudioFragment *frag = yae_curr_frag(atempo); + int64_t overlap_end; + int64_t start_here; + int64_t stop_here; + int64_t offset; + + const uint8_t *src; + uint8_t *dst; + + int src_size; + int dst_size; + int nbytes; + + atempo->state = YAE_FLUSH_OUTPUT; + + if (atempo->position[0] == frag->position[0] + frag->nsamples && + atempo->position[1] == frag->position[1] + frag->nsamples) { + // the current fragment is already flushed: + return 0; + } + + if (frag->position[0] + frag->nsamples < atempo->position[0]) { + // finish loading the current (possibly partial) fragment: + yae_load_frag(atempo, NULL, NULL); + + if (atempo->nfrag) { + // down-mix to mono: + yae_downmix(atempo, frag); + + // apply rDFT: + av_rdft_calc(atempo->real_to_complex, frag->xdat); + + // align current fragment to previous fragment: + if (yae_adjust_position(atempo)) { + // reload the current fragment due to adjusted position: + yae_load_frag(atempo, NULL, NULL); + } + } + } + + // flush the overlap region: + overlap_end = frag->position[1] + FFMIN(atempo->window / 2, + frag->nsamples); + + while (atempo->position[1] < overlap_end) { + if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) { + return AVERROR(EAGAIN); + } + } + + // flush the remaininder of the current fragment: + start_here = FFMAX(atempo->position[1], overlap_end); + stop_here = frag->position[1] + frag->nsamples; + offset = start_here - frag->position[1]; + av_assert0(start_here <= 
stop_here && frag->position[1] <= start_here); + + src = frag->data + offset * atempo->stride; + dst = (uint8_t *)*dst_ref; + + src_size = (int)(stop_here - start_here) * atempo->stride; + dst_size = dst_end - dst; + nbytes = FFMIN(src_size, dst_size); + + memcpy(dst, src, nbytes); + dst += nbytes; + + atempo->position[1] += (nbytes / atempo->stride); + + // pass-back the updated destination buffer pointer: + *dst_ref = (uint8_t *)dst; + + return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ATempoContext *atempo = ctx->priv; + + // NOTE: this assumes that the caller has memset ctx->priv to 0: + atempo->format = AV_SAMPLE_FMT_NONE; + atempo->tempo = 1.0; + atempo->state = YAE_LOAD_FRAGMENT; + + return args ? yae_set_tempo(ctx, args) : 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ATempoContext *atempo = ctx->priv; + yae_release_buffers(atempo); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterChannelLayouts *layouts = NULL; + AVFilterFormats *formats = NULL; + + // WSOLA necessitates an internal sliding window ring buffer + // for incoming audio stream. + // + // Planar sample formats are too cumbersome to store in a ring buffer, + // therefore planar sample formats are not supported. 
+ // + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_U8, + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_S32, + AV_SAMPLE_FMT_FLT, + AV_SAMPLE_FMT_DBL, + AV_SAMPLE_FMT_NONE + }; + + layouts = ff_all_channel_layouts(); + if (!layouts) { + return AVERROR(ENOMEM); + } + ff_set_common_channel_layouts(ctx, layouts); + + formats = ff_make_format_list(sample_fmts); + if (!formats) { + return AVERROR(ENOMEM); + } + ff_set_common_formats(ctx, formats); + + formats = ff_all_samplerates(); + if (!formats) { + return AVERROR(ENOMEM); + } + ff_set_common_samplerates(ctx, formats); + + return 0; +} + +static int config_props(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + ATempoContext *atempo = ctx->priv; + + enum AVSampleFormat format = inlink->format; + int sample_rate = (int)inlink->sample_rate; + int channels = av_get_channel_layout_nb_channels(inlink->channel_layout); + + return yae_reset(atempo, format, sample_rate, channels); +} + +static void push_samples(ATempoContext *atempo, + AVFilterLink *outlink, + int n_out) +{ + atempo->dst_buffer->audio->sample_rate = outlink->sample_rate; + atempo->dst_buffer->audio->nb_samples = n_out; + + // adjust the PTS: + atempo->dst_buffer->pts = + av_rescale_q(atempo->nsamples_out, + (AVRational){ 1, outlink->sample_rate }, + outlink->time_base); + + ff_filter_frame(outlink, atempo->dst_buffer); + atempo->dst_buffer = NULL; + atempo->dst = NULL; + atempo->dst_end = NULL; + + atempo->nsamples_out += n_out; +} + +static int filter_frame(AVFilterLink *inlink, + AVFilterBufferRef *src_buffer) +{ + AVFilterContext *ctx = inlink->dst; + ATempoContext *atempo = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + + int n_in = src_buffer->audio->nb_samples; + int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo); + + const uint8_t *src = src_buffer->data[0]; + const uint8_t *src_end = src + n_in * atempo->stride; + + while (src < src_end) { + if (!atempo->dst_buffer) { + atempo->dst_buffer = 
ff_get_audio_buffer(outlink, + AV_PERM_WRITE, + n_out); + avfilter_copy_buffer_ref_props(atempo->dst_buffer, src_buffer); + + atempo->dst = atempo->dst_buffer->data[0]; + atempo->dst_end = atempo->dst + n_out * atempo->stride; + } + + yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end); + + if (atempo->dst == atempo->dst_end) { + push_samples(atempo, outlink, n_out); + atempo->request_fulfilled = 1; + } + } + + atempo->nsamples_in += n_in; + avfilter_unref_bufferp(&src_buffer); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + ATempoContext *atempo = ctx->priv; + int ret; + + atempo->request_fulfilled = 0; + do { + ret = ff_request_frame(ctx->inputs[0]); + } + while (!atempo->request_fulfilled && ret >= 0); + + if (ret == AVERROR_EOF) { + // flush the filter: + int n_max = atempo->ring; + int n_out; + int err = AVERROR(EAGAIN); + + while (err == AVERROR(EAGAIN)) { + if (!atempo->dst_buffer) { + atempo->dst_buffer = ff_get_audio_buffer(outlink, + AV_PERM_WRITE, + n_max); + + atempo->dst = atempo->dst_buffer->data[0]; + atempo->dst_end = atempo->dst + n_max * atempo->stride; + } + + err = yae_flush(atempo, &atempo->dst, atempo->dst_end); + + n_out = ((atempo->dst - atempo->dst_buffer->data[0]) / + atempo->stride); + + if (n_out) { + push_samples(atempo, outlink, n_out); + } + } + + avfilter_unref_bufferp(&atempo->dst_buffer); + atempo->dst = NULL; + atempo->dst_end = NULL; + + return AVERROR_EOF; + } + + return ret; +} + +static int process_command(AVFilterContext *ctx, + const char *cmd, + const char *arg, + char *res, + int res_len, + int flags) +{ + return !strcmp(cmd, "tempo") ? 
yae_set_tempo(ctx, arg) : AVERROR(ENOSYS); +} + +static const AVFilterPad atempo_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .config_props = config_props, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad atempo_outputs[] = { + { + .name = "default", + .request_frame = request_frame, + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_atempo = { + .name = "atempo", + .description = NULL_IF_CONFIG_SMALL("Adjust audio tempo."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .process_command = process_command, + .priv_size = sizeof(ATempoContext), + .inputs = atempo_inputs, + .outputs = atempo_outputs, +}; diff --git a/libavfilter/af_biquads.c b/libavfilter/af_biquads.c new file mode 100644 index 0000000..123d7a2 --- /dev/null +++ b/libavfilter/af_biquads.c @@ -0,0 +1,599 @@ +/* + * Copyright (c) 2013 Paul B Mahol + * Copyright (c) 2006-2008 Rob Sykes <robs@users.sourceforge.net> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* + * 2-pole filters designed by Robert Bristow-Johnson <rbj@audioimagination.com> + * see http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt + * + * 1-pole filters based on code (c) 2000 Chris Bagwell <cbagwell@sprynet.com> + * Algorithms: Recursive single pole low/high pass filter + * Reference: The Scientist and Engineer's Guide to Digital Signal Processing + * + * low-pass: output[N] = input[N] * A + output[N-1] * B + * X = exp(-2.0 * pi * Fc) + * A = 1 - X + * B = X + * Fc = cutoff freq / sample rate + * + * Mimics an RC low-pass filter: + * + * ---/\/\/\/\-----------> + * | + * --- C + * --- + * | + * | + * V + * + * high-pass: output[N] = A0 * input[N] + A1 * input[N-1] + B1 * output[N-1] + * X = exp(-2.0 * pi * Fc) + * A0 = (1 + X) / 2 + * A1 = -(1 + X) / 2 + * B1 = X + * Fc = cutoff freq / sample rate + * + * Mimics an RC high-pass filter: + * + * || C + * ----||---------> + * || | + * < + * > R + * < + * | + * V + */ + +#include "libavutil/opt.h" +#include "libavutil/avassert.h" +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +enum FilterType { + biquad, + equalizer, + bass, + treble, + band, + bandpass, + bandreject, + allpass, + highpass, + lowpass, +}; + +enum WidthType { + NONE, + HZ, + OCTAVE, + QFACTOR, + SLOPE, +}; + +typedef struct ChanCache { + double i1, i2; + double o1, o2; +} ChanCache; + +typedef struct { + const AVClass *class; + + enum FilterType filter_type; + enum WidthType width_type; + int poles; + int csg; + + double gain; + double frequency; + double width; + + double a0, a1, a2; + double b0, b1, b2; + + ChanCache *cache; + + void (*filter)(const void *ibuf, void *obuf, int len, + double *i1, double *i2, double *o1, double *o2, + double b0, double b1, double b2, double a1, double a2); +} 
BiquadsContext; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + BiquadsContext *p = ctx->priv; + int ret; + + av_opt_set_defaults(p); + + if ((ret = av_set_options_string(p, args, "=", ":")) < 0) + return ret; + + if (p->filter_type != biquad) { + if (p->frequency <= 0 || p->width <= 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frequency %f and/or width %f <= 0\n", + p->frequency, p->width); + return AVERROR(EINVAL); + } + } + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats; + AVFilterChannelLayouts *layouts; + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_FLTP, + AV_SAMPLE_FMT_DBLP, + AV_SAMPLE_FMT_NONE + }; + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + return 0; +} + +#define BIQUAD_FILTER(name, type, min, max) \ +static void biquad_## name (const void *input, void *output, int len, \ + double *in1, double *in2, \ + double *out1, double *out2, \ + double b0, double b1, double b2, \ + double a1, double a2) \ +{ \ + const type *ibuf = input; \ + type *obuf = output; \ + double i1 = *in1; \ + double i2 = *in2; \ + double o1 = *out1; \ + double o2 = *out2; \ + int i; \ + \ + for (i = 0; i < len; i++) { \ + double o0 = ibuf[i] * b0 + i1 * b1 + i2 * b2 - o1 * a1 - o2 * a2; \ + i2 = i1; \ + i1 = ibuf[i]; \ + o2 = o1; \ + o1 = o0; \ + if (o0 < min) { \ + av_log(NULL, AV_LOG_WARNING, "clipping\n"); \ + obuf[i] = min; \ + } else if (o0 > max) { \ + av_log(NULL, AV_LOG_WARNING, "clipping\n"); \ + obuf[i] = max; \ + } else { \ + obuf[i] = o0; \ + } \ + } \ + *in1 = i1; \ + *in2 = i2; \ + *out1 = o1; \ + *out2 = 
o2; \ +} + +BIQUAD_FILTER(s16, int16_t, INT16_MIN, INT16_MAX) +BIQUAD_FILTER(s32, int32_t, INT32_MIN, INT32_MAX) +BIQUAD_FILTER(flt, float, -1., 1.) +BIQUAD_FILTER(dbl, double, -1., 1.) + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + BiquadsContext *p = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + double A = exp(p->gain / 40 * log(10.)); + double w0 = 2 * M_PI * p->frequency / inlink->sample_rate; + double alpha; + + if (w0 > M_PI) { + av_log(ctx, AV_LOG_ERROR, + "Invalid frequency %f. Frequency must be less than half the sample-rate %d.\n", + p->frequency, inlink->sample_rate); + return AVERROR(EINVAL); + } + + switch (p->width_type) { + case NONE: + alpha = 0.0; + break; + case HZ: + alpha = sin(w0) / (2 * p->frequency / p->width); + break; + case OCTAVE: + alpha = sin(w0) * sinh(log(2.) / 2 * p->width * w0 / sin(w0)); + break; + case QFACTOR: + alpha = sin(w0) / (2 * p->width); + break; + case SLOPE: + alpha = sin(w0) / 2 * sqrt((A + 1 / A) * (1 / p->width - 1) + 2); + break; + default: + av_assert0(0); + } + + switch (p->filter_type) { + case biquad: + break; + case equalizer: + p->a0 = 1 + alpha / A; + p->a1 = -2 * cos(w0); + p->a2 = 1 - alpha / A; + p->b0 = 1 + alpha * A; + p->b1 = -2 * cos(w0); + p->b2 = 1 - alpha * A; + break; + case bass: + p->a0 = (A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha; + p->a1 = -2 * ((A - 1) + (A + 1) * cos(w0)); + p->a2 = (A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha; + p->b0 = A * ((A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha); + p->b1 = 2 * A * ((A - 1) - (A + 1) * cos(w0)); + p->b2 = A * ((A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha); + break; + case treble: + p->a0 = (A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha; + p->a1 = 2 * ((A - 1) - (A + 1) * cos(w0)); + p->a2 = (A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha; + p->b0 = A * ((A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha); + p->b1 =-2 * A * ((A - 1) + (A + 1) * cos(w0)); + p->b2 = A * ((A 
+ 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha); + break; + case bandpass: + if (p->csg) { + p->a0 = 1 + alpha; + p->a1 = -2 * cos(w0); + p->a2 = 1 - alpha; + p->b0 = sin(w0) / 2; + p->b1 = 0; + p->b2 = -sin(w0) / 2; + } else { + p->a0 = 1 + alpha; + p->a1 = -2 * cos(w0); + p->a2 = 1 - alpha; + p->b0 = alpha; + p->b1 = 0; + p->b2 = -alpha; + } + break; + case bandreject: + p->a0 = 1 + alpha; + p->a1 = -2 * cos(w0); + p->a2 = 1 - alpha; + p->b0 = 1; + p->b1 = -2 * cos(w0); + p->b2 = 1; + break; + case lowpass: + if (p->poles == 1) { + p->a0 = 1; + p->a1 = -exp(-w0); + p->a2 = 0; + p->b0 = 1 + p->a1; + p->b1 = 0; + p->b2 = 0; + } else { + p->a0 = 1 + alpha; + p->a1 = -2 * cos(w0); + p->a2 = 1 - alpha; + p->b0 = (1 - cos(w0)) / 2; + p->b1 = 1 - cos(w0); + p->b2 = (1 - cos(w0)) / 2; + } + break; + case highpass: + if (p->poles == 1) { + p->a0 = 1; + p->a1 = -exp(-w0); + p->a2 = 0; + p->b0 = (1 - p->a1) / 2; + p->b1 = -p->b0; + p->b2 = 0; + } else { + p->a0 = 1 + alpha; + p->a1 = -2 * cos(w0); + p->a2 = 1 - alpha; + p->b0 = (1 + cos(w0)) / 2; + p->b1 = -(1 + cos(w0)); + p->b2 = (1 + cos(w0)) / 2; + } + break; + case allpass: + p->a0 = 1 + alpha; + p->a1 = -2 * cos(w0); + p->a2 = 1 - alpha; + p->b0 = 1 - alpha; + p->b1 = -2 * cos(w0); + p->b2 = 1 + alpha; + break; + default: + av_assert0(0); + } + + p->a1 /= p->a0; + p->a2 /= p->a0; + p->b0 /= p->a0; + p->b1 /= p->a0; + p->b2 /= p->a0; + + p->cache = av_realloc_f(p->cache, sizeof(ChanCache), inlink->channels); + if (!p->cache) + return AVERROR(ENOMEM); + + switch (inlink->format) { + case AV_SAMPLE_FMT_S16P: p->filter = biquad_s16; break; + case AV_SAMPLE_FMT_S32P: p->filter = biquad_s32; break; + case AV_SAMPLE_FMT_FLTP: p->filter = biquad_flt; break; + case AV_SAMPLE_FMT_DBLP: p->filter = biquad_dbl; break; + default: av_assert0(0); + } + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) +{ + BiquadsContext *p = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; 
+ AVFilterBufferRef *out_buf; + int nb_samples = buf->audio->nb_samples; + int ch; + + if (buf->perms & AV_PERM_WRITE) { + out_buf = buf; + } else { + out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples); + if (!out_buf) + return AVERROR(ENOMEM); + out_buf->pts = buf->pts; + } + + for (ch = 0; ch < buf->audio->channels; ch++) + p->filter(buf->extended_data[ch], + out_buf->extended_data[ch], nb_samples, + &p->cache[ch].i1, &p->cache[ch].i2, + &p->cache[ch].o1, &p->cache[ch].o2, + p->b0, p->b1, p->b2, p->a1, p->a2); + + if (buf != out_buf) + avfilter_unref_buffer(buf); + + return ff_filter_frame(outlink, out_buf); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + BiquadsContext *p = ctx->priv; + + av_freep(&p->cache); + av_opt_free(p); +} + +static const AVFilterPad inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output, + }, + { NULL } +}; + +#define OFFSET(x) offsetof(BiquadsContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +#define DEFINE_BIQUAD_FILTER(name_, description_) \ +AVFILTER_DEFINE_CLASS(name_); \ +static av_cold int name_##_init(AVFilterContext *ctx, const char *args) \ +{ \ + BiquadsContext *p = ctx->priv; \ + p->class = &name_##_class; \ + p->filter_type = name_; \ + return init(ctx, args); \ +} \ + \ +AVFilter avfilter_af_##name_ = { \ + .name = #name_, \ + .description = NULL_IF_CONFIG_SMALL(description_), \ + .priv_size = sizeof(BiquadsContext), \ + .init = name_##_init, \ + .uninit = uninit, \ + .query_formats = query_formats, \ + .inputs = inputs, \ + .outputs = outputs, \ + .priv_class = &name_##_class, \ +} + +#if CONFIG_EQUALIZER_FILTER +static const AVOption equalizer_options[] = { + {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS}, + {"f", 
"set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS}, + {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS}, + {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS}, + {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS}, + {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter."); +#endif /* CONFIG_EQUALIZER_FILTER */ +#if CONFIG_BASS_FILTER +static const AVOption bass_options[] = { + {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS}, + {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS}, + {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS}, + {"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS}, + {"gain", "set gain", 
OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS}, + {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies."); +#endif /* CONFIG_BASS_FILTER */ +#if CONFIG_TREBLE_FILTER +static const AVOption treble_options[] = { + {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS}, + {"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS}, + {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS}, + {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies."); +#endif /* CONFIG_TREBLE_FILTER */ +#if CONFIG_BANDPASS_FILTER +static const AVOption bandpass_options[] = { + {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", 
"Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS}, + {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS}, + {"csg", "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter."); +#endif /* CONFIG_BANDPASS_FILTER */ +#if CONFIG_BANDREJECT_FILTER +static const AVOption bandreject_options[] = { + {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS}, + {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter."); +#endif /* CONFIG_BANDREJECT_FILTER */ +#if CONFIG_LOWPASS_FILTER +static const AVOption lowpass_options[] = { + {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS}, + {"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS}, + 
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS}, + {"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS}, + {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS}, + {"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency."); +#endif /* CONFIG_LOWPASS_FILTER */ +#if CONFIG_HIGHPASS_FILTER +static const AVOption highpass_options[] = { + {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS}, + {"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS}, + {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS}, + {"p", "set number of poles", OFFSET(poles), 
AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency."); +#endif /* CONFIG_HIGHPASS_FILTER */ +#if CONFIG_ALLPASS_FILTER +static const AVOption allpass_options[] = { + {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS}, + {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HZ}, HZ, SLOPE, FLAGS, "width_type"}, + {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HZ}, 0, 0, FLAGS, "width_type"}, + {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"}, + {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"}, + {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"}, + {"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS}, + {"w", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter."); +#endif /* CONFIG_ALLPASS_FILTER */ +#if CONFIG_BIQUAD_FILTER +static const AVOption biquad_options[] = { + {"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MAX, INT16_MAX, FLAGS}, + {NULL}, +}; + +DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients."); +#endif /* CONFIG_BIQUAD_FILTER */ diff --git 
a/libavfilter/af_channelmap.c b/libavfilter/af_channelmap.c index 8b72d5b..6fe8704 100644 --- a/libavfilter/af_channelmap.c +++ b/libavfilter/af_channelmap.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2012 Google, Inc. * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -68,11 +68,12 @@ typedef struct ChannelMapContext { #define OFFSET(x) offsetof(ChannelMapContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM static const AVOption options[] = { { "map", "A comma-separated list of input channel numbers in output order.", - OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A }, + OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A|F }, { "channel_layout", "Output channel layout.", - OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A }, + OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F }, { NULL }, }; @@ -138,10 +139,8 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args) s->class = &channelmap_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, args, "=", ":")) < 0) { - 
av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args); + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) return ret; - } mapping = s->mapping_str; @@ -389,6 +388,7 @@ static const AVFilterPad avfilter_af_channelmap_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, + .min_perms = AV_PERM_READ | AV_PERM_WRITE, .filter_frame = channelmap_filter_frame, .config_props = channelmap_config_input }, @@ -412,4 +412,5 @@ AVFilter avfilter_af_channelmap = { .inputs = avfilter_af_channelmap_inputs, .outputs = avfilter_af_channelmap_outputs, + .priv_class = &channelmap_class, }; diff --git a/libavfilter/af_channelsplit.c b/libavfilter/af_channelsplit.c index cc379f3..9ca9dad 100644 --- a/libavfilter/af_channelsplit.c +++ b/libavfilter/af_channelsplit.c @@ -41,17 +41,13 @@ typedef struct ChannelSplitContext { #define OFFSET(x) offsetof(ChannelSplitContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM -static const AVOption options[] = { - { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A }, +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption channelsplit_options[] = { + { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F }, { NULL }, }; -static const AVClass channelsplit_class = { - .class_name = "channelsplit filter", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; +AVFILTER_DEFINE_CLASS(channelsplit); static int init(AVFilterContext *ctx, const char *arg) { @@ -61,10 +57,8 @@ static int init(AVFilterContext *ctx, const char *arg) s->class = &channelsplit_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, arg, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", arg); + if ((ret = av_set_options_string(s, arg, "=", ":")) < 0) return ret; - } if (!(s->channel_layout = 
av_get_channel_layout(s->channel_layout_str))) { av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n", s->channel_layout_str); @@ -155,4 +149,5 @@ AVFilter avfilter_af_channelsplit = { .inputs = avfilter_af_channelsplit_inputs, .outputs = NULL, + .priv_class = &channelsplit_class, }; diff --git a/libavfilter/af_earwax.c b/libavfilter/af_earwax.c new file mode 100644 index 0000000..a169d2a --- /dev/null +++ b/libavfilter/af_earwax.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2011 Mina Nagy Zaki + * Copyright (c) 2000 Edward Beingessner And Sundry Contributors. + * This source code is freely redistributable and may be used for any purpose. + * This copyright notice must be maintained. Edward Beingessner And Sundry + * Contributors are not responsible for the consequences of using this + * software. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Stereo Widening Effect. Adds audio cues to move stereo image in + * front of the listener. Adapted from the libsox earwax effect. + */ + +#include "libavutil/channel_layout.h" +#include "avfilter.h" +#include "audio.h" +#include "formats.h" + +#define NUMTAPS 64 + +static const int8_t filt[NUMTAPS] = { +/* 30° 330° */ + 4, -6, /* 32 tap stereo FIR filter. 
*/ + 4, -11, /* One side filters as if the */ + -1, -5, /* signal was from 30 degrees */ + 3, 3, /* from the ear, the other as */ + -2, 5, /* if 330 degrees. */ + -5, 0, + 9, 1, + 6, 3, /* Input */ + -4, -1, /* Left Right */ + -5, -3, /* __________ __________ */ + -2, -5, /* | | | | */ + -7, 1, /* .---| Hh,0(f) | | Hh,0(f) |---. */ + 6, -7, /* / |__________| |__________| \ */ + 30, -29, /* / \ / \ */ + 12, -3, /* / X \ */ + -11, 4, /* / / \ \ */ + -3, 7, /* ____V_____ __________V V__________ _____V____ */ + -20, 23, /* | | | | | | | | */ + 2, 0, /* | Hh,30(f) | | Hh,330(f)| | Hh,330(f)| | Hh,30(f) | */ + 1, -6, /* |__________| |__________| |__________| |__________| */ + -14, -5, /* \ ___ / \ ___ / */ + 15, -18, /* \ / \ / _____ \ / \ / */ + 6, 7, /* `->| + |<--' / \ `-->| + |<-' */ + 15, -10, /* \___/ _/ \_ \___/ */ + -14, 22, /* \ / \ / \ / */ + -7, -2, /* `--->| | | |<---' */ + -4, 9, /* \_/ \_/ */ + 6, -12, /* */ + 6, -6, /* Headphones */ + 0, -11, + 0, -5, + 4, 0}; + +typedef struct { + int16_t taps[NUMTAPS * 2]; +} EarwaxContext; + +static int query_formats(AVFilterContext *ctx) +{ + static const int sample_rates[] = { 44100, -1 }; + + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layout = NULL; + + ff_add_format(&formats, AV_SAMPLE_FMT_S16); + ff_set_common_formats(ctx, formats); + ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO); + ff_set_common_channel_layouts(ctx, layout); + ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates)); + + return 0; +} + +//FIXME: replace with DSPContext.scalarproduct_int16 +static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out) +{ + int32_t sample; + int16_t j; + + while (in < endin) { + sample = 32; + for (j = 0; j < NUMTAPS; j++) + sample += in[j] * filt[j]; + *out = sample >> 6; + out++; + in++; + } + + return out; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + AVFilterLink *outlink = inlink->dst->outputs[0]; + int16_t 
*taps, *endin, *in, *out; + AVFilterBufferRef *outsamples = + ff_get_audio_buffer(inlink, AV_PERM_WRITE, + insamples->audio->nb_samples); + int ret; + + if (!outsamples) + return AVERROR(ENOMEM); + avfilter_copy_buffer_ref_props(outsamples, insamples); + + taps = ((EarwaxContext *)inlink->dst->priv)->taps; + out = (int16_t *)outsamples->data[0]; + in = (int16_t *)insamples ->data[0]; + + // copy part of new input and process with saved input + memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps)); + out = scalarproduct(taps, taps + NUMTAPS, out); + + // process current input + endin = in + insamples->audio->nb_samples * 2 - NUMTAPS; + scalarproduct(in, endin, out); + + // save part of input for next round + memcpy(taps, endin, NUMTAPS * sizeof(*taps)); + + ret = ff_filter_frame(outlink, outsamples); + avfilter_unref_buffer(insamples); + return ret; +} + +static const AVFilterPad earwax_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad earwax_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_earwax = { + .name = "earwax", + .description = NULL_IF_CONFIG_SMALL("Widen the stereo image."), + .query_formats = query_formats, + .priv_size = sizeof(EarwaxContext), + .inputs = earwax_inputs, + .outputs = earwax_outputs, +}; diff --git a/libavfilter/af_join.c b/libavfilter/af_join.c index 2b715d5..864663b 100644 --- a/libavfilter/af_join.c +++ b/libavfilter/af_join.c @@ -76,13 +76,14 @@ typedef struct JoinBufferPriv { #define OFFSET(x) offsetof(JoinContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM static const AVOption join_options[] = { - { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A }, + { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F }, { 
"channel_layout", "Channel layout of the " - "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A }, + "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F }, { "map", "A comma-separated list of channels maps in the format " "'input_stream.input_channel-output_channel.", - OFFSET(map), AV_OPT_TYPE_STRING, .flags = A }, + OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F }, { NULL }, }; @@ -194,10 +195,8 @@ static int join_init(AVFilterContext *ctx, const char *args) s->class = &join_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args); + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) return ret; - } if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) { av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n", @@ -507,4 +506,5 @@ AVFilter avfilter_af_join = { .inputs = NULL, .outputs = avfilter_af_join_outputs, + .priv_class = &join_class, }; diff --git a/libavfilter/af_pan.c b/libavfilter/af_pan.c new file mode 100644 index 0000000..77ca549 --- /dev/null +++ b/libavfilter/af_pan.c @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2002 Anders Johansson <ajh@atri.curtin.edu.au> + * Copyright (c) 2011 Clément Bœsch <ubitux@gmail.com> + * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio panning filter (channels mixing) + * Original code written by Anders Johansson for MPlayer, + * reimplemented for FFmpeg. + */ + +#include <stdio.h> +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libswresample/swresample.h" +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +#define MAX_CHANNELS 63 + +typedef struct PanContext { + int64_t out_channel_layout; + double gain[MAX_CHANNELS][MAX_CHANNELS]; + int64_t need_renorm; + int need_renumber; + int nb_input_channels; + int nb_output_channels; + + int pure_gains; + /* channel mapping specific */ + int channel_map[SWR_CH_MAX]; + struct SwrContext *swr; +} PanContext; + +static int parse_channel_name(char **arg, int *rchannel, int *rnamed) +{ + char buf[8]; + int len, i, channel_id = 0; + int64_t layout, layout0; + + /* try to parse a channel name, e.g. "FL" */ + if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) { + layout0 = layout = av_get_channel_layout(buf); + /* channel_id <- first set bit in layout */ + for (i = 32; i > 0; i >>= 1) { + if (layout >= (int64_t)1 << i) { + channel_id += i; + layout >>= i; + } + } + /* reject layouts that are not a single channel */ + if (channel_id >= MAX_CHANNELS || layout0 != (int64_t)1 << channel_id) + return AVERROR(EINVAL); + *rchannel = channel_id; + *rnamed = 1; + *arg += len; + return 0; + } + /* try to parse a channel number, e.g. 
"c2" */ + if (sscanf(*arg, "c%d%n", &channel_id, &len) && + channel_id >= 0 && channel_id < MAX_CHANNELS) { + *rchannel = channel_id; + *rnamed = 0; + *arg += len; + return 0; + } + return AVERROR(EINVAL); +} + +static void skip_spaces(char **arg) +{ + int len = 0; + + sscanf(*arg, " %n", &len); + *arg += len; +} + +static av_cold int init(AVFilterContext *ctx, const char *args0) +{ + PanContext *const pan = ctx->priv; + char *arg, *arg0, *tokenizer, *args = av_strdup(args0); + int out_ch_id, in_ch_id, len, named, ret; + int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels + double gain; + + if (!args0) { + av_log(ctx, AV_LOG_ERROR, + "pan filter needs a channel layout and a set " + "of channels definitions as parameter\n"); + return AVERROR(EINVAL); + } + if (!args) + return AVERROR(ENOMEM); + arg = av_strtok(args, ":", &tokenizer); + ret = ff_parse_channel_layout(&pan->out_channel_layout, arg, ctx); + if (ret < 0) + goto fail; + pan->nb_output_channels = av_get_channel_layout_nb_channels(pan->out_channel_layout); + + /* parse channel specifications */ + while ((arg = arg0 = av_strtok(NULL, ":", &tokenizer))) { + /* channel name */ + if (parse_channel_name(&arg, &out_ch_id, &named)) { + av_log(ctx, AV_LOG_ERROR, + "Expected out channel name, got \"%.8s\"\n", arg); + ret = AVERROR(EINVAL); + goto fail; + } + if (named) { + if (!((pan->out_channel_layout >> out_ch_id) & 1)) { + av_log(ctx, AV_LOG_ERROR, + "Channel \"%.8s\" does not exist in the chosen layout\n", arg0); + ret = AVERROR(EINVAL); + goto fail; + } + /* get the channel number in the output channel layout: + * out_channel_layout & ((1 << out_ch_id) - 1) are all the + * channels that come before out_ch_id, + * so their count is the index of out_ch_id */ + out_ch_id = av_get_channel_layout_nb_channels(pan->out_channel_layout & (((int64_t)1 << out_ch_id) - 1)); + } + if (out_ch_id < 0 || out_ch_id >= pan->nb_output_channels) { + av_log(ctx, AV_LOG_ERROR, + "Invalid out channel name 
\"%.8s\"\n", arg0); + ret = AVERROR(EINVAL); + goto fail; + } + skip_spaces(&arg); + if (*arg == '=') { + arg++; + } else if (*arg == '<') { + pan->need_renorm |= (int64_t)1 << out_ch_id; + arg++; + } else { + av_log(ctx, AV_LOG_ERROR, + "Syntax error after channel name in \"%.8s\"\n", arg0); + ret = AVERROR(EINVAL); + goto fail; + } + /* gains */ + while (1) { + gain = 1; + if (sscanf(arg, "%lf%n *%n", &gain, &len, &len)) + arg += len; + if (parse_channel_name(&arg, &in_ch_id, &named)){ + av_log(ctx, AV_LOG_ERROR, + "Expected in channel name, got \"%.8s\"\n", arg); + ret = AVERROR(EINVAL); + goto fail; + } + nb_in_channels[named]++; + if (nb_in_channels[!named]) { + av_log(ctx, AV_LOG_ERROR, + "Can not mix named and numbered channels\n"); + ret = AVERROR(EINVAL); + goto fail; + } + pan->gain[out_ch_id][in_ch_id] = gain; + skip_spaces(&arg); + if (!*arg) + break; + if (*arg != '+') { + av_log(ctx, AV_LOG_ERROR, "Syntax error near \"%.8s\"\n", arg); + ret = AVERROR(EINVAL); + goto fail; + } + arg++; + } + } + pan->need_renumber = !!nb_in_channels[1]; + + ret = 0; +fail: + av_free(args); + return ret; +} + +static int are_gains_pure(const PanContext *pan) +{ + int i, j; + + for (i = 0; i < MAX_CHANNELS; i++) { + int nb_gain = 0; + + for (j = 0; j < MAX_CHANNELS; j++) { + double gain = pan->gain[i][j]; + + /* channel mapping is effective only if 0% or 100% of a channel is + * selected... */ + if (gain != 0. && gain != 1.) 
+ return 0; + /* ...and if the output channel is only composed of one input */ + if (gain && nb_gain++) + return 0; + } + } + return 1; +} + +static int query_formats(AVFilterContext *ctx) +{ + PanContext *pan = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts; + + pan->pure_gains = are_gains_pure(pan); + /* libswr supports any sample and packing formats */ + ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO)); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + // inlink supports any channel layout + layouts = ff_all_channel_layouts(); + ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts); + + // outlink supports only requested output channel layout + layouts = NULL; + ff_add_channel_layout(&layouts, pan->out_channel_layout); + ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts); + return 0; +} + +static int config_props(AVFilterLink *link) +{ + AVFilterContext *ctx = link->dst; + PanContext *pan = ctx->priv; + char buf[1024], *cur; + int i, j, k, r; + double t; + + pan->nb_input_channels = av_get_channel_layout_nb_channels(link->channel_layout); + if (pan->need_renumber) { + // input channels were given by their name: renumber them + for (i = j = 0; i < MAX_CHANNELS; i++) { + if ((link->channel_layout >> i) & 1) { + for (k = 0; k < pan->nb_output_channels; k++) + pan->gain[k][j] = pan->gain[k][i]; + j++; + } + } + } + + // sanity check; can't be done in query_formats since the inlink + // channel layout is unknown at that time + if (pan->nb_input_channels > SWR_CH_MAX || + pan->nb_output_channels > SWR_CH_MAX) { + av_log(ctx, AV_LOG_ERROR, + "libswresample support a maximum of %d channels. 
" + "Feel free to ask for a higher limit.\n", SWR_CH_MAX); + return AVERROR_PATCHWELCOME; + } + + // init libswresample context + pan->swr = swr_alloc_set_opts(pan->swr, + pan->out_channel_layout, link->format, link->sample_rate, + link->channel_layout, link->format, link->sample_rate, + 0, ctx); + if (!pan->swr) + return AVERROR(ENOMEM); + + // gains are pure, init the channel mapping + if (pan->pure_gains) { + + // get channel map from the pure gains + for (i = 0; i < pan->nb_output_channels; i++) { + int ch_id = -1; + for (j = 0; j < pan->nb_input_channels; j++) { + if (pan->gain[i][j]) { + ch_id = j; + break; + } + } + pan->channel_map[i] = ch_id; + } + + av_opt_set_int(pan->swr, "icl", pan->out_channel_layout, 0); + av_opt_set_int(pan->swr, "uch", pan->nb_output_channels, 0); + swr_set_channel_mapping(pan->swr, pan->channel_map); + } else { + // renormalize + for (i = 0; i < pan->nb_output_channels; i++) { + if (!((pan->need_renorm >> i) & 1)) + continue; + t = 0; + for (j = 0; j < pan->nb_input_channels; j++) + t += pan->gain[i][j]; + if (t > -1E-5 && t < 1E-5) { + // t is almost 0 but not exactly, this is probably a mistake + if (t) + av_log(ctx, AV_LOG_WARNING, + "Degenerate coefficients while renormalizing\n"); + continue; + } + for (j = 0; j < pan->nb_input_channels; j++) + pan->gain[i][j] /= t; + } + av_opt_set_int(pan->swr, "icl", link->channel_layout, 0); + av_opt_set_int(pan->swr, "ocl", pan->out_channel_layout, 0); + swr_set_matrix(pan->swr, pan->gain[0], pan->gain[1] - pan->gain[0]); + } + + r = swr_init(pan->swr); + if (r < 0) + return r; + + // summary + for (i = 0; i < pan->nb_output_channels; i++) { + cur = buf; + for (j = 0; j < pan->nb_input_channels; j++) { + r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d", + j ? 
" + " : "", pan->gain[i][j], j); + cur += FFMIN(buf + sizeof(buf) - cur, r); + } + av_log(ctx, AV_LOG_VERBOSE, "o%d = %s\n", i, buf); + } + // add channel mapping summary if possible + if (pan->pure_gains) { + av_log(ctx, AV_LOG_INFO, "Pure channel mapping detected:"); + for (i = 0; i < pan->nb_output_channels; i++) + if (pan->channel_map[i] < 0) + av_log(ctx, AV_LOG_INFO, " M"); + else + av_log(ctx, AV_LOG_INFO, " %d", pan->channel_map[i]); + av_log(ctx, AV_LOG_INFO, "\n"); + return 0; + } + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + int ret; + int n = insamples->audio->nb_samples; + AVFilterLink *const outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n); + PanContext *pan = inlink->dst->priv; + + swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n); + avfilter_copy_buffer_ref_props(outsamples, insamples); + outsamples->audio->channel_layout = outlink->channel_layout; + outsamples->audio->channels = outlink->channels; + + ret = ff_filter_frame(outlink, outsamples); + avfilter_unref_buffer(insamples); + return ret; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + PanContext *pan = ctx->priv; + swr_free(&pan->swr); +} + +static const AVFilterPad pan_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad pan_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_pan = { + .name = "pan", + .description = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."), + .priv_size = sizeof(PanContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = pan_inputs, + .outputs = pan_outputs, +}; diff --git a/libavfilter/af_silencedetect.c b/libavfilter/af_silencedetect.c new file 
mode 100644 index 0000000..8a60176 --- /dev/null +++ b/libavfilter/af_silencedetect.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2012 Clément Bœsch <ubitux@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio silence detector + */ + +#include <float.h> /* DBL_MAX */ + +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/timestamp.h" +#include "audio.h" +#include "formats.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + double noise; ///< noise amplitude ratio + double duration; ///< minimum duration of silence until notification + int64_t nb_null_samples; ///< current number of continuous zero samples + int64_t start; ///< if silence is detected, this value contains the time of the first zero sample + int last_sample_rate; ///< last sample rate to check for sample rate changes +} SilenceDetectContext; + +#define OFFSET(x) offsetof(SilenceDetectContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM +static const AVOption silencedetect_options[] = { + { "n", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS }, + { "noise", "set noise tolerance", OFFSET(noise), 
AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS }, + { "d", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS }, + { "duration", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(silencedetect); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + SilenceDetectContext *silence = ctx->priv; + + silence->class = &silencedetect_class; + av_opt_set_defaults(silence); + + if ((ret = av_set_options_string(silence, args, "=", ":")) < 0) + return ret; + + av_opt_free(silence); + + return 0; +} + +static char *get_metadata_val(AVFilterBufferRef *insamples, const char *key) +{ + AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0); + return e && e->value ? e->value : NULL; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + int i; + SilenceDetectContext *silence = inlink->dst->priv; + const int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout); + const int srate = inlink->sample_rate; + const int nb_samples = insamples->audio->nb_samples * nb_channels; + const int64_t nb_samples_notify = srate * silence->duration * nb_channels; + + // scale number of null samples to the new sample rate + if (silence->last_sample_rate && silence->last_sample_rate != srate) + silence->nb_null_samples = + srate * silence->nb_null_samples / silence->last_sample_rate; + silence->last_sample_rate = srate; + + // TODO: support more sample formats + // TODO: document metadata + if (insamples->format == AV_SAMPLE_FMT_DBL) { + double *p = (double *)insamples->data[0]; + + for (i = 0; i < nb_samples; i++, p++) { + if (*p < silence->noise && *p > -silence->noise) { + if (!silence->start) { + silence->nb_null_samples++; + if (silence->nb_null_samples >= nb_samples_notify) { + silence->start = insamples->pts - (int64_t)(silence->duration / 
av_q2d(inlink->time_base) + .5); + av_dict_set(&insamples->metadata, "lavfi.silence_start", + av_ts2timestr(silence->start, &inlink->time_base), 0); + av_log(silence, AV_LOG_INFO, "silence_start: %s\n", + get_metadata_val(insamples, "lavfi.silence_start")); + } + } + } else { + if (silence->start) { + av_dict_set(&insamples->metadata, "lavfi.silence_end", + av_ts2timestr(insamples->pts, &inlink->time_base), 0); + av_dict_set(&insamples->metadata, "lavfi.silence_duration", + av_ts2timestr(insamples->pts - silence->start, &inlink->time_base), 0); + av_log(silence, AV_LOG_INFO, + "silence_end: %s | silence_duration: %s\n", + get_metadata_val(insamples, "lavfi.silence_end"), + get_metadata_val(insamples, "lavfi.silence_duration")); + } + silence->nb_null_samples = silence->start = 0; + } + } + } + + return ff_filter_frame(inlink->dst->outputs[0], insamples); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_DBL, + AV_SAMPLE_FMT_NONE + }; + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, formats); + + return 0; +} + +static const AVFilterPad silencedetect_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad silencedetect_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_silencedetect = { + .name = "silencedetect", + .description = NULL_IF_CONFIG_SMALL("Detect silence."), + .priv_size = 
sizeof(SilenceDetectContext), + .init = init, + .query_formats = query_formats, + .inputs = silencedetect_inputs, + .outputs = silencedetect_outputs, + .priv_class = &silencedetect_class, +}; diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c index 3f3ad47..5ffa1fe 100644 --- a/libavfilter/af_volume.c +++ b/libavfilter/af_volume.c @@ -2,20 +2,20 @@ * Copyright (c) 2011 Stefano Sabatini * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -41,37 +41,32 @@ static const char *precision_str[] = { #define OFFSET(x) offsetof(VolumeContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM - -static const AVOption options[] = { - { "volume", "Volume adjustment.", - OFFSET(volume), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0, 0x7fffff, A }, - { "precision", "Mathematical precision.", - OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A, "precision" }, - { "fixed", "8-bit fixed-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A, "precision" }, - { "float", "32-bit floating-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A, "precision" }, - { "double", "64-bit floating-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A, "precision" }, +#define F AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption volume_options[] = { + { "volume", "set volume adjustment", + OFFSET(volume), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0, 0x7fffff, A|F }, + { "precision", "select mathematical precision", + OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" }, + { "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" }, + { "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" }, + { "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" }, { NULL }, }; -static const AVClass volume_class = { - .class_name = "volume filter", - .item_name = av_default_item_name, - 
.option = options, - .version = LIBAVUTIL_VERSION_INT, -}; +AVFILTER_DEFINE_CLASS(volume); static av_cold int init(AVFilterContext *ctx, const char *args) { VolumeContext *vol = ctx->priv; + static const char *shorthand[] = { "volume", "precision", NULL }; int ret; vol->class = &volume_class; av_opt_set_defaults(vol); - if ((ret = av_set_options_string(vol, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args); + if ((ret = av_opt_set_from_string(vol, args, shorthand, "=", ":")) < 0) return ret; - } if (vol->precision == PRECISION_FIXED) { vol->volume_i = (int)(vol->volume * 256 + 0.5); @@ -182,8 +177,6 @@ static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src, smp_dst[i] = av_clipl_int32((((int64_t)smp_src[i] * volume + 128) >> 8)); } - - static void volume_init(VolumeContext *vol) { vol->samples_align = 1; @@ -314,4 +307,5 @@ AVFilter avfilter_af_volume = { .init = init, .inputs = avfilter_af_volume_inputs, .outputs = avfilter_af_volume_outputs, + .priv_class = &volume_class, }; diff --git a/libavfilter/af_volume.h b/libavfilter/af_volume.h index a1883ed..bd7932e 100644 --- a/libavfilter/af_volume.h +++ b/libavfilter/af_volume.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/af_volumedetect.c b/libavfilter/af_volumedetect.c new file mode 100644 index 0000000..39265c0 --- /dev/null +++ b/libavfilter/af_volumedetect.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/channel_layout.h" +#include "libavutil/avassert.h" +#include "audio.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + /** + * Number of samples at each PCM value. + * histogram[0x8000 + i] is the number of samples at value i. + * The extra element is there for symmetry. 
+ */ + uint64_t histogram[0x10001]; +} VolDetectContext; + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVSampleFormat sample_fmts[] = { + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_NONE + }; + AVFilterFormats *formats; + + if (!(formats = ff_make_format_list(sample_fmts))) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *samples) +{ + AVFilterContext *ctx = inlink->dst; + VolDetectContext *vd = ctx->priv; + int64_t layout = samples->audio->channel_layout; + int nb_samples = samples->audio->nb_samples; + int nb_channels = av_get_channel_layout_nb_channels(layout); + int nb_planes = nb_channels; + int plane, i; + int16_t *pcm; + + if (!av_sample_fmt_is_planar(samples->format)) { + nb_samples *= nb_channels; + nb_planes = 1; + } + for (plane = 0; plane < nb_planes; plane++) { + pcm = (int16_t *)samples->extended_data[plane]; + for (i = 0; i < nb_samples; i++) + vd->histogram[pcm[i] + 0x8000]++; + } + + return ff_filter_frame(inlink->dst->outputs[0], samples); +} + +#define MAX_DB 91 + +static inline double logdb(uint64_t v) +{ + double d = v / (double)(0x8000 * 0x8000); + if (!v) + return MAX_DB; + return log(d) * -4.3429448190325182765112891891660508229; /* -10/log(10) */ +} + +static void print_stats(AVFilterContext *ctx) +{ + VolDetectContext *vd = ctx->priv; + int i, max_volume, shift; + uint64_t nb_samples = 0, power = 0, nb_samples_shift = 0, sum = 0; + uint64_t histdb[MAX_DB + 1] = { 0 }; + + for (i = 0; i < 0x10000; i++) + nb_samples += vd->histogram[i]; + av_log(ctx, AV_LOG_INFO, "n_samples: %"PRId64"\n", nb_samples); + if (!nb_samples) + return; + + /* If nb_samples > 1<<34, there is a risk of overflow in the + multiplication or the sum: shift all histogram values to avoid that. + The total number of samples must be recomputed to avoid rounding + errors. 
*/ + shift = av_log2(nb_samples >> 33); + for (i = 0; i < 0x10000; i++) { + nb_samples_shift += vd->histogram[i] >> shift; + power += (i - 0x8000) * (i - 0x8000) * (vd->histogram[i] >> shift); + } + if (!nb_samples_shift) + return; + power = (power + nb_samples_shift / 2) / nb_samples_shift; + av_assert0(power <= 0x8000 * 0x8000); + av_log(ctx, AV_LOG_INFO, "mean_volume: %.1f dB\n", -logdb(power)); + + max_volume = 0x8000; + while (max_volume > 0 && !vd->histogram[0x8000 + max_volume] && + !vd->histogram[0x8000 - max_volume]) + max_volume--; + av_log(ctx, AV_LOG_INFO, "max_volume: %.1f dB\n", -logdb(max_volume * max_volume)); + + for (i = 0; i < 0x10000; i++) + histdb[(int)logdb((i - 0x8000) * (i - 0x8000))] += vd->histogram[i]; + for (i = 0; i <= MAX_DB && !histdb[i]; i++); + for (; i <= MAX_DB && sum < nb_samples / 1000; i++) { + av_log(ctx, AV_LOG_INFO, "histogram_%ddb: %"PRId64"\n", i, histdb[i]); + sum += histdb[i]; + } +} + +static void uninit(AVFilterContext *ctx) +{ + print_stats(ctx); +} + +static const AVFilterPad volumedetect_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad volumedetect_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_volumedetect = { + .name = "volumedetect", + .description = NULL_IF_CONFIG_SMALL("Detect audio volume."), + + .priv_size = sizeof(VolDetectContext), + .query_formats = query_formats, + .uninit = uninit, + .inputs = volumedetect_inputs, + .outputs = volumedetect_outputs, +}; diff --git a/libavfilter/all_channel_layouts.inc b/libavfilter/all_channel_layouts.inc new file mode 100644 index 0000000..878e1f5 --- /dev/null +++ b/libavfilter/all_channel_layouts.inc @@ -0,0 +1,68 @@ +AV_CH_FRONT_CENTER, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT, 
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY, +AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER, +AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, 
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, 
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, 
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, +AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT, diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index f3ce91c..47158f9 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -2,20 +2,20 @@ * filter registration * Copyright (c) 2008 Vitor Sessak * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -44,41 +44,86 @@ void avfilter_register_all(void) return; initialized = 1; + REGISTER_FILTER(ACONVERT, aconvert, af); + REGISTER_FILTER(AFADE, afade, af); REGISTER_FILTER(AFORMAT, aformat, af); + REGISTER_FILTER(ALLPASS, allpass, af); + REGISTER_FILTER(AMERGE, amerge, af); REGISTER_FILTER(AMIX, amix, af); REGISTER_FILTER(ANULL, anull, af); + REGISTER_FILTER(APAD, apad, af); + REGISTER_FILTER(ARESAMPLE, aresample, af); + REGISTER_FILTER(ASELECT, aselect, af); + REGISTER_FILTER(ASENDCMD, asendcmd, af); + REGISTER_FILTER(ASETNSAMPLES, asetnsamples, af); + REGISTER_FILTER(ASETPTS, asetpts, af); + REGISTER_FILTER(ASETTB, asettb, af); REGISTER_FILTER(ASHOWINFO, ashowinfo, af); REGISTER_FILTER(ASPLIT, asplit, af); + REGISTER_FILTER(ASTREAMSYNC, astreamsync, af); REGISTER_FILTER(ASYNCTS, asyncts, af); + REGISTER_FILTER(ATEMPO, atempo, af); + REGISTER_FILTER(BANDPASS, bandpass, af); + REGISTER_FILTER(BANDREJECT, bandreject, af); + REGISTER_FILTER(BASS, bass, af); + REGISTER_FILTER(BIQUAD, biquad, af); REGISTER_FILTER(CHANNELMAP, channelmap, af); REGISTER_FILTER(CHANNELSPLIT, channelsplit, af); + REGISTER_FILTER(EARWAX, earwax, af); + REGISTER_FILTER(EBUR128, ebur128, af); + REGISTER_FILTER(EQUALIZER, equalizer, af); + REGISTER_FILTER(HIGHPASS, highpass, af); REGISTER_FILTER(JOIN, join, af); + REGISTER_FILTER(LOWPASS, lowpass, af); + REGISTER_FILTER(PAN, pan, af); REGISTER_FILTER(RESAMPLE, resample, af); + REGISTER_FILTER(SILENCEDETECT, silencedetect, af); + REGISTER_FILTER(TREBLE, treble, af); REGISTER_FILTER(VOLUME, volume, af); + REGISTER_FILTER(VOLUMEDETECT, volumedetect, af); + REGISTER_FILTER(AEVALSRC, aevalsrc, asrc); REGISTER_FILTER(ANULLSRC, anullsrc, asrc); + REGISTER_FILTER(FLITE, 
flite, asrc); REGISTER_FILTER(ANULLSINK, anullsink, asink); + REGISTER_FILTER(ALPHAEXTRACT, alphaextract, vf); + REGISTER_FILTER(ALPHAMERGE, alphamerge, vf); + REGISTER_FILTER(ASS, ass, vf); + REGISTER_FILTER(BBOX, bbox, vf); + REGISTER_FILTER(BLACKDETECT, blackdetect, vf); REGISTER_FILTER(BLACKFRAME, blackframe, vf); REGISTER_FILTER(BOXBLUR, boxblur, vf); + REGISTER_FILTER(COLORMATRIX, colormatrix, vf); REGISTER_FILTER(COPY, copy, vf); REGISTER_FILTER(CROP, crop, vf); REGISTER_FILTER(CROPDETECT, cropdetect, vf); + REGISTER_FILTER(DECIMATE, decimate, vf); REGISTER_FILTER(DELOGO, delogo, vf); + REGISTER_FILTER(DESHAKE, deshake, vf); REGISTER_FILTER(DRAWBOX, drawbox, vf); REGISTER_FILTER(DRAWTEXT, drawtext, vf); + REGISTER_FILTER(EDGEDETECT, edgedetect, vf); REGISTER_FILTER(FADE, fade, vf); + REGISTER_FILTER(FIELD, field, vf); REGISTER_FILTER(FIELDORDER, fieldorder, vf); REGISTER_FILTER(FORMAT, format, vf); REGISTER_FILTER(FPS, fps, vf); + REGISTER_FILTER(FRAMESTEP, framestep, vf); REGISTER_FILTER(FREI0R, frei0r, vf); + REGISTER_FILTER(GEQ, geq, vf); REGISTER_FILTER(GRADFUN, gradfun, vf); REGISTER_FILTER(HFLIP, hflip, vf); + REGISTER_FILTER(HISTEQ, histeq, vf); REGISTER_FILTER(HQDN3D, hqdn3d, vf); + REGISTER_FILTER(HUE, hue, vf); + REGISTER_FILTER(IDET, idet, vf); + REGISTER_FILTER(KERNDEINT, kerndeint, vf); REGISTER_FILTER(LUT, lut, vf); REGISTER_FILTER(LUTRGB, lutrgb, vf); REGISTER_FILTER(LUTYUV, lutyuv, vf); + REGISTER_FILTER(MP, mp, vf); REGISTER_FILTER(NEGATE, negate, vf); REGISTER_FILTER(NOFORMAT, noformat, vf); REGISTER_FILTER(NULL, null, vf); @@ -86,28 +131,59 @@ void avfilter_register_all(void) REGISTER_FILTER(OVERLAY, overlay, vf); REGISTER_FILTER(PAD, pad, vf); REGISTER_FILTER(PIXDESCTEST, pixdesctest, vf); + REGISTER_FILTER(PP, pp, vf); + REGISTER_FILTER(REMOVELOGO, removelogo, vf); REGISTER_FILTER(SCALE, scale, vf); REGISTER_FILTER(SELECT, select, vf); + REGISTER_FILTER(SENDCMD, sendcmd, vf); REGISTER_FILTER(SETDAR, setdar, vf); + 
REGISTER_FILTER(SETFIELD, setfield, vf); REGISTER_FILTER(SETPTS, setpts, vf); REGISTER_FILTER(SETSAR, setsar, vf); REGISTER_FILTER(SETTB, settb, vf); REGISTER_FILTER(SHOWINFO, showinfo, vf); + REGISTER_FILTER(SMARTBLUR, smartblur, vf); REGISTER_FILTER(SPLIT, split, vf); + REGISTER_FILTER(SUBTITLES, subtitles, vf); + REGISTER_FILTER(SUPER2XSAI, super2xsai, vf); + REGISTER_FILTER(SWAPUV, swapuv, vf); + REGISTER_FILTER(THUMBNAIL, thumbnail, vf); + REGISTER_FILTER(TILE, tile, vf); + REGISTER_FILTER(TINTERLACE, tinterlace, vf); REGISTER_FILTER(TRANSPOSE, transpose, vf); REGISTER_FILTER(UNSHARP, unsharp, vf); REGISTER_FILTER(VFLIP, vflip, vf); REGISTER_FILTER(YADIF, yadif, vf); + REGISTER_FILTER(CELLAUTO, cellauto, vsrc); REGISTER_FILTER(COLOR, color, vsrc); REGISTER_FILTER(FREI0R, frei0r_src, vsrc); - REGISTER_FILTER(MOVIE, movie, vsrc); + REGISTER_FILTER(LIFE, life, vsrc); + REGISTER_FILTER(MANDELBROT, mandelbrot, vsrc); + REGISTER_FILTER(MPTESTSRC, mptestsrc, vsrc); REGISTER_FILTER(NULLSRC, nullsrc, vsrc); REGISTER_FILTER(RGBTESTSRC, rgbtestsrc, vsrc); + REGISTER_FILTER(SMPTEBARS, smptebars, vsrc); REGISTER_FILTER(TESTSRC, testsrc, vsrc); REGISTER_FILTER(NULLSINK, nullsink, vsink); + /* multimedia filters */ + REGISTER_FILTER(CONCAT, concat, avf); + REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf); + REGISTER_FILTER(SHOWWAVES, showwaves, avf); + + /* multimedia sources */ + REGISTER_FILTER(AMOVIE, amovie, avsrc); + REGISTER_FILTER(MOVIE, movie, avsrc); + + REGISTER_FILTER_UNCONDITIONAL(vsink_ffbuffersink); + REGISTER_FILTER_UNCONDITIONAL(asink_ffabuffersink); +#if !AV_HAVE_INCOMPATIBLE_FORK_ABI + REGISTER_FILTER_UNCONDITIONAL(vsink_buffersink); + REGISTER_FILTER_UNCONDITIONAL(asink_abuffersink); +#endif + /* those filters are part of public or internal API => registered * unconditionally */ REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer); diff --git a/libavfilter/asink_anullsink.c b/libavfilter/asink_anullsink.c index ede54c0..5a324fc 100644 --- 
a/libavfilter/asink_anullsink.c +++ b/libavfilter/asink_anullsink.c @@ -1,18 +1,20 @@ /* - * This file is part of Libav. + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu> * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/asrc_abuffer.h b/libavfilter/asrc_abuffer.h new file mode 100644 index 0000000..aa34461 --- /dev/null +++ b/libavfilter/asrc_abuffer.h @@ -0,0 +1,91 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_ASRC_ABUFFER_H +#define AVFILTER_ASRC_ABUFFER_H + +#include "avfilter.h" + +/** + * @file + * memory buffer source for audio + * + * @deprecated use buffersrc.h instead. + */ + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param data pointers to the samples planes + * @param linesize linesizes of each audio buffer plane + * @param nb_samples number of samples per channel + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param planar flag to indicate if audio data is planar or packed + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc, + uint8_t *data[8], int linesize[8], + int nb_samples, int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * This is similar to av_asrc_buffer_add_samples(), but the samples + * are stored in a buffer with known size. + * + * @param abuffersrc audio source buffer context + * @param buf pointer to the samples data, packed is assumed + * @param size the size in bytes of the buffer, it must contain an + * integer number of samples + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. 
+ */ +attribute_deprecated +int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc, + uint8_t *buf, int buf_size, + int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param samplesref buffer ref to queue + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc, + AVFilterBufferRef *samplesref, + int av_unused flags); + +#endif /* AVFILTER_ASRC_ABUFFER_H */ diff --git a/libavfilter/asrc_aevalsrc.c b/libavfilter/asrc_aevalsrc.c new file mode 100644 index 0000000..2e5fa98 --- /dev/null +++ b/libavfilter/asrc_aevalsrc.c @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * eval audio source + */ + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/eval.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "avfilter.h" +#include "audio.h" +#include "internal.h" + +static const char * const var_names[] = { + "n", ///< number of frame + "t", ///< timestamp expressed in seconds + "s", ///< sample rate + NULL +}; + +enum var_name { + VAR_N, + VAR_T, + VAR_S, + VAR_VARS_NB +}; + +typedef struct { + const AVClass *class; + char *sample_rate_str; + int sample_rate; + int64_t chlayout; + char *chlayout_str; + int nb_channels; + int64_t pts; + AVExpr *expr[8]; + char *expr_str[8]; + int nb_samples; ///< number of samples per requested frame + char *duration_str; ///< total duration of the generated audio + double duration; + uint64_t n; + double var_values[VAR_VARS_NB]; +} EvalContext; + +#define OFFSET(x) offsetof(EvalContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption aevalsrc_options[]= { + { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { "sample_rate", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "s", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "duration", "set audio duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "d", "set audio duration", 
OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, +{NULL}, +}; + +AVFILTER_DEFINE_CLASS(aevalsrc); + +static int init(AVFilterContext *ctx, const char *args) +{ + EvalContext *eval = ctx->priv; + char *args1 = av_strdup(args); + char *expr, *buf, *bufptr; + int ret, i; + + eval->class = &aevalsrc_class; + av_opt_set_defaults(eval); + + if (!args1) { + av_log(ctx, AV_LOG_ERROR, "Argument is empty\n"); + ret = args ? AVERROR(ENOMEM) : AVERROR(EINVAL); + goto end; + } + + /* parse expressions */ + buf = args1; + i = 0; + while (expr = av_strtok(buf, ":", &bufptr)) { + ret = av_expr_parse(&eval->expr[i], expr, var_names, + NULL, NULL, NULL, NULL, 0, ctx); + if (ret < 0) + goto end; + i++; + if (bufptr && *bufptr == ':') { /* found last expression */ + bufptr++; + break; + } + buf = NULL; + } + eval->nb_channels = i; + + if (bufptr && (ret = av_set_options_string(eval, bufptr, "=", ":")) < 0) + goto end; + + if (eval->chlayout_str) { + int n; + ret = ff_parse_channel_layout(&eval->chlayout, eval->chlayout_str, ctx); + if (ret < 0) + goto end; + + n = av_get_channel_layout_nb_channels(eval->chlayout); + if (n != eval->nb_channels) { + av_log(ctx, AV_LOG_ERROR, + "Mismatch between the specified number of channels '%d' " + "and the number of channels '%d' in the specified channel layout '%s'\n", + eval->nb_channels, n, eval->chlayout_str); + ret = AVERROR(EINVAL); + goto end; + } + } else { + /* guess channel layout from nb expressions/channels */ + eval->chlayout = av_get_default_channel_layout(eval->nb_channels); + if (!eval->chlayout) { + av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n", + eval->nb_channels); + ret = AVERROR(EINVAL); + goto end; + } + } + + if ((ret = ff_parse_sample_rate(&eval->sample_rate, 
eval->sample_rate_str, ctx))) + goto end; + + eval->duration = -1; + if (eval->duration_str) { + int64_t us = -1; + if ((ret = av_parse_time(&us, eval->duration_str, 1)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", eval->duration_str); + goto end; + } + eval->duration = (double)us / 1000000; + } + eval->n = 0; + +end: + av_free(args1); + return ret; +} + +static void uninit(AVFilterContext *ctx) +{ + EvalContext *eval = ctx->priv; + int i; + + for (i = 0; i < 8; i++) { + av_expr_free(eval->expr[i]); + eval->expr[i] = NULL; + } + av_freep(&eval->chlayout_str); + av_freep(&eval->duration_str); + av_freep(&eval->sample_rate_str); +} + +static int config_props(AVFilterLink *outlink) +{ + EvalContext *eval = outlink->src->priv; + char buf[128]; + + outlink->time_base = (AVRational){1, eval->sample_rate}; + outlink->sample_rate = eval->sample_rate; + + eval->var_values[VAR_S] = eval->sample_rate; + + av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout); + + av_log(outlink->src, AV_LOG_VERBOSE, + "sample_rate:%d chlayout:%s duration:%f\n", + eval->sample_rate, buf, eval->duration); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + EvalContext *eval = ctx->priv; + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE }; + int64_t chlayouts[] = { eval->chlayout, -1 }; + int sample_rates[] = { eval->sample_rate, -1 }; + + ff_set_common_formats (ctx, ff_make_format_list(sample_fmts)); + ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts)); + ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates)); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + EvalContext *eval = outlink->src->priv; + AVFilterBufferRef *samplesref; + int i, j; + double t = eval->n * (double)1/eval->sample_rate; + + if (eval->duration >= 0 && t >= eval->duration) + return AVERROR_EOF; + + samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, eval->nb_samples); + 
+ /* evaluate expression for each single sample and for each channel */ + for (i = 0; i < eval->nb_samples; i++, eval->n++) { + eval->var_values[VAR_N] = eval->n; + eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate; + + for (j = 0; j < eval->nb_channels; j++) { + *((double *) samplesref->extended_data[j] + i) = + av_expr_eval(eval->expr[j], eval->var_values, NULL); + } + } + + samplesref->pts = eval->pts; + samplesref->pos = -1; + samplesref->audio->sample_rate = eval->sample_rate; + eval->pts += eval->nb_samples; + + ff_filter_frame(outlink, samplesref); + + return 0; +} + +static const AVFilterPad aevalsrc_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_asrc_aevalsrc = { + .name = "aevalsrc", + .description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."), + + .query_formats = query_formats, + .init = init, + .uninit = uninit, + .priv_size = sizeof(EvalContext), + .inputs = NULL, + .outputs = aevalsrc_outputs, + .priv_class = &aevalsrc_class, +}; diff --git a/libavfilter/asrc_anullsrc.c b/libavfilter/asrc_anullsrc.c index 4cbaa81..43e9a7c 100644 --- a/libavfilter/asrc_anullsrc.c +++ b/libavfilter/asrc_anullsrc.c @@ -1,18 +1,21 @@ /* - * This file is part of Libav. + * Copyright 2010 S.N. Hemanth Meenakshisundaram <smeenaks ucsd edu> + * Copyright 2010 Stefano Sabatini <stefano.sabatini-lala poste it> * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -26,62 +29,93 @@ #include "libavutil/channel_layout.h" #include "libavutil/internal.h" +#include "libavutil/opt.h" +#include "audio.h" #include "avfilter.h" #include "internal.h" typedef struct { + const AVClass *class; + char *channel_layout_str; uint64_t channel_layout; - int64_t sample_rate; + char *sample_rate_str; + int sample_rate; + int nb_samples; ///< number of samples per requested frame + int64_t pts; } ANullContext; +#define OFFSET(x) offsetof(ANullContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption anullsrc_options[]= { + { "channel_layout", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS }, + { "cl", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS }, + { "sample_rate", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS }, + { "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS }, + { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(anullsrc); + static int 
init(AVFilterContext *ctx, const char *args) { - ANullContext *priv = ctx->priv; - char channel_layout_str[128] = ""; + ANullContext *null = ctx->priv; + int ret; - priv->sample_rate = 44100; - priv->channel_layout = AV_CH_LAYOUT_STEREO; + null->class = &anullsrc_class; + av_opt_set_defaults(null); - if (args) - sscanf(args, "%"PRId64":%s", &priv->sample_rate, channel_layout_str); + if ((ret = (av_set_options_string(null, args, "=", ":"))) < 0) + return ret; - if (priv->sample_rate < 0) { - av_log(ctx, AV_LOG_ERROR, "Invalid negative sample rate: %"PRId64"\n", priv->sample_rate); - return AVERROR(EINVAL); - } + if ((ret = ff_parse_sample_rate(&null->sample_rate, + null->sample_rate_str, ctx)) < 0) + return ret; - if (*channel_layout_str) - if (!(priv->channel_layout = av_get_channel_layout(channel_layout_str)) - && sscanf(channel_layout_str, "%"PRId64, &priv->channel_layout) != 1) { - av_log(ctx, AV_LOG_ERROR, "Invalid value '%s' for channel layout\n", - channel_layout_str); - return AVERROR(EINVAL); - } + if ((ret = ff_parse_channel_layout(&null->channel_layout, + null->channel_layout_str, ctx)) < 0) + return ret; return 0; } static int config_props(AVFilterLink *outlink) { - ANullContext *priv = outlink->src->priv; + ANullContext *null = outlink->src->priv; char buf[128]; int chans_nb; - outlink->sample_rate = priv->sample_rate; - outlink->channel_layout = priv->channel_layout; + outlink->sample_rate = null->sample_rate; + outlink->channel_layout = null->channel_layout; - chans_nb = av_get_channel_layout_nb_channels(priv->channel_layout); - av_get_channel_layout_string(buf, sizeof(buf), chans_nb, priv->channel_layout); + chans_nb = av_get_channel_layout_nb_channels(null->channel_layout); + av_get_channel_layout_string(buf, sizeof(buf), chans_nb, null->channel_layout); av_log(outlink->src, AV_LOG_VERBOSE, - "sample_rate:%"PRId64 " channel_layout:%"PRId64 " channel_layout_description:'%s'\n", - priv->sample_rate, priv->channel_layout, buf); + "sample_rate:%d 
channel_layout:'%s' nb_samples:%d\n", + null->sample_rate, buf, null->nb_samples); return 0; } -static int request_frame(AVFilterLink *link) +static int request_frame(AVFilterLink *outlink) { - return -1; + ANullContext *null = outlink->src->priv; + AVFilterBufferRef *samplesref; + + samplesref = + ff_get_audio_buffer(outlink, AV_PERM_WRITE, null->nb_samples); + samplesref->pts = null->pts; + samplesref->pos = -1; + samplesref->audio->channel_layout = null->channel_layout; + samplesref->audio->sample_rate = outlink->sample_rate; + + ff_filter_frame(outlink, avfilter_ref_buffer(samplesref, ~0)); + avfilter_unref_buffer(samplesref); + + null->pts += null->nb_samples; + return 0; } static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = { @@ -96,7 +130,7 @@ static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = { AVFilter avfilter_asrc_anullsrc = { .name = "anullsrc", - .description = NULL_IF_CONFIG_SMALL("Null audio source, never return audio frames."), + .description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."), .init = init, .priv_size = sizeof(ANullContext), @@ -104,4 +138,5 @@ AVFilter avfilter_asrc_anullsrc = { .inputs = NULL, .outputs = avfilter_asrc_anullsrc_outputs, + .priv_class = &anullsrc_class, }; diff --git a/libavfilter/asrc_flite.c b/libavfilter/asrc_flite.c new file mode 100644 index 0000000..04901da --- /dev/null +++ b/libavfilter/asrc_flite.c @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * flite voice synth source + */ + +#include <flite/flite.h> +#include "libavutil/channel_layout.h" +#include "libavutil/file.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "audio.h" +#include "formats.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + char *voice_str; + char *textfile; + char *text; + cst_wave *wave; + int16_t *wave_samples; + int wave_nb_samples; + int list_voices; + cst_voice *voice; + struct voice_entry *voice_entry; + int64_t pts; + int frame_nb_samples; ///< number of samples per frame +} FliteContext; + +#define OFFSET(x) offsetof(FliteContext, x) +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption flite_options[] = { + { "list_voices", "list voices and exit", OFFSET(list_voices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + { "nb_samples", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS }, + { "n", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS }, + { "text", "set text to speak", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "textfile", "set filename of the text to speak", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "v", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "voice", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(flite); + +static volatile int flite_inited = 0; + +/* declare functions for all the supported voices */ +#define 
DECLARE_REGISTER_VOICE_FN(name) \ + cst_voice *register_cmu_us_## name(const char *); \ + void unregister_cmu_us_## name(cst_voice *); +DECLARE_REGISTER_VOICE_FN(awb); +DECLARE_REGISTER_VOICE_FN(kal); +DECLARE_REGISTER_VOICE_FN(kal16); +DECLARE_REGISTER_VOICE_FN(rms); +DECLARE_REGISTER_VOICE_FN(slt); + +struct voice_entry { + const char *name; + cst_voice * (*register_fn)(const char *); + void (*unregister_fn)(cst_voice *); + cst_voice *voice; + unsigned usage_count; +} voice_entry; + +#define MAKE_VOICE_STRUCTURE(voice_name) { \ + .name = #voice_name, \ + .register_fn = register_cmu_us_ ## voice_name, \ + .unregister_fn = unregister_cmu_us_ ## voice_name, \ +} +static struct voice_entry voice_entries[] = { + MAKE_VOICE_STRUCTURE(awb), + MAKE_VOICE_STRUCTURE(kal), + MAKE_VOICE_STRUCTURE(kal16), + MAKE_VOICE_STRUCTURE(rms), + MAKE_VOICE_STRUCTURE(slt), +}; + +static void list_voices(void *log_ctx, const char *sep) +{ + int i, n = FF_ARRAY_ELEMS(voice_entries); + for (i = 0; i < n; i++) + av_log(log_ctx, AV_LOG_INFO, "%s%s", + voice_entries[i].name, i < (n-1) ? 
sep : "\n"); +} + +static int select_voice(struct voice_entry **entry_ret, const char *voice_name, void *log_ctx) +{ + int i; + + for (i = 0; i < FF_ARRAY_ELEMS(voice_entries); i++) { + struct voice_entry *entry = &voice_entries[i]; + if (!strcmp(entry->name, voice_name)) { + if (!entry->voice) + entry->voice = entry->register_fn(NULL); + if (!entry->voice) { + av_log(log_ctx, AV_LOG_ERROR, + "Could not register voice '%s'\n", voice_name); + return AVERROR_UNKNOWN; + } + entry->usage_count++; + *entry_ret = entry; + return 0; + } + } + + av_log(log_ctx, AV_LOG_ERROR, "Could not find voice '%s'\n", voice_name); + av_log(log_ctx, AV_LOG_INFO, "Choose between the voices: "); + list_voices(log_ctx, ", "); + + return AVERROR(EINVAL); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + FliteContext *flite = ctx->priv; + int ret = 0; + + flite->class = &flite_class; + av_opt_set_defaults(flite); + + if ((ret = av_set_options_string(flite, args, "=", ":")) < 0) + return ret; + + if (flite->list_voices) { + list_voices(ctx, "\n"); + return AVERROR_EXIT; + } + + if (!flite_inited) { + if (flite_init() < 0) { + av_log(ctx, AV_LOG_ERROR, "flite initialization failed\n"); + return AVERROR_UNKNOWN; + } + flite_inited++; + } + + if ((ret = select_voice(&flite->voice_entry, flite->voice_str, ctx)) < 0) + return ret; + flite->voice = flite->voice_entry->voice; + + if (flite->textfile && flite->text) { + av_log(ctx, AV_LOG_ERROR, + "Both text and textfile options set: only one must be specified\n"); + return AVERROR(EINVAL); + } + + if (flite->textfile) { + uint8_t *textbuf; + size_t textbuf_size; + + if ((ret = av_file_map(flite->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) { + av_log(ctx, AV_LOG_ERROR, + "The text file '%s' could not be read: %s\n", + flite->textfile, av_err2str(ret)); + return ret; + } + + if (!(flite->text = av_malloc(textbuf_size+1))) + return AVERROR(ENOMEM); + memcpy(flite->text, textbuf, textbuf_size); + flite->text[textbuf_size] = 
0; + av_file_unmap(textbuf, textbuf_size); + } + + if (!flite->text) { + av_log(ctx, AV_LOG_ERROR, + "No speech text specified, specify the 'text' or 'textfile' option\n"); + return AVERROR(EINVAL); + } + + /* synth all the file data in block */ + flite->wave = flite_text_to_wave(flite->text, flite->voice); + flite->wave_samples = flite->wave->samples; + flite->wave_nb_samples = flite->wave->num_samples; + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + FliteContext *flite = ctx->priv; + + av_opt_free(flite); + + if (!--flite->voice_entry->usage_count) + flite->voice_entry->unregister_fn(flite->voice); + flite->voice = NULL; + flite->voice_entry = NULL; + delete_wave(flite->wave); + flite->wave = NULL; +} + +static int query_formats(AVFilterContext *ctx) +{ + FliteContext *flite = ctx->priv; + + AVFilterChannelLayouts *chlayouts = NULL; + int64_t chlayout = av_get_default_channel_layout(flite->wave->num_channels); + AVFilterFormats *sample_formats = NULL; + AVFilterFormats *sample_rates = NULL; + + ff_add_channel_layout(&chlayouts, chlayout); + ff_set_common_channel_layouts(ctx, chlayouts); + ff_add_format(&sample_formats, AV_SAMPLE_FMT_S16); + ff_set_common_formats(ctx, sample_formats); + ff_add_format(&sample_rates, flite->wave->sample_rate); + ff_set_common_samplerates (ctx, sample_rates); + + return 0; +} + +static int config_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + FliteContext *flite = ctx->priv; + + outlink->sample_rate = flite->wave->sample_rate; + outlink->time_base = (AVRational){1, flite->wave->sample_rate}; + + av_log(ctx, AV_LOG_VERBOSE, "voice:%s fmt:%s sample_rate:%d\n", + flite->voice_str, + av_get_sample_fmt_name(outlink->format), outlink->sample_rate); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterBufferRef *samplesref; + FliteContext *flite = outlink->src->priv; + int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples); + + if (!nb_samples) + 
return AVERROR_EOF; + + samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples); + if (!samplesref) + return AVERROR(ENOMEM); + + memcpy(samplesref->data[0], flite->wave_samples, + nb_samples * flite->wave->num_channels * 2); + samplesref->pts = flite->pts; + samplesref->pos = -1; + samplesref->audio->sample_rate = flite->wave->sample_rate; + flite->pts += nb_samples; + flite->wave_samples += nb_samples * flite->wave->num_channels; + flite->wave_nb_samples -= nb_samples; + + return ff_filter_frame(outlink, samplesref); +} + +static const AVFilterPad flite_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_asrc_flite = { + .name = "flite", + .description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."), + .query_formats = query_formats, + .init = init, + .uninit = uninit, + .priv_size = sizeof(FliteContext), + .inputs = NULL, + .outputs = flite_outputs, + .priv_class = &flite_class, +}; diff --git a/libavfilter/audio.c b/libavfilter/audio.c index bbe12b2..c72979d 100644 --- a/libavfilter/audio.c +++ b/libavfilter/audio.c @@ -1,21 +1,25 @@ /* - * This file is part of Libav. + * Copyright (c) Stefano Sabatini | stefasab at gmail.com + * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/avassert.h" #include "libavutil/channel_layout.h" #include "libavutil/common.h" @@ -23,6 +27,11 @@ #include "avfilter.h" #include "internal.h" +int avfilter_ref_get_channels(AVFilterBufferRef *ref) +{ + return ref->audio ? ref->audio->channels : 0; +} + AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms, int nb_samples) { @@ -35,9 +44,13 @@ AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms, AVFilterBufferRef *samplesref = NULL; uint8_t **data; int planar = av_sample_fmt_is_planar(link->format); - int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout); + int nb_channels = link->channels; int planes = planar ? 
nb_channels : 1; int linesize; + int full_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE | + AV_PERM_REUSE | AV_PERM_REUSE2 | AV_PERM_ALIGN; + + av_assert1(!(perms & ~(full_perms | AV_PERM_NEG_LINESIZES))); if (!(data = av_mallocz(sizeof(*data) * planes))) goto fail; @@ -45,12 +58,14 @@ AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms, if (av_samples_alloc(data, &linesize, nb_channels, nb_samples, link->format, 0) < 0) goto fail; - samplesref = avfilter_get_audio_buffer_ref_from_arrays(data, linesize, perms, - nb_samples, link->format, - link->channel_layout); + samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels( + data, linesize, full_perms, nb_samples, link->format, + link->channels, link->channel_layout); if (!samplesref) goto fail; + samplesref->audio->sample_rate = link->sample_rate; + av_freep(&data); fail: @@ -77,11 +92,13 @@ AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms, return ret; } -AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, - int linesize,int perms, - int nb_samples, - enum AVSampleFormat sample_fmt, - uint64_t channel_layout) +AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + int channels, + uint64_t channel_layout) { int planes; AVFilterBuffer *samples = av_mallocz(sizeof(*samples)); @@ -90,6 +107,10 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, if (!samples || !samplesref) goto fail; + av_assert0(channels); + av_assert0(channel_layout == 0 || + channels == av_get_channel_layout_nb_channels(channel_layout)); + samplesref->buf = samples; samplesref->buf->free = ff_avfilter_default_free_buffer; if (!(samplesref->audio = av_mallocz(sizeof(*samplesref->audio)))) @@ -97,9 +118,9 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, samplesref->audio->nb_samples = nb_samples; 
samplesref->audio->channel_layout = channel_layout; - samplesref->audio->planar = av_sample_fmt_is_planar(sample_fmt); + samplesref->audio->channels = channels; - planes = samplesref->audio->planar ? av_get_channel_layout_nb_channels(channel_layout) : 1; + planes = av_sample_fmt_is_planar(sample_fmt) ? channels : 1; /* make sure the buffer gets read permission or it's useless for output */ samplesref->perms = perms | AV_PERM_READ; @@ -146,3 +167,15 @@ fail: av_freep(&samples); return NULL; } + +AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, + int linesize,int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + uint64_t channel_layout) +{ + int channels = av_get_channel_layout_nb_channels(channel_layout); + return avfilter_get_audio_buffer_ref_from_arrays_channels(data, linesize, perms, + nb_samples, sample_fmt, + channels, channel_layout); +} diff --git a/libavfilter/audio.h b/libavfilter/audio.h index a377503..8fe4d8e 100644 --- a/libavfilter/audio.h +++ b/libavfilter/audio.h @@ -1,18 +1,21 @@ /* - * This file is part of Libav. + * Copyright (c) Stefano Sabatini | stefasab at gmail.com + * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -20,6 +23,25 @@ #define AVFILTER_AUDIO_H #include "avfilter.h" +#include "internal.h" + +static const enum AVSampleFormat ff_packed_sample_fmts_array[] = { + AV_SAMPLE_FMT_U8, + AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_S32, + AV_SAMPLE_FMT_FLT, + AV_SAMPLE_FMT_DBL, + AV_SAMPLE_FMT_NONE +}; + +static const enum AVSampleFormat ff_planar_sample_fmts_array[] = { + AV_SAMPLE_FMT_U8P, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_FLTP, + AV_SAMPLE_FMT_DBLP, + AV_SAMPLE_FMT_NONE +}; /** default handler for get_audio_buffer() for audio inputs */ AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms, @@ -42,4 +64,24 @@ AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms, AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms, int nb_samples); +/** + * Send a buffer of audio samples to the next filter. + * + * @param link the output link over which the audio samples are being sent + * @param samplesref a reference to the buffer of audio samples being sent. The + * receiving filter will free this reference when it no longer + * needs it or pass it on to the next filter. + * + * @return >= 0 on success, a negative AVERROR on error. The receiving filter + * is responsible for unreferencing samplesref in case of error. + */ +int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref); + +/** + * Send a buffer of audio samples to the next link, without checking + * min_samples. 
+ */ +int ff_filter_samples_framed(AVFilterLink *link, + AVFilterBufferRef *samplesref); + #endif /* AVFILTER_AUDIO_H */ diff --git a/libavfilter/avcodec.c b/libavfilter/avcodec.c new file mode 100644 index 0000000..dd3c886 --- /dev/null +++ b/libavfilter/avcodec.c @@ -0,0 +1,200 @@ +/* + * Copyright 2011 Stefano Sabatini | stefasab at gmail.com + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * libavcodec/libavfilter gluing utilities + */ + +#include "avcodec.h" +#include "libavutil/avassert.h" +#include "libavutil/opt.h" + +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src) +{ + dst->pts = src->pts; + dst->pos = av_frame_get_pkt_pos(src); + dst->format = src->format; + + av_dict_free(&dst->metadata); + av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0); + + switch (dst->type) { + case AVMEDIA_TYPE_VIDEO: + dst->video->w = src->width; + dst->video->h = src->height; + dst->video->sample_aspect_ratio = src->sample_aspect_ratio; + dst->video->interlaced = src->interlaced_frame; + dst->video->top_field_first = src->top_field_first; + dst->video->key_frame = src->key_frame; + dst->video->pict_type = src->pict_type; + av_freep(&dst->video->qp_table); + dst->video->qp_table_linesize = 0; + if 
(src->qscale_table) { + int qsize = src->qstride ? src->qstride * ((src->height+15)/16) : (src->width+15)/16; + dst->video->qp_table = av_malloc(qsize); + if (!dst->video->qp_table) + return AVERROR(ENOMEM); + dst->video->qp_table_linesize = src->qstride; + dst->video->qp_table_size = qsize; + memcpy(dst->video->qp_table, src->qscale_table, qsize); + } + break; + case AVMEDIA_TYPE_AUDIO: + dst->audio->sample_rate = src->sample_rate; + dst->audio->channel_layout = src->channel_layout; + dst->audio->channels = src->channels; + if(src->channels < av_get_channel_layout_nb_channels(src->channel_layout)) { + av_log(NULL, AV_LOG_ERROR, "libavfilter does not support this channel layout\n"); + return AVERROR(EINVAL); + } + break; + default: + return AVERROR(EINVAL); + } + + return 0; +} + +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, + int perms) +{ + AVFilterBufferRef *picref = + avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, perms, + frame->width, frame->height, + frame->format); + if (!picref) + return NULL; + if (avfilter_copy_frame_props(picref, frame) < 0) { + picref->buf->data[0] = NULL; + avfilter_unref_bufferp(&picref); + } + return picref; +} + +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, + int perms) +{ + AVFilterBufferRef *samplesref; + int channels = av_frame_get_channels(frame); + int64_t layout = av_frame_get_channel_layout(frame); + + if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) { + av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n"); + return NULL; + } + + samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels( + (uint8_t **)frame->extended_data, frame->linesize[0], perms, + frame->nb_samples, frame->format, channels, layout); + if (!samplesref) + return NULL; + if (avfilter_copy_frame_props(samplesref, frame) < 0) { + samplesref->buf->data[0] = NULL; + 
avfilter_unref_bufferp(&samplesref); + } + return samplesref; +} + +AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, + const AVFrame *frame, + int perms) +{ + switch (type) { + case AVMEDIA_TYPE_VIDEO: + return avfilter_get_video_buffer_ref_from_frame(frame, perms); + case AVMEDIA_TYPE_AUDIO: + return avfilter_get_audio_buffer_ref_from_frame(frame, perms); + default: + return NULL; + } +} + +int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src) +{ + int planes, nb_channels; + + if (!dst) + return AVERROR(EINVAL); + /* abort in case the src is NULL and dst is not, avoid inconsistent state in dst */ + av_assert0(src); + + memcpy(dst->data, src->data, sizeof(dst->data)); + memcpy(dst->linesize, src->linesize, sizeof(dst->linesize)); + + dst->pts = src->pts; + dst->format = src->format; + av_frame_set_pkt_pos(dst, src->pos); + + switch (src->type) { + case AVMEDIA_TYPE_VIDEO: + av_assert0(src->video); + dst->width = src->video->w; + dst->height = src->video->h; + dst->sample_aspect_ratio = src->video->sample_aspect_ratio; + dst->interlaced_frame = src->video->interlaced; + dst->top_field_first = src->video->top_field_first; + dst->key_frame = src->video->key_frame; + dst->pict_type = src->video->pict_type; + break; + case AVMEDIA_TYPE_AUDIO: + av_assert0(src->audio); + nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout); + planes = av_sample_fmt_is_planar(src->format) ? 
nb_channels : 1; + + if (planes > FF_ARRAY_ELEMS(dst->data)) { + dst->extended_data = av_mallocz(planes * sizeof(*dst->extended_data)); + if (!dst->extended_data) + return AVERROR(ENOMEM); + memcpy(dst->extended_data, src->extended_data, + planes * sizeof(*dst->extended_data)); + } else + dst->extended_data = dst->data; + dst->nb_samples = src->audio->nb_samples; + av_frame_set_sample_rate (dst, src->audio->sample_rate); + av_frame_set_channel_layout(dst, src->audio->channel_layout); + av_frame_set_channels (dst, src->audio->channels); + break; + default: + return AVERROR(EINVAL); + } + + return 0; +} + +#ifdef FF_API_FILL_FRAME +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *samplesref) +{ + return avfilter_copy_buf_props(frame, samplesref); +} + +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *picref) +{ + return avfilter_copy_buf_props(frame, picref); +} + +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *ref) +{ + return avfilter_copy_buf_props(frame, ref); +} +#endif diff --git a/libavfilter/avcodec.h b/libavfilter/avcodec.h new file mode 100644 index 0000000..5f4209a --- /dev/null +++ b/libavfilter/avcodec.h @@ -0,0 +1,131 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVCODEC_H +#define AVFILTER_AVCODEC_H + +/** + * @file + * libavcodec/libavfilter gluing utilities + * + * This should be included in an application ONLY if the installed + * libavfilter has been compiled with libavcodec support, otherwise + * symbols defined below will not be available. + */ + +#include "libavcodec/avcodec.h" // AVFrame +#include "avfilter.h" + +/** + * Copy the frame properties of src to dst, without copying the actual + * image data. + * + * @return 0 on success, a negative number on error. + */ +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); + +/** + * Copy the frame properties and data pointers of src to dst, without copying + * the actual data. + * + * @return 0 on success, a negative number on error. + */ +int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); + +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + */ +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms); + + +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + */ +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, + int perms); + +/** + * Create and return a buffer reference from the data and properties + * contained in frame. 
+ * + * @param perms permissions to assign to the new buffer reference + */ +AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, + const AVFrame *frame, + int perms); + +#ifdef FF_API_FILL_FRAME +/** + * Fill an AVFrame with the information stored in samplesref. + * + * @param frame an already allocated AVFrame + * @param samplesref an audio buffer reference + * @return 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *samplesref); + +/** + * Fill an AVFrame with the information stored in picref. + * + * @param frame an already allocated AVFrame + * @param picref a video buffer reference + * @return 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *picref); + +/** + * Fill an AVFrame with information stored in ref. + * + * @param frame an already allocated AVFrame + * @param ref a video or audio buffer reference + * @return 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *ref); +#endif + +/** + * Add frame data to buffer_src. 
+ * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_frame(AVFilterContext *buffer_src, + const AVFrame *frame, int flags); + +#endif /* AVFILTER_AVCODEC_H */ diff --git a/libavfilter/avf_concat.c b/libavfilter/avf_concat.c new file mode 100644 index 0000000..079d55d --- /dev/null +++ b/libavfilter/avf_concat.c @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * concat audio-video filter + */ + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#define FF_BUFQUEUE_SIZE 256 +#include "bufferqueue.h" +#include "internal.h" +#include "video.h" +#include "audio.h" + +#define TYPE_ALL 2 + +typedef struct { + const AVClass *class; + unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */ + unsigned nb_segments; + unsigned cur_idx; /**< index of the first input of current segment */ + int64_t delta_ts; /**< timestamp to add to produce output timestamps */ + unsigned nb_in_active; /**< number of active inputs in current segment */ + unsigned unsafe; + struct concat_in { + int64_t pts; + int64_t nb_frames; + unsigned eof; + struct FFBufQueue queue; + } *in; +} ConcatContext; + +#define OFFSET(x) offsetof(ConcatContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +#define V AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption concat_options[] = { + { "n", "specify the number of segments", OFFSET(nb_segments), + AV_OPT_TYPE_INT, { .i64 = 2 }, 2, INT_MAX, V|A|F}, + { "v", "specify the number of video streams", + OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]), + AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F }, + { "a", "specify the number of audio streams", + OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]), + AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F}, + { "unsafe", "enable unsafe mode", + OFFSET(unsafe), + AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|A|F}, + { 0 } +}; + +AVFILTER_DEFINE_CLASS(concat); + +static int query_formats(AVFilterContext *ctx) +{ + ConcatContext *cat = ctx->priv; + unsigned type, nb_str, idx0 = 0, idx, str, seg; + AVFilterFormats 
*formats, *rates = NULL; + AVFilterChannelLayouts *layouts = NULL; + + for (type = 0; type < TYPE_ALL; type++) { + nb_str = cat->nb_streams[type]; + for (str = 0; str < nb_str; str++) { + idx = idx0; + + /* Set the output formats */ + formats = ff_all_formats(type); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &ctx->outputs[idx]->in_formats); + if (type == AVMEDIA_TYPE_AUDIO) { + rates = ff_all_samplerates(); + if (!rates) + return AVERROR(ENOMEM); + ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates); + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts); + } + + /* Set the same formats for each corresponding input */ + for (seg = 0; seg < cat->nb_segments; seg++) { + ff_formats_ref(formats, &ctx->inputs[idx]->out_formats); + if (type == AVMEDIA_TYPE_AUDIO) { + ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates); + ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts); + } + idx += ctx->nb_outputs; + } + + idx0++; + } + } + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + ConcatContext *cat = ctx->priv; + unsigned out_no = FF_OUTLINK_IDX(outlink); + unsigned in_no = out_no, seg; + AVFilterLink *inlink = ctx->inputs[in_no]; + + /* enhancement: find a common one */ + outlink->time_base = AV_TIME_BASE_Q; + outlink->w = inlink->w; + outlink->h = inlink->h; + outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; + outlink->format = inlink->format; + for (seg = 1; seg < cat->nb_segments; seg++) { + inlink = ctx->inputs[in_no += ctx->nb_outputs]; + /* possible enhancement: unsafe mode, do not check */ + if (outlink->w != inlink->w || + outlink->h != inlink->h || + outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num || + outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) { + av_log(ctx, AV_LOG_ERROR, "Input link %s 
parameters " + "(size %dx%d, SAR %d:%d) do not match the corresponding " + "output link %s parameters (%dx%d, SAR %d:%d)\n", + ctx->input_pads[in_no].name, inlink->w, inlink->h, + inlink->sample_aspect_ratio.num, + inlink->sample_aspect_ratio.den, + ctx->input_pads[out_no].name, outlink->w, outlink->h, + outlink->sample_aspect_ratio.num, + outlink->sample_aspect_ratio.den); + if (!cat->unsafe) + return AVERROR(EINVAL); + } + } + + return 0; +} + +static void push_frame(AVFilterContext *ctx, unsigned in_no, + AVFilterBufferRef *buf) +{ + ConcatContext *cat = ctx->priv; + unsigned out_no = in_no % ctx->nb_outputs; + AVFilterLink * inlink = ctx-> inputs[ in_no]; + AVFilterLink *outlink = ctx->outputs[out_no]; + struct concat_in *in = &cat->in[in_no]; + + buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base); + in->pts = buf->pts; + in->nb_frames++; + /* add duration to input PTS */ + if (inlink->sample_rate) + /* use number of audio samples */ + in->pts += av_rescale_q(buf->audio->nb_samples, + (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + else if (in->nb_frames >= 2) + /* use mean duration */ + in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1); + + buf->pts += cat->delta_ts; + ff_filter_frame(outlink, buf); +} + +static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) +{ + AVFilterContext *ctx = inlink->dst; + ConcatContext *cat = ctx->priv; + unsigned in_no = FF_INLINK_IDX(inlink); + + if (in_no < cat->cur_idx) { + av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n", + ctx->input_pads[in_no].name); + avfilter_unref_buffer(buf); + } else if (in_no >= cat->cur_idx + ctx->nb_outputs) { + ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf); + } else { + push_frame(ctx, in_no, buf); + } +} + +static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, + int w, int h) +{ + AVFilterContext *ctx = inlink->dst; + unsigned in_no = FF_INLINK_IDX(inlink); + AVFilterLink *outlink = 
ctx->outputs[in_no % ctx->nb_outputs]; + + return ff_get_video_buffer(outlink, perms, w, h); +} + +static AVFilterBufferRef *get_audio_buffer(AVFilterLink *inlink, int perms, + int nb_samples) +{ + AVFilterContext *ctx = inlink->dst; + unsigned in_no = FF_INLINK_IDX(inlink); + AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs]; + + return ff_get_audio_buffer(outlink, perms, nb_samples); +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) +{ + process_frame(inlink, buf); + return 0; /* enhancement: handle error return */ +} + +static void close_input(AVFilterContext *ctx, unsigned in_no) +{ + ConcatContext *cat = ctx->priv; + + cat->in[in_no].eof = 1; + cat->nb_in_active--; + av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n", + ctx->input_pads[in_no].name, cat->nb_in_active); +} + +static void find_next_delta_ts(AVFilterContext *ctx) +{ + ConcatContext *cat = ctx->priv; + unsigned i = cat->cur_idx; + unsigned imax = i + ctx->nb_outputs; + int64_t pts; + + pts = cat->in[i++].pts; + for (; i < imax; i++) + pts = FFMAX(pts, cat->in[i].pts); + cat->delta_ts += pts; +} + +static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no) +{ + ConcatContext *cat = ctx->priv; + AVFilterLink *outlink = ctx->outputs[out_no]; + int64_t base_pts = cat->in[in_no].pts + cat->delta_ts; + int64_t nb_samples, sent = 0; + int frame_nb_samples; + AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate }; + AVFilterBufferRef *buf; + int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); + + if (!rate_tb.den) + return; + nb_samples = av_rescale_q(cat->delta_ts - base_pts, + outlink->time_base, rate_tb); + frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */ + while (nb_samples) { + frame_nb_samples = FFMIN(frame_nb_samples, nb_samples); + buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, frame_nb_samples); + if (!buf) + return; + av_samples_set_silence(buf->extended_data, 0, 
frame_nb_samples, + nb_channels, outlink->format); + buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base); + ff_filter_frame(outlink, buf); + sent += frame_nb_samples; + nb_samples -= frame_nb_samples; + } +} + +static void flush_segment(AVFilterContext *ctx) +{ + ConcatContext *cat = ctx->priv; + unsigned str, str_max; + + find_next_delta_ts(ctx); + cat->cur_idx += ctx->nb_outputs; + cat->nb_in_active = ctx->nb_outputs; + av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n", + cat->delta_ts); + + if (cat->cur_idx < ctx->nb_inputs) { + /* pad audio streams with silence */ + str = cat->nb_streams[AVMEDIA_TYPE_VIDEO]; + str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO]; + for (; str < str_max; str++) + send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str); + /* flush queued buffers */ + /* possible enhancement: flush in PTS order */ + str_max = cat->cur_idx + ctx->nb_outputs; + for (str = cat->cur_idx; str < str_max; str++) + while (cat->in[str].queue.available) + push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue)); + } +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + ConcatContext *cat = ctx->priv; + unsigned out_no = FF_OUTLINK_IDX(outlink); + unsigned in_no = out_no + cat->cur_idx; + unsigned str, str_max; + int ret; + + while (1) { + if (in_no >= ctx->nb_inputs) + return AVERROR_EOF; + if (!cat->in[in_no].eof) { + ret = ff_request_frame(ctx->inputs[in_no]); + if (ret != AVERROR_EOF) + return ret; + close_input(ctx, in_no); + } + /* cycle on all inputs to finish the segment */ + /* possible enhancement: request in PTS order */ + str_max = cat->cur_idx + ctx->nb_outputs - 1; + for (str = cat->cur_idx; cat->nb_in_active; + str = str == str_max ? 
cat->cur_idx : str + 1) { + if (cat->in[str].eof) + continue; + ret = ff_request_frame(ctx->inputs[str]); + if (ret == AVERROR_EOF) + close_input(ctx, str); + else if (ret < 0) + return ret; + } + flush_segment(ctx); + in_no += ctx->nb_outputs; + } +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ConcatContext *cat = ctx->priv; + int ret; + unsigned seg, type, str; + + cat->class = &concat_class; + av_opt_set_defaults(cat); + ret = av_set_options_string(cat, args, "=", ":"); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Error parsing options: '%s'\n", args); + return ret; + } + + /* create input pads */ + for (seg = 0; seg < cat->nb_segments; seg++) { + for (type = 0; type < TYPE_ALL; type++) { + for (str = 0; str < cat->nb_streams[type]; str++) { + AVFilterPad pad = { + .type = type, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + .get_video_buffer = get_video_buffer, + .get_audio_buffer = get_audio_buffer, + .filter_frame = filter_frame, + }; + pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str); + ff_insert_inpad(ctx, ctx->nb_inputs, &pad); + } + } + } + /* create output pads */ + for (type = 0; type < TYPE_ALL; type++) { + for (str = 0; str < cat->nb_streams[type]; str++) { + AVFilterPad pad = { + .type = type, + .config_props = config_output, + .request_frame = request_frame, + }; + pad.name = av_asprintf("out:%c%d", "va"[type], str); + ff_insert_outpad(ctx, ctx->nb_outputs, &pad); + } + } + + cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in)); + if (!cat->in) + return AVERROR(ENOMEM); + cat->nb_in_active = ctx->nb_outputs; + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ConcatContext *cat = ctx->priv; + unsigned i; + + for (i = 0; i < ctx->nb_inputs; i++) { + av_freep(&ctx->input_pads[i].name); + ff_bufqueue_discard_all(&cat->in[i].queue); + } + for (i = 0; i < ctx->nb_outputs; i++) + av_freep(&ctx->output_pads[i].name); + av_free(cat->in); +} + +AVFilter avfilter_avf_concat = { + .name = "concat", 
+ .description = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = sizeof(ConcatContext), + .inputs = NULL, + .outputs = NULL, + .priv_class = &concat_class, +}; diff --git a/libavfilter/avf_showspectrum.c b/libavfilter/avf_showspectrum.c new file mode 100644 index 0000000..4eb9731 --- /dev/null +++ b/libavfilter/avf_showspectrum.c @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2012 Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode + * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini). 
+ */ + +#include <math.h> + +#include "libavcodec/avfft.h" +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "internal.h" + +enum DisplayMode { COMBINED, SEPARATE, NB_MODES }; +enum DisplayScale { LINEAR, SQRT, CBRT, LOG, NB_SCALES }; +enum ColorMode { CHANNEL, INTENSITY, NB_CLMODES }; + +typedef struct { + const AVClass *class; + int w, h; + AVFilterBufferRef *outpicref; + int req_fullfilled; + int nb_display_channels; + int channel_height; + int sliding; ///< 1 if sliding mode, 0 otherwise + enum DisplayMode mode; ///< channel display mode + enum ColorMode color_mode; ///< display color scheme + enum DisplayScale scale; + float saturation; ///< color saturation multiplier + int xpos; ///< x position (current column) + RDFTContext *rdft; ///< Real Discrete Fourier Transform context + int rdft_bits; ///< number of bits (RDFT window size = 1<<rdft_bits) + FFTSample **rdft_data; ///< bins holder for each (displayed) channels + int filled; ///< number of samples (per channel) filled in current rdft_buffer + int consumed; ///< number of samples (per channel) consumed from the input frame + float *window_func_lut; ///< Window function LUT + float *combine_buffer; ///< color combining buffer (3 * h items) +} ShowSpectrumContext; + +#define OFFSET(x) offsetof(ShowSpectrumContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption showspectrum_options[] = { + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS }, + { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS }, + { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS }, + { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" }, + { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, 
FLAGS, "mode" }, + { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" }, + { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" }, + { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" }, + { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" }, + { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" }, + { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" }, + { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" }, + { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" }, + { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" }, + { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(showspectrum); + +typedef struct { + float a, y, u, v; +} intensity_color_table_item; +static const intensity_color_table_item intensity_color_table[] = +{ + { 0, 0, 0, 0 }, + { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 }, + { 0.3, .1857228179456802, .1772436246393981, .1747555484041475 }, + { 0.6, .2818498058365613, -.1593064119945782, .4713207455460892 }, + { 0.73, .6583062117554781, -.3716070802232764, .2435275933125293 }, + { 0.78, 0.763185357582429, -.4307467689263783, .1686649662231043 }, + { 0.91, .9533636363636364, -.2045454545454546, .03313636363636363 }, + { 1, 1, 0, 0 } +}; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ShowSpectrumContext *showspectrum = ctx->priv; + int err; + + showspectrum->class = &showspectrum_class; + av_opt_set_defaults(showspectrum); + + if ((err = av_set_options_string(showspectrum, args, "=", ":")) < 0) + return err; + + 
return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ShowSpectrumContext *showspectrum = ctx->priv; + int i; + + av_freep(&showspectrum->combine_buffer); + av_rdft_end(showspectrum->rdft); + for (i = 0; i < showspectrum->nb_display_channels; i++) + av_freep(&showspectrum->rdft_data[i]); + av_freep(&showspectrum->rdft_data); + av_freep(&showspectrum->window_func_lut); + avfilter_unref_bufferp(&showspectrum->outpicref); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE }; + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE }; + + /* set input audio formats */ + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_formats); + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_samplerates); + + /* set output video format */ + formats = ff_make_format_list(pix_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &outlink->in_formats); + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + ShowSpectrumContext *showspectrum = ctx->priv; + int i, rdft_bits, win_size, h; + + outlink->w = showspectrum->w; + outlink->h = showspectrum->h; + + h = (showspectrum->mode == COMBINED) ? 
outlink->h : outlink->h / inlink->channels; + showspectrum->channel_height = h; + + /* RDFT window size (precision) according to the requested output frame height */ + for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++); + win_size = 1 << rdft_bits; + + /* (re-)configuration if the video output changed (or first init) */ + if (rdft_bits != showspectrum->rdft_bits) { + size_t rdft_size, rdft_listsize; + AVFilterBufferRef *outpicref; + + av_rdft_end(showspectrum->rdft); + showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C); + showspectrum->rdft_bits = rdft_bits; + + /* RDFT buffers: x2 for each (display) channel buffer. + * Note: we use free and malloc instead of a realloc-like function to + * make sure the buffer is aligned in memory for the FFT functions. */ + for (i = 0; i < showspectrum->nb_display_channels; i++) + av_freep(&showspectrum->rdft_data[i]); + av_freep(&showspectrum->rdft_data); + showspectrum->nb_display_channels = inlink->channels; + + if (av_size_mult(sizeof(*showspectrum->rdft_data), + showspectrum->nb_display_channels, &rdft_listsize) < 0) + return AVERROR(EINVAL); + if (av_size_mult(sizeof(**showspectrum->rdft_data), + win_size, &rdft_size) < 0) + return AVERROR(EINVAL); + showspectrum->rdft_data = av_malloc(rdft_listsize); + if (!showspectrum->rdft_data) + return AVERROR(ENOMEM); + for (i = 0; i < showspectrum->nb_display_channels; i++) { + showspectrum->rdft_data[i] = av_malloc(rdft_size); + if (!showspectrum->rdft_data[i]) + return AVERROR(ENOMEM); + } + showspectrum->filled = 0; + + /* pre-calc windowing function (hann here) */ + showspectrum->window_func_lut = + av_realloc_f(showspectrum->window_func_lut, win_size, + sizeof(*showspectrum->window_func_lut)); + if (!showspectrum->window_func_lut) + return AVERROR(ENOMEM); + for (i = 0; i < win_size; i++) + showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1))); + + /* prepare the initial picref buffer (black frame) */ + 
avfilter_unref_bufferp(&showspectrum->outpicref); + showspectrum->outpicref = outpicref = + ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2, + outlink->w, outlink->h); + if (!outpicref) + return AVERROR(ENOMEM); + outlink->sample_aspect_ratio = (AVRational){1,1}; + memset(outpicref->data[0], 0, outlink->h * outpicref->linesize[0]); + } + + if (showspectrum->xpos >= outlink->w) + showspectrum->xpos = 0; + + showspectrum->combine_buffer = + av_realloc_f(showspectrum->combine_buffer, outlink->h * 3, + sizeof(*showspectrum->combine_buffer)); + + av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n", + showspectrum->w, showspectrum->h, win_size); + return 0; +} + +inline static void push_frame(AVFilterLink *outlink) +{ + ShowSpectrumContext *showspectrum = outlink->src->priv; + + showspectrum->xpos++; + if (showspectrum->xpos >= outlink->w) + showspectrum->xpos = 0; + showspectrum->filled = 0; + showspectrum->req_fullfilled = 1; + + ff_filter_frame(outlink, avfilter_ref_buffer(showspectrum->outpicref, ~AV_PERM_WRITE)); +} + +static int request_frame(AVFilterLink *outlink) +{ + ShowSpectrumContext *showspectrum = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + showspectrum->req_fullfilled = 0; + do { + ret = ff_request_frame(inlink); + } while (!showspectrum->req_fullfilled && ret >= 0); + + if (ret == AVERROR_EOF && showspectrum->outpicref) + push_frame(outlink); + return ret; +} + +static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insamples, int nb_samples) +{ + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + ShowSpectrumContext *showspectrum = ctx->priv; + AVFilterBufferRef *outpicref = showspectrum->outpicref; + + /* nb_freq contains the power of two superior or equal to the output image + * height (or half the RDFT window size) */ + const int nb_freq = 1 << (showspectrum->rdft_bits - 1); + const int win_size = nb_freq << 1; + const double w = 
1. / (sqrt(nb_freq) * 32768.); + + int ch, plane, n, y; + const int start = showspectrum->filled; + const int add_samples = FFMIN(win_size - start, nb_samples); + + /* fill RDFT input with the number of samples available */ + for (ch = 0; ch < showspectrum->nb_display_channels; ch++) { + const int16_t *p = (int16_t *)insamples->extended_data[ch]; + + p += showspectrum->consumed; + for (n = 0; n < add_samples; n++) + showspectrum->rdft_data[ch][start + n] = p[n] * showspectrum->window_func_lut[start + n]; + } + showspectrum->filled += add_samples; + + /* complete RDFT window size? */ + if (showspectrum->filled == win_size) { + + /* channel height */ + int h = showspectrum->channel_height; + + /* run RDFT on each samples set */ + for (ch = 0; ch < showspectrum->nb_display_channels; ch++) + av_rdft_calc(showspectrum->rdft, showspectrum->rdft_data[ch]); + + /* fill a new spectrum column */ +#define RE(y, ch) showspectrum->rdft_data[ch][2 * y + 0] +#define IM(y, ch) showspectrum->rdft_data[ch][2 * y + 1] +#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch)) + + /* initialize buffer for combining to black */ + for (y = 0; y < outlink->h; y++) { + showspectrum->combine_buffer[3 * y ] = 0; + showspectrum->combine_buffer[3 * y + 1] = 127.5; + showspectrum->combine_buffer[3 * y + 2] = 127.5; + } + + for (ch = 0; ch < showspectrum->nb_display_channels; ch++) { + float yf, uf, vf; + + /* decide color range */ + switch (showspectrum->mode) { + case COMBINED: + // reduce range by channel count + yf = 256.0f / showspectrum->nb_display_channels; + switch (showspectrum->color_mode) { + case INTENSITY: + uf = yf; + vf = yf; + break; + case CHANNEL: + /* adjust saturation for mixed UV coloring */ + /* this factor is correct for infinite channels, an approximation otherwise */ + uf = yf * M_PI; + vf = yf * M_PI; + break; + default: + av_assert0(0); + } + break; + case SEPARATE: + // full range + yf = 256.0f; + uf = 256.0f; + vf = 256.0f; + break; + default: + av_assert0(0); + } + + if 
(showspectrum->color_mode == CHANNEL) { + if (showspectrum->nb_display_channels > 1) { + uf *= 0.5 * sin((2 * M_PI * ch) / showspectrum->nb_display_channels); + vf *= 0.5 * cos((2 * M_PI * ch) / showspectrum->nb_display_channels); + } else { + uf = 0.0f; + vf = 0.0f; + } + } + uf *= showspectrum->saturation; + vf *= showspectrum->saturation; + + /* draw the channel */ + for (y = 0; y < h; y++) { + int row = (showspectrum->mode == COMBINED) ? y : ch * h + y; + float *out = &showspectrum->combine_buffer[3 * row]; + + /* get magnitude */ + float a = w * MAGNITUDE(y, ch); + + /* apply scale */ + switch (showspectrum->scale) { + case LINEAR: + break; + case SQRT: + a = sqrt(a); + break; + case CBRT: + a = cbrt(a); + break; + case LOG: + a = 1 - log(FFMAX(FFMIN(1, a), 1e-6)) / log(1e-6); // zero = -120dBFS + break; + default: + av_assert0(0); + } + + if (showspectrum->color_mode == INTENSITY) { + float y, u, v; + int i; + + for (i = 1; i < sizeof(intensity_color_table) / sizeof(*intensity_color_table) - 1; i++) + if (intensity_color_table[i].a >= a) + break; + // i now is the first item >= the color + // now we know to interpolate between item i - 1 and i + if (a <= intensity_color_table[i - 1].a) { + y = intensity_color_table[i - 1].y; + u = intensity_color_table[i - 1].u; + v = intensity_color_table[i - 1].v; + } else if (a >= intensity_color_table[i].a) { + y = intensity_color_table[i].y; + u = intensity_color_table[i].u; + v = intensity_color_table[i].v; + } else { + float start = intensity_color_table[i - 1].a; + float end = intensity_color_table[i].a; + float lerpfrac = (a - start) / (end - start); + y = intensity_color_table[i - 1].y * (1.0f - lerpfrac) + + intensity_color_table[i].y * lerpfrac; + u = intensity_color_table[i - 1].u * (1.0f - lerpfrac) + + intensity_color_table[i].u * lerpfrac; + v = intensity_color_table[i - 1].v * (1.0f - lerpfrac) + + intensity_color_table[i].v * lerpfrac; + } + + out[0] += y * yf; + out[1] += u * uf; + out[2] += v * vf; + } 
else { + out[0] += a * yf; + out[1] += a * uf; + out[2] += a * vf; + } + } + } + + /* copy to output */ + if (showspectrum->sliding) { + for (plane = 0; plane < 3; plane++) { + for (y = 0; y < outlink->h; y++) { + uint8_t *p = outpicref->data[plane] + + y * outpicref->linesize[plane]; + memmove(p, p + 1, outlink->w - 1); + } + } + showspectrum->xpos = outlink->w - 1; + } + for (plane = 0; plane < 3; plane++) { + uint8_t *p = outpicref->data[plane] + + (outlink->h - 1) * outpicref->linesize[plane] + + showspectrum->xpos; + for (y = 0; y < outlink->h; y++) { + *p = rint(FFMAX(0, FFMIN(showspectrum->combine_buffer[3 * y + plane], 255))); + p -= outpicref->linesize[plane]; + } + } + + outpicref->pts = insamples->pts + + av_rescale_q(showspectrum->consumed, + (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + push_frame(outlink); + } + + return add_samples; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + AVFilterContext *ctx = inlink->dst; + ShowSpectrumContext *showspectrum = ctx->priv; + int left_samples = insamples->audio->nb_samples; + + showspectrum->consumed = 0; + while (left_samples) { + const int added_samples = plot_spectrum_column(inlink, insamples, left_samples); + showspectrum->consumed += added_samples; + left_samples -= added_samples; + } + + avfilter_unref_buffer(insamples); + return 0; +} + +static const AVFilterPad showspectrum_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad showspectrum_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_avf_showspectrum = { + .name = "showspectrum", + .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + 
.priv_size = sizeof(ShowSpectrumContext), + .inputs = showspectrum_inputs, + .outputs = showspectrum_outputs, + .priv_class = &showspectrum_class, +}; diff --git a/libavfilter/avf_showwaves.c b/libavfilter/avf_showwaves.c new file mode 100644 index 0000000..1b9d28d --- /dev/null +++ b/libavfilter/avf_showwaves.c @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * audio to video multimedia filter + */ + +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "avfilter.h" +#include "formats.h" +#include "audio.h" +#include "video.h" +#include "internal.h" + +enum ShowWavesMode { + MODE_POINT, + MODE_LINE, + MODE_NB, +}; + +typedef struct { + const AVClass *class; + int w, h; + char *rate_str; + AVRational rate; + int buf_idx; + AVFilterBufferRef *outpicref; + int req_fullfilled; + int n; + int sample_count_mod; + enum ShowWavesMode mode; +} ShowWavesContext; + +#define OFFSET(x) offsetof(ShowWavesContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption showwaves_options[] = { + { "rate", "set video rate", OFFSET(rate_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, 
+ { "r", "set video rate", OFFSET(rate_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, + { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, + { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS }, + + {"mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"}, + {"point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"}, + {"line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"}, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(showwaves); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ShowWavesContext *showwaves = ctx->priv; + int err; + + showwaves->class = &showwaves_class; + av_opt_set_defaults(showwaves); + showwaves->buf_idx = 0; + + if ((err = av_set_options_string(showwaves, args, "=", ":")) < 0) + return err; + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + ShowWavesContext *showwaves = ctx->priv; + + av_freep(&showwaves->rate_str); + avfilter_unref_bufferp(&showwaves->outpicref); +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }; + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; + + /* set input audio formats */ + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_formats); + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, 
&inlink->out_channel_layouts); + + formats = ff_all_samplerates(); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_samplerates); + + /* set output video format */ + formats = ff_make_format_list(pix_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &outlink->in_formats); + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + ShowWavesContext *showwaves = ctx->priv; + int err; + + if (showwaves->n && showwaves->rate_str) { + av_log(ctx, AV_LOG_ERROR, "Options 'n' and 'rate' cannot be set at the same time\n"); + return AVERROR(EINVAL); + } + + if (!showwaves->n) { + if (!showwaves->rate_str) + showwaves->rate = (AVRational){25,1}; /* set default value */ + else if ((err = av_parse_video_rate(&showwaves->rate, showwaves->rate_str)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", showwaves->rate_str); + return err; + } + showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5); + } + + outlink->w = showwaves->w; + outlink->h = showwaves->h; + outlink->sample_aspect_ratio = (AVRational){1,1}; + + outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n}, + (AVRational){showwaves->w,1}); + + av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n", + showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n); + return 0; +} + +inline static int push_frame(AVFilterLink *outlink) +{ + ShowWavesContext *showwaves = outlink->src->priv; + int ret; + + if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0) + showwaves->req_fullfilled = 1; + showwaves->outpicref = NULL; + showwaves->buf_idx = 0; + return ret; +} + +static int request_frame(AVFilterLink *outlink) +{ + ShowWavesContext *showwaves = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + showwaves->req_fullfilled = 0; + do { + ret = 
ff_request_frame(inlink); + } while (!showwaves->req_fullfilled && ret >= 0); + + if (ret == AVERROR_EOF && showwaves->outpicref) + push_frame(outlink); + return ret; +} + +#define MAX_INT16 ((1<<15) -1) + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + ShowWavesContext *showwaves = ctx->priv; + const int nb_samples = insamples->audio->nb_samples; + AVFilterBufferRef *outpicref = showwaves->outpicref; + int linesize = outpicref ? outpicref->linesize[0] : 0; + int16_t *p = (int16_t *)insamples->data[0]; + int nb_channels = av_get_channel_layout_nb_channels(insamples->audio->channel_layout); + int i, j, k, h, ret = 0; + const int n = showwaves->n; + const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */ + + /* draw data in the buffer */ + for (i = 0; i < nb_samples; i++) { + if (!showwaves->outpicref) { + showwaves->outpicref = outpicref = + ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, + outlink->w, outlink->h); + if (!outpicref) + return AVERROR(ENOMEM); + outpicref->video->w = outlink->w; + outpicref->video->h = outlink->h; + outpicref->pts = insamples->pts + + av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels, + (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + linesize = outpicref->linesize[0]; + memset(outpicref->data[0], 0, showwaves->h*linesize); + } + for (j = 0; j < nb_channels; j++) { + h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16); + switch (showwaves->mode) { + case MODE_POINT: + if (h >= 0 && h < outlink->h) + *(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x; + break; + + case MODE_LINE: + { + int start = showwaves->h/2, end = av_clip(h, 0, outlink->h-1); + if (start > end) FFSWAP(int16_t, start, end); + for (k = start; k < end; k++) + *(outpicref->data[0] + showwaves->buf_idx + k * linesize) += x; + break; + } + } + 
} + + showwaves->sample_count_mod++; + if (showwaves->sample_count_mod == n) { + showwaves->sample_count_mod = 0; + showwaves->buf_idx++; + } + if (showwaves->buf_idx == showwaves->w) + if ((ret = push_frame(outlink)) < 0) + break; + outpicref = showwaves->outpicref; + } + + avfilter_unref_buffer(insamples); + return ret; +} + +static const AVFilterPad showwaves_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad showwaves_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_avf_showwaves = { + .name = "showwaves", + .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = sizeof(ShowWavesContext), + .inputs = showwaves_inputs, + .outputs = showwaves_outputs, + .priv_class = &showwaves_class, +}; diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c index 93302cc..7c0bee6 100644 --- a/libavfilter/avfilter.c +++ b/libavfilter/avfilter.c @@ -2,25 +2,25 @@ * filter layer * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -/* #define DEBUG */ - +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" #include "libavutil/channel_layout.h" #include "libavutil/common.h" #include "libavutil/imgutils.h" @@ -32,21 +32,73 @@ #include "avfilter.h" #include "formats.h" #include "internal.h" -#include "video.h" +#include "audio.h" + +static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame); + +char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms) +{ + snprintf(buf, buf_size, "%s%s%s%s%s%s", + perms & AV_PERM_READ ? "r" : "", + perms & AV_PERM_WRITE ? "w" : "", + perms & AV_PERM_PRESERVE ? "p" : "", + perms & AV_PERM_REUSE ? "u" : "", + perms & AV_PERM_REUSE2 ? "U" : "", + perms & AV_PERM_NEG_LINESIZES ? "n" : ""); + return buf; +} + +void ff_tlog_ref(void *ctx, AVFilterBufferRef *ref, int end) +{ + av_unused char buf[16]; + ff_tlog(ctx, + "ref[%p buf:%p refcount:%d perms:%s data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64, + ref, ref->buf, ref->buf->refcount, ff_get_ref_perms_string(buf, sizeof(buf), ref->perms), ref->data[0], + ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3], + ref->pts, ref->pos); + + if (ref->video) { + ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c", + ref->video->sample_aspect_ratio.num, ref->video->sample_aspect_ratio.den, + ref->video->w, ref->video->h, + !ref->video->interlaced ? 'P' : /* Progressive */ + ref->video->top_field_first ? 
'T' : 'B', /* Top / Bottom */ + ref->video->key_frame, + av_get_picture_type_char(ref->video->pict_type)); + } + if (ref->audio) { + ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d", + ref->audio->channel_layout, + ref->audio->nb_samples, + ref->audio->sample_rate); + } + + ff_tlog(ctx, "]%s", end ? "\n" : ""); +} unsigned avfilter_version(void) { + av_assert0(LIBAVFILTER_VERSION_MICRO >= 100); return LIBAVFILTER_VERSION_INT; } const char *avfilter_configuration(void) { - return LIBAV_CONFIGURATION; + return FFMPEG_CONFIGURATION; } const char *avfilter_license(void) { #define LICENSE_PREFIX "libavfilter license: " - return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; +} + +void ff_command_queue_pop(AVFilterContext *filter) +{ + AVFilterCommand *c= filter->command_queue; + av_freep(&c->arg); + av_freep(&c->command); + filter->command_queue= c->next; + av_free(c); } void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, @@ -81,8 +133,9 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad, if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) { av_log(src, AV_LOG_ERROR, - "Media type mismatch between the '%s' filter output pad %d and the '%s' filter input pad %d\n", - src->name, srcpad, dst->name, dstpad); + "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n", + src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"), + dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?")); return AVERROR(EINVAL); } @@ -94,12 +147,35 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad, link->srcpad = &src->output_pads[srcpad]; link->dstpad = &dst->input_pads[dstpad]; link->type = src->output_pads[srcpad].type; - assert(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1); + av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE 
== -1); link->format = -1; return 0; } +void avfilter_link_free(AVFilterLink **link) +{ + if (!*link) + return; + + if ((*link)->pool) + ff_free_pool((*link)->pool); + + avfilter_unref_bufferp(&(*link)->partial_buf); + + av_freep(link); +} + +int avfilter_link_get_channels(AVFilterLink *link) +{ + return link->channels; +} + +void avfilter_link_set_closed(AVFilterLink *link, int closed) +{ + link->closed = closed; +} + int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, unsigned filt_srcpad_idx, unsigned filt_dstpad_idx) { @@ -127,6 +203,7 @@ int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, if (link->out_formats) ff_formats_changeref(&link->out_formats, &filt->outputs[filt_dstpad_idx]->out_formats); + if (link->out_samplerates) ff_formats_changeref(&link->out_samplerates, &filt->outputs[filt_dstpad_idx]->out_samplerates); @@ -145,9 +222,13 @@ int avfilter_config_links(AVFilterContext *filter) for (i = 0; i < filter->nb_inputs; i ++) { AVFilterLink *link = filter->inputs[i]; + AVFilterLink *inlink; if (!link) continue; + inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL; + link->current_pts = AV_NOPTS_VALUE; + switch (link->init_state) { case AVLINK_INIT: continue; @@ -175,26 +256,39 @@ int avfilter_config_links(AVFilterContext *filter) return ret; } - if (link->time_base.num == 0 && link->time_base.den == 0) - link->time_base = link->src && link->src->nb_inputs ? - link->src->inputs[0]->time_base : AV_TIME_BASE_Q; + switch (link->type) { + case AVMEDIA_TYPE_VIDEO: + if (!link->time_base.num && !link->time_base.den) + link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q; - if (link->type == AVMEDIA_TYPE_VIDEO) { if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den) - link->sample_aspect_ratio = link->src->nb_inputs ? - link->src->inputs[0]->sample_aspect_ratio : (AVRational){1,1}; + link->sample_aspect_ratio = inlink ? 
+ inlink->sample_aspect_ratio : (AVRational){1,1}; + + if (inlink && !link->frame_rate.num && !link->frame_rate.den) + link->frame_rate = inlink->frame_rate; - if (link->src->nb_inputs) { + if (inlink) { if (!link->w) - link->w = link->src->inputs[0]->w; + link->w = inlink->w; if (!link->h) - link->h = link->src->inputs[0]->h; + link->h = inlink->h; } else if (!link->w || !link->h) { av_log(link->src, AV_LOG_ERROR, "Video source filters must set their output link's " "width and height\n"); return AVERROR(EINVAL); } + break; + + case AVMEDIA_TYPE_AUDIO: + if (inlink) { + if (!link->time_base.num && !link->time_base.den) + link->time_base = inlink->time_base; + } + + if (!link->time_base.num && !link->time_base.den) + link->time_base = (AVRational) {1, link->sample_rate}; } if ((config_link = link->dstpad->config_props)) @@ -212,11 +306,11 @@ int avfilter_config_links(AVFilterContext *filter) return 0; } -void ff_dlog_link(void *ctx, AVFilterLink *link, int end) +void ff_tlog_link(void *ctx, AVFilterLink *link, int end) { if (link->type == AVMEDIA_TYPE_VIDEO) { - av_dlog(ctx, - "link[%p s:%dx%d fmt:%-16s %-16s->%-16s]%s", + ff_tlog(ctx, + "link[%p s:%dx%d fmt:%s %s->%s]%s", link, link->w, link->h, av_get_pix_fmt_name(link->format), link->src ? link->src->filter->name : "", @@ -226,9 +320,9 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end) char buf[128]; av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout); - av_dlog(ctx, - "link[%p r:%d cl:%s fmt:%-16s %-16s->%-16s]%s", - link, link->sample_rate, buf, + ff_tlog(ctx, + "link[%p r:%d cl:%s fmt:%s %s->%s]%s", + link, (int)link->sample_rate, buf, av_get_sample_fmt_name(link->format), link->src ? link->src->filter->name : "", link->dst ? 
link->dst->filter->name : "", @@ -238,13 +332,24 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end) int ff_request_frame(AVFilterLink *link) { - FF_DPRINTF_START(NULL, request_frame); ff_dlog_link(NULL, link, 1); + int ret = -1; + FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1); + if (link->closed) + return AVERROR_EOF; if (link->srcpad->request_frame) - return link->srcpad->request_frame(link); + ret = link->srcpad->request_frame(link); else if (link->src->inputs[0]) - return ff_request_frame(link->src->inputs[0]); - else return -1; + ret = ff_request_frame(link->src->inputs[0]); + if (ret == AVERROR_EOF && link->partial_buf) { + AVFilterBufferRef *pbuf = link->partial_buf; + link->partial_buf = NULL; + ff_filter_frame_framed(link, pbuf); + return 0; + } + if (ret == AVERROR_EOF) + link->closed = 1; + return ret; } int ff_poll_frame(AVFilterLink *link) @@ -265,7 +370,28 @@ int ff_poll_frame(AVFilterLink *link) return min; } -#define MAX_REGISTERED_AVFILTERS_NB 64 +void ff_update_link_current_pts(AVFilterLink *link, int64_t pts) +{ + if (pts == AV_NOPTS_VALUE) + return; + link->current_pts = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q); + /* TODO use duration */ + if (link->graph && link->age_index >= 0) + ff_avfilter_graph_update_heap(link->graph, link); +} + +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags) +{ + if(!strcmp(cmd, "ping")){ + av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name); + return 0; + }else if(filter->filter->process_command) { + return filter->filter->process_command(filter, cmd, arg, res, res_len, flags); + } + return AVERROR(ENOSYS); +} + +#define MAX_REGISTERED_AVFILTERS_NB 256 static AVFilter *registered_avfilters[MAX_REGISTERED_AVFILTERS_NB + 1]; @@ -284,8 +410,21 @@ AVFilter *avfilter_get_by_name(const char *name) int avfilter_register(AVFilter *filter) { - if (next_registered_avfilter_idx == 
MAX_REGISTERED_AVFILTERS_NB) - return -1; + int i; + + if (next_registered_avfilter_idx == MAX_REGISTERED_AVFILTERS_NB) { + av_log(NULL, AV_LOG_ERROR, + "Maximum number of registered filters %d reached, " + "impossible to register filter with name '%s'\n", + MAX_REGISTERED_AVFILTERS_NB, filter->name); + return AVERROR(ENOMEM); + } + + for(i=0; filter->inputs && filter->inputs[i].name; i++) { + const AVFilterPad *input = &filter->inputs[i]; + av_assert0( !input->filter_frame + || (!input->start_frame && !input->end_frame)); + } registered_avfilters[next_registered_avfilter_idx++] = filter; return 0; @@ -313,19 +452,54 @@ static int pad_count(const AVFilterPad *pads) return count; } -static const char *filter_name(void *p) +static const char *default_filter_name(void *filter_ctx) { - AVFilterContext *filter = p; - return filter->filter->name; + AVFilterContext *ctx = filter_ctx; + return ctx->name ? ctx->name : ctx->filter->name; +} + +static void *filter_child_next(void *obj, void *prev) +{ + AVFilterContext *ctx = obj; + if (!prev && ctx->filter && ctx->filter->priv_class) + return ctx->priv; + return NULL; +} + +static const AVClass *filter_child_class_next(const AVClass *prev) +{ + AVFilter **filter_ptr = NULL; + + /* find the filter that corresponds to prev */ + while (prev && *(filter_ptr = av_filter_next(filter_ptr))) + if ((*filter_ptr)->priv_class == prev) + break; + + /* could not find filter corresponding to prev */ + if (prev && !(*filter_ptr)) + return NULL; + + /* find next filter with specific options */ + while (*(filter_ptr = av_filter_next(filter_ptr))) + if ((*filter_ptr)->priv_class) + return (*filter_ptr)->priv_class; + return NULL; } static const AVClass avfilter_class = { - "AVFilter", - filter_name, - NULL, - LIBAVUTIL_VERSION_INT, + .class_name = "AVFilter", + .item_name = default_filter_name, + .version = LIBAVUTIL_VERSION_INT, + .category = AV_CLASS_CATEGORY_FILTER, + .child_next = filter_child_next, + .child_class_next = 
filter_child_class_next, }; +const AVClass *avfilter_get_class(void) +{ + return &avfilter_class; +} + int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name) { AVFilterContext *ret; @@ -393,6 +567,9 @@ void avfilter_free(AVFilterContext *filter) int i; AVFilterLink *link; + if (!filter) + return; + if (filter->filter->uninit) filter->filter->uninit(filter); @@ -407,7 +584,7 @@ void avfilter_free(AVFilterContext *filter) ff_channel_layouts_unref(&link->in_channel_layouts); ff_channel_layouts_unref(&link->out_channel_layouts); } - av_freep(&link); + avfilter_link_free(&link); } for (i = 0; i < filter->nb_outputs; i++) { if ((link = filter->outputs[i])) { @@ -420,7 +597,7 @@ void avfilter_free(AVFilterContext *filter) ff_channel_layouts_unref(&link->in_channel_layouts); ff_channel_layouts_unref(&link->out_channel_layouts); } - av_freep(&link); + avfilter_link_free(&link); } av_freep(&filter->name); @@ -429,6 +606,9 @@ void avfilter_free(AVFilterContext *filter) av_freep(&filter->inputs); av_freep(&filter->outputs); av_freep(&filter->priv); + while(filter->command_queue){ + ff_command_queue_pop(filter); + } av_free(filter); } @@ -436,7 +616,9 @@ int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque { int ret=0; - if (filter->filter->init) + if (filter->filter->init_opaque) + ret = filter->filter->init_opaque(filter, args, opaque); + else if (filter->filter->init) ret = filter->filter->init(filter, args); return ret; } @@ -456,21 +638,31 @@ static int default_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) return ff_filter_frame(link->dst->outputs[0], frame); } -int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) +static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame) { int (*filter_frame)(AVFilterLink *, AVFilterBufferRef *); + AVFilterPad *src = link->srcpad; AVFilterPad *dst = link->dstpad; AVFilterBufferRef *out; - int perms = frame->perms; + int perms, 
ret; + AVFilterCommand *cmd= link->dst->command_queue; + int64_t pts; - FF_DPRINTF_START(NULL, filter_frame); - ff_dlog_link(NULL, link, 1); + if (link->closed) { + avfilter_unref_buffer(frame); + return AVERROR_EOF; + } if (!(filter_frame = dst->filter_frame)) filter_frame = default_filter_frame; + av_assert1((frame->perms & src->min_perms) == src->min_perms); + frame->perms &= ~ src->rej_perms; + perms = frame->perms; + if (frame->linesize[0] < 0) perms |= AV_PERM_NEG_LINESIZES; + /* prepare to copy the frame if the buffer has insufficient permissions */ if ((dst->min_perms & perms) != dst->min_perms || dst->rej_perms & perms) { @@ -478,6 +670,7 @@ int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) "Copying data in avfilter (have perms %x, need %x, reject %x)\n", perms, link->dstpad->min_perms, link->dstpad->rej_perms); + /* Maybe use ff_copy_buffer_ref instead? */ switch (link->type) { case AVMEDIA_TYPE_VIDEO: out = ff_get_video_buffer(link, dst->min_perms, @@ -513,5 +706,88 @@ int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) } else out = frame; - return filter_frame(link, out); + while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)){ + av_log(link->dst, AV_LOG_DEBUG, + "Processing command time:%f command:%s arg:%s\n", + cmd->time, cmd->command, cmd->arg); + avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags); + ff_command_queue_pop(link->dst); + cmd= link->dst->command_queue; + } + + pts = out->pts; + ret = filter_frame(link, out); + ff_update_link_current_pts(link, pts); + return ret; +} + +static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFilterBufferRef *frame) +{ + int insamples = frame->audio->nb_samples, inpos = 0, nb_samples; + AVFilterBufferRef *pbuf = link->partial_buf; + int nb_channels = frame->audio->channels; + int ret = 0; + + /* Handle framing (min_samples, max_samples) */ + while (insamples) { + if (!pbuf) { + AVRational samples_tb = { 1, link->sample_rate }; + int 
perms = link->dstpad->min_perms | AV_PERM_WRITE; + pbuf = ff_get_audio_buffer(link, perms, link->partial_buf_size); + if (!pbuf) { + av_log(link->dst, AV_LOG_WARNING, + "Samples dropped due to memory allocation failure.\n"); + return 0; + } + avfilter_copy_buffer_ref_props(pbuf, frame); + pbuf->pts = frame->pts + + av_rescale_q(inpos, samples_tb, link->time_base); + pbuf->audio->nb_samples = 0; + } + nb_samples = FFMIN(insamples, + link->partial_buf_size - pbuf->audio->nb_samples); + av_samples_copy(pbuf->extended_data, frame->extended_data, + pbuf->audio->nb_samples, inpos, + nb_samples, nb_channels, link->format); + inpos += nb_samples; + insamples -= nb_samples; + pbuf->audio->nb_samples += nb_samples; + if (pbuf->audio->nb_samples >= link->min_samples) { + ret = ff_filter_frame_framed(link, pbuf); + pbuf = NULL; + } + } + avfilter_unref_buffer(frame); + link->partial_buf = pbuf; + return ret; +} + +int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) +{ + FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1); + + /* Consistency checks */ + if (link->type == AVMEDIA_TYPE_VIDEO) { + if (strcmp(link->dst->filter->name, "scale")) { + av_assert1(frame->format == link->format); + av_assert1(frame->video->w == link->w); + av_assert1(frame->video->h == link->h); + } + } else { + av_assert1(frame->format == link->format); + av_assert1(frame->audio->channels == link->channels); + av_assert1(frame->audio->channel_layout == link->channel_layout); + av_assert1(frame->audio->sample_rate == link->sample_rate); + } + + /* Go directly to actual filtering if possible */ + if (link->type == AVMEDIA_TYPE_AUDIO && + link->min_samples && + (link->partial_buf || + frame->audio->nb_samples < link->min_samples || + frame->audio->nb_samples > link->max_samples)) { + return ff_filter_frame_needs_framing(link, frame); + } else { + return ff_filter_frame_framed(link, frame); + } } diff --git a/libavfilter/avfilter.h 
b/libavfilter/avfilter.h index c5f8d56..1c80167 100644 --- a/libavfilter/avfilter.h +++ b/libavfilter/avfilter.h @@ -2,34 +2,45 @@ * filter layer * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVFILTER_AVFILTER_H #define AVFILTER_AVFILTER_H +/** + * @file + * @ingroup lavfi + * external API header + */ + +/** + * @defgroup lavfi Libavfilter + * @{ + */ + +#include <stddef.h> + #include "libavutil/avutil.h" +#include "libavutil/dict.h" #include "libavutil/log.h" #include "libavutil/samplefmt.h" #include "libavutil/pixfmt.h" #include "libavutil/rational.h" -#include "libavcodec/avcodec.h" - -#include <stddef.h> #include "libavfilter/version.h" @@ -48,6 +59,10 @@ const char *avfilter_configuration(void); */ const char *avfilter_license(void); +/** + * Get the class for the AVFilterContext struct. 
+ */ +const AVClass *avfilter_get_class(void); typedef struct AVFilterContext AVFilterContext; typedef struct AVFilterLink AVFilterLink; @@ -100,6 +115,9 @@ typedef struct AVFilterBuffer { #define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time #define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time #define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes +#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned + +#define AVFILTER_ALIGN 16 //not part of ABI /** * Audio specific properties in a reference to an AVFilterBuffer. Since @@ -108,9 +126,9 @@ typedef struct AVFilterBuffer { */ typedef struct AVFilterBufferRefAudioProps { uint64_t channel_layout; ///< channel layout of audio buffer - int nb_samples; ///< number of audio samples + int nb_samples; ///< number of audio samples per channel int sample_rate; ///< audio buffer sample rate - int planar; ///< audio buffer - planar or packed + int channels; ///< number of channels (do not access directly) } AVFilterBufferRefAudioProps; /** @@ -121,11 +139,14 @@ typedef struct AVFilterBufferRefAudioProps { typedef struct AVFilterBufferRefVideoProps { int w; ///< image width int h; ///< image height - AVRational pixel_aspect; ///< pixel aspect ratio + AVRational sample_aspect_ratio; ///< sample aspect ratio int interlaced; ///< is frame interlaced int top_field_first; ///< field order enum AVPictureType pict_type; ///< picture type of the frame int key_frame; ///< 1 -> keyframe, 0-> not + int qp_table_linesize; ///< qp_table stride + int qp_table_size; ///< qp_table size + int8_t *qp_table; ///< array of Quantization Parameters } AVFilterBufferRefVideoProps; /** @@ -172,6 +193,8 @@ typedef struct AVFilterBufferRef { int perms; ///< permissions, see the AV_PERM_* flags enum AVMediaType type; ///< media type of buffer data + + AVDictionary *metadata; ///< dictionary containing metadata key=value tags } 
AVFilterBufferRef; /** @@ -210,11 +233,18 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref); */ void avfilter_unref_bufferp(AVFilterBufferRef **ref); +/** + * Get the number of channels of a buffer reference. + */ +int avfilter_ref_get_channels(AVFilterBufferRef *ref); + #if FF_API_AVFILTERPAD_PUBLIC /** * A filter pad used for either input or output. * - * @warning this struct will be removed from public API. + * See doc/filter_design.txt for details on how to implement the methods. + * + * @warning this struct might be removed from public API. * users should call avfilter_pad_get_name() and avfilter_pad_get_type() * to access the name and type fields; there should be no need to access * any other fields from outside of libavfilter. @@ -233,22 +263,29 @@ struct AVFilterPad { enum AVMediaType type; /** + * Input pads: * Minimum required permissions on incoming buffers. Any buffer with * insufficient permissions will be automatically copied by the filter * system to a new buffer which provides the needed access permissions. * - * Input pads only. + * Output pads: + * Guaranteed permissions on outgoing buffers. Any buffer pushed on the + * link must have at least these permissions; this fact is checked by + * asserts. It can be used to optimize buffer allocation. */ int min_perms; /** + * Input pads: * Permissions which are not accepted on incoming buffers. Any buffer * which has any of these permissions set will be automatically copied * by the filter system to a new buffer which does not have those * permissions. This can be used to easily disallow buffers with * AV_PERM_REUSE. * - * Input pads only. + * Output pads: + * Permissions which are automatically removed on outgoing buffers. It + * can be used to optimize buffer allocation. */ int rej_perms; @@ -259,7 +296,7 @@ struct AVFilterPad { /** * Callback function to get a video buffer. If NULL, the filter system will - * use avfilter_default_get_video_buffer(). + * use ff_default_get_video_buffer(). 
* * Input video pads only. */ @@ -267,7 +304,7 @@ struct AVFilterPad { /** * Callback function to get an audio buffer. If NULL, the filter system will - * use avfilter_default_get_audio_buffer(). + * use ff_default_get_audio_buffer(). * * Input audio pads only. */ @@ -291,7 +328,7 @@ struct AVFilterPad { * Input pads only. * * @return >= 0 on success, a negative AVERROR on error. This function - * must ensure that samplesref is properly unreferenced on error if it + * must ensure that frame is properly unreferenced on error if it * hasn't been passed on to another filter. */ int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame); @@ -311,6 +348,8 @@ struct AVFilterPad { * Frame request callback. A call to this should result in at least one * frame being output over the given link. This should return zero on * success, and another value on error. + * See ff_request_frame() for the error codes with a specific + * meaning. * * Output pads only. */ @@ -319,15 +358,18 @@ struct AVFilterPad { /** * Link configuration callback. * - * For output pads, this should set the link properties such as - * width/height. This should NOT set the format property - that is - * negotiated between filters by the filter system using the + * For output pads, this should set the following link properties: + * video: width, height, sample_aspect_ratio, time_base + * audio: sample_rate. + * + * This should NOT set properties such as format, channel_layout, etc which + * are negotiated between filters by the filter system using the * query_formats() callback before this function is called. * * For input pads, this should check the properties of the link, and update * the filter's internal state as necessary. * - * For both input and output filters, this should return zero on success, + * For both input and output pads, this should return zero on success, * and another value on error. 
*/ int (*config_props)(AVFilterLink *link); @@ -402,9 +444,9 @@ typedef struct AVFilter { void (*uninit)(AVFilterContext *ctx); /** - * Queries formats supported by the filter and its pads, and sets the - * in_formats for links connected to its output pads, and out_formats - * for links connected to its input pads. + * Queries formats/layouts supported by the filter and its pads, and sets + * the in_formats/in_chlayouts for links connected to its output pads, + * and out_formats/out_chlayouts for links connected to its input pads. * * @return zero on success, a negative value corresponding to an * AVERROR code otherwise @@ -412,11 +454,34 @@ typedef struct AVFilter { int (*query_formats)(AVFilterContext *); int priv_size; ///< size of private data to allocate for the filter + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. 
+ */ + int (*init_opaque)(AVFilterContext *ctx, const char *args, void *opaque); + + const AVClass *priv_class; ///< private class, containing filter specific options } AVFilter; /** An instance of a filter */ struct AVFilterContext { - const AVClass *av_class; ///< needed for av_log() + const AVClass *av_class; ///< needed for av_log() AVFilter *filter; ///< the AVFilter of which this is an instance @@ -437,6 +502,8 @@ struct AVFilterContext { unsigned nb_outputs; ///< number of output pads void *priv; ///< private data for use by the filter + + struct AVFilterCommand *command_queue; }; /** @@ -459,7 +526,7 @@ struct AVFilterLink { int w; ///< agreed upon image width int h; ///< agreed upon image height AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio - /* These two parameters apply only to audio */ + /* These parameters apply only to audio */ uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) int sample_rate; ///< samples per second @@ -482,9 +549,11 @@ struct AVFilterLink { ***************************************************************** */ /** - * Lists of formats supported by the input and output filters respectively. - * These lists are used for negotiating the format to actually be used, - * which will be loaded into the format member, above, when chosen. + * Lists of formats and channel layouts supported by the input and output + * filters respectively. These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * */ AVFilterFormats *in_formats; AVFilterFormats *out_formats; @@ -513,6 +582,88 @@ struct AVFilterLink { AVLINK_STARTINIT, ///< started, but incomplete AVLINK_INIT ///< complete } init_state; + + struct AVFilterPool *pool; + + /** + * Graph the filter belongs to. 
+ */
+ struct AVFilterGraph *graph;
+
+ /**
+ * Current timestamp of the link, as defined by the most recent
+ * frame(s), in AV_TIME_BASE units.
+ */
+ int64_t current_pts;
+
+ /**
+ * Index in the age array.
+ */
+ int age_index;
+
+ /**
+ * Frame rate of the stream on the link, or 1/0 if unknown;
+ * if left to 0/0, will automatically be copied from the first input
+ * of the source filter if it exists.
+ *
+ * Sources should set it to the best estimation of the real frame rate.
+ * Filters should update it if necessary depending on their function.
+ * Sinks can use it to set a default output frame rate.
+ * It is similar to the r_frame_rate field in AVStream.
+ */
+ AVRational frame_rate;
+
+ /**
+ * Buffer partially filled with samples to achieve a fixed/minimum size.
+ */
+ AVFilterBufferRef *partial_buf;
+
+ /**
+ * Size of the partial buffer to allocate.
+ * Must be between min_samples and max_samples.
+ */
+ int partial_buf_size;
+
+ /**
+ * Minimum number of samples to filter at once. If filter_frame() is
+ * called with fewer samples, it will accumulate them in partial_buf.
+ * This field and the related ones must not be changed after filtering
+ * has started.
+ * If 0, all related fields are ignored.
+ */
+ int min_samples;
+
+ /**
+ * Maximum number of samples to filter at once. If filter_frame() is
+ * called with more samples, it will split them.
+ */
+ int max_samples;
+
+ /**
+ * The buffer reference currently being received across the link by the
+ * destination filter. This is used internally by the filter system to
+ * allow automatic copying of buffers which do not have sufficient
+ * permissions for the destination. This should not be accessed directly
+ * by the filters.
+ */
+ AVFilterBufferRef *cur_buf_copy;
+
+ /**
+ * True if the link is closed.
+ * If set, all attempts of start_frame, filter_frame or request_frame
+ * will fail with AVERROR_EOF, and if necessary the reference will be
+ * destroyed.
+ * If request_frame returns AVERROR_EOF, this flag is set on the
+ * corresponding link.
+ * It can also be set by either the source or the destination
+ * filter.
+ */
+ int closed;
+
+ /**
+ * Number of channels.
+ */
+ int channels;
};

/**
@@ -528,6 +679,21 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
AVFilterContext *dst, unsigned dstpad);

/**
+ * Free the link in *link, and set its pointer to NULL.
+ */
+void avfilter_link_free(AVFilterLink **link);
+
+/**
+ * Get the number of channels of a link.
+ */
+int avfilter_link_get_channels(AVFilterLink *link);
+
+/**
+ * Set the closed field of a link.
+ */
+void avfilter_link_set_closed(AVFilterLink *link, int closed);
+
+/**
 * Negotiate the media format, dimensions, etc of all inputs to a filter.
 *
 * @param filter the filter to negotiate the properties for its inputs
@@ -547,13 +713,16 @@ int avfilter_config_links(AVFilterContext *filter);
 * @param format the pixel format of the image specified by the data and linesize arrays
 */
AVFilterBufferRef *
-avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int perms,
+avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
 int w, int h, enum AVPixelFormat format);

/**
 * Create an audio buffer reference wrapped around an already
 * allocated samples buffer.
 *
+ * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version
+ * that can handle unknown channel layouts.
+ *
 * @param data pointers to the samples plane buffers
 * @param linesize linesize for the samples plane buffers
 * @param perms the required access permissions
@@ -567,6 +736,37 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
 int nb_samples,
 enum AVSampleFormat sample_fmt,
 uint64_t channel_layout);
+/**
+ * Create an audio buffer reference wrapped around an already
+ * allocated samples buffer.
+ * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channels the number of channels of the buffer + * @param channel_layout the channel layout of the buffer, + * must be either 0 or consistent with channels + */ +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + int channels, + uint64_t channel_layout); + + + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. + * It is recommended to use avfilter_graph_send_command(). + */ +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); /** Initialize the filter system. Register all builtin filters. */ void avfilter_register_all(void); @@ -581,7 +781,7 @@ void avfilter_uninit(void); * registered. * * @param filter the filter to register - * @return 0 if the registration was succesfull, a negative value + * @return 0 if the registration was successful, a negative value * otherwise */ int avfilter_register(AVFilter *filter); @@ -646,19 +846,7 @@ int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); /** - * Copy the frame properties of src to dst, without copying the actual - * image data. - * - * @return 0 on success, a negative number on error. 
- */ -int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); - -/** - * Copy the frame properties and data pointers of src to dst, without copying - * the actual data. - * - * @return 0 on success, a negative number on error. + * @} */ -int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); #endif /* AVFILTER_AVFILTER_H */ diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c index f5c9984..4b7d194 100644 --- a/libavfilter/avfiltergraph.c +++ b/libavfilter/avfiltergraph.c @@ -3,20 +3,20 @@ * Copyright (c) 2008 Vitor Sessak * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -25,17 +25,29 @@ #include "libavutil/avassert.h" #include "libavutil/channel_layout.h" -#include "libavutil/common.h" -#include "libavutil/log.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "libavcodec/avcodec.h" // avcodec_find_best_pix_fmt_of_2() #include "avfilter.h" #include "avfiltergraph.h" #include "formats.h" #include "internal.h" +#define OFFSET(x) offsetof(AVFilterGraph,x) + +static const AVOption options[]={ +{"scale_sws_opts" , "default scale filter options" , OFFSET(scale_sws_opts) , AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, 0 }, +{"aresample_swr_opts" , "default aresample filter options" , OFFSET(aresample_swr_opts) , AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, 0 }, +{0} +}; + + static const AVClass filtergraph_class = { .class_name = "AVFilterGraph", .item_name = av_default_item_name, + .option = options, .version = LIBAVUTIL_VERSION_INT, + .category = AV_CLASS_CATEGORY_FILTER, }; AVFilterGraph *avfilter_graph_alloc(void) @@ -53,7 +65,9 @@ void avfilter_graph_free(AVFilterGraph **graph) return; for (; (*graph)->filter_count > 0; (*graph)->filter_count--) avfilter_free((*graph)->filters[(*graph)->filter_count - 1]); + av_freep(&(*graph)->sink_links); av_freep(&(*graph)->scale_sws_opts); + av_freep(&(*graph)->aresample_swr_opts); av_freep(&(*graph)->filters); av_freep(graph); } @@ -92,6 +106,11 @@ fail: return ret; } +void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags) +{ + graph->disable_auto_convert = flags; +} + /** * Check for the validity of graph. 
* @@ -106,22 +125,25 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx) int i, j; for (i = 0; i < graph->filter_count; i++) { + const AVFilterPad *pad; filt = graph->filters[i]; for (j = 0; j < filt->nb_inputs; j++) { if (!filt->inputs[j] || !filt->inputs[j]->src) { + pad = &filt->input_pads[j]; av_log(log_ctx, AV_LOG_ERROR, - "Input pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any source\n", - filt->input_pads[j].name, filt->name, filt->filter->name); + "Input pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any source\n", + pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name); return AVERROR(EINVAL); } } for (j = 0; j < filt->nb_outputs; j++) { if (!filt->outputs[j] || !filt->outputs[j]->dst) { + pad = &filt->output_pads[j]; av_log(log_ctx, AV_LOG_ERROR, - "Output pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any destination\n", - filt->output_pads[j].name, filt->name, filt->filter->name); + "Output pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any destination\n", + pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name); return AVERROR(EINVAL); } } @@ -163,17 +185,138 @@ AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name) return NULL; } +static void sanitize_channel_layouts(void *log, AVFilterChannelLayouts *l) +{ + if (!l) + return; + if (l->nb_channel_layouts) { + if (l->all_layouts || l->all_counts) + av_log(log, AV_LOG_WARNING, "All layouts set on non-empty list\n"); + l->all_layouts = l->all_counts = 0; + } else { + if (l->all_counts && !l->all_layouts) + av_log(log, AV_LOG_WARNING, "All counts without all layouts\n"); + l->all_layouts = 1; + } +} + +static int filter_query_formats(AVFilterContext *ctx) +{ + int ret, i; + AVFilterFormats *formats; + AVFilterChannelLayouts *chlayouts; + AVFilterFormats *samplerates; + enum AVMediaType type = ctx->inputs && 
ctx->inputs [0] ? ctx->inputs [0]->type : + ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type : + AVMEDIA_TYPE_VIDEO; + + if ((ret = ctx->filter->query_formats(ctx)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Query format failed for '%s': %s\n", + ctx->name, av_err2str(ret)); + return ret; + } + + for (i = 0; i < ctx->nb_inputs; i++) + sanitize_channel_layouts(ctx, ctx->inputs[i]->out_channel_layouts); + for (i = 0; i < ctx->nb_outputs; i++) + sanitize_channel_layouts(ctx, ctx->outputs[i]->in_channel_layouts); + + formats = ff_all_formats(type); + if (!formats) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + if (type == AVMEDIA_TYPE_AUDIO) { + samplerates = ff_all_samplerates(); + if (!samplerates) + return AVERROR(ENOMEM); + ff_set_common_samplerates(ctx, samplerates); + chlayouts = ff_all_channel_layouts(); + if (!chlayouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, chlayouts); + } + return 0; +} + +static int insert_conv_filter(AVFilterGraph *graph, AVFilterLink *link, + const char *filt_name, const char *filt_args) +{ + static int auto_count = 0, ret; + char inst_name[32]; + AVFilterContext *filt_ctx; + + if (graph->disable_auto_convert) { + av_log(NULL, AV_LOG_ERROR, + "The filters '%s' and '%s' do not have a common format " + "and automatic conversion is disabled.\n", + link->src->name, link->dst->name); + return AVERROR(EINVAL); + } + + snprintf(inst_name, sizeof(inst_name), "auto-inserted %s %d", + filt_name, auto_count++); + + if ((ret = avfilter_graph_create_filter(&filt_ctx, + avfilter_get_by_name(filt_name), + inst_name, filt_args, NULL, graph)) < 0) + return ret; + if ((ret = avfilter_insert_filter(link, filt_ctx, 0, 0)) < 0) + return ret; + + filter_query_formats(filt_ctx); + + if ( ((link = filt_ctx-> inputs[0]) && + !ff_merge_formats(link->in_formats, link->out_formats)) || + ((link = filt_ctx->outputs[0]) && + !ff_merge_formats(link->in_formats, link->out_formats)) + ) { + av_log(NULL, AV_LOG_ERROR, + 
"Impossible to convert between the formats supported by the filter "
+ "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
+ return AVERROR(EINVAL);
+ }
+
+ if (link->type == AVMEDIA_TYPE_AUDIO &&
+ (((link = filt_ctx-> inputs[0]) &&
+ !ff_merge_channel_layouts(link->in_channel_layouts, link->out_channel_layouts)) ||
+ ((link = filt_ctx->outputs[0]) &&
+ !ff_merge_channel_layouts(link->in_channel_layouts, link->out_channel_layouts)))
+ ) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Impossible to convert between the channel layouts formats supported by the filter "
+ "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
 static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
 {
 int i, j, ret;
+#if 0
+ char filt_args[128];
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *chlayouts;
+ AVFilterFormats *samplerates;
+#endif
 int scaler_count = 0, resampler_count = 0;
+ for (j = 0; j < 2; j++) {
 /* ask all the sub-filters for their supported media formats */
 for (i = 0; i < graph->filter_count; i++) {
+ /* Call query_formats on sources first.
+ This is a temporary workaround for amerge,
+ until format renegotiation is implemented.
*/ + if (!graph->filters[i]->nb_inputs == j) + continue; if (graph->filters[i]->filter->query_formats) - graph->filters[i]->filter->query_formats(graph->filters[i]); + ret = filter_query_formats(graph->filters[i]); else - ff_default_query_formats(graph->filters[i]); + ret = ff_default_query_formats(graph->filters[i]); + if (ret < 0) + return ret; + } } /* go through and merge as many format lists as possible */ @@ -182,6 +325,36 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) for (j = 0; j < filter->nb_inputs; j++) { AVFilterLink *link = filter->inputs[j]; +#if 0 + if (!link) continue; + + if (!link->in_formats || !link->out_formats) + return AVERROR(EINVAL); + + if (link->type == AVMEDIA_TYPE_VIDEO && + !ff_merge_formats(link->in_formats, link->out_formats)) { + + /* couldn't merge format lists, auto-insert scale filter */ + snprintf(filt_args, sizeof(filt_args), "0:0:%s", + graph->scale_sws_opts); + if (ret = insert_conv_filter(graph, link, "scale", filt_args)) + return ret; + } + else if (link->type == AVMEDIA_TYPE_AUDIO) { + if (!link->in_channel_layouts || !link->out_channel_layouts) + return AVERROR(EINVAL); + + /* Merge all three list before checking: that way, in all + * three categories, aconvert will use a common format + * whenever possible. 
*/ + formats = ff_merge_formats(link->in_formats, link->out_formats); + chlayouts = ff_merge_channel_layouts(link->in_channel_layouts , link->out_channel_layouts); + samplerates = ff_merge_samplerates (link->in_samplerates, link->out_samplerates); + + if (!formats || !chlayouts || !samplerates) + if (ret = insert_conv_filter(graph, link, "aresample", NULL)) + return ret; +#else int convert_needed = 0; if (!link) @@ -220,15 +393,19 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d", scaler_count++); - snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts); + if (graph->scale_sws_opts) + snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts); + else + snprintf(scale_args, sizeof(scale_args), "0:0"); + if ((ret = avfilter_graph_create_filter(&convert, filter, inst_name, scale_args, NULL, graph)) < 0) return ret; break; case AVMEDIA_TYPE_AUDIO: - if (!(filter = avfilter_get_by_name("resample"))) { - av_log(log_ctx, AV_LOG_ERROR, "'resample' filter " + if (!(filter = avfilter_get_by_name("aresample"))) { + av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter " "not present, cannot convert audio formats.\n"); return AVERROR(EINVAL); } @@ -236,7 +413,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d", resampler_count++); if ((ret = avfilter_graph_create_filter(&convert, filter, - inst_name, NULL, NULL, graph)) < 0) + inst_name, graph->aresample_swr_opts, NULL, graph)) < 0) return ret; break; default: @@ -246,7 +423,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0) return ret; - convert->filter->query_formats(convert); + filter_query_formats(convert); inlink = convert->inputs[0]; outlink = convert->outputs[0]; if (!ff_merge_formats( inlink->in_formats, inlink->out_formats) || @@ -271,6 +448,7 
@@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) "'%s' and the filter '%s'\n", link->src->name, link->dst->name); return ret; } +#endif } } } @@ -278,11 +456,27 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) return 0; } -static int pick_format(AVFilterLink *link) +static int pick_format(AVFilterLink *link, AVFilterLink *ref) { if (!link || !link->in_formats) return 0; + if (link->type == AVMEDIA_TYPE_VIDEO) { + if(ref && ref->type == AVMEDIA_TYPE_VIDEO){ + int has_alpha= av_pix_fmt_desc_get(ref->format)->nb_components % 2 == 0; + enum AVPixelFormat best= AV_PIX_FMT_NONE; + int i; + for (i=0; i<link->in_formats->format_count; i++) { + enum AVPixelFormat p = link->in_formats->formats[i]; + best= avcodec_find_best_pix_fmt_of_2(best, p, ref->format, has_alpha, NULL); + } + av_log(link->src,AV_LOG_DEBUG, "picking %s out of %d ref:%s alpha:%d\n", + av_get_pix_fmt_name(best), link->in_formats->format_count, + av_get_pix_fmt_name(ref->format), has_alpha); + link->in_formats->formats[0] = best; + } + } + link->in_formats->format_count = 1; link->format = link->in_formats->formats[0]; @@ -296,7 +490,7 @@ static int pick_format(AVFilterLink *link) link->in_samplerates->format_count = 1; link->sample_rate = link->in_samplerates->formats[0]; - if (!link->in_channel_layouts->nb_channel_layouts) { + if (link->in_channel_layouts->all_layouts) { av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for" "the link between filters %s and %s.\n", link->src->name, link->dst->name); @@ -304,6 +498,10 @@ static int pick_format(AVFilterLink *link) } link->in_channel_layouts->nb_channel_layouts = 1; link->channel_layout = link->in_channel_layouts->channel_layouts[0]; + if ((link->channels = FF_LAYOUT2COUNT(link->channel_layout))) + link->channel_layout = 0; + else + link->channels = av_get_channel_layout_nb_channels(link->channel_layout); } ff_formats_unref(&link->in_formats); @@ -359,8 +557,42 @@ static int 
reduce_formats_on_filter(AVFilterContext *filter) format_count, ff_add_format); REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats, format_count, ff_add_format); - REDUCE_FORMATS(uint64_t, AVFilterChannelLayouts, channel_layouts, - channel_layouts, nb_channel_layouts, ff_add_channel_layout); + + /* reduce channel layouts */ + for (i = 0; i < filter->nb_inputs; i++) { + AVFilterLink *inlink = filter->inputs[i]; + uint64_t fmt; + + if (!inlink->out_channel_layouts || + inlink->out_channel_layouts->nb_channel_layouts != 1) + continue; + fmt = inlink->out_channel_layouts->channel_layouts[0]; + + for (j = 0; j < filter->nb_outputs; j++) { + AVFilterLink *outlink = filter->outputs[j]; + AVFilterChannelLayouts *fmts; + + fmts = outlink->in_channel_layouts; + if (inlink->type != outlink->type || fmts->nb_channel_layouts == 1) + continue; + + if (fmts->all_layouts) { + /* Turn the infinite list into a singleton */ + fmts->all_layouts = fmts->all_counts = 0; + ff_add_channel_layout(&outlink->in_channel_layouts, fmt); + break; + } + + for (k = 0; k < outlink->in_channel_layouts->nb_channel_layouts; k++) { + if (fmts->channel_layouts[k] == fmt) { + fmts->channel_layouts[0] = fmt; + fmts->nb_channel_layouts = 1; + ret = 1; + break; + } + } + } + } return ret; } @@ -488,7 +720,23 @@ static void swap_channel_layouts_on_filter(AVFilterContext *filter) int out_channels = av_get_channel_layout_nb_channels(out_chlayout); int count_diff = out_channels - in_channels; int matched_channels, extra_channels; - int score = 0; + int score = 100000; + + if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) { + /* Compute score in case the input or output layout encodes + a channel count; in this case the score is not altered by + the computation afterwards, as in_chlayout and + out_chlayout have both been set to 0 */ + if (FF_LAYOUT2COUNT(in_chlayout)) + in_channels = FF_LAYOUT2COUNT(in_chlayout); + if (FF_LAYOUT2COUNT(out_chlayout)) + out_channels = 
FF_LAYOUT2COUNT(out_chlayout); + score -= 10000 + FFABS(out_channels - in_channels) + + (in_channels > out_channels ? 10000 : 0); + in_chlayout = out_chlayout = 0; + /* Let the remaining computation run, even if the score + value is not altered */ + } /* channel substitution */ for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) { @@ -611,15 +859,50 @@ static void swap_sample_fmts(AVFilterGraph *graph) static int pick_formats(AVFilterGraph *graph) { int i, j, ret; + int change; + + do{ + change = 0; + for (i = 0; i < graph->filter_count; i++) { + AVFilterContext *filter = graph->filters[i]; + if (filter->nb_inputs){ + for (j = 0; j < filter->nb_inputs; j++){ + if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->format_count == 1) { + if ((ret = pick_format(filter->inputs[j], NULL)) < 0) + return ret; + change = 1; + } + } + } + if (filter->nb_outputs){ + for (j = 0; j < filter->nb_outputs; j++){ + if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->format_count == 1) { + if ((ret = pick_format(filter->outputs[j], NULL)) < 0) + return ret; + change = 1; + } + } + } + if (filter->nb_inputs && filter->nb_outputs && filter->inputs[0]->format>=0) { + for (j = 0; j < filter->nb_outputs; j++) { + if(filter->outputs[j]->format<0) { + if ((ret = pick_format(filter->outputs[j], filter->inputs[0])) < 0) + return ret; + change = 1; + } + } + } + } + }while(change); for (i = 0; i < graph->filter_count; i++) { AVFilterContext *filter = graph->filters[i]; for (j = 0; j < filter->nb_inputs; j++) - if ((ret = pick_format(filter->inputs[j])) < 0) + if ((ret = pick_format(filter->inputs[j], NULL)) < 0) return ret; for (j = 0; j < filter->nb_outputs; j++) - if ((ret = pick_format(filter->outputs[j])) < 0) + if ((ret = pick_format(filter->outputs[j], NULL)) < 0) return ret; } return 0; @@ -653,6 +936,48 @@ static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx) return 0; } +static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph, + 
AVClass *log_ctx) +{ + unsigned i, j; + int sink_links_count = 0, n = 0; + AVFilterContext *f; + AVFilterLink **sinks; + + for (i = 0; i < graph->filter_count; i++) { + f = graph->filters[i]; + for (j = 0; j < f->nb_inputs; j++) { + f->inputs[j]->graph = graph; + f->inputs[j]->age_index = -1; + } + for (j = 0; j < f->nb_outputs; j++) { + f->outputs[j]->graph = graph; + f->outputs[j]->age_index= -1; + } + if (!f->nb_outputs) { + if (f->nb_inputs > INT_MAX - sink_links_count) + return AVERROR(EINVAL); + sink_links_count += f->nb_inputs; + } + } + sinks = av_calloc(sink_links_count, sizeof(*sinks)); + if (!sinks) + return AVERROR(ENOMEM); + for (i = 0; i < graph->filter_count; i++) { + f = graph->filters[i]; + if (!f->nb_outputs) { + for (j = 0; j < f->nb_inputs; j++) { + sinks[n] = f->inputs[j]; + f->inputs[j]->age_index = n++; + } + } + } + av_assert0(n == sink_links_count); + graph->sink_links = sinks; + graph->sink_links_count = sink_links_count; + return 0; +} + static int graph_insert_fifos(AVFilterGraph *graph, AVClass *log_ctx) { AVFilterContext *f; @@ -703,6 +1028,131 @@ int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx) return ret; if ((ret = graph_config_links(graphctx, log_ctx))) return ret; + if ((ret = ff_avfilter_graph_config_pointers(graphctx, log_ctx))) + return ret; + + return 0; +} + +int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags) +{ + int i, r = AVERROR(ENOSYS); + + if(!graph) + return r; + + if((flags & AVFILTER_CMD_FLAG_ONE) && !(flags & AVFILTER_CMD_FLAG_FAST)) { + r=avfilter_graph_send_command(graph, target, cmd, arg, res, res_len, flags | AVFILTER_CMD_FLAG_FAST); + if(r != AVERROR(ENOSYS)) + return r; + } + + if(res_len && res) + res[0]= 0; + + for (i = 0; i < graph->filter_count; i++) { + AVFilterContext *filter = graph->filters[i]; + if(!strcmp(target, "all") || (filter->name && !strcmp(target, filter->name)) || !strcmp(target, 
filter->filter->name)){ + r = avfilter_process_command(filter, cmd, arg, res, res_len, flags); + if(r != AVERROR(ENOSYS)) { + if((flags & AVFILTER_CMD_FLAG_ONE) || r<0) + return r; + } + } + } + + return r; +} + +int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *command, const char *arg, int flags, double ts) +{ + int i; + + if(!graph) + return 0; + + for (i = 0; i < graph->filter_count; i++) { + AVFilterContext *filter = graph->filters[i]; + if(filter && (!strcmp(target, "all") || !strcmp(target, filter->name) || !strcmp(target, filter->filter->name))){ + AVFilterCommand **queue = &filter->command_queue, *next; + while (*queue && (*queue)->time <= ts) + queue = &(*queue)->next; + next = *queue; + *queue = av_mallocz(sizeof(AVFilterCommand)); + (*queue)->command = av_strdup(command); + (*queue)->arg = av_strdup(arg); + (*queue)->time = ts; + (*queue)->flags = flags; + (*queue)->next = next; + if(flags & AVFILTER_CMD_FLAG_ONE) + return 0; + } + } return 0; } + +static void heap_bubble_up(AVFilterGraph *graph, + AVFilterLink *link, int index) +{ + AVFilterLink **links = graph->sink_links; + + while (index) { + int parent = (index - 1) >> 1; + if (links[parent]->current_pts >= link->current_pts) + break; + links[index] = links[parent]; + links[index]->age_index = index; + index = parent; + } + links[index] = link; + link->age_index = index; +} + +static void heap_bubble_down(AVFilterGraph *graph, + AVFilterLink *link, int index) +{ + AVFilterLink **links = graph->sink_links; + + while (1) { + int child = 2 * index + 1; + if (child >= graph->sink_links_count) + break; + if (child + 1 < graph->sink_links_count && + links[child + 1]->current_pts < links[child]->current_pts) + child++; + if (link->current_pts < links[child]->current_pts) + break; + links[index] = links[child]; + links[index]->age_index = index; + index = child; + } + links[index] = link; + link->age_index = index; +} + +void ff_avfilter_graph_update_heap(AVFilterGraph 
*graph, AVFilterLink *link) +{ + heap_bubble_up (graph, link, link->age_index); + heap_bubble_down(graph, link, link->age_index); +} + + +int avfilter_graph_request_oldest(AVFilterGraph *graph) +{ + while (graph->sink_links_count) { + AVFilterLink *oldest = graph->sink_links[0]; + int r = ff_request_frame(oldest); + if (r != AVERROR_EOF) + return r; + av_log(oldest->dst, AV_LOG_DEBUG, "EOF on sink link %s:%s.\n", + oldest->dst ? oldest->dst->name : "unknown", + oldest->dstpad ? oldest->dstpad->name : "unknown"); + /* EOF: remove the link from the heap */ + if (oldest->age_index < --graph->sink_links_count) + heap_bubble_down(graph, graph->sink_links[graph->sink_links_count], + oldest->age_index); + oldest->age_index = -1; + } + return AVERROR_EOF; +} diff --git a/libavfilter/avfiltergraph.h b/libavfilter/avfiltergraph.h index 7c4672d..728bbb5 100644 --- a/libavfilter/avfiltergraph.h +++ b/libavfilter/avfiltergraph.h @@ -2,20 +2,20 @@ * Filter graphs * copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -31,6 +31,20 @@ typedef struct AVFilterGraph { AVFilterContext **filters; char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters + + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + /** + * Private fields + * + * The following fields are for internal use only. + * Their type, offset, number and semantic can change without notice. + */ + + AVFilterLink **sink_links; + int sink_links_count; + + unsigned disable_auto_convert; } AVFilterGraph; /** @@ -72,6 +86,21 @@ int avfilter_graph_create_filter(AVFilterContext **filt_ctx, AVFilter *filt, AVFilterGraph *graph_ctx); /** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aconvert filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** * Check validity and configure all the links and formats in the graph. 
* * @param graphctx the filter graph @@ -127,12 +156,16 @@ void avfilter_inout_free(AVFilterInOut **inout); * * @param graph the filter graph where to link the parsed graph context * @param filters string to be parsed - * @param inputs linked list to the inputs of the graph - * @param outputs linked list to the outputs of the graph - * @return zero on success, a negative AVERROR code on error + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free(). + * @return non-negative on success, a negative AVERROR code on error */ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, - AVFilterInOut *inputs, AVFilterInOut *outputs, + AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx); /** @@ -169,4 +202,70 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs); + +/** + * Send a command to one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_len where the filter(s) can return a response. + * + * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands + */ +int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Queue a command for one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to sent, for handling simplicity all commands must be alphanummeric only + * @param arg the argument for the command + * @param ts time at which the command should be sent to the filter + * + * @note As this executes commands after this function returns, no return code + * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported. + */ +int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts); + + +/** + * Dump a graph into a human-readable string representation. + * + * @param graph the graph to dump + * @param options formatting options; currently ignored + * @return a string, or NULL in case of memory allocation failure; + * the string must be freed using av_free + */ +char *avfilter_graph_dump(AVFilterGraph *graph, const char *options); + +/** + * Request a frame on the oldest sink link. + * + * If the request returns AVERROR_EOF, try the next. + * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. + * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. 
+ * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. + * + * @return the return value of ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int avfilter_graph_request_oldest(AVFilterGraph *graph); + #endif /* AVFILTER_AVFILTERGRAPH_H */ diff --git a/libavfilter/bbox.c b/libavfilter/bbox.c new file mode 100644 index 0000000..be9b2e6 --- /dev/null +++ b/libavfilter/bbox.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "bbox.h" + +int ff_calculate_bounding_box(FFBoundingBox *bbox, + const uint8_t *data, int linesize, int w, int h, + int min_val) +{ + int x, y; + int start_x; + int start_y; + int end_x; + int end_y; + const uint8_t *line; + + /* left bound */ + for (start_x = 0; start_x < w; start_x++) + for (y = 0; y < h; y++) + if ((data[y * linesize + start_x] > min_val)) + goto outl; +outl: + if (start_x == w) /* no points found */ + return 0; + + /* right bound */ + for (end_x = w - 1; end_x >= start_x; end_x--) + for (y = 0; y < h; y++) + if ((data[y * linesize + end_x] > min_val)) + goto outr; +outr: + + /* top bound */ + line = data; + for (start_y = 0; start_y < h; start_y++) { + for (x = 0; x < w; x++) + if (line[x] > min_val) + goto outt; + line += linesize; + } +outt: + + /* bottom bound */ + line = data + (h-1)*linesize; + for (end_y = h - 1; end_y >= start_y; end_y--) { + for (x = 0; x < w; x++) + if (line[x] > min_val) + goto outb; + line -= linesize; + } +outb: + + bbox->x1 = start_x; + bbox->y1 = start_y; + bbox->x2 = end_x; + bbox->y2 = end_y; + return 1; +} diff --git a/libavfilter/bbox.h b/libavfilter/bbox.h new file mode 100644 index 0000000..eb73154 --- /dev/null +++ b/libavfilter/bbox.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BBOX_H +#define AVFILTER_BBOX_H + +#include <stdint.h> + +typedef struct { + int x1, x2, y1, y2; +} FFBoundingBox; + +/** + * Calculate the smallest rectangle that will encompass the + * region with values > min_val. + * + * @param bbox bounding box structure which is updated with the found values. + * If no pixels could be found with value > min_val, the + * structure is not modified. + * @return 1 in case at least one pixel with value > min_val was found, + * 0 otherwise + */ +int ff_calculate_bounding_box(FFBoundingBox *bbox, + const uint8_t *data, int linesize, + int w, int h, int min_val); + +#endif /* AVFILTER_BBOX_H */ diff --git a/libavfilter/buffer.c b/libavfilter/buffer.c index 8eb3ce3..9a3f131 100644 --- a/libavfilter/buffer.c +++ b/libavfilter/buffer.c @@ -1,29 +1,36 @@ /* - * This file is part of Libav. + * Copyright Stefano Sabatini <stefasab gmail com> + * Copyright Anton Khirnov <anton khirnov net> + * Copyright Michael Niedermayer <michaelni gmx at> * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/channel_layout.h" +#include "libavutil/avassert.h" #include "libavutil/common.h" +#include "libavutil/imgutils.h" #include "libavcodec/avcodec.h" #include "avfilter.h" #include "internal.h" +#include "audio.h" +#include "avcodec.h" -/* TODO: buffer pool. see comment for avfilter_default_get_video_buffer() */ void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr) { if (ptr->extended_data != ptr->data) @@ -32,19 +39,32 @@ void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr) av_free(ptr); } +static void copy_video_props(AVFilterBufferRefVideoProps *dst, AVFilterBufferRefVideoProps *src) { + *dst = *src; + if (src->qp_table) { + int qsize = src->qp_table_size; + dst->qp_table = av_malloc(qsize); + memcpy(dst->qp_table, src->qp_table, qsize); + } +} + AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask) { AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef)); if (!ret) return NULL; *ret = *ref; + + ret->metadata = NULL; + av_dict_copy(&ret->metadata, ref->metadata, 0); + if (ref->type == AVMEDIA_TYPE_VIDEO) { ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps)); if (!ret->video) { av_free(ret); return NULL; } - *ret->video = *ref->video; + copy_video_props(ret->video, ref->video); ret->extended_data = ret->data; } else if (ref->type == AVMEDIA_TYPE_AUDIO) { ret->audio = av_malloc(sizeof(AVFilterBufferRefAudioProps)); @@ -54,7 
+74,7 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask) } *ret->audio = *ref->audio; - if (ref->extended_data != ref->data) { + if (ref->extended_data && ref->extended_data != ref->data) { int nb_channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout); if (!(ret->extended_data = av_malloc(sizeof(*ret->extended_data) * nb_channels))) { @@ -72,16 +92,91 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask) return ret; } +void ff_free_pool(AVFilterPool *pool) +{ + int i; + + av_assert0(pool->refcount > 0); + + for (i = 0; i < POOL_SIZE; i++) { + if (pool->pic[i]) { + AVFilterBufferRef *picref = pool->pic[i]; + /* free buffer: picrefs stored in the pool are not + * supposed to contain a free callback */ + av_assert0(!picref->buf->refcount); + av_freep(&picref->buf->data[0]); + av_freep(&picref->buf); + + av_freep(&picref->audio); + av_assert0(!picref->video || !picref->video->qp_table); + av_freep(&picref->video); + av_freep(&pool->pic[i]); + pool->count--; + } + } + pool->draining = 1; + + if (!--pool->refcount) { + av_assert0(!pool->count); + av_free(pool); + } +} + +static void store_in_pool(AVFilterBufferRef *ref) +{ + int i; + AVFilterPool *pool= ref->buf->priv; + + av_assert0(ref->buf->data[0]); + av_assert0(pool->refcount>0); + + if (ref->video) + av_freep(&ref->video->qp_table); + + if (pool->count == POOL_SIZE) { + AVFilterBufferRef *ref1 = pool->pic[0]; + av_freep(&ref1->video); + av_freep(&ref1->audio); + av_freep(&ref1->buf->data[0]); + av_freep(&ref1->buf); + av_free(ref1); + memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1)); + pool->count--; + pool->pic[POOL_SIZE-1] = NULL; + } + + for (i = 0; i < POOL_SIZE; i++) { + if (!pool->pic[i]) { + pool->pic[i] = ref; + pool->count++; + break; + } + } + if (pool->draining) { + ff_free_pool(pool); + } else + --pool->refcount; +} + void avfilter_unref_buffer(AVFilterBufferRef *ref) { if (!ref) return; - if (!(--ref->buf->refcount)) + 
av_assert0(ref->buf->refcount > 0); + if (!(--ref->buf->refcount)) { + if (!ref->buf->free) { + store_in_pool(ref); + return; + } ref->buf->free(ref->buf); + } if (ref->extended_data != ref->data) av_freep(&ref->extended_data); - av_free(ref->video); - av_free(ref->audio); + if (ref->video) + av_freep(&ref->video->qp_table); + av_freep(&ref->video); + av_freep(&ref->audio); + av_dict_free(&ref->metadata); av_free(ref); } @@ -91,85 +186,60 @@ void avfilter_unref_bufferp(AVFilterBufferRef **ref) *ref = NULL; } -int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src) +void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src) { - dst->pts = src->pts; - dst->format = src->format; + // copy common properties + dst->pts = src->pts; + dst->pos = src->pos; - switch (dst->type) { - case AVMEDIA_TYPE_VIDEO: - dst->video->w = src->width; - dst->video->h = src->height; - dst->video->pixel_aspect = src->sample_aspect_ratio; - dst->video->interlaced = src->interlaced_frame; - dst->video->top_field_first = src->top_field_first; - dst->video->key_frame = src->key_frame; - dst->video->pict_type = src->pict_type; - break; - case AVMEDIA_TYPE_AUDIO: - dst->audio->sample_rate = src->sample_rate; - dst->audio->channel_layout = src->channel_layout; + switch (src->type) { + case AVMEDIA_TYPE_VIDEO: { + if (dst->video->qp_table) + av_freep(&dst->video->qp_table); + copy_video_props(dst->video, src->video); break; - default: - return AVERROR(EINVAL); + } + case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break; + default: break; } - return 0; + av_dict_free(&dst->metadata); + av_dict_copy(&dst->metadata, src->metadata, 0); } -int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src) +AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink, + AVFilterBufferRef *ref) { - int planes, nb_channels; - - memcpy(dst->data, src->data, sizeof(dst->data)); - memcpy(dst->linesize, src->linesize, sizeof(dst->linesize)); + AVFilterBufferRef 
*buf; + int channels; - dst->pts = src->pts; - dst->format = src->format; + switch (outlink->type) { - switch (src->type) { case AVMEDIA_TYPE_VIDEO: - dst->width = src->video->w; - dst->height = src->video->h; - dst->sample_aspect_ratio = src->video->pixel_aspect; - dst->interlaced_frame = src->video->interlaced; - dst->top_field_first = src->video->top_field_first; - dst->key_frame = src->video->key_frame; - dst->pict_type = src->video->pict_type; + buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, + ref->video->w, ref->video->h); + if(!buf) + return NULL; + av_image_copy(buf->data, buf->linesize, + (void*)ref->data, ref->linesize, + ref->format, ref->video->w, ref->video->h); break; - case AVMEDIA_TYPE_AUDIO: - nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout); - planes = av_sample_fmt_is_planar(src->format) ? nb_channels : 1; - - if (planes > FF_ARRAY_ELEMS(dst->data)) { - dst->extended_data = av_mallocz(planes * sizeof(*dst->extended_data)); - if (!dst->extended_data) - return AVERROR(ENOMEM); - memcpy(dst->extended_data, src->extended_data, - planes * sizeof(*dst->extended_data)); - } else - dst->extended_data = dst->data; - dst->sample_rate = src->audio->sample_rate; - dst->channel_layout = src->audio->channel_layout; - dst->nb_samples = src->audio->nb_samples; + case AVMEDIA_TYPE_AUDIO: + buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, + ref->audio->nb_samples); + if(!buf) + return NULL; + channels = ref->audio->channels; + av_samples_copy(buf->extended_data, ref->buf->extended_data, + 0, 0, ref->audio->nb_samples, + channels, + ref->format); break; - default: - return AVERROR(EINVAL); - } - - return 0; -} - -void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src) -{ - // copy common properties - dst->pts = src->pts; - dst->pos = src->pos; - switch (src->type) { - case AVMEDIA_TYPE_VIDEO: *dst->video = *src->video; break; - case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break; - default: break; + 
default: + return NULL; } + avfilter_copy_buffer_ref_props(buf, ref); + return buf; } diff --git a/libavfilter/bufferqueue.h b/libavfilter/bufferqueue.h new file mode 100644 index 0000000..34c4c0f --- /dev/null +++ b/libavfilter/bufferqueue.h @@ -0,0 +1,119 @@ +/* + * Generic buffer queue + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERQUEUE_H +#define AVFILTER_BUFFERQUEUE_H + +/** + * FFBufQueue: simple AVFilterBufferRef queue API + * + * Note: this API is not thread-safe. Concurrent access to the same queue + * must be protected by a mutex or any synchronization mechanism. + */ + +/** + * Maximum size of the queue. + * + * This value can be overridden by definying it before including this + * header. + * Powers of 2 are recommended. + */ +#ifndef FF_BUFQUEUE_SIZE +#define FF_BUFQUEUE_SIZE 32 +#endif + +#include "avfilter.h" +#include "libavutil/avassert.h" + +/** + * Structure holding the queue + */ +struct FFBufQueue { + AVFilterBufferRef *queue[FF_BUFQUEUE_SIZE]; + unsigned short head; + unsigned short available; /**< number of available buffers */ +}; + +#define BUCKET(i) queue->queue[(queue->head + (i)) % FF_BUFQUEUE_SIZE] + +/** + * Test if a buffer queue is full. 
+ */ +static inline int ff_bufqueue_is_full(struct FFBufQueue *queue) +{ + return queue->available == FF_BUFQUEUE_SIZE; +} + +/** + * Add a buffer to the queue. + * + * If the queue is already full, then the current last buffer is dropped + * (and unrefed) with a warning before adding the new buffer. + */ +static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue, + AVFilterBufferRef *buf) +{ + if (ff_bufqueue_is_full(queue)) { + av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n"); + avfilter_unref_buffer(BUCKET(--queue->available)); + } + BUCKET(queue->available++) = buf; +} + +/** + * Get a buffer from the queue without altering it. + * + * Buffer with index 0 is the first buffer in the queue. + * Return NULL if the queue has not enough buffers. + */ +static inline AVFilterBufferRef *ff_bufqueue_peek(struct FFBufQueue *queue, + unsigned index) +{ + return index < queue->available ? BUCKET(index) : NULL; +} + +/** + * Get the first buffer from the queue and remove it. + * + * Do not use on an empty queue. + */ +static inline AVFilterBufferRef *ff_bufqueue_get(struct FFBufQueue *queue) +{ + AVFilterBufferRef *ret = queue->queue[queue->head]; + av_assert0(queue->available); + queue->available--; + queue->queue[queue->head] = NULL; + queue->head = (queue->head + 1) % FF_BUFQUEUE_SIZE; + return ret; +} + +/** + * Unref and remove all buffers from the queue. + */ +static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue) +{ + while (queue->available) + avfilter_unref_buffer(ff_bufqueue_get(queue)); +} + +#undef BUCKET + +#endif /* AVFILTER_BUFFERQUEUE_H */ diff --git a/libavfilter/buffersink.c b/libavfilter/buffersink.c index a315cb3..282be30 100644 --- a/libavfilter/buffersink.c +++ b/libavfilter/buffersink.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2011 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. 
* - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -52,13 +52,13 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) { BufferSinkContext *s = link->dst->priv; - av_assert0(!s->cur_buf); +// av_assert0(!s->cur_buf); s->cur_buf = buf; return 0; } -int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) +int ff_buffersink_read_compat(AVFilterContext *ctx, AVFilterBufferRef **buf) { BufferSinkContext *s = ctx->priv; AVFilterLink *link = ctx->inputs[0]; @@ -99,8 +99,8 @@ static int read_from_fifo(AVFilterContext *ctx, AVFilterBufferRef **pbuf, } -int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **pbuf, - int nb_samples) +int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **pbuf, + int nb_samples) { BufferSinkContext *s = ctx->priv; AVFilterLink *link = ctx->inputs[0]; @@ -151,7 +151,11 @@ static const AVFilterPad avfilter_vsink_buffer_inputs[] = { }; AVFilter avfilter_vsink_buffer = { +#if AV_HAVE_INCOMPATIBLE_FORK_ABI .name = "buffersink", +#else + .name = "buffersink_old", +#endif .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make 
them available to the end of the filter graph."), .priv_size = sizeof(BufferSinkContext), .uninit = uninit, @@ -172,7 +176,11 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = { }; AVFilter avfilter_asink_abuffer = { +#if AV_HAVE_INCOMPATIBLE_FORK_ABI .name = "abuffersink", +#else + .name = "abuffersink_old", +#endif .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."), .priv_size = sizeof(BufferSinkContext), .uninit = uninit, diff --git a/libavfilter/buffersink.h b/libavfilter/buffersink.h index e358ac3..825a36a 100644 --- a/libavfilter/buffersink.h +++ b/libavfilter/buffersink.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -21,12 +21,95 @@ /** * @file - * memory buffer sink API + * memory buffer sink API for audio and video */ #include "avfilter.h" /** + * Struct to use for initializing a buffersink context. 
+ */ +typedef struct { + const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE +} AVBufferSinkParams; + +/** + * Create an AVBufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVBufferSinkParams *av_buffersink_params_alloc(void); + +/** + * Struct to use for initializing an abuffersink context. + */ +typedef struct { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVABufferSinkParams *av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * Tell av_buffersink_get_buffer_ref() to read video/samples buffer + * reference, but not remove it from the buffer. This is useful if you + * need only to read a video/samples buffer, without to fetch it. + */ +#define AV_BUFFERSINK_FLAG_PEEK 1 + +/** + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input. + * If a frame is already buffered, it is read (and removed from the buffer), + * but if no frame is present, return AVERROR(EAGAIN). + */ +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2 + +/** + * Get an audio/video buffer data from buffer_sink and put it in bufref. + * + * This function works with both audio and video buffer sinks. 
+ * + * @param buffer_sink pointer to a buffersink or abuffersink context + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink, + AVFilterBufferRef **bufref, int flags); + + +/** + * Get the number of immediately available frames. + */ +int av_buffersink_poll_frame(AVFilterContext *ctx); + +/** + * Get the frame rate of the input. + */ +AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx); + +/** + * @defgroup libav_api Libav API + * @{ + */ + +/** * Get a buffer with filtered data from sink and put it in buf. * * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. @@ -59,4 +142,8 @@ int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf); int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, int nb_samples); +/** + * @} + */ + #endif /* AVFILTER_BUFFERSINK_H */ diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c index 3cee68d..3fdf8d3 100644 --- a/libavfilter/buffersrc.c +++ b/libavfilter/buffersrc.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2008 Vitor Sessak * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -35,21 +35,27 @@ #include "formats.h" #include "internal.h" #include "video.h" +#include "avcodec.h" typedef struct { const AVClass *class; AVFifoBuffer *fifo; AVRational time_base; ///< time_base to set in the output link + AVRational frame_rate; ///< frame_rate to set in the output link + unsigned nb_failed_requests; + unsigned warning_limit; /* video only */ - int h, w; + int w, h; enum AVPixelFormat pix_fmt; AVRational pixel_aspect; + char *sws_param; /* audio only */ int sample_rate; enum AVSampleFormat sample_fmt; char *sample_fmt_str; + int channels; uint64_t channel_layout; char *channel_layout_str; @@ -58,8 +64,7 @@ typedef struct { #define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\ if (c->w != width || c->h != height || c->pix_fmt != format) {\ - av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\ - return AVERROR(EINVAL);\ + av_log(s, AV_LOG_INFO, "Changing frame properties on the fly is not supported by all filters.\n");\ } #define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\ @@ -69,65 +74,34 @@ typedef struct { return AVERROR(EINVAL);\ } -int av_buffersrc_write_frame(AVFilterContext *buffer_filter, const AVFrame *frame) +int av_buffersrc_add_frame(AVFilterContext *buffer_src, + const AVFrame *frame, int flags) { - BufferSourceContext *c = buffer_filter->priv; - AVFilterBufferRef *buf; + AVFilterBufferRef *picref; int ret; - if (!frame) { - c->eof = 1; - return 0; - } else if (c->eof) - return AVERROR(EINVAL); - - if (!av_fifo_space(c->fifo) && - (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) + - sizeof(buf))) < 0) - return ret; - - switch (buffer_filter->outputs[0]->type) { - case AVMEDIA_TYPE_VIDEO: - 
CHECK_VIDEO_PARAM_CHANGE(buffer_filter, c, frame->width, frame->height, - frame->format); - buf = ff_get_video_buffer(buffer_filter->outputs[0], AV_PERM_WRITE, - c->w, c->h); - if (!buf) - return AVERROR(ENOMEM); - - av_image_copy(buf->data, buf->linesize, frame->data, frame->linesize, - c->pix_fmt, c->w, c->h); - break; - case AVMEDIA_TYPE_AUDIO: - CHECK_AUDIO_PARAM_CHANGE(buffer_filter, c, frame->sample_rate, frame->channel_layout, - frame->format); - buf = ff_get_audio_buffer(buffer_filter->outputs[0], AV_PERM_WRITE, - frame->nb_samples); - if (!buf) - return AVERROR(ENOMEM); - - av_samples_copy(buf->extended_data, frame->extended_data, - 0, 0, frame->nb_samples, - av_get_channel_layout_nb_channels(frame->channel_layout), - frame->format); - break; - default: - return AVERROR(EINVAL); - } - - avfilter_copy_frame_props(buf, frame); + if (!frame) /* NULL for EOF */ + return av_buffersrc_add_ref(buffer_src, NULL, flags); - if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) { - avfilter_unref_buffer(buf); - return ret; - } + picref = avfilter_get_buffer_ref_from_frame(buffer_src->outputs[0]->type, + frame, AV_PERM_WRITE); + if (!picref) + return AVERROR(ENOMEM); + ret = av_buffersrc_add_ref(buffer_src, picref, flags); + picref->buf->data[0] = NULL; + avfilter_unref_buffer(picref); + return ret; +} - return 0; +int av_buffersrc_write_frame(AVFilterContext *buffer_filter, const AVFrame *frame) +{ + return av_buffersrc_add_frame(buffer_filter, frame, 0); } -int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf) +int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags) { BufferSourceContext *c = s->priv; + AVFilterBufferRef *to_free = NULL; int ret; if (!buf) { @@ -141,69 +115,138 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf) sizeof(buf))) < 0) return ret; - switch (s->outputs[0]->type) { - case AVMEDIA_TYPE_VIDEO: - CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, 
buf->format); - break; - case AVMEDIA_TYPE_AUDIO: - CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout, - buf->format); - break; - default: - return AVERROR(EINVAL); + if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) { + switch (s->outputs[0]->type) { + case AVMEDIA_TYPE_VIDEO: + CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format); + break; + case AVMEDIA_TYPE_AUDIO: + CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout, + buf->format); + break; + default: + return AVERROR(EINVAL); + } } + if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY)) + to_free = buf = ff_copy_buffer_ref(s->outputs[0], buf); + if(!buf) + return -1; - if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) + if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) { + avfilter_unref_buffer(to_free); return ret; + } + c->nb_failed_requests = 0; + if (c->warning_limit && + av_fifo_size(c->fifo) / sizeof(buf) >= c->warning_limit) { + av_log(s, AV_LOG_WARNING, + "%d buffers queued in %s, something may be wrong.\n", + c->warning_limit, + (char *)av_x_if_null(s->name, s->filter->name)); + c->warning_limit *= 10; + } + + if ((flags & AV_BUFFERSRC_FLAG_PUSH)) + if ((ret = s->output_pads[0].request_frame(s->outputs[0])) < 0) + return ret; return 0; } +#ifdef FF_API_BUFFERSRC_BUFFER +int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf) +{ + return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY); +} +#endif + +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src) +{ + return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests; +} + +#define OFFSET(x) offsetof(BufferSourceContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +static const AVOption buffer_options[] = { + { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "frame_rate", NULL, OFFSET(frame_rate), 
AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "video_size", NULL, OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, .flags = FLAGS }, + { "pix_fmt", NULL, OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, .flags = FLAGS }, + { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = FLAGS }, + { NULL }, +}; +#undef FLAGS + +AVFILTER_DEFINE_CLASS(buffer); + static av_cold int init_video(AVFilterContext *ctx, const char *args) { BufferSourceContext *c = ctx->priv; - char pix_fmt_str[128]; - int n = 0; + char pix_fmt_str[128], sws_param[256] = "", *colon, *equal; + int ret, n = 0; - if (!args || - (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str, - &c->time_base.num, &c->time_base.den, - &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) { - av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args); + c->class = &buffer_class; + + if (!args) { + av_log(ctx, AV_LOG_ERROR, "Arguments required\n"); return AVERROR(EINVAL); } - if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == AV_PIX_FMT_NONE) { - char *tail; - c->pix_fmt = strtol(pix_fmt_str, &tail, 10); - if (*tail || c->pix_fmt < 0 || c->pix_fmt >= AV_PIX_FMT_NB) { - av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str); - return AVERROR(EINVAL); - } + colon = strchr(args, ':'); + equal = strchr(args, '='); + if (equal && (!colon || equal < colon)) { + av_opt_set_defaults(c); + ret = av_set_options_string(c, args, "=", ":"); + if (ret < 0) + goto fail; + } else { + if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str, + &c->time_base.num, &c->time_base.den, + &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) { + av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args); + ret = AVERROR(EINVAL); + goto fail; } + av_log(ctx, AV_LOG_WARNING, "Flat options syntax is 
deprecated, use key=value pairs\n"); - if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) - return AVERROR(ENOMEM); + if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0) + goto fail; + c->sws_param = av_strdup(sws_param); + if (!c->sws_param) { + ret = AVERROR(ENOMEM); + goto fail; + } + } - av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt)); + if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) { + ret = AVERROR(ENOMEM); + goto fail; + } + + av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n", + c->w, c->h, av_get_pix_fmt_name(c->pix_fmt), + c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den, + c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, "")); + c->warning_limit = 100; return 0; + +fail: + av_opt_free(c); + return ret; } -#define OFFSET(x) offsetof(BufferSourceContext, x) -#define A AV_OPT_FLAG_AUDIO_PARAM -static const AVOption audio_options[] = { - { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, A }, - { "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A }, - { "sample_fmt", NULL, OFFSET(sample_fmt_str), AV_OPT_TYPE_STRING, .flags = A }, - { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A }, +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM +static const AVOption abuffer_options[] = { + { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS }, + { "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS }, + { "sample_fmt", NULL, OFFSET(sample_fmt_str), AV_OPT_TYPE_STRING, .flags = FLAGS }, + { "channels", NULL, OFFSET(channels), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS }, + { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = FLAGS }, { NULL }, }; 
-static const AVClass abuffer_class = { - .class_name = "abuffer source", - .item_name = av_default_item_name, - .option = audio_options, - .version = LIBAVUTIL_VERSION_INT, -}; +AVFILTER_DEFINE_CLASS(abuffer); static av_cold int init_audio(AVFilterContext *ctx, const char *args) { @@ -213,26 +256,45 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args) s->class = &abuffer_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s.\n", args); + if ((ret = av_set_options_string(s, args, "=", ":")) < 0) goto fail; - } s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str); if (s->sample_fmt == AV_SAMPLE_FMT_NONE) { - av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n", + av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n", s->sample_fmt_str); ret = AVERROR(EINVAL); goto fail; } + if (s->channel_layout_str) { + int n; + /* TODO reindent */ s->channel_layout = av_get_channel_layout(s->channel_layout_str); if (!s->channel_layout) { - av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n", + av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n", s->channel_layout_str); ret = AVERROR(EINVAL); goto fail; } + n = av_get_channel_layout_nb_channels(s->channel_layout); + if (s->channels) { + if (n != s->channels) { + av_log(ctx, AV_LOG_ERROR, + "Mismatching channel count %d and layout '%s' " + "(%d channels)\n", + s->channels, s->channel_layout_str, n); + ret = AVERROR(EINVAL); + goto fail; + } + } + s->channels = n; + } else if (!s->channels) { + av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor " + "channel layout specified\n"); + ret = AVERROR(EINVAL); + goto fail; + } if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) { ret = AVERROR(ENOMEM); @@ -242,9 +304,11 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args) if (!s->time_base.num) s->time_base = (AVRational){1, s->sample_rate}; - av_log(ctx, AV_LOG_VERBOSE, 
"tb:%d/%d samplefmt:%s samplerate: %d " - "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str, + av_log(ctx, AV_LOG_VERBOSE, + "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n", + s->time_base.num, s->time_base.den, s->sample_fmt_str, s->sample_rate, s->channel_layout_str); + s->warning_limit = 100; fail: av_opt_free(s); @@ -261,6 +325,7 @@ static av_cold void uninit(AVFilterContext *ctx) } av_fifo_free(s->fifo); s->fifo = NULL; + av_freep(&s->sws_param); } static int query_formats(AVFilterContext *ctx) @@ -282,7 +347,9 @@ static int query_formats(AVFilterContext *ctx) ff_add_format(&samplerates, c->sample_rate); ff_set_common_samplerates(ctx, samplerates); - ff_add_channel_layout(&channel_layouts, c->channel_layout); + ff_add_channel_layout(&channel_layouts, + c->channel_layout ? c->channel_layout : + FF_COUNT2LAYOUT(c->channels)); ff_set_common_channel_layouts(ctx, channel_layouts); break; default: @@ -303,14 +370,13 @@ static int config_props(AVFilterLink *link) link->sample_aspect_ratio = c->pixel_aspect; break; case AVMEDIA_TYPE_AUDIO: - link->channel_layout = c->channel_layout; - link->sample_rate = c->sample_rate; break; default: return AVERROR(EINVAL); } link->time_base = c->time_base; + link->frame_rate = c->frame_rate; return 0; } @@ -318,18 +384,16 @@ static int request_frame(AVFilterLink *link) { BufferSourceContext *c = link->src->priv; AVFilterBufferRef *buf; - int ret = 0; if (!av_fifo_size(c->fifo)) { if (c->eof) return AVERROR_EOF; + c->nb_failed_requests++; return AVERROR(EAGAIN); } av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL); - ff_filter_frame(link, buf); - - return ret; + return ff_filter_frame(link, buf); } static int poll_frame(AVFilterLink *link) @@ -363,6 +427,7 @@ AVFilter avfilter_vsrc_buffer = { .inputs = NULL, .outputs = avfilter_vsrc_buffer_outputs, + .priv_class = &buffer_class, }; static const AVFilterPad avfilter_asrc_abuffer_outputs[] = { @@ -387,4 +452,5 @@ AVFilter avfilter_asrc_abuffer = { .inputs 
= NULL, .outputs = avfilter_asrc_abuffer_outputs, + .priv_class = &abuffer_class, }; diff --git a/libavfilter/buffersrc.h b/libavfilter/buffersrc.h index 452c691..7f3c8d8 100644 --- a/libavfilter/buffersrc.h +++ b/libavfilter/buffersrc.h @@ -25,16 +25,61 @@ * Memory buffer source API. */ +#include "libavcodec/avcodec.h" #include "avfilter.h" +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + + /** + * Do not copy buffer data. + */ + AV_BUFFERSRC_FLAG_NO_COPY = 2, + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + +}; + +/** + * Add buffer data in picref to buffer_src. + * + * @param buffer_src pointer to a buffer source context + * @param picref a buffer reference, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_ref(AVFilterContext *buffer_src, + AVFilterBufferRef *picref, int flags); + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +#ifdef FF_API_BUFFERSRC_BUFFER /** * Add a buffer to the filtergraph s. * * @param buf buffer containing frame data to be passed down the filtergraph. * This function will take ownership of buf, the user must not free it. * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. + * @deprecated Use av_buffersrc_add_ref(s, picref, AV_BUFFERSRC_FLAG_NO_COPY) instead. */ +attribute_deprecated int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); +#endif /** * Add a frame to the buffer source. 
diff --git a/libavfilter/drawutils.c b/libavfilter/drawutils.c index e837760..aebc000 100644 --- a/libavfilter/drawutils.c +++ b/libavfilter/drawutils.c @@ -1,18 +1,21 @@ /* - * This file is part of Libav. + * Copyright 2011 Stefano Sabatini <stefano.sabatini-lala poste it> + * Copyright 2012 Nicolas George <nicolas.george normalesup org> * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -23,29 +26,39 @@ #include "libavutil/mem.h" #include "libavutil/pixdesc.h" #include "drawutils.h" +#include "formats.h" enum { RED = 0, GREEN, BLUE, ALPHA }; -int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4], - enum AVPixelFormat pix_fmt, uint8_t rgba_color[4], - int *is_packed_rgba, uint8_t rgba_map_ptr[4]) +int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt) { - uint8_t rgba_map[4] = {0}; - int i; - const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt); - int hsub = pix_desc->log2_chroma_w; - - *is_packed_rgba = 1; switch (pix_fmt) { + case AV_PIX_FMT_0RGB: case AV_PIX_FMT_ARGB: rgba_map[ALPHA] = 0; rgba_map[RED ] = 1; 
rgba_map[GREEN] = 2; rgba_map[BLUE ] = 3; break; + case AV_PIX_FMT_0BGR: case AV_PIX_FMT_ABGR: rgba_map[ALPHA] = 0; rgba_map[BLUE ] = 1; rgba_map[GREEN] = 2; rgba_map[RED ] = 3; break; + case AV_PIX_FMT_RGB0: case AV_PIX_FMT_RGBA: case AV_PIX_FMT_RGB24: rgba_map[RED ] = 0; rgba_map[GREEN] = 1; rgba_map[BLUE ] = 2; rgba_map[ALPHA] = 3; break; case AV_PIX_FMT_BGRA: + case AV_PIX_FMT_BGR0: case AV_PIX_FMT_BGR24: rgba_map[BLUE ] = 0; rgba_map[GREEN] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break; - default: - *is_packed_rgba = 0; + default: /* unsupported */ + return AVERROR(EINVAL); } + return 0; +} + +int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4], + enum AVPixelFormat pix_fmt, uint8_t rgba_color[4], + int *is_packed_rgba, uint8_t rgba_map_ptr[4]) +{ + uint8_t rgba_map[4] = {0}; + int i; + const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt); + int hsub = pix_desc->log2_chroma_w; + + *is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0; if (*is_packed_rgba) { pixel_step[0] = (av_get_bits_per_pixel(pix_desc))>>3; @@ -118,3 +131,422 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4], } } } + +int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags) +{ + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format); + const AVComponentDescriptor *c; + unsigned i, nb_planes = 0; + int pixelstep[MAX_PLANES] = { 0 }; + + if (!desc->name) + return AVERROR(EINVAL); + if (desc->flags & ~(PIX_FMT_PLANAR | PIX_FMT_RGB | PIX_FMT_PSEUDOPAL | PIX_FMT_ALPHA)) + return AVERROR(ENOSYS); + for (i = 0; i < desc->nb_components; i++) { + c = &desc->comp[i]; + /* for now, only 8-bits formats */ + if (c->depth_minus1 != 8 - 1) + return AVERROR(ENOSYS); + if (c->plane >= MAX_PLANES) + return AVERROR(ENOSYS); + /* strange interleaving */ + if (pixelstep[c->plane] != 0 && + pixelstep[c->plane] != c->step_minus1 + 1) + return AVERROR(ENOSYS); + pixelstep[c->plane] = c->step_minus1 + 1; + if 
(pixelstep[c->plane] >= 8) + return AVERROR(ENOSYS); + nb_planes = FFMAX(nb_planes, c->plane + 1); + } + if ((desc->log2_chroma_w || desc->log2_chroma_h) && nb_planes < 3) + return AVERROR(ENOSYS); /* exclude NV12 and NV21 */ + memset(draw, 0, sizeof(*draw)); + draw->desc = desc; + draw->format = format; + draw->nb_planes = nb_planes; + memcpy(draw->pixelstep, pixelstep, sizeof(draw->pixelstep)); + if (nb_planes >= 3 && !(desc->flags & PIX_FMT_RGB)) { + draw->hsub[1] = draw->hsub[2] = draw->hsub_max = desc->log2_chroma_w; + draw->vsub[1] = draw->vsub[2] = draw->vsub_max = desc->log2_chroma_h; + } + for (i = 0; i < ((desc->nb_components - 1) | 1); i++) + draw->comp_mask[desc->comp[i].plane] |= + 1 << (desc->comp[i].offset_plus1 - 1); + return 0; +} + +void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4]) +{ + unsigned i; + uint8_t rgba_map[4]; + + if (rgba != color->rgba) + memcpy(color->rgba, rgba, sizeof(color->rgba)); + if ((draw->desc->flags & PIX_FMT_RGB) && draw->nb_planes == 1 && + ff_fill_rgba_map(rgba_map, draw->format) >= 0) { + for (i = 0; i < 4; i++) + color->comp[0].u8[rgba_map[i]] = rgba[i]; + } else if (draw->nb_planes == 3 || draw->nb_planes == 4) { + /* assume YUV */ + color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]); + color->comp[1].u8[0] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0); + color->comp[2].u8[0] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0); + color->comp[3].u8[0] = rgba[3]; + } else if (draw->format == AV_PIX_FMT_GRAY8 || draw->format == AV_PIX_FMT_GRAY8A) { + color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]); + color->comp[1].u8[0] = rgba[3]; + } else { + av_log(NULL, AV_LOG_WARNING, + "Color conversion not implemented for %s\n", draw->desc->name); + memset(color, 128, sizeof(*color)); + } +} + +static uint8_t *pointer_at(FFDrawContext *draw, uint8_t *data[], int linesize[], + int plane, int x, int y) +{ + return data[plane] + + (y >> draw->vsub[plane]) * linesize[plane] + + 
(x >> draw->hsub[plane]) * draw->pixelstep[plane]; +} + +void ff_copy_rectangle2(FFDrawContext *draw, + uint8_t *dst[], int dst_linesize[], + uint8_t *src[], int src_linesize[], + int dst_x, int dst_y, int src_x, int src_y, + int w, int h) +{ + int plane, y, wp, hp; + uint8_t *p, *q; + + for (plane = 0; plane < draw->nb_planes; plane++) { + p = pointer_at(draw, src, src_linesize, plane, src_x, src_y); + q = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y); + wp = (w >> draw->hsub[plane]) * draw->pixelstep[plane]; + hp = (h >> draw->vsub[plane]); + for (y = 0; y < hp; y++) { + memcpy(q, p, wp); + p += src_linesize[plane]; + q += dst_linesize[plane]; + } + } +} + +void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_x, int dst_y, int w, int h) +{ + int plane, x, y, wp, hp; + uint8_t *p0, *p; + + for (plane = 0; plane < draw->nb_planes; plane++) { + p0 = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y); + wp = (w >> draw->hsub[plane]); + hp = (h >> draw->vsub[plane]); + if (!hp) + return; + p = p0; + /* copy first line from color */ + for (x = 0; x < wp; x++) { + memcpy(p, color->comp[plane].u8, draw->pixelstep[plane]); + p += draw->pixelstep[plane]; + } + wp *= draw->pixelstep[plane]; + /* copy next lines from first line */ + p = p0 + dst_linesize[plane]; + for (y = 1; y < hp; y++) { + memcpy(p, p0, wp); + p += dst_linesize[plane]; + } + } +} + +/** + * Clip interval [x; x+w[ within [0; wmax[. + * The resulting w may be negative if the final interval is empty. + * dx, if not null, return the difference between in and out value of x. + */ +static void clip_interval(int wmax, int *x, int *w, int *dx) +{ + if (dx) + *dx = 0; + if (*x < 0) { + if (dx) + *dx = -*x; + *w += *x; + *x = 0; + } + if (*x + *w > wmax) + *w = wmax - *x; +} + +/** + * Decompose w pixels starting at x + * into start + (w starting at x) + end + * with x and w aligned on multiples of 1<<sub. 
+ */ +static void subsampling_bounds(int sub, int *x, int *w, int *start, int *end) +{ + int mask = (1 << sub) - 1; + + *start = (-*x) & mask; + *x += *start; + *start = FFMIN(*start, *w); + *w -= *start; + *end = *w & mask; + *w >>= sub; +} + +static int component_used(FFDrawContext *draw, int plane, int comp) +{ + return (draw->comp_mask[plane] >> comp) & 1; +} + +/* If alpha is in the [ 0 ; 0x1010101 ] range, + then alpha * value is in the [ 0 ; 0xFFFFFFFF ] range, + and >> 24 gives a correct rounding. */ +static void blend_line(uint8_t *dst, unsigned src, unsigned alpha, + int dx, int w, unsigned hsub, int left, int right) +{ + unsigned asrc = alpha * src; + unsigned tau = 0x1010101 - alpha; + int x; + + if (left) { + unsigned suba = (left * alpha) >> hsub; + *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24; + dst += dx; + } + for (x = 0; x < w; x++) { + *dst = (*dst * tau + asrc) >> 24; + dst += dx; + } + if (right) { + unsigned suba = (right * alpha) >> hsub; + *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24; + } +} + +void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_w, int dst_h, + int x0, int y0, int w, int h) +{ + unsigned alpha, nb_planes, nb_comp, plane, comp; + int w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y; + uint8_t *p0, *p; + + /* TODO optimize if alpha = 0xFF */ + clip_interval(dst_w, &x0, &w, NULL); + clip_interval(dst_h, &y0, &h, NULL); + if (w <= 0 || h <= 0 || !color->rgba[3]) + return; + /* 0x10203 * alpha + 2 is in the [ 2 ; 0x1010101 - 2 ] range */ + alpha = 0x10203 * color->rgba[3] + 0x2; + nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */ + for (plane = 0; plane < nb_planes; plane++) { + nb_comp = draw->pixelstep[plane]; + p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0); + w_sub = w; + h_sub = h; + x_sub = x0; + y_sub = y0; + subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right); + subsampling_bounds(draw->vsub[plane], 
&y_sub, &h_sub, &top, &bottom); + for (comp = 0; comp < nb_comp; comp++) { + if (!component_used(draw, plane, comp)) + continue; + p = p0 + comp; + if (top) { + blend_line(p, color->comp[plane].u8[comp], alpha >> 1, + draw->pixelstep[plane], w_sub, + draw->hsub[plane], left, right); + p += dst_linesize[plane]; + } + for (y = 0; y < h_sub; y++) { + blend_line(p, color->comp[plane].u8[comp], alpha, + draw->pixelstep[plane], w_sub, + draw->hsub[plane], left, right); + p += dst_linesize[plane]; + } + if (bottom) + blend_line(p, color->comp[plane].u8[comp], alpha >> 1, + draw->pixelstep[plane], w_sub, + draw->hsub[plane], left, right); + } + } +} + +static void blend_pixel(uint8_t *dst, unsigned src, unsigned alpha, + uint8_t *mask, int mask_linesize, int l2depth, + unsigned w, unsigned h, unsigned shift, unsigned xm0) +{ + unsigned xm, x, y, t = 0; + unsigned xmshf = 3 - l2depth; + unsigned xmmod = 7 >> l2depth; + unsigned mbits = (1 << (1 << l2depth)) - 1; + unsigned mmult = 255 / mbits; + + for (y = 0; y < h; y++) { + xm = xm0; + for (x = 0; x < w; x++) { + t += ((mask[xm >> xmshf] >> ((~xm & xmmod) << l2depth)) & mbits) + * mmult; + xm++; + } + mask += mask_linesize; + } + alpha = (t >> shift) * alpha; + *dst = ((0x1010101 - alpha) * *dst + alpha * src) >> 24; +} + +static void blend_line_hv(uint8_t *dst, int dst_delta, + unsigned src, unsigned alpha, + uint8_t *mask, int mask_linesize, int l2depth, int w, + unsigned hsub, unsigned vsub, + int xm, int left, int right, int hband) +{ + int x; + + if (left) { + blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth, + left, hband, hsub + vsub, xm); + dst += dst_delta; + xm += left; + } + for (x = 0; x < w; x++) { + blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth, + 1 << hsub, hband, hsub + vsub, xm); + dst += dst_delta; + xm += 1 << hsub; + } + if (right) + blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth, + right, hband, hsub + vsub, xm); +} + +void ff_blend_mask(FFDrawContext *draw, FFDrawColor 
*color, + uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h, + uint8_t *mask, int mask_linesize, int mask_w, int mask_h, + int l2depth, unsigned endianness, int x0, int y0) +{ + unsigned alpha, nb_planes, nb_comp, plane, comp; + int xm0, ym0, w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y; + uint8_t *p0, *p, *m; + + clip_interval(dst_w, &x0, &mask_w, &xm0); + clip_interval(dst_h, &y0, &mask_h, &ym0); + mask += ym0 * mask_linesize; + if (mask_w <= 0 || mask_h <= 0 || !color->rgba[3]) + return; + /* alpha is in the [ 0 ; 0x10203 ] range, + alpha * mask is in the [ 0 ; 0x1010101 - 4 ] range */ + alpha = (0x10307 * color->rgba[3] + 0x3) >> 8; + nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */ + for (plane = 0; plane < nb_planes; plane++) { + nb_comp = draw->pixelstep[plane]; + p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0); + w_sub = mask_w; + h_sub = mask_h; + x_sub = x0; + y_sub = y0; + subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right); + subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom); + for (comp = 0; comp < nb_comp; comp++) { + if (!component_used(draw, plane, comp)) + continue; + p = p0 + comp; + m = mask; + if (top) { + blend_line_hv(p, draw->pixelstep[plane], + color->comp[plane].u8[comp], alpha, + m, mask_linesize, l2depth, w_sub, + draw->hsub[plane], draw->vsub[plane], + xm0, left, right, top); + p += dst_linesize[plane]; + m += top * mask_linesize; + } + for (y = 0; y < h_sub; y++) { + blend_line_hv(p, draw->pixelstep[plane], + color->comp[plane].u8[comp], alpha, + m, mask_linesize, l2depth, w_sub, + draw->hsub[plane], draw->vsub[plane], + xm0, left, right, 1 << draw->vsub[plane]); + p += dst_linesize[plane]; + m += mask_linesize << draw->vsub[plane]; + } + if (bottom) + blend_line_hv(p, draw->pixelstep[plane], + color->comp[plane].u8[comp], alpha, + m, mask_linesize, l2depth, w_sub, + draw->hsub[plane], draw->vsub[plane], + xm0, left, right, bottom); + } + } +} + +int 
ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir, + int value) +{ + unsigned shift = sub_dir ? draw->vsub_max : draw->hsub_max; + + if (!shift) + return value; + if (round_dir >= 0) + value += round_dir ? (1 << shift) - 1 : 1 << (shift - 1); + return (value >> shift) << shift; +} + +AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags) +{ + enum AVPixelFormat i, pix_fmts[AV_PIX_FMT_NB + 1]; + unsigned n = 0; + FFDrawContext draw; + + for (i = 0; i < AV_PIX_FMT_NB; i++) + if (ff_draw_init(&draw, i, flags) >= 0) + pix_fmts[n++] = i; + pix_fmts[n++] = AV_PIX_FMT_NONE; + return ff_make_format_list(pix_fmts); +} + +#ifdef TEST + +#undef printf + +int main(void) +{ + enum AVPixelFormat f; + const AVPixFmtDescriptor *desc; + FFDrawContext draw; + FFDrawColor color; + int r, i; + + for (f = 0; f < AV_PIX_FMT_NB; f++) { + desc = av_pix_fmt_desc_get(f); + if (!desc->name) + continue; + printf("Testing %s...%*s", desc->name, + (int)(16 - strlen(desc->name)), ""); + r = ff_draw_init(&draw, f, 0); + if (r < 0) { + char buf[128]; + av_strerror(r, buf, sizeof(buf)); + printf("no: %s\n", buf); + continue; + } + ff_draw_color(&draw, &color, (uint8_t[]) { 1, 0, 0, 1 }); + for (i = 0; i < sizeof(color); i++) + if (((uint8_t *)&color)[i] != 128) + break; + if (i == sizeof(color)) { + printf("fallback color\n"); + continue; + } + printf("ok\n"); + } + return 0; +} + +#endif diff --git a/libavfilter/drawutils.h b/libavfilter/drawutils.h index 73f482e..5ffffe7 100644 --- a/libavfilter/drawutils.h +++ b/libavfilter/drawutils.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -25,8 +25,11 @@ */ #include <stdint.h> +#include "avfilter.h" #include "libavutil/pixfmt.h" +int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt); + int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4], enum AVPixelFormat pix_fmt, uint8_t rgba_color[4], @@ -40,4 +43,113 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4], uint8_t *src[4], int src_linesize[4], int pixelstep[4], int hsub, int vsub, int x, int y, int y2, int w, int h); +#define MAX_PLANES 4 + +typedef struct FFDrawContext { + const struct AVPixFmtDescriptor *desc; + enum AVPixelFormat format; + unsigned nb_planes; + int pixelstep[MAX_PLANES]; /*< offset between pixels */ + uint8_t comp_mask[MAX_PLANES]; /*< bitmask of used non-alpha components */ + uint8_t hsub[MAX_PLANES]; /*< horizontal subsampling */ + uint8_t vsub[MAX_PLANES]; /*< vertical subsampling */ + uint8_t hsub_max; + uint8_t vsub_max; +} FFDrawContext; + +typedef struct FFDrawColor { + uint8_t rgba[4]; + union { + uint32_t u32; + uint16_t u16; + uint8_t u8[4]; + } comp[MAX_PLANES]; +} FFDrawColor; + +/** + * Init a draw context. + * + * Only a limited number of pixel formats are supported, if format is not + * supported the function will return an error. + * No flags currently defined. 
+ * @return 0 for success, < 0 for error + */ +int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags); + +/** + * Prepare a color. + */ +void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4]); + +/** + * Copy a rectangle from an image to another. + * + * The coordinates must be as even as the subsampling requires. + */ +void ff_copy_rectangle2(FFDrawContext *draw, + uint8_t *dst[], int dst_linesize[], + uint8_t *src[], int src_linesize[], + int dst_x, int dst_y, int src_x, int src_y, + int w, int h); + +/** + * Fill a rectangle with an uniform color. + * + * The coordinates must be as even as the subsampling requires. + * The color needs to be inited with ff_draw_color. + */ +void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_x, int dst_y, int w, int h); + +/** + * Blend a rectangle with an uniform color. + */ +void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], + int dst_w, int dst_h, + int x0, int y0, int w, int h); + +/** + * Blend an alpha mask with an uniform color. 
+ * + * @param draw draw context + * @param color color for the overlay; + * @param dst destination image + * @param dst_linesize line stride of the destination + * @param dst_w width of the destination image + * @param dst_h height of the destination image + * @param mask mask + * @param mask_linesize line stride of the mask + * @param mask_w width of the mask + * @param mask_h height of the mask + * @param l2depth log2 of depth of the mask (0 for 1bpp, 3 for 8bpp) + * @param endianness bit order of the mask (0: MSB to the left) + * @param x0 horizontal position of the overlay + * @param y0 vertical position of the overlay + */ +void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color, + uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h, + uint8_t *mask, int mask_linesize, int mask_w, int mask_h, + int l2depth, unsigned endianness, int x0, int y0); + +/** + * Round a dimension according to subsampling. + * + * @param draw draw context + * @param sub_dir 0 for horizontal, 1 for vertical + * @param round_dir 0 nearest, -1 round down, +1 round up + * @param value value to round + * @return the rounded value + */ +int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir, + int value); + +/** + * Return the list of pixel formats supported by the draw functions. + * + * The flags are the same as ff_draw_init, i.e., none currently. + */ +AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags); + #endif /* AVFILTER_DRAWUTILS_H */ diff --git a/libavfilter/f_ebur128.c b/libavfilter/f_ebur128.c new file mode 100644 index 0000000..85fddad --- /dev/null +++ b/libavfilter/f_ebur128.c @@ -0,0 +1,750 @@ +/* + * Copyright (c) 2012 Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * EBU R.128 implementation + * @see http://tech.ebu.ch/loudness + * @see https://www.youtube.com/watch?v=iuEtQqC-Sqo "EBU R128 Introduction - Florian Camerer" + * @todo True Peak + * @todo implement start/stop/reset through filter command injection + * @todo support other frequencies to avoid resampling + */ + +#include <math.h> + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/xga_font_data.h" +#include "libavutil/opt.h" +#include "libavutil/timestamp.h" +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +#define MAX_CHANNELS 63 + +/* pre-filter coefficients */ +#define PRE_B0 1.53512485958697 +#define PRE_B1 -2.69169618940638 +#define PRE_B2 1.19839281085285 +#define PRE_A1 -1.69065929318241 +#define PRE_A2 0.73248077421585 + +/* RLB-filter coefficients */ +#define RLB_B0 1.0 +#define RLB_B1 -2.0 +#define RLB_B2 1.0 +#define RLB_A1 -1.99004745483398 +#define RLB_A2 0.99007225036621 + +#define ABS_THRES -70 ///< silence gate: we discard anything below this absolute (LUFS) threshold +#define ABS_UP_THRES 10 ///< upper loud limit to consider (ABS_THRES being the minimum) +#define HIST_GRAIN 100 ///< defines histogram precision +#define HIST_SIZE ((ABS_UP_THRES - ABS_THRES) * HIST_GRAIN + 1) + +/** + * An histogram is an array of HIST_SIZE hist_entry storing all the energies + * recorded (with an accuracy of 1/HIST_GRAIN) of the loudnesses from ABS_THRES + * (at 0) to 
ABS_UP_THRES (at HIST_SIZE-1). + * This fixed-size system avoids the need of a list of energies growing + * infinitely over the time and is thus more scalable. + */ +struct hist_entry { + int count; ///< how many times the corresponding value occurred + double energy; ///< E = 10^((L + 0.691) / 10) + double loudness; ///< L = -0.691 + 10 * log10(E) +}; + +struct integrator { + double *cache[MAX_CHANNELS]; ///< window of filtered samples (N ms) + int cache_pos; ///< focus on the last added bin in the cache array + double sum[MAX_CHANNELS]; ///< sum of the last N ms filtered samples (cache content) + int filled; ///< 1 if the cache is completely filled, 0 otherwise + double rel_threshold; ///< relative threshold + double sum_kept_powers; ///< sum of the powers (weighted sums) above absolute threshold + int nb_kept_powers; ///< number of sum above absolute threshold + struct hist_entry *histogram; ///< histogram of the powers, used to compute LRA and I +}; + +struct rect { int x, y, w, h; }; + +typedef struct { + const AVClass *class; ///< AVClass context for log and options purpose + + /* video */ + int do_video; ///< 1 if video output enabled, 0 otherwise + int w, h; ///< size of the video output + struct rect text; ///< rectangle for the LU legend on the left + struct rect graph; ///< rectangle for the main graph in the center + struct rect gauge; ///< rectangle for the gauge on the right + AVFilterBufferRef *outpicref; ///< output picture reference, updated regularly + int meter; ///< select a EBU mode between +9 and +18 + int scale_range; ///< the range of LU values according to the meter + int y_zero_lu; ///< the y value (pixel position) for 0 LU + int *y_line_ref; ///< y reference values for drawing the LU lines in the graph and the gauge + + /* audio */ + int nb_channels; ///< number of channels in the input + double *ch_weighting; ///< channel weighting mapping + int sample_count; ///< sample count used for refresh frequency, reset at refresh + + /* Filter 
caches. + * The mult by 3 in the following is for X[i], X[i-1] and X[i-2] */ + double x[MAX_CHANNELS * 3]; ///< 3 input samples cache for each channel + double y[MAX_CHANNELS * 3]; ///< 3 pre-filter samples cache for each channel + double z[MAX_CHANNELS * 3]; ///< 3 RLB-filter samples cache for each channel + +#define I400_BINS (48000 * 4 / 10) +#define I3000_BINS (48000 * 3) + struct integrator i400; ///< 400ms integrator, used for Momentary loudness (M), and Integrated loudness (I) + struct integrator i3000; ///< 3s integrator, used for Short term loudness (S), and Loudness Range (LRA) + + /* I and LRA specific */ + double integrated_loudness; ///< integrated loudness in LUFS (I) + double loudness_range; ///< loudness range in LU (LRA) + double lra_low, lra_high; ///< low and high LRA values +} EBUR128Context; + +#define OFFSET(x) offsetof(EBUR128Context, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +#define V AV_OPT_FLAG_VIDEO_PARAM +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption ebur128_options[] = { + { "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, V|F }, + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x480"}, 0, 0, V|F }, + { "meter", "set scale meter (+9 to +18)", OFFSET(meter), AV_OPT_TYPE_INT, {.i64 = 9}, 9, 18, V|F }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(ebur128); + +static const uint8_t graph_colors[] = { + 0xdd, 0x66, 0x66, // value above 0LU non reached + 0x66, 0x66, 0xdd, // value below 0LU non reached + 0x96, 0x33, 0x33, // value above 0LU reached + 0x33, 0x33, 0x96, // value below 0LU reached + 0xdd, 0x96, 0x96, // value above 0LU line non reached + 0x96, 0x96, 0xdd, // value below 0LU line non reached + 0xdd, 0x33, 0x33, // value above 0LU line reached + 0x33, 0x33, 0xdd, // value below 0LU line reached +}; + +static const uint8_t *get_graph_color(const EBUR128Context *ebur128, int v, int y) +{ + const int below0 = y > ebur128->y_zero_lu; + const int reached = y >= v; + 
const int line = ebur128->y_line_ref[y] || y == ebur128->y_zero_lu; + const int colorid = 4*line + 2*reached + below0; + return graph_colors + 3*colorid; +} + +static inline int lu_to_y(const EBUR128Context *ebur128, double v) +{ + v += 2 * ebur128->meter; // make it in range [0;...] + v = av_clipf(v, 0, ebur128->scale_range); // make sure it's in the graph scale + v = ebur128->scale_range - v; // invert value (y=0 is on top) + return v * ebur128->graph.h / ebur128->scale_range; // rescale from scale range to px height +} + +#define FONT8 0 +#define FONT16 1 + +static const uint8_t font_colors[] = { + 0xdd, 0xdd, 0x00, + 0x00, 0x96, 0x96, +}; + +static void drawtext(AVFilterBufferRef *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...) +{ + int i; + char buf[128] = {0}; + const uint8_t *font; + int font_height; + va_list vl; + + if (ftid == FONT16) font = avpriv_vga16_font, font_height = 16; + else if (ftid == FONT8) font = avpriv_cga_font, font_height = 8; + else return; + + va_start(vl, fmt); + vsnprintf(buf, sizeof(buf), fmt, vl); + va_end(vl); + + for (i = 0; buf[i]; i++) { + int char_y, mask; + uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*3; + + for (char_y = 0; char_y < font_height; char_y++) { + for (mask = 0x80; mask; mask >>= 1) { + if (font[buf[i] * font_height + char_y] & mask) + memcpy(p, color, 3); + else + memcpy(p, "\x00\x00\x00", 3); + p += 3; + } + p += pic->linesize[0] - 8*3; + } + } +} + +static void drawline(AVFilterBufferRef *pic, int x, int y, int len, int step) +{ + int i; + uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3; + + for (i = 0; i < len; i++) { + memcpy(p, "\x00\xff\x00", 3); + p += step; + } +} + +static int config_video_output(AVFilterLink *outlink) +{ + int i, x, y; + uint8_t *p; + AVFilterContext *ctx = outlink->src; + EBUR128Context *ebur128 = ctx->priv; + AVFilterBufferRef *outpicref; + + /* check if there is enough space to represent everything decently */ + if (ebur128->w < 640 || 
ebur128->h < 480) { + av_log(ctx, AV_LOG_ERROR, "Video size %dx%d is too small, " + "minimum size is 640x480\n", ebur128->w, ebur128->h); + return AVERROR(EINVAL); + } + outlink->w = ebur128->w; + outlink->h = ebur128->h; + +#define PAD 8 + + /* configure text area position and size */ + ebur128->text.x = PAD; + ebur128->text.y = 40; + ebur128->text.w = 3 * 8; // 3 characters + ebur128->text.h = ebur128->h - PAD - ebur128->text.y; + + /* configure gauge position and size */ + ebur128->gauge.w = 20; + ebur128->gauge.h = ebur128->text.h; + ebur128->gauge.x = ebur128->w - PAD - ebur128->gauge.w; + ebur128->gauge.y = ebur128->text.y; + + /* configure graph position and size */ + ebur128->graph.x = ebur128->text.x + ebur128->text.w + PAD; + ebur128->graph.y = ebur128->gauge.y; + ebur128->graph.w = ebur128->gauge.x - ebur128->graph.x - PAD; + ebur128->graph.h = ebur128->gauge.h; + + /* graph and gauge share the LU-to-pixel code */ + av_assert0(ebur128->graph.h == ebur128->gauge.h); + + /* prepare the initial picref buffer */ + avfilter_unref_bufferp(&ebur128->outpicref); + ebur128->outpicref = outpicref = + ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2, + outlink->w, outlink->h); + if (!outpicref) + return AVERROR(ENOMEM); + outlink->sample_aspect_ratio = (AVRational){1,1}; + + /* init y references values (to draw LU lines) */ + ebur128->y_line_ref = av_calloc(ebur128->graph.h + 1, sizeof(*ebur128->y_line_ref)); + if (!ebur128->y_line_ref) + return AVERROR(ENOMEM); + + /* black background */ + memset(outpicref->data[0], 0, ebur128->h * outpicref->linesize[0]); + + /* draw LU legends */ + drawtext(outpicref, PAD, PAD+16, FONT8, font_colors+3, " LU"); + for (i = ebur128->meter; i >= -ebur128->meter * 2; i--) { + y = lu_to_y(ebur128, i); + x = PAD + (i < 10 && i > -10) * 8; + ebur128->y_line_ref[y] = i; + y -= 4; // -4 to center vertically + drawtext(outpicref, x, y + ebur128->graph.y, FONT8, font_colors+3, + "%c%d", i < 0 ? '-' : i > 0 ? 
'+' : ' ', FFABS(i)); + } + + /* draw graph */ + ebur128->y_zero_lu = lu_to_y(ebur128, 0); + p = outpicref->data[0] + ebur128->graph.y * outpicref->linesize[0] + + ebur128->graph.x * 3; + for (y = 0; y < ebur128->graph.h; y++) { + const uint8_t *c = get_graph_color(ebur128, INT_MAX, y); + + for (x = 0; x < ebur128->graph.w; x++) + memcpy(p + x*3, c, 3); + p += outpicref->linesize[0]; + } + + /* draw fancy rectangles around the graph and the gauge */ +#define DRAW_RECT(r) do { \ + drawline(outpicref, r.x, r.y - 1, r.w, 3); \ + drawline(outpicref, r.x, r.y + r.h, r.w, 3); \ + drawline(outpicref, r.x - 1, r.y, r.h, outpicref->linesize[0]); \ + drawline(outpicref, r.x + r.w, r.y, r.h, outpicref->linesize[0]); \ +} while (0) + DRAW_RECT(ebur128->graph); + DRAW_RECT(ebur128->gauge); + + return 0; +} + +static int config_audio_output(AVFilterLink *outlink) +{ + int i; + AVFilterContext *ctx = outlink->src; + EBUR128Context *ebur128 = ctx->priv; + const int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout); + +#define BACK_MASK (AV_CH_BACK_LEFT |AV_CH_BACK_CENTER |AV_CH_BACK_RIGHT| \ + AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_BACK_RIGHT) + + ebur128->nb_channels = nb_channels; + ebur128->ch_weighting = av_calloc(nb_channels, sizeof(*ebur128->ch_weighting)); + if (!ebur128->ch_weighting) + return AVERROR(ENOMEM); + + for (i = 0; i < nb_channels; i++) { + + /* channel weighting */ + if ((outlink->channel_layout & 1ULL<<i) == AV_CH_LOW_FREQUENCY) + continue; + if (outlink->channel_layout & 1ULL<<i & BACK_MASK) + ebur128->ch_weighting[i] = 1.41; + else + ebur128->ch_weighting[i] = 1.0; + + /* bins buffer for the two integration window (400ms and 3s) */ + ebur128->i400.cache[i] = av_calloc(I400_BINS, sizeof(*ebur128->i400.cache[0])); + ebur128->i3000.cache[i] = av_calloc(I3000_BINS, sizeof(*ebur128->i3000.cache[0])); + if (!ebur128->i400.cache[i] || !ebur128->i3000.cache[i]) + return AVERROR(ENOMEM); + } + + return 0; +} + +#define 
ENERGY(loudness) (pow(10, ((loudness) + 0.691) / 10.)) +#define LOUDNESS(energy) (-0.691 + 10 * log10(energy)) + +static struct hist_entry *get_histogram(void) +{ + int i; + struct hist_entry *h = av_calloc(HIST_SIZE, sizeof(*h)); + + for (i = 0; i < HIST_SIZE; i++) { + h[i].loudness = i / (double)HIST_GRAIN + ABS_THRES; + h[i].energy = ENERGY(h[i].loudness); + } + return h; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + EBUR128Context *ebur128 = ctx->priv; + AVFilterPad pad; + + ebur128->class = &ebur128_class; + av_opt_set_defaults(ebur128); + + if ((ret = av_set_options_string(ebur128, args, "=", ":")) < 0) + return ret; + + // if meter is +9 scale, scale range is from -18 LU to +9 LU (or 3*9) + // if meter is +18 scale, scale range is from -36 LU to +18 LU (or 3*18) + ebur128->scale_range = 3 * ebur128->meter; + + ebur128->i400.histogram = get_histogram(); + ebur128->i3000.histogram = get_histogram(); + + ebur128->integrated_loudness = ABS_THRES; + ebur128->loudness_range = 0; + + /* insert output pads */ + if (ebur128->do_video) { + pad = (AVFilterPad){ + .name = av_strdup("out0"), + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_video_output, + }; + if (!pad.name) + return AVERROR(ENOMEM); + ff_insert_outpad(ctx, 0, &pad); + } + pad = (AVFilterPad){ + .name = av_asprintf("out%d", ebur128->do_video), + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_audio_output, + }; + if (!pad.name) + return AVERROR(ENOMEM); + ff_insert_outpad(ctx, ebur128->do_video, &pad); + + /* summary */ + av_log(ctx, AV_LOG_VERBOSE, "EBU +%d scale\n", ebur128->meter); + + return 0; +} + +#define HIST_POS(power) (int)(((power) - ABS_THRES) * HIST_GRAIN) + +/* loudness and power should be set such as loudness = -0.691 + + * 10*log10(power), we just avoid doing that calculus two times */ +static int gate_update(struct integrator *integ, double power, + double loudness, int gate_thres) +{ + int ipower; + double relative_threshold; + int 
gate_hist_pos; + + /* update powers histograms by incrementing current power count */ + ipower = av_clip(HIST_POS(loudness), 0, HIST_SIZE - 1); + integ->histogram[ipower].count++; + + /* compute relative threshold and get its position in the histogram */ + integ->sum_kept_powers += power; + integ->nb_kept_powers++; + relative_threshold = integ->sum_kept_powers / integ->nb_kept_powers; + if (!relative_threshold) + relative_threshold = 1e-12; + integ->rel_threshold = LOUDNESS(relative_threshold) + gate_thres; + gate_hist_pos = av_clip(HIST_POS(integ->rel_threshold), 0, HIST_SIZE - 1); + + return gate_hist_pos; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) +{ + int i, ch, idx_insample; + AVFilterContext *ctx = inlink->dst; + EBUR128Context *ebur128 = ctx->priv; + const int nb_channels = ebur128->nb_channels; + const int nb_samples = insamples->audio->nb_samples; + const double *samples = (double *)insamples->data[0]; + AVFilterBufferRef *pic = ebur128->outpicref; + + for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) { + const int bin_id_400 = ebur128->i400.cache_pos; + const int bin_id_3000 = ebur128->i3000.cache_pos; + +#define MOVE_TO_NEXT_CACHED_ENTRY(time) do { \ + ebur128->i##time.cache_pos++; \ + if (ebur128->i##time.cache_pos == I##time##_BINS) { \ + ebur128->i##time.filled = 1; \ + ebur128->i##time.cache_pos = 0; \ + } \ +} while (0) + + MOVE_TO_NEXT_CACHED_ENTRY(400); + MOVE_TO_NEXT_CACHED_ENTRY(3000); + + for (ch = 0; ch < nb_channels; ch++) { + double bin; + + if (!ebur128->ch_weighting[ch]) + continue; + + /* Y[i] = X[i]*b0 + X[i-1]*b1 + X[i-2]*b2 - Y[i-1]*a1 - Y[i-2]*a2 */ +#define FILTER(Y, X, name) do { \ + double *dst = ebur128->Y + ch*3; \ + double *src = ebur128->X + ch*3; \ + dst[2] = dst[1]; \ + dst[1] = dst[0]; \ + dst[0] = src[0]*name##_B0 + src[1]*name##_B1 + src[2]*name##_B2 \ + - dst[1]*name##_A1 - dst[2]*name##_A2; \ +} while (0) + + ebur128->x[ch * 3] = *samples++; // set X[i] + + // TODO: 
merge both filters in one? + FILTER(y, x, PRE); // apply pre-filter + ebur128->x[ch * 3 + 2] = ebur128->x[ch * 3 + 1]; + ebur128->x[ch * 3 + 1] = ebur128->x[ch * 3 ]; + FILTER(z, y, RLB); // apply RLB-filter + + bin = ebur128->z[ch * 3] * ebur128->z[ch * 3]; + + /* add the new value, and limit the sum to the cache size (400ms or 3s) + * by removing the oldest one */ + ebur128->i400.sum [ch] = ebur128->i400.sum [ch] + bin - ebur128->i400.cache [ch][bin_id_400]; + ebur128->i3000.sum[ch] = ebur128->i3000.sum[ch] + bin - ebur128->i3000.cache[ch][bin_id_3000]; + + /* override old cache entry with the new value */ + ebur128->i400.cache [ch][bin_id_400 ] = bin; + ebur128->i3000.cache[ch][bin_id_3000] = bin; + } + + /* For integrated loudness, gating blocks are 400ms long with 75% + * overlap (see BS.1770-2 p5), so a re-computation is needed each 100ms + * (4800 samples at 48kHz). */ + if (++ebur128->sample_count == 4800) { + double loudness_400, loudness_3000; + double power_400 = 1e-12, power_3000 = 1e-12; + AVFilterLink *outlink = ctx->outputs[0]; + const int64_t pts = insamples->pts + + av_rescale_q(idx_insample, (AVRational){ 1, inlink->sample_rate }, + outlink->time_base); + + ebur128->sample_count = 0; + +#define COMPUTE_LOUDNESS(m, time) do { \ + if (ebur128->i##time.filled) { \ + /* weighting sum of the last <time> ms */ \ + for (ch = 0; ch < nb_channels; ch++) \ + power_##time += ebur128->ch_weighting[ch] * ebur128->i##time.sum[ch]; \ + power_##time /= I##time##_BINS; \ + } \ + loudness_##time = LOUDNESS(power_##time); \ +} while (0) + + COMPUTE_LOUDNESS(M, 400); + COMPUTE_LOUDNESS(S, 3000); + + /* Integrated loudness */ +#define I_GATE_THRES -10 // initially defined to -8 LU in the first EBU standard + + if (loudness_400 >= ABS_THRES) { + double integrated_sum = 0; + int nb_integrated = 0; + int gate_hist_pos = gate_update(&ebur128->i400, power_400, + loudness_400, I_GATE_THRES); + + /* compute integrated loudness by summing the histogram values + * above the 
relative threshold */ + for (i = gate_hist_pos; i < HIST_SIZE; i++) { + const int nb_v = ebur128->i400.histogram[i].count; + nb_integrated += nb_v; + integrated_sum += nb_v * ebur128->i400.histogram[i].energy; + } + if (nb_integrated) + ebur128->integrated_loudness = LOUDNESS(integrated_sum / nb_integrated); + } + + /* LRA */ +#define LRA_GATE_THRES -20 +#define LRA_LOWER_PRC 10 +#define LRA_HIGHER_PRC 95 + + /* XXX: example code in EBU 3342 is ">=" but formula in BS.1770 + * specs is ">" */ + if (loudness_3000 >= ABS_THRES) { + int nb_powers = 0; + int gate_hist_pos = gate_update(&ebur128->i3000, power_3000, + loudness_3000, LRA_GATE_THRES); + + for (i = gate_hist_pos; i < HIST_SIZE; i++) + nb_powers += ebur128->i3000.histogram[i].count; + if (nb_powers) { + int n, nb_pow; + + /* get lower loudness to consider */ + n = 0; + nb_pow = LRA_LOWER_PRC * nb_powers / 100. + 0.5; + for (i = gate_hist_pos; i < HIST_SIZE; i++) { + n += ebur128->i3000.histogram[i].count; + if (n >= nb_pow) { + ebur128->lra_low = ebur128->i3000.histogram[i].loudness; + break; + } + } + + /* get higher loudness to consider */ + n = nb_powers; + nb_pow = LRA_HIGHER_PRC * nb_powers / 100. + 0.5; + for (i = HIST_SIZE - 1; i >= 0; i--) { + n -= ebur128->i3000.histogram[i].count; + if (n < nb_pow) { + ebur128->lra_high = ebur128->i3000.histogram[i].loudness; + break; + } + } + + // XXX: show low & high on the graph? 
+ ebur128->loudness_range = ebur128->lra_high - ebur128->lra_low; + } + } + +#define LOG_FMT "M:%6.1f S:%6.1f I:%6.1f LUFS LRA:%6.1f LU" + + /* push one video frame */ + if (ebur128->do_video) { + int x, y, ret; + uint8_t *p; + + const int y_loudness_lu_graph = lu_to_y(ebur128, loudness_3000 + 23); + const int y_loudness_lu_gauge = lu_to_y(ebur128, loudness_400 + 23); + + /* draw the graph using the short-term loudness */ + p = pic->data[0] + ebur128->graph.y*pic->linesize[0] + ebur128->graph.x*3; + for (y = 0; y < ebur128->graph.h; y++) { + const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_graph, y); + + memmove(p, p + 3, (ebur128->graph.w - 1) * 3); + memcpy(p + (ebur128->graph.w - 1) * 3, c, 3); + p += pic->linesize[0]; + } + + /* draw the gauge using the momentary loudness */ + p = pic->data[0] + ebur128->gauge.y*pic->linesize[0] + ebur128->gauge.x*3; + for (y = 0; y < ebur128->gauge.h; y++) { + const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_gauge, y); + + for (x = 0; x < ebur128->gauge.w; x++) + memcpy(p + x*3, c, 3); + p += pic->linesize[0]; + } + + /* draw textual info */ + drawtext(pic, PAD, PAD - PAD/2, FONT16, font_colors, + LOG_FMT " ", // padding to erase trailing characters + loudness_400, loudness_3000, + ebur128->integrated_loudness, ebur128->loudness_range); + + /* set pts and push frame */ + pic->pts = pts; + ret = ff_filter_frame(outlink, avfilter_ref_buffer(pic, ~AV_PERM_WRITE)); + if (ret < 0) + return ret; + } + + av_log(ctx, ebur128->do_video ? 
AV_LOG_VERBOSE : AV_LOG_INFO, + "t: %-10s " LOG_FMT "\n", av_ts2timestr(pts, &outlink->time_base), + loudness_400, loudness_3000, + ebur128->integrated_loudness, ebur128->loudness_range); + } + } + + return ff_filter_frame(ctx->outputs[ebur128->do_video], insamples); +} + +static int query_formats(AVFilterContext *ctx) +{ + EBUR128Context *ebur128 = ctx->priv; + AVFilterFormats *formats; + AVFilterChannelLayouts *layouts; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + + static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE }; + static const int input_srate[] = {48000, -1}; // ITU-R BS.1770 provides coeff only for 48kHz + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE }; + + /* set input audio formats */ + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_formats); + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts); + + formats = ff_make_format_list(input_srate); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &inlink->out_samplerates); + + /* set optional output video format */ + if (ebur128->do_video) { + formats = ff_make_format_list(pix_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &outlink->in_formats); + outlink = ctx->outputs[1]; + } + + /* set audio output formats (same as input since it's just a passthrough) */ + formats = ff_make_format_list(sample_fmts); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &outlink->in_formats); + + layouts = ff_all_channel_layouts(); + if (!layouts) + return AVERROR(ENOMEM); + ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts); + + formats = ff_make_format_list(input_srate); + if (!formats) + return AVERROR(ENOMEM); + ff_formats_ref(formats, &outlink->in_samplerates); + + 
return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + int i; + EBUR128Context *ebur128 = ctx->priv; + + av_log(ctx, AV_LOG_INFO, "Summary:\n\n" + " Integrated loudness:\n" + " I: %5.1f LUFS\n" + " Threshold: %5.1f LUFS\n\n" + " Loudness range:\n" + " LRA: %5.1f LU\n" + " Threshold: %5.1f LUFS\n" + " LRA low: %5.1f LUFS\n" + " LRA high: %5.1f LUFS\n", + ebur128->integrated_loudness, ebur128->i400.rel_threshold, + ebur128->loudness_range, ebur128->i3000.rel_threshold, + ebur128->lra_low, ebur128->lra_high); + + av_freep(&ebur128->y_line_ref); + av_freep(&ebur128->ch_weighting); + av_freep(&ebur128->i400.histogram); + av_freep(&ebur128->i3000.histogram); + for (i = 0; i < ebur128->nb_channels; i++) { + av_freep(&ebur128->i400.cache[i]); + av_freep(&ebur128->i3000.cache[i]); + } + for (i = 0; i < ctx->nb_outputs; i++) + av_freep(&ctx->output_pads[i].name); + avfilter_unref_bufferp(&ebur128->outpicref); +} + +static const AVFilterPad ebur128_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +AVFilter avfilter_af_ebur128 = { + .name = "ebur128", + .description = NULL_IF_CONFIG_SMALL("EBU R128 scanner."), + .priv_size = sizeof(EBUR128Context), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = ebur128_inputs, + .outputs = NULL, + .priv_class = &ebur128_class, +}; diff --git a/libavfilter/f_select.c b/libavfilter/f_select.c new file mode 100644 index 0000000..4881a85 --- /dev/null +++ b/libavfilter/f_select.c @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * filter for selecting which frame passes in the filterchain + */ + +#include "libavutil/eval.h" +#include "libavutil/fifo.h" +#include "libavutil/internal.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "audio.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +#if CONFIG_AVCODEC +#include "libavcodec/dsputil.h" +#endif + +static const char *const var_names[] = { + "TB", ///< timebase + + "pts", ///< original pts in the file of the frame + "start_pts", ///< first PTS in the stream, expressed in TB units + "prev_pts", ///< previous frame PTS + "prev_selected_pts", ///< previous selected frame PTS + + "t", ///< first PTS in seconds + "start_t", ///< first PTS in the stream, expressed in seconds + "prev_t", ///< previous frame time + "prev_selected_t", ///< previously selected time + + "pict_type", ///< the type of picture in the movie + "I", + "P", + "B", + "S", + "SI", + "SP", + "BI", + + "interlace_type", ///< the frame interlace type + "PROGRESSIVE", + "TOPFIRST", + "BOTTOMFIRST", + + "consumed_samples_n",///< number of samples consumed by the filter (only audio) + "samples_n", ///< number of samples in the current frame (only audio) + "sample_rate", ///< sample rate (only audio) + + "n", ///< frame number (starting from zero) + "selected_n", ///< selected frame number (starting from zero) + "prev_selected_n", ///< number of the last selected frame + + "key", ///< tell if the frame is a key frame + "pos", ///< original position in the file 
of the frame + + "scene", + + NULL +}; + +enum var_name { + VAR_TB, + + VAR_PTS, + VAR_START_PTS, + VAR_PREV_PTS, + VAR_PREV_SELECTED_PTS, + + VAR_T, + VAR_START_T, + VAR_PREV_T, + VAR_PREV_SELECTED_T, + + VAR_PICT_TYPE, + VAR_PICT_TYPE_I, + VAR_PICT_TYPE_P, + VAR_PICT_TYPE_B, + VAR_PICT_TYPE_S, + VAR_PICT_TYPE_SI, + VAR_PICT_TYPE_SP, + VAR_PICT_TYPE_BI, + + VAR_INTERLACE_TYPE, + VAR_INTERLACE_TYPE_P, + VAR_INTERLACE_TYPE_T, + VAR_INTERLACE_TYPE_B, + + VAR_CONSUMED_SAMPLES_N, + VAR_SAMPLES_N, + VAR_SAMPLE_RATE, + + VAR_N, + VAR_SELECTED_N, + VAR_PREV_SELECTED_N, + + VAR_KEY, + VAR_POS, + + VAR_SCENE, + + VAR_VARS_NB +}; + +typedef struct { + const AVClass *class; + AVExpr *expr; + char *expr_str; + double var_values[VAR_VARS_NB]; + int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise +#if CONFIG_AVCODEC + AVCodecContext *avctx; ///< codec context required for the DSPContext (scene detect only) + DSPContext c; ///< context providing optimized SAD methods (scene detect only) + double prev_mafd; ///< previous MAFD (scene detect only) +#endif + AVFilterBufferRef *prev_picref; ///< previous frame (scene detect only) + double select; +} SelectContext; + +#define OFFSET(x) offsetof(SelectContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM +static const AVOption options[] = { + { "expr", "set selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str = "1"}, 0, 0, FLAGS }, + { "e", "set selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str = "1"}, 0, 0, FLAGS }, + {NULL}, +}; + +static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class) +{ + SelectContext *select = ctx->priv; + const char *shorthand[] = { "expr", NULL }; + int ret; + + select->class = class; + av_opt_set_defaults(select); + + if ((ret = av_opt_set_from_string(select, args, shorthand, "=", ":")) < 0) + return ret; + + if ((ret = av_expr_parse(&select->expr, select->expr_str, + var_names, NULL, NULL, NULL, NULL, 0, 
ctx)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", select->expr_str); + return ret; + } + select->do_scene_detect = !!strstr(select->expr_str, "scene"); + + return 0; +} + +#define INTERLACE_TYPE_P 0 +#define INTERLACE_TYPE_T 1 +#define INTERLACE_TYPE_B 2 + +static int config_input(AVFilterLink *inlink) +{ + SelectContext *select = inlink->dst->priv; + + select->var_values[VAR_N] = 0.0; + select->var_values[VAR_SELECTED_N] = 0.0; + + select->var_values[VAR_TB] = av_q2d(inlink->time_base); + + select->var_values[VAR_PREV_PTS] = NAN; + select->var_values[VAR_PREV_SELECTED_PTS] = NAN; + select->var_values[VAR_PREV_SELECTED_T] = NAN; + select->var_values[VAR_START_PTS] = NAN; + select->var_values[VAR_START_T] = NAN; + + select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I; + select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P; + select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B; + select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI; + select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP; + + select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P; + select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T; + select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B; + + select->var_values[VAR_PICT_TYPE] = NAN; + select->var_values[VAR_INTERLACE_TYPE] = NAN; + select->var_values[VAR_SCENE] = NAN; + select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN; + select->var_values[VAR_SAMPLES_N] = NAN; + + select->var_values[VAR_SAMPLE_RATE] = + inlink->type == AVMEDIA_TYPE_AUDIO ? 
inlink->sample_rate : NAN; + +#if CONFIG_AVCODEC + if (select->do_scene_detect) { + select->avctx = avcodec_alloc_context3(NULL); + if (!select->avctx) + return AVERROR(ENOMEM); + dsputil_init(&select->c, select->avctx); + } +#endif + return 0; +} + +#if CONFIG_AVCODEC +static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + double ret = 0; + SelectContext *select = ctx->priv; + AVFilterBufferRef *prev_picref = select->prev_picref; + + if (prev_picref && + picref->video->h == prev_picref->video->h && + picref->video->w == prev_picref->video->w && + picref->linesize[0] == prev_picref->linesize[0]) { + int x, y, nb_sad = 0; + int64_t sad = 0; + double mafd, diff; + uint8_t *p1 = picref->data[0]; + uint8_t *p2 = prev_picref->data[0]; + const int linesize = picref->linesize[0]; + + for (y = 0; y < picref->video->h - 8; y += 8) { + for (x = 0; x < picref->video->w*3 - 8; x += 8) { + sad += select->c.sad[1](select, p1 + x, p2 + x, + linesize, 8); + nb_sad += 8 * 8; + } + p1 += 8 * linesize; + p2 += 8 * linesize; + } + emms_c(); + mafd = nb_sad ? sad / nb_sad : 0; + diff = fabs(mafd - select->prev_mafd); + ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1); + select->prev_mafd = mafd; + avfilter_unref_buffer(prev_picref); + } + select->prev_picref = avfilter_ref_buffer(picref, ~0); + return ret; +} +#endif + +#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d)) +#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? 
NAN : (double)(ts)) + +static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref) +{ + SelectContext *select = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + double res; + + if (isnan(select->var_values[VAR_START_PTS])) + select->var_values[VAR_START_PTS] = TS2D(ref->pts); + if (isnan(select->var_values[VAR_START_T])) + select->var_values[VAR_START_T] = TS2D(ref->pts) * av_q2d(inlink->time_base); + + select->var_values[VAR_PTS] = TS2D(ref->pts); + select->var_values[VAR_T ] = TS2D(ref->pts) * av_q2d(inlink->time_base); + select->var_values[VAR_POS] = ref->pos == -1 ? NAN : ref->pos; + select->var_values[VAR_PREV_PTS] = TS2D(ref ->pts); + + switch (inlink->type) { + case AVMEDIA_TYPE_AUDIO: + select->var_values[VAR_SAMPLES_N] = ref->audio->nb_samples; + break; + + case AVMEDIA_TYPE_VIDEO: + select->var_values[VAR_INTERLACE_TYPE] = + !ref->video->interlaced ? INTERLACE_TYPE_P : + ref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B; + select->var_values[VAR_PICT_TYPE] = ref->video->pict_type; +#if CONFIG_AVCODEC + if (select->do_scene_detect) { + char buf[32]; + select->var_values[VAR_SCENE] = get_scene_score(ctx, ref); + // TODO: document metadata + snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]); + av_dict_set(&ref->metadata, "lavfi.scene_score", buf, 0); + } +#endif + break; + } + + res = av_expr_eval(select->expr, select->var_values, NULL); + av_log(inlink->dst, AV_LOG_DEBUG, + "n:%d pts:%d t:%f pos:%d key:%d", + (int)select->var_values[VAR_N], + (int)select->var_values[VAR_PTS], + select->var_values[VAR_T], + (int)select->var_values[VAR_POS], + (int)select->var_values[VAR_KEY]); + + switch (inlink->type) { + case AVMEDIA_TYPE_VIDEO: + av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f", + select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_P ? 'P' : + select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_T ? 'T' : + select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 
'B' : '?', + av_get_picture_type_char(select->var_values[VAR_PICT_TYPE]), + select->var_values[VAR_SCENE]); + break; + case AVMEDIA_TYPE_AUDIO: + av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%d", + (int)select->var_values[VAR_SAMPLES_N], + (int)select->var_values[VAR_CONSUMED_SAMPLES_N]); + break; + } + + av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f\n", res); + + if (res) { + select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N]; + select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS]; + select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T]; + select->var_values[VAR_SELECTED_N] += 1.0; + if (inlink->type == AVMEDIA_TYPE_AUDIO) + select->var_values[VAR_CONSUMED_SAMPLES_N] += ref->audio->nb_samples; + } + + select->var_values[VAR_N] += 1.0; + + return res; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) +{ + SelectContext *select = inlink->dst->priv; + + select->select = select_frame(inlink->dst, frame); + if (select->select) + return ff_filter_frame(inlink->dst->outputs[0], frame); + + avfilter_unref_bufferp(&frame); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + SelectContext *select = ctx->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + select->select = 0; + + do { + int ret = ff_request_frame(inlink); + if (ret < 0) + return ret; + } while (!select->select); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + SelectContext *select = ctx->priv; + + av_expr_free(select->expr); + select->expr = NULL; + av_opt_free(select); + +#if CONFIG_AVCODEC + if (select->do_scene_detect) { + avfilter_unref_bufferp(&select->prev_picref); + if (select->avctx) { + avcodec_close(select->avctx); + av_freep(&select->avctx); + } + } +#endif +} + +static int query_formats(AVFilterContext *ctx) +{ + SelectContext *select = ctx->priv; + + if (!select->do_scene_detect) { + return 
ff_default_query_formats(ctx); + } else { + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_NONE + }; + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + } + return 0; +} + +#if CONFIG_ASELECT_FILTER + +#define aselect_options options +AVFILTER_DEFINE_CLASS(aselect); + +static av_cold int aselect_init(AVFilterContext *ctx, const char *args) +{ + SelectContext *select = ctx->priv; + int ret; + + if ((ret = init(ctx, args, &aselect_class)) < 0) + return ret; + + if (select->do_scene_detect) { + av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n"); + return AVERROR(EINVAL); + } + + return 0; +} + +static const AVFilterPad avfilter_af_aselect_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .config_props = config_input, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_aselect_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_aselect = { + .name = "aselect", + .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."), + .init = aselect_init, + .uninit = uninit, + .priv_size = sizeof(SelectContext), + .inputs = avfilter_af_aselect_inputs, + .outputs = avfilter_af_aselect_outputs, + .priv_class = &aselect_class, +}; +#endif /* CONFIG_ASELECT_FILTER */ + +#if CONFIG_SELECT_FILTER + +#define select_options options +AVFILTER_DEFINE_CLASS(select); + +static av_cold int select_init(AVFilterContext *ctx, const char *args) +{ + SelectContext *select = ctx->priv; + int ret; + + if ((ret = init(ctx, args, &select_class)) < 0) + return ret; + + if (select->do_scene_detect && !CONFIG_AVCODEC) { + av_log(ctx, AV_LOG_ERROR, "Scene detection is not available without libavcodec.\n"); + return AVERROR(EINVAL); + } + + return 0; +} + +static const AVFilterPad avfilter_vf_select_inputs[] = { + { + .name = "default", 
+ .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .min_perms = AV_PERM_PRESERVE, + .config_props = config_input, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_vf_select_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_select = { + .name = "select", + .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."), + .init = select_init, + .uninit = uninit, + .query_formats = query_formats, + + .priv_size = sizeof(SelectContext), + + .inputs = avfilter_vf_select_inputs, + .outputs = avfilter_vf_select_outputs, + .priv_class = &select_class, +}; +#endif /* CONFIG_SELECT_FILTER */ diff --git a/libavfilter/f_sendcmd.c b/libavfilter/f_sendcmd.c new file mode 100644 index 0000000..b5cf01c --- /dev/null +++ b/libavfilter/f_sendcmd.c @@ -0,0 +1,598 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * send commands filter + */ + +#include "libavutil/avstring.h" +#include "libavutil/bprint.h" +#include "libavutil/file.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "avfilter.h" +#include "internal.h" +#include "avfiltergraph.h" +#include "audio.h" +#include "video.h" + +#define COMMAND_FLAG_ENTER 1 +#define COMMAND_FLAG_LEAVE 2 + +static inline char *make_command_flags_str(AVBPrint *pbuf, int flags) +{ + const char *flag_strings[] = { "enter", "leave" }; + int i, is_first = 1; + + av_bprint_init(pbuf, 0, AV_BPRINT_SIZE_AUTOMATIC); + for (i = 0; i < FF_ARRAY_ELEMS(flag_strings); i++) { + if (flags & 1<<i) { + if (!is_first) + av_bprint_chars(pbuf, '+', 1); + av_bprintf(pbuf, "%s", flag_strings[i]); + is_first = 0; + } + } + + return pbuf->str; +} + +typedef struct { + int flags; + char *target, *command, *arg; + int index; +} Command; + +typedef struct { + int64_t start_ts; ///< start timestamp expressed as microseconds units + int64_t end_ts; ///< end timestamp expressed as microseconds units + int index; ///< unique index for these interval commands + Command *commands; + int nb_commands; + int enabled; ///< current time detected inside this interval +} Interval; + +typedef struct { + const AVClass *class; + Interval *intervals; + int nb_intervals; + + char *commands_filename; + char *commands_str; +} SendCmdContext; + +#define OFFSET(x) offsetof(SendCmdContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM +static const AVOption options[] = { + { "commands", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "c", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "filename", "set commands file", 
OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "f", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + {NULL}, +}; + +#define SPACES " \f\t\n\r" + +static void skip_comments(const char **buf) +{ + while (**buf) { + /* skip leading spaces */ + *buf += strspn(*buf, SPACES); + if (**buf != '#') + break; + + (*buf)++; + + /* skip comment until the end of line */ + *buf += strcspn(*buf, "\n"); + if (**buf) + (*buf)++; + } +} + +#define COMMAND_DELIMS " \f\t\n\r,;" + +static int parse_command(Command *cmd, int cmd_count, int interval_count, + const char **buf, void *log_ctx) +{ + int ret; + + memset(cmd, 0, sizeof(Command)); + cmd->index = cmd_count; + + /* format: [FLAGS] target command arg */ + *buf += strspn(*buf, SPACES); + + /* parse flags */ + if (**buf == '[') { + (*buf)++; /* skip "[" */ + + while (**buf) { + int len = strcspn(*buf, "|+]"); + + if (!strncmp(*buf, "enter", strlen("enter"))) cmd->flags |= COMMAND_FLAG_ENTER; + else if (!strncmp(*buf, "leave", strlen("leave"))) cmd->flags |= COMMAND_FLAG_LEAVE; + else { + char flag_buf[64]; + av_strlcpy(flag_buf, *buf, sizeof(flag_buf)); + av_log(log_ctx, AV_LOG_ERROR, + "Unknown flag '%s' in in interval #%d, command #%d\n", + flag_buf, interval_count, cmd_count); + return AVERROR(EINVAL); + } + *buf += len; + if (**buf == ']') + break; + if (!strspn(*buf, "+|")) { + av_log(log_ctx, AV_LOG_ERROR, + "Invalid flags char '%c' in interval #%d, command #%d\n", + **buf, interval_count, cmd_count); + return AVERROR(EINVAL); + } + if (**buf) + (*buf)++; + } + + if (**buf != ']') { + av_log(log_ctx, AV_LOG_ERROR, + "Missing flag terminator or extraneous data found at the end of flags " + "in interval #%d, command #%d\n", interval_count, cmd_count); + return AVERROR(EINVAL); + } + (*buf)++; /* skip "]" */ + } else { + cmd->flags = COMMAND_FLAG_ENTER; + } + + *buf += strspn(*buf, SPACES); + cmd->target = av_get_token(buf, COMMAND_DELIMS); + if 
(!cmd->target || !cmd->target[0]) { + av_log(log_ctx, AV_LOG_ERROR, + "No target specified in in interval #%d, command #%d\n", + interval_count, cmd_count); + ret = AVERROR(EINVAL); + goto fail; + } + + *buf += strspn(*buf, SPACES); + cmd->command = av_get_token(buf, COMMAND_DELIMS); + if (!cmd->command || !cmd->command[0]) { + av_log(log_ctx, AV_LOG_ERROR, + "No command specified in in interval #%d, command #%d\n", + interval_count, cmd_count); + ret = AVERROR(EINVAL); + goto fail; + } + + *buf += strspn(*buf, SPACES); + cmd->arg = av_get_token(buf, COMMAND_DELIMS); + + return 1; + +fail: + av_freep(&cmd->target); + av_freep(&cmd->command); + av_freep(&cmd->arg); + return ret; +} + +static int parse_commands(Command **cmds, int *nb_cmds, int interval_count, + const char **buf, void *log_ctx) +{ + int cmd_count = 0; + int ret, n = 0; + AVBPrint pbuf; + + *cmds = NULL; + *nb_cmds = 0; + + while (**buf) { + Command cmd; + + if ((ret = parse_command(&cmd, cmd_count, interval_count, buf, log_ctx)) < 0) + return ret; + cmd_count++; + + /* (re)allocate commands array if required */ + if (*nb_cmds == n) { + n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */ + *cmds = av_realloc_f(*cmds, n, 2*sizeof(Command)); + if (!*cmds) { + av_log(log_ctx, AV_LOG_ERROR, + "Could not (re)allocate command array\n"); + return AVERROR(ENOMEM); + } + } + + (*cmds)[(*nb_cmds)++] = cmd; + + *buf += strspn(*buf, SPACES); + if (**buf && **buf != ';' && **buf != ',') { + av_log(log_ctx, AV_LOG_ERROR, + "Missing separator or extraneous data found at the end of " + "interval #%d, in command #%d\n", + interval_count, cmd_count); + av_log(log_ctx, AV_LOG_ERROR, + "Command was parsed as: flags:[%s] target:%s command:%s arg:%s\n", + make_command_flags_str(&pbuf, cmd.flags), cmd.target, cmd.command, cmd.arg); + return AVERROR(EINVAL); + } + if (**buf == ';') + break; + if (**buf == ',') + (*buf)++; + } + + return 0; +} + +#define DELIMS " \f\t\n\r,;" + +static int 
parse_interval(Interval *interval, int interval_count, + const char **buf, void *log_ctx) +{ + char *intervalstr; + int ret; + + *buf += strspn(*buf, SPACES); + if (!**buf) + return 0; + + /* reset data */ + memset(interval, 0, sizeof(Interval)); + interval->index = interval_count; + + /* format: INTERVAL COMMANDS */ + + /* parse interval */ + intervalstr = av_get_token(buf, DELIMS); + if (intervalstr && intervalstr[0]) { + char *start, *end; + + start = av_strtok(intervalstr, "-", &end); + if ((ret = av_parse_time(&interval->start_ts, start, 1)) < 0) { + av_log(log_ctx, AV_LOG_ERROR, + "Invalid start time specification '%s' in interval #%d\n", + start, interval_count); + goto end; + } + + if (end) { + if ((ret = av_parse_time(&interval->end_ts, end, 1)) < 0) { + av_log(log_ctx, AV_LOG_ERROR, + "Invalid end time specification '%s' in interval #%d\n", + end, interval_count); + goto end; + } + } else { + interval->end_ts = INT64_MAX; + } + if (interval->end_ts < interval->start_ts) { + av_log(log_ctx, AV_LOG_ERROR, + "Invalid end time '%s' in interval #%d: " + "cannot be lesser than start time '%s'\n", + end, interval_count, start); + ret = AVERROR(EINVAL); + goto end; + } + } else { + av_log(log_ctx, AV_LOG_ERROR, + "No interval specified for interval #%d\n", interval_count); + ret = AVERROR(EINVAL); + goto end; + } + + /* parse commands */ + ret = parse_commands(&interval->commands, &interval->nb_commands, + interval_count, buf, log_ctx); + +end: + av_free(intervalstr); + return ret; +} + +static int parse_intervals(Interval **intervals, int *nb_intervals, + const char *buf, void *log_ctx) +{ + int interval_count = 0; + int ret, n = 0; + + *intervals = NULL; + *nb_intervals = 0; + + while (1) { + Interval interval; + + skip_comments(&buf); + if (!(*buf)) + break; + + if ((ret = parse_interval(&interval, interval_count, &buf, log_ctx)) < 0) + return ret; + + buf += strspn(buf, SPACES); + if (*buf) { + if (*buf != ';') { + av_log(log_ctx, AV_LOG_ERROR, + "Missing 
terminator or extraneous data found at the end of interval #%d\n", + interval_count); + return AVERROR(EINVAL); + } + buf++; /* skip ';' */ + } + interval_count++; + + /* (re)allocate commands array if required */ + if (*nb_intervals == n) { + n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */ + *intervals = av_realloc_f(*intervals, n, 2*sizeof(Interval)); + if (!*intervals) { + av_log(log_ctx, AV_LOG_ERROR, + "Could not (re)allocate intervals array\n"); + return AVERROR(ENOMEM); + } + } + + (*intervals)[(*nb_intervals)++] = interval; + } + + return 0; +} + +static int cmp_intervals(const void *a, const void *b) +{ + const Interval *i1 = a; + const Interval *i2 = b; + int64_t ts_diff = i1->start_ts - i2->start_ts; + int ret; + + ret = ts_diff > 0 ? 1 : ts_diff < 0 ? -1 : 0; + return ret == 0 ? i1->index - i2->index : ret; +} + +static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class) +{ + SendCmdContext *sendcmd = ctx->priv; + int ret, i, j; + + sendcmd->class = class; + av_opt_set_defaults(sendcmd); + + if ((ret = av_set_options_string(sendcmd, args, "=", ":")) < 0) + return ret; + + if (sendcmd->commands_filename && sendcmd->commands_str) { + av_log(ctx, AV_LOG_ERROR, + "Only one of the filename or commands options must be specified\n"); + return AVERROR(EINVAL); + } + + if (sendcmd->commands_filename) { + uint8_t *file_buf, *buf; + size_t file_bufsize; + ret = av_file_map(sendcmd->commands_filename, + &file_buf, &file_bufsize, 0, ctx); + if (ret < 0) + return ret; + + /* create a 0-terminated string based on the read file */ + buf = av_malloc(file_bufsize + 1); + if (!buf) { + av_file_unmap(file_buf, file_bufsize); + return AVERROR(ENOMEM); + } + memcpy(buf, file_buf, file_bufsize); + buf[file_bufsize] = 0; + av_file_unmap(file_buf, file_bufsize); + sendcmd->commands_str = buf; + } + + if ((ret = parse_intervals(&sendcmd->intervals, &sendcmd->nb_intervals, + sendcmd->commands_str, ctx)) < 0) + return ret; + + 
qsort(sendcmd->intervals, sendcmd->nb_intervals, sizeof(Interval), cmp_intervals); + + av_log(ctx, AV_LOG_DEBUG, "Parsed commands:\n"); + for (i = 0; i < sendcmd->nb_intervals; i++) { + AVBPrint pbuf; + Interval *interval = &sendcmd->intervals[i]; + av_log(ctx, AV_LOG_VERBOSE, "start_time:%f end_time:%f index:%d\n", + (double)interval->start_ts/1000000, (double)interval->end_ts/1000000, interval->index); + for (j = 0; j < interval->nb_commands; j++) { + Command *cmd = &interval->commands[j]; + av_log(ctx, AV_LOG_VERBOSE, + " [%s] target:%s command:%s arg:%s index:%d\n", + make_command_flags_str(&pbuf, cmd->flags), cmd->target, cmd->command, cmd->arg, cmd->index); + } + } + + return 0; +} + +static void av_cold uninit(AVFilterContext *ctx) +{ + SendCmdContext *sendcmd = ctx->priv; + int i, j; + + av_opt_free(sendcmd); + + for (i = 0; i < sendcmd->nb_intervals; i++) { + Interval *interval = &sendcmd->intervals[i]; + for (j = 0; j < interval->nb_commands; j++) { + Command *cmd = &interval->commands[j]; + av_free(cmd->target); + av_free(cmd->command); + av_free(cmd->arg); + } + av_free(interval->commands); + } + av_freep(&sendcmd->intervals); +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) +{ + AVFilterContext *ctx = inlink->dst; + SendCmdContext *sendcmd = ctx->priv; + int64_t ts; + int i, j, ret; + + if (ref->pts == AV_NOPTS_VALUE) + goto end; + + ts = av_rescale_q(ref->pts, inlink->time_base, AV_TIME_BASE_Q); + +#define WITHIN_INTERVAL(ts, start_ts, end_ts) ((ts) >= (start_ts) && (ts) < (end_ts)) + + for (i = 0; i < sendcmd->nb_intervals; i++) { + Interval *interval = &sendcmd->intervals[i]; + int flags = 0; + + if (!interval->enabled && WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) { + flags += COMMAND_FLAG_ENTER; + interval->enabled = 1; + } + if (interval->enabled && !WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) { + flags += COMMAND_FLAG_LEAVE; + interval->enabled = 0; + } + + if (flags) { + AVBPrint pbuf; + 
av_log(ctx, AV_LOG_VERBOSE, + "[%s] interval #%d start_ts:%f end_ts:%f ts:%f\n", + make_command_flags_str(&pbuf, flags), interval->index, + (double)interval->start_ts/1000000, (double)interval->end_ts/1000000, + (double)ts/1000000); + + for (j = 0; flags && j < interval->nb_commands; j++) { + Command *cmd = &interval->commands[j]; + char buf[1024]; + + if (cmd->flags & flags) { + av_log(ctx, AV_LOG_VERBOSE, + "Processing command #%d target:%s command:%s arg:%s\n", + cmd->index, cmd->target, cmd->command, cmd->arg); + ret = avfilter_graph_send_command(inlink->graph, + cmd->target, cmd->command, cmd->arg, + buf, sizeof(buf), + AVFILTER_CMD_FLAG_ONE); + av_log(ctx, AV_LOG_VERBOSE, + "Command reply for command #%d: ret:%s res:%s\n", + cmd->index, av_err2str(ret), buf); + } + } + } + } + +end: + switch (inlink->type) { + case AVMEDIA_TYPE_VIDEO: + case AVMEDIA_TYPE_AUDIO: + return ff_filter_frame(inlink->dst->outputs[0], ref); + } + + return AVERROR(ENOSYS); +} + +#if CONFIG_SENDCMD_FILTER + +#define sendcmd_options options +AVFILTER_DEFINE_CLASS(sendcmd); + +static av_cold int sendcmd_init(AVFilterContext *ctx, const char *args) +{ + return init(ctx, args, &sendcmd_class); +} + +static const AVFilterPad sendcmd_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad sendcmd_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_sendcmd = { + .name = "sendcmd", + .description = NULL_IF_CONFIG_SMALL("Send commands to filters."), + + .init = sendcmd_init, + .uninit = uninit, + .priv_size = sizeof(SendCmdContext), + .inputs = sendcmd_inputs, + .outputs = sendcmd_outputs, + .priv_class = &sendcmd_class, +}; + +#endif + +#if CONFIG_ASENDCMD_FILTER + +#define asendcmd_options options +AVFILTER_DEFINE_CLASS(asendcmd); + +static av_cold int asendcmd_init(AVFilterContext *ctx, const 
char *args) +{ + return init(ctx, args, &asendcmd_class); +} + +static const AVFilterPad asendcmd_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad asendcmd_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_asendcmd = { + .name = "asendcmd", + .description = NULL_IF_CONFIG_SMALL("Send commands to filters."), + + .init = asendcmd_init, + .uninit = uninit, + .priv_size = sizeof(SendCmdContext), + .inputs = asendcmd_inputs, + .outputs = asendcmd_outputs, + .priv_class = &asendcmd_class, +}; + +#endif diff --git a/libavfilter/f_setpts.c b/libavfilter/f_setpts.c new file mode 100644 index 0000000..1c2edb8 --- /dev/null +++ b/libavfilter/f_setpts.c @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2010 Stefano Sabatini + * Copyright (c) 2008 Victor Paesa + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * video presentation timestamp (PTS) modification filter + */ + +#include "libavutil/eval.h" +#include "libavutil/internal.h" +#include "libavutil/mathematics.h" +#include "libavutil/time.h" +#include "avfilter.h" +#include "internal.h" +#include "audio.h" +#include "video.h" + +static const char *const var_names[] = { + "FRAME_RATE", ///< defined only for constant frame-rate video + "INTERLACED", ///< tell if the current frame is interlaced + "N", ///< frame number (starting at zero) + "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio) + "NB_SAMPLES", ///< number of samples in the current frame (only audio) + "POS", ///< original position in the file of the frame + "PREV_INPTS", ///< previous input PTS + "PREV_INT", ///< previous input time in seconds + "PREV_OUTPTS", ///< previous output PTS + "PREV_OUTT", ///< previous output time in seconds + "PTS", ///< original pts in the file of the frame + "SAMPLE_RATE", ///< sample rate (only audio) + "STARTPTS", ///< PTS at start of movie + "STARTT", ///< time at start of movie + "T", ///< original time in the file of the frame + "TB", ///< timebase + "RTCTIME", ///< wallclock (RTC) time in micro seconds + "RTCSTART", ///< wallclock (RTC) time at the start of the movie in micro seconds + NULL +}; + +enum var_name { + VAR_FRAME_RATE, + VAR_INTERLACED, + VAR_N, + VAR_NB_CONSUMED_SAMPLES, + VAR_NB_SAMPLES, + VAR_POS, + VAR_PREV_INPTS, + VAR_PREV_INT, + VAR_PREV_OUTPTS, + VAR_PREV_OUTT, + VAR_PTS, + VAR_SAMPLE_RATE, + VAR_STARTPTS, + VAR_STARTT, + VAR_T, + VAR_TB, + VAR_RTCTIME, + VAR_RTCSTART, + VAR_VARS_NB +}; + +typedef struct { + AVExpr *expr; + double var_values[VAR_VARS_NB]; + enum AVMediaType type; +} SetPTSContext; + +static av_cold int 
init(AVFilterContext *ctx, const char *args) +{ + SetPTSContext *setpts = ctx->priv; + int ret; + + if ((ret = av_expr_parse(&setpts->expr, args ? args : "PTS", + var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", args); + return ret; + } + + setpts->var_values[VAR_N ] = 0.0; + setpts->var_values[VAR_PREV_INPTS ] = setpts->var_values[VAR_PREV_INT ] = NAN; + setpts->var_values[VAR_PREV_OUTPTS] = setpts->var_values[VAR_PREV_OUTT] = NAN; + setpts->var_values[VAR_STARTPTS ] = setpts->var_values[VAR_STARTT ] = NAN; + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + SetPTSContext *setpts = ctx->priv; + + setpts->type = inlink->type; + setpts->var_values[VAR_TB] = av_q2d(inlink->time_base); + setpts->var_values[VAR_RTCSTART] = av_gettime(); + + setpts->var_values[VAR_SAMPLE_RATE] = + setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN; + + setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ? + av_q2d(inlink->frame_rate) : NAN; + + av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n", + setpts->var_values[VAR_TB], + setpts->var_values[VAR_FRAME_RATE], + setpts->var_values[VAR_SAMPLE_RATE]); + return 0; +} + +#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d)) +#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) +#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? 
NAN : (double)(ts)*av_q2d(tb)) + +#define BUF_SIZE 64 + +static inline char *double2int64str(char *buf, double v) +{ + if (isnan(v)) snprintf(buf, BUF_SIZE, "nan"); + else snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v); + return buf; +} + +#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v) + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) +{ + SetPTSContext *setpts = inlink->dst->priv; + int64_t in_pts = frame->pts; + double d; + + if (isnan(setpts->var_values[VAR_STARTPTS])) { + setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts); + setpts->var_values[VAR_STARTT ] = TS2T(frame->pts, inlink->time_base); + } + setpts->var_values[VAR_PTS ] = TS2D(frame->pts); + setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base); + setpts->var_values[VAR_POS ] = frame->pos == -1 ? NAN : frame->pos; + setpts->var_values[VAR_RTCTIME ] = av_gettime(); + + switch (inlink->type) { + case AVMEDIA_TYPE_VIDEO: + setpts->var_values[VAR_INTERLACED] = frame->video->interlaced; + break; + + case AVMEDIA_TYPE_AUDIO: + setpts->var_values[VAR_NB_SAMPLES] = frame->audio->nb_samples; + break; + } + + d = av_expr_eval(setpts->expr, setpts->var_values, NULL); + + av_log(inlink->dst, AV_LOG_DEBUG, + "N:%"PRId64" PTS:%s T:%f POS:%s", + (int64_t)setpts->var_values[VAR_N], + d2istr(setpts->var_values[VAR_PTS]), + setpts->var_values[VAR_T], + d2istr(setpts->var_values[VAR_POS])); + switch (inlink->type) { + case AVMEDIA_TYPE_VIDEO: + av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64, + (int64_t)setpts->var_values[VAR_INTERLACED]); + break; + case AVMEDIA_TYPE_AUDIO: + av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64, + (int64_t)setpts->var_values[VAR_NB_SAMPLES], + (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]); + break; + } + av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base)); + + frame->pts = D2TS(d); + + setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts); + 
setpts->var_values[VAR_PREV_INT ] = TS2T(in_pts, inlink->time_base); + setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts); + setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base); + setpts->var_values[VAR_N] += 1.0; + if (setpts->type == AVMEDIA_TYPE_AUDIO) { + setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->audio->nb_samples; + } + return ff_filter_frame(inlink->dst->outputs[0], frame); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + SetPTSContext *setpts = ctx->priv; + av_expr_free(setpts->expr); + setpts->expr = NULL; +} + +#if CONFIG_ASETPTS_FILTER +static const AVFilterPad avfilter_af_asetpts_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .config_props = config_input, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_asetpts_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +AVFilter avfilter_af_asetpts = { + .name = "asetpts", + .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."), + .init = init, + .uninit = uninit, + .priv_size = sizeof(SetPTSContext), + .inputs = avfilter_af_asetpts_inputs, + .outputs = avfilter_af_asetpts_outputs, +}; +#endif /* CONFIG_ASETPTS_FILTER */ + +#if CONFIG_SETPTS_FILTER +static const AVFilterPad avfilter_vf_setpts_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .config_props = config_input, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_vf_setpts_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_setpts = { + .name = "setpts", + .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."), + .init = init, + .uninit = uninit, + + .priv_size = sizeof(SetPTSContext), + + .inputs = avfilter_vf_setpts_inputs, + .outputs = 
avfilter_vf_setpts_outputs, +}; +#endif /* CONFIG_SETPTS_FILTER */ diff --git a/libavfilter/vf_settb.c b/libavfilter/f_settb.c index a572072..99ea7a7 100644 --- a/libavfilter/vf_settb.c +++ b/libavfilter/f_settb.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2010 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -33,23 +33,20 @@ #include "libavutil/rational.h" #include "avfilter.h" #include "internal.h" +#include "audio.h" #include "video.h" static const char *const var_names[] = { - "E", - "PHI", - "PI", "AVTB", /* default timebase 1/AV_TIME_BASE */ "intb", /* input timebase */ + "sr", /* sample rate */ NULL }; enum var_name { - VAR_E, - VAR_PHI, - VAR_PI, VAR_AVTB, VAR_INTB, + VAR_SR, VAR_VARS_NB }; @@ -78,11 +75,9 @@ static int config_output_props(AVFilterLink *outlink) int ret; double res; - settb->var_values[VAR_E] = M_E; - settb->var_values[VAR_PHI] = M_PHI; - settb->var_values[VAR_PI] = M_PI; settb->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q); settb->var_values[VAR_INTB] = av_q2d(inlink->time_base); + settb->var_values[VAR_SR] = 
inlink->sample_rate; outlink->w = inlink->w; outlink->h = inlink->h; @@ -124,6 +119,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) return ff_filter_frame(outlink, frame); } +#if CONFIG_SETTB_FILTER static const AVFilterPad avfilter_vf_settb_inputs[] = { { .name = "default", @@ -145,12 +141,43 @@ static const AVFilterPad avfilter_vf_settb_outputs[] = { AVFilter avfilter_vf_settb = { .name = "settb", - .description = NULL_IF_CONFIG_SMALL("Set timebase for the output link."), + .description = NULL_IF_CONFIG_SMALL("Set timebase for the video output link."), .init = init, .priv_size = sizeof(SetTBContext), .inputs = avfilter_vf_settb_inputs, - .outputs = avfilter_vf_settb_outputs, }; +#endif + +#if CONFIG_ASETTB_FILTER +static const AVFilterPad avfilter_af_asettb_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .get_audio_buffer = ff_null_get_audio_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad avfilter_af_asettb_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = config_output_props, + }, + { NULL } +}; + +AVFilter avfilter_af_asettb = { + .name = "asettb", + .description = NULL_IF_CONFIG_SMALL("Set timebase for the audio output link."), + .init = init, + + .priv_size = sizeof(SetTBContext), + .inputs = avfilter_af_asettb_inputs, + .outputs = avfilter_af_asettb_outputs, +}; +#endif diff --git a/libavfilter/fifo.c b/libavfilter/fifo.c index 88c44fe..9597fb3 100644 --- a/libavfilter/fifo.c +++ b/libavfilter/fifo.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -228,6 +228,7 @@ static int request_frame(AVFilterLink *outlink) if (!fifo->root.next) { if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0) return ret; + av_assert0(fifo->root.next); } if (outlink->request_samples) { @@ -246,7 +247,7 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = { .type = AVMEDIA_TYPE_VIDEO, .get_video_buffer = ff_null_get_video_buffer, .filter_frame = add_to_queue, - .rej_perms = AV_PERM_REUSE2, + .min_perms = AV_PERM_PRESERVE, }, { NULL } }; @@ -279,7 +280,7 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = { .type = AVMEDIA_TYPE_AUDIO, .get_audio_buffer = ff_null_get_audio_buffer, .filter_frame = add_to_queue, - .rej_perms = AV_PERM_REUSE2, + .min_perms = AV_PERM_PRESERVE, }, { NULL } }; diff --git a/libavfilter/filtfmts.c b/libavfilter/filtfmts.c index 480b277..7286729 100644 --- a/libavfilter/filtfmts.c +++ b/libavfilter/filtfmts.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2009 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -22,26 +22,66 @@ #include "libavformat/avformat.h" #include "libavutil/pixdesc.h" +#include "libavutil/samplefmt.h" #include "libavfilter/avfilter.h" #include "libavfilter/formats.h" +static void print_formats(AVFilterContext *filter_ctx) +{ + int i, j; + +#define PRINT_FMTS(inout, outin, INOUT) \ + for (i = 0; i < filter_ctx->inout##put_count; i++) { \ + if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) { \ + AVFilterFormats *fmts = \ + filter_ctx->inout##puts[i]->outin##_formats; \ + for (j = 0; j < fmts->format_count; j++) \ + if(av_get_pix_fmt_name(fmts->formats[j])) \ + printf(#INOUT "PUT[%d] %s: fmt:%s\n", \ + i, filter_ctx->filter->inout##puts[i].name, \ + av_get_pix_fmt_name(fmts->formats[j])); \ + } else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \ + AVFilterFormats *fmts; \ + AVFilterChannelLayouts *layouts; \ + \ + fmts = filter_ctx->inout##puts[i]->outin##_formats; \ + for (j = 0; j < fmts->format_count; j++) \ + printf(#INOUT "PUT[%d] %s: fmt:%s\n", \ + i, filter_ctx->filter->inout##puts[i].name, \ + av_get_sample_fmt_name(fmts->formats[j])); \ + \ + layouts = filter_ctx->inout##puts[i]->outin##_channel_layouts; \ + for (j = 0; j < layouts->nb_channel_layouts; j++) { \ + char buf[256]; \ + av_get_channel_layout_string(buf, sizeof(buf), -1, \ + layouts->channel_layouts[j]); \ + printf(#INOUT "PUT[%d] %s: chlayout:%s\n", \ + i, 
filter_ctx->filter->inout##puts[i].name, buf); \ + } \ + } \ + } \ + + PRINT_FMTS(in, out, IN); + PRINT_FMTS(out, in, OUT); +} + int main(int argc, char **argv) { AVFilter *filter; AVFilterContext *filter_ctx; const char *filter_name; const char *filter_args = NULL; - int i, j; + int i; av_log_set_level(AV_LOG_DEBUG); - if (!argv[1]) { + if (argc < 2) { fprintf(stderr, "Missing filter name as argument\n"); return 1; } filter_name = argv[1]; - if (argv[2]) + if (argc > 2) filter_args = argv[2]; avfilter_register_all(); @@ -80,23 +120,7 @@ int main(int argc, char **argv) else ff_default_query_formats(filter_ctx); - /* print the supported formats in input */ - for (i = 0; i < filter_ctx->input_count; i++) { - AVFilterFormats *fmts = filter_ctx->inputs[i]->out_formats; - for (j = 0; j < fmts->format_count; j++) - printf("INPUT[%d] %s: %s\n", - i, filter_ctx->filter->inputs[i].name, - av_get_pix_fmt_name(fmts->formats[j])); - } - - /* print the supported formats in output */ - for (i = 0; i < filter_ctx->output_count; i++) { - AVFilterFormats *fmts = filter_ctx->outputs[i]->in_formats; - for (j = 0; j < fmts->format_count; j++) - printf("OUTPUT[%d] %s: %s\n", - i, filter_ctx->filter->outputs[i].name, - av_get_pix_fmt_name(fmts->formats[j])); - } + print_formats(filter_ctx); avfilter_free(filter_ctx); fflush(stdout); diff --git a/libavfilter/formats.c b/libavfilter/formats.c index 3b890d2..0284db2 100644 --- a/libavfilter/formats.c +++ b/libavfilter/formats.c @@ -2,29 +2,35 @@ * Filter layer - format negotiation * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" #include "libavutil/common.h" +#include "libavutil/eval.h" #include "libavutil/pixdesc.h" +#include "libavutil/parseutils.h" #include "avfilter.h" #include "internal.h" #include "formats.h" +#define KNOWN(l) (!FF_LAYOUT2COUNT(l)) /* for readability */ + /** * Add all refs from a to ret and destroy a. */ @@ -64,11 +70,17 @@ do { goto fail; \ for (i = 0; i < a->nb; i++) \ for (j = 0; j < b->nb; j++) \ - if (a->fmts[i] == b->fmts[j]) \ + if (a->fmts[i] == b->fmts[j]) { \ + if(k >= FFMIN(a->nb, b->nb)){ \ + av_log(NULL, AV_LOG_ERROR, "Duplicate formats in avfilter_merge_formats() detected\n"); \ + av_free(ret->fmts); \ + av_free(ret); \ + return NULL; \ + } \ ret->fmts[k++] = a->fmts[i]; \ - \ - ret->nb = k; \ + } \ } \ + ret->nb = k; \ /* check that there was at least one common format */ \ if (!ret->nb) \ goto fail; \ @@ -127,21 +139,77 @@ AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a, AVFilterChannelLayouts *b) { AVFilterChannelLayouts *ret = NULL; + unsigned a_all = a->all_layouts + a->all_counts; + unsigned b_all = b->all_layouts + b->all_counts; + int ret_max, ret_nb = 0, i, j, round; if (a == b) return a; - if (a->nb_channel_layouts && b->nb_channel_layouts) { - MERGE_FORMATS(ret, a, b, channel_layouts, nb_channel_layouts, - AVFilterChannelLayouts, fail); - } else if 
(a->nb_channel_layouts) { - MERGE_REF(a, b, channel_layouts, AVFilterChannelLayouts, fail); - ret = a; - } else { + /* Put the most generic set in a, to avoid doing everything twice */ + if (a_all < b_all) { + FFSWAP(AVFilterChannelLayouts *, a, b); + FFSWAP(unsigned, a_all, b_all); + } + if (a_all) { + if (a_all == 1 && !b_all) { + /* keep only known layouts in b; works also for b_all = 1 */ + for (i = j = 0; i < b->nb_channel_layouts; i++) + if (KNOWN(b->channel_layouts[i])) + b->channel_layouts[j++] = b->channel_layouts[i]; + b->nb_channel_layouts = j; + } MERGE_REF(b, a, channel_layouts, AVFilterChannelLayouts, fail); - ret = b; + return b; + } + + ret_max = a->nb_channel_layouts + b->nb_channel_layouts; + if (!(ret = av_mallocz(sizeof(*ret))) || + !(ret->channel_layouts = av_malloc(sizeof(*ret->channel_layouts) * + ret_max))) + goto fail; + + /* a[known] intersect b[known] */ + for (i = 0; i < a->nb_channel_layouts; i++) { + if (!KNOWN(a->channel_layouts[i])) + continue; + for (j = 0; j < b->nb_channel_layouts; j++) { + if (a->channel_layouts[i] == b->channel_layouts[j]) { + ret->channel_layouts[ret_nb++] = a->channel_layouts[i]; + a->channel_layouts[i] = b->channel_layouts[j] = 0; + } + } + } + /* 1st round: a[known] intersect b[generic] + 2nd round: a[generic] intersect b[known] */ + for (round = 0; round < 2; round++) { + for (i = 0; i < a->nb_channel_layouts; i++) { + uint64_t fmt = a->channel_layouts[i], bfmt; + if (!fmt || !KNOWN(fmt)) + continue; + bfmt = FF_COUNT2LAYOUT(av_get_channel_layout_nb_channels(fmt)); + for (j = 0; j < b->nb_channel_layouts; j++) + if (b->channel_layouts[j] == bfmt) + ret->channel_layouts[ret_nb++] = a->channel_layouts[i]; + } + /* 1st round: swap to prepare 2nd round; 2nd round: put it back */ + FFSWAP(AVFilterChannelLayouts *, a, b); + } + /* a[generic] intersect b[generic] */ + for (i = 0; i < a->nb_channel_layouts; i++) { + if (KNOWN(a->channel_layouts[i])) + continue; + for (j = 0; j < b->nb_channel_layouts; j++) + if 
(a->channel_layouts[i] == b->channel_layouts[j]) + ret->channel_layouts[ret_nb++] = a->channel_layouts[i]; } + ret->nb_channel_layouts = ret_nb; + if (!ret->nb_channel_layouts) + goto fail; + MERGE_REF(ret, a, channel_layouts, AVFilterChannelLayouts, fail); + MERGE_REF(ret, b, channel_layouts, AVFilterChannelLayouts, fail); return ret; + fail: if (ret) { av_freep(&ret->refs); @@ -155,26 +223,72 @@ int ff_fmt_is_in(int fmt, const int *fmts) { const int *p; - for (p = fmts; *p != AV_PIX_FMT_NONE; p++) { + for (p = fmts; *p != -1; p++) { if (fmt == *p) return 1; } return 0; } +#define COPY_INT_LIST(list_copy, list, type) { \ + int count = 0; \ + if (list) \ + for (count = 0; list[count] != -1; count++) \ + ; \ + list_copy = av_calloc(count+1, sizeof(type)); \ + if (list_copy) { \ + memcpy(list_copy, list, sizeof(type) * count); \ + list_copy[count] = -1; \ + } \ +} + +int *ff_copy_int_list(const int * const list) +{ + int *ret = NULL; + COPY_INT_LIST(ret, list, int); + return ret; +} + +int64_t *ff_copy_int64_list(const int64_t * const list) +{ + int64_t *ret = NULL; + COPY_INT_LIST(ret, list, int64_t); + return ret; +} + +#define MAKE_FORMAT_LIST(type, field, count_field) \ + type *formats; \ + int count = 0; \ + if (fmts) \ + for (count = 0; fmts[count] != -1; count++) \ + ; \ + formats = av_mallocz(sizeof(*formats)); \ + if (!formats) return NULL; \ + formats->count_field = count; \ + if (count) { \ + formats->field = av_malloc(sizeof(*formats->field)*count); \ + if (!formats->field) { \ + av_free(formats); \ + return NULL; \ + } \ + } + AVFilterFormats *ff_make_format_list(const int *fmts) { - AVFilterFormats *formats; - int count; + MAKE_FORMAT_LIST(AVFilterFormats, formats, format_count); + while (count--) + formats->formats[count] = fmts[count]; - for (count = 0; fmts[count] != -1; count++) - ; + return formats; +} - formats = av_mallocz(sizeof(*formats)); +AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts) +{ + 
MAKE_FORMAT_LIST(AVFilterChannelLayouts, + channel_layouts, nb_channel_layouts); if (count) - formats->formats = av_malloc(sizeof(*formats->formats) * count); - formats->format_count = count; - memcpy(formats->formats, fmts, sizeof(*formats->formats) * count); + memcpy(formats->channel_layouts, fmts, + sizeof(*formats->channel_layouts) * count); return formats; } @@ -193,17 +307,19 @@ do { \ \ (*f)->list = fmts; \ (*f)->list[(*f)->nb++] = fmt; \ - return 0; \ } while (0) -int ff_add_format(AVFilterFormats **avff, int fmt) +int ff_add_format(AVFilterFormats **avff, int64_t fmt) { ADD_FORMAT(avff, fmt, int, formats, format_count); + return 0; } int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout) { + av_assert1(!(*l && (*l)->all_layouts)); ADD_FORMAT(l, channel_layout, uint64_t, channel_layouts, nb_channel_layouts); + return 0; } AVFilterFormats *ff_all_formats(enum AVMediaType type) @@ -223,6 +339,16 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type) return ret; } +const int64_t avfilter_all_channel_layouts[] = { +#include "all_channel_layouts.inc" + -1 +}; + +// AVFilterFormats *avfilter_make_all_channel_layouts(void) +// { +// return avfilter_make_format64_list(avfilter_all_channel_layouts); +// } + AVFilterFormats *ff_planar_sample_fmts(void) { AVFilterFormats *ret = NULL; @@ -244,6 +370,18 @@ AVFilterFormats *ff_all_samplerates(void) AVFilterChannelLayouts *ff_all_channel_layouts(void) { AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret)); + if (!ret) + return NULL; + ret->all_layouts = 1; + return ret; +} + +AVFilterChannelLayouts *ff_all_channel_counts(void) +{ + AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret)); + if (!ret) + return NULL; + ret->all_layouts = ret->all_counts = 1; return ret; } @@ -334,13 +472,13 @@ void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref) int count = 0, i; \ \ for (i = 0; i < ctx->nb_inputs; i++) { \ - if (ctx->inputs[i]) { \ + if (ctx->inputs[i] && 
!ctx->inputs[i]->out_fmts) { \ ref(fmts, &ctx->inputs[i]->out_fmts); \ count++; \ } \ } \ for (i = 0; i < ctx->nb_outputs; i++) { \ - if (ctx->outputs[i]) { \ + if (ctx->outputs[i] && !ctx->outputs[i]->in_fmts) { \ ref(fmts, &ctx->outputs[i]->in_fmts); \ count++; \ } \ @@ -378,7 +516,8 @@ void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats) ff_formats_ref, formats); } -int ff_default_query_formats(AVFilterContext *ctx) +static int default_query_formats_common(AVFilterContext *ctx, + AVFilterChannelLayouts *(layouts)(void)) { enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type : ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type : @@ -386,9 +525,109 @@ int ff_default_query_formats(AVFilterContext *ctx) ff_set_common_formats(ctx, ff_all_formats(type)); if (type == AVMEDIA_TYPE_AUDIO) { - ff_set_common_channel_layouts(ctx, ff_all_channel_layouts()); + ff_set_common_channel_layouts(ctx, layouts()); ff_set_common_samplerates(ctx, ff_all_samplerates()); } return 0; } + +int ff_default_query_formats(AVFilterContext *ctx) +{ + return default_query_formats_common(ctx, ff_all_channel_layouts); +} + +int ff_query_formats_all(AVFilterContext *ctx) +{ + return default_query_formats_common(ctx, ff_all_channel_counts); +} + +/* internal functions for parsing audio format arguments */ + +int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx) +{ + char *tail; + int pix_fmt = av_get_pix_fmt(arg); + if (pix_fmt == AV_PIX_FMT_NONE) { + pix_fmt = strtol(arg, &tail, 0); + if (*tail || (unsigned)pix_fmt >= AV_PIX_FMT_NB) { + av_log(log_ctx, AV_LOG_ERROR, "Invalid pixel format '%s'\n", arg); + return AVERROR(EINVAL); + } + } + *ret = pix_fmt; + return 0; +} + +int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx) +{ + char *tail; + int sfmt = av_get_sample_fmt(arg); + if (sfmt == AV_SAMPLE_FMT_NONE) { + sfmt = strtol(arg, &tail, 0); + if (*tail || (unsigned)sfmt >= AV_SAMPLE_FMT_NB) { + 
av_log(log_ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n", arg); + return AVERROR(EINVAL); + } + } + *ret = sfmt; + return 0; +} + +int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx) +{ + AVRational r; + if(av_parse_ratio(&r, arg, INT_MAX, 0, log_ctx) < 0 ||r.num<=0 ||r.den<=0) { + av_log(log_ctx, AV_LOG_ERROR, "Invalid time base '%s'\n", arg); + return AVERROR(EINVAL); + } + *ret = r; + return 0; +} + +int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx) +{ + char *tail; + double srate = av_strtod(arg, &tail); + if (*tail || srate < 1 || (int)srate != srate || srate > INT_MAX) { + av_log(log_ctx, AV_LOG_ERROR, "Invalid sample rate '%s'\n", arg); + return AVERROR(EINVAL); + } + *ret = srate; + return 0; +} + +int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx) +{ + char *tail; + int64_t chlayout = av_get_channel_layout(arg); + if (chlayout == 0) { + chlayout = strtol(arg, &tail, 10); + if (*tail || chlayout == 0) { + av_log(log_ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n", arg); + return AVERROR(EINVAL); + } + } + *ret = chlayout; + return 0; +} + +#ifdef TEST + +#undef printf + +int main(void) +{ + const int64_t *cl; + char buf[512]; + + for (cl = avfilter_all_channel_layouts; *cl != -1; cl++) { + av_get_channel_layout_string(buf, sizeof(buf), -1, *cl); + printf("%s\n", buf); + } + + return 0; +} + +#endif + diff --git a/libavfilter/formats.h b/libavfilter/formats.h index 0e1628c..a476e70 100644 --- a/libavfilter/formats.h +++ b/libavfilter/formats.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -69,15 +69,46 @@ struct AVFilterFormats { struct AVFilterFormats ***refs; ///< references to this list }; +/** + * A list of supported channel layouts. + * + * The list works the same as AVFilterFormats, except for the following + * differences: + * - A list with all_layouts = 1 means all channel layouts with a known + * disposition; nb_channel_layouts must then be 0. + * - A list with all_counts = 1 means all channel counts, with a known or + * unknown disposition; nb_channel_layouts must then be 0 and all_layouts 1. + * - The list must not contain a layout with a known disposition and a + * channel count with unknown disposition with the same number of channels + * (e.g. AV_CH_LAYOUT_STEREO and FF_COUNT2LAYOUT(2). + */ typedef struct AVFilterChannelLayouts { uint64_t *channel_layouts; ///< list of channel layouts int nb_channel_layouts; ///< number of channel layouts + char all_layouts; ///< accept any known channel layout + char all_counts; ///< accept any channel layout or count unsigned refcount; ///< number of references to this list struct AVFilterChannelLayouts ***refs; ///< references to this list } AVFilterChannelLayouts; /** + * Encode a channel count as a channel layout. + * FF_COUNT2LAYOUT(c) means any channel layout with c channels, with a known + * or unknown disposition. + * The result is only valid inside AVFilterChannelLayouts and immediately + * related functions. 
+ */ +#define FF_COUNT2LAYOUT(c) (0x8000000000000000ULL | (c)) + +/** + * Decode a channel count encoded as a channel layout. + * Return 0 if the channel layout was a real one. + */ +#define FF_LAYOUT2COUNT(l) (((l) & 0x8000000000000000ULL) ? \ + (int)((l) & 0x7FFFFFFF) : 0) + +/** * Return a channel layouts/samplerates list which contains the intersection of * the layouts/samplerates of a and b. Also, all the references of a, all the * references of b, and a and b themselves will be deallocated. @@ -92,12 +123,21 @@ AVFilterFormats *ff_merge_samplerates(AVFilterFormats *a, /** * Construct an empty AVFilterChannelLayouts/AVFilterFormats struct -- - * representing any channel layout/sample rate. + * representing any channel layout (with known disposition)/sample rate. */ AVFilterChannelLayouts *ff_all_channel_layouts(void); AVFilterFormats *ff_all_samplerates(void); /** + * Construct an AVFilterChannelLayouts coding for any channel layout, with + * known or unknown disposition. + */ +AVFilterChannelLayouts *ff_all_channel_counts(void); + +AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts); + + +/** * A helper for query_formats() which sets all links to the same list of channel * layouts/sample rates. If there are no links hooked to this filter, the list * is freed. @@ -132,6 +172,14 @@ void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref, int ff_default_query_formats(AVFilterContext *ctx); +/** + * Set the formats list to all existing formats. + * This function behaves like ff_default_query_formats(), except it also + * accepts channel layouts with unknown disposition. It should only be used + * with audio filters. + */ +int ff_query_formats_all(AVFilterContext *ctx); + /** * Create a list of supported formats. 
This is intended for use in @@ -150,10 +198,10 @@ AVFilterFormats *ff_make_format_list(const int *fmts); * @return a non negative value in case of success, or a negative * value corresponding to an AVERROR code in case of error */ -int ff_add_format(AVFilterFormats **avff, int fmt); +int ff_add_format(AVFilterFormats **avff, int64_t fmt); /** - * Return a list of all formats supported by Libav for the given media type. + * Return a list of all formats supported by FFmpeg for the given media type. */ AVFilterFormats *ff_all_formats(enum AVMediaType type); diff --git a/libavfilter/gradfun.h b/libavfilter/gradfun.h index 876579a..801dddd 100644 --- a/libavfilter/gradfun.h +++ b/libavfilter/gradfun.h @@ -2,20 +2,20 @@ * Copyright (c) 2010 Nolan Lum <nol888@gmail.com> * Copyright (c) 2009 Loren Merritt <lorenm@u.washignton.edu> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -26,6 +26,8 @@ /// Holds instance-specific information for gradfun. 
typedef struct GradFunContext { + const AVClass *class; + double strength; ///< user specified strength, used to define thresh int thresh; ///< threshold for gradient algorithm int radius; ///< blur radius int chroma_w; ///< width of the chroma planes @@ -33,13 +35,13 @@ typedef struct GradFunContext { int chroma_r; ///< blur radius for the chroma planes uint16_t *buf; ///< holds image data for blur algorithm passed into filter. /// DSP functions. - void (*filter_line) (uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers); - void (*blur_line) (uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width); + void (*filter_line) (uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers); + void (*blur_line) (uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width); } GradFunContext; void ff_gradfun_init_x86(GradFunContext *gf); -void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers); -void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width); +void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers); +void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width); #endif /* AVFILTER_GRADFUN_H */ diff --git a/libavfilter/graphdump.c b/libavfilter/graphdump.c new file mode 100644 index 0000000..45f64c0 --- /dev/null +++ b/libavfilter/graphdump.c @@ -0,0 +1,164 @@ +/* + * Filter graphs to bad ASCII-art + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <string.h> + +#include "libavutil/channel_layout.h" +#include "libavutil/bprint.h" +#include "libavutil/pixdesc.h" +#include "avfilter.h" +#include "avfiltergraph.h" + +static int print_link_prop(AVBPrint *buf, AVFilterLink *link) +{ + char *format; + char layout[64]; + + if (!buf) + buf = &(AVBPrint){ 0 }; /* dummy buffer */ + switch (link->type) { + case AVMEDIA_TYPE_VIDEO: + format = av_x_if_null(av_get_pix_fmt_name(link->format), "?"); + av_bprintf(buf, "[%dx%d %d:%d %s]", link->w, link->h, + link->sample_aspect_ratio.num, + link->sample_aspect_ratio.den, + format); + break; + + case AVMEDIA_TYPE_AUDIO: + av_get_channel_layout_string(layout, sizeof(layout), + -1, link->channel_layout); + format = av_x_if_null(av_get_sample_fmt_name(link->format), "?"); + av_bprintf(buf, "[%dHz %s:%s]", + (int)link->sample_rate, format, layout); + break; + + default: + av_bprintf(buf, "?"); + break; + } + return buf->len; +} + +static void avfilter_graph_dump_to_buf(AVBPrint *buf, AVFilterGraph *graph) +{ + unsigned i, j, x, e; + + for (i = 0; i < graph->filter_count; i++) { + AVFilterContext *filter = graph->filters[i]; + unsigned max_src_name = 0, max_dst_name = 0; + unsigned max_in_name = 0, max_out_name = 0; + unsigned max_in_fmt = 0, 
max_out_fmt = 0; + unsigned width, height, in_indent; + unsigned lname = strlen(filter->name); + unsigned ltype = strlen(filter->filter->name); + + for (j = 0; j < filter->input_count; j++) { + AVFilterLink *l = filter->inputs[j]; + unsigned ln = strlen(l->src->name) + 1 + strlen(l->srcpad->name); + max_src_name = FFMAX(max_src_name, ln); + max_in_name = FFMAX(max_in_name, strlen(l->dstpad->name)); + max_in_fmt = FFMAX(max_in_fmt, print_link_prop(NULL, l)); + } + for (j = 0; j < filter->output_count; j++) { + AVFilterLink *l = filter->outputs[j]; + unsigned ln = strlen(l->dst->name) + 1 + strlen(l->dstpad->name); + max_dst_name = FFMAX(max_dst_name, ln); + max_out_name = FFMAX(max_out_name, strlen(l->srcpad->name)); + max_out_fmt = FFMAX(max_out_fmt, print_link_prop(NULL, l)); + } + in_indent = max_src_name + max_in_name + max_in_fmt; + in_indent += in_indent ? 4 : 0; + width = FFMAX(lname + 2, ltype + 4); + height = FFMAX3(2, filter->input_count, filter->output_count); + av_bprint_chars(buf, ' ', in_indent); + av_bprintf(buf, "+"); + av_bprint_chars(buf, '-', width); + av_bprintf(buf, "+\n"); + for (j = 0; j < height; j++) { + unsigned in_no = j - (height - filter->input_count ) / 2; + unsigned out_no = j - (height - filter->output_count) / 2; + + /* Input link */ + if (in_no < filter->input_count) { + AVFilterLink *l = filter->inputs[in_no]; + e = buf->len + max_src_name + 2; + av_bprintf(buf, "%s:%s", l->src->name, l->srcpad->name); + av_bprint_chars(buf, '-', e - buf->len); + e = buf->len + max_in_fmt + 2 + + max_in_name - strlen(l->dstpad->name); + print_link_prop(buf, l); + av_bprint_chars(buf, '-', e - buf->len); + av_bprintf(buf, "%s", l->dstpad->name); + } else { + av_bprint_chars(buf, ' ', in_indent); + } + + /* Filter */ + av_bprintf(buf, "|"); + if (j == (height - 2) / 2) { + x = (width - lname) / 2; + av_bprintf(buf, "%*s%-*s", x, "", width - x, filter->name); + } else if (j == (height - 2) / 2 + 1) { + x = (width - ltype - 2) / 2; + av_bprintf(buf, 
"%*s(%s)%*s", x, "", filter->filter->name, + width - ltype - 2 - x, ""); + } else { + av_bprint_chars(buf, ' ', width); + } + av_bprintf(buf, "|"); + + /* Output link */ + if (out_no < filter->output_count) { + AVFilterLink *l = filter->outputs[out_no]; + unsigned ln = strlen(l->dst->name) + 1 + + strlen(l->dstpad->name); + e = buf->len + max_out_name + 2; + av_bprintf(buf, "%s", l->srcpad->name); + av_bprint_chars(buf, '-', e - buf->len); + e = buf->len + max_out_fmt + 2 + + max_dst_name - ln; + print_link_prop(buf, l); + av_bprint_chars(buf, '-', e - buf->len); + av_bprintf(buf, "%s:%s", l->dst->name, l->dstpad->name); + } + av_bprintf(buf, "\n"); + } + av_bprint_chars(buf, ' ', in_indent); + av_bprintf(buf, "+"); + av_bprint_chars(buf, '-', width); + av_bprintf(buf, "+\n"); + av_bprintf(buf, "\n"); + } +} + +char *avfilter_graph_dump(AVFilterGraph *graph, const char *options) +{ + AVBPrint buf; + char *dump; + + av_bprint_init(&buf, 0, 0); + avfilter_graph_dump_to_buf(&buf, graph); + av_bprint_init(&buf, buf.len + 1, buf.len + 1); + avfilter_graph_dump_to_buf(&buf, graph); + av_bprint_finalize(&buf, &dump); + return dump; +} diff --git a/libavfilter/graphparser.c b/libavfilter/graphparser.c index 04339c8..0ce823a 100644 --- a/libavfilter/graphparser.c +++ b/libavfilter/graphparser.c @@ -3,20 +3,20 @@ * Copyright (c) 2008 Vitor Sessak * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -101,7 +101,7 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind char tmp_args[256]; int ret; - snprintf(inst_name, sizeof(inst_name), "Parsed filter %d %s", index, filt_name); + snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index); filt = avfilter_get_by_name(filt_name); @@ -123,7 +123,8 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind return ret; } - if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags")) { + if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") + && ctx->scale_sws_opts) { snprintf(tmp_args, sizeof(tmp_args), "%s:%s", args, ctx->scale_sws_opts); args = tmp_args; @@ -385,7 +386,7 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs) { - int index = 0, ret; + int index = 0, ret = 0; char chr = 0; AVFilterInOut *curr_inputs = NULL, *open_inputs = NULL, *open_outputs = NULL; @@ -400,18 +401,17 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, filters += strspn(filters, WHITESPACES); if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, graph)) < 0) - goto fail; - + goto end; if ((ret = parse_filter(&filter, &filters, graph, index, graph)) < 0) - goto fail; + goto end; if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, graph)) < 0) - goto fail; + goto 
end; if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs, graph)) < 0) - goto fail; + goto end; filters += strspn(filters, WHITESPACES); chr = *filters++; @@ -426,16 +426,17 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, "Unable to parse graph description substring: \"%s\"\n", filters - 1); ret = AVERROR(EINVAL); - goto fail; + goto end; } append_inout(&open_outputs, &curr_inputs); + *inputs = open_inputs; *outputs = open_outputs; return 0; - fail: + fail:end: for (; graph->filter_count > 0; graph->filter_count--) avfilter_free(graph->filters[graph->filter_count - 1]); av_freep(&graph->filters); @@ -450,10 +451,13 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, } int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, - AVFilterInOut *open_inputs, - AVFilterInOut *open_outputs, void *log_ctx) + AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr, + void *log_ctx) { +#if 0 int ret; + AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL; + AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL; AVFilterInOut *cur, *match, *inputs = NULL, *outputs = NULL; if ((ret = avfilter_graph_parse2(graph, filters, &inputs, &outputs)) < 0) @@ -507,7 +511,92 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, } avfilter_inout_free(&inputs); avfilter_inout_free(&outputs); - avfilter_inout_free(&open_inputs); - avfilter_inout_free(&open_outputs); + /* clear open_in/outputs only if not passed as parameters */ + if (open_inputs_ptr) *open_inputs_ptr = open_inputs; + else avfilter_inout_free(&open_inputs); + if (open_outputs_ptr) *open_outputs_ptr = open_outputs; + else avfilter_inout_free(&open_outputs); return ret; } +#else + int index = 0, ret = 0; + char chr = 0; + + AVFilterInOut *curr_inputs = NULL; + AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL; + AVFilterInOut *open_outputs = open_outputs_ptr ? 
*open_outputs_ptr : NULL; + + if ((ret = parse_sws_flags(&filters, graph)) < 0) + goto end; + + do { + AVFilterContext *filter; + const char *filterchain = filters; + filters += strspn(filters, WHITESPACES); + + if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0) + goto end; + + if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0) + goto end; + + if (filter->input_count == 1 && !curr_inputs && !index) { + /* First input pad, assume it is "[in]" if not specified */ + const char *tmp = "[in]"; + if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0) + goto end; + } + + if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0) + goto end; + + if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs, + log_ctx)) < 0) + goto end; + + filters += strspn(filters, WHITESPACES); + chr = *filters++; + + if (chr == ';' && curr_inputs) { + av_log(log_ctx, AV_LOG_ERROR, + "Invalid filterchain containing an unlabelled output pad: \"%s\"\n", + filterchain); + ret = AVERROR(EINVAL); + goto end; + } + index++; + } while (chr == ',' || chr == ';'); + + if (chr) { + av_log(log_ctx, AV_LOG_ERROR, + "Unable to parse graph description substring: \"%s\"\n", + filters - 1); + ret = AVERROR(EINVAL); + goto end; + } + + if (curr_inputs) { + /* Last output pad, assume it is "[out]" if not specified */ + const char *tmp = "[out]"; + if ((ret = parse_outputs(&tmp, &curr_inputs, &open_inputs, &open_outputs, + log_ctx)) < 0) + goto end; + } + +end: + /* clear open_in/outputs only if not passed as parameters */ + if (open_inputs_ptr) *open_inputs_ptr = open_inputs; + else avfilter_inout_free(&open_inputs); + if (open_outputs_ptr) *open_outputs_ptr = open_outputs; + else avfilter_inout_free(&open_outputs); + avfilter_inout_free(&curr_inputs); + + if (ret < 0) { + for (; graph->filter_count > 0; graph->filter_count--) + avfilter_free(graph->filters[graph->filter_count - 1]); + 
av_freep(&graph->filters); + } + return ret; +} + +#endif diff --git a/libavfilter/internal.h b/libavfilter/internal.h index 216a355..d03de56 100644 --- a/libavfilter/internal.h +++ b/libavfilter/internal.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -25,6 +25,30 @@ */ #include "avfilter.h" +#include "avfiltergraph.h" +#include "formats.h" +#include "video.h" + +#define POOL_SIZE 32 +typedef struct AVFilterPool { + AVFilterBufferRef *pic[POOL_SIZE]; + int count; + int refcount; + int draining; +} AVFilterPool; + +typedef struct AVFilterCommand { + double time; ///< time expressed in seconds + char *command; ///< command + char *arg; ///< optional argument for the command + int flags; + struct AVFilterCommand *next; +} AVFilterCommand; + +/** + * Update the position of a link in the age heap. + */ +void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link); #if !FF_API_AVFILTERPAD_PUBLIC /** @@ -65,7 +89,7 @@ struct AVFilterPad { /** * Callback function to get a video buffer. 
If NULL, the filter system will - * use avfilter_default_get_video_buffer(). + * use ff_default_get_video_buffer(). * * Input video pads only. */ @@ -73,7 +97,7 @@ struct AVFilterPad { /** * Callback function to get an audio buffer. If NULL, the filter system will - * use avfilter_default_get_audio_buffer(). + * use ff_default_get_audio_buffer(). * * Input audio pads only. */ @@ -144,9 +168,93 @@ void ff_avfilter_default_free_buffer(AVFilterBuffer *buf); /** Tell is a format is contained in the provided list terminated by -1. */ int ff_fmt_is_in(int fmt, const int *fmts); -#define FF_DPRINTF_START(ctx, func) av_dlog(NULL, "%-16s: ", #func) +/** + * Return a copy of a list of integers terminated by -1, or NULL in + * case of copy failure. + */ +int *ff_copy_int_list(const int * const list); + +/** + * Return a copy of a list of 64-bit integers, or NULL in case of + * copy failure. + */ +int64_t *ff_copy_int64_list(const int64_t * const list); + +/* Functions to parse audio format arguments */ + +/** + * Parse a pixel format. + * + * @param ret pixel format pointer to where the value should be written + * @param arg string to parse + * @param log_ctx log context + * @return 0 in case of success, a negative AVERROR code on error + */ +int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx); + +/** + * Parse a sample rate. + * + * @param ret unsigned integer pointer to where the value should be written + * @param arg string to parse + * @param log_ctx log context + * @return 0 in case of success, a negative AVERROR code on error + */ +int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx); -void ff_dlog_link(void *ctx, AVFilterLink *link, int end); +/** + * Parse a time base. 
+ * + * @param ret unsigned AVRational pointer to where the value should be written + * @param arg string to parse + * @param log_ctx log context + * @return 0 in case of success, a negative AVERROR code on error + */ +int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx); + +/** + * Parse a sample format name or a corresponding integer representation. + * + * @param ret integer pointer to where the value should be written + * @param arg string to parse + * @param log_ctx log context + * @return 0 in case of success, a negative AVERROR code on error + */ +int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx); + +/** + * Parse a channel layout or a corresponding integer representation. + * + * @param ret 64bit integer pointer to where the value should be written. + * @param arg string to parse + * @param log_ctx log context + * @return 0 in case of success, a negative AVERROR code on error + */ +int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx); + +void ff_update_link_current_pts(AVFilterLink *link, int64_t pts); + +void ff_free_pool(AVFilterPool *pool); + +void ff_command_queue_pop(AVFilterContext *filter); + +/* misc trace functions */ + +/* #define FF_AVFILTER_TRACE */ + +#ifdef FF_AVFILTER_TRACE +# define ff_tlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__) +#else +# define ff_tlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0) +#endif + +#define FF_TPRINTF_START(ctx, func) ff_tlog(NULL, "%-16s: ", #func) + +char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms); + +void ff_tlog_ref(void *ctx, AVFilterBufferRef *ref, int end); + +void ff_tlog_link(void *ctx, AVFilterLink *link, int end); /** * Insert a new pad. 
@@ -204,6 +312,29 @@ int ff_poll_frame(AVFilterLink *link); */ int ff_request_frame(AVFilterLink *link); +#define AVFILTER_DEFINE_CLASS(fname) \ + static const AVClass fname##_class = { \ + .class_name = #fname, \ + .item_name = av_default_item_name, \ + .option = fname##_options, \ + .version = LIBAVUTIL_VERSION_INT, \ + .category = AV_CLASS_CATEGORY_FILTER, \ + } + +AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink, + AVFilterBufferRef *ref); + +/** + * Find the index of a link. + * + * I.e. find i such that link == ctx->(in|out)puts[i] + */ +#define FF_INLINK_IDX(link) ((int)((link)->dstpad - (link)->dst->input_pads)) +#define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads)) + +int ff_buffersink_read_compat(AVFilterContext *ctx, AVFilterBufferRef **buf); +int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **pbuf, + int nb_samples); /** * Send a frame of data to the next filter. * diff --git a/libavfilter/lavfutils.c b/libavfilter/lavfutils.c new file mode 100644 index 0000000..8b6b114 --- /dev/null +++ b/libavfilter/lavfutils.c @@ -0,0 +1,95 @@ +/* + * Copyright 2012 Stefano Sabatini <stefasab gmail com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/imgutils.h" +#include "lavfutils.h" + +int ff_load_image(uint8_t *data[4], int linesize[4], + int *w, int *h, enum AVPixelFormat *pix_fmt, + const char *filename, void *log_ctx) +{ + AVInputFormat *iformat = NULL; + AVFormatContext *format_ctx = NULL; + AVCodec *codec; + AVCodecContext *codec_ctx; + AVFrame *frame; + int frame_decoded, ret = 0; + AVPacket pkt; + + av_register_all(); + + iformat = av_find_input_format("image2"); + if ((ret = avformat_open_input(&format_ctx, filename, iformat, NULL)) < 0) { + av_log(log_ctx, AV_LOG_ERROR, + "Failed to open input file '%s'\n", filename); + return ret; + } + + codec_ctx = format_ctx->streams[0]->codec; + codec = avcodec_find_decoder(codec_ctx->codec_id); + if (!codec) { + av_log(log_ctx, AV_LOG_ERROR, "Failed to find codec\n"); + ret = AVERROR(EINVAL); + goto end; + } + + if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) { + av_log(log_ctx, AV_LOG_ERROR, "Failed to open codec\n"); + goto end; + } + + if (!(frame = avcodec_alloc_frame()) ) { + av_log(log_ctx, AV_LOG_ERROR, "Failed to alloc frame\n"); + ret = AVERROR(ENOMEM); + goto end; + } + + ret = av_read_frame(format_ctx, &pkt); + if (ret < 0) { + av_log(log_ctx, AV_LOG_ERROR, "Failed to read frame from file\n"); + goto end; + } + + ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt); + if (ret < 0 || !frame_decoded) { + av_log(log_ctx, AV_LOG_ERROR, "Failed to decode image from file\n"); + goto end; + } + ret = 0; + + *w = frame->width; + *h = frame->height; + *pix_fmt = frame->format; + + if ((ret = av_image_alloc(data, linesize, *w, *h, *pix_fmt, 16)) < 0) + goto end; + ret = 0; + + av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, *pix_fmt, *w, *h); + +end: + 
avcodec_close(codec_ctx); + avformat_close_input(&format_ctx); + av_freep(&frame); + + if (ret < 0) + av_log(log_ctx, AV_LOG_ERROR, "Error loading image file '%s'\n", filename); + return ret; +} diff --git a/libavfilter/lavfutils.h b/libavfilter/lavfutils.h new file mode 100644 index 0000000..a310e83 --- /dev/null +++ b/libavfilter/lavfutils.h @@ -0,0 +1,43 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Miscellaneous utilities which make use of the libavformat library + */ + +#ifndef AVFILTER_LAVFUTILS_H +#define AVFILTER_LAVFUTILS_H + +#include "libavformat/avformat.h" + +/** + * Load image from filename and put the resulting image in data. + * + * @param w pointer to the width of the loaded image + * @param h pointer to the height of the loaded image + * @param pix_fmt pointer to the pixel format of the loaded image + * @param filename the name of the image file to load + * @param log_ctx log context + * @return 0 in case of success, a negative error code otherwise. 
+ */ +int ff_load_image(uint8_t *data[4], int linesize[4], + int *w, int *h, enum AVPixelFormat *pix_fmt, + const char *filename, void *log_ctx); + +#endif /* AVFILTER_LAVFUTILS_H */ diff --git a/libavfilter/libavfilter.v b/libavfilter/libavfilter.v index 83e8887..a3d33a3 100644 --- a/libavfilter/libavfilter.v +++ b/libavfilter/libavfilter.v @@ -1,4 +1,5 @@ LIBAVFILTER_$MAJOR { global: avfilter_*; av_*; + ff_default_query_formats; local: *; }; diff --git a/libavfilter/libmpcodecs/av_helpers.h b/libavfilter/libmpcodecs/av_helpers.h new file mode 100644 index 0000000..90b67d5 --- /dev/null +++ b/libavfilter/libmpcodecs/av_helpers.h @@ -0,0 +1,27 @@ +/* + * Generic libav* helpers + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_AV_HELPERS_H +#define MPLAYER_AV_HELPERS_H + +void ff_init_avcodec(void); +void ff_init_avformat(void); + +#endif /* MPLAYER_AV_HELPERS_H */ diff --git a/libavfilter/libmpcodecs/cpudetect.h b/libavfilter/libmpcodecs/cpudetect.h new file mode 100644 index 0000000..710f6e6 --- /dev/null +++ b/libavfilter/libmpcodecs/cpudetect.h @@ -0,0 +1,60 @@ +/* + * This file is part of MPlayer. 
+ * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_CPUDETECT_H +#define MPLAYER_CPUDETECT_H + +#define CPUTYPE_I386 3 +#define CPUTYPE_I486 4 +#define CPUTYPE_I586 5 +#define CPUTYPE_I686 6 + +#include "libavutil/x86_cpu.h" + +typedef struct cpucaps_s { + int cpuType; + int cpuModel; + int cpuStepping; + int hasMMX; + int hasMMX2; + int has3DNow; + int has3DNowExt; + int hasSSE; + int hasSSE2; + int hasSSE3; + int hasSSSE3; + int hasSSE4; + int hasSSE42; + int hasSSE4a; + int hasAVX; + int isX86; + unsigned cl_size; /* size of cache line */ + int hasAltiVec; + int hasTSC; +} CpuCaps; + +extern CpuCaps ff_gCpuCaps; + +void ff_do_cpuid(unsigned int ax, unsigned int *p); + +void ff_GetCpuCaps(CpuCaps *caps); + +/* returned value is malloc()'ed so free() it after use */ +char *ff_GetCpuFriendlyName(unsigned int regs[], unsigned int regs2[]); + +#endif /* MPLAYER_CPUDETECT_H */ diff --git a/libavfilter/libmpcodecs/help_mp.h b/libavfilter/libmpcodecs/help_mp.h new file mode 100644 index 0000000..6ceb630 --- /dev/null +++ b/libavfilter/libmpcodecs/help_mp.h @@ -0,0 +1,2126 @@ +/* WARNING! This is a generated file, do NOT edit. + * See the help/ subdirectory for the editable files. */ + +#ifndef MPLAYER_HELP_MP_H +#define MPLAYER_HELP_MP_H + +// $Revision: 32397 $ +// MASTER FILE. 
Use this file as base for translations. +// Translated files should be sent to the mplayer-DOCS mailing list or +// to the help messages maintainer, see DOCS/tech/MAINTAINERS. +// The header of the translated file should contain credits and contact +// information. Before major releases we will notify all translators to update +// their files. Please do not simply translate and forget this, outdated +// translations quickly become worthless. To help us spot outdated files put a +// note like "sync'ed with help_mp-en.h XXX" in the header of the translation. +// Do NOT translate the above lines, just follow the instructions. + + +// ========================= MPlayer help =========================== + +static const char help_text[]= +"Usage: mplayer [options] [url|path/]filename\n" +"\n" +"Basic options: (complete list in the man page)\n" +" -vo <drv> select video output driver ('-vo help' for a list)\n" +" -ao <drv> select audio output driver ('-ao help' for a list)\n" +#ifdef CONFIG_VCD +" vcd://<trackno> play (S)VCD (Super Video CD) track (raw device, no mount)\n" +#endif +#ifdef CONFIG_DVDREAD +" dvd://<titleno> play DVD title from device instead of plain file\n" +#endif +" -alang/-slang select DVD audio/subtitle language (by 2-char country code)\n" +" -ss <position> seek to given (seconds or hh:mm:ss) position\n" +" -nosound do not play sound\n" +" -fs fullscreen playback (or -vm, -zoom, details in the man page)\n" +" -x <x> -y <y> set display resolution (for use with -vm or -zoom)\n" +" -sub <file> specify subtitle file to use (also see -subfps, -subdelay)\n" +" -playlist <file> specify playlist file\n" +" -vid x -aid y select video (x) and audio (y) stream to play\n" +" -fps x -srate y change video (x fps) and audio (y Hz) rate\n" +" -pp <quality> enable postprocessing filter (details in the man page)\n" +" -framedrop enable frame dropping (for slow machines)\n" +"\n" +"Basic keys: (complete list in the man page, also check input.conf)\n" +" <- or -> seek 
backward/forward 10 seconds\n" +" down or up seek backward/forward 1 minute\n" +" pgdown or pgup seek backward/forward 10 minutes\n" +" < or > step backward/forward in playlist\n" +" p or SPACE pause movie (press any key to continue)\n" +" q or ESC stop playing and quit program\n" +" + or - adjust audio delay by +/- 0.1 second\n" +" o cycle OSD mode: none / seekbar / seekbar + timer\n" +" * or / increase or decrease PCM volume\n" +" x or z adjust subtitle delay by +/- 0.1 second\n" +" r or t adjust subtitle position up/down, also see -vf expand\n" +"\n" +" * * * SEE THE MAN PAGE FOR DETAILS, FURTHER (ADVANCED) OPTIONS AND KEYS * * *\n" +"\n"; + +// ========================= MPlayer messages =========================== + +// mplayer.c +#define MSGTR_Exiting "\nExiting...\n" +#define MSGTR_ExitingHow "\nExiting... (%s)\n" +#define MSGTR_Exit_quit "Quit" +#define MSGTR_Exit_eof "End of file" +#define MSGTR_Exit_error "Fatal error" +#define MSGTR_IntBySignal "\nMPlayer interrupted by signal %d in module: %s\n" +#define MSGTR_NoHomeDir "Cannot find HOME directory.\n" +#define MSGTR_GetpathProblem "get_path(\"config\") problem\n" +#define MSGTR_CreatingCfgFile "Creating config file: %s\n" +#define MSGTR_CantLoadFont "Cannot load bitmap font '%s'.\n" +#define MSGTR_CantLoadSub "Cannot load subtitles '%s'.\n" +#define MSGTR_DumpSelectedStreamMissing "dump: FATAL: Selected stream missing!\n" +#define MSGTR_CantOpenDumpfile "Cannot open dump file.\n" +#define MSGTR_CoreDumped "Core dumped ;)\n" +#define MSGTR_DumpBytesWrittenPercent "dump: %"PRIu64" bytes written (~%.1f%%)\r" +#define MSGTR_DumpBytesWritten "dump: %"PRIu64" bytes written\r" +#define MSGTR_DumpBytesWrittenTo "dump: %"PRIu64" bytes written to '%s'.\n" +#define MSGTR_FPSnotspecified "FPS not specified in the header or invalid, use the -fps option.\n" +#define MSGTR_TryForceAudioFmtStr "Trying to force audio codec driver family %s...\n" +#define MSGTR_CantFindAudioCodec "Cannot find codec for audio format 
0x%X.\n" +#define MSGTR_TryForceVideoFmtStr "Trying to force video codec driver family %s...\n" +#define MSGTR_CantFindVideoCodec "Cannot find codec matching selected -vo and video format 0x%X.\n" +#define MSGTR_CannotInitVO "FATAL: Cannot initialize video driver.\n" +#define MSGTR_CannotInitAO "Could not open/initialize audio device -> no sound.\n" +#define MSGTR_StartPlaying "Starting playback...\n" + +#define MSGTR_SystemTooSlow "\n\n"\ +" ************************************************\n"\ +" **** Your system is too SLOW to play this! ****\n"\ +" ************************************************\n\n"\ +"Possible reasons, problems, workarounds:\n"\ +"- Most common: broken/buggy _audio_ driver\n"\ +" - Try -ao sdl or use the OSS emulation of ALSA.\n"\ +" - Experiment with different values for -autosync, 30 is a good start.\n"\ +"- Slow video output\n"\ +" - Try a different -vo driver (-vo help for a list) or try -framedrop!\n"\ +"- Slow CPU\n"\ +" - Don't try to play a big DVD/DivX on a slow CPU! Try some of the lavdopts,\n"\ +" e.g. 
-vfm ffmpeg -lavdopts lowres=1:fast:skiploopfilter=all.\n"\ +"- Broken file\n"\ +" - Try various combinations of -nobps -ni -forceidx -mc 0.\n"\ +"- Slow media (NFS/SMB mounts, DVD, VCD etc)\n"\ +" - Try -cache 8192.\n"\ +"- Are you using -cache to play a non-interleaved AVI file?\n"\ +" - Try -nocache.\n"\ +"Read DOCS/HTML/en/video.html for tuning/speedup tips.\n"\ +"If none of this helps you, read DOCS/HTML/en/bugreports.html.\n\n" + +#define MSGTR_NoGui "MPlayer was compiled WITHOUT GUI support.\n" +#define MSGTR_GuiNeedsX "MPlayer GUI requires X11.\n" +#define MSGTR_Playing "\nPlaying %s.\n" +#define MSGTR_NoSound "Audio: no sound\n" +#define MSGTR_FPSforced "FPS forced to be %5.3f (ftime: %5.3f).\n" +#define MSGTR_AvailableVideoOutputDrivers "Available video output drivers:\n" +#define MSGTR_AvailableAudioOutputDrivers "Available audio output drivers:\n" +#define MSGTR_AvailableAudioCodecs "Available audio codecs:\n" +#define MSGTR_AvailableVideoCodecs "Available video codecs:\n" +#define MSGTR_AvailableAudioFm "Available (compiled-in) audio codec families/drivers:\n" +#define MSGTR_AvailableVideoFm "Available (compiled-in) video codec families/drivers:\n" +#define MSGTR_AvailableFsType "Available fullscreen layer change modes:\n" +#define MSGTR_CannotReadVideoProperties "Video: Cannot read properties.\n" +#define MSGTR_NoStreamFound "No stream found.\n" +#define MSGTR_ErrorInitializingVODevice "Error opening/initializing the selected video_out (-vo) device.\n" +#define MSGTR_ForcedVideoCodec "Forced video codec: %s\n" +#define MSGTR_ForcedAudioCodec "Forced audio codec: %s\n" +#define MSGTR_Video_NoVideo "Video: no video\n" +#define MSGTR_NotInitializeVOPorVO "\nFATAL: Could not initialize video filters (-vf) or video output (-vo).\n" +#define MSGTR_Paused " ===== PAUSE =====" // no more than 23 characters (status line for audio files) +#define MSGTR_PlaylistLoadUnable "\nUnable to load playlist %s.\n" +#define MSGTR_Exit_SIGILL_RTCpuSel \ +"- MPlayer crashed 
by an 'Illegal Instruction'.\n"\ +" It may be a bug in our new runtime CPU-detection code...\n"\ +" Please read DOCS/HTML/en/bugreports.html.\n" +#define MSGTR_Exit_SIGILL \ +"- MPlayer crashed by an 'Illegal Instruction'.\n"\ +" It usually happens when you run it on a CPU different than the one it was\n"\ +" compiled/optimized for.\n"\ +" Verify this!\n" +#define MSGTR_Exit_SIGSEGV_SIGFPE \ +"- MPlayer crashed by bad usage of CPU/FPU/RAM.\n"\ +" Recompile MPlayer with --enable-debug and make a 'gdb' backtrace and\n"\ +" disassembly. Details in DOCS/HTML/en/bugreports_what.html#bugreports_crash.\n" +#define MSGTR_Exit_SIGCRASH \ +"- MPlayer crashed. This shouldn't happen.\n"\ +" It can be a bug in the MPlayer code _or_ in your drivers _or_ in your\n"\ +" gcc version. If you think it's MPlayer's fault, please read\n"\ +" DOCS/HTML/en/bugreports.html and follow the instructions there. We can't and\n"\ +" won't help unless you provide this information when reporting a possible bug.\n" +#define MSGTR_LoadingConfig "Loading config '%s'\n" +#define MSGTR_LoadingProtocolProfile "Loading protocol-related profile '%s'\n" +#define MSGTR_LoadingExtensionProfile "Loading extension-related profile '%s'\n" +#define MSGTR_AddedSubtitleFile "SUB: Added subtitle file (%d): %s\n" +#define MSGTR_RemovedSubtitleFile "SUB: Removed subtitle file (%d): %s\n" +#define MSGTR_ErrorOpeningOutputFile "Error opening file [%s] for writing!\n" +#define MSGTR_RTCDeviceNotOpenable "Failed to open %s: %s (it should be readable by the user.)\n" +#define MSGTR_LinuxRTCInitErrorIrqpSet "Linux RTC init error in ioctl (rtc_irqp_set %lu): %s\n" +#define MSGTR_IncreaseRTCMaxUserFreq "Try adding \"echo %lu > /proc/sys/dev/rtc/max-user-freq\" to your system startup scripts.\n" +#define MSGTR_LinuxRTCInitErrorPieOn "Linux RTC init error in ioctl (rtc_pie_on): %s\n" +#define MSGTR_UsingTimingType "Using %s timing.\n" +#define MSGTR_Getch2InitializedTwice "WARNING: getch2_init called twice!\n" +#define 
MSGTR_DumpstreamFdUnavailable "Cannot dump this stream - no file descriptor available.\n" +#define MSGTR_CantOpenLibmenuFilterWithThisRootMenu "Can't open libmenu video filter with root menu %s.\n" +#define MSGTR_AudioFilterChainPreinitError "Error at audio filter chain pre-init!\n" +#define MSGTR_LinuxRTCReadError "Linux RTC read error: %s\n" +#define MSGTR_SoftsleepUnderflow "Warning! Softsleep underflow!\n" +#define MSGTR_DvdnavNullEvent "DVDNAV Event NULL?!\n" +#define MSGTR_DvdnavHighlightEventBroken "DVDNAV Event: Highlight event broken\n" +#define MSGTR_DvdnavEvent "DVDNAV Event: %s\n" +#define MSGTR_DvdnavHighlightHide "DVDNAV Event: Highlight Hide\n" +#define MSGTR_DvdnavStillFrame "######################################## DVDNAV Event: Still Frame: %d sec(s)\n" +#define MSGTR_DvdnavNavStop "DVDNAV Event: Nav Stop\n" +#define MSGTR_DvdnavNavNOP "DVDNAV Event: Nav NOP\n" +#define MSGTR_DvdnavNavSpuStreamChangeVerbose "DVDNAV Event: Nav SPU Stream Change: phys: %d/%d/%d logical: %d\n" +#define MSGTR_DvdnavNavSpuStreamChange "DVDNAV Event: Nav SPU Stream Change: phys: %d logical: %d\n" +#define MSGTR_DvdnavNavAudioStreamChange "DVDNAV Event: Nav Audio Stream Change: phys: %d logical: %d\n" +#define MSGTR_DvdnavNavVTSChange "DVDNAV Event: Nav VTS Change\n" +#define MSGTR_DvdnavNavCellChange "DVDNAV Event: Nav Cell Change\n" +#define MSGTR_DvdnavNavSpuClutChange "DVDNAV Event: Nav SPU CLUT Change\n" +#define MSGTR_DvdnavNavSeekDone "DVDNAV Event: Nav Seek Done\n" +#define MSGTR_MenuCall "Menu call\n" +#define MSGTR_MasterQuit "Option -udp-slave: exiting because master exited\n" +#define MSGTR_InvalidIP "Option -udp-ip: invalid IP address\n" +#define MSGTR_Forking "Forking...\n" +#define MSGTR_Forked "Forked...\n" +#define MSGTR_CouldntStartGdb "Couldn't start gdb\n" +#define MSGTR_CouldntFork "Couldn't fork\n" +#define MSGTR_FilenameTooLong "Filename is too long, can not load file or directory specific config files\n" +#define MSGTR_AudioDeviceStuck "Audio 
device got stuck!\n" +#define MSGTR_AudioOutputTruncated "Audio output truncated at end.\n" +#define MSGTR_ASSCannotAddVideoFilter "ASS: cannot add video filter\n" +#define MSGTR_PtsAfterFiltersMissing "pts after filters MISSING\n" +#define MSGTR_CommandLine "CommandLine:" +#define MSGTR_MenuInitFailed "Menu init failed.\n" + +// --- edit decision lists +#define MSGTR_EdlOutOfMem "Can't allocate enough memory to hold EDL data.\n" +#define MSGTR_EdlOutOfMemFile "Can't allocate enough memory to hold EDL file name [%s].\n" +#define MSGTR_EdlRecordsNo "Read %d EDL actions.\n" +#define MSGTR_EdlQueueEmpty "There are no EDL actions to take care of.\n" +#define MSGTR_EdlCantOpenForWrite "Can't open EDL file [%s] for writing.\n" +#define MSGTR_EdlCantOpenForRead "Can't open EDL file [%s] for reading.\n" +#define MSGTR_EdlNOsh_video "Cannot use EDL without video, disabling.\n" +#define MSGTR_EdlNOValidLine "Invalid EDL line: %s\n" +#define MSGTR_EdlBadlyFormattedLine "Badly formatted EDL line [%d], discarding.\n" +#define MSGTR_EdlBadLineOverlap "Last stop position was [%f]; next start is [%f].\n"\ +"Entries must be in chronological order, cannot overlap. 
Discarding.\n" +#define MSGTR_EdlBadLineBadStop "Stop time has to be after start time.\n" +#define MSGTR_EdloutBadStop "EDL skip canceled, last start > stop\n" +#define MSGTR_EdloutStartSkip "EDL skip start, press 'i' again to end block.\n" +#define MSGTR_EdloutEndSkip "EDL skip end, line written.\n" + +// mplayer.c OSD +#define MSGTR_OSDenabled "enabled" +#define MSGTR_OSDdisabled "disabled" +#define MSGTR_OSDAudio "Audio: %s" +#define MSGTR_OSDVideo "Video: %s" +#define MSGTR_OSDChannel "Channel: %s" +#define MSGTR_OSDSubDelay "Sub delay: %d ms" +#define MSGTR_OSDSpeed "Speed: x %6.2f" +#define MSGTR_OSDosd "OSD: %s" +#define MSGTR_OSDChapter "Chapter: (%d) %s" +#define MSGTR_OSDAngle "Angle: %d/%d" +#define MSGTR_OSDDeinterlace "Deinterlace: %s" +#define MSGTR_OSDCapturing "Capturing: %s" +#define MSGTR_OSDCapturingFailure "Capturing failed" + +// property values +#define MSGTR_Enabled "enabled" +#define MSGTR_EnabledEdl "enabled (EDL)" +#define MSGTR_Disabled "disabled" +#define MSGTR_HardFrameDrop "hard" +#define MSGTR_Unknown "unknown" +#define MSGTR_Bottom "bottom" +#define MSGTR_Center "center" +#define MSGTR_Top "top" +#define MSGTR_SubSourceFile "file" +#define MSGTR_SubSourceVobsub "vobsub" +#define MSGTR_SubSourceDemux "embedded" + +// OSD bar names +#define MSGTR_Volume "Volume" +#define MSGTR_Panscan "Panscan" +#define MSGTR_Gamma "Gamma" +#define MSGTR_Brightness "Brightness" +#define MSGTR_Contrast "Contrast" +#define MSGTR_Saturation "Saturation" +#define MSGTR_Hue "Hue" +#define MSGTR_Balance "Balance" + +// property state +#define MSGTR_LoopStatus "Loop: %s" +#define MSGTR_MuteStatus "Mute: %s" +#define MSGTR_AVDelayStatus "A-V delay: %s" +#define MSGTR_OnTopStatus "Stay on top: %s" +#define MSGTR_RootwinStatus "Rootwin: %s" +#define MSGTR_BorderStatus "Border: %s" +#define MSGTR_FramedroppingStatus "Framedropping: %s" +#define MSGTR_VSyncStatus "VSync: %s" +#define MSGTR_SubSelectStatus "Subtitles: %s" +#define MSGTR_SubSourceStatus "Sub source: 
%s" +#define MSGTR_SubPosStatus "Sub position: %s/100" +#define MSGTR_SubAlignStatus "Sub alignment: %s" +#define MSGTR_SubDelayStatus "Sub delay: %s" +#define MSGTR_SubScale "Sub Scale: %s" +#define MSGTR_SubVisibleStatus "Subtitles: %s" +#define MSGTR_SubForcedOnlyStatus "Forced sub only: %s" + +// mencoder.c +#define MSGTR_UsingPass3ControlFile "Using pass3 control file: %s\n" +#define MSGTR_MissingFilename "\nFilename missing.\n\n" +#define MSGTR_CannotOpenFile_Device "Cannot open file/device.\n" +#define MSGTR_CannotOpenDemuxer "Cannot open demuxer.\n" +#define MSGTR_NoAudioEncoderSelected "\nNo audio encoder (-oac) selected. Select one (see -oac help) or use -nosound.\n" +#define MSGTR_NoVideoEncoderSelected "\nNo video encoder (-ovc) selected. Select one (see -ovc help).\n" +#define MSGTR_CannotOpenOutputFile "Cannot open output file '%s'.\n" +#define MSGTR_EncoderOpenFailed "Failed to open the encoder.\n" +#define MSGTR_MencoderWrongFormatAVI "\nWARNING: OUTPUT FILE FORMAT IS _AVI_. See -of help.\n" +#define MSGTR_MencoderWrongFormatMPG "\nWARNING: OUTPUT FILE FORMAT IS _MPEG_. See -of help.\n" +#define MSGTR_MissingOutputFilename "No output file specified, please see the -o option." +#define MSGTR_ForcingOutputFourcc "Forcing output FourCC to %x [%.4s].\n" +#define MSGTR_ForcingOutputAudiofmtTag "Forcing output audio format tag to 0x%x.\n" +#define MSGTR_DuplicateFrames "\n%d duplicate frame(s)!\n" +#define MSGTR_SkipFrame "\nSkipping frame!\n" +#define MSGTR_ResolutionDoesntMatch "\nNew video file has different resolution or colorspace than the previous one.\n" +#define MSGTR_FrameCopyFileMismatch "\nAll video files must have identical fps, resolution, and codec for -ovc copy.\n" +#define MSGTR_AudioCopyFileMismatch "\nAll files must have identical audio codec and format for -oac copy.\n" +#define MSGTR_NoAudioFileMismatch "\nCannot mix video-only files with audio and video files. 
Try -nosound.\n" +#define MSGTR_NoSpeedWithFrameCopy "WARNING: -speed is not guaranteed to work correctly with -oac copy!\n"\ +"Your encode might be broken!\n" +#define MSGTR_ErrorWritingFile "%s: Error writing file.\n" +#define MSGTR_FlushingVideoFrames "\nFlushing video frames.\n" +#define MSGTR_FiltersHaveNotBeenConfiguredEmptyFile "Filters have not been configured! Empty file?\n" +#define MSGTR_RecommendedVideoBitrate "Recommended video bitrate for %s CD: %d\n" +#define MSGTR_VideoStreamResult "\nVideo stream: %8.3f kbit/s (%d B/s) size: %"PRIu64" bytes %5.3f secs %d frames\n" +#define MSGTR_AudioStreamResult "\nAudio stream: %8.3f kbit/s (%d B/s) size: %"PRIu64" bytes %5.3f secs\n" +#define MSGTR_EdlSkipStartEndCurrent "EDL SKIP: Start: %.2f End: %.2f Current: V: %.2f A: %.2f \r" +#define MSGTR_OpenedStream "success: format: %d data: 0x%X - 0x%x\n" +#define MSGTR_VCodecFramecopy "videocodec: framecopy (%dx%d %dbpp fourcc=%x)\n" +#define MSGTR_ACodecFramecopy "audiocodec: framecopy (format=%x chans=%d rate=%d bits=%d B/s=%d sample-%d)\n" +#define MSGTR_CBRPCMAudioSelected "CBR PCM audio selected.\n" +#define MSGTR_MP3AudioSelected "MP3 audio selected.\n" +#define MSGTR_CannotAllocateBytes "Couldn't allocate %d bytes.\n" +#define MSGTR_SettingAudioDelay "Setting audio delay to %5.3fs.\n" +#define MSGTR_SettingVideoDelay "Setting video delay to %5.3fs.\n" +#define MSGTR_LimitingAudioPreload "Limiting audio preload to 0.4s.\n" +#define MSGTR_IncreasingAudioDensity "Increasing audio density to 4.\n" +#define MSGTR_ZeroingAudioPreloadAndMaxPtsCorrection "Forcing audio preload to 0, max pts correction to 0.\n" +#define MSGTR_LameVersion "LAME version %s (%s)\n\n" +#define MSGTR_InvalidBitrateForLamePreset "Error: The bitrate specified is out of the valid range for this preset.\n"\ +"\n"\ +"When using this mode you must enter a value between \"8\" and \"320\".\n"\ +"\n"\ +"For further information try: \"-lameopts preset=help\"\n" +#define 
MSGTR_InvalidLamePresetOptions "Error: You did not enter a valid profile and/or options with preset.\n"\ +"\n"\ +"Available profiles are:\n"\ +"\n"\ +" <fast> standard\n"\ +" <fast> extreme\n"\ +" insane\n"\ +" <cbr> (ABR Mode) - The ABR Mode is implied. To use it,\n"\ +" simply specify a bitrate. For example:\n"\ +" \"preset=185\" activates this\n"\ +" preset and uses 185 as an average kbps.\n"\ +"\n"\ +" Some examples:\n"\ +"\n"\ +" \"-lameopts fast:preset=standard \"\n"\ +" or \"-lameopts cbr:preset=192 \"\n"\ +" or \"-lameopts preset=172 \"\n"\ +" or \"-lameopts preset=extreme \"\n"\ +"\n"\ +"For further information try: \"-lameopts preset=help\"\n" +#define MSGTR_LamePresetsLongInfo "\n"\ +"The preset switches are designed to provide the highest possible quality.\n"\ +"\n"\ +"They have for the most part been subjected to and tuned via rigorous double\n"\ +"blind listening tests to verify and achieve this objective.\n"\ +"\n"\ +"These are continually updated to coincide with the latest developments that\n"\ +"occur and as a result should provide you with nearly the best quality\n"\ +"currently possible from LAME.\n"\ +"\n"\ +"To activate these presets:\n"\ +"\n"\ +" For VBR modes (generally highest quality):\n"\ +"\n"\ +" \"preset=standard\" This preset should generally be transparent\n"\ +" to most people on most music and is already\n"\ +" quite high in quality.\n"\ +"\n"\ +" \"preset=extreme\" If you have extremely good hearing and similar\n"\ +" equipment, this preset will generally provide\n"\ +" slightly higher quality than the \"standard\"\n"\ +" mode.\n"\ +"\n"\ +" For CBR 320kbps (highest quality possible from the preset switches):\n"\ +"\n"\ +" \"preset=insane\" This preset will usually be overkill for most\n"\ +" people and most situations, but if you must\n"\ +" have the absolute highest quality with no\n"\ +" regard to filesize, this is the way to go.\n"\ +"\n"\ +" For ABR modes (high quality per given bitrate but not as high as VBR):\n"\ +"\n"\ +" 
\"preset=<kbps>\" Using this preset will usually give you good\n"\ +" quality at a specified bitrate. Depending on the\n"\ +" bitrate entered, this preset will determine the\n"\ +" optimal settings for that particular situation.\n"\ +" While this approach works, it is not nearly as\n"\ +" flexible as VBR, and usually will not attain the\n"\ +" same level of quality as VBR at higher bitrates.\n"\ +"\n"\ +"The following options are also available for the corresponding profiles:\n"\ +"\n"\ +" <fast> standard\n"\ +" <fast> extreme\n"\ +" insane\n"\ +" <cbr> (ABR Mode) - The ABR Mode is implied. To use it,\n"\ +" simply specify a bitrate. For example:\n"\ +" \"preset=185\" activates this\n"\ +" preset and uses 185 as an average kbps.\n"\ +"\n"\ +" \"fast\" - Enables the new fast VBR for a particular profile. The\n"\ +" disadvantage to the speed switch is that often times the\n"\ +" bitrate will be slightly higher than with the normal mode\n"\ +" and quality may be slightly lower also.\n"\ +" Warning: with the current version fast presets might result in too\n"\ +" high bitrate compared to regular presets.\n"\ +"\n"\ +" \"cbr\" - If you use the ABR mode (read above) with a significant\n"\ +" bitrate such as 80, 96, 112, 128, 160, 192, 224, 256, 320,\n"\ +" you can use the \"cbr\" option to force CBR mode encoding\n"\ +" instead of the standard abr mode. 
ABR does provide higher\n"\ +" quality but CBR may be useful in situations such as when\n"\ +" streaming an MP3 over the internet may be important.\n"\ +"\n"\ +" For example:\n"\ +"\n"\ +" \"-lameopts fast:preset=standard \"\n"\ +" or \"-lameopts cbr:preset=192 \"\n"\ +" or \"-lameopts preset=172 \"\n"\ +" or \"-lameopts preset=extreme \"\n"\ +"\n"\ +"\n"\ +"A few aliases are available for ABR mode:\n"\ +"phone => 16kbps/mono phon+/lw/mw-eu/sw => 24kbps/mono\n"\ +"mw-us => 40kbps/mono voice => 56kbps/mono\n"\ +"fm/radio/tape => 112kbps hifi => 160kbps\n"\ +"cd => 192kbps studio => 256kbps" +#define MSGTR_LameCantInit \ +"Cannot set LAME options, check bitrate/samplerate, some very low bitrates\n"\ +"(<32) need lower samplerates (i.e. -srate 8000).\n"\ +"If everything else fails, try a preset." +#define MSGTR_ConfigFileError "Config file error" +#define MSGTR_ErrorParsingCommandLine "error parsing command line" +#define MSGTR_VideoStreamRequired "Video stream is mandatory!\n" +#define MSGTR_ForcingInputFPS "Input fps will be interpreted as %5.3f instead.\n" +#define MSGTR_RawvideoDoesNotSupportAudio "Output file format RAWVIDEO does not support audio - disabling audio.\n" +#define MSGTR_DemuxerDoesntSupportNosound "This demuxer doesn't support -nosound yet.\n" +#define MSGTR_MemAllocFailed "Memory allocation failed.\n" +#define MSGTR_NoMatchingFilter "Couldn't find matching filter/ao format!\n" +#define MSGTR_MP3WaveFormatSizeNot30 "sizeof(MPEGLAYER3WAVEFORMAT)==%d!=30, maybe broken C compiler?\n" +#define MSGTR_NoLavcAudioCodecName "Audio LAVC, Missing codec name!\n" +#define MSGTR_LavcAudioCodecNotFound "Audio LAVC, couldn't find encoder for codec %s.\n" +#define MSGTR_CouldntAllocateLavcContext "Audio LAVC, couldn't allocate context!\n" +#define MSGTR_CouldntOpenCodec "Couldn't open codec %s, br=%d.\n" +#define MSGTR_CantCopyAudioFormat "Audio format 0x%x is incompatible with '-oac copy', please try '-oac pcm' instead or use '-fafmttag' to override it.\n" + +// 
cfg-mencoder.h +#define MSGTR_MEncoderMP3LameHelp "\n\n"\ +" vbr=<0-4> variable bitrate method\n"\ +" 0: cbr (constant bitrate)\n"\ +" 1: mt (Mark Taylor VBR algorithm)\n"\ +" 2: rh (Robert Hegemann VBR algorithm - default)\n"\ +" 3: abr (average bitrate)\n"\ +" 4: mtrh (Mark Taylor Robert Hegemann VBR algorithm)\n"\ +"\n"\ +" abr average bitrate\n"\ +"\n"\ +" cbr constant bitrate\n"\ +" Also forces CBR mode encoding on subsequent ABR presets modes.\n"\ +"\n"\ +" br=<0-1024> specify bitrate in kBit (CBR and ABR only)\n"\ +"\n"\ +" q=<0-9> quality (0-highest, 9-lowest) (only for VBR)\n"\ +"\n"\ +" aq=<0-9> algorithmic quality (0-best/slowest, 9-worst/fastest)\n"\ +"\n"\ +" ratio=<1-100> compression ratio\n"\ +"\n"\ +" vol=<0-10> set audio input gain\n"\ +"\n"\ +" mode=<0-3> (default: auto)\n"\ +" 0: stereo\n"\ +" 1: joint-stereo\n"\ +" 2: dualchannel\n"\ +" 3: mono\n"\ +"\n"\ +" padding=<0-2>\n"\ +" 0: no\n"\ +" 1: all\n"\ +" 2: adjust\n"\ +"\n"\ +" fast Switch on faster encoding on subsequent VBR presets modes,\n"\ +" slightly lower quality and higher bitrates.\n"\ +"\n"\ +" preset=<value> Provide the highest possible quality settings.\n"\ +" medium: VBR encoding, good quality\n"\ +" (150-180 kbps bitrate range)\n"\ +" standard: VBR encoding, high quality\n"\ +" (170-210 kbps bitrate range)\n"\ +" extreme: VBR encoding, very high quality\n"\ +" (200-240 kbps bitrate range)\n"\ +" insane: CBR encoding, highest preset quality\n"\ +" (320 kbps bitrate)\n"\ +" <8-320>: ABR encoding at average given kbps bitrate.\n\n" + +// codec-cfg.c +#define MSGTR_DuplicateFourcc "duplicated FourCC" +#define MSGTR_TooManyFourccs "too many FourCCs/formats..." +#define MSGTR_ParseError "parse error" +#define MSGTR_ParseErrorFIDNotNumber "parse error (format ID not a number?)" +#define MSGTR_ParseErrorFIDAliasNotNumber "parse error (format ID alias not a number?)" +#define MSGTR_DuplicateFID "duplicated format ID" +#define MSGTR_TooManyOut "too many out..." 
+#define MSGTR_InvalidCodecName "\ncodec(%s) name is not valid!\n" +#define MSGTR_CodecLacksFourcc "\ncodec(%s) does not have FourCC/format!\n" +#define MSGTR_CodecLacksDriver "\ncodec(%s) does not have a driver!\n" +#define MSGTR_CodecNeedsDLL "\ncodec(%s) needs a 'dll'!\n" +#define MSGTR_CodecNeedsOutfmt "\ncodec(%s) needs an 'outfmt'!\n" +#define MSGTR_CantAllocateComment "Can't allocate memory for comment. " +#define MSGTR_GetTokenMaxNotLessThanMAX_NR_TOKEN "get_token(): max >= MAX_MR_TOKEN!" +#define MSGTR_CantGetMemoryForLine "Can't get memory for 'line': %s\n" +#define MSGTR_CantReallocCodecsp "Can't realloc '*codecsp': %s\n" +#define MSGTR_CodecNameNotUnique "Codec name '%s' isn't unique." +#define MSGTR_CantStrdupName "Can't strdup -> 'name': %s\n" +#define MSGTR_CantStrdupInfo "Can't strdup -> 'info': %s\n" +#define MSGTR_CantStrdupDriver "Can't strdup -> 'driver': %s\n" +#define MSGTR_CantStrdupDLL "Can't strdup -> 'dll': %s" +#define MSGTR_AudioVideoCodecTotals "%d audio & %d video codecs\n" +#define MSGTR_CodecDefinitionIncorrect "Codec is not defined correctly." +#define MSGTR_OutdatedCodecsConf "This codecs.conf is too old and incompatible with this MPlayer release!" 
+ +// fifo.c +#define MSGTR_CannotMakePipe "Cannot make PIPE!\n" + +// parser-mecmd.c, parser-mpcmd.c +#define MSGTR_NoFileGivenOnCommandLine "'--' indicates no more options, but no filename was given on the command line.\n" +#define MSGTR_TheLoopOptionMustBeAnInteger "The loop option must be an integer: %s\n" +#define MSGTR_UnknownOptionOnCommandLine "Unknown option on the command line: -%s\n" +#define MSGTR_ErrorParsingOptionOnCommandLine "Error parsing option on the command line: -%s\n" +#define MSGTR_InvalidPlayEntry "Invalid play entry %s\n" +#define MSGTR_NotAnMEncoderOption "-%s is not an MEncoder option\n" +#define MSGTR_NoFileGiven "No file given\n" + +// m_config.c +#define MSGTR_SaveSlotTooOld "Save slot found from lvl %d is too old: %d !!!\n" +#define MSGTR_InvalidCfgfileOption "The %s option can't be used in a config file.\n" +#define MSGTR_InvalidCmdlineOption "The %s option can't be used on the command line.\n" +#define MSGTR_InvalidSuboption "Error: option '%s' has no suboption '%s'.\n" +#define MSGTR_MissingSuboptionParameter "Error: suboption '%s' of '%s' must have a parameter!\n" +#define MSGTR_MissingOptionParameter "Error: option '%s' must have a parameter!\n" +#define MSGTR_OptionListHeader "\n Name Type Min Max Global CL Cfg\n\n" +#define MSGTR_TotalOptions "\nTotal: %d options\n" +#define MSGTR_ProfileInclusionTooDeep "WARNING: Profile inclusion too deep.\n" +#define MSGTR_NoProfileDefined "No profiles have been defined.\n" +#define MSGTR_AvailableProfiles "Available profiles:\n" +#define MSGTR_UnknownProfile "Unknown profile '%s'.\n" +#define MSGTR_Profile "Profile %s: %s\n" + +// m_property.c +#define MSGTR_PropertyListHeader "\n Name Type Min Max\n\n" +#define MSGTR_TotalProperties "\nTotal: %d properties\n" + +// loader/ldt_keeper.c +#define MSGTR_LOADER_DYLD_Warning "WARNING: Attempting to use DLL codecs but environment variable\n DYLD_BIND_AT_LAUNCH not set. 
This will likely crash.\n" + + +// ====================== GUI messages/buttons ======================== + +// --- labels --- +#define MSGTR_About "About" +#define MSGTR_FileSelect "Select file..." +#define MSGTR_SubtitleSelect "Select subtitle..." +#define MSGTR_OtherSelect "Select..." +#define MSGTR_AudioFileSelect "Select external audio channel..." +#define MSGTR_FontSelect "Select font..." +// Note: If you change MSGTR_PlayList please see if it still fits MSGTR_MENU_PlayList +#define MSGTR_PlayList "Playlist" +#define MSGTR_Equalizer "Equalizer" +#define MSGTR_ConfigureEqualizer "Configure Equalizer" +#define MSGTR_SkinBrowser "Skin Browser" +#define MSGTR_Network "Network streaming..." +// Note: If you change MSGTR_Preferences please see if it still fits MSGTR_MENU_Preferences +#define MSGTR_Preferences "Preferences" +#define MSGTR_AudioPreferences "Audio driver configuration" +#define MSGTR_NoMediaOpened "No media opened." +#define MSGTR_Title "Title %d" +#define MSGTR_NoChapter "No chapter" +#define MSGTR_Chapter "Chapter %d" +#define MSGTR_NoFileLoaded "No file loaded." 
+#define MSGTR_Filter_UTF8Subtitles "UTF-8 encoded subtitles (*.utf, *.utf-8, *.utf8)" +#define MSGTR_Filter_AllSubtitles "All subtitles" +#define MSGTR_Filter_AllFiles "All files" +#define MSGTR_Filter_TTF "True Type fonts (*.ttf)" +#define MSGTR_Filter_Type1 "Type1 fonts (*.pfb)" +#define MSGTR_Filter_AllFonts "All fonts" +#define MSGTR_Filter_FontFiles "Font files (*.desc)" +#define MSGTR_Filter_DDRawAudio "Dolby Digital / PCM (*.ac3, *.pcm)" +#define MSGTR_Filter_MPEGAudio "MPEG audio (*.mp2, *.mp3, *.mpga, *.m4a, *.aac, *.f4a)" +#define MSGTR_Filter_MatroskaAudio "Matroska audio (*.mka)" +#define MSGTR_Filter_OGGAudio "Ogg audio (*.oga, *.ogg, *.spx)" +#define MSGTR_Filter_WAVAudio "WAV audio (*.wav)" +#define MSGTR_Filter_WMAAudio "Windows Media audio (*.wma)" +#define MSGTR_Filter_AllAudioFiles "All audio files" +#define MSGTR_Filter_AllVideoFiles "All video files" +#define MSGTR_Filter_AVIFiles "AVI files" +#define MSGTR_Filter_DivXFiles "DivX files" +#define MSGTR_Filter_FlashVideo "Flash Video" +#define MSGTR_Filter_MP3Files "MP3 files" +#define MSGTR_Filter_MP4Files "MP4 files" +#define MSGTR_Filter_MPEGFiles "MPEG files" +#define MSGTR_Filter_MP2TS "MPEG-2 transport streams" +#define MSGTR_Filter_MatroskaMedia "Matroska media" +#define MSGTR_Filter_OGGMedia "Ogg media" +#define MSGTR_Filter_QTMedia "QuickTime media" +#define MSGTR_Filter_RNMedia "RealNetworks media" +#define MSGTR_Filter_VideoCDImages "VCD/SVCD images" +#define MSGTR_Filter_WAVFiles "WAV files" +#define MSGTR_Filter_WindowsMedia "Windows media" +#define MSGTR_Filter_Playlists "Playlists" + +// --- buttons --- +#define MSGTR_Ok "OK" +#define MSGTR_Cancel "Cancel" +#define MSGTR_Add "Add" +#define MSGTR_Remove "Remove" +#define MSGTR_Clear "Clear" +#define MSGTR_Config "Config" +#define MSGTR_ConfigDriver "Configure driver" +#define MSGTR_Browse "Browse" + +// --- error messages --- +#define MSGTR_NEMDB "Sorry, not enough memory to draw buffer.\n" +#define MSGTR_NEMFMR "Sorry, not enough 
memory for menu rendering." +#define MSGTR_IDFGCVD "Sorry, no GUI-compatible video output driver found.\n" +#define MSGTR_NEEDLAVC "Sorry, you cannot play non-MPEG files with your DXR3/H+ device without reencoding.\nPlease enable lavc in the DXR3/H+ configuration box." +#define MSGTR_ICONERROR "Icon '%s' (size %d) not found or unsupported format.\n" + +// --- skin loader error messages +#define MSGTR_SKIN_ERRORMESSAGE "Error in skin config file on line %d: %s" +#define MSGTR_SKIN_ERROR_SECTION "No section specified for '%s'.\n" +#define MSGTR_SKIN_ERROR_WINDOW "No window specified for '%s'.\n" +#define MSGTR_SKIN_ERROR_ITEM "This item is not supported by '%s'.\n" +#define MSGTR_SKIN_UNKNOWN_ITEM "Unknown item '%s'\n" +#define MSGTR_SKIN_UNKNOWN_NAME "Unknown name '%s'\n" +#define MSGTR_SKIN_SkinFileNotFound "Skin file %s not found.\n" +#define MSGTR_SKIN_SkinFileNotReadable "Skin file %s not readable.\n" +#define MSGTR_SKIN_BITMAP_16bit "Color depth of bitmap %s is 16 bits or less which is not supported.\n" +#define MSGTR_SKIN_BITMAP_FileNotFound "Bitmap %s not found.\n" +#define MSGTR_SKIN_BITMAP_PNGReadError "PNG read error in %s\n" +#define MSGTR_SKIN_BITMAP_ConversionError "24 bit to 32 bit conversion error in %s\n" +#define MSGTR_SKIN_UnknownMessage "Unknown message '%s'\n" +#define MSGTR_SKIN_NotEnoughMemory "Not enough memory\n" +#define MSGTR_SKIN_TooManyItemsDeclared "Too many items declared.\n" +#define MSGTR_SKIN_FONT_TooManyFontsDeclared "Too many fonts declared.\n" +#define MSGTR_SKIN_FONT_FontFileNotFound "Font description file not found.\n" +#define MSGTR_SKIN_FONT_FontImageNotFound "Font image file not found.\n" +#define MSGTR_SKIN_FONT_NonExistentFont "Font '%s' not found.\n" +#define MSGTR_SKIN_UnknownParameter "Unknown parameter '%s'\n" +#define MSGTR_SKIN_SKINCFG_SkinNotFound "Skin '%s' not found.\n" +#define MSGTR_SKIN_SKINCFG_SelectedSkinNotFound "Selected skin '%s' not found, trying skin 'default'...\n" +#define 
MSGTR_SKIN_SKINCFG_SkinCfgError "Config file processing error with skin '%s'\n" +#define MSGTR_SKIN_LABEL "Skins:" + +// --- GTK menus +#define MSGTR_MENU_AboutMPlayer "About MPlayer" +#define MSGTR_MENU_Open "Open..." +#define MSGTR_MENU_PlayFile "Play file..." +#define MSGTR_MENU_PlayCD "Play CD..." +#define MSGTR_MENU_PlayVCD "Play VCD..." +#define MSGTR_MENU_PlayDVD "Play DVD..." +#define MSGTR_MENU_PlayURL "Play URL..." +#define MSGTR_MENU_LoadSubtitle "Load subtitle..." +#define MSGTR_MENU_DropSubtitle "Drop subtitle..." +#define MSGTR_MENU_LoadExternAudioFile "Load external audio file..." +#define MSGTR_MENU_Playing "Playing" +#define MSGTR_MENU_Play "Play" +#define MSGTR_MENU_Pause "Pause" +#define MSGTR_MENU_Stop "Stop" +#define MSGTR_MENU_NextStream "Next stream" +#define MSGTR_MENU_PrevStream "Prev stream" +#define MSGTR_MENU_Size "Size" +#define MSGTR_MENU_HalfSize "Half size" +#define MSGTR_MENU_NormalSize "Normal size" +#define MSGTR_MENU_DoubleSize "Double size" +#define MSGTR_MENU_FullScreen "Fullscreen" +#define MSGTR_MENU_CD "CD" +#define MSGTR_MENU_DVD "DVD" +#define MSGTR_MENU_VCD "VCD" +#define MSGTR_MENU_PlayDisc "Open disc..." 
+#define MSGTR_MENU_ShowDVDMenu "Show DVD menu" +#define MSGTR_MENU_Titles "Titles" +#define MSGTR_MENU_Title "Title %2d" +#define MSGTR_MENU_None "(none)" +#define MSGTR_MENU_Chapters "Chapters" +#define MSGTR_MENU_Chapter "Chapter %2d" +#define MSGTR_MENU_AudioLanguages "Audio languages" +#define MSGTR_MENU_SubtitleLanguages "Subtitle languages" +#define MSGTR_MENU_PlayList MSGTR_PlayList +#define MSGTR_MENU_SkinBrowser "Skin browser" +#define MSGTR_MENU_Preferences MSGTR_Preferences +#define MSGTR_MENU_Exit "Exit" +#define MSGTR_MENU_Mute "Mute" +#define MSGTR_MENU_Original "Original" +#define MSGTR_MENU_AspectRatio "Aspect ratio" +#define MSGTR_MENU_AudioTrack "Audio track" +#define MSGTR_MENU_Track "Track %d" +#define MSGTR_MENU_VideoTrack "Video track" +#define MSGTR_MENU_Subtitles "Subtitles" + +// --- equalizer +// Note: If you change MSGTR_EQU_Audio please see if it still fits MSGTR_PREFERENCES_Audio +#define MSGTR_EQU_Audio "Audio" +// Note: If you change MSGTR_EQU_Video please see if it still fits MSGTR_PREFERENCES_Video +#define MSGTR_EQU_Video "Video" +#define MSGTR_EQU_Contrast "Contrast: " +#define MSGTR_EQU_Brightness "Brightness: " +#define MSGTR_EQU_Hue "Hue: " +#define MSGTR_EQU_Saturation "Saturation: " +#define MSGTR_EQU_Front_Left "Front Left" +#define MSGTR_EQU_Front_Right "Front Right" +#define MSGTR_EQU_Back_Left "Rear Left" +#define MSGTR_EQU_Back_Right "Rear Right" +#define MSGTR_EQU_Center "Center" +#define MSGTR_EQU_Bass "Bass" +#define MSGTR_EQU_All "All" +#define MSGTR_EQU_Channel1 "Channel 1:" +#define MSGTR_EQU_Channel2 "Channel 2:" +#define MSGTR_EQU_Channel3 "Channel 3:" +#define MSGTR_EQU_Channel4 "Channel 4:" +#define MSGTR_EQU_Channel5 "Channel 5:" +#define MSGTR_EQU_Channel6 "Channel 6:" + +// --- playlist +#define MSGTR_PLAYLIST_Path "Path" +#define MSGTR_PLAYLIST_Selected "Selected files" +#define MSGTR_PLAYLIST_Files "Files" +#define MSGTR_PLAYLIST_DirectoryTree "Directory tree" + +// --- preferences +#define 
MSGTR_PREFERENCES_Audio MSGTR_EQU_Audio +#define MSGTR_PREFERENCES_Video MSGTR_EQU_Video +#define MSGTR_PREFERENCES_SubtitleOSD "Subtitles & OSD" +#define MSGTR_PREFERENCES_Codecs "Codecs & demuxer" +// Note: If you change MSGTR_PREFERENCES_Misc see if it still fits MSGTR_PREFERENCES_FRAME_Misc +#define MSGTR_PREFERENCES_Misc "Misc" +#define MSGTR_PREFERENCES_None "None" +#define MSGTR_PREFERENCES_DriverDefault "driver default" +#define MSGTR_PREFERENCES_AvailableDrivers "Available drivers:" +#define MSGTR_PREFERENCES_DoNotPlaySound "Do not play sound" +#define MSGTR_PREFERENCES_NormalizeSound "Normalize sound" +#define MSGTR_PREFERENCES_EnableEqualizer "Enable equalizer" +#define MSGTR_PREFERENCES_SoftwareMixer "Enable Software Mixer" +#define MSGTR_PREFERENCES_ExtraStereo "Enable extra stereo" +#define MSGTR_PREFERENCES_Coefficient "Coefficient:" +#define MSGTR_PREFERENCES_AudioDelay "Audio delay" +#define MSGTR_PREFERENCES_DoubleBuffer "Enable double buffering" +#define MSGTR_PREFERENCES_DirectRender "Enable direct rendering" +#define MSGTR_PREFERENCES_FrameDrop "Enable frame dropping" +#define MSGTR_PREFERENCES_HFrameDrop "Enable HARD frame dropping (dangerous)" +#define MSGTR_PREFERENCES_Flip "Flip image upside down" +#define MSGTR_PREFERENCES_Panscan "Panscan: " +#define MSGTR_PREFERENCES_OSD_LEVEL0 "Subtitles only" +#define MSGTR_PREFERENCES_OSD_LEVEL1 "Volume and seek" +#define MSGTR_PREFERENCES_OSD_LEVEL2 "Volume, seek, timer and percentage" +#define MSGTR_PREFERENCES_OSD_LEVEL3 "Volume, seek, timer, percentage and total time" +#define MSGTR_PREFERENCES_Subtitle "Subtitle:" +#define MSGTR_PREFERENCES_SUB_Delay "Delay: " +#define MSGTR_PREFERENCES_SUB_FPS "FPS:" +#define MSGTR_PREFERENCES_SUB_POS "Position: " +#define MSGTR_PREFERENCES_SUB_AutoLoad "Disable subtitle autoloading" +#define MSGTR_PREFERENCES_SUB_Unicode "Unicode subtitle" +#define MSGTR_PREFERENCES_SUB_MPSUB "Convert the given subtitle to MPlayer's subtitle format" +#define 
MSGTR_PREFERENCES_SUB_SRT "Convert the given subtitle to the time based SubViewer (SRT) format" +#define MSGTR_PREFERENCES_SUB_Overlap "Toggle subtitle overlapping" +#define MSGTR_PREFERENCES_SUB_USE_ASS "SSA/ASS subtitle rendering" +#define MSGTR_PREFERENCES_SUB_ASS_USE_MARGINS "Use margins" +#define MSGTR_PREFERENCES_SUB_ASS_TOP_MARGIN "Top: " +#define MSGTR_PREFERENCES_SUB_ASS_BOTTOM_MARGIN "Bottom: " +#define MSGTR_PREFERENCES_Font "Font:" +#define MSGTR_PREFERENCES_FontFactor "Font factor:" +#define MSGTR_PREFERENCES_PostProcess "Enable postprocessing" +#define MSGTR_PREFERENCES_AutoQuality "Auto quality: " +#define MSGTR_PREFERENCES_NI "Use non-interleaved AVI parser" +#define MSGTR_PREFERENCES_IDX "Rebuild index table, if needed" +#define MSGTR_PREFERENCES_VideoCodecFamily "Video codec family:" +#define MSGTR_PREFERENCES_AudioCodecFamily "Audio codec family:" +#define MSGTR_PREFERENCES_FRAME_OSD_Level "OSD level" +#define MSGTR_PREFERENCES_FRAME_Subtitle "Subtitle" +#define MSGTR_PREFERENCES_FRAME_Font "Font" +#define MSGTR_PREFERENCES_FRAME_PostProcess "Postprocessing" +#define MSGTR_PREFERENCES_FRAME_CodecDemuxer "Codec & demuxer" +#define MSGTR_PREFERENCES_FRAME_Cache "Cache" +#define MSGTR_PREFERENCES_FRAME_Misc MSGTR_PREFERENCES_Misc +#define MSGTR_PREFERENCES_Audio_Device "Device:" +#define MSGTR_PREFERENCES_Audio_Mixer "Mixer:" +#define MSGTR_PREFERENCES_Audio_MixerChannel "Mixer channel:" +#define MSGTR_PREFERENCES_Message "Please remember that you need to restart playback for some options to take effect!" 
+#define MSGTR_PREFERENCES_DXR3_VENC "Video encoder:" +#define MSGTR_PREFERENCES_DXR3_LAVC "Use LAVC (FFmpeg)" +#define MSGTR_PREFERENCES_FontEncoding1 "Unicode" +#define MSGTR_PREFERENCES_FontEncoding2 "Western European Languages (ISO-8859-1)" +#define MSGTR_PREFERENCES_FontEncoding3 "Western European Languages with Euro (ISO-8859-15)" +#define MSGTR_PREFERENCES_FontEncoding4 "Slavic/Central European Languages (ISO-8859-2)" +#define MSGTR_PREFERENCES_FontEncoding5 "Esperanto, Galician, Maltese, Turkish (ISO-8859-3)" +#define MSGTR_PREFERENCES_FontEncoding6 "Old Baltic charset (ISO-8859-4)" +#define MSGTR_PREFERENCES_FontEncoding7 "Cyrillic (ISO-8859-5)" +#define MSGTR_PREFERENCES_FontEncoding8 "Arabic (ISO-8859-6)" +#define MSGTR_PREFERENCES_FontEncoding9 "Modern Greek (ISO-8859-7)" +#define MSGTR_PREFERENCES_FontEncoding10 "Turkish (ISO-8859-9)" +#define MSGTR_PREFERENCES_FontEncoding11 "Baltic (ISO-8859-13)" +#define MSGTR_PREFERENCES_FontEncoding12 "Celtic (ISO-8859-14)" +#define MSGTR_PREFERENCES_FontEncoding13 "Hebrew charsets (ISO-8859-8)" +#define MSGTR_PREFERENCES_FontEncoding14 "Russian (KOI8-R)" +#define MSGTR_PREFERENCES_FontEncoding15 "Ukrainian, Belarusian (KOI8-U/RU)" +#define MSGTR_PREFERENCES_FontEncoding16 "Simplified Chinese charset (CP936)" +#define MSGTR_PREFERENCES_FontEncoding17 "Traditional Chinese charset (BIG5)" +#define MSGTR_PREFERENCES_FontEncoding18 "Japanese charsets (SHIFT-JIS)" +#define MSGTR_PREFERENCES_FontEncoding19 "Korean charset (CP949)" +#define MSGTR_PREFERENCES_FontEncoding20 "Thai charset (CP874)" +#define MSGTR_PREFERENCES_FontEncoding21 "Cyrillic Windows (CP1251)" +#define MSGTR_PREFERENCES_FontEncoding22 "Slavic/Central European Windows (CP1250)" +#define MSGTR_PREFERENCES_FontEncoding23 "Arabic Windows (CP1256)" +#define MSGTR_PREFERENCES_FontNoAutoScale "No autoscale" +#define MSGTR_PREFERENCES_FontPropWidth "Proportional to movie width" +#define MSGTR_PREFERENCES_FontPropHeight "Proportional to movie height" +#define 
MSGTR_PREFERENCES_FontPropDiagonal "Proportional to movie diagonal" +#define MSGTR_PREFERENCES_FontEncoding "Encoding:" +#define MSGTR_PREFERENCES_FontBlur "Blur:" +#define MSGTR_PREFERENCES_FontOutLine "Outline:" +#define MSGTR_PREFERENCES_FontTextScale "Text scale:" +#define MSGTR_PREFERENCES_FontOSDScale "OSD scale:" +#define MSGTR_PREFERENCES_Cache "Cache on/off" +#define MSGTR_PREFERENCES_CacheSize "Cache size: " +#define MSGTR_PREFERENCES_LoadFullscreen "Start in fullscreen" +#define MSGTR_PREFERENCES_SaveWinPos "Save window position" +#define MSGTR_PREFERENCES_XSCREENSAVER "Stop XScreenSaver" +#define MSGTR_PREFERENCES_PlayBar "Enable playbar" +#define MSGTR_PREFERENCES_NoIdle "Quit after playing" +#define MSGTR_PREFERENCES_AutoSync "AutoSync on/off" +#define MSGTR_PREFERENCES_AutoSyncValue "Autosync: " +#define MSGTR_PREFERENCES_CDROMDevice "CD-ROM device:" +#define MSGTR_PREFERENCES_DVDDevice "DVD device:" +#define MSGTR_PREFERENCES_FPS "Movie FPS:" +#define MSGTR_PREFERENCES_ShowVideoWindow "Show video window when inactive" +#define MSGTR_PREFERENCES_ArtsBroken "Newer aRts versions are incompatible "\ + "with GTK 1.x and will crash GMPlayer!" + +// -- aboutbox +#define MSGTR_ABOUT_UHU "GUI development sponsored by UHU Linux\n" +#define MSGTR_ABOUT_Contributors "Code and documentation contributors\n" +#define MSGTR_ABOUT_Codecs_libs_contributions "Codecs and third party libraries\n" +#define MSGTR_ABOUT_Translations "Translations\n" +#define MSGTR_ABOUT_Skins "Skins\n" + +// --- messagebox +#define MSGTR_MSGBOX_LABEL_FatalError "Fatal error!" +#define MSGTR_MSGBOX_LABEL_Error "Error!" +#define MSGTR_MSGBOX_LABEL_Warning "Warning!" 
+ +// cfg.c +#define MSGTR_UnableToSaveOption "Unable to save option '%s'.\n" + +// interface.c +#define MSGTR_DeletingSubtitles "Deleting subtitles.\n" +#define MSGTR_LoadingSubtitles "Loading subtitles '%s'.\n" +#define MSGTR_AddingVideoFilter "Adding video filter '%s'.\n" + +// mw.c +#define MSGTR_NotAFile "This does not seem to be a file: %s !\n" + +// ws.c +#define MSGTR_WS_RemoteDisplay "Remote display, disabling XMITSHM.\n" +#define MSGTR_WS_NoXshm "Sorry, your system does not support the X shared memory extension.\n" +#define MSGTR_WS_NoXshape "Sorry, your system does not support the XShape extension.\n" +#define MSGTR_WS_ColorDepthTooLow "Sorry, the color depth is too low.\n" +#define MSGTR_WS_TooManyOpenWindows "There are too many open windows.\n" +#define MSGTR_WS_ShmError "shared memory extension error\n" +#define MSGTR_WS_NotEnoughMemoryDrawBuffer "Sorry, not enough memory to draw buffer.\n" +#define MSGTR_WS_DpmsUnavailable "DPMS not available?\n" +#define MSGTR_WS_DpmsNotEnabled "Could not enable DPMS.\n" +#define MSGTR_WS_XError "An X11 Error has occurred!\n" + +// wsxdnd.c +#define MSGTR_WS_NotAFile "This does not seem to be a file...\n" +#define MSGTR_WS_DDNothing "D&D: Nothing returned!\n" + +// Win32 GUI +#define MSGTR_Close "Close" +#define MSGTR_Default "Defaults" +#define MSGTR_Down "Down" +#define MSGTR_Load "Load" +#define MSGTR_Save "Save" +#define MSGTR_Up "Up" +#define MSGTR_DirectorySelect "Select directory..." +#define MSGTR_PlaylistSave "Save playlist..." +#define MSGTR_PlaylistSelect "Select playlist..." +#define MSGTR_SelectTitleChapter "Select title/chapter..." +#define MSGTR_MENU_DebugConsole "Debug Console" +#define MSGTR_MENU_OnlineHelp "Online Help" +#define MSGTR_MENU_PlayDirectory "Play directory..." 
+#define MSGTR_MENU_SeekBack "Seek Backwards" +#define MSGTR_MENU_SeekForw "Seek Forwards" +#define MSGTR_MENU_ShowHide "Show/Hide" +#define MSGTR_MENU_SubtitlesOnOff "Subtitle Visibility On/Off" +#define MSGTR_PLAYLIST_AddFile "Add File..." +#define MSGTR_PLAYLIST_AddURL "Add URL..." +#define MSGTR_PREFERENCES_Priority "Priority:" +#define MSGTR_PREFERENCES_PriorityHigh "high" +#define MSGTR_PREFERENCES_PriorityLow "low" +#define MSGTR_PREFERENCES_PriorityNormal "normal" +#define MSGTR_PREFERENCES_PriorityNormalAbove "above normal" +#define MSGTR_PREFERENCES_PriorityNormalBelow "below normal" +#define MSGTR_PREFERENCES_ShowInVideoWin "Display in the video window (DirectX only)" + + +// ======================= video output drivers ======================== + +#define MSGTR_VOincompCodec "The selected video_out device is incompatible with this codec.\n"\ + "Try appending the scale filter to your filter list,\n"\ + "e.g. -vf spp,scale instead of -vf spp.\n" +#define MSGTR_VO_GenericError "This error has occurred" +#define MSGTR_VO_UnableToAccess "Unable to access" +#define MSGTR_VO_ExistsButNoDirectory "already exists, but is not a directory." +#define MSGTR_VO_DirExistsButNotWritable "Output directory already exists, but is not writable." +#define MSGTR_VO_DirExistsAndIsWritable "Output directory already exists and is writable." +#define MSGTR_VO_CantCreateDirectory "Unable to create output directory." +#define MSGTR_VO_CantCreateFile "Unable to create output file." +#define MSGTR_VO_DirectoryCreateSuccess "Output directory successfully created." +#define MSGTR_VO_ValueOutOfRange "value out of range" +#define MSGTR_VO_NoValueSpecified "No value specified." 
+#define MSGTR_VO_UnknownSuboptions "unknown suboption(s)" + +// aspect.c +#define MSGTR_LIBVO_ASPECT_NoSuitableNewResFound "[ASPECT] Warning: No suitable new res found!\n" +#define MSGTR_LIBVO_ASPECT_NoNewSizeFoundThatFitsIntoRes "[ASPECT] Error: No new size found that fits into res!\n" + +// font_load_ft.c +#define MSGTR_LIBVO_FONT_LOAD_FT_NewFaceFailed "New_Face failed. Maybe the font path is wrong.\nPlease supply the text font file (~/.mplayer/subfont.ttf).\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_NewMemoryFaceFailed "New_Memory_Face failed..\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_SubFaceFailed "subtitle font: load_sub_face failed.\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_SubFontCharsetFailed "subtitle font: prepare_charset failed.\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_CannotPrepareSubtitleFont "Cannot prepare subtitle font.\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_CannotPrepareOSDFont "Cannot prepare OSD font.\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_CannotGenerateTables "Cannot generate tables.\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_DoneFreeTypeFailed "FT_Done_FreeType failed.\n" +#define MSGTR_LIBVO_FONT_LOAD_FT_FontconfigNoMatch "Fontconfig failed to select a font. Trying without fontconfig...\n" + +// sub.c +#define MSGTR_VO_SUB_Seekbar "Seekbar" +#define MSGTR_VO_SUB_Play "Play" +#define MSGTR_VO_SUB_Pause "Pause" +#define MSGTR_VO_SUB_Stop "Stop" +#define MSGTR_VO_SUB_Rewind "Rewind" +#define MSGTR_VO_SUB_Forward "Forward" +#define MSGTR_VO_SUB_Clock "Clock" +#define MSGTR_VO_SUB_Contrast "Contrast" +#define MSGTR_VO_SUB_Saturation "Saturation" +#define MSGTR_VO_SUB_Volume "Volume" +#define MSGTR_VO_SUB_Brightness "Brightness" +#define MSGTR_VO_SUB_Hue "Hue" +#define MSGTR_VO_SUB_Balance "Balance" + +// vo_3dfx.c +#define MSGTR_LIBVO_3DFX_Only16BppSupported "[VO_3DFX] Only 16bpp supported!" 
+#define MSGTR_LIBVO_3DFX_VisualIdIs "[VO_3DFX] Visual ID is %lx.\n" +#define MSGTR_LIBVO_3DFX_UnableToOpenDevice "[VO_3DFX] Unable to open /dev/3dfx.\n" +#define MSGTR_LIBVO_3DFX_Error "[VO_3DFX] Error: %d.\n" +#define MSGTR_LIBVO_3DFX_CouldntMapMemoryArea "[VO_3DFX] Couldn't map 3dfx memory areas: %p,%p,%d.\n" +#define MSGTR_LIBVO_3DFX_DisplayInitialized "[VO_3DFX] Initialized: %p.\n" +#define MSGTR_LIBVO_3DFX_UnknownSubdevice "[VO_3DFX] Unknown subdevice: %s.\n" + +// vo_aa.c +#define MSGTR_VO_AA_HelpHeader "\n\nHere are the aalib vo_aa suboptions:\n" +#define MSGTR_VO_AA_AdditionalOptions "Additional options vo_aa provides:\n" \ +" help print this help message\n" \ +" osdcolor set OSD color\n subcolor set subtitle color\n" \ +" the color parameters are:\n 0 : normal\n" \ +" 1 : dim\n 2 : bold\n 3 : boldfont\n" \ +" 4 : reverse\n 5 : special\n\n\n" + +// vo_dxr3.c +#define MSGTR_LIBVO_DXR3_UnableToLoadNewSPUPalette "[VO_DXR3] Unable to load new SPU palette!\n" +#define MSGTR_LIBVO_DXR3_UnableToSetPlaymode "[VO_DXR3] Unable to set playmode!\n" +#define MSGTR_LIBVO_DXR3_UnableToSetSubpictureMode "[VO_DXR3] Unable to set subpicture mode!\n" +#define MSGTR_LIBVO_DXR3_UnableToGetTVNorm "[VO_DXR3] Unable to get TV norm!\n" +#define MSGTR_LIBVO_DXR3_AutoSelectedTVNormByFrameRate "[VO_DXR3] Auto-selected TV norm by framerate: " +#define MSGTR_LIBVO_DXR3_UnableToSetTVNorm "[VO_DXR3] Unable to set TV norm!\n" +#define MSGTR_LIBVO_DXR3_SettingUpForNTSC "[VO_DXR3] Setting up for NTSC.\n" +#define MSGTR_LIBVO_DXR3_SettingUpForPALSECAM "[VO_DXR3] Setting up for PAL/SECAM.\n" +#define MSGTR_LIBVO_DXR3_SettingAspectRatioTo43 "[VO_DXR3] Setting aspect ratio to 4:3.\n" +#define MSGTR_LIBVO_DXR3_SettingAspectRatioTo169 "[VO_DXR3] Setting aspect ratio to 16:9.\n" +#define MSGTR_LIBVO_DXR3_OutOfMemory "[VO_DXR3] out of memory\n" +#define MSGTR_LIBVO_DXR3_UnableToAllocateKeycolor "[VO_DXR3] Unable to allocate keycolor!\n" +#define MSGTR_LIBVO_DXR3_UnableToAllocateExactKeycolor 
"[VO_DXR3] Unable to allocate exact keycolor, using closest match (0x%lx).\n" +#define MSGTR_LIBVO_DXR3_Uninitializing "[VO_DXR3] Uninitializing.\n" +#define MSGTR_LIBVO_DXR3_FailedRestoringTVNorm "[VO_DXR3] Failed restoring TV norm!\n" +#define MSGTR_LIBVO_DXR3_EnablingPrebuffering "[VO_DXR3] Enabling prebuffering.\n" +#define MSGTR_LIBVO_DXR3_UsingNewSyncEngine "[VO_DXR3] Using new sync engine.\n" +#define MSGTR_LIBVO_DXR3_UsingOverlay "[VO_DXR3] Using overlay.\n" +#define MSGTR_LIBVO_DXR3_ErrorYouNeedToCompileMplayerWithX11 "[VO_DXR3] Error: Overlay requires compiling with X11 libs/headers installed.\n" +#define MSGTR_LIBVO_DXR3_WillSetTVNormTo "[VO_DXR3] Will set TV norm to: " +#define MSGTR_LIBVO_DXR3_AutoAdjustToMovieFrameRatePALPAL60 "auto-adjust to movie framerate (PAL/PAL-60)" +#define MSGTR_LIBVO_DXR3_AutoAdjustToMovieFrameRatePALNTSC "auto-adjust to movie framerate (PAL/NTSC)" +#define MSGTR_LIBVO_DXR3_UseCurrentNorm "Use current norm." +#define MSGTR_LIBVO_DXR3_UseUnknownNormSuppliedCurrentNorm "Unknown norm supplied. Use current norm." 
+#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingTrying "[VO_DXR3] Error opening %s for writing, trying /dev/em8300 instead.\n" +#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingTryingMV "[VO_DXR3] Error opening %s for writing, trying /dev/em8300_mv instead.\n" +#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingAsWell "[VO_DXR3] Error opening /dev/em8300 for writing as well!\nBailing out.\n" +#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingAsWellMV "[VO_DXR3] Error opening /dev/em8300_mv for writing as well!\nBailing out.\n" +#define MSGTR_LIBVO_DXR3_Opened "[VO_DXR3] Opened: %s.\n" +#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingTryingSP "[VO_DXR3] Error opening %s for writing, trying /dev/em8300_sp instead.\n" +#define MSGTR_LIBVO_DXR3_ErrorOpeningForWritingAsWellSP "[VO_DXR3] Error opening /dev/em8300_sp for writing as well!\nBailing out.\n" +#define MSGTR_LIBVO_DXR3_UnableToOpenDisplayDuringHackSetup "[VO_DXR3] Unable to open display during overlay hack setup!\n" +#define MSGTR_LIBVO_DXR3_UnableToInitX11 "[VO_DXR3] Unable to init X11!\n" +#define MSGTR_LIBVO_DXR3_FailedSettingOverlayAttribute "[VO_DXR3] Failed setting overlay attribute.\n" +#define MSGTR_LIBVO_DXR3_FailedSettingOverlayScreen "[VO_DXR3] Failed setting overlay screen!\nExiting.\n" +#define MSGTR_LIBVO_DXR3_FailedEnablingOverlay "[VO_DXR3] Failed enabling overlay!\nExiting.\n" +#define MSGTR_LIBVO_DXR3_FailedResizingOverlayWindow "[VO_DXR3] Failed resizing overlay window!\n" +#define MSGTR_LIBVO_DXR3_FailedSettingOverlayBcs "[VO_DXR3] Failed setting overlay bcs!\n" +#define MSGTR_LIBVO_DXR3_FailedGettingOverlayYOffsetValues "[VO_DXR3] Failed getting overlay Y-offset values!\nExiting.\n" +#define MSGTR_LIBVO_DXR3_FailedGettingOverlayXOffsetValues "[VO_DXR3] Failed getting overlay X-offset values!\nExiting.\n" +#define MSGTR_LIBVO_DXR3_FailedGettingOverlayXScaleCorrection "[VO_DXR3] Failed getting overlay X scale correction!\nExiting.\n" +#define MSGTR_LIBVO_DXR3_YOffset "[VO_DXR3] Yoffset: %d.\n" +#define 
MSGTR_LIBVO_DXR3_XOffset "[VO_DXR3] Xoffset: %d.\n" +#define MSGTR_LIBVO_DXR3_XCorrection "[VO_DXR3] Xcorrection: %d.\n" +#define MSGTR_LIBVO_DXR3_FailedSetSignalMix "[VO_DXR3] Failed to set signal mix!\n" + +// vo_jpeg.c +#define MSGTR_VO_JPEG_ProgressiveJPEG "Progressive JPEG enabled." +#define MSGTR_VO_JPEG_NoProgressiveJPEG "Progressive JPEG disabled." +#define MSGTR_VO_JPEG_BaselineJPEG "Baseline JPEG enabled." +#define MSGTR_VO_JPEG_NoBaselineJPEG "Baseline JPEG disabled." + +// vo_mga.c +#define MSGTR_LIBVO_MGA_AspectResized "[VO_MGA] aspect(): resized to %dx%d.\n" +#define MSGTR_LIBVO_MGA_Uninit "[VO] uninit!\n" + +// mga_template.c +#define MSGTR_LIBVO_MGA_ErrorInConfigIoctl "[MGA] error in mga_vid_config ioctl (wrong mga_vid.o version?)" +#define MSGTR_LIBVO_MGA_CouldNotGetLumaValuesFromTheKernelModule "[MGA] Could not get luma values from the kernel module!\n" +#define MSGTR_LIBVO_MGA_CouldNotSetLumaValuesFromTheKernelModule "[MGA] Could not set luma values from the kernel module!\n" +#define MSGTR_LIBVO_MGA_ScreenWidthHeightUnknown "[MGA] Screen width/height unknown!\n" +#define MSGTR_LIBVO_MGA_InvalidOutputFormat "[MGA] invalid output format %0X\n" +#define MSGTR_LIBVO_MGA_IncompatibleDriverVersion "[MGA] Your mga_vid driver version is incompatible with this MPlayer version!\n" +#define MSGTR_LIBVO_MGA_CouldntOpen "[MGA] Couldn't open: %s\n" +#define MSGTR_LIBVO_MGA_ResolutionTooHigh "[MGA] Source resolution exceeds 1023x1023 in at least one dimension.\n[MGA] Rescale in software or use -lavdopts lowres=1.\n" +#define MSGTR_LIBVO_MGA_mgavidVersionMismatch "[MGA] mismatch between kernel (%u) and MPlayer (%u) mga_vid driver versions\n" + +// vo_null.c +#define MSGTR_LIBVO_NULL_UnknownSubdevice "[VO_NULL] Unknown subdevice: %s.\n" + +// vo_png.c +#define MSGTR_LIBVO_PNG_Warning1 "[VO_PNG] Warning: compression level set to 0, compression disabled!\n" +#define MSGTR_LIBVO_PNG_Warning2 "[VO_PNG] Info: Use -vo png:z=<n> to set compression level from 0 to 9.\n" 
+#define MSGTR_LIBVO_PNG_Warning3 "[VO_PNG] Info: (0 = no compression, 1 = fastest, lowest - 9 best, slowest compression)\n" +#define MSGTR_LIBVO_PNG_ErrorOpeningForWriting "\n[VO_PNG] Error opening '%s' for writing!\n" +#define MSGTR_LIBVO_PNG_ErrorInCreatePng "[VO_PNG] Error in create_png.\n" + +// vo_pnm.c +#define MSGTR_VO_PNM_ASCIIMode "ASCII mode enabled." +#define MSGTR_VO_PNM_RawMode "Raw mode enabled." +#define MSGTR_VO_PNM_PPMType "Will write PPM files." +#define MSGTR_VO_PNM_PGMType "Will write PGM files." +#define MSGTR_VO_PNM_PGMYUVType "Will write PGMYUV files." + +// vo_sdl.c +#define MSGTR_LIBVO_SDL_CouldntGetAnyAcceptableSDLModeForOutput "[VO_SDL] Couldn't get any acceptable SDL Mode for output.\n" +#define MSGTR_LIBVO_SDL_SetVideoModeFailed "[VO_SDL] set_video_mode: SDL_SetVideoMode failed: %s.\n" +#define MSGTR_LIBVO_SDL_MappingI420ToIYUV "[VO_SDL] Mapping I420 to IYUV.\n" +#define MSGTR_LIBVO_SDL_UnsupportedImageFormat "[VO_SDL] Unsupported image format (0x%X).\n" +#define MSGTR_LIBVO_SDL_InfoPleaseUseVmOrZoom "[VO_SDL] Info - please use -vm or -zoom to switch to the best resolution.\n" +#define MSGTR_LIBVO_SDL_FailedToSetVideoMode "[VO_SDL] Failed to set video mode: %s.\n" +#define MSGTR_LIBVO_SDL_CouldntCreateAYUVOverlay "[VO_SDL] Couldn't create a YUV overlay: %s.\n" +#define MSGTR_LIBVO_SDL_CouldntCreateARGBSurface "[VO_SDL] Couldn't create an RGB surface: %s.\n" +#define MSGTR_LIBVO_SDL_UsingDepthColorspaceConversion "[VO_SDL] Using depth/colorspace conversion, this will slow things down (%ibpp -> %ibpp).\n" +#define MSGTR_LIBVO_SDL_UnsupportedImageFormatInDrawslice "[VO_SDL] Unsupported image format in draw_slice, contact MPlayer developers!\n" +#define MSGTR_LIBVO_SDL_BlitFailed "[VO_SDL] Blit failed: %s.\n" +#define MSGTR_LIBVO_SDL_InitializationFailed "[VO_SDL] SDL initialization failed: %s.\n" +#define MSGTR_LIBVO_SDL_UsingDriver "[VO_SDL] Using driver: %s.\n" + +// vo_svga.c +#define MSGTR_LIBVO_SVGA_ForcedVidmodeNotAvailable 
"[VO_SVGA] Forced vid_mode %d (%s) not available.\n" +#define MSGTR_LIBVO_SVGA_ForcedVidmodeTooSmall "[VO_SVGA] Forced vid_mode %d (%s) too small.\n" +#define MSGTR_LIBVO_SVGA_Vidmode "[VO_SVGA] Vid_mode: %d, %dx%d %dbpp.\n" +#define MSGTR_LIBVO_SVGA_VgasetmodeFailed "[VO_SVGA] Vga_setmode(%d) failed.\n" +#define MSGTR_LIBVO_SVGA_VideoModeIsLinearAndMemcpyCouldBeUsed "[VO_SVGA] Video mode is linear and memcpy could be used for image transfer.\n" +#define MSGTR_LIBVO_SVGA_VideoModeHasHardwareAcceleration "[VO_SVGA] Video mode has hardware acceleration and put_image could be used.\n" +#define MSGTR_LIBVO_SVGA_IfItWorksForYouIWouldLikeToKnow "[VO_SVGA] If it works for you I would like to know.\n[VO_SVGA] (send log with `mplayer test.avi -v -v -v -v &> svga.log`). Thx!\n" +#define MSGTR_LIBVO_SVGA_VideoModeHas "[VO_SVGA] Video mode has %d page(s).\n" +#define MSGTR_LIBVO_SVGA_CenteringImageStartAt "[VO_SVGA] Centering image. Starting at (%d,%d)\n" +#define MSGTR_LIBVO_SVGA_UsingVidix "[VO_SVGA] Using VIDIX. 
w=%i h=%i mw=%i mh=%i\n" + +// vo_tdfx_vid.c +#define MSGTR_LIBVO_TDFXVID_Move "[VO_TDXVID] Move %d(%d) x %d => %d.\n" +#define MSGTR_LIBVO_TDFXVID_AGPMoveFailedToClearTheScreen "[VO_TDFXVID] AGP move failed to clear the screen.\n" +#define MSGTR_LIBVO_TDFXVID_BlitFailed "[VO_TDFXVID] Blit failed.\n" +#define MSGTR_LIBVO_TDFXVID_NonNativeOverlayFormatNeedConversion "[VO_TDFXVID] Non-native overlay format needs conversion.\n" +#define MSGTR_LIBVO_TDFXVID_UnsupportedInputFormat "[VO_TDFXVID] Unsupported input format 0x%x.\n" +#define MSGTR_LIBVO_TDFXVID_OverlaySetupFailed "[VO_TDFXVID] Overlay setup failed.\n" +#define MSGTR_LIBVO_TDFXVID_OverlayOnFailed "[VO_TDFXVID] Overlay on failed.\n" +#define MSGTR_LIBVO_TDFXVID_OverlayReady "[VO_TDFXVID] Overlay ready: %d(%d) x %d @ %d => %d(%d) x %d @ %d.\n" +#define MSGTR_LIBVO_TDFXVID_TextureBlitReady "[VO_TDFXVID] Texture blit ready: %d(%d) x %d @ %d => %d(%d) x %d @ %d.\n" +#define MSGTR_LIBVO_TDFXVID_OverlayOffFailed "[VO_TDFXVID] Overlay off failed\n" +#define MSGTR_LIBVO_TDFXVID_CantOpen "[VO_TDFXVID] Can't open %s: %s.\n" +#define MSGTR_LIBVO_TDFXVID_CantGetCurrentCfg "[VO_TDFXVID] Can't get current configuration: %s.\n" +#define MSGTR_LIBVO_TDFXVID_MemmapFailed "[VO_TDFXVID] Memmap failed !!!!!\n" +#define MSGTR_LIBVO_TDFXVID_GetImageTodo "Get image todo.\n" +#define MSGTR_LIBVO_TDFXVID_AgpMoveFailed "[VO_TDFXVID] AGP move failed.\n" +#define MSGTR_LIBVO_TDFXVID_SetYuvFailed "[VO_TDFXVID] Set YUV failed.\n" +#define MSGTR_LIBVO_TDFXVID_AgpMoveFailedOnYPlane "[VO_TDFXVID] AGP move failed on Y plane.\n" +#define MSGTR_LIBVO_TDFXVID_AgpMoveFailedOnUPlane "[VO_TDFXVID] AGP move failed on U plane.\n" +#define MSGTR_LIBVO_TDFXVID_AgpMoveFailedOnVPlane "[VO_TDFXVID] AGP move failed on V plane.\n" +#define MSGTR_LIBVO_TDFXVID_UnknownFormat "[VO_TDFXVID] unknown format: 0x%x.\n" + +// vo_tdfxfb.c +#define MSGTR_LIBVO_TDFXFB_CantOpen "[VO_TDFXFB] Can't open %s: %s.\n" +#define MSGTR_LIBVO_TDFXFB_ProblemWithFbitgetFscreenInfo 
"[VO_TDFXFB] Problem with FBITGET_FSCREENINFO ioctl: %s.\n" +#define MSGTR_LIBVO_TDFXFB_ProblemWithFbitgetVscreenInfo "[VO_TDFXFB] Problem with FBITGET_VSCREENINFO ioctl: %s.\n" +#define MSGTR_LIBVO_TDFXFB_ThisDriverOnlySupports "[VO_TDFXFB] This driver only supports the 3Dfx Banshee, Voodoo3 and Voodoo 5.\n" +#define MSGTR_LIBVO_TDFXFB_OutputIsNotSupported "[VO_TDFXFB] %d bpp output is not supported.\n" +#define MSGTR_LIBVO_TDFXFB_CouldntMapMemoryAreas "[VO_TDFXFB] Couldn't map memory areas: %s.\n" +#define MSGTR_LIBVO_TDFXFB_BppOutputIsNotSupported "[VO_TDFXFB] %d bpp output is not supported (This should never have happened).\n" +#define MSGTR_LIBVO_TDFXFB_SomethingIsWrongWithControl "[VO_TDFXFB] Eik! Something's wrong with control().\n" +#define MSGTR_LIBVO_TDFXFB_NotEnoughVideoMemoryToPlay "[VO_TDFXFB] Not enough video memory to play this movie. Try at a lower resolution.\n" +#define MSGTR_LIBVO_TDFXFB_ScreenIs "[VO_TDFXFB] Screen is %dx%d at %d bpp, in is %dx%d at %d bpp, norm is %dx%d.\n" + +// vo_tga.c +#define MSGTR_LIBVO_TGA_UnknownSubdevice "[VO_TGA] Unknown subdevice: %s.\n" + +// vo_vesa.c +#define MSGTR_LIBVO_VESA_FatalErrorOccurred "[VO_VESA] Fatal error occurred! Can't continue.\n" +#define MSGTR_LIBVO_VESA_UnknownSubdevice "[VO_VESA] unknown subdevice: '%s'.\n" +#define MSGTR_LIBVO_VESA_YouHaveTooLittleVideoMemory "[VO_VESA] You have too little video memory for this mode:\n[VO_VESA] Required: %08lX present: %08lX.\n" +#define MSGTR_LIBVO_VESA_YouHaveToSpecifyTheCapabilitiesOfTheMonitor "[VO_VESA] You have to specify the capabilities of the monitor. Not changing refresh rate.\n" +#define MSGTR_LIBVO_VESA_UnableToFitTheMode "[VO_VESA] The mode does not fit the monitor limits. 
Not changing refresh rate.\n" +#define MSGTR_LIBVO_VESA_DetectedInternalFatalError "[VO_VESA] Detected internal fatal error: init is called before preinit.\n" +#define MSGTR_LIBVO_VESA_SwitchFlipIsNotSupported "[VO_VESA] The -flip option is not supported.\n" +#define MSGTR_LIBVO_VESA_PossibleReasonNoVbe2BiosFound "[VO_VESA] Possible reason: No VBE2 BIOS found.\n" +#define MSGTR_LIBVO_VESA_FoundVesaVbeBiosVersion "[VO_VESA] Found VESA VBE BIOS Version %x.%x Revision: %x.\n" +#define MSGTR_LIBVO_VESA_VideoMemory "[VO_VESA] Video memory: %u Kb.\n" +#define MSGTR_LIBVO_VESA_Capabilites "[VO_VESA] VESA Capabilities: %s %s %s %s %s.\n" +#define MSGTR_LIBVO_VESA_BelowWillBePrintedOemInfo "[VO_VESA] !!! OEM info will be printed below !!!\n" +#define MSGTR_LIBVO_VESA_YouShouldSee5OemRelatedLines "[VO_VESA] You should see 5 OEM related lines below; If not, you've broken vm86.\n" +#define MSGTR_LIBVO_VESA_OemInfo "[VO_VESA] OEM info: %s.\n" +#define MSGTR_LIBVO_VESA_OemRevision "[VO_VESA] OEM Revision: %x.\n" +#define MSGTR_LIBVO_VESA_OemVendor "[VO_VESA] OEM vendor: %s.\n" +#define MSGTR_LIBVO_VESA_OemProductName "[VO_VESA] OEM Product Name: %s.\n" +#define MSGTR_LIBVO_VESA_OemProductRev "[VO_VESA] OEM Product Rev: %s.\n" +#define MSGTR_LIBVO_VESA_Hint "[VO_VESA] Hint: For working TV-Out you should have plugged in the TV connector\n"\ +"[VO_VESA] before booting since VESA BIOS initializes itself only during POST.\n" +#define MSGTR_LIBVO_VESA_UsingVesaMode "[VO_VESA] Using VESA mode (%u) = %x [%ux%u@%u]\n" +#define MSGTR_LIBVO_VESA_CantInitializeSwscaler "[VO_VESA] Can't initialize software scaler.\n" +#define MSGTR_LIBVO_VESA_CantUseDga "[VO_VESA] Can't use DGA. Force bank switching mode. 
:(\n" +#define MSGTR_LIBVO_VESA_UsingDga "[VO_VESA] Using DGA (physical resources: %08lXh, %08lXh)" +#define MSGTR_LIBVO_VESA_CantUseDoubleBuffering "[VO_VESA] Can't use double buffering: not enough video memory.\n" +#define MSGTR_LIBVO_VESA_CantFindNeitherDga "[VO_VESA] Can find neither DGA nor relocatable window frame.\n" +#define MSGTR_LIBVO_VESA_YouveForcedDga "[VO_VESA] You've forced DGA. Exiting\n" +#define MSGTR_LIBVO_VESA_CantFindValidWindowAddress "[VO_VESA] Can't find valid window address.\n" +#define MSGTR_LIBVO_VESA_UsingBankSwitchingMode "[VO_VESA] Using bank switching mode (physical resources: %08lXh, %08lXh).\n" +#define MSGTR_LIBVO_VESA_CantAllocateTemporaryBuffer "[VO_VESA] Can't allocate temporary buffer.\n" +#define MSGTR_LIBVO_VESA_SorryUnsupportedMode "[VO_VESA] Sorry, unsupported mode -- try -x 640 -zoom.\n" +#define MSGTR_LIBVO_VESA_OhYouReallyHavePictureOnTv "[VO_VESA] Oh you really have a picture on the TV!\n" +#define MSGTR_LIBVO_VESA_CantInitialozeLinuxVideoOverlay "[VO_VESA] Can't initialize Linux Video Overlay.\n" +#define MSGTR_LIBVO_VESA_UsingVideoOverlay "[VO_VESA] Using video overlay: %s.\n" +#define MSGTR_LIBVO_VESA_CantInitializeVidixDriver "[VO_VESA] Can't initialize VIDIX driver.\n" +#define MSGTR_LIBVO_VESA_UsingVidix "[VO_VESA] Using VIDIX.\n" +#define MSGTR_LIBVO_VESA_CantFindModeFor "[VO_VESA] Can't find mode for: %ux%u@%u.\n" +#define MSGTR_LIBVO_VESA_InitializationComplete "[VO_VESA] VESA initialization complete.\n" + +// vesa_lvo.c +#define MSGTR_LIBVO_VESA_ThisBranchIsNoLongerSupported "[VESA_LVO] This branch is no longer supported.\n[VESA_LVO] Please use -vo vesa:vidix instead.\n" +#define MSGTR_LIBVO_VESA_CouldntOpen "[VESA_LVO] Couldn't open: '%s'\n" +#define MSGTR_LIBVO_VESA_InvalidOutputFormat "[VESA_LVI] Invalid output format: %s(%0X)\n" +#define MSGTR_LIBVO_VESA_IncompatibleDriverVersion "[VESA_LVO] Your fb_vid driver version is incompatible with this MPlayer version!\n" + +// vo_x11.c +#define 
MSGTR_LIBVO_X11_DrawFrameCalled "[VO_X11] draw_frame() called!!!!!!\n" + +// vo_xv.c +#define MSGTR_LIBVO_XV_DrawFrameCalled "[VO_XV] draw_frame() called!!!!!!\n" +#define MSGTR_LIBVO_XV_SharedMemoryNotSupported "[VO_XV] Shared memory not supported\nReverting to normal Xv.\n" +#define MSGTR_LIBVO_XV_XvNotSupportedByX11 "[VO_XV] Sorry, Xv not supported by this X11 version/driver\n[VO_XV] ******** Try with -vo x11 or -vo sdl *********\n" +#define MSGTR_LIBVO_XV_XvQueryAdaptorsFailed "[VO_XV] XvQueryAdaptors failed.\n" +#define MSGTR_LIBVO_XV_InvalidPortParameter "[VO_XV] Invalid port parameter, overriding with port 0.\n" +#define MSGTR_LIBVO_XV_CouldNotGrabPort "[VO_XV] Could not grab port %i.\n" +#define MSGTR_LIBVO_XV_CouldNotFindFreePort "[VO_XV] Could not find free Xvideo port - maybe another process is already\n"\ +"[VO_XV] using it. Close all video applications, and try again. If that does\n"\ +"[VO_XV] not help, see 'mplayer -vo help' for other (non-xv) video out drivers.\n" +#define MSGTR_LIBVO_XV_NoXvideoSupport "[VO_XV] It seems there is no Xvideo support for your video card available.\n"\ +"[VO_XV] Run 'xvinfo' to verify its Xv support and read\n"\ +"[VO_XV] DOCS/HTML/en/video.html#xv!\n"\ +"[VO_XV] See 'mplayer -vo help' for other (non-xv) video out drivers.\n"\ +"[VO_XV] Try -vo x11.\n" +#define MSGTR_VO_XV_ImagedimTooHigh "Source image dimensions are too high: %ux%u (maximum is %ux%u)\n" + +// vo_yuv4mpeg.c +#define MSGTR_VO_YUV4MPEG_InterlacedHeightDivisibleBy4 "Interlaced mode requires image height to be divisible by 4." +#define MSGTR_VO_YUV4MPEG_InterlacedLineBufAllocFail "Unable to allocate line buffer for interlaced mode." +#define MSGTR_VO_YUV4MPEG_WidthDivisibleBy2 "Image width must be divisible by 2." +#define MSGTR_VO_YUV4MPEG_OutFileOpenError "Can't get memory or file handle to write \"%s\"!" +#define MSGTR_VO_YUV4MPEG_OutFileWriteError "Error writing image to output!" 
+#define MSGTR_VO_YUV4MPEG_UnknownSubDev "Unknown subdevice: %s" +#define MSGTR_VO_YUV4MPEG_InterlacedTFFMode "Using interlaced output mode, top-field first." +#define MSGTR_VO_YUV4MPEG_InterlacedBFFMode "Using interlaced output mode, bottom-field first." +#define MSGTR_VO_YUV4MPEG_ProgressiveMode "Using (default) progressive frame mode." + +// vosub_vidix.c +#define MSGTR_LIBVO_SUB_VIDIX_CantStartPlayback "[VO_SUB_VIDIX] Can't start playback: %s\n" +#define MSGTR_LIBVO_SUB_VIDIX_CantStopPlayback "[VO_SUB_VIDIX] Can't stop playback: %s\n" +#define MSGTR_LIBVO_SUB_VIDIX_InterleavedUvForYuv410pNotSupported "[VO_SUB_VIDIX] Interleaved UV for YUV410P not supported.\n" +#define MSGTR_LIBVO_SUB_VIDIX_DummyVidixdrawsliceWasCalled "[VO_SUB_VIDIX] Dummy vidix_draw_slice() was called.\n" +#define MSGTR_LIBVO_SUB_VIDIX_DummyVidixdrawframeWasCalled "[VO_SUB_VIDIX] Dummy vidix_draw_frame() was called.\n" +#define MSGTR_LIBVO_SUB_VIDIX_UnsupportedFourccForThisVidixDriver "[VO_SUB_VIDIX] Unsupported FourCC for this VIDIX driver: %x (%s).\n" +#define MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedResolution "[VO_SUB_VIDIX] Video server has unsupported resolution (%dx%d), supported: %dx%d-%dx%d.\n" +#define MSGTR_LIBVO_SUB_VIDIX_VideoServerHasUnsupportedColorDepth "[VO_SUB_VIDIX] Video server has unsupported color depth by vidix (%d).\n" +#define MSGTR_LIBVO_SUB_VIDIX_DriverCantUpscaleImage "[VO_SUB_VIDIX] VIDIX driver can't upscale image (%d%d -> %d%d).\n" +#define MSGTR_LIBVO_SUB_VIDIX_DriverCantDownscaleImage "[VO_SUB_VIDIX] VIDIX driver can't downscale image (%d%d -> %d%d).\n" +#define MSGTR_LIBVO_SUB_VIDIX_CantConfigurePlayback "[VO_SUB_VIDIX] Can't configure playback: %s.\n" +#define MSGTR_LIBVO_SUB_VIDIX_YouHaveWrongVersionOfVidixLibrary "[VO_SUB_VIDIX] You have the wrong version of the VIDIX library.\n" +#define MSGTR_LIBVO_SUB_VIDIX_CouldntFindWorkingVidixDriver "[VO_SUB_VIDIX] Couldn't find working VIDIX driver.\n" +#define MSGTR_LIBVO_SUB_VIDIX_CouldntGetCapability 
"[VO_SUB_VIDIX] Couldn't get capability: %s.\n" + +// x11_common.c +#define MSGTR_EwmhFullscreenStateFailed "\nX11: Couldn't send EWMH fullscreen event!\n" +#define MSGTR_CouldNotFindXScreenSaver "xscreensaver_disable: Could not find XScreenSaver window.\n" +#define MSGTR_SelectedVideoMode "XF86VM: Selected video mode %dx%d for image size %dx%d.\n" + +#define MSGTR_InsertingAfVolume "[Mixer] No hardware mixing, inserting volume filter.\n" +#define MSGTR_NoVolume "[Mixer] No volume control available.\n" +#define MSGTR_NoBalance "[Mixer] No balance control available.\n" + +// old vo drivers that have been replaced +#define MSGTR_VO_PGM_HasBeenReplaced "The pgm video output driver has been replaced by -vo pnm:pgmyuv.\n" +#define MSGTR_VO_MD5_HasBeenReplaced "The md5 video output driver has been replaced by -vo md5sum.\n" +#define MSGTR_VO_GL2_HasBeenRenamed "The gl2 video output driver has been renamed to -vo gl_tiled, but you really should be using -vo gl instead.\n" + + +// ======================= audio output drivers ======================== + +// audio_out.c +#define MSGTR_AO_ALSA9_1x_Removed "audio_out: alsa9 and alsa1x modules were removed, use -ao alsa instead.\n" +#define MSGTR_AO_NoSuchDriver "No such audio driver '%.*s'\n" +#define MSGTR_AO_FailedInit "Failed to initialize audio driver '%s'\n" + +// ao_oss.c +#define MSGTR_AO_OSS_CantOpenMixer "[AO OSS] audio_setup: Can't open mixer device %s: %s\n" +#define MSGTR_AO_OSS_ChanNotFound "[AO OSS] audio_setup: Audio card mixer does not have channel '%s', using default.\n" +#define MSGTR_AO_OSS_CantOpenDev "[AO OSS] audio_setup: Can't open audio device %s: %s\n" +#define MSGTR_AO_OSS_CantMakeFd "[AO OSS] audio_setup: Can't make file descriptor blocking: %s\n" +#define MSGTR_AO_OSS_CantSet "[AO OSS] Can't set audio device %s to %s output, trying %s...\n" +#define MSGTR_AO_OSS_CantSetChans "[AO OSS] audio_setup: Failed to set audio device to %d channels.\n" +#define MSGTR_AO_OSS_CantUseGetospace "[AO OSS] 
audio_setup: driver doesn't support SNDCTL_DSP_GETOSPACE :-(\n" +#define MSGTR_AO_OSS_CantUseSelect "[AO OSS]\n *** Your audio driver DOES NOT support select() ***\n Recompile MPlayer with #undef HAVE_AUDIO_SELECT in config.h !\n\n" +#define MSGTR_AO_OSS_CantReopen "[AO OSS]\nFatal error: *** CANNOT RE-OPEN / RESET AUDIO DEVICE *** %s\n" +#define MSGTR_AO_OSS_UnknownUnsupportedFormat "[AO OSS] Unknown/Unsupported OSS format: %x.\n" + +// ao_arts.c +#define MSGTR_AO_ARTS_CantInit "[AO ARTS] %s\n" +#define MSGTR_AO_ARTS_ServerConnect "[AO ARTS] Connected to sound server.\n" +#define MSGTR_AO_ARTS_CantOpenStream "[AO ARTS] Unable to open a stream.\n" +#define MSGTR_AO_ARTS_StreamOpen "[AO ARTS] Stream opened.\n" +#define MSGTR_AO_ARTS_BufferSize "[AO ARTS] buffer size: %d\n" + +// ao_dxr2.c +#define MSGTR_AO_DXR2_SetVolFailed "[AO DXR2] Setting volume to %d failed.\n" +#define MSGTR_AO_DXR2_UnsupSamplerate "[AO DXR2] %d Hz not supported, try to resample.\n" + +// ao_esd.c +#define MSGTR_AO_ESD_CantOpenSound "[AO ESD] esd_open_sound failed: %s\n" +#define MSGTR_AO_ESD_LatencyInfo "[AO ESD] latency: [server: %0.2fs, net: %0.2fs] (adjust %0.2fs)\n" +#define MSGTR_AO_ESD_CantOpenPBStream "[AO ESD] failed to open ESD playback stream: %s\n" + +// ao_mpegpes.c +#define MSGTR_AO_MPEGPES_CantSetMixer "[AO MPEGPES] DVB audio set mixer failed: %s.\n" +#define MSGTR_AO_MPEGPES_UnsupSamplerate "[AO MPEGPES] %d Hz not supported, try to resample.\n" + +// ao_pcm.c +#define MSGTR_AO_PCM_FileInfo "[AO PCM] File: %s (%s)\nPCM: Samplerate: %iHz Channels: %s Format %s\n" +#define MSGTR_AO_PCM_HintInfo "[AO PCM] Info: Faster dumping is achieved with -benchmark -vc null -vo null -ao pcm:fast\n[AO PCM] Info: To write WAVE files use -ao pcm:waveheader (default).\n" +#define MSGTR_AO_PCM_CantOpenOutputFile "[AO PCM] Failed to open %s for writing!\n" + +// ao_sdl.c +#define MSGTR_AO_SDL_INFO "[AO SDL] Samplerate: %iHz Channels: %s Format %s\n" +#define MSGTR_AO_SDL_DriverInfo "[AO SDL] using 
%s audio driver.\n" +#define MSGTR_AO_SDL_UnsupportedAudioFmt "[AO SDL] Unsupported audio format: 0x%x.\n" +#define MSGTR_AO_SDL_CantInit "[AO SDL] SDL Audio initialization failed: %s\n" +#define MSGTR_AO_SDL_CantOpenAudio "[AO SDL] Unable to open audio: %s\n" + +// ao_sgi.c +#define MSGTR_AO_SGI_INFO "[AO SGI] control.\n" +#define MSGTR_AO_SGI_InitInfo "[AO SGI] init: Samplerate: %iHz Channels: %s Format %s\n" +#define MSGTR_AO_SGI_InvalidDevice "[AO SGI] play: invalid device.\n" +#define MSGTR_AO_SGI_CantSetParms_Samplerate "[AO SGI] init: setparams failed: %s\nCould not set desired samplerate.\n" +#define MSGTR_AO_SGI_CantSetAlRate "[AO SGI] init: AL_RATE was not accepted on the given resource.\n" +#define MSGTR_AO_SGI_CantGetParms "[AO SGI] init: getparams failed: %s\n" +#define MSGTR_AO_SGI_SampleRateInfo "[AO SGI] init: samplerate is now %f (desired rate is %f)\n" +#define MSGTR_AO_SGI_InitConfigError "[AO SGI] init: %s\n" +#define MSGTR_AO_SGI_InitOpenAudioFailed "[AO SGI] init: Unable to open audio channel: %s\n" +#define MSGTR_AO_SGI_Uninit "[AO SGI] uninit: ...\n" +#define MSGTR_AO_SGI_Reset "[AO SGI] reset: ...\n" +#define MSGTR_AO_SGI_PauseInfo "[AO SGI] audio_pause: ...\n" +#define MSGTR_AO_SGI_ResumeInfo "[AO SGI] audio_resume: ...\n" + +// ao_sun.c +#define MSGTR_AO_SUN_RtscSetinfoFailed "[AO SUN] rtsc: SETINFO failed.\n" +#define MSGTR_AO_SUN_RtscWriteFailed "[AO SUN] rtsc: write failed.\n" +#define MSGTR_AO_SUN_CantOpenAudioDev "[AO SUN] Can't open audio device %s, %s -> nosound.\n" +#define MSGTR_AO_SUN_UnsupSampleRate "[AO SUN] audio_setup: your card doesn't support %d channel, %s, %d Hz samplerate.\n" +#define MSGTR_AO_SUN_CantUseSelect "[AO SUN]\n *** Your audio driver DOES NOT support select() ***\nRecompile MPlayer with #undef HAVE_AUDIO_SELECT in config.h !\n\n" +#define MSGTR_AO_SUN_CantReopenReset "[AO SUN]\nFatal error: *** CANNOT REOPEN / RESET AUDIO DEVICE (%s) ***\n" + +// ao_alsa.c +#define 
MSGTR_AO_ALSA_InvalidMixerIndexDefaultingToZero "[AO_ALSA] Invalid mixer index. Defaulting to 0.\n" +#define MSGTR_AO_ALSA_MixerOpenError "[AO_ALSA] Mixer open error: %s\n" +#define MSGTR_AO_ALSA_MixerAttachError "[AO_ALSA] Mixer attach %s error: %s\n" +#define MSGTR_AO_ALSA_MixerRegisterError "[AO_ALSA] Mixer register error: %s\n" +#define MSGTR_AO_ALSA_MixerLoadError "[AO_ALSA] Mixer load error: %s\n" +#define MSGTR_AO_ALSA_UnableToFindSimpleControl "[AO_ALSA] Unable to find simple control '%s',%i.\n" +#define MSGTR_AO_ALSA_ErrorSettingLeftChannel "[AO_ALSA] Error setting left channel, %s\n" +#define MSGTR_AO_ALSA_ErrorSettingRightChannel "[AO_ALSA] Error setting right channel, %s\n" +#define MSGTR_AO_ALSA_CommandlineHelp "\n[AO_ALSA] -ao alsa commandline help:\n"\ +"[AO_ALSA] Example: mplayer -ao alsa:device=hw=0.3\n"\ +"[AO_ALSA] Sets first card fourth hardware device.\n\n"\ +"[AO_ALSA] Options:\n"\ +"[AO_ALSA] noblock\n"\ +"[AO_ALSA] Opens device in non-blocking mode.\n"\ +"[AO_ALSA] device=<device-name>\n"\ +"[AO_ALSA] Sets device (change , to . 
and : to =)\n" +#define MSGTR_AO_ALSA_ChannelsNotSupported "[AO_ALSA] %d channels are not supported.\n" +#define MSGTR_AO_ALSA_OpenInNonblockModeFailed "[AO_ALSA] Open in nonblock-mode failed, trying to open in block-mode.\n" +#define MSGTR_AO_ALSA_PlaybackOpenError "[AO_ALSA] Playback open error: %s\n" +#define MSGTR_AO_ALSA_ErrorSetBlockMode "[AL_ALSA] Error setting block-mode %s.\n" +#define MSGTR_AO_ALSA_UnableToGetInitialParameters "[AO_ALSA] Unable to get initial parameters: %s\n" +#define MSGTR_AO_ALSA_UnableToSetAccessType "[AO_ALSA] Unable to set access type: %s\n" +#define MSGTR_AO_ALSA_FormatNotSupportedByHardware "[AO_ALSA] Format %s is not supported by hardware, trying default.\n" +#define MSGTR_AO_ALSA_UnableToSetFormat "[AO_ALSA] Unable to set format: %s\n" +#define MSGTR_AO_ALSA_UnableToSetChannels "[AO_ALSA] Unable to set channels: %s\n" +#define MSGTR_AO_ALSA_UnableToDisableResampling "[AO_ALSA] Unable to disable resampling: %s\n" +#define MSGTR_AO_ALSA_UnableToSetSamplerate2 "[AO_ALSA] Unable to set samplerate-2: %s\n" +#define MSGTR_AO_ALSA_UnableToSetBufferTimeNear "[AO_ALSA] Unable to set buffer time near: %s\n" +#define MSGTR_AO_ALSA_UnableToGetPeriodSize "[AO ALSA] Unable to get period size: %s\n" +#define MSGTR_AO_ALSA_UnableToSetPeriods "[AO_ALSA] Unable to set periods: %s\n" +#define MSGTR_AO_ALSA_UnableToSetHwParameters "[AO_ALSA] Unable to set hw-parameters: %s\n" +#define MSGTR_AO_ALSA_UnableToGetBufferSize "[AO_ALSA] Unable to get buffersize: %s\n" +#define MSGTR_AO_ALSA_UnableToGetSwParameters "[AO_ALSA] Unable to get sw-parameters: %s\n" +#define MSGTR_AO_ALSA_UnableToSetSwParameters "[AO_ALSA] Unable to set sw-parameters: %s\n" +#define MSGTR_AO_ALSA_UnableToGetBoundary "[AO_ALSA] Unable to get boundary: %s\n" +#define MSGTR_AO_ALSA_UnableToSetStartThreshold "[AO_ALSA] Unable to set start threshold: %s\n" +#define MSGTR_AO_ALSA_UnableToSetStopThreshold "[AO_ALSA] Unable to set stop threshold: %s\n" +#define 
MSGTR_AO_ALSA_UnableToSetSilenceSize "[AO_ALSA] Unable to set silence size: %s\n" +#define MSGTR_AO_ALSA_PcmCloseError "[AO_ALSA] pcm close error: %s\n" +#define MSGTR_AO_ALSA_NoHandlerDefined "[AO_ALSA] No handler defined!\n" +#define MSGTR_AO_ALSA_PcmPrepareError "[AO_ALSA] pcm prepare error: %s\n" +#define MSGTR_AO_ALSA_PcmPauseError "[AO_ALSA] pcm pause error: %s\n" +#define MSGTR_AO_ALSA_PcmDropError "[AO_ALSA] pcm drop error: %s\n" +#define MSGTR_AO_ALSA_PcmResumeError "[AO_ALSA] pcm resume error: %s\n" +#define MSGTR_AO_ALSA_DeviceConfigurationError "[AO_ALSA] Device configuration error." +#define MSGTR_AO_ALSA_PcmInSuspendModeTryingResume "[AO_ALSA] Pcm in suspend mode, trying to resume.\n" +#define MSGTR_AO_ALSA_WriteError "[AO_ALSA] Write error: %s\n" +#define MSGTR_AO_ALSA_TryingToResetSoundcard "[AO_ALSA] Trying to reset soundcard.\n" +#define MSGTR_AO_ALSA_CannotGetPcmStatus "[AO_ALSA] Cannot get pcm status: %s\n" + +// ao_plugin.c +#define MSGTR_AO_PLUGIN_InvalidPlugin "[AO PLUGIN] invalid plugin: %s\n" + + +// ======================= audio filters ================================ + +// af_scaletempo.c +#define MSGTR_AF_ValueOutOfRange MSGTR_VO_ValueOutOfRange + +// af_ladspa.c +#define MSGTR_AF_LADSPA_AvailableLabels "available labels in" +#define MSGTR_AF_LADSPA_WarnNoInputs "WARNING! This LADSPA plugin has no audio inputs.\n The incoming audio signal will be lost." +#define MSGTR_AF_LADSPA_ErrMultiChannel "Multi-channel (>2) plugins are not supported (yet).\n Use only mono and stereo plugins." +#define MSGTR_AF_LADSPA_ErrNoOutputs "This LADSPA plugin has no audio outputs." +#define MSGTR_AF_LADSPA_ErrInOutDiff "The number of audio inputs and audio outputs of the LADSPA plugin differ." +#define MSGTR_AF_LADSPA_ErrFailedToLoad "failed to load" +#define MSGTR_AF_LADSPA_ErrNoDescriptor "Couldn't find ladspa_descriptor() function in the specified library file." +#define MSGTR_AF_LADSPA_ErrLabelNotFound "Couldn't find label in plugin library." 
+#define MSGTR_AF_LADSPA_ErrNoSuboptions "No suboptions specified." +#define MSGTR_AF_LADSPA_ErrNoLibFile "No library file specified." +#define MSGTR_AF_LADSPA_ErrNoLabel "No filter label specified." +#define MSGTR_AF_LADSPA_ErrNotEnoughControls "Not enough controls specified on the command line." +#define MSGTR_AF_LADSPA_ErrControlBelow "%s: Input control #%d is below lower boundary of %0.4f.\n" +#define MSGTR_AF_LADSPA_ErrControlAbove "%s: Input control #%d is above upper boundary of %0.4f.\n" + +// format.c +#define MSGTR_AF_FORMAT_UnknownFormat "unknown format " + + +// ========================== INPUT ========================================= + +// joystick.c +#define MSGTR_INPUT_JOYSTICK_CantOpen "Can't open joystick device %s: %s\n" +#define MSGTR_INPUT_JOYSTICK_ErrReading "Error while reading joystick device: %s\n" +#define MSGTR_INPUT_JOYSTICK_LoosingBytes "Joystick: We lose %d bytes of data\n" +#define MSGTR_INPUT_JOYSTICK_WarnLostSync "Joystick: warning init event, we have lost sync with driver.\n" +#define MSGTR_INPUT_JOYSTICK_WarnUnknownEvent "Joystick warning unknown event type %d\n" + +// appleir.c +#define MSGTR_INPUT_APPLE_IR_CantOpen "Can't open Apple IR device: %s\n" + +// input.c +#define MSGTR_INPUT_INPUT_ErrCantRegister2ManyCmdFds "Too many command file descriptors, cannot register file descriptor %d.\n" +#define MSGTR_INPUT_INPUT_ErrCantRegister2ManyKeyFds "Too many key file descriptors, cannot register file descriptor %d.\n" +#define MSGTR_INPUT_INPUT_ErrArgMustBeInt "Command %s: argument %d isn't an integer.\n" +#define MSGTR_INPUT_INPUT_ErrArgMustBeFloat "Command %s: argument %d isn't a float.\n" +#define MSGTR_INPUT_INPUT_ErrUnterminatedArg "Command %s: argument %d is unterminated.\n" +#define MSGTR_INPUT_INPUT_ErrUnknownArg "Unknown argument %d\n" +#define MSGTR_INPUT_INPUT_Err2FewArgs "Command %s requires at least %d arguments, we found only %d so far.\n" +#define MSGTR_INPUT_INPUT_ErrReadingCmdFd "Error while reading command file 
descriptor %d: %s\n" +#define MSGTR_INPUT_INPUT_ErrCmdBufferFullDroppingContent "Command buffer of file descriptor %d is full: dropping content.\n" +#define MSGTR_INPUT_INPUT_ErrInvalidCommandForKey "Invalid command for bound key %s" +#define MSGTR_INPUT_INPUT_ErrSelect "Select error: %s\n" +#define MSGTR_INPUT_INPUT_ErrOnKeyInFd "Error on key input file descriptor %d\n" +#define MSGTR_INPUT_INPUT_ErrDeadKeyOnFd "Dead key input on file descriptor %d\n" +#define MSGTR_INPUT_INPUT_Err2ManyKeyDowns "Too many key down events at the same time\n" +#define MSGTR_INPUT_INPUT_ErrOnCmdFd "Error on command file descriptor %d\n" +#define MSGTR_INPUT_INPUT_ErrReadingInputConfig "Error while reading input config file %s: %s\n" +#define MSGTR_INPUT_INPUT_ErrUnknownKey "Unknown key '%s'\n" +#define MSGTR_INPUT_INPUT_ErrUnfinishedBinding "Unfinished binding %s\n" +#define MSGTR_INPUT_INPUT_ErrBuffer2SmallForKeyName "Buffer is too small for this key name: %s\n" +#define MSGTR_INPUT_INPUT_ErrNoCmdForKey "No command found for key %s" +#define MSGTR_INPUT_INPUT_ErrBuffer2SmallForCmd "Buffer is too small for command %s\n" +#define MSGTR_INPUT_INPUT_ErrWhyHere "What are we doing here?\n" +#define MSGTR_INPUT_INPUT_ErrCantInitJoystick "Can't init input joystick\n" +#define MSGTR_INPUT_INPUT_ErrCantOpenFile "Can't open %s: %s\n" +#define MSGTR_INPUT_INPUT_ErrCantInitAppleRemote "Can't init Apple Remote.\n" + +// lirc.c +#define MSGTR_LIRCopenfailed "Failed to open LIRC support. You will not be able to use your remote control.\n" +#define MSGTR_LIRCcfgerr "Failed to read LIRC config file %s.\n" + + +// ========================== LIBMPDEMUX =================================== + +// muxer.c, muxer_*.c +#define MSGTR_TooManyStreams "Too many streams!" 
+#define MSGTR_RawMuxerOnlyOneStream "Rawaudio muxer supports only one audio stream!\n" +#define MSGTR_IgnoringVideoStream "Ignoring video stream!\n" +#define MSGTR_UnknownStreamType "Warning, unknown stream type: %d\n" +#define MSGTR_WarningLenIsntDivisible "Warning, len isn't divisible by samplesize!\n" +#define MSGTR_MuxbufMallocErr "Muxer frame buffer cannot allocate memory!\n" +#define MSGTR_MuxbufReallocErr "Muxer frame buffer cannot reallocate memory!\n" +#define MSGTR_WritingHeader "Writing header...\n" +#define MSGTR_WritingTrailer "Writing index...\n" + +// demuxer.c, demux_*.c +#define MSGTR_AudioStreamRedefined "WARNING: Audio stream header %d redefined.\n" +#define MSGTR_VideoStreamRedefined "WARNING: Video stream header %d redefined.\n" +#define MSGTR_TooManyAudioInBuffer "\nToo many audio packets in the buffer: (%d in %d bytes).\n" +#define MSGTR_TooManyVideoInBuffer "\nToo many video packets in the buffer: (%d in %d bytes).\n" +#define MSGTR_MaybeNI "Maybe you are playing a non-interleaved stream/file or the codec failed?\n" \ + "For AVI files, try to force non-interleaved mode with the -ni option.\n" +#define MSGTR_WorkAroundBlockAlignHeaderBug "AVI: Working around CBR-MP3 nBlockAlign header bug!\n" +#define MSGTR_SwitchToNi "\nBadly interleaved AVI file detected - switching to -ni mode...\n" +#define MSGTR_InvalidAudioStreamNosound "AVI: invalid audio stream ID: %d - ignoring (nosound)\n" +#define MSGTR_InvalidAudioStreamUsingDefault "AVI: invalid video stream ID: %d - ignoring (using default)\n" +#define MSGTR_ON2AviFormat "ON2 AVI format" +#define MSGTR_Detected_XXX_FileFormat "%s file format detected.\n" +#define MSGTR_DetectedAudiofile "Audio file detected.\n" +#define MSGTR_InvalidMPEGES "Invalid MPEG-ES stream??? 
Contact the author, it may be a bug :(\n" +#define MSGTR_FormatNotRecognized "============ Sorry, this file format is not recognized/supported =============\n"\ + "=== If this file is an AVI, ASF or MPEG stream, please contact the author! ===\n" +#define MSGTR_SettingProcessPriority "Setting process priority: %s\n" +#define MSGTR_FilefmtFourccSizeFpsFtime "[V] filefmt:%d fourcc:0x%X size:%dx%d fps:%5.3f ftime:=%6.4f\n" +#define MSGTR_CannotInitializeMuxer "Cannot initialize muxer." +#define MSGTR_MissingVideoStream "No video stream found.\n" +#define MSGTR_MissingAudioStream "No audio stream found -> no sound.\n" +#define MSGTR_MissingVideoStreamBug "Missing video stream!? Contact the author, it may be a bug :(\n" + +#define MSGTR_DoesntContainSelectedStream "demux: File doesn't contain the selected audio or video stream.\n" + +#define MSGTR_NI_Forced "Forced" +#define MSGTR_NI_Detected "Detected" +#define MSGTR_NI_Message "%s NON-INTERLEAVED AVI file format.\n" + +#define MSGTR_UsingNINI "Using NON-INTERLEAVED broken AVI file format.\n" +#define MSGTR_CouldntDetFNo "Could not determine number of frames (for absolute seek).\n" +#define MSGTR_CantSeekRawAVI "Cannot seek in raw AVI streams. (Index required, try with the -idx switch.)\n" +#define MSGTR_CantSeekFile "Cannot seek in this file.\n" + +#define MSGTR_MOVcomprhdr "MOV: Compressed headers support requires ZLIB!\n" +#define MSGTR_MOVvariableFourCC "MOV: WARNING: Variable FourCC detected!?\n" +#define MSGTR_MOVtooManyTrk "MOV: WARNING: too many tracks" +#define MSGTR_DetectedTV "TV detected! 
;-)\n" +#define MSGTR_ErrorOpeningOGGDemuxer "Unable to open the Ogg demuxer.\n" +#define MSGTR_CannotOpenAudioStream "Cannot open audio stream: %s\n" +#define MSGTR_CannotOpenSubtitlesStream "Cannot open subtitle stream: %s\n" +#define MSGTR_OpeningAudioDemuxerFailed "Failed to open audio demuxer: %s\n" +#define MSGTR_OpeningSubtitlesDemuxerFailed "Failed to open subtitle demuxer: %s\n" +#define MSGTR_TVInputNotSeekable "TV input is not seekable! (Seeking will probably be for changing channels ;)\n" +#define MSGTR_DemuxerInfoChanged "Demuxer info %s changed to %s\n" +#define MSGTR_ClipInfo "Clip info:\n" + +#define MSGTR_LeaveTelecineMode "\ndemux_mpg: 30000/1001fps NTSC content detected, switching framerate.\n" +#define MSGTR_EnterTelecineMode "\ndemux_mpg: 24000/1001fps progressive NTSC content detected, switching framerate.\n" + +#define MSGTR_CacheFill "\rCache fill: %5.2f%% (%"PRId64" bytes) " +#define MSGTR_NoBindFound "No bind found for key '%s'.\n" +#define MSGTR_FailedToOpen "Failed to open %s.\n" + +#define MSGTR_VideoID "[%s] Video stream found, -vid %d\n" +#define MSGTR_AudioID "[%s] Audio stream found, -aid %d\n" +#define MSGTR_SubtitleID "[%s] Subtitle stream found, -sid %d\n" + +// asfheader.c +#define MSGTR_MPDEMUX_ASFHDR_HeaderSizeOver1MB "FATAL: header size bigger than 1 MB (%d)!\nPlease contact MPlayer authors, and upload/send this file.\n" +#define MSGTR_MPDEMUX_ASFHDR_HeaderMallocFailed "Could not allocate %d bytes for header.\n" +#define MSGTR_MPDEMUX_ASFHDR_EOFWhileReadingHeader "EOF while reading ASF header, broken/incomplete file?\n" +#define MSGTR_MPDEMUX_ASFHDR_DVRWantsLibavformat "DVR will probably only work with libavformat, try -demuxer 35 if you have problems\n" +#define MSGTR_MPDEMUX_ASFHDR_NoDataChunkAfterHeader "No data chunk following header!\n" +#define MSGTR_MPDEMUX_ASFHDR_AudioVideoHeaderNotFound "ASF: no audio or video headers found - broken file?\n" +#define MSGTR_MPDEMUX_ASFHDR_InvalidLengthInASFHeader "Invalid length in 
ASF header!\n" +#define MSGTR_MPDEMUX_ASFHDR_DRMLicenseURL "DRM License URL: %s\n" +#define MSGTR_MPDEMUX_ASFHDR_DRMProtected "This file has been encumbered with DRM encryption, it will not play in MPlayer!\n" + +// aviheader.c +#define MSGTR_MPDEMUX_AVIHDR_EmptyList "** empty list?!\n" +#define MSGTR_MPDEMUX_AVIHDR_WarnNotExtendedAVIHdr "** Warning: this is no extended AVI header..\n" +#define MSGTR_MPDEMUX_AVIHDR_BuildingODMLidx "AVI: ODML: Building ODML index (%d superindexchunks).\n" +#define MSGTR_MPDEMUX_AVIHDR_BrokenODMLfile "AVI: ODML: Broken (incomplete?) file detected. Will use traditional index.\n" +#define MSGTR_MPDEMUX_AVIHDR_CantReadIdxFile "Can't read index file %s: %s\n" +#define MSGTR_MPDEMUX_AVIHDR_NotValidMPidxFile "%s is not a valid MPlayer index file.\n" +#define MSGTR_MPDEMUX_AVIHDR_FailedMallocForIdxFile "Could not allocate memory for index data from %s.\n" +#define MSGTR_MPDEMUX_AVIHDR_PrematureEOF "premature end of index file %s\n" +#define MSGTR_MPDEMUX_AVIHDR_IdxFileLoaded "Loaded index file: %s\n" +#define MSGTR_MPDEMUX_AVIHDR_GeneratingIdx "Generating Index: %3lu %s \r" +#define MSGTR_MPDEMUX_AVIHDR_IdxGeneratedForHowManyChunks "AVI: Generated index table for %d chunks!\n" +#define MSGTR_MPDEMUX_AVIHDR_Failed2WriteIdxFile "Couldn't write index file %s: %s\n" +#define MSGTR_MPDEMUX_AVIHDR_IdxFileSaved "Saved index file: %s\n" + +// demux_audio.c +#define MSGTR_MPDEMUX_AUDIO_BadID3v2TagSize "Audio demuxer: bad ID3v2 tag size: larger than stream (%u).\n" +#define MSGTR_MPDEMUX_AUDIO_DamagedAppendedID3v2Tag "Audio demuxer: damaged appended ID3v2 tag detected.\n" +#define MSGTR_MPDEMUX_AUDIO_UnknownFormat "Audio demuxer: unknown format %d.\n" + +// demux_demuxers.c +#define MSGTR_MPDEMUX_DEMUXERS_FillBufferError "fill_buffer error: bad demuxer: not vd, ad or sd.\n" + +// demux_mkv.c +#define MSGTR_MPDEMUX_MKV_ZlibInitializationFailed "[mkv] zlib initialization failed.\n" +#define MSGTR_MPDEMUX_MKV_ZlibDecompressionFailed "[mkv] zlib 
decompression failed.\n" +#define MSGTR_MPDEMUX_MKV_LzoInitializationFailed "[mkv] lzo initialization failed.\n" +#define MSGTR_MPDEMUX_MKV_LzoDecompressionFailed "[mkv] lzo decompression failed.\n" +#define MSGTR_MPDEMUX_MKV_TrackEncrypted "[mkv] Track number %u has been encrypted and decryption has not yet been\n[mkv] implemented. Skipping track.\n" +#define MSGTR_MPDEMUX_MKV_UnknownContentEncoding "[mkv] Unknown content encoding type for track %u. Skipping track.\n" +#define MSGTR_MPDEMUX_MKV_UnknownCompression "[mkv] Track %u has been compressed with an unknown/unsupported compression\n[mkv] algorithm (%u). Skipping track.\n" +#define MSGTR_MPDEMUX_MKV_ZlibCompressionUnsupported "[mkv] Track %u was compressed with zlib but mplayer has not been compiled\n[mkv] with support for zlib compression. Skipping track.\n" +#define MSGTR_MPDEMUX_MKV_TrackIDName "[mkv] Track ID %u: %s (%s) \"%s\", %s\n" +#define MSGTR_MPDEMUX_MKV_TrackID "[mkv] Track ID %u: %s (%s), %s\n" +#define MSGTR_MPDEMUX_MKV_UnknownCodecID "[mkv] Unknown/unsupported CodecID (%s) or missing/bad CodecPrivate\n[mkv] data (track %u).\n" +#define MSGTR_MPDEMUX_MKV_FlacTrackDoesNotContainValidHeaders "[mkv] FLAC track does not contain valid headers.\n" +#define MSGTR_MPDEMUX_MKV_UnknownAudioCodec "[mkv] Unknown/unsupported audio codec ID '%s' for track %u or missing/faulty\n[mkv] private codec data.\n" +#define MSGTR_MPDEMUX_MKV_SubtitleTypeNotSupported "[mkv] Subtitle type '%s' is not supported.\n" +#define MSGTR_MPDEMUX_MKV_WillPlayVideoTrack "[mkv] Will play video track %u.\n" +#define MSGTR_MPDEMUX_MKV_NoVideoTrackFound "[mkv] No video track found/wanted.\n" +#define MSGTR_MPDEMUX_MKV_NoAudioTrackFound "[mkv] No audio track found/wanted.\n" +#define MSGTR_MPDEMUX_MKV_WillDisplaySubtitleTrack "[mkv] Will display subtitle track %u.\n" +#define MSGTR_MPDEMUX_MKV_NoBlockDurationForSubtitleTrackFound "[mkv] Warning: No BlockDuration for subtitle track found.\n" +#define MSGTR_MPDEMUX_MKV_TooManySublines 
"[mkv] Warning: too many sublines to render, skipping.\n" +#define MSGTR_MPDEMUX_MKV_TooManySublinesSkippingAfterFirst "\n[mkv] Warning: too many sublines to render, skipping after first %i.\n" + +// demux_nuv.c +#define MSGTR_MPDEMUX_NUV_NoVideoBlocksInFile "No video blocks in file.\n" + +// demux_xmms.c +#define MSGTR_MPDEMUX_XMMS_FoundPlugin "Found plugin: %s (%s).\n" +#define MSGTR_MPDEMUX_XMMS_ClosingPlugin "Closing plugin: %s.\n" +#define MSGTR_MPDEMUX_XMMS_WaitForStart "Waiting for the XMMS plugin to start playback of '%s'...\n" + + +// ========================== LIBMENU =================================== + +// common +#define MSGTR_LIBMENU_NoEntryFoundInTheMenuDefinition "[MENU] No entry found in the menu definition.\n" + +// libmenu/menu.c +#define MSGTR_LIBMENU_SyntaxErrorAtLine "[MENU] syntax error at line: %d\n" +#define MSGTR_LIBMENU_MenuDefinitionsNeedANameAttrib "[MENU] Menu definitions need a name attribute (line %d).\n" +#define MSGTR_LIBMENU_BadAttrib "[MENU] bad attribute %s=%s in menu '%s' at line %d\n" +#define MSGTR_LIBMENU_UnknownMenuType "[MENU] unknown menu type '%s' at line %d\n" +#define MSGTR_LIBMENU_CantOpenConfigFile "[MENU] Can't open menu config file: %s\n" +#define MSGTR_LIBMENU_ConfigFileIsTooBig "[MENU] Config file is too big (> %d KB)\n" +#define MSGTR_LIBMENU_ConfigFileIsEmpty "[MENU] Config file is empty.\n" +#define MSGTR_LIBMENU_MenuNotFound "[MENU] Menu %s not found.\n" +#define MSGTR_LIBMENU_MenuInitFailed "[MENU] Menu '%s': Init failed.\n" +#define MSGTR_LIBMENU_UnsupportedOutformat "[MENU] Unsupported output format!!!!\n" + +// libmenu/menu_cmdlist.c +#define MSGTR_LIBMENU_ListMenuEntryDefinitionsNeedAName "[MENU] List menu entry definitions need a name (line %d).\n" +#define MSGTR_LIBMENU_ListMenuNeedsAnArgument "[MENU] List menu needs an argument.\n" + +// libmenu/menu_console.c +#define MSGTR_LIBMENU_WaitPidError "[MENU] Waitpid error: %s.\n" +#define MSGTR_LIBMENU_SelectError "[MENU] Select error.\n" +#define 
MSGTR_LIBMENU_ReadErrorOnChildFD "[MENU] Read error on child's file descriptor: %s.\n" +#define MSGTR_LIBMENU_ConsoleRun "[MENU] Console run: %s ...\n" +#define MSGTR_LIBMENU_AChildIsAlreadyRunning "[MENU] A child is already running.\n" +#define MSGTR_LIBMENU_ForkFailed "[MENU] Fork failed !!!\n" +#define MSGTR_LIBMENU_WriteError "[MENU] write error\n" + +// libmenu/menu_filesel.c +#define MSGTR_LIBMENU_OpendirError "[MENU] opendir error: %s\n" +#define MSGTR_LIBMENU_ReallocError "[MENU] realloc error: %s\n" +#define MSGTR_LIBMENU_MallocError "[MENU] memory allocation error: %s\n" +#define MSGTR_LIBMENU_ReaddirError "[MENU] readdir error: %s\n" +#define MSGTR_LIBMENU_CantOpenDirectory "[MENU] Can't open directory %s.\n" + +// libmenu/menu_param.c +#define MSGTR_LIBMENU_SubmenuDefinitionNeedAMenuAttribut "[MENU] Submenu definition needs a 'menu' attribute.\n" +#define MSGTR_LIBMENU_InvalidProperty "[MENU] Invalid property '%s' in pref menu entry. (line %d).\n" +#define MSGTR_LIBMENU_PrefMenuEntryDefinitionsNeed "[MENU] Pref menu entry definitions need a valid 'property' or 'txt' attribute (line %d).\n" +#define MSGTR_LIBMENU_PrefMenuNeedsAnArgument "[MENU] Pref menu needs an argument.\n" + +// libmenu/menu_pt.c +#define MSGTR_LIBMENU_CantfindTheTargetItem "[MENU] Can't find the target item ????\n" +#define MSGTR_LIBMENU_FailedToBuildCommand "[MENU] Failed to build command: %s.\n" + +// libmenu/menu_txt.c +#define MSGTR_LIBMENU_MenuTxtNeedATxtFileName "[MENU] Text menu needs a textfile name (parameter file).\n" +#define MSGTR_LIBMENU_MenuTxtCantOpen "[MENU] Can't open %s.\n" +#define MSGTR_LIBMENU_WarningTooLongLineSplitting "[MENU] Warning, line too long. 
Splitting it.\n" +#define MSGTR_LIBMENU_ParsedLines "[MENU] Parsed %d lines.\n" + +// libmenu/vf_menu.c +#define MSGTR_LIBMENU_UnknownMenuCommand "[MENU] Unknown command: '%s'.\n" +#define MSGTR_LIBMENU_FailedToOpenMenu "[MENU] Failed to open menu: '%s'.\n" + + +// ========================== LIBMPCODECS =================================== + +// dec_video.c & dec_audio.c: +#define MSGTR_CantOpenCodec "Could not open codec.\n" +#define MSGTR_CantCloseCodec "Could not close codec.\n" + +#define MSGTR_MissingDLLcodec "ERROR: Could not open required DirectShow codec %s.\n" +#define MSGTR_ACMiniterror "Could not load/initialize Win32/ACM audio codec (missing DLL file?).\n" +#define MSGTR_MissingLAVCcodec "Cannot find codec '%s' in libavcodec...\n" + +#define MSGTR_MpegNoSequHdr "MPEG: FATAL: EOF while searching for sequence header.\n" +#define MSGTR_CannotReadMpegSequHdr "FATAL: Cannot read sequence header.\n" +#define MSGTR_CannotReadMpegSequHdrEx "FATAL: Cannot read sequence header extension.\n" +#define MSGTR_BadMpegSequHdr "MPEG: bad sequence header\n" +#define MSGTR_BadMpegSequHdrEx "MPEG: bad sequence header extension\n" + +#define MSGTR_ShMemAllocFail "Cannot allocate shared memory.\n" +#define MSGTR_CantAllocAudioBuf "Cannot allocate audio out buffer.\n" + +#define MSGTR_UnknownAudio "Unknown/missing audio format -> no sound\n" + +#define MSGTR_UsingExternalPP "[PP] Using external postprocessing filter, max q = %d.\n" +#define MSGTR_UsingCodecPP "[PP] Using codec's postprocessing, max q = %d.\n" +#define MSGTR_VideoCodecFamilyNotAvailableStr "Requested video codec family [%s] (vfm=%s) not available.\nEnable it at compilation.\n" +#define MSGTR_AudioCodecFamilyNotAvailableStr "Requested audio codec family [%s] (afm=%s) not available.\nEnable it at compilation.\n" +#define MSGTR_OpeningVideoDecoder "Opening video decoder: [%s] %s\n" +#define MSGTR_SelectedVideoCodec "Selected video codec: [%s] vfm: %s (%s)\n" +#define MSGTR_OpeningAudioDecoder "Opening audio 
decoder: [%s] %s\n" +#define MSGTR_SelectedAudioCodec "Selected audio codec: [%s] afm: %s (%s)\n" +#define MSGTR_VDecoderInitFailed "VDecoder init failed :(\n" +#define MSGTR_ADecoderInitFailed "ADecoder init failed :(\n" +#define MSGTR_ADecoderPreinitFailed "ADecoder preinit failed :(\n" + +// ad_dvdpcm.c: +#define MSGTR_SamplesWanted "Samples of this format are needed to improve support. Please contact the developers.\n" + +// libmpcodecs/ad_libdv.c +#define MSGTR_MPCODECS_AudioFramesizeDiffers "[AD_LIBDV] Warning! Audio framesize differs! read=%d hdr=%d.\n" + +// vd.c +#define MSGTR_CodecDidNotSet "VDec: Codec did not set sh->disp_w and sh->disp_h, trying workaround.\n" +#define MSGTR_CouldNotFindColorspace "Could not find matching colorspace - retrying with -vf scale...\n" +#define MSGTR_MovieAspectIsSet "Movie-Aspect is %.2f:1 - prescaling to correct movie aspect.\n" +#define MSGTR_MovieAspectUndefined "Movie-Aspect is undefined - no prescaling applied.\n" + +// vd_dshow.c, vd_dmo.c +#define MSGTR_DownloadCodecPackage "You need to upgrade/install the binary codecs package.\nGo to http://www.mplayerhq.hu/dload.html\n" + +// libmpcodecs/vd_dmo.c vd_dshow.c vd_vfw.c +#define MSGTR_MPCODECS_CouldntAllocateImageForCinepakCodec "[VD_DMO] Couldn't allocate image for cinepak codec.\n" + +// libmpcodecs/vd_ffmpeg.c +#define MSGTR_MPCODECS_XVMCAcceleratedCodec "[VD_FFMPEG] XVMC accelerated codec.\n" +#define MSGTR_MPCODECS_ArithmeticMeanOfQP "[VD_FFMPEG] Arithmetic mean of QP: %2.4f, Harmonic mean of QP: %2.4f\n" +#define MSGTR_MPCODECS_DRIFailure "[VD_FFMPEG] DRI failure.\n" +#define MSGTR_MPCODECS_CouldntAllocateImageForCodec "[VD_FFMPEG] Couldn't allocate image for codec.\n" +#define MSGTR_MPCODECS_XVMCAcceleratedMPEG2 "[VD_FFMPEG] XVMC-accelerated MPEG-2.\n" +#define MSGTR_MPCODECS_TryingPixfmt "[VD_FFMPEG] Trying pixfmt=%d.\n" +#define MSGTR_MPCODECS_McGetBufferShouldWorkOnlyWithXVMC "[VD_FFMPEG] The mc_get_buffer should work only with XVMC acceleration!!" 
+#define MSGTR_MPCODECS_UnexpectedInitVoError "[VD_FFMPEG] Unexpected init_vo error.\n" +#define MSGTR_MPCODECS_UnrecoverableErrorRenderBuffersNotTaken "[VD_FFMPEG] Unrecoverable error, render buffers not taken.\n" +#define MSGTR_MPCODECS_OnlyBuffersAllocatedByVoXvmcAllowed "[VD_FFMPEG] Only buffers allocated by vo_xvmc allowed.\n" + +// libmpcodecs/ve_lavc.c +#define MSGTR_MPCODECS_HighQualityEncodingSelected "[VE_LAVC] High quality encoding selected (non-realtime)!\n" +#define MSGTR_MPCODECS_UsingConstantQscale "[VE_LAVC] Using constant qscale = %f (VBR).\n" + +// libmpcodecs/ve_raw.c +#define MSGTR_MPCODECS_OutputWithFourccNotSupported "[VE_RAW] Raw output with FourCC [%x] not supported!\n" +#define MSGTR_MPCODECS_NoVfwCodecSpecified "[VE_RAW] Required VfW codec not specified!!\n" + +// vf.c +#define MSGTR_CouldNotFindVideoFilter "Couldn't find video filter '%s'.\n" +#define MSGTR_CouldNotOpenVideoFilter "Couldn't open video filter '%s'.\n" +#define MSGTR_OpeningVideoFilter "Opening video filter: " +#define MSGTR_CannotFindColorspace "Cannot find matching colorspace, even by inserting 'scale' :(\n" + +// libmpcodecs/vf_crop.c +#define MSGTR_MPCODECS_CropBadPositionWidthHeight "[CROP] Bad position/width/height - cropped area outside of the original!\n" + +// libmpcodecs/vf_cropdetect.c +#define MSGTR_MPCODECS_CropArea "[CROP] Crop area: X: %d..%d Y: %d..%d (-vf crop=%d:%d:%d:%d).\n" + +// libmpcodecs/vf_format.c, vf_palette.c, vf_noformat.c +#define MSGTR_MPCODECS_UnknownFormatName "[VF_FORMAT] Unknown format name: '%s'.\n" + +// libmpcodecs/vf_framestep.c vf_noformat.c vf_palette.c vf_tile.c +#define MSGTR_MPCODECS_ErrorParsingArgument "[VF_FRAMESTEP] Error parsing argument.\n" + +// libmpcodecs/ve_vfw.c +#define MSGTR_MPCODECS_CompressorType "Compressor type: %.4lx\n" +#define MSGTR_MPCODECS_CompressorSubtype "Compressor subtype: %.4lx\n" +#define MSGTR_MPCODECS_CompressorFlags "Compressor flags: %lu, version %lu, ICM version: %lu\n" +#define 
MSGTR_MPCODECS_Flags "Flags:" +#define MSGTR_MPCODECS_Quality " quality" + +// libmpcodecs/vf_expand.c +#define MSGTR_MPCODECS_FullDRNotPossible "Full DR not possible, trying SLICES instead!\n" +#define MSGTR_MPCODECS_WarnNextFilterDoesntSupportSlices "WARNING! Next filter doesn't support SLICES, get ready for sig11...\n" +#define MSGTR_MPCODECS_FunWhydowegetNULL "Why do we get NULL??\n" + +// libmpcodecs/vf_test.c, vf_yuy2.c, vf_yvu9.c +#define MSGTR_MPCODECS_WarnNextFilterDoesntSupport "%s not supported by next filter/vo :(\n" + + +// ================================== LIBASS ==================================== + +// ass_bitmap.c +#define MSGTR_LIBASS_FT_Glyph_To_BitmapError "[ass] FT_Glyph_To_Bitmap error %d \n" +#define MSGTR_LIBASS_UnsupportedPixelMode "[ass] Unsupported pixel mode: %d\n" +#define MSGTR_LIBASS_GlyphBBoxTooLarge "[ass] Glyph bounding box too large: %dx%dpx\n" + +// ass.c +#define MSGTR_LIBASS_NoStyleNamedXFoundUsingY "[ass] [%p] Warning: no style named '%s' found, using '%s'\n" +#define MSGTR_LIBASS_BadTimestamp "[ass] bad timestamp\n" +#define MSGTR_LIBASS_BadEncodedDataSize "[ass] bad encoded data size\n" +#define MSGTR_LIBASS_FontLineTooLong "[ass] Font line too long: %d, %s\n" +#define MSGTR_LIBASS_EventFormatHeaderMissing "[ass] Event format header missing\n" +#define MSGTR_LIBASS_ErrorOpeningIconvDescriptor "[ass] error opening iconv descriptor.\n" +#define MSGTR_LIBASS_ErrorRecodingFile "[ass] error recoding file.\n" +#define MSGTR_LIBASS_FopenFailed "[ass] ass_read_file(%s): fopen failed\n" +#define MSGTR_LIBASS_FseekFailed "[ass] ass_read_file(%s): fseek failed\n" +#define MSGTR_LIBASS_RefusingToLoadSubtitlesLargerThan100M "[ass] ass_read_file(%s): Refusing to load subtitles larger than 100M\n" +#define MSGTR_LIBASS_ReadFailed "Read failed, %d: %s\n" +#define MSGTR_LIBASS_AddedSubtitleFileMemory "[ass] Added subtitle file: <memory> (%d styles, %d events)\n" +#define MSGTR_LIBASS_AddedSubtitleFileFname "[ass] Added subtitle file: %s 
(%d styles, %d events)\n" +#define MSGTR_LIBASS_FailedToCreateDirectory "[ass] Failed to create directory %s\n" +#define MSGTR_LIBASS_NotADirectory "[ass] Not a directory: %s\n" + +// ass_cache.c +#define MSGTR_LIBASS_TooManyFonts "[ass] Too many fonts\n" +#define MSGTR_LIBASS_ErrorOpeningFont "[ass] Error opening font: %s, %d\n" + +// ass_fontconfig.c +#define MSGTR_LIBASS_SelectedFontFamilyIsNotTheRequestedOne "[ass] fontconfig: Selected font is not the requested one: '%s' != '%s'\n" +#define MSGTR_LIBASS_UsingDefaultFontFamily "[ass] fontconfig_select: Using default font family: (%s, %d, %d) -> %s, %d\n" +#define MSGTR_LIBASS_UsingDefaultFont "[ass] fontconfig_select: Using default font: (%s, %d, %d) -> %s, %d\n" +#define MSGTR_LIBASS_UsingArialFontFamily "[ass] fontconfig_select: Using 'Arial' font family: (%s, %d, %d) -> %s, %d\n" +#define MSGTR_LIBASS_FcInitLoadConfigAndFontsFailed "[ass] FcInitLoadConfigAndFonts failed.\n" +#define MSGTR_LIBASS_UpdatingFontCache "[ass] Updating font cache.\n" +#define MSGTR_LIBASS_BetaVersionsOfFontconfigAreNotSupported "[ass] Beta versions of fontconfig are not supported.\n[ass] Update before reporting any bugs.\n" +#define MSGTR_LIBASS_FcStrSetAddFailed "[ass] FcStrSetAdd failed.\n" +#define MSGTR_LIBASS_FcDirScanFailed "[ass] FcDirScan failed.\n" +#define MSGTR_LIBASS_FcDirSave "[ass] FcDirSave failed.\n" +#define MSGTR_LIBASS_FcConfigAppFontAddDirFailed "[ass] FcConfigAppFontAddDir failed\n" +#define MSGTR_LIBASS_FontconfigDisabledDefaultFontWillBeUsed "[ass] Fontconfig disabled, only default font will be used.\n" +#define MSGTR_LIBASS_FunctionCallFailed "[ass] %s failed\n" + +// ass_render.c +#define MSGTR_LIBASS_NeitherPlayResXNorPlayResYDefined "[ass] Neither PlayResX nor PlayResY defined. 
Assuming 384x288.\n" +#define MSGTR_LIBASS_PlayResYUndefinedSettingY "[ass] PlayResY undefined, setting %d.\n" +#define MSGTR_LIBASS_PlayResXUndefinedSettingX "[ass] PlayResX undefined, setting %d.\n" +#define MSGTR_LIBASS_FT_Init_FreeTypeFailed "[ass] FT_Init_FreeType failed.\n" +#define MSGTR_LIBASS_Init "[ass] Init\n" +#define MSGTR_LIBASS_InitFailed "[ass] Init failed.\n" +#define MSGTR_LIBASS_BadCommand "[ass] Bad command: %c%c\n" +#define MSGTR_LIBASS_ErrorLoadingGlyph "[ass] Error loading glyph.\n" +#define MSGTR_LIBASS_FT_Glyph_Stroke_Error "[ass] FT_Glyph_Stroke error %d \n" +#define MSGTR_LIBASS_UnknownEffectType_InternalError "[ass] Unknown effect type (internal error)\n" +#define MSGTR_LIBASS_NoStyleFound "[ass] No style found!\n" +#define MSGTR_LIBASS_EmptyEvent "[ass] Empty event!\n" +#define MSGTR_LIBASS_MAX_GLYPHS_Reached "[ass] MAX_GLYPHS reached: event %d, start = %llu, duration = %llu\n Text = %s\n" +#define MSGTR_LIBASS_EventHeightHasChanged "[ass] Warning! Event height has changed! 
\n" + +// ass_font.c +#define MSGTR_LIBASS_GlyphNotFoundReselectingFont "[ass] Glyph 0x%X not found, selecting one more font for (%s, %d, %d)\n" +#define MSGTR_LIBASS_GlyphNotFound "[ass] Glyph 0x%X not found in font for (%s, %d, %d)\n" +#define MSGTR_LIBASS_ErrorOpeningMemoryFont "[ass] Error opening memory font: %s\n" +#define MSGTR_LIBASS_NoCharmaps "[ass] font face with no charmaps\n" +#define MSGTR_LIBASS_NoCharmapAutodetected "[ass] no charmap autodetected, trying the first one\n" + + +// ================================== stream ==================================== + +// ai_alsa.c +#define MSGTR_MPDEMUX_AIALSA_CannotSetSamplerate "Cannot set samplerate.\n" +#define MSGTR_MPDEMUX_AIALSA_CannotSetBufferTime "Cannot set buffer time.\n" +#define MSGTR_MPDEMUX_AIALSA_CannotSetPeriodTime "Cannot set period time.\n" + +// ai_alsa.c +#define MSGTR_MPDEMUX_AIALSA_PcmBrokenConfig "Broken configuration for this PCM: no configurations available.\n" +#define MSGTR_MPDEMUX_AIALSA_UnavailableAccessType "Access type not available.\n" +#define MSGTR_MPDEMUX_AIALSA_UnavailableSampleFmt "Sample format not available.\n" +#define MSGTR_MPDEMUX_AIALSA_UnavailableChanCount "Channel count not available - reverting to default: %d\n" +#define MSGTR_MPDEMUX_AIALSA_CannotInstallHWParams "Unable to install hardware parameters: %s" +#define MSGTR_MPDEMUX_AIALSA_PeriodEqualsBufferSize "Can't use period equal to buffer size (%u == %lu)\n" +#define MSGTR_MPDEMUX_AIALSA_CannotInstallSWParams "Unable to install software parameters:\n" +#define MSGTR_MPDEMUX_AIALSA_ErrorOpeningAudio "Error opening audio: %s\n" +#define MSGTR_MPDEMUX_AIALSA_AlsaStatusError "ALSA status error: %s" +#define MSGTR_MPDEMUX_AIALSA_AlsaXRUN "ALSA xrun!!! 
(at least %.3f ms long)\n" +#define MSGTR_MPDEMUX_AIALSA_AlsaXRUNPrepareError "ALSA xrun: prepare error: %s" +#define MSGTR_MPDEMUX_AIALSA_AlsaReadWriteError "ALSA read/write error" + +// ai_oss.c +#define MSGTR_MPDEMUX_AIOSS_Unable2SetChanCount "Unable to set channel count: %d\n" +#define MSGTR_MPDEMUX_AIOSS_Unable2SetStereo "Unable to set stereo: %d\n" +#define MSGTR_MPDEMUX_AIOSS_Unable2Open "Unable to open '%s': %s\n" +#define MSGTR_MPDEMUX_AIOSS_UnsupportedFmt "unsupported format\n" +#define MSGTR_MPDEMUX_AIOSS_Unable2SetAudioFmt "Unable to set audio format." +#define MSGTR_MPDEMUX_AIOSS_Unable2SetSamplerate "Unable to set samplerate: %d\n" +#define MSGTR_MPDEMUX_AIOSS_Unable2SetTrigger "Unable to set trigger: %d\n" +#define MSGTR_MPDEMUX_AIOSS_Unable2GetBlockSize "Unable to get block size!\n" +#define MSGTR_MPDEMUX_AIOSS_AudioBlockSizeZero "Audio block size is zero, setting to %d!\n" +#define MSGTR_MPDEMUX_AIOSS_AudioBlockSize2Low "Audio block size too low, setting to %d!\n" + +// asf_mmst_streaming.c +#define MSGTR_MPDEMUX_MMST_WriteError "write error\n" +#define MSGTR_MPDEMUX_MMST_EOFAlert "\nAlert! EOF\n" +#define MSGTR_MPDEMUX_MMST_PreHeaderReadFailed "pre-header read failed\n" +#define MSGTR_MPDEMUX_MMST_InvalidHeaderSize "Invalid header size, giving up.\n" +#define MSGTR_MPDEMUX_MMST_HeaderDataReadFailed "Header data read failed.\n" +#define MSGTR_MPDEMUX_MMST_packet_lenReadFailed "packet_len read failed.\n" +#define MSGTR_MPDEMUX_MMST_InvalidRTSPPacketSize "Invalid RTSP packet size, giving up.\n" +#define MSGTR_MPDEMUX_MMST_CmdDataReadFailed "Command data read failed.\n" +#define MSGTR_MPDEMUX_MMST_HeaderObject "header object\n" +#define MSGTR_MPDEMUX_MMST_DataObject "data object\n" +#define MSGTR_MPDEMUX_MMST_FileObjectPacketLen "file object, packet length = %d (%d)\n" +#define MSGTR_MPDEMUX_MMST_StreamObjectStreamID "stream object, stream ID: %d\n" +#define MSGTR_MPDEMUX_MMST_2ManyStreamID "Too many IDs, stream skipped." 
+#define MSGTR_MPDEMUX_MMST_UnknownObject "unknown object\n" +#define MSGTR_MPDEMUX_MMST_MediaDataReadFailed "Media data read failed.\n" +#define MSGTR_MPDEMUX_MMST_MissingSignature "missing signature\n" +#define MSGTR_MPDEMUX_MMST_PatentedTechnologyJoke "Everything done. Thank you for downloading a media file containing proprietary and patented technology.\n" +#define MSGTR_MPDEMUX_MMST_UnknownCmd "unknown command %02x\n" +#define MSGTR_MPDEMUX_MMST_GetMediaPacketErr "get_media_packet error : %s\n" +#define MSGTR_MPDEMUX_MMST_Connected "Connected\n" + +// asf_streaming.c +#define MSGTR_MPDEMUX_ASF_StreamChunkSize2Small "Ahhhh, stream_chunck size is too small: %d\n" +#define MSGTR_MPDEMUX_ASF_SizeConfirmMismatch "size_confirm mismatch!: %d %d\n" +#define MSGTR_MPDEMUX_ASF_WarnDropHeader "Warning: drop header ????\n" +#define MSGTR_MPDEMUX_ASF_ErrorParsingChunkHeader "Error while parsing chunk header\n" +#define MSGTR_MPDEMUX_ASF_NoHeaderAtFirstChunk "Didn't get a header as first chunk !!!!\n" +#define MSGTR_MPDEMUX_ASF_BufferMallocFailed "Error: Can't allocate %d bytes buffer.\n" +#define MSGTR_MPDEMUX_ASF_ErrReadingNetworkStream "Error while reading network stream.\n" +#define MSGTR_MPDEMUX_ASF_ErrChunk2Small "Error: Chunk is too small.\n" +#define MSGTR_MPDEMUX_ASF_ErrSubChunkNumberInvalid "Error: Subchunk number is invalid.\n" +#define MSGTR_MPDEMUX_ASF_Bandwidth2SmallCannotPlay "Bandwidth too small, file cannot be played!\n" +#define MSGTR_MPDEMUX_ASF_Bandwidth2SmallDeselectedAudio "Bandwidth too small, deselected audio stream.\n" +#define MSGTR_MPDEMUX_ASF_Bandwidth2SmallDeselectedVideo "Bandwidth too small, deselected video stream.\n" +#define MSGTR_MPDEMUX_ASF_InvalidLenInHeader "Invalid length in ASF header!\n" +#define MSGTR_MPDEMUX_ASF_ErrReadingChunkHeader "Error while reading chunk header.\n" +#define MSGTR_MPDEMUX_ASF_ErrChunkBiggerThanPacket "Error: chunk_size > packet_size\n" +#define MSGTR_MPDEMUX_ASF_ErrReadingChunk "Error while reading chunk.\n" 
+#define MSGTR_MPDEMUX_ASF_ASFRedirector "=====> ASF Redirector\n" +#define MSGTR_MPDEMUX_ASF_InvalidProxyURL "invalid proxy URL\n" +#define MSGTR_MPDEMUX_ASF_UnknownASFStreamType "unknown ASF stream type\n" +#define MSGTR_MPDEMUX_ASF_Failed2ParseHTTPResponse "Failed to parse HTTP response.\n" +#define MSGTR_MPDEMUX_ASF_ServerReturn "Server returned %d:%s\n" +#define MSGTR_MPDEMUX_ASF_ASFHTTPParseWarnCuttedPragma "ASF HTTP PARSE WARNING : Pragma %s cut from %zu bytes to %zu\n" +#define MSGTR_MPDEMUX_ASF_SocketWriteError "socket write error: %s\n" +#define MSGTR_MPDEMUX_ASF_HeaderParseFailed "Failed to parse header.\n" +#define MSGTR_MPDEMUX_ASF_NoStreamFound "No stream found.\n" +#define MSGTR_MPDEMUX_ASF_UnknownASFStreamingType "unknown ASF streaming type\n" +#define MSGTR_MPDEMUX_ASF_InfoStreamASFURL "STREAM_ASF, URL: %s\n" +#define MSGTR_MPDEMUX_ASF_StreamingFailed "Failed, exiting.\n" + +// audio_in.c +#define MSGTR_MPDEMUX_AUDIOIN_ErrReadingAudio "\nError reading audio: %s\n" +#define MSGTR_MPDEMUX_AUDIOIN_XRUNSomeFramesMayBeLeftOut "Recovered from cross-run, some frames may be left out!\n" +#define MSGTR_MPDEMUX_AUDIOIN_ErrFatalCannotRecover "Fatal error, cannot recover!\n" +#define MSGTR_MPDEMUX_AUDIOIN_NotEnoughSamples "\nNot enough audio samples!\n" + +// cache2.c +#define MSGTR_MPDEMUX_CACHE2_NonCacheableStream "\rThis stream is non-cacheable.\n" +#define MSGTR_MPDEMUX_CACHE2_ReadFileposDiffers "!!! read_filepos differs!!! 
Report this bug...\n" + +// network.c +#define MSGTR_MPDEMUX_NW_UnknownAF "Unknown address family %d\n" +#define MSGTR_MPDEMUX_NW_ResolvingHostForAF "Resolving %s for %s...\n" +#define MSGTR_MPDEMUX_NW_CantResolv "Couldn't resolve name for %s: %s\n" +#define MSGTR_MPDEMUX_NW_ConnectingToServer "Connecting to server %s[%s]: %d...\n" +#define MSGTR_MPDEMUX_NW_CantConnect2Server "Failed to connect to server with %s\n" +#define MSGTR_MPDEMUX_NW_SelectFailed "Select failed.\n" +#define MSGTR_MPDEMUX_NW_ConnTimeout "connection timeout\n" +#define MSGTR_MPDEMUX_NW_GetSockOptFailed "getsockopt failed: %s\n" +#define MSGTR_MPDEMUX_NW_ConnectError "connect error: %s\n" +#define MSGTR_MPDEMUX_NW_InvalidProxySettingTryingWithout "Invalid proxy setting... Trying without proxy.\n" +#define MSGTR_MPDEMUX_NW_CantResolvTryingWithoutProxy "Could not resolve remote hostname for AF_INET. Trying without proxy.\n" +#define MSGTR_MPDEMUX_NW_ErrSendingHTTPRequest "Error while sending HTTP request: Didn't send all the request.\n" +#define MSGTR_MPDEMUX_NW_ReadFailed "Read failed.\n" +#define MSGTR_MPDEMUX_NW_Read0CouldBeEOF "http_read_response read 0 (i.e. EOF).\n" +#define MSGTR_MPDEMUX_NW_AuthFailed "Authentication failed. Please use the -user and -passwd options to provide your\n"\ +"username/password for a list of URLs, or form an URL like:\n"\ +"http://username:password@hostname/file\n" +#define MSGTR_MPDEMUX_NW_AuthRequiredFor "Authentication required for %s\n" +#define MSGTR_MPDEMUX_NW_AuthRequired "Authentication required.\n" +#define MSGTR_MPDEMUX_NW_NoPasswdProvidedTryingBlank "No password provided, trying blank password.\n" +#define MSGTR_MPDEMUX_NW_ErrServerReturned "Server returns %d: %s\n" +#define MSGTR_MPDEMUX_NW_CacheSizeSetTo "Cache size set to %d KBytes\n" + +// open.c, stream.c: +#define MSGTR_CdDevNotfound "CD-ROM Device '%s' not found.\n" +#define MSGTR_ErrTrackSelect "Error selecting VCD track." 
+#define MSGTR_ReadSTDIN "Reading from stdin...\n" +#define MSGTR_UnableOpenURL "Unable to open URL: %s\n" +#define MSGTR_ConnToServer "Connected to server: %s\n" +#define MSGTR_FileNotFound "File not found: '%s'\n" + +#define MSGTR_SMBInitError "Cannot init the libsmbclient library: %d\n" +#define MSGTR_SMBFileNotFound "Could not open from LAN: '%s'\n" +#define MSGTR_SMBNotCompiled "MPlayer was not compiled with SMB reading support.\n" + +#define MSGTR_CantOpenBluray "Couldn't open Blu-ray device: %s\n" +#define MSGTR_CantOpenDVD "Couldn't open DVD device: %s (%s)\n" + +#define MSGTR_URLParsingFailed "URL parsing failed on url %s\n" +#define MSGTR_FailedSetStreamOption "Failed to set stream option %s=%s\n" +#define MSGTR_StreamNeedType "Streams need a type!\n" +#define MSGTR_StreamProtocolNULL "Stream type %s has protocols == NULL, it's a bug\n" +#define MSGTR_StreamCantHandleURL "No stream found to handle url %s\n" +#define MSGTR_StreamNULLFilename "open_output_stream(), NULL filename, report this bug\n" +#define MSGTR_StreamErrorWritingCapture "Error writing capture file: %s\n" +#define MSGTR_StreamSeekFailed "Seek failed\n" +#define MSGTR_StreamNotSeekable "Stream not seekable!\n" +#define MSGTR_StreamCannotSeekBackward "Cannot seek backward in linear streams!\n" + +// stream_cdda.c +#define MSGTR_MPDEMUX_CDDA_CantOpenCDDADevice "Can't open CDDA device.\n" +#define MSGTR_MPDEMUX_CDDA_CantOpenDisc "Can't open disc.\n" +#define MSGTR_MPDEMUX_CDDA_AudioCDFoundWithNTracks "Found audio CD with %d tracks.\n" + +// stream_cddb.c +#define MSGTR_MPDEMUX_CDDB_FailedToReadTOC "Failed to read TOC.\n" +#define MSGTR_MPDEMUX_CDDB_FailedToOpenDevice "Failed to open %s device.\n" +#define MSGTR_MPDEMUX_CDDB_NotAValidURL "not a valid URL\n" +#define MSGTR_MPDEMUX_CDDB_FailedToSendHTTPRequest "Failed to send the HTTP request.\n" +#define MSGTR_MPDEMUX_CDDB_FailedToReadHTTPResponse "Failed to read the HTTP response.\n" +#define MSGTR_MPDEMUX_CDDB_HTTPErrorNOTFOUND "Not Found.\n" 
+#define MSGTR_MPDEMUX_CDDB_HTTPErrorUnknown "unknown error code\n" +#define MSGTR_MPDEMUX_CDDB_NoCacheFound "No cache found.\n" +#define MSGTR_MPDEMUX_CDDB_NotAllXMCDFileHasBeenRead "Not all the xmcd file has been read.\n" +#define MSGTR_MPDEMUX_CDDB_FailedToCreateDirectory "Failed to create directory %s.\n" +#define MSGTR_MPDEMUX_CDDB_NotAllXMCDFileHasBeenWritten "Not all of the xmcd file has been written.\n" +#define MSGTR_MPDEMUX_CDDB_InvalidXMCDDatabaseReturned "Invalid xmcd database file returned.\n" +#define MSGTR_MPDEMUX_CDDB_UnexpectedFIXME "unexpected FIXME\n" +#define MSGTR_MPDEMUX_CDDB_UnhandledCode "unhandled code\n" +#define MSGTR_MPDEMUX_CDDB_UnableToFindEOL "Unable to find end of line.\n" +#define MSGTR_MPDEMUX_CDDB_ParseOKFoundAlbumTitle "Parse OK, found: %s\n" +#define MSGTR_MPDEMUX_CDDB_AlbumNotFound "Album not found.\n" +#define MSGTR_MPDEMUX_CDDB_ServerReturnsCommandSyntaxErr "Server returns: Command syntax error\n" +#define MSGTR_MPDEMUX_CDDB_NoSitesInfoAvailable "No sites information available.\n" +#define MSGTR_MPDEMUX_CDDB_FailedToGetProtocolLevel "Failed to get the protocol level.\n" +#define MSGTR_MPDEMUX_CDDB_NoCDInDrive "No CD in the drive.\n" + +// stream_cue.c +#define MSGTR_MPDEMUX_CUEREAD_UnexpectedCuefileLine "[bincue] Unexpected cuefile line: %s\n" +#define MSGTR_MPDEMUX_CUEREAD_BinFilenameTested "[bincue] bin filename tested: %s\n" +#define MSGTR_MPDEMUX_CUEREAD_CannotFindBinFile "[bincue] Couldn't find the bin file - giving up.\n" +#define MSGTR_MPDEMUX_CUEREAD_UsingBinFile "[bincue] Using bin file %s.\n" +#define MSGTR_MPDEMUX_CUEREAD_UnknownModeForBinfile "[bincue] unknown mode for binfile. Should not happen. 
Aborting.\n" +#define MSGTR_MPDEMUX_CUEREAD_CannotOpenCueFile "[bincue] Cannot open %s.\n" +#define MSGTR_MPDEMUX_CUEREAD_ErrReadingFromCueFile "[bincue] Error reading from %s\n" +#define MSGTR_MPDEMUX_CUEREAD_ErrGettingBinFileSize "[bincue] Error getting size of bin file.\n" +#define MSGTR_MPDEMUX_CUEREAD_InfoTrackFormat "track %02d: format=%d %02d:%02d:%02d\n" +#define MSGTR_MPDEMUX_CUEREAD_UnexpectedBinFileEOF "[bincue] unexpected end of bin file\n" +#define MSGTR_MPDEMUX_CUEREAD_CannotReadNBytesOfPayload "[bincue] Couldn't read %d bytes of payload.\n" +#define MSGTR_MPDEMUX_CUEREAD_CueStreamInfo_FilenameTrackTracksavail "CUE stream_open, filename=%s, track=%d, available tracks: %d -> %d\n" + +// stream_dvd.c +#define MSGTR_DVDspeedCantOpen "Couldn't open DVD device for writing, changing DVD speed needs write access.\n" +#define MSGTR_DVDrestoreSpeed "Restoring DVD speed... " +#define MSGTR_DVDlimitSpeed "Limiting DVD speed to %dKB/s... " +#define MSGTR_DVDlimitFail "failed\n" +#define MSGTR_DVDlimitOk "successful\n" +#define MSGTR_NoDVDSupport "MPlayer was compiled without DVD support, exiting.\n" +#define MSGTR_DVDnumTitles "There are %d titles on this DVD.\n" +#define MSGTR_DVDinvalidTitle "Invalid DVD title number: %d\n" +#define MSGTR_DVDnumChapters "There are %d chapters in this DVD title.\n" +#define MSGTR_DVDinvalidChapter "Invalid DVD chapter number: %d\n" +#define MSGTR_DVDinvalidChapterRange "Invalid chapter range specification %s\n" +#define MSGTR_DVDinvalidLastChapter "Invalid DVD last chapter number: %d\n" +#define MSGTR_DVDnumAngles "There are %d angles in this DVD title.\n" +#define MSGTR_DVDinvalidAngle "Invalid DVD angle number: %d\n" +#define MSGTR_DVDnoIFO "Cannot open the IFO file for DVD title %d.\n" +#define MSGTR_DVDnoVMG "Can't open VMG info!\n" +#define MSGTR_DVDnoVOBs "Cannot open title VOBS (VTS_%02d_1.VOB).\n" +#define MSGTR_DVDnoMatchingAudio "No matching DVD audio language found!\n" +#define MSGTR_DVDaudioChannel "Selected DVD 
audio channel: %d language: %c%c\n" +#define MSGTR_DVDaudioStreamInfo "audio stream: %d format: %s (%s) language: %s aid: %d.\n" +#define MSGTR_DVDnumAudioChannels "number of audio channels on disk: %d.\n" +#define MSGTR_DVDnoMatchingSubtitle "No matching DVD subtitle language found!\n" +#define MSGTR_DVDsubtitleChannel "Selected DVD subtitle channel: %d language: %c%c\n" +#define MSGTR_DVDsubtitleLanguage "subtitle ( sid ): %d language: %s\n" +#define MSGTR_DVDnumSubtitles "number of subtitles on disk: %d\n" + +// stream_bluray.c +#define MSGTR_BlurayNoDevice "No Blu-ray device/location was specified ...\n" +#define MSGTR_BlurayNoTitles "Can't find any Blu-ray-compatible title here.\n" + +// stream_radio.c +#define MSGTR_RADIO_ChannelNamesDetected "[radio] Radio channel names detected.\n" +#define MSGTR_RADIO_WrongFreqForChannel "[radio] Wrong frequency for channel %s\n" +#define MSGTR_RADIO_WrongChannelNumberFloat "[radio] Wrong channel number: %.2f\n" +#define MSGTR_RADIO_WrongChannelNumberInt "[radio] Wrong channel number: %d\n" +#define MSGTR_RADIO_WrongChannelName "[radio] Wrong channel name: %s\n" +#define MSGTR_RADIO_FreqParameterDetected "[radio] Radio frequency parameter detected.\n" +#define MSGTR_RADIO_GetTunerFailed "[radio] Warning: ioctl get tuner failed: %s. 
Setting frac to %d.\n" +#define MSGTR_RADIO_NotRadioDevice "[radio] %s is no radio device!\n" +#define MSGTR_RADIO_SetFreqFailed "[radio] ioctl set frequency 0x%x (%.2f) failed: %s\n" +#define MSGTR_RADIO_GetFreqFailed "[radio] ioctl get frequency failed: %s\n" +#define MSGTR_RADIO_SetMuteFailed "[radio] ioctl set mute failed: %s\n" +#define MSGTR_RADIO_QueryControlFailed "[radio] ioctl query control failed: %s\n" +#define MSGTR_RADIO_GetVolumeFailed "[radio] ioctl get volume failed: %s\n" +#define MSGTR_RADIO_SetVolumeFailed "[radio] ioctl set volume failed: %s\n" +#define MSGTR_RADIO_DroppingFrame "\n[radio] too bad - dropping audio frame (%d bytes)!\n" +#define MSGTR_RADIO_BufferEmpty "[radio] grab_audio_frame: buffer empty, waiting for %d data bytes.\n" +#define MSGTR_RADIO_AudioInitFailed "[radio] audio_in_init failed: %s\n" +#define MSGTR_RADIO_AllocateBufferFailed "[radio] cannot allocate audio buffer (block=%d,buf=%d): %s\n" +#define MSGTR_RADIO_CurrentFreq "[radio] Current frequency: %.2f\n" +#define MSGTR_RADIO_SelectedChannel "[radio] Selected channel: %d - %s (freq: %.2f)\n" +#define MSGTR_RADIO_ChangeChannelNoChannelList "[radio] Can not change channel: no channel list given.\n" +#define MSGTR_RADIO_UnableOpenDevice "[radio] Unable to open '%s': %s\n" +#define MSGTR_RADIO_InitFracFailed "[radio] init_frac failed.\n" +#define MSGTR_RADIO_WrongFreq "[radio] Wrong frequency: %.2f\n" +#define MSGTR_RADIO_UsingFreq "[radio] Using frequency: %.2f.\n" +#define MSGTR_RADIO_AudioInInitFailed "[radio] audio_in_init failed.\n" +#define MSGTR_RADIO_AudioInSetupFailed "[radio] audio_in_setup call failed: %s\n" +#define MSGTR_RADIO_ClearBufferFailed "[radio] Clearing buffer failed: %s\n" +#define MSGTR_RADIO_StreamEnableCacheFailed "[radio] Call to stream_enable_cache failed: %s\n" +#define MSGTR_RADIO_DriverUnknownStr "[radio] Unknown driver name: %s\n" +#define MSGTR_RADIO_DriverV4L2 "[radio] Using V4Lv2 radio interface.\n" +#define MSGTR_RADIO_DriverV4L "[radio] 
Using V4Lv1 radio interface.\n" +#define MSGTR_RADIO_DriverBSDBT848 "[radio] Using *BSD BT848 radio interface.\n" + +//tv.c +#define MSGTR_TV_BogusNormParameter "tv.c: norm_from_string(%s): Bogus norm parameter, setting %s.\n" +#define MSGTR_TV_NoVideoInputPresent "Error: No video input present!\n" +#define MSGTR_TV_UnknownImageFormat ""\ +"==================================================================\n"\ +" WARNING: UNTESTED OR UNKNOWN OUTPUT IMAGE FORMAT REQUESTED (0x%x)\n"\ +" This may cause buggy playback or program crash! Bug reports will\n"\ +" be ignored! You should try again with YV12 (which is the default\n"\ +" colorspace) and read the documentation!\n"\ +"==================================================================\n" +#define MSGTR_TV_CannotSetNorm "Error: Cannot set norm!\n" +#define MSGTR_TV_MJP_WidthHeight " MJP: width %d height %d\n" +#define MSGTR_TV_UnableToSetWidth "Unable to set requested width: %d\n" +#define MSGTR_TV_UnableToSetHeight "Unable to set requested height: %d\n" +#define MSGTR_TV_NoTuner "Selected input hasn't got a tuner!\n" +#define MSGTR_TV_UnableFindChanlist "Unable to find selected channel list! 
(%s)\n" +#define MSGTR_TV_ChannelFreqParamConflict "You can't set frequency and channel simultaneously!\n" +#define MSGTR_TV_ChannelNamesDetected "TV channel names detected.\n" +#define MSGTR_TV_NoFreqForChannel "Couldn't find frequency for channel %s (%s)\n" +#define MSGTR_TV_SelectedChannel3 "Selected channel: %s - %s (freq: %.3f)\n" +#define MSGTR_TV_SelectedChannel2 "Selected channel: %s (freq: %.3f)\n" +#define MSGTR_TV_UnsupportedAudioType "Audio type '%s (%x)' unsupported!\n" +#define MSGTR_TV_AvailableDrivers "Available drivers:\n" +#define MSGTR_TV_DriverInfo "Selected driver: %s\n name: %s\n author: %s\n comment: %s\n" +#define MSGTR_TV_NoSuchDriver "No such driver: %s\n" +#define MSGTR_TV_DriverAutoDetectionFailed "TV driver autodetection failed.\n" +#define MSGTR_TV_UnknownColorOption "Unknown color option (%d) specified!\n" +#define MSGTR_TV_NoTeletext "No teletext" +#define MSGTR_TV_Bt848IoctlFailed "tvi_bsdbt848: Call to %s ioctl failed. Error: %s\n" +#define MSGTR_TV_Bt848InvalidAudioRate "tvi_bsdbt848: Invalid audio rate. Error: %s\n" +#define MSGTR_TV_Bt848ErrorOpeningBktrDev "tvi_bsdbt848: Unable to open bktr device. Error: %s\n" +#define MSGTR_TV_Bt848ErrorOpeningTunerDev "tvi_bsdbt848: Unable to open tuner device. Error: %s\n" +#define MSGTR_TV_Bt848ErrorOpeningDspDev "tvi_bsdbt848: Unable to open dsp device. Error: %s\n" +#define MSGTR_TV_Bt848ErrorConfiguringDsp "tvi_bsdbt848: Configuration of dsp failed. Error: %s\n" +#define MSGTR_TV_Bt848ErrorReadingAudio "tvi_bsdbt848: Error reading audio data. Error: %s\n" +#define MSGTR_TV_Bt848MmapFailed "tvi_bsdbt848: mmap failed. Error: %s\n" +#define MSGTR_TV_Bt848FrameBufAllocFailed "tvi_bsdbt848: Frame buffer allocation failed. Error: %s\n" +#define MSGTR_TV_Bt848ErrorSettingWidth "tvi_bsdbt848: Error setting picture width. Error: %s\n" +#define MSGTR_TV_Bt848ErrorSettingHeight "tvi_bsdbt848: Error setting picture height. 
Error: %s\n" +#define MSGTR_TV_Bt848UnableToStopCapture "tvi_bsdbt848: Unable to stop capture. Error: %s\n" +#define MSGTR_TV_TTSupportedLanguages "Supported Teletext languages:\n" +#define MSGTR_TV_TTSelectedLanguage "Selected default teletext language: %s\n" +#define MSGTR_TV_ScannerNotAvailableWithoutTuner "Channel scanner is not available without tuner\n" + +//tvi_dshow.c +#define MSGTR_TVI_DS_UnableConnectInputVideoDecoder "Unable to connect given input to video decoder. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableConnectInputAudioDecoder "Unable to connect given input to audio decoder. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableSelectVideoFormat "tvi_dshow: Unable to select video format. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableSelectAudioFormat "tvi_dshow: Unable to select audio format. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableGetMediaControlInterface "tvi_dshow: Unable to get IMediaControl interface. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableStartGraph "tvi_dshow: Unable to start graph! Error:0x%x\n" +#define MSGTR_TVI_DS_DeviceNotFound "tvi_dshow: Device #%d not found\n" +#define MSGTR_TVI_DS_UnableGetDeviceName "tvi_dshow: Unable to get name for device #%d\n" +#define MSGTR_TVI_DS_UsingDevice "tvi_dshow: Using device #%d: %s\n" +#define MSGTR_TVI_DS_DirectGetFreqFailed "tvi_dshow: Unable to get frequency directly. OS built-in channels table will be used.\n" +#define MSGTR_TVI_DS_UnableExtractFreqTable "tvi_dshow: Unable to load frequency table from kstvtune.ax\n" +#define MSGTR_TVI_DS_WrongDeviceParam "tvi_dshow: Wrong device parameter: %s\n" +#define MSGTR_TVI_DS_WrongDeviceIndex "tvi_dshow: Wrong device index: %d\n" +#define MSGTR_TVI_DS_WrongADeviceParam "tvi_dshow: Wrong adevice parameter: %s\n" +#define MSGTR_TVI_DS_WrongADeviceIndex "tvi_dshow: Wrong adevice index: %d\n" + +#define MSGTR_TVI_DS_SamplerateNotsupported "tvi_dshow: Samplerate %d is not supported by device. 
Failing back to first available.\n" +#define MSGTR_TVI_DS_VideoAdjustigNotSupported "tvi_dshow: Adjusting of brightness/hue/saturation/contrast is not supported by device\n" + +#define MSGTR_TVI_DS_ChangingWidthHeightNotSupported "tvi_dshow: Changing video width/height is not supported by device.\n" +#define MSGTR_TVI_DS_SelectingInputNotSupported "tvi_dshow: Selection of capture source is not supported by device\n" +#define MSGTR_TVI_DS_ErrorParsingAudioFormatStruct "tvi_dshow: Unable to parse audio format structure.\n" +#define MSGTR_TVI_DS_ErrorParsingVideoFormatStruct "tvi_dshow: Unable to parse video format structure.\n" +#define MSGTR_TVI_DS_UnableSetAudioMode "tvi_dshow: Unable to set audio mode %d. Error:0x%x\n" +#define MSGTR_TVI_DS_UnsupportedMediaType "tvi_dshow: Unsupported media type passed to %s\n" +#define MSGTR_TVI_DS_UnableGetsupportedVideoFormats "tvi_dshow: Unable to get supported media formats from video pin. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableGetsupportedAudioFormats "tvi_dshow: Unable to get supported media formats from audio pin. Error:0x%x Disabling audio.\n" +#define MSGTR_TVI_DS_UnableFindNearestChannel "tvi_dshow: Unable to find nearest channel in system frequency table\n" +#define MSGTR_TVI_DS_UnableToSetChannel "tvi_dshow: Unable to switch to nearest channel from system frequency table. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableTerminateVPPin "tvi_dshow: Unable to terminate VideoPort pin with any filter in graph. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableBuildVideoSubGraph "tvi_dshow: Unable to build video chain of capture graph. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableBuildAudioSubGraph "tvi_dshow: Unable to build audio chain of capture graph. Error:0x%x\n" +#define MSGTR_TVI_DS_UnableBuildVBISubGraph "tvi_dshow: Unable to build VBI chain of capture graph. 
Error:0x%x\n" +#define MSGTR_TVI_DS_GraphInitFailure "tvi_dshow: Directshow graph initialization failure.\n" +#define MSGTR_TVI_DS_NoVideoCaptureDevice "tvi_dshow: Unable to find video capture device\n" +#define MSGTR_TVI_DS_NoAudioCaptureDevice "tvi_dshow: Unable to find audio capture device\n" +#define MSGTR_TVI_DS_GetActualMediatypeFailed "tvi_dshow: Unable to get actual mediatype (Error:0x%x). Assuming equal to requested.\n" + +// url.c +#define MSGTR_MPDEMUX_URL_StringAlreadyEscaped "String appears to be already escaped in url_escape %c%c1%c2\n" + +// subtitles +#define MSGTR_SUBTITLES_SubRip_UnknownFontColor "SubRip: unknown font color in subtitle: %s\n" + + +/* untranslated messages from the English master file */ + + +#endif /* MPLAYER_HELP_MP_H */ diff --git a/libavfilter/libmpcodecs/img_format.c b/libavfilter/libmpcodecs/img_format.c new file mode 100644 index 0000000..61bf898 --- /dev/null +++ b/libavfilter/libmpcodecs/img_format.c @@ -0,0 +1,233 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include "config.h" +#include "img_format.h" +#include "stdio.h" +#include "libavutil/bswap.h" + +const char *ff_vo_format_name(int format) +{ + static char unknown_format[20]; + switch(format) + { + case IMGFMT_RGB1: return "RGB 1-bit"; + case IMGFMT_RGB4: return "RGB 4-bit"; + case IMGFMT_RG4B: return "RGB 4-bit per byte"; + case IMGFMT_RGB8: return "RGB 8-bit"; + case IMGFMT_RGB12: return "RGB 12-bit"; + case IMGFMT_RGB15: return "RGB 15-bit"; + case IMGFMT_RGB16: return "RGB 16-bit"; + case IMGFMT_RGB24: return "RGB 24-bit"; +// case IMGFMT_RGB32: return "RGB 32-bit"; + case IMGFMT_RGB48LE: return "RGB 48-bit LE"; + case IMGFMT_RGB48BE: return "RGB 48-bit BE"; + case IMGFMT_RGB64LE: return "RGB 64-bit LE"; + case IMGFMT_RGB64BE: return "RGB 64-bit BE"; + case IMGFMT_BGR1: return "BGR 1-bit"; + case IMGFMT_BGR4: return "BGR 4-bit"; + case IMGFMT_BG4B: return "BGR 4-bit per byte"; + case IMGFMT_BGR8: return "BGR 8-bit"; + case IMGFMT_BGR12: return "BGR 12-bit"; + case IMGFMT_BGR15: return "BGR 15-bit"; + case IMGFMT_BGR16: return "BGR 16-bit"; + case IMGFMT_BGR24: return "BGR 24-bit"; +// case IMGFMT_BGR32: return "BGR 32-bit"; + case IMGFMT_ABGR: return "ABGR"; + case IMGFMT_BGRA: return "BGRA"; + case IMGFMT_ARGB: return "ARGB"; + case IMGFMT_RGBA: return "RGBA"; + case IMGFMT_GBR24P: return "Planar GBR 24-bit"; + case IMGFMT_GBR12P: return "Planar GBR 36-bit"; + case IMGFMT_GBR14P: return "Planar GBR 42-bit"; + case IMGFMT_YVU9: return "Planar YVU9"; + case IMGFMT_IF09: return "Planar IF09"; + case IMGFMT_YV12: return "Planar YV12"; + case IMGFMT_I420: return "Planar I420"; + case IMGFMT_IYUV: return "Planar IYUV"; + case IMGFMT_CLPL: return "Planar CLPL"; + case IMGFMT_Y800: return "Planar Y800"; + case IMGFMT_Y8: return "Planar Y8"; + case IMGFMT_Y8A: return "Planar Y8 with alpha"; + case IMGFMT_Y16_LE: return "Planar Y16 little-endian"; + case IMGFMT_Y16_BE: return "Planar Y16 big-endian"; + case IMGFMT_420P16_LE: return "Planar 420P 16-bit 
little-endian"; + case IMGFMT_420P16_BE: return "Planar 420P 16-bit big-endian"; + case IMGFMT_420P14_LE: return "Planar 420P 14-bit little-endian"; + case IMGFMT_420P14_BE: return "Planar 420P 14-bit big-endian"; + case IMGFMT_420P12_LE: return "Planar 420P 12-bit little-endian"; + case IMGFMT_420P12_BE: return "Planar 420P 12-bit big-endian"; + case IMGFMT_420P10_LE: return "Planar 420P 10-bit little-endian"; + case IMGFMT_420P10_BE: return "Planar 420P 10-bit big-endian"; + case IMGFMT_420P9_LE: return "Planar 420P 9-bit little-endian"; + case IMGFMT_420P9_BE: return "Planar 420P 9-bit big-endian"; + case IMGFMT_422P16_LE: return "Planar 422P 16-bit little-endian"; + case IMGFMT_422P16_BE: return "Planar 422P 16-bit big-endian"; + case IMGFMT_422P14_LE: return "Planar 422P 14-bit little-endian"; + case IMGFMT_422P14_BE: return "Planar 422P 14-bit big-endian"; + case IMGFMT_422P12_LE: return "Planar 422P 12-bit little-endian"; + case IMGFMT_422P12_BE: return "Planar 422P 12-bit big-endian"; + case IMGFMT_422P10_LE: return "Planar 422P 10-bit little-endian"; + case IMGFMT_422P10_BE: return "Planar 422P 10-bit big-endian"; + case IMGFMT_422P9_LE: return "Planar 422P 9-bit little-endian"; + case IMGFMT_422P9_BE: return "Planar 422P 9-bit big-endian"; + case IMGFMT_444P16_LE: return "Planar 444P 16-bit little-endian"; + case IMGFMT_444P16_BE: return "Planar 444P 16-bit big-endian"; + case IMGFMT_444P14_LE: return "Planar 444P 14-bit little-endian"; + case IMGFMT_444P14_BE: return "Planar 444P 14-bit big-endian"; + case IMGFMT_444P12_LE: return "Planar 444P 12-bit little-endian"; + case IMGFMT_444P12_BE: return "Planar 444P 12-bit big-endian"; + case IMGFMT_444P10_LE: return "Planar 444P 10-bit little-endian"; + case IMGFMT_444P10_BE: return "Planar 444P 10-bit big-endian"; + case IMGFMT_444P9_LE: return "Planar 444P 9-bit little-endian"; + case IMGFMT_444P9_BE: return "Planar 444P 9-bit big-endian"; + case IMGFMT_420A: return "Planar 420P with alpha"; + case 
IMGFMT_444P: return "Planar 444P"; + case IMGFMT_444A: return "Planar 444P with alpha"; + case IMGFMT_422P: return "Planar 422P"; + case IMGFMT_422A: return "Planar 422P with alpha"; + case IMGFMT_411P: return "Planar 411P"; + case IMGFMT_NV12: return "Planar NV12"; + case IMGFMT_NV21: return "Planar NV21"; + case IMGFMT_HM12: return "Planar NV12 Macroblock"; + case IMGFMT_IUYV: return "Packed IUYV"; + case IMGFMT_IY41: return "Packed IY41"; + case IMGFMT_IYU1: return "Packed IYU1"; + case IMGFMT_IYU2: return "Packed IYU2"; + case IMGFMT_UYVY: return "Packed UYVY"; + case IMGFMT_UYNV: return "Packed UYNV"; + case IMGFMT_cyuv: return "Packed CYUV"; + case IMGFMT_Y422: return "Packed Y422"; + case IMGFMT_YUY2: return "Packed YUY2"; + case IMGFMT_YUNV: return "Packed YUNV"; + case IMGFMT_YVYU: return "Packed YVYU"; + case IMGFMT_Y41P: return "Packed Y41P"; + case IMGFMT_Y211: return "Packed Y211"; + case IMGFMT_Y41T: return "Packed Y41T"; + case IMGFMT_Y42T: return "Packed Y42T"; + case IMGFMT_V422: return "Packed V422"; + case IMGFMT_V655: return "Packed V655"; + case IMGFMT_CLJR: return "Packed CLJR"; + case IMGFMT_YUVP: return "Packed YUVP"; + case IMGFMT_UYVP: return "Packed UYVP"; + case IMGFMT_MPEGPES: return "Mpeg PES"; + case IMGFMT_ZRMJPEGNI: return "Zoran MJPEG non-interlaced"; + case IMGFMT_ZRMJPEGIT: return "Zoran MJPEG top field first"; + case IMGFMT_ZRMJPEGIB: return "Zoran MJPEG bottom field first"; + case IMGFMT_XVMC_MOCO_MPEG2: return "MPEG1/2 Motion Compensation"; + case IMGFMT_XVMC_IDCT_MPEG2: return "MPEG1/2 Motion Compensation and IDCT"; + case IMGFMT_VDPAU_MPEG1: return "MPEG1 VDPAU acceleration"; + case IMGFMT_VDPAU_MPEG2: return "MPEG2 VDPAU acceleration"; + case IMGFMT_VDPAU_H264: return "H.264 VDPAU acceleration"; + case IMGFMT_VDPAU_MPEG4: return "MPEG-4 Part 2 VDPAU acceleration"; + case IMGFMT_VDPAU_WMV3: return "WMV3 VDPAU acceleration"; + case IMGFMT_VDPAU_VC1: return "VC1 VDPAU acceleration"; + } + snprintf(unknown_format,20,"Unknown 
0x%04x",format); + return unknown_format; +} + +int ff_mp_get_chroma_shift(int format, int *x_shift, int *y_shift, int *component_bits) +{ + int xs = 0, ys = 0; + int bpp; + int err = 0; + int bits = 8; + if ((format & 0xff0000f0) == 0x34000050) + format = av_bswap32(format); + if ((format & 0xf00000ff) == 0x50000034) { + switch (format >> 24) { + case 0x50: + break; + case 0x51: + bits = 16; + break; + case 0x52: + bits = 10; + break; + case 0x53: + bits = 9; + break; + default: + err = 1; + break; + } + switch (format & 0x00ffffff) { + case 0x00343434: // 444 + xs = 0; + ys = 0; + break; + case 0x00323234: // 422 + xs = 1; + ys = 0; + break; + case 0x00303234: // 420 + xs = 1; + ys = 1; + break; + case 0x00313134: // 411 + xs = 2; + ys = 0; + break; + case 0x00303434: // 440 + xs = 0; + ys = 1; + break; + default: + err = 1; + break; + } + } else switch (format) { + case IMGFMT_444A: + xs = 0; + ys = 0; + break; + case IMGFMT_422A: + xs = 1; + ys = 0; + break; + case IMGFMT_420A: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_YV12: + xs = 1; + ys = 1; + break; + case IMGFMT_IF09: + case IMGFMT_YVU9: + xs = 2; + ys = 2; + break; + case IMGFMT_Y8: + case IMGFMT_Y800: + xs = 31; + ys = 31; + break; + default: + err = 1; + break; + } + if (x_shift) *x_shift = xs; + if (y_shift) *y_shift = ys; + if (component_bits) *component_bits = bits; + bpp = 8 + ((16 >> xs) >> ys); + if (format == IMGFMT_420A || format == IMGFMT_422A || format == IMGFMT_444A) + bpp += 8; + bpp *= (bits + 7) >> 3; + return err ? 0 : bpp; +} diff --git a/libavfilter/libmpcodecs/img_format.h b/libavfilter/libmpcodecs/img_format.h new file mode 100644 index 0000000..d4d64d8 --- /dev/null +++ b/libavfilter/libmpcodecs/img_format.h @@ -0,0 +1,300 @@ +/* + * This file is part of MPlayer. 
+ * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_IMG_FORMAT_H +#define MPLAYER_IMG_FORMAT_H + +#include "config.h" + +/* RGB/BGR Formats */ + +#define IMGFMT_RGB_MASK 0xFFFFFF00 +#define IMGFMT_RGB (('R'<<24)|('G'<<16)|('B'<<8)) +#define IMGFMT_RGB1 (IMGFMT_RGB|1) +#define IMGFMT_RGB4 (IMGFMT_RGB|4) +#define IMGFMT_RGB4_CHAR (IMGFMT_RGB|4|128) // RGB4 with 1 pixel per byte +#define IMGFMT_RGB8 (IMGFMT_RGB|8) +#define IMGFMT_RGB12 (IMGFMT_RGB|12) +#define IMGFMT_RGB15 (IMGFMT_RGB|15) +#define IMGFMT_RGB16 (IMGFMT_RGB|16) +#define IMGFMT_RGB24 (IMGFMT_RGB|24) +#define IMGFMT_RGB32 (IMGFMT_RGB|32) +#define IMGFMT_RGB48LE (IMGFMT_RGB|48) +#define IMGFMT_RGB48BE (IMGFMT_RGB|48|128) +#define IMGFMT_RGB64LE (IMGFMT_RGB|64) +#define IMGFMT_RGB64BE (IMGFMT_RGB|64|128) + +#define IMGFMT_BGR_MASK 0xFFFFFF00 +#define IMGFMT_BGR (('B'<<24)|('G'<<16)|('R'<<8)) +#define IMGFMT_BGR1 (IMGFMT_BGR|1) +#define IMGFMT_BGR4 (IMGFMT_BGR|4) +#define IMGFMT_BGR4_CHAR (IMGFMT_BGR|4|128) // BGR4 with 1 pixel per byte +#define IMGFMT_BGR8 (IMGFMT_BGR|8) +#define IMGFMT_BGR12 (IMGFMT_BGR|12) +#define IMGFMT_BGR15 (IMGFMT_BGR|15) +#define IMGFMT_BGR16 (IMGFMT_BGR|16) +#define IMGFMT_BGR24 (IMGFMT_BGR|24) +#define IMGFMT_BGR32 (IMGFMT_BGR|32) + +#define IMGFMT_GBR24P (('G'<<24)|('B'<<16)|('R'<<8)|24) +#define 
IMGFMT_GBR12PLE (('G'<<24)|('B'<<16)|('R'<<8)|36) +#define IMGFMT_GBR12PBE (('G'<<24)|('B'<<16)|('R'<<8)|36|128) +#define IMGFMT_GBR14PLE (('G'<<24)|('B'<<16)|('R'<<8)|42) +#define IMGFMT_GBR14PBE (('G'<<24)|('B'<<16)|('R'<<8)|42|128) + +#if HAVE_BIGENDIAN +#define IMGFMT_ABGR IMGFMT_RGB32 +#define IMGFMT_BGRA (IMGFMT_RGB32|128) +#define IMGFMT_ARGB IMGFMT_BGR32 +#define IMGFMT_RGBA (IMGFMT_BGR32|128) +#define IMGFMT_RGB64NE IMGFMT_RGB64BE +#define IMGFMT_RGB48NE IMGFMT_RGB48BE +#define IMGFMT_RGB12BE IMGFMT_RGB12 +#define IMGFMT_RGB12LE (IMGFMT_RGB12|128) +#define IMGFMT_RGB15BE IMGFMT_RGB15 +#define IMGFMT_RGB15LE (IMGFMT_RGB15|128) +#define IMGFMT_RGB16BE IMGFMT_RGB16 +#define IMGFMT_RGB16LE (IMGFMT_RGB16|128) +#define IMGFMT_BGR12BE IMGFMT_BGR12 +#define IMGFMT_BGR12LE (IMGFMT_BGR12|128) +#define IMGFMT_BGR15BE IMGFMT_BGR15 +#define IMGFMT_BGR15LE (IMGFMT_BGR15|128) +#define IMGFMT_BGR16BE IMGFMT_BGR16 +#define IMGFMT_BGR16LE (IMGFMT_BGR16|128) +#define IMGFMT_GBR12P IMGFMT_GBR12PBE +#define IMGFMT_GBR14P IMGFMT_GBR14PBE +#else +#define IMGFMT_ABGR (IMGFMT_BGR32|128) +#define IMGFMT_BGRA IMGFMT_BGR32 +#define IMGFMT_ARGB (IMGFMT_RGB32|128) +#define IMGFMT_RGBA IMGFMT_RGB32 +#define IMGFMT_RGB64NE IMGFMT_RGB64LE +#define IMGFMT_RGB48NE IMGFMT_RGB48LE +#define IMGFMT_RGB12BE (IMGFMT_RGB12|128) +#define IMGFMT_RGB12LE IMGFMT_RGB12 +#define IMGFMT_RGB15BE (IMGFMT_RGB15|128) +#define IMGFMT_RGB15LE IMGFMT_RGB15 +#define IMGFMT_RGB16BE (IMGFMT_RGB16|128) +#define IMGFMT_RGB16LE IMGFMT_RGB16 +#define IMGFMT_BGR12BE (IMGFMT_BGR12|128) +#define IMGFMT_BGR12LE IMGFMT_BGR12 +#define IMGFMT_BGR15BE (IMGFMT_BGR15|128) +#define IMGFMT_BGR15LE IMGFMT_BGR15 +#define IMGFMT_BGR16BE (IMGFMT_BGR16|128) +#define IMGFMT_BGR16LE IMGFMT_BGR16 +#define IMGFMT_GBR12P IMGFMT_GBR12PLE +#define IMGFMT_GBR14P IMGFMT_GBR14PLE +#endif + +/* old names for compatibility */ +#define IMGFMT_RG4B IMGFMT_RGB4_CHAR +#define IMGFMT_BG4B IMGFMT_BGR4_CHAR + +#define IMGFMT_IS_RGB(fmt) 
(((fmt)&IMGFMT_RGB_MASK)==IMGFMT_RGB) +#define IMGFMT_IS_BGR(fmt) (((fmt)&IMGFMT_BGR_MASK)==IMGFMT_BGR) + +#define IMGFMT_RGB_DEPTH(fmt) ((fmt)&0x7F) +#define IMGFMT_BGR_DEPTH(fmt) ((fmt)&0x7F) + + +/* Planar YUV Formats */ + +#define IMGFMT_YVU9 0x39555659 +#define IMGFMT_IF09 0x39304649 +#define IMGFMT_YV12 0x32315659 +#define IMGFMT_I420 0x30323449 +#define IMGFMT_IYUV 0x56555949 +#define IMGFMT_CLPL 0x4C504C43 +#define IMGFMT_Y800 0x30303859 +#define IMGFMT_Y8 0x20203859 +#define IMGFMT_NV12 0x3231564E +#define IMGFMT_NV21 0x3132564E +#define IMGFMT_Y16_LE 0x20363159 + +/* unofficial Planar Formats, FIXME if official 4CC exists */ +#define IMGFMT_444P 0x50343434 +#define IMGFMT_422P 0x50323234 +#define IMGFMT_411P 0x50313134 +#define IMGFMT_440P 0x50303434 +#define IMGFMT_HM12 0x32314D48 +#define IMGFMT_Y16_BE 0x59313620 + +// Gray with alpha +#define IMGFMT_Y8A 0x59320008 +// 4:2:0 planar with alpha +#define IMGFMT_420A 0x41303234 +// 4:2:2 planar with alpha +#define IMGFMT_422A 0x41323234 +// 4:4:4 planar with alpha +#define IMGFMT_444A 0x41343434 + +#define IMGFMT_444P16_LE 0x51343434 +#define IMGFMT_444P16_BE 0x34343451 +#define IMGFMT_444P14_LE 0x54343434 +#define IMGFMT_444P14_BE 0x34343454 +#define IMGFMT_444P12_LE 0x55343434 +#define IMGFMT_444P12_BE 0x34343455 +#define IMGFMT_444P10_LE 0x52343434 +#define IMGFMT_444P10_BE 0x34343452 +#define IMGFMT_444P9_LE 0x53343434 +#define IMGFMT_444P9_BE 0x34343453 +#define IMGFMT_422P16_LE 0x51323234 +#define IMGFMT_422P16_BE 0x34323251 +#define IMGFMT_422P14_LE 0x54323234 +#define IMGFMT_422P14_BE 0x34323254 +#define IMGFMT_422P12_LE 0x55323234 +#define IMGFMT_422P12_BE 0x34323255 +#define IMGFMT_422P10_LE 0x52323234 +#define IMGFMT_422P10_BE 0x34323252 +#define IMGFMT_422P9_LE 0x53323234 +#define IMGFMT_422P9_BE 0x34323253 +#define IMGFMT_420P16_LE 0x51303234 +#define IMGFMT_420P16_BE 0x34323051 +#define IMGFMT_420P14_LE 0x54303234 +#define IMGFMT_420P14_BE 0x34323054 +#define IMGFMT_420P12_LE 0x55303234 
+#define IMGFMT_420P12_BE 0x34323055 +#define IMGFMT_420P10_LE 0x52303234 +#define IMGFMT_420P10_BE 0x34323052 +#define IMGFMT_420P9_LE 0x53303234 +#define IMGFMT_420P9_BE 0x34323053 +#if HAVE_BIGENDIAN +#define IMGFMT_444P16 IMGFMT_444P16_BE +#define IMGFMT_444P14 IMGFMT_444P14_BE +#define IMGFMT_444P12 IMGFMT_444P12_BE +#define IMGFMT_444P10 IMGFMT_444P10_BE +#define IMGFMT_444P9 IMGFMT_444P9_BE +#define IMGFMT_422P16 IMGFMT_422P16_BE +#define IMGFMT_422P14 IMGFMT_422P14_BE +#define IMGFMT_422P12 IMGFMT_422P12_BE +#define IMGFMT_422P10 IMGFMT_422P10_BE +#define IMGFMT_422P9 IMGFMT_422P9_BE +#define IMGFMT_420P16 IMGFMT_420P16_BE +#define IMGFMT_420P14 IMGFMT_420P14_BE +#define IMGFMT_420P12 IMGFMT_420P12_BE +#define IMGFMT_420P10 IMGFMT_420P10_BE +#define IMGFMT_420P9 IMGFMT_420P9_BE +#define IMGFMT_Y16 IMGFMT_Y16_BE +#define IMGFMT_IS_YUVP16_NE(fmt) IMGFMT_IS_YUVP16_BE(fmt) +#else +#define IMGFMT_444P16 IMGFMT_444P16_LE +#define IMGFMT_444P14 IMGFMT_444P14_LE +#define IMGFMT_444P12 IMGFMT_444P12_LE +#define IMGFMT_444P10 IMGFMT_444P10_LE +#define IMGFMT_444P9 IMGFMT_444P9_LE +#define IMGFMT_422P16 IMGFMT_422P16_LE +#define IMGFMT_422P14 IMGFMT_422P14_LE +#define IMGFMT_422P12 IMGFMT_422P12_LE +#define IMGFMT_422P10 IMGFMT_422P10_LE +#define IMGFMT_422P9 IMGFMT_422P9_LE +#define IMGFMT_420P16 IMGFMT_420P16_LE +#define IMGFMT_420P14 IMGFMT_420P14_LE +#define IMGFMT_420P12 IMGFMT_420P12_LE +#define IMGFMT_420P10 IMGFMT_420P10_LE +#define IMGFMT_420P9 IMGFMT_420P9_LE +#define IMGFMT_Y16 IMGFMT_Y16_LE +#define IMGFMT_IS_YUVP16_NE(fmt) IMGFMT_IS_YUVP16_LE(fmt) +#endif + +#define IMGFMT_IS_YUVP16_LE(fmt) (((fmt - 0x51000034) & 0xfc0000ff) == 0) +#define IMGFMT_IS_YUVP16_BE(fmt) (((fmt - 0x34000051) & 0xff0000fc) == 0) +#define IMGFMT_IS_YUVP16(fmt) (IMGFMT_IS_YUVP16_LE(fmt) || IMGFMT_IS_YUVP16_BE(fmt)) + +/** + * \brief Find the corresponding full 16 bit format, i.e. IMGFMT_420P10_LE -> IMGFMT_420P16_LE + * \return normalized format ID or 0 if none exists. 
+ */ +static inline int normalize_yuvp16(int fmt) { + if (IMGFMT_IS_YUVP16_LE(fmt)) + return (fmt & 0x00ffffff) | 0x51000000; + if (IMGFMT_IS_YUVP16_BE(fmt)) + return (fmt & 0xffffff00) | 0x00000051; + return 0; +} + +/* Packed YUV Formats */ + +#define IMGFMT_IUYV 0x56595549 // Interlaced UYVY +#define IMGFMT_IY41 0x31435949 // Interlaced Y41P +#define IMGFMT_IYU1 0x31555949 +#define IMGFMT_IYU2 0x32555949 +#define IMGFMT_UYVY 0x59565955 +#define IMGFMT_UYNV 0x564E5955 // Exactly same as UYVY +#define IMGFMT_cyuv 0x76757963 // upside-down UYVY +#define IMGFMT_Y422 0x32323459 // Exactly same as UYVY +#define IMGFMT_YUY2 0x32595559 +#define IMGFMT_YUNV 0x564E5559 // Exactly same as YUY2 +#define IMGFMT_YVYU 0x55595659 +#define IMGFMT_Y41P 0x50313459 +#define IMGFMT_Y211 0x31313259 +#define IMGFMT_Y41T 0x54313459 // Y41P, Y lsb = transparency +#define IMGFMT_Y42T 0x54323459 // UYVY, Y lsb = transparency +#define IMGFMT_V422 0x32323456 // upside-down UYVY? +#define IMGFMT_V655 0x35353656 +#define IMGFMT_CLJR 0x524A4C43 +#define IMGFMT_YUVP 0x50565559 // 10-bit YUYV +#define IMGFMT_UYVP 0x50565955 // 10-bit UYVY + +/* Compressed Formats */ +#define IMGFMT_MPEGPES (('M'<<24)|('P'<<16)|('E'<<8)|('S')) +#define IMGFMT_MJPEG (('M')|('J'<<8)|('P'<<16)|('G'<<24)) +/* Formats that are understood by zoran chips, we include + * non-interlaced, interlaced top-first, interlaced bottom-first */ +#define IMGFMT_ZRMJPEGNI (('Z'<<24)|('R'<<16)|('N'<<8)|('I')) +#define IMGFMT_ZRMJPEGIT (('Z'<<24)|('R'<<16)|('I'<<8)|('T')) +#define IMGFMT_ZRMJPEGIB (('Z'<<24)|('R'<<16)|('I'<<8)|('B')) + +// I think that this code could not be used by any other codec/format +#define IMGFMT_XVMC 0x1DC70000 +#define IMGFMT_XVMC_MASK 0xFFFF0000 +#define IMGFMT_IS_XVMC(fmt) (((fmt)&IMGFMT_XVMC_MASK)==IMGFMT_XVMC) +//these are chroma420 +#define IMGFMT_XVMC_MOCO_MPEG2 (IMGFMT_XVMC|0x02) +#define IMGFMT_XVMC_IDCT_MPEG2 (IMGFMT_XVMC|0x82) + +// VDPAU specific format. 
+#define IMGFMT_VDPAU 0x1DC80000 +#define IMGFMT_VDPAU_MASK 0xFFFF0000 +#define IMGFMT_IS_VDPAU(fmt) (((fmt)&IMGFMT_VDPAU_MASK)==IMGFMT_VDPAU) +#define IMGFMT_VDPAU_MPEG1 (IMGFMT_VDPAU|0x01) +#define IMGFMT_VDPAU_MPEG2 (IMGFMT_VDPAU|0x02) +#define IMGFMT_VDPAU_H264 (IMGFMT_VDPAU|0x03) +#define IMGFMT_VDPAU_WMV3 (IMGFMT_VDPAU|0x04) +#define IMGFMT_VDPAU_VC1 (IMGFMT_VDPAU|0x05) +#define IMGFMT_VDPAU_MPEG4 (IMGFMT_VDPAU|0x06) + +#define IMGFMT_IS_HWACCEL(fmt) (IMGFMT_IS_VDPAU(fmt) || IMGFMT_IS_XVMC(fmt)) + +typedef struct { + void* data; + int size; + int id; // stream id. usually 0x1E0 + int timestamp; // pts, 90000 Hz counter based +} vo_mpegpes_t; + +const char *ff_vo_format_name(int format); + +/** + * Calculates the scale shifts for the chroma planes for planar YUV + * + * \param component_bits bits per component + * \return bits-per-pixel for format if successful (i.e. format is 3 or 4-planes planar YUV), 0 otherwise + */ +int ff_mp_get_chroma_shift(int format, int *x_shift, int *y_shift, int *component_bits); + +#endif /* MPLAYER_IMG_FORMAT_H */ diff --git a/libavfilter/libmpcodecs/libvo/fastmemcpy.h b/libavfilter/libmpcodecs/libvo/fastmemcpy.h new file mode 100644 index 0000000..5a17d01 --- /dev/null +++ b/libavfilter/libmpcodecs/libvo/fastmemcpy.h @@ -0,0 +1,99 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with MPlayer; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef MPLAYER_FASTMEMCPY_H +#define MPLAYER_FASTMEMCPY_H + +#include <inttypes.h> +#include <string.h> +#include <stddef.h> + +void * fast_memcpy(void * to, const void * from, size_t len); +void * mem2agpcpy(void * to, const void * from, size_t len); + +#if ! defined(CONFIG_FASTMEMCPY) || ! (HAVE_MMX || HAVE_MMX2 || HAVE_AMD3DNOW /* || HAVE_SSE || HAVE_SSE2 */) +#define mem2agpcpy(a,b,c) memcpy(a,b,c) +#define fast_memcpy(a,b,c) memcpy(a,b,c) +#endif + +static inline void * mem2agpcpy_pic(void * dst, const void * src, int bytesPerLine, int height, int dstStride, int srcStride) +{ + int i; + void *retval=dst; + + if(dstStride == srcStride) + { + if (srcStride < 0) { + src = (const uint8_t*)src + (height-1)*srcStride; + dst = (uint8_t*)dst + (height-1)*dstStride; + srcStride = -srcStride; + } + + mem2agpcpy(dst, src, srcStride*height); + } + else + { + for(i=0; i<height; i++) + { + mem2agpcpy(dst, src, bytesPerLine); + src = (const uint8_t*)src + srcStride; + dst = (uint8_t*)dst + dstStride; + } + } + + return retval; +} + +#define memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 0) +#define my_memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 1) + +/** + * \param limit2width always skip data between end of line and start of next + * instead of copying the full block when strides are the same + */ +static inline void * memcpy_pic2(void * dst, const void * src, + int bytesPerLine, int height, + int dstStride, int srcStride, int limit2width) +{ + int i; + void *retval=dst; + + if(!limit2width && dstStride == srcStride) + { + if (srcStride < 0) { + src = (const uint8_t*)src + (height-1)*srcStride; + dst = (uint8_t*)dst + (height-1)*dstStride; + srcStride = -srcStride; + } + + fast_memcpy(dst, src, srcStride*height); 
+ } + else + { + for(i=0; i<height; i++) + { + fast_memcpy(dst, src, bytesPerLine); + src = (const uint8_t*)src + srcStride; + dst = (uint8_t*)dst + dstStride; + } + } + + return retval; +} + +#endif /* MPLAYER_FASTMEMCPY_H */ diff --git a/libavfilter/libmpcodecs/libvo/video_out.h b/libavfilter/libmpcodecs/libvo/video_out.h new file mode 100644 index 0000000..2a3a0fa --- /dev/null +++ b/libavfilter/libmpcodecs/libvo/video_out.h @@ -0,0 +1,281 @@ +/* + * Copyright (C) Aaron Holtzman - Aug 1999 + * Strongly modified, most parts rewritten: A'rpi/ESP-team - 2000-2001 + * (C) MPlayer developers + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#ifndef MPLAYER_VIDEO_OUT_H +#define MPLAYER_VIDEO_OUT_H + +#include <inttypes.h> +#include <stdarg.h> + +//#include "sub/font_load.h" +#include "../img_format.h" +//#include "vidix/vidix.h" + +#define VO_EVENT_EXPOSE 1 +#define VO_EVENT_RESIZE 2 +#define VO_EVENT_KEYPRESS 4 +#define VO_EVENT_REINIT 8 +#define VO_EVENT_MOVE 16 + +/* Obsolete: VOCTRL_QUERY_VAA 1 */ +/* does the device support the required format */ +#define VOCTRL_QUERY_FORMAT 2 +/* signal a device reset seek */ +#define VOCTRL_RESET 3 +/* true if vo driver can use GUI created windows */ +#define VOCTRL_GUISUPPORT 4 +#define VOCTRL_GUI_NOWINDOW 19 +/* used to switch to fullscreen */ +#define VOCTRL_FULLSCREEN 5 +/* signal a device pause */ +#define VOCTRL_PAUSE 7 +/* start/resume playback */ +#define VOCTRL_RESUME 8 +/* libmpcodecs direct rendering: */ +#define VOCTRL_GET_IMAGE 9 +#define VOCTRL_DRAW_IMAGE 13 +#define VOCTRL_SET_SPU_PALETTE 14 +/* decoding ahead: */ +#define VOCTRL_GET_NUM_FRAMES 10 +#define VOCTRL_GET_FRAME_NUM 11 +#define VOCTRL_SET_FRAME_NUM 12 +#define VOCTRL_GET_PANSCAN 15 +#define VOCTRL_SET_PANSCAN 16 +/* equalizer controls */ +#define VOCTRL_SET_EQUALIZER 17 +#define VOCTRL_GET_EQUALIZER 18 +//#define VOCTRL_GUI_NOWINDOW 19 +/* Frame duplication */ +#define VOCTRL_DUPLICATE_FRAME 20 +// ... 
21 +#define VOCTRL_START_SLICE 21 + +#define VOCTRL_ONTOP 25 +#define VOCTRL_ROOTWIN 26 +#define VOCTRL_BORDER 27 +#define VOCTRL_DRAW_EOSD 28 +#define VOCTRL_GET_EOSD_RES 29 + +#define VOCTRL_SET_DEINTERLACE 30 +#define VOCTRL_GET_DEINTERLACE 31 + +#define VOCTRL_UPDATE_SCREENINFO 32 + +// Vo can be used by xover +#define VOCTRL_XOVERLAY_SUPPORT 22 + +#define VOCTRL_XOVERLAY_SET_COLORKEY 24 +typedef struct { + uint32_t x11; // The raw x11 color + uint16_t r,g,b; +} mp_colorkey_t; + +#define VOCTRL_XOVERLAY_SET_WIN 23 +typedef struct { + int x,y; + int w,h; +} mp_win_t; + +#define VO_TRUE 1 +#define VO_FALSE 0 +#define VO_ERROR -1 +#define VO_NOTAVAIL -2 +#define VO_NOTIMPL -3 + +#define VOFLAG_FULLSCREEN 0x01 +#define VOFLAG_MODESWITCHING 0x02 +#define VOFLAG_SWSCALE 0x04 +#define VOFLAG_FLIPPING 0x08 +#define VOFLAG_HIDDEN 0x10 //< Use to create a hidden window +#define VOFLAG_STEREO 0x20 //< Use to create a stereo-capable window +#define VOFLAG_XOVERLAY_SUB_VO 0x10000 + +typedef struct vo_info_s +{ + /* driver name ("Matrox Millennium G200/G400" */ + const char *name; + /* short name (for config strings) ("mga") */ + const char *short_name; + /* author ("Aaron Holtzman <aholtzma@ess.engr.uvic.ca>") */ + const char *author; + /* any additional comments */ + const char *comment; +} vo_info_t; + +typedef struct vo_functions_s +{ + const vo_info_t *info; + /* + * Preinitializes driver (real INITIALIZATION) + * arg - currently it's vo_subdevice + * returns: zero on successful initialization, non-zero on error. + */ + int (*preinit)(const char *arg); + /* + * Initialize (means CONFIGURE) the display driver. + * params: + * width,height: image source size + * d_width,d_height: size of the requested window size, just a hint + * fullscreen: flag, 0=windowd 1=fullscreen, just a hint + * title: window title, if available + * format: fourcc of pixel format + * returns : zero on successful initialization, non-zero on error. 
+ */ + int (*config)(uint32_t width, uint32_t height, uint32_t d_width, + uint32_t d_height, uint32_t fullscreen, char *title, + uint32_t format); + + /* + * Control interface + */ + int (*control)(uint32_t request, void *data, ...); + + /* + * Display a new RGB/BGR frame of the video to the screen. + * params: + * src[0] - pointer to the image + */ + int (*draw_frame)(uint8_t *src[]); + + /* + * Draw a planar YUV slice to the buffer: + * params: + * src[3] = source image planes (Y,U,V) + * stride[3] = source image planes line widths (in bytes) + * w,h = width*height of area to be copied (in Y pixels) + * x,y = position at the destination image (in Y pixels) + */ + int (*draw_slice)(uint8_t *src[], int stride[], int w,int h, int x,int y); + + /* + * Draws OSD to the screen buffer + */ + void (*draw_osd)(void); + + /* + * Blit/Flip buffer to the screen. Must be called after each frame! + */ + void (*flip_page)(void); + + /* + * This func is called after every frames to handle keyboard and + * other events. It's called in PAUSE mode too! + */ + void (*check_events)(void); + + /* + * Closes driver. Should restore the original state of the system. 
+ */ + void (*uninit)(void); +} vo_functions_t; + +const vo_functions_t* init_best_video_out(char** vo_list); +int config_video_out(const vo_functions_t *vo, uint32_t width, uint32_t height, + uint32_t d_width, uint32_t d_height, uint32_t flags, + char *title, uint32_t format); +void list_video_out(void); + +// NULL terminated array of all drivers +extern const vo_functions_t* const video_out_drivers[]; + +extern int vo_flags; + +extern int vo_config_count; + +extern int xinerama_screen; +extern int xinerama_x; +extern int xinerama_y; + +// correct resolution/bpp on screen: (should be autodetected by vo_init()) +extern int vo_depthonscreen; +extern int vo_screenwidth; +extern int vo_screenheight; + +// requested resolution/bpp: (-x -y -bpp options) +extern int vo_dx; +extern int vo_dy; +extern int vo_dwidth; +extern int vo_dheight; +extern int vo_dbpp; + +extern int vo_grabpointer; +extern int vo_doublebuffering; +extern int vo_directrendering; +extern int vo_vsync; +extern int vo_fsmode; +extern float vo_panscan; +extern int vo_adapter_num; +extern int vo_refresh_rate; +extern int vo_keepaspect; +extern int vo_rootwin; +extern int vo_ontop; +extern int vo_border; + +extern int vo_gamma_gamma; +extern int vo_gamma_brightness; +extern int vo_gamma_saturation; +extern int vo_gamma_contrast; +extern int vo_gamma_hue; +extern int vo_gamma_red_intensity; +extern int vo_gamma_green_intensity; +extern int vo_gamma_blue_intensity; + +extern int vo_nomouse_input; +extern int enable_mouse_movements; + +extern int vo_pts; +extern float vo_fps; + +extern char *vo_subdevice; + +extern int vo_colorkey; + +extern char *vo_winname; +extern char *vo_wintitle; + +extern int64_t WinID; + +typedef struct { + float min; + float max; + } range_t; + +float range_max(range_t *r); +int in_range(range_t *r, float f); +range_t *str2range(char *s); +extern char *monitor_hfreq_str; +extern char *monitor_vfreq_str; +extern char *monitor_dotclock_str; + +struct mp_keymap { + int from; + int to; 
+}; +int lookup_keymap_table(const struct mp_keymap *map, int key); +struct vo_rect { + int left, right, top, bottom, width, height; +}; +void calc_src_dst_rects(int src_width, int src_height, struct vo_rect *src, struct vo_rect *dst, + struct vo_rect *borders, const struct vo_rect *crop); +void vo_mouse_movement(int posx, int posy); + +#endif /* MPLAYER_VIDEO_OUT_H */ diff --git a/libavfilter/libmpcodecs/mp_image.c b/libavfilter/libmpcodecs/mp_image.c new file mode 100644 index 0000000..33d5c07 --- /dev/null +++ b/libavfilter/libmpcodecs/mp_image.c @@ -0,0 +1,246 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include "config.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#if HAVE_MALLOC_H +#include <malloc.h> +#endif + +#include "img_format.h" +#include "mp_image.h" + +#include "libvo/fastmemcpy.h" +//#include "libavutil/mem.h" + +void ff_mp_image_alloc_planes(mp_image_t *mpi) { + // IF09 - allocate space for 4. plane delta info - unused + if (mpi->imgfmt == IMGFMT_IF09) { + mpi->planes[0]=av_malloc(mpi->bpp*mpi->width*(mpi->height+2)/8+ + mpi->chroma_width*mpi->chroma_height); + } else + mpi->planes[0]=av_malloc(mpi->bpp*mpi->width*(mpi->height+2)/8); + if (mpi->flags&MP_IMGFLAG_PLANAR) { + int bpp = IMGFMT_IS_YUVP16(mpi->imgfmt)? 
2 : 1; + // YV12/I420/YVU9/IF09. feel free to add other planar formats here... + mpi->stride[0]=mpi->stride[3]=bpp*mpi->width; + if(mpi->num_planes > 2){ + mpi->stride[1]=mpi->stride[2]=bpp*mpi->chroma_width; + if(mpi->flags&MP_IMGFLAG_SWAPPED){ + // I420/IYUV (Y,U,V) + mpi->planes[1]=mpi->planes[0]+mpi->stride[0]*mpi->height; + mpi->planes[2]=mpi->planes[1]+mpi->stride[1]*mpi->chroma_height; + if (mpi->num_planes > 3) + mpi->planes[3]=mpi->planes[2]+mpi->stride[2]*mpi->chroma_height; + } else { + // YV12,YVU9,IF09 (Y,V,U) + mpi->planes[2]=mpi->planes[0]+mpi->stride[0]*mpi->height; + mpi->planes[1]=mpi->planes[2]+mpi->stride[1]*mpi->chroma_height; + if (mpi->num_planes > 3) + mpi->planes[3]=mpi->planes[1]+mpi->stride[1]*mpi->chroma_height; + } + } else { + // NV12/NV21 + mpi->stride[1]=mpi->chroma_width; + mpi->planes[1]=mpi->planes[0]+mpi->stride[0]*mpi->height; + } + } else { + mpi->stride[0]=mpi->width*mpi->bpp/8; + if (mpi->flags & MP_IMGFLAG_RGB_PALETTE) + mpi->planes[1] = av_malloc(1024); + } + mpi->flags|=MP_IMGFLAG_ALLOCATED; +} + +mp_image_t* ff_alloc_mpi(int w, int h, unsigned long int fmt) { + mp_image_t* mpi = ff_new_mp_image(w,h); + + ff_mp_image_setfmt(mpi,fmt); + ff_mp_image_alloc_planes(mpi); + + return mpi; +} + +void ff_copy_mpi(mp_image_t *dmpi, mp_image_t *mpi) { + if(mpi->flags&MP_IMGFLAG_PLANAR){ + memcpy_pic(dmpi->planes[0],mpi->planes[0], mpi->w, mpi->h, + dmpi->stride[0],mpi->stride[0]); + memcpy_pic(dmpi->planes[1],mpi->planes[1], mpi->chroma_width, mpi->chroma_height, + dmpi->stride[1],mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->chroma_width, mpi->chroma_height, + dmpi->stride[2],mpi->stride[2]); + } else { + memcpy_pic(dmpi->planes[0],mpi->planes[0], + mpi->w*(dmpi->bpp/8), mpi->h, + dmpi->stride[0],mpi->stride[0]); + } +} + +void ff_mp_image_setfmt(mp_image_t* mpi,unsigned int out_fmt){ + mpi->flags&=~(MP_IMGFLAG_PLANAR|MP_IMGFLAG_YUV|MP_IMGFLAG_SWAPPED); + mpi->imgfmt=out_fmt; + // compressed formats + 
if(out_fmt == IMGFMT_MPEGPES || + out_fmt == IMGFMT_ZRMJPEGNI || out_fmt == IMGFMT_ZRMJPEGIT || out_fmt == IMGFMT_ZRMJPEGIB || + IMGFMT_IS_HWACCEL(out_fmt)){ + mpi->bpp=0; + return; + } + mpi->num_planes=1; + if (IMGFMT_IS_RGB(out_fmt)) { + if (IMGFMT_RGB_DEPTH(out_fmt) < 8 && !(out_fmt&128)) + mpi->bpp = IMGFMT_RGB_DEPTH(out_fmt); + else + mpi->bpp=(IMGFMT_RGB_DEPTH(out_fmt)+7)&(~7); + return; + } + if (IMGFMT_IS_BGR(out_fmt)) { + if (IMGFMT_BGR_DEPTH(out_fmt) < 8 && !(out_fmt&128)) + mpi->bpp = IMGFMT_BGR_DEPTH(out_fmt); + else + mpi->bpp=(IMGFMT_BGR_DEPTH(out_fmt)+7)&(~7); + mpi->flags|=MP_IMGFLAG_SWAPPED; + return; + } + mpi->num_planes=3; + if (out_fmt == IMGFMT_GBR24P) { + mpi->bpp=24; + mpi->flags|=MP_IMGFLAG_PLANAR; + return; + } else if (out_fmt == IMGFMT_GBR12P) { + mpi->bpp=36; + mpi->flags|=MP_IMGFLAG_PLANAR; + return; + } else if (out_fmt == IMGFMT_GBR14P) { + mpi->bpp=42; + mpi->flags|=MP_IMGFLAG_PLANAR; + return; + } + mpi->flags|=MP_IMGFLAG_YUV; + if (ff_mp_get_chroma_shift(out_fmt, NULL, NULL, NULL)) { + mpi->flags|=MP_IMGFLAG_PLANAR; + mpi->bpp = ff_mp_get_chroma_shift(out_fmt, &mpi->chroma_x_shift, &mpi->chroma_y_shift, NULL); + mpi->chroma_width = mpi->width >> mpi->chroma_x_shift; + mpi->chroma_height = mpi->height >> mpi->chroma_y_shift; + } + switch(out_fmt){ + case IMGFMT_I420: + case IMGFMT_IYUV: + mpi->flags|=MP_IMGFLAG_SWAPPED; + case IMGFMT_YV12: + return; + case IMGFMT_420A: + case IMGFMT_422A: + case IMGFMT_444A: + case IMGFMT_IF09: + mpi->num_planes=4; + case IMGFMT_YVU9: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + case IMGFMT_440P: + case IMGFMT_444P16_LE: + case IMGFMT_444P16_BE: + case IMGFMT_444P14_LE: + case IMGFMT_444P14_BE: + case IMGFMT_444P12_LE: + case IMGFMT_444P12_BE: + case IMGFMT_444P10_LE: + case IMGFMT_444P10_BE: + case IMGFMT_444P9_LE: + case IMGFMT_444P9_BE: + case IMGFMT_422P16_LE: + case IMGFMT_422P16_BE: + case IMGFMT_422P14_LE: + case IMGFMT_422P14_BE: + case IMGFMT_422P12_LE: + case 
IMGFMT_422P12_BE: + case IMGFMT_422P10_LE: + case IMGFMT_422P10_BE: + case IMGFMT_422P9_LE: + case IMGFMT_422P9_BE: + case IMGFMT_420P16_LE: + case IMGFMT_420P16_BE: + case IMGFMT_420P14_LE: + case IMGFMT_420P14_BE: + case IMGFMT_420P12_LE: + case IMGFMT_420P12_BE: + case IMGFMT_420P10_LE: + case IMGFMT_420P10_BE: + case IMGFMT_420P9_LE: + case IMGFMT_420P9_BE: + return; + case IMGFMT_Y16_LE: + case IMGFMT_Y16_BE: + mpi->bpp=16; + case IMGFMT_Y800: + case IMGFMT_Y8: + /* they're planar ones, but for easier handling use them as packed */ + mpi->flags&=~MP_IMGFLAG_PLANAR; + mpi->num_planes=1; + return; + case IMGFMT_Y8A: + mpi->num_planes=2; + return; + case IMGFMT_UYVY: + mpi->flags|=MP_IMGFLAG_SWAPPED; + case IMGFMT_YUY2: + mpi->chroma_x_shift = 1; + mpi->bpp=16; + mpi->num_planes=1; + return; + case IMGFMT_NV12: + mpi->flags|=MP_IMGFLAG_SWAPPED; + case IMGFMT_NV21: + mpi->flags|=MP_IMGFLAG_PLANAR; + mpi->bpp=12; + mpi->num_planes=2; + mpi->chroma_width=(mpi->width>>0); + mpi->chroma_height=(mpi->height>>1); + mpi->chroma_x_shift=0; + mpi->chroma_y_shift=1; + return; + } + ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"mp_image: unknown out_fmt: 0x%X\n",out_fmt); + mpi->bpp=0; +} + +mp_image_t* ff_new_mp_image(int w,int h){ + mp_image_t* mpi = malloc(sizeof(mp_image_t)); + if(!mpi) return NULL; // error! + memset(mpi,0,sizeof(mp_image_t)); + mpi->width=mpi->w=w; + mpi->height=mpi->h=h; + return mpi; +} + +void ff_free_mp_image(mp_image_t* mpi){ + if(!mpi) return; + if(mpi->flags&MP_IMGFLAG_ALLOCATED){ + /* becouse we allocate the whole image in once */ + av_free(mpi->planes[0]); + if (mpi->flags & MP_IMGFLAG_RGB_PALETTE) + av_free(mpi->planes[1]); + } + free(mpi); +} + diff --git a/libavfilter/libmpcodecs/mp_image.h b/libavfilter/libmpcodecs/mp_image.h new file mode 100644 index 0000000..35b50a6 --- /dev/null +++ b/libavfilter/libmpcodecs/mp_image.h @@ -0,0 +1,157 @@ +/* + * This file is part of MPlayer. 
+ * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_MP_IMAGE_H +#define MPLAYER_MP_IMAGE_H + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#undef printf //FIXME +#undef fprintf //FIXME +#include "mp_msg.h" +#include "libavutil/avutil.h" +#include "libavutil/avassert.h" +#undef realloc +#undef malloc +#undef free +#undef rand +#undef srand +#undef printf +#undef strncpy +#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t" +#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC). + +//--------- codec's requirements (filled by the codec/vf) --------- + +//--- buffer content restrictions: +// set if buffer content shouldn't be modified: +#define MP_IMGFLAG_PRESERVE 0x01 +// set if buffer content will be READ. +// This can be e.g. for next frame's MC: (I/P mpeg frames) - +// then in combination with MP_IMGFLAG_PRESERVE - or it +// can be because a video filter or codec will read a significant +// amount of data while processing that frame (e.g. blending something +// onto the frame, MV based intra prediction). +// A frame marked like this should not be placed in to uncachable +// video RAM for example. 
+#define MP_IMGFLAG_READABLE 0x02 + +//--- buffer width/stride/plane restrictions: (used for direct rendering) +// stride _have_to_ be aligned to MB boundary: [for DR restrictions] +#define MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE 0x4 +// stride should be aligned to MB boundary: [for buffer allocation] +#define MP_IMGFLAG_PREFER_ALIGNED_STRIDE 0x8 +// codec accept any stride (>=width): +#define MP_IMGFLAG_ACCEPT_STRIDE 0x10 +// codec accept any width (width*bpp=stride -> stride%bpp==0) (>=width): +#define MP_IMGFLAG_ACCEPT_WIDTH 0x20 +//--- for planar formats only: +// uses only stride[0], and stride[1]=stride[2]=stride[0]>>mpi->chroma_x_shift +#define MP_IMGFLAG_COMMON_STRIDE 0x40 +// uses only planes[0], and calculates planes[1,2] from width,height,imgfmt +#define MP_IMGFLAG_COMMON_PLANE 0x80 + +#define MP_IMGFLAGMASK_RESTRICTIONS 0xFF + +//--------- color info (filled by ff_mp_image_setfmt() ) ----------- +// set if number of planes > 1 +#define MP_IMGFLAG_PLANAR 0x100 +// set if it's YUV colorspace +#define MP_IMGFLAG_YUV 0x200 +// set if it's swapped (BGR or YVU) plane/byteorder +#define MP_IMGFLAG_SWAPPED 0x400 +// set if you want memory for palette allocated and managed by ff_vf_get_image etc. +#define MP_IMGFLAG_RGB_PALETTE 0x800 + +#define MP_IMGFLAGMASK_COLORS 0xF00 + +// codec uses drawing/rendering callbacks (draw_slice()-like thing, DR method 2) +// [the codec will set this flag if it supports callbacks, and the vo _may_ +// clear it in get_image() if draw_slice() not implemented] +#define MP_IMGFLAG_DRAW_CALLBACK 0x1000 +// set if it's in video buffer/memory: [set by vo/vf's get_image() !!!] +#define MP_IMGFLAG_DIRECT 0x2000 +// set if buffer is allocated (used in destination images): +#define MP_IMGFLAG_ALLOCATED 0x4000 + +// buffer type was printed (do NOT set this flag - it's for INTERNAL USE!!!) +#define MP_IMGFLAG_TYPE_DISPLAYED 0x8000 + +// codec doesn't support any form of direct rendering - it has own buffer +// allocation. 
so we just export its buffer pointers: +#define MP_IMGTYPE_EXPORT 0 +// codec requires a static WO buffer, but it does only partial updates later: +#define MP_IMGTYPE_STATIC 1 +// codec just needs some WO memory, where it writes/copies the whole frame to: +#define MP_IMGTYPE_TEMP 2 +// I+P type, requires 2+ independent static R/W buffers +#define MP_IMGTYPE_IP 3 +// I+P+B type, requires 2+ independent static R/W and 1+ temp WO buffers +#define MP_IMGTYPE_IPB 4 +// Upper 16 bits give desired buffer number, -1 means get next available +#define MP_IMGTYPE_NUMBERED 5 +// Doesn't need any buffer, incomplete image (probably a first field only) +// we need this type to be able to differentiate between half frames and +// all other cases +#define MP_IMGTYPE_INCOMPLETE 6 + +#define MP_MAX_PLANES 4 + +#define MP_IMGFIELD_ORDERED 0x01 +#define MP_IMGFIELD_TOP_FIRST 0x02 +#define MP_IMGFIELD_REPEAT_FIRST 0x04 +#define MP_IMGFIELD_TOP 0x08 +#define MP_IMGFIELD_BOTTOM 0x10 +#define MP_IMGFIELD_INTERLACED 0x20 + +typedef struct mp_image { + unsigned int flags; + unsigned char type; + int number; + unsigned char bpp; // bits/pixel. NOT depth! 
for RGB it will be n*8 + unsigned int imgfmt; + int width,height; // stored dimensions + int x,y,w,h; // visible dimensions + unsigned char* planes[MP_MAX_PLANES]; + int stride[MP_MAX_PLANES]; + char * qscale; + int qstride; + int pict_type; // 0->unknown, 1->I, 2->P, 3->B + int fields; + int qscale_type; // 0->mpeg1/4/h263, 1->mpeg2 + int num_planes; + /* these are only used by planar formats Y,U(Cb),V(Cr) */ + int chroma_width; + int chroma_height; + int chroma_x_shift; // horizontal + int chroma_y_shift; // vertical + int usage_count; + /* for private use by filter or vo driver (to store buffer id or dmpi) */ + void* priv; +} mp_image_t; + +void ff_mp_image_setfmt(mp_image_t* mpi,unsigned int out_fmt); +mp_image_t* ff_new_mp_image(int w,int h); +void ff_free_mp_image(mp_image_t* mpi); + +mp_image_t* ff_alloc_mpi(int w, int h, unsigned long int fmt); +void ff_mp_image_alloc_planes(mp_image_t *mpi); +void ff_copy_mpi(mp_image_t *dmpi, mp_image_t *mpi); + +#endif /* MPLAYER_MP_IMAGE_H */ diff --git a/libavfilter/libmpcodecs/mp_msg.h b/libavfilter/libmpcodecs/mp_msg.h new file mode 100644 index 0000000..51cdff3 --- /dev/null +++ b/libavfilter/libmpcodecs/mp_msg.h @@ -0,0 +1,166 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#ifndef MPLAYER_MP_MSG_H +#define MPLAYER_MP_MSG_H + +#include <stdarg.h> + +// defined in mplayer.c and mencoder.c +extern int verbose; + +// verbosity elevel: + +/* Only messages level MSGL_FATAL-MSGL_STATUS should be translated, + * messages level MSGL_V and above should not be translated. */ + +#define MSGL_FATAL 0 // will exit/abort +#define MSGL_ERR 1 // continues +#define MSGL_WARN 2 // only warning +#define MSGL_HINT 3 // short help message +#define MSGL_INFO 4 // -quiet +#define MSGL_STATUS 5 // v=0 +#define MSGL_V 6 // v=1 +#define MSGL_DBG2 7 // v=2 +#define MSGL_DBG3 8 // v=3 +#define MSGL_DBG4 9 // v=4 +#define MSGL_DBG5 10 // v=5 + +#define MSGL_FIXME 1 // for conversions from printf where the appropriate MSGL is not known; set equal to ERR for obtrusiveness +#define MSGT_FIXME 0 // for conversions from printf where the appropriate MSGT is not known; set equal to GLOBAL for obtrusiveness + +// code/module: + +#define MSGT_GLOBAL 0 // common player stuff errors +#define MSGT_CPLAYER 1 // console player (mplayer.c) +#define MSGT_GPLAYER 2 // gui player + +#define MSGT_VO 3 // libvo +#define MSGT_AO 4 // libao + +#define MSGT_DEMUXER 5 // demuxer.c (general stuff) +#define MSGT_DS 6 // demux stream (add/read packet etc) +#define MSGT_DEMUX 7 // fileformat-specific stuff (demux_*.c) +#define MSGT_HEADER 8 // fileformat-specific header (*header.c) + +#define MSGT_AVSYNC 9 // mplayer.c timer stuff +#define MSGT_AUTOQ 10 // mplayer.c auto-quality stuff + +#define MSGT_CFGPARSER 11 // cfgparser.c + +#define MSGT_DECAUDIO 12 // av decoder +#define MSGT_DECVIDEO 13 + +#define MSGT_SEEK 14 // seeking code +#define MSGT_WIN32 15 // win32 dll stuff +#define MSGT_OPEN 16 // open.c (stream opening) +#define MSGT_DVD 17 // open.c (DVD init/read/seek) + +#define MSGT_PARSEES 18 // parse_es.c (mpeg stream parser) +#define MSGT_LIRC 19 // lirc_mp.c and input lirc driver + +#define MSGT_STREAM 20 // stream.c +#define MSGT_CACHE 21 // cache2.c + +#define 
MSGT_MENCODER 22 + +#define MSGT_XACODEC 23 // XAnim codecs + +#define MSGT_TV 24 // TV input subsystem + +#define MSGT_OSDEP 25 // OS-dependent parts + +#define MSGT_SPUDEC 26 // spudec.c + +#define MSGT_PLAYTREE 27 // Playtree handeling (playtree.c, playtreeparser.c) + +#define MSGT_INPUT 28 + +#define MSGT_VFILTER 29 + +#define MSGT_OSD 30 + +#define MSGT_NETWORK 31 + +#define MSGT_CPUDETECT 32 + +#define MSGT_CODECCFG 33 + +#define MSGT_SWS 34 + +#define MSGT_VOBSUB 35 +#define MSGT_SUBREADER 36 + +#define MSGT_AFILTER 37 // Audio filter messages + +#define MSGT_NETST 38 // Netstream + +#define MSGT_MUXER 39 // muxer layer + +#define MSGT_OSD_MENU 40 + +#define MSGT_IDENTIFY 41 // -identify output + +#define MSGT_RADIO 42 + +#define MSGT_ASS 43 // libass messages + +#define MSGT_LOADER 44 // dll loader messages + +#define MSGT_STATUSLINE 45 // playback/encoding status line + +#define MSGT_TELETEXT 46 // Teletext decoder + +#define MSGT_MAX 64 + + +extern char *ff_mp_msg_charset; +extern int ff_mp_msg_color; +extern int ff_mp_msg_module; + +extern int ff_mp_msg_levels[MSGT_MAX]; +extern int ff_mp_msg_level_all; + + +void ff_mp_msg_init(void); +int ff_mp_msg_test(int mod, int lev); + +#include "config.h" + +void ff_mp_msg_va(int mod, int lev, const char *format, va_list va); +#ifdef __GNUC__ +void ff_mp_msg(int mod, int lev, const char *format, ... ) __attribute__ ((format (printf, 3, 4))); +# ifdef MP_DEBUG +# define mp_dbg(mod,lev, args... ) ff_mp_msg(mod, lev, ## args ) +# else + // only useful for developers, disable but check syntax +# define mp_dbg(mod,lev, args... ) do { if (0) ff_mp_msg(mod, lev, ## args ); } while (0) +# endif +#else // not GNU C +void ff_mp_msg(int mod, int lev, const char *format, ... ); +# ifdef MP_DEBUG +# define mp_dbg(mod,lev, ... ) ff_mp_msg(mod, lev, __VA_ARGS__) +# else + // only useful for developers, disable but check syntax +# define mp_dbg(mod,lev, ... 
) do { if (0) ff_mp_msg(mod, lev, __VA_ARGS__); } while (0) +# endif +#endif /* __GNUC__ */ + +const char* ff_filename_recode(const char* filename); + +#endif /* MPLAYER_MP_MSG_H */ diff --git a/libavfilter/libmpcodecs/mpbswap.h b/libavfilter/libmpcodecs/mpbswap.h new file mode 100644 index 0000000..28f7337 --- /dev/null +++ b/libavfilter/libmpcodecs/mpbswap.h @@ -0,0 +1,34 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_MPBSWAP_H +#define MPLAYER_MPBSWAP_H + +#include <sys/types.h> +#include "config.h" +#include "libavutil/bswap.h" + +#define bswap_16(v) av_bswap16(v) +#define bswap_32(v) av_bswap32(v) +#define le2me_16(v) av_le2ne16(v) +#define le2me_32(v) av_le2ne32(v) +#define le2me_64(v) av_le2ne64(v) +#define be2me_16(v) av_be2ne16(v) +#define be2me_32(v) av_be2ne32(v) + +#endif /* MPLAYER_MPBSWAP_H */ diff --git a/libavfilter/libmpcodecs/mpc_info.h b/libavfilter/libmpcodecs/mpc_info.h new file mode 100644 index 0000000..8554699 --- /dev/null +++ b/libavfilter/libmpcodecs/mpc_info.h @@ -0,0 +1,43 @@ +/* + * This file is part of MPlayer. 
+ * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_MPC_INFO_H +#define MPLAYER_MPC_INFO_H + +typedef struct mp_codec_info_s +{ + /* codec long name ("Autodesk FLI/FLC Animation decoder" */ + const char *name; + /* short name (same as driver name in codecs.conf) ("dshow") */ + const char *short_name; + /* interface author/maintainer */ + const char *maintainer; + /* codec author ("Aaron Holtzman <aholtzma@ess.engr.uvic.ca>") */ + const char *author; + /* any additional comments */ + const char *comment; +} mp_codec_info_t; + +#define CONTROL_OK 1 +#define CONTROL_TRUE 1 +#define CONTROL_FALSE 0 +#define CONTROL_UNKNOWN -1 +#define CONTROL_ERROR -2 +#define CONTROL_NA -3 + +#endif /* MPLAYER_MPC_INFO_H */ diff --git a/libavfilter/libmpcodecs/pullup.c b/libavfilter/libmpcodecs/pullup.c new file mode 100644 index 0000000..b5fae9b --- /dev/null +++ b/libavfilter/libmpcodecs/pullup.c @@ -0,0 +1,823 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "libavutil/x86/asm.h" +#include "config.h" +#include "pullup.h" + + + +#if ARCH_X86 +#if HAVE_MMX +static int diff_y_mmx(unsigned char *a, unsigned char *b, int s) +{ + int ret; + __asm__ volatile ( + "movl $4, %%ecx \n\t" + "pxor %%mm4, %%mm4 \n\t" + "pxor %%mm7, %%mm7 \n\t" + + "1: \n\t" + + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_S"), %%mm2 \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "movq (%%"REG_D"), %%mm1 \n\t" + "add %%"REG_a", %%"REG_D" \n\t" + "psubusb %%mm1, %%mm2 \n\t" + "psubusb %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm0 \n\t" + "movq %%mm1, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddw %%mm0, %%mm4 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm2, %%mm4 \n\t" + "paddw %%mm3, %%mm4 \n\t" + + "decl %%ecx \n\t" + "jnz 1b \n\t" + + "movq %%mm4, %%mm3 \n\t" + "punpcklwd %%mm7, %%mm4 \n\t" + "punpckhwd %%mm7, %%mm3 \n\t" + "paddd %%mm4, %%mm3 \n\t" + "movd %%mm3, %%eax \n\t" + "psrlq $32, %%mm3 \n\t" + "movd %%mm3, %%edx \n\t" + "addl %%edx, %%eax \n\t" + "emms \n\t" + : "=a" (ret) + : "S" (a), "D" (b), "a" (s) + : "%ecx", "%edx" + ); + return ret; +} + +static int licomb_y_mmx(unsigned char *a, unsigned char *b, int s) +{ + int ret; + __asm__ volatile ( + "movl $4, %%ecx \n\t" + "pxor %%mm6, %%mm6 \n\t" + "pxor %%mm7, %%mm7 \n\t" + "sub %%"REG_a", %%"REG_D" \n\t" + + "2: \n\t" + + "movq (%%"REG_D"), %%mm0 \n\t" + "movq 
(%%"REG_D"), %%mm1 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "movq (%%"REG_D",%%"REG_a"), %%mm2 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "paddw %%mm0, %%mm0 \n\t" + "paddw %%mm2, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "psubusw %%mm1, %%mm0 \n\t" + "psubusw %%mm2, %%mm1 \n\t" + "paddw %%mm0, %%mm6 \n\t" + "paddw %%mm1, %%mm6 \n\t" + + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_D"), %%mm1 \n\t" + "punpckhbw %%mm7, %%mm0 \n\t" + "movq (%%"REG_D",%%"REG_a"), %%mm2 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm2 \n\t" + "paddw %%mm0, %%mm0 \n\t" + "paddw %%mm2, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "psubusw %%mm1, %%mm0 \n\t" + "psubusw %%mm2, %%mm1 \n\t" + "paddw %%mm0, %%mm6 \n\t" + "paddw %%mm1, %%mm6 \n\t" + + "movq (%%"REG_D",%%"REG_a"), %%mm0 \n\t" + "movq (%%"REG_S"), %%mm1 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "paddw %%mm0, %%mm0 \n\t" + "paddw %%mm2, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "psubusw %%mm1, %%mm0 \n\t" + "psubusw %%mm2, %%mm1 \n\t" + "paddw %%mm0, %%mm6 \n\t" + "paddw %%mm1, %%mm6 \n\t" + + "movq (%%"REG_D",%%"REG_a"), %%mm0 \n\t" + "movq (%%"REG_S"), %%mm1 \n\t" + "punpckhbw %%mm7, %%mm0 \n\t" + "movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm2 \n\t" + "paddw %%mm0, %%mm0 \n\t" + "paddw %%mm2, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "psubusw %%mm1, %%mm0 \n\t" + "psubusw %%mm2, %%mm1 \n\t" + "paddw %%mm0, %%mm6 \n\t" + "paddw %%mm1, %%mm6 \n\t" + + "add %%"REG_a", %%"REG_S" \n\t" + "add %%"REG_a", %%"REG_D" \n\t" + "decl %%ecx \n\t" + "jnz 2b \n\t" + + "movq %%mm6, %%mm5 \n\t" + "punpcklwd %%mm7, %%mm6 \n\t" + "punpckhwd %%mm7, %%mm5 \n\t" + "paddd %%mm6, %%mm5 \n\t" + "movd %%mm5, %%eax \n\t" + "psrlq $32, %%mm5 \n\t" + "movd %%mm5, %%edx \n\t" + "addl %%edx, %%eax \n\t" + + "emms \n\t" + : "=a" (ret) + : "S" (a), "D" (b), "a" (s) + : 
"%ecx", "%edx" + ); + return ret; +} + +static int var_y_mmx(unsigned char *a, unsigned char *b, int s) +{ + int ret; + __asm__ volatile ( + "movl $3, %%ecx \n\t" + "pxor %%mm4, %%mm4 \n\t" + "pxor %%mm7, %%mm7 \n\t" + + "1: \n\t" + + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_S"), %%mm2 \n\t" + "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "psubusb %%mm1, %%mm2 \n\t" + "psubusb %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm0 \n\t" + "movq %%mm1, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddw %%mm0, %%mm4 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm2, %%mm4 \n\t" + "paddw %%mm3, %%mm4 \n\t" + + "decl %%ecx \n\t" + "jnz 1b \n\t" + + "movq %%mm4, %%mm3 \n\t" + "punpcklwd %%mm7, %%mm4 \n\t" + "punpckhwd %%mm7, %%mm3 \n\t" + "paddd %%mm4, %%mm3 \n\t" + "movd %%mm3, %%eax \n\t" + "psrlq $32, %%mm3 \n\t" + "movd %%mm3, %%edx \n\t" + "addl %%edx, %%eax \n\t" + "emms \n\t" + : "=a" (ret) + : "S" (a), "a" (s) + : "%ecx", "%edx" + ); + return 4*ret; +} +#endif +#endif + +#define ABS(a) (((a)^((a)>>31))-((a)>>31)) + +static int diff_y(unsigned char *a, unsigned char *b, int s) +{ + int i, j, diff=0; + for (i=4; i; i--) { + for (j=0; j<8; j++) diff += ABS(a[j]-b[j]); + a+=s; b+=s; + } + return diff; +} + +static int licomb_y(unsigned char *a, unsigned char *b, int s) +{ + int i, j, diff=0; + for (i=4; i; i--) { + for (j=0; j<8; j++) + diff += ABS((a[j]<<1) - b[j-s] - b[j]) + + ABS((b[j]<<1) - a[j] - a[j+s]); + a+=s; b+=s; + } + return diff; +} + +#if 0 +static int qpcomb_y(unsigned char *a, unsigned char *b, int s) +{ + int i, j, diff=0; + for (i=4; i; i--) { + for (j=0; j<8; j++) + diff += ABS(a[j] - 3*b[j-s] + 3*a[j+s] - b[j]); + a+=s; b+=s; + } + return diff; +} + +static int licomb_y_test(unsigned char *a, unsigned char *b, int s) +{ + int c = licomb_y(a,b,s); + int m = licomb_y_mmx(a,b,s); + if (c != m) printf("%d != %d\n", c, m); + return m; 
+} +#endif + +static int var_y(unsigned char *a, unsigned char *b, int s) +{ + int i, j, var=0; + for (i=3; i; i--) { + for (j=0; j<8; j++) { + var += ABS(a[j]-a[j+s]); + } + a+=s; b+=s; + } + return 4*var; /* match comb scaling */ +} + + + + + + + + + +static void alloc_buffer(struct pullup_context *c, struct pullup_buffer *b) +{ + int i; + if (b->planes) return; + b->planes = calloc(c->nplanes, sizeof(unsigned char *)); + for (i = 0; i < c->nplanes; i++) { + b->planes[i] = malloc(c->h[i]*c->stride[i]); + /* Deal with idiotic 128=0 for chroma: */ + memset(b->planes[i], c->background[i], c->h[i]*c->stride[i]); + } +} + +struct pullup_buffer *ff_pullup_lock_buffer(struct pullup_buffer *b, int parity) +{ + if (!b) return 0; + if ((parity+1) & 1) b->lock[0]++; + if ((parity+1) & 2) b->lock[1]++; + return b; +} + +void ff_pullup_release_buffer(struct pullup_buffer *b, int parity) +{ + if (!b) return; + if ((parity+1) & 1) b->lock[0]--; + if ((parity+1) & 2) b->lock[1]--; +} + +struct pullup_buffer *ff_pullup_get_buffer(struct pullup_context *c, int parity) +{ + int i; + + /* Try first to get the sister buffer for the previous field */ + if (parity < 2 && c->last && parity != c->last->parity + && !c->last->buffer->lock[parity]) { + alloc_buffer(c, c->last->buffer); + return ff_pullup_lock_buffer(c->last->buffer, parity); + } + + /* Prefer a buffer with both fields open */ + for (i = 0; i < c->nbuffers; i++) { + if (c->buffers[i].lock[0]) continue; + if (c->buffers[i].lock[1]) continue; + alloc_buffer(c, &c->buffers[i]); + return ff_pullup_lock_buffer(&c->buffers[i], parity); + } + + if (parity == 2) return 0; + + /* Search for any half-free buffer */ + for (i = 0; i < c->nbuffers; i++) { + if (((parity+1) & 1) && c->buffers[i].lock[0]) continue; + if (((parity+1) & 2) && c->buffers[i].lock[1]) continue; + alloc_buffer(c, &c->buffers[i]); + return ff_pullup_lock_buffer(&c->buffers[i], parity); + } + + return 0; +} + + + + + + +static void compute_metric(struct 
pullup_context *c, + struct pullup_field *fa, int pa, + struct pullup_field *fb, int pb, + int (*func)(unsigned char *, unsigned char *, int), int *dest) +{ + unsigned char *a, *b; + int x, y; + int mp = c->metric_plane; + int xstep = c->bpp[mp]; + int ystep = c->stride[mp]<<3; + int s = c->stride[mp]<<1; /* field stride */ + int w = c->metric_w*xstep; + + if (!fa->buffer || !fb->buffer) return; + + /* Shortcut for duplicate fields (e.g. from RFF flag) */ + if (fa->buffer == fb->buffer && pa == pb) { + memset(dest, 0, c->metric_len * sizeof(int)); + return; + } + + a = fa->buffer->planes[mp] + pa * c->stride[mp] + c->metric_offset; + b = fb->buffer->planes[mp] + pb * c->stride[mp] + c->metric_offset; + + for (y = c->metric_h; y; y--) { + for (x = 0; x < w; x += xstep) { + *dest++ = func(a + x, b + x, s); + } + a += ystep; b += ystep; + } +} + + + + + +static void alloc_metrics(struct pullup_context *c, struct pullup_field *f) +{ + f->diffs = calloc(c->metric_len, sizeof(int)); + f->comb = calloc(c->metric_len, sizeof(int)); + f->var = calloc(c->metric_len, sizeof(int)); + /* add more metrics here as needed */ +} + +static struct pullup_field *make_field_queue(struct pullup_context *c, int len) +{ + struct pullup_field *head, *f; + f = head = calloc(1, sizeof(struct pullup_field)); + alloc_metrics(c, f); + for (; len > 0; len--) { + f->next = calloc(1, sizeof(struct pullup_field)); + f->next->prev = f; + f = f->next; + alloc_metrics(c, f); + } + f->next = head; + head->prev = f; + return head; +} + +static void check_field_queue(struct pullup_context *c) +{ + if (c->head->next == c->first) { + struct pullup_field *f = calloc(1, sizeof(struct pullup_field)); + alloc_metrics(c, f); + f->prev = c->head; + f->next = c->first; + c->head->next = f; + c->first->prev = f; + } +} + +void ff_pullup_submit_field(struct pullup_context *c, struct pullup_buffer *b, int parity) +{ + struct pullup_field *f; + + /* Grow the circular list if needed */ + check_field_queue(c); + + /* 
Cannot have two fields of same parity in a row; drop the new one */ + if (c->last && c->last->parity == parity) return; + + f = c->head; + f->parity = parity; + f->buffer = ff_pullup_lock_buffer(b, parity); + f->flags = 0; + f->breaks = 0; + f->affinity = 0; + + compute_metric(c, f, parity, f->prev->prev, parity, c->diff, f->diffs); + compute_metric(c, parity?f->prev:f, 0, parity?f:f->prev, 1, c->comb, f->comb); + compute_metric(c, f, parity, f, -1, c->var, f->var); + + /* Advance the circular list */ + if (!c->first) c->first = c->head; + c->last = c->head; + c->head = c->head->next; +} + +void ff_pullup_flush_fields(struct pullup_context *c) +{ + struct pullup_field *f; + + for (f = c->first; f && f != c->head; f = f->next) { + ff_pullup_release_buffer(f->buffer, f->parity); + f->buffer = 0; + } + c->first = c->last = 0; +} + + + + + + + + +#define F_HAVE_BREAKS 1 +#define F_HAVE_AFFINITY 2 + + +#define BREAK_LEFT 1 +#define BREAK_RIGHT 2 + + + + +static int queue_length(struct pullup_field *begin, struct pullup_field *end) +{ + int count = 1; + struct pullup_field *f; + + if (!begin || !end) return 0; + for (f = begin; f != end; f = f->next) count++; + return count; +} + +static int find_first_break(struct pullup_field *f, int max) +{ + int i; + for (i = 0; i < max; i++) { + if (f->breaks & BREAK_RIGHT || f->next->breaks & BREAK_LEFT) + return i+1; + f = f->next; + } + return 0; +} + +static void compute_breaks(struct pullup_context *c, struct pullup_field *f0) +{ + int i; + struct pullup_field *f1 = f0->next; + struct pullup_field *f2 = f1->next; + struct pullup_field *f3 = f2->next; + int l, max_l=0, max_r=0; + //struct pullup_field *ff; + //for (i=0, ff=c->first; ff != f0; i++, ff=ff->next); + + if (f0->flags & F_HAVE_BREAKS) return; + //printf("\n%d: ", i); + f0->flags |= F_HAVE_BREAKS; + + /* Special case when fields are 100% identical */ + if (f0->buffer == f2->buffer && f1->buffer != f3->buffer) { + f2->breaks |= BREAK_RIGHT; + return; + } + if 
(f0->buffer != f2->buffer && f1->buffer == f3->buffer) { + f1->breaks |= BREAK_LEFT; + return; + } + + for (i = 0; i < c->metric_len; i++) { + l = f2->diffs[i] - f3->diffs[i]; + if (l > max_l) max_l = l; + if (-l > max_r) max_r = -l; + } + /* Don't get tripped up when differences are mostly quant error */ + //printf("%d %d\n", max_l, max_r); + if (max_l + max_r < 128) return; + if (max_l > 4*max_r) f1->breaks |= BREAK_LEFT; + if (max_r > 4*max_l) f2->breaks |= BREAK_RIGHT; +} + +static void compute_affinity(struct pullup_context *c, struct pullup_field *f) +{ + int i; + int max_l=0, max_r=0, l; + if (f->flags & F_HAVE_AFFINITY) return; + f->flags |= F_HAVE_AFFINITY; + if (f->buffer == f->next->next->buffer) { + f->affinity = 1; + f->next->affinity = 0; + f->next->next->affinity = -1; + f->next->flags |= F_HAVE_AFFINITY; + f->next->next->flags |= F_HAVE_AFFINITY; + return; + } + if (1) { + for (i = 0; i < c->metric_len; i++) { + int lv = f->prev->var[i]; + int rv = f->next->var[i]; + int v = f->var[i]; + int lc = f->comb[i] - (v+lv) + ABS(v-lv); + int rc = f->next->comb[i] - (v+rv) + ABS(v-rv); + lc = lc>0 ? lc : 0; + rc = rc>0 ? 
rc : 0; + l = lc - rc; + if (l > max_l) max_l = l; + if (-l > max_r) max_r = -l; + } + if (max_l + max_r < 64) return; + if (max_r > 6*max_l) f->affinity = -1; + else if (max_l > 6*max_r) f->affinity = 1; + } else { + for (i = 0; i < c->metric_len; i++) { + l = f->comb[i] - f->next->comb[i]; + if (l > max_l) max_l = l; + if (-l > max_r) max_r = -l; + } + if (max_l + max_r < 64) return; + if (max_r > 2*max_l) f->affinity = -1; + else if (max_l > 2*max_r) f->affinity = 1; + } +} + +static void foo(struct pullup_context *c) +{ + struct pullup_field *f = c->first; + int i, n = queue_length(f, c->last); + for (i = 0; i < n-1; i++) { + if (i < n-3) compute_breaks(c, f); + compute_affinity(c, f); + f = f->next; + } +} + +static int decide_frame_length(struct pullup_context *c) +{ + struct pullup_field *f0 = c->first; + struct pullup_field *f1 = f0->next; + struct pullup_field *f2 = f1->next; + int l; + + if (queue_length(c->first, c->last) < 4) return 0; + foo(c); + + if (f0->affinity == -1) return 1; + + l = find_first_break(f0, 3); + if (l == 1 && c->strict_breaks < 0) l = 0; + + switch (l) { + case 1: + if (c->strict_breaks < 1 && f0->affinity == 1 && f1->affinity == -1) + return 2; + else return 1; + case 2: + /* FIXME: strictly speaking, f0->prev is no longer valid... 
:) */ + if (c->strict_pairs + && (f0->prev->breaks & BREAK_RIGHT) && (f2->breaks & BREAK_LEFT) + && (f0->affinity != 1 || f1->affinity != -1) ) + return 1; + if (f1->affinity == 1) return 1; + else return 2; + case 3: + if (f2->affinity == 1) return 2; + else return 3; + default: + /* 9 possibilities covered before switch */ + if (f1->affinity == 1) return 1; /* covers 6 */ + else if (f1->affinity == -1) return 2; /* covers 6 */ + else if (f2->affinity == -1) { /* covers 2 */ + if (f0->affinity == 1) return 3; + else return 1; + } + else return 2; /* the remaining 6 */ + } +} + + +static void print_aff_and_breaks(struct pullup_context *c, struct pullup_field *f) +{ + int i; + struct pullup_field *f0 = f; + const char aff_l[] = "+..", aff_r[] = "..+"; + printf("\naffinity: "); + for (i = 0; i < 4; i++) { + printf("%c%d%c", aff_l[1+f->affinity], i, aff_r[1+f->affinity]); + f = f->next; + } + f = f0; + printf("\nbreaks: "); + for (i=0; i<4; i++) { + printf("%c%d%c", f->breaks & BREAK_LEFT ? '|' : '.', i, f->breaks & BREAK_RIGHT ? '|' : '.'); + f = f->next; + } + printf("\n"); +} + + + + + +struct pullup_frame *ff_pullup_get_frame(struct pullup_context *c) +{ + int i; + struct pullup_frame *fr = c->frame; + int n = decide_frame_length(c); + int aff = c->first->next->affinity; + + if (!n) return 0; + if (fr->lock) return 0; + + if (c->verbose) { + print_aff_and_breaks(c, c->first); + printf("duration: %d \n", n); + } + + fr->lock++; + fr->length = n; + fr->parity = c->first->parity; + fr->buffer = 0; + for (i = 0; i < n; i++) { + /* We cheat and steal the buffer without release+relock */ + fr->ifields[i] = c->first->buffer; + c->first->buffer = 0; + c->first = c->first->next; + } + + if (n == 1) { + fr->ofields[fr->parity] = fr->ifields[0]; + fr->ofields[fr->parity^1] = 0; + } else if (n == 2) { + fr->ofields[fr->parity] = fr->ifields[0]; + fr->ofields[fr->parity^1] = fr->ifields[1]; + } else if (n == 3) { + if (aff == 0) + aff = (fr->ifields[0] == fr->ifields[1]) ? 
-1 : 1; + /* else if (c->verbose) printf("forced aff: %d \n", aff); */ + fr->ofields[fr->parity] = fr->ifields[1+aff]; + fr->ofields[fr->parity^1] = fr->ifields[1]; + } + ff_pullup_lock_buffer(fr->ofields[0], 0); + ff_pullup_lock_buffer(fr->ofields[1], 1); + + if (fr->ofields[0] == fr->ofields[1]) { + fr->buffer = fr->ofields[0]; + ff_pullup_lock_buffer(fr->buffer, 2); + return fr; + } + return fr; +} + +static void copy_field(struct pullup_context *c, struct pullup_buffer *dest, + struct pullup_buffer *src, int parity) +{ + int i, j; + unsigned char *d, *s; + for (i = 0; i < c->nplanes; i++) { + s = src->planes[i] + parity*c->stride[i]; + d = dest->planes[i] + parity*c->stride[i]; + for (j = c->h[i]>>1; j; j--) { + memcpy(d, s, c->stride[i]); + s += c->stride[i]<<1; + d += c->stride[i]<<1; + } + } +} + +void ff_pullup_pack_frame(struct pullup_context *c, struct pullup_frame *fr) +{ + int i; + if (fr->buffer) return; + if (fr->length < 2) return; /* FIXME: deal with this */ + for (i = 0; i < 2; i++) + { + if (fr->ofields[i]->lock[i^1]) continue; + fr->buffer = fr->ofields[i]; + ff_pullup_lock_buffer(fr->buffer, 2); + copy_field(c, fr->buffer, fr->ofields[i^1], i^1); + return; + } + fr->buffer = ff_pullup_get_buffer(c, 2); + copy_field(c, fr->buffer, fr->ofields[0], 0); + copy_field(c, fr->buffer, fr->ofields[1], 1); +} + +void ff_pullup_release_frame(struct pullup_frame *fr) +{ + int i; + for (i = 0; i < fr->length; i++) + ff_pullup_release_buffer(fr->ifields[i], fr->parity ^ (i&1)); + ff_pullup_release_buffer(fr->ofields[0], 0); + ff_pullup_release_buffer(fr->ofields[1], 1); + if (fr->buffer) ff_pullup_release_buffer(fr->buffer, 2); + fr->lock--; +} + + + + + + +struct pullup_context *ff_pullup_alloc_context(void) +{ + struct pullup_context *c; + + c = calloc(1, sizeof(struct pullup_context)); + + return c; +} + +void ff_pullup_preinit_context(struct pullup_context *c) +{ + c->bpp = calloc(c->nplanes, sizeof(int)); + c->w = calloc(c->nplanes, sizeof(int)); + c->h 
= calloc(c->nplanes, sizeof(int)); + c->stride = calloc(c->nplanes, sizeof(int)); + c->background = calloc(c->nplanes, sizeof(int)); +} + +void ff_pullup_init_context(struct pullup_context *c) +{ + int mp = c->metric_plane; + if (c->nbuffers < 10) c->nbuffers = 10; + c->buffers = calloc(c->nbuffers, sizeof (struct pullup_buffer)); + + c->metric_w = (c->w[mp] - ((c->junk_left + c->junk_right) << 3)) >> 3; + c->metric_h = (c->h[mp] - ((c->junk_top + c->junk_bottom) << 1)) >> 3; + c->metric_offset = c->junk_left*c->bpp[mp] + (c->junk_top<<1)*c->stride[mp]; + c->metric_len = c->metric_w * c->metric_h; + + c->head = make_field_queue(c, 8); + + c->frame = calloc(1, sizeof (struct pullup_frame)); + c->frame->ifields = calloc(3, sizeof (struct pullup_buffer *)); + + switch(c->format) { + case PULLUP_FMT_Y: + c->diff = diff_y; + c->comb = licomb_y; + c->var = var_y; +#if ARCH_X86 +#if HAVE_MMX + if (c->cpu & PULLUP_CPU_MMX) { + c->diff = diff_y_mmx; + c->comb = licomb_y_mmx; + c->var = var_y_mmx; + } +#endif +#endif + /* c->comb = qpcomb_y; */ + break; +#if 0 + case PULLUP_FMT_YUY2: + c->diff = diff_yuy2; + break; + case PULLUP_FMT_RGB32: + c->diff = diff_rgb32; + break; +#endif + } +} + +void ff_pullup_free_context(struct pullup_context *c) +{ + struct pullup_field *f; + free(c->buffers); + f = c->head; + do { + if (!f) break; + free(f->diffs); + free(f->comb); + f = f->next; + free(f->prev); + } while (f != c->head); + free(c->frame); + free(c); +} diff --git a/libavfilter/libmpcodecs/pullup.h b/libavfilter/libmpcodecs/pullup.h new file mode 100644 index 0000000..cd6ec00 --- /dev/null +++ b/libavfilter/libmpcodecs/pullup.h @@ -0,0 +1,102 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_PULLUP_H +#define MPLAYER_PULLUP_H + +#define PULLUP_CPU_MMX 1 +#define PULLUP_CPU_MMX2 2 +#define PULLUP_CPU_3DNOW 4 +#define PULLUP_CPU_3DNOWEXT 8 +#define PULLUP_CPU_SSE 16 +#define PULLUP_CPU_SSE2 32 + +#define PULLUP_FMT_Y 1 +#define PULLUP_FMT_YUY2 2 +#define PULLUP_FMT_UYVY 3 +#define PULLUP_FMT_RGB32 4 + +struct pullup_buffer +{ + int lock[2]; + unsigned char **planes; +}; + +struct pullup_field +{ + int parity; + struct pullup_buffer *buffer; + unsigned int flags; + int breaks; + int affinity; + int *diffs; + int *comb; + int *var; + struct pullup_field *prev, *next; +}; + +struct pullup_frame +{ + int lock; + int length; + int parity; + struct pullup_buffer **ifields, *ofields[2]; + struct pullup_buffer *buffer; +}; + +struct pullup_context +{ + /* Public interface */ + int format; + int nplanes; + int *bpp, *w, *h, *stride, *background; + unsigned int cpu; + int junk_left, junk_right, junk_top, junk_bottom; + int verbose; + int metric_plane; + int strict_breaks; + int strict_pairs; + /* Internal data */ + struct pullup_field *first, *last, *head; + struct pullup_buffer *buffers; + int nbuffers; + int (*diff)(unsigned char *, unsigned char *, int); + int (*comb)(unsigned char *, unsigned char *, int); + int (*var)(unsigned char *, unsigned char *, int); + int metric_w, metric_h, metric_len, metric_offset; + struct pullup_frame *frame; +}; + + +struct pullup_buffer *ff_pullup_lock_buffer(struct pullup_buffer *b, int parity); +void ff_pullup_release_buffer(struct pullup_buffer 
*b, int parity); +struct pullup_buffer *ff_pullup_get_buffer(struct pullup_context *c, int parity); + +void ff_pullup_submit_field(struct pullup_context *c, struct pullup_buffer *b, int parity); +void ff_pullup_flush_fields(struct pullup_context *c); + +struct pullup_frame *ff_pullup_get_frame(struct pullup_context *c); +void ff_pullup_pack_frame(struct pullup_context *c, struct pullup_frame *fr); +void ff_pullup_release_frame(struct pullup_frame *fr); + +struct pullup_context *ff_pullup_alloc_context(void); +void ff_pullup_preinit_context(struct pullup_context *c); +void ff_pullup_init_context(struct pullup_context *c); +void ff_pullup_free_context(struct pullup_context *c); + +#endif /* MPLAYER_PULLUP_H */ diff --git a/libavfilter/libmpcodecs/vf.h b/libavfilter/libmpcodecs/vf.h new file mode 100644 index 0000000..0d26296 --- /dev/null +++ b/libavfilter/libmpcodecs/vf.h @@ -0,0 +1,169 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#ifndef MPLAYER_VF_H +#define MPLAYER_VF_H + +//#include "m_option.h" +#include "mp_image.h" + +//extern m_obj_settings_t* vf_settings; +//extern const m_obj_list_t vf_obj_list; + +struct vf_instance; +struct vf_priv_s; + +typedef struct vf_info_s { + const char *info; + const char *name; + const char *author; + const char *comment; + int (*vf_open)(struct vf_instance *vf,char* args); + // Ptr to a struct dscribing the options + const void* opts; +} vf_info_t; + +#define NUM_NUMBERED_MPI 50 + +typedef struct vf_image_context_s { + mp_image_t* static_images[2]; + mp_image_t* temp_images[1]; + mp_image_t* export_images[1]; + mp_image_t* numbered_images[NUM_NUMBERED_MPI]; + int static_idx; +} vf_image_context_t; + +typedef struct vf_format_context_t { + int have_configured; + int orig_width, orig_height, orig_fmt; +} vf_format_context_t; + +typedef struct vf_instance { + const vf_info_t* info; + // funcs: + int (*config)(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt); + int (*control)(struct vf_instance *vf, + int request, void* data); + int (*query_format)(struct vf_instance *vf, + unsigned int fmt); + void (*get_image)(struct vf_instance *vf, + mp_image_t *mpi); + int (*put_image)(struct vf_instance *vf, + mp_image_t *mpi, double pts); + void (*start_slice)(struct vf_instance *vf, + mp_image_t *mpi); + void (*draw_slice)(struct vf_instance *vf, + unsigned char** src, int* stride, int w,int h, int x, int y); + void (*uninit)(struct vf_instance *vf); + + int (*continue_buffered_image)(struct vf_instance *vf); + // caps: + unsigned int default_caps; // used by default query_format() + unsigned int default_reqs; // used by default config() + // data: + int w, h; + vf_image_context_t imgctx; + vf_format_context_t fmt; + struct vf_instance *next; + mp_image_t *dmpi; + struct vf_priv_s* priv; +} vf_instance_t; + +// control codes: +#include "mpc_info.h" + +typedef struct vf_seteq_s +{ + const 
char *item; + int value; +} vf_equalizer_t; + +#define VFCTRL_QUERY_MAX_PP_LEVEL 4 /* test for postprocessing support (max level) */ +#define VFCTRL_SET_PP_LEVEL 5 /* set postprocessing level */ +#define VFCTRL_SET_EQUALIZER 6 /* set color options (brightness,contrast etc) */ +#define VFCTRL_GET_EQUALIZER 8 /* get color options (brightness,contrast etc) */ +#define VFCTRL_DRAW_OSD 7 +#define VFCTRL_CHANGE_RECTANGLE 9 /* Change the rectangle boundaries */ +#define VFCTRL_FLIP_PAGE 10 /* Tell the vo to flip pages */ +#define VFCTRL_DUPLICATE_FRAME 11 /* For encoding - encode zero-change frame */ +#define VFCTRL_SKIP_NEXT_FRAME 12 /* For encoding - drop the next frame that passes thru */ +#define VFCTRL_FLUSH_FRAMES 13 /* For encoding - flush delayed frames */ +#define VFCTRL_SCREENSHOT 14 /* Make a screenshot */ +#define VFCTRL_INIT_EOSD 15 /* Select EOSD renderer */ +#define VFCTRL_DRAW_EOSD 16 /* Render EOSD */ +#define VFCTRL_GET_PTS 17 /* Return last pts value that reached vf_vo*/ +#define VFCTRL_SET_DEINTERLACE 18 /* Set deinterlacing status */ +#define VFCTRL_GET_DEINTERLACE 19 /* Get deinterlacing status */ + +#include "vfcap.h" + +//FIXME this should be in a common header, but i dunno which +#define MP_NOPTS_VALUE (-1LL<<63) //both int64_t and double should be able to represent this exactly + + +// functions: +void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h); +mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h); + +vf_instance_t* vf_open_plugin(const vf_info_t* const* filter_list, vf_instance_t* next, const char *name, char **args); +vf_instance_t* vf_open_filter(vf_instance_t* next, const char *name, char **args); +vf_instance_t* ff_vf_add_before_vo(vf_instance_t **vf, char *name, char **args); +vf_instance_t* vf_open_encoder(vf_instance_t* next, const char *name, char *args); + +unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred);
+void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src); +void ff_vf_queue_frame(vf_instance_t *vf, int (*)(vf_instance_t *)); +int ff_vf_output_queued_frame(vf_instance_t *vf); + +// default wrappers: +int ff_vf_next_config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt); +int ff_vf_next_control(struct vf_instance *vf, int request, void* data); +void ff_vf_extra_flip(struct vf_instance *vf); +int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt); +int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts); +void ff_vf_next_draw_slice (struct vf_instance *vf, unsigned char** src, int* stride, int w,int h, int x, int y); + +vf_instance_t* ff_append_filters(vf_instance_t* last); + +void ff_vf_uninit_filter(vf_instance_t* vf); +void ff_vf_uninit_filter_chain(vf_instance_t* vf); + +int ff_vf_config_wrapper(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt); + +static inline int norm_qscale(int qscale, int type) +{ + switch (type) { + case 0: // MPEG-1 + return qscale; + case 1: // MPEG-2 + return qscale >> 1; + case 2: // H264 + return qscale >> 2; + case 3: // VP56 + return (63 - qscale + 2) >> 2; + } + return qscale; +} + +#endif /* MPLAYER_VF_H */ diff --git a/libavfilter/libmpcodecs/vf_detc.c b/libavfilter/libmpcodecs/vf_detc.c new file mode 100644 index 0000000..751e2b8 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_detc.c @@ -0,0 +1,453 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +struct metrics { + int even; + int odd; + int noise; + int temp; +}; + +struct vf_priv_s { + int frame; + int drop, lastdrop; + struct metrics pm; + int thres[5]; + int inframes, outframes; + int mode; + int (*analyze)(struct vf_priv_s *, mp_image_t *, mp_image_t *); + int needread; +}; + +#define COMPE(a,b,e) (abs((a)-(b)) < (((a)+(b))>>(e))) +#define COMPARABLE(a,b) COMPE((a),(b),2) +#define VERYCLOSE(a,b) COMPE((a),(b),3) + +#define OUTER_TC_NBHD(s) ( \ + COMPARABLE((s)[-1].m.even,(s)[-1].m.odd) && \ + COMPARABLE((s)[1].m.even,(s)[0].m.odd) && \ + COMPARABLE((s)[2].m.even,(s)[1].m.odd) && \ + COMPARABLE((s)[-1].m.noise,(s)[0].m.temp) && \ + COMPARABLE((s)[2].m.noise,(s)[2].m.temp) ) + +#define INNER_TC_NBHD(s,l,h) ( \ + COMPARABLE((s)[0].m.even,(l)) && \ + COMPARABLE((s)[2].m.odd,(l)) && ( \ + COMPARABLE((s)[0].m.noise,(h)) || \ + COMPARABLE((s)[1].m.noise,(h)) ) ) + +enum { + TC_DROP, + TC_PROG, + TC_IL1, + TC_IL2 +}; + +static void block_diffs(struct metrics *m, unsigned char *old, unsigned char *new, int os, int ns) +{ + int x, y, even=0, odd=0, noise, temp; + unsigned char *oldp, *newp; + m->noise = m->temp = 0; + for (x = 8; x; x--) { + oldp = old++; + newp = new++; + noise = temp = 0; + for (y = 4; y; y--) { + even += abs(newp[0]-oldp[0]); + odd += abs(newp[ns]-oldp[os]); + noise += 
newp[ns]-newp[0]; + temp += oldp[os]-newp[0]; + oldp += os<<1; + newp += ns<<1; + } + m->noise += abs(noise); + m->temp += abs(temp); + } + m->even = even; + m->odd = odd; +} + +static void diff_planes(struct metrics *m, unsigned char *old, unsigned char *new, int w, int h, int os, int ns) +{ + int x, y, me=0, mo=0, mn=0, mt=0; + struct metrics l; + for (y = 0; y < h-7; y += 8) { + for (x = 0; x < w-7; x += 8) { + block_diffs(&l, old+x+y*os, new+x+y*ns, os, ns); + if (l.even > me) me = l.even; + if (l.odd > mo) mo = l.odd; + if (l.noise > mn) mn = l.noise; + if (l.temp > mt) mt = l.temp; + } + } + m->even = me; + m->odd = mo; + m->noise = mn; + m->temp = mt; +} + +static void diff_fields(struct metrics *metr, mp_image_t *old, mp_image_t *new) +{ + struct metrics m, mu, mv; + diff_planes(&m, old->planes[0], new->planes[0], + new->w, new->h, old->stride[0], new->stride[0]); + if (new->flags & MP_IMGFLAG_PLANAR) { + diff_planes(&mu, old->planes[1], new->planes[1], + new->chroma_width, new->chroma_height, + old->stride[1], new->stride[1]); + diff_planes(&mv, old->planes[2], new->planes[2], + new->chroma_width, new->chroma_height, + old->stride[2], new->stride[2]); + if (mu.even > m.even) m.even = mu.even; + if (mu.odd > m.odd) m.odd = mu.odd; + if (mu.noise > m.noise) m.noise = mu.noise; + if (mu.temp > m.temp) m.temp = mu.temp; + if (mv.even > m.even) m.even = mv.even; + if (mv.odd > m.odd) m.odd = mv.odd; + if (mv.noise > m.noise) m.noise = mv.noise; + if (mv.temp > m.temp) m.temp = mv.temp; + } + *metr = m; +} + +static void status(int f, struct metrics *m) +{ + ff_mp_msg(MSGT_VFILTER, MSGL_V, "frame %d: e=%d o=%d n=%d t=%d\n", + f, m->even, m->odd, m->noise, m->temp); +} + +static int analyze_fixed_pattern(struct vf_priv_s *p, mp_image_t *new, mp_image_t *old) +{ + if (p->frame >= 0) p->frame = (p->frame+1)%5; + ff_mp_msg(MSGT_VFILTER, MSGL_V, "frame %d\n", p->frame); + switch (p->frame) { + case -1: case 0: case 1: case 2: + return TC_PROG; + case 3: + return 
TC_IL1; + case 4: + return TC_IL2; + } + return 0; +} + +static int analyze_aggressive(struct vf_priv_s *p, mp_image_t *new, mp_image_t *old) +{ + struct metrics m, pm; + + if (p->frame >= 0) p->frame = (p->frame+1)%5; + + diff_fields(&m, old, new); + + status(p->frame, &m); + + pm = p->pm; + p->pm = m; + + if (p->frame == 4) { + /* We need to break at scene changes, but is this a valid test? */ + if ((m.even > p->thres[2]) && (m.odd > p->thres[2]) && (m.temp > p->thres[3]) + && (m.temp > 5*pm.temp) && (m.temp*2 > m.noise)) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "scene change breaking telecine!\n"); + p->frame = -1; + return TC_DROP; + } + /* Thres. is to compensate for quantization errors when noise is low */ + if (m.noise - m.temp > -p->thres[4]) { + if (COMPARABLE(m.even, pm.odd)) { + //ff_mp_msg(MSGT_VFILTER, MSGL_V, "confirmed field match!\n"); + return TC_IL2; + } else if ((m.even < p->thres[0]) && (m.odd < p->thres[0]) && VERYCLOSE(m.even, m.odd) + && VERYCLOSE(m.noise,m.temp) && VERYCLOSE(m.noise,pm.noise)) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "interlaced frame appears in duplicate!!!\n"); + p->pm = pm; /* hack :) */ + p->frame = 3; + return TC_IL1; + } + } else { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "mismatched telecine fields!\n"); + p->frame = -1; + } + } + + if (2*m.even*m.temp < m.odd*m.noise) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "caught telecine sync!\n"); + p->frame = 3; + return TC_IL1; + } + + if (p->frame < 3) { + if (m.noise > p->thres[3]) { + if (m.noise > 2*m.temp) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "merging fields out of sequence!\n"); + return TC_IL2; + } + if ((m.noise > 2*pm.noise) && (m.even > p->thres[2]) && (m.odd > p->thres[2])) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "dropping horrible interlaced frame!\n"); + return TC_DROP; + } + } + } + + switch (p->frame) { + case -1: + if (4*m.noise > 5*m.temp) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "merging fields out of sequence!\n"); + return TC_IL2; + } + case 0: + case 1: + case 2: + return TC_PROG; + case 
3: + if ((m.even > p->thres[1]) && (m.even > m.odd) && (m.temp > m.noise)) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "lost telecine tracking!\n"); + p->frame = -1; + return TC_PROG; + } + return TC_IL1; + case 4: + return TC_IL2; + } + return 0; +} + +static void copy_image(mp_image_t *dmpi, mp_image_t *mpi, int field) +{ + switch (field) { + case 0: + my_memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + break; + case 1: + my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0], + mpi->planes[0]+mpi->stride[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1], + mpi->planes[1]+mpi->stride[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2], + mpi->planes[2]+mpi->stride[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + break; + case 2: + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, + dmpi->stride[0], mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(dmpi->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[2], mpi->stride[2]); + } + break; + } +} + +static int do_put_image(struct vf_instance *vf, mp_image_t *dmpi) +{ + struct vf_priv_s *p = vf->priv; + int dropflag; + + switch (p->drop) { + default: + dropflag = 0; + break; + case 1: + dropflag = (++p->lastdrop >= 5); + break; + case 2: + dropflag = (++p->lastdrop >= 5) && 
(4*p->inframes <= 5*p->outframes); + break; + } + + if (dropflag) { + ff_mp_msg(MSGT_VFILTER, MSGL_V, "drop! [%d/%d=%g]\n", + p->outframes, p->inframes, (float)p->outframes/p->inframes); + p->lastdrop = 0; + return 0; + } + + p->outframes++; + return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + int ret=0; + mp_image_t *dmpi; + struct vf_priv_s *p = vf->priv; + + p->inframes++; + + if (p->needread) dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE | MP_IMGFLAG_READABLE, + mpi->width, mpi->height); + /* FIXME: is there a good way to get rid of static type? */ + else dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE, mpi->width, mpi->height); + + switch (p->analyze(p, mpi, dmpi)) { + case TC_DROP: + /* Don't copy anything unless we'll need to read it. */ + if (p->needread) copy_image(dmpi, mpi, 2); + p->lastdrop = 0; + break; + case TC_PROG: + /* Copy and display the whole frame. */ + copy_image(dmpi, mpi, 2); + ret = do_put_image(vf, dmpi); + break; + case TC_IL1: + /* Only copy bottom field unless we need to read. */ + if (p->needread) copy_image(dmpi, mpi, 2); + else copy_image(dmpi, mpi, 1); + p->lastdrop = 0; + break; + case TC_IL2: + /* Copy top field and show frame, then copy bottom if needed. 
*/ + copy_image(dmpi, mpi, 0); + ret = do_put_image(vf, dmpi); + if (p->needread) copy_image(dmpi, mpi, 1); + break; + } + return ret; +} + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* FIXME - figure out which other formats work */ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static struct { + const char *name; + int (*func)(struct vf_priv_s *p, mp_image_t *new, mp_image_t *old); + int needread; +} anal_funcs[] = { + { "fixed", analyze_fixed_pattern, 0 }, + { "aggressive", analyze_aggressive, 1 }, + { NULL, NULL, 0 } +}; + +#define STARTVARS if (0) +#define GETVAR(str, name, out, func) \ + else if (!strncmp((str), name "=", sizeof(name))) \ + (out) = (func)((str) + sizeof(name)) + +static void parse_var(struct vf_priv_s *p, char *var) +{ + STARTVARS; + GETVAR(var, "dr", p->drop, atoi); + GETVAR(var, "t0", p->thres[0], atoi); + GETVAR(var, "t1", p->thres[1], atoi); + GETVAR(var, "t2", p->thres[2], atoi); + GETVAR(var, "t3", p->thres[3], atoi); + GETVAR(var, "t4", p->thres[4], atoi); + GETVAR(var, "fr", p->frame, atoi); + GETVAR(var, "am", p->mode, atoi); +} + +static void parse_args(struct vf_priv_s *p, char *args) +{ + char *next, *orig; + for (args=orig=strdup(args); args; args=next) { + next = strchr(args, ':'); + if (next) *next++ = 0; + parse_var(p, args); + } + free(orig); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + struct vf_priv_s *p; + vf->config = config; + vf->put_image = put_image; + vf->query_format = query_format; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + vf->priv = p = calloc(1, sizeof(struct 
vf_priv_s)); + p->frame = -1; + p->thres[0] = 440; + p->thres[1] = 720; + p->thres[2] = 2500; + p->thres[3] = 2500; + p->thres[4] = 800; + p->drop = 0; + p->mode = 1; + if (args) parse_args(p, args); + p->analyze = anal_funcs[p->mode].func; + p->needread = anal_funcs[p->mode].needread; + return 1; +} + +const vf_info_t ff_vf_info_detc = { + "de-telecine filter", + "detc", + "Rich Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_dint.c b/libavfilter/libmpcodecs/vf_dint.c new file mode 100644 index 0000000..950e835 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_dint.c @@ -0,0 +1,214 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> + +#include "config.h" +#include "mp_msg.h" + +#include "mp_image.h" +#include "img_format.h" +#include "vf.h" + +struct vf_priv_s { + float sense; // first parameter + float level; // second parameter + unsigned int imgfmt; + int diff; + uint32_t max; +// int dfr; +// int rdfr; + int was_dint; + mp_image_t *pmpi; // previous mpi +}; + +#define MAXROWSIZE 1200 + +static int config (struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + int rowsize; + + vf->priv->pmpi = ff_vf_get_image (vf->next, outfmt, MP_IMGTYPE_TEMP, + 0, width, height); + if (!(vf->priv->pmpi->flags & MP_IMGFLAG_PLANAR) && + outfmt != IMGFMT_RGB32 && outfmt != IMGFMT_BGR32 && + outfmt != IMGFMT_RGB24 && outfmt != IMGFMT_BGR24 && + outfmt != IMGFMT_RGB16 && outfmt != IMGFMT_BGR16) + { + ff_mp_msg (MSGT_VFILTER, MSGL_WARN, "Drop-interlaced filter doesn't support this outfmt :(\n"); + return 0; + } + vf->priv->imgfmt = outfmt; + // recalculate internal values + rowsize = vf->priv->pmpi->width; + if (rowsize > MAXROWSIZE) rowsize = MAXROWSIZE; + vf->priv->max = vf->priv->level * vf->priv->pmpi->height * rowsize / 2; + if (vf->priv->pmpi->flags & MP_IMGFLAG_PLANAR) // planar YUV + vf->priv->diff = vf->priv->sense * 256; + else + vf->priv->diff = vf->priv->sense * (1 << (vf->priv->pmpi->bpp/3)); + if (vf->priv->diff < 0) vf->priv->diff = 0; + if (!(vf->priv->pmpi->flags & MP_IMGFLAG_PLANAR) && + vf->priv->pmpi->bpp < 24 && vf->priv->diff > 31) + vf->priv->diff = 31; + ff_mp_msg (MSGT_VFILTER, MSGL_INFO, "Drop-interlaced: %dx%d diff %d / level %u\n", + vf->priv->pmpi->width, vf->priv->pmpi->height, + vf->priv->diff, (unsigned int)vf->priv->max); +// vf->priv->rdfr = vf->priv->dfr = 0; + vf->priv->was_dint = 0; + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static int put_image (struct vf_instance *vf, 
mp_image_t *mpi, double pts) +{ + int8_t rrow0[MAXROWSIZE]; + int8_t rrow1[MAXROWSIZE]; + int8_t rrow2[MAXROWSIZE]; + int8_t *row0 = rrow0, *row1 = rrow1, *row2 = rrow2/*, *row3 = rrow3*/; + int rowsize = mpi->width; + uint32_t nok = 0, max = vf->priv->max; + int diff = vf->priv->diff; + int i, j; + register int n1, n2; + unsigned char *cur0, *prv0; + register unsigned char *cur, *prv; + + if (rowsize > MAXROWSIZE) rowsize = MAXROWSIZE; + // check if nothing to do + if (mpi->imgfmt == vf->priv->imgfmt) + { + cur0 = mpi->planes[0] + mpi->stride[0]; + prv0 = mpi->planes[0]; + for (j = 1; j < mpi->height && nok <= max; j++) + { + cur = cur0; + prv = prv0; + // analyse row (row0) + if (mpi->flags & MP_IMGFLAG_PLANAR) // planar YUV - check luminance + for (i = 0; i < rowsize; i++) + { + if (cur[0] - prv[0] > diff) + row0[i] = 1; + else if (cur[0] - prv[0] < -diff) + row0[i] = -1; + else + row0[i] = 0; + cur++; + prv++; + // check if row0 is 1 but row1 is 0, and row2 is 1 or row2 is 0 + // but row3 is 1 so it's interlaced ptr (nok++) + if (j > 2 && row0[i] > 0 && (row1[i] < 0 || (!row1[i] && row2[i] < 0)) && + (++nok) > max) + break; + } + else if (mpi->bpp < 24) // RGB/BGR 16 - check all colors + for (i = 0; i < rowsize; i++) + { + n1 = cur[0] + (cur[1]<<8); + n2 = prv[0] + (prv[1]<<8); + if ((n1&0x1f) - (n2&0x1f) > diff || + ((n1>>5)&0x3f) - ((n2>>5)&0x3f) > diff || + ((n1>>11)&0x1f) - ((n2>>11)&0x1f) > diff) + row0[i] = 1; + else if ((n1&0x1f) - (n2&0x1f) < -diff || + ((n1>>5)&0x3f) - ((n2>>5)&0x3f) < -diff || + ((n1>>11)&0x1f) - ((n2>>11)&0x1f) < -diff) + row0[i] = -1; + else + row0[i] = 0; + cur += 2; + prv += 2; + // check if row0 is 1 but row1 is 0, and row2 is 1 or row2 is 0 + // but row3 is 1 so it's interlaced ptr (nok++) + if (j > 2 && row0[i] > 0 && (row1[i] < 0 || (!row1[i] && row2[i] < 0)) && + (++nok) > max) + break; + } + else // RGB/BGR 24/32 + for (i = 0; i < rowsize; i++) + { + if (cur[0] - prv[0] > diff || + cur[1] - prv[1] > diff || + cur[2] - prv[2] 
> diff) + row0[i] = 1; + else if (prv[0] - cur[0] > diff || + prv[1] - cur[1] > diff || + prv[2] - cur[2] > diff) + row0[i] = -1; + else + row0[i] = 0; + cur += mpi->bpp/8; + prv += mpi->bpp/8; + // check if row0 is 1 but row1 is 0, and row2 is 1 or row2 is 0 + // but row3 is 1 so it's interlaced ptr (nok++) + if (j > 2 && row0[i] > 0 && (row1[i] < 0 || (!row1[i] && row2[i] < 0)) && + (++nok) > max) + break; + } + cur0 += mpi->stride[0]; + prv0 += mpi->stride[0]; + // rotate rows + cur = row2; + row2 = row1; + row1 = row0; + row0 = cur; + } + } + // check if number of interlaced is above of max + if (nok > max) + { +// vf->priv->dfr++; + if (vf->priv->was_dint < 1) // can skip at most one frame! + { + vf->priv->was_dint++; +// vf->priv->rdfr++; +// ff_mp_msg (MSGT_VFILTER, MSGL_INFO, "DI:%d/%d ", vf->priv->rdfr, vf->priv->dfr); + return 0; + } + } + vf->priv->was_dint = 0; +// ff_mp_msg (MSGT_VFILTER, MSGL_INFO, "DI:%d/%d ", vf->priv->rdfr, vf->priv->dfr); + return ff_vf_next_put_image (vf, mpi, pts); +} + +static int vf_open(vf_instance_t *vf, char *args){ + vf->config = config; + vf->put_image = put_image; +// vf->default_reqs=VFCAP_ACCEPT_STRIDE; + vf->priv = malloc (sizeof(struct vf_priv_s)); + vf->priv->sense = 0.1; + vf->priv->level = 0.15; + vf->priv->pmpi = NULL; + if (args) + sscanf (args, "%f:%f", &vf->priv->sense, &vf->priv->level); + return 1; +} + +const vf_info_t ff_vf_info_dint = { + "drop interlaced frames", + "dint", + "A.G.", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_divtc.c b/libavfilter/libmpcodecs/vf_divtc.c new file mode 100644 index 0000000..61f6e35 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_divtc.c @@ -0,0 +1,722 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <limits.h> +#include <math.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" +#include "libavutil/common.h" +#include "libavutil/x86/asm.h" +#include "mpbswap.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +const vf_info_t ff_vf_info_divtc; + +struct vf_priv_s + { + int deghost, pass, phase, window, fcount, bcount, frameno, misscount, + ocount, sum[5]; + double threshold; + FILE *file; + int8_t *bdata; + unsigned int *csdata; + int *history; + }; + +/* + * diff_MMX and diff_C stolen from vf_decimate.c + */ + +#if HAVE_MMX && HAVE_EBX_AVAILABLE +static int diff_MMX(unsigned char *old, unsigned char *new, int os, int ns) + { + volatile short out[4]; + __asm__ ( + "movl $8, %%ecx \n\t" + "pxor %%mm4, %%mm4 \n\t" + "pxor %%mm7, %%mm7 \n\t" + + ASMALIGN(4) + "1: \n\t" + + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_S"), %%mm2 \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "movq (%%"REG_D"), %%mm1 \n\t" + "add %%"REG_b", %%"REG_D" \n\t" + "psubusb %%mm1, %%mm2 \n\t" + "psubusb %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm0 \n\t" + "movq %%mm1, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddw %%mm0, %%mm4 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm2, %%mm4 \n\t" + "paddw %%mm3, %%mm4 \n\t" + + "decl %%ecx \n\t" + "jnz 1b \n\t" + "movq %%mm4, (%%"REG_d") \n\t" + "emms 
\n\t" + : + : "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out) + : "%ecx", "memory" + ); + return out[0]+out[1]+out[2]+out[3]; + } +#endif + +static int diff_C(unsigned char *old, unsigned char *new, int os, int ns) + { + int x, y, d=0; + + for(y=8; y; y--, new+=ns, old+=os) + for(x=8; x; x--) + d+=abs(new[x]-old[x]); + + return d; + } + +static int (*diff)(unsigned char *, unsigned char *, int, int); + +static int diff_plane(unsigned char *old, unsigned char *new, + int w, int h, int os, int ns, int arg) + { + int x, y, d, max=0, sum=0, n=0; + + for(y=0; y<h-7; y+=8) + { + for(x=0; x<w-7; x+=8) + { + d=diff(old+x+y*os, new+x+y*ns, os, ns); + if(d>max) max=d; + sum+=d; + n++; + } + } + + return (sum+n*max)/2; + } + +/* +static unsigned int checksum_plane(unsigned char *p, unsigned char *z, + int w, int h, int s, int zs, int arg) + { + unsigned int shift, sum; + unsigned char *e; + + for(sum=0; h; h--, p+=s-w) + for(e=p+w, shift=32; p<e;) + sum^=(*p++)<<(shift=(shift-8)&31); + + return sum; + } +*/ + +static unsigned int checksum_plane(unsigned char *p, unsigned char *z, + int w, int h, int s, int zs, int arg) + { + unsigned int shift; + uint32_t sum, t; + unsigned char *e, *e2; +#if HAVE_FAST_64BIT + typedef uint64_t wsum_t; +#else + typedef uint32_t wsum_t; +#endif + wsum_t wsum; + + for(sum=0; h; h--, p+=s-w) + { + for(shift=0, e=p+w; (int)p&(sizeof(wsum_t)-1) && p<e;) + sum^=*p++<<(shift=(shift-8)&31); + + for(wsum=0, e2=e-sizeof(wsum_t)+1; p<e2; p+=sizeof(wsum_t)) + wsum^=*(wsum_t *)p; + +#if HAVE_FAST_64BIT + t=be2me_32((uint32_t)(wsum>>32^wsum)); +#else + t=be2me_32(wsum); +#endif + + for(sum^=(t<<shift|t>>(32-shift)); p<e;) + sum^=*p++<<(shift=(shift-8)&31); + } + + return sum; + } + +static int deghost_plane(unsigned char *d, unsigned char *s, + int w, int h, int ds, int ss, int threshold) + { + int t; + unsigned char *e; + + for(; h; h--, s+=ss-w, d+=ds-w) + for(e=d+w; d<e; d++, s++) + if(abs(*d-*s)>=threshold) + 
*d=(t=(*d<<1)-*s)<0?0:t>255?255:t; + + return 0; + } + +static int copyop(unsigned char *d, unsigned char *s, int bpl, int h, int dstride, int sstride, int dummy) { + memcpy_pic(d, s, bpl, h, dstride, sstride); + return 0; +} + +static int imgop(int(*planeop)(unsigned char *, unsigned char *, + int, int, int, int, int), + mp_image_t *dst, mp_image_t *src, int arg) + { + if(dst->flags&MP_IMGFLAG_PLANAR) + return planeop(dst->planes[0], src?src->planes[0]:0, + dst->w, dst->h, + dst->stride[0], src?src->stride[0]:0, arg)+ + planeop(dst->planes[1], src?src->planes[1]:0, + dst->chroma_width, dst->chroma_height, + dst->stride[1], src?src->stride[1]:0, arg)+ + planeop(dst->planes[2], src?src->planes[2]:0, + dst->chroma_width, dst->chroma_height, + dst->stride[2], src?src->stride[2]:0, arg); + + return planeop(dst->planes[0], src?src->planes[0]:0, + dst->w*(dst->bpp/8), dst->h, + dst->stride[0], src?src->stride[0]:0, arg); + } + +/* + * Find the phase in which the telecine pattern fits best to the + * given 5 frame slice of frame difference measurements. + * + * If phase1 and phase2 are not negative, only the two specified + * phases are tested. 
+ */ + +static int match(struct vf_priv_s *p, int *diffs, + int phase1, int phase2, double *strength) + { + static const int pattern1[]={ -4, 1, 1, 1, 1 }, + pattern2[]={ -2, -3, 4, 4, -3 }, *pattern; + int f, m, n, t[5]; + + pattern=p->deghost>0?pattern2:pattern1; + + for(f=0; f<5; f++) + { + if(phase1<0 || phase2<0 || f==phase1 || f==phase2) + { + for(n=t[f]=0; n<5; n++) + t[f]+=diffs[n]*pattern[(n-f+5)%5]; + } + else + t[f]=INT_MIN; + } + + /* find the best match */ + for(m=0, n=1; n<5; n++) + if(t[n]>t[m]) m=n; + + if(strength) + { + /* the second best match */ + for(f=m?0:1, n=f+1; n<5; n++) + if(n!=m && t[n]>t[f]) f=n; + + *strength=(t[m]>0?(double)(t[m]-t[f])/t[m]:0.0); + } + + return m; + } + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) + { + mp_image_t *dmpi, *tmpi=0; + int n, m, f, newphase; + struct vf_priv_s *p=vf->priv; + unsigned int checksum; + double d; + + dmpi=ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE | MP_IMGFLAG_READABLE, + mpi->width, mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + + newphase=p->phase; + + switch(p->pass) + { + case 1: + fprintf(p->file, "%08x %d\n", + (unsigned int)imgop((void *)checksum_plane, mpi, 0, 0), + p->frameno?imgop(diff_plane, dmpi, mpi, 0):0); + break; + + case 2: + if(p->frameno/5>p->bcount) + { + ff_mp_msg(MSGT_VFILTER, MSGL_ERR, + "\n%s: Log file ends prematurely! 
" + "Switching to one pass mode.\n", vf->info->name); + p->pass=0; + break; + } + + checksum=(unsigned int)imgop((void *)checksum_plane, mpi, 0, 0); + + if(checksum!=p->csdata[p->frameno]) + { + for(f=0; f<100; f++) + if(p->frameno+f<p->fcount && p->csdata[p->frameno+f]==checksum) + break; + else if(p->frameno-f>=0 && p->csdata[p->frameno-f]==checksum) + { + f=-f; + break; + } + + if(f<100) + { + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, + "\n%s: Mismatch with pass-1: %+d frame(s).\n", + vf->info->name, f); + + p->frameno+=f; + p->misscount=0; + } + else if(p->misscount++>=30) + { + ff_mp_msg(MSGT_VFILTER, MSGL_ERR, + "\n%s: Sync with pass-1 lost! " + "Switching to one pass mode.\n", vf->info->name); + p->pass=0; + break; + } + } + + n=(p->frameno)/5; + if(n>=p->bcount) n=p->bcount-1; + + newphase=p->bdata[n]; + break; + + default: + if(p->frameno) + { + int *sump=p->sum+p->frameno%5, + *histp=p->history+p->frameno%p->window; + + *sump-=*histp; + *sump+=(*histp=imgop(diff_plane, dmpi, mpi, 0)); + } + + m=match(p, p->sum, -1, -1, &d); + + if(d>=p->threshold) + newphase=m; + } + + n=p->ocount++%5; + + if(newphase!=p->phase && ((p->phase+4)%5<n)==((newphase+4)%5<n)) + { + p->phase=newphase; + ff_mp_msg(MSGT_VFILTER, MSGL_STATUS, + "\n%s: Telecine phase %d.\n", vf->info->name, p->phase); + } + + switch((p->frameno++-p->phase+10)%5) + { + case 0: + imgop(copyop, dmpi, mpi, 0); + return 0; + + case 4: + if(p->deghost>0) + { + tmpi=ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_READABLE, + mpi->width, mpi->height); + ff_vf_clone_mpi_attributes(tmpi, mpi); + + imgop(copyop, tmpi, mpi, 0); + imgop(deghost_plane, tmpi, dmpi, p->deghost); + imgop(copyop, dmpi, mpi, 0); + return ff_vf_next_put_image(vf, tmpi, MP_NOPTS_VALUE); + } + } + + imgop(copyop, dmpi, mpi, 0); + return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + } + +static int analyze(struct vf_priv_s *p) + { + int *buf=0, *bp, bufsize=0, n, b, f, i, j, m, s; + unsigned 
int *cbuf=0, *cp; + int8_t *pbuf; + int8_t lbuf[256]; + int sum[5]; + double d; + + /* read the file */ + + n=15; + while(fgets(lbuf, 256, p->file)) + { + if(n>=bufsize-19) + { + bufsize=bufsize?bufsize*2:30000; + if((bp=realloc(buf, bufsize*sizeof *buf))) buf=bp; + if((cp=realloc(cbuf, bufsize*sizeof *cbuf))) cbuf=cp; + + if(!bp || !cp) + { + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "%s: Not enough memory.\n", + ff_vf_info_divtc.name); + free(buf); + free(cbuf); + return 0; + } + } + sscanf(lbuf, "%x %d", cbuf+n, buf+n); + n++; + } + + if(n <= 15) + { + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "%s: Empty 2-pass log file.\n", + ff_vf_info_divtc.name); + free(buf); + free(cbuf); + return 0; + } + + /* generate some dummy data past the beginning and end of the array */ + + buf+=15, cbuf+=15; + n-=15; + + memcpy(buf-15, buf, 15*sizeof *buf); + memset(cbuf-15, 0, 15*sizeof *cbuf); + + while(n%5) + buf[n]=buf[n-5], cbuf[n]=0, n++; + + memcpy(buf+n, buf+n-15, 15*sizeof *buf); + memset(cbuf+n, 0, 15*sizeof *cbuf); + + p->csdata=cbuf; + p->fcount=n; + + /* array with one slot for each slice of 5 frames */ + + p->bdata=pbuf=malloc(p->bcount=b=(n/5)); + memset(pbuf, 255, b); + + /* resolve the automatic mode */ + + if(p->deghost<0) + { + int deghost=-p->deghost; + double s0=0.0, s1=0.0; + + for(f=0; f<n; f+=5) + { + p->deghost=0; match(p, buf+f, -1, -1, &d); s0+=d; + p->deghost=1; match(p, buf+f, -1, -1, &d); s1+=d; + } + + p->deghost=s1>s0?deghost:0; + + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, + "%s: Deghosting %-3s (relative pattern strength %+.2fdB).\n", + ff_vf_info_divtc.name, + p->deghost?"ON":"OFF", + 10.0*log10(s1/s0)); + } + + /* analyze the data */ + + for(f=0; f<5; f++) + for(sum[f]=0, n=-15; n<20; n+=5) + sum[f]+=buf[n+f]; + + for(f=0; f<b; f++) + { + m=match(p, sum, -1, -1, &d); + + if(d>=p->threshold) + pbuf[f]=m; + + if(f<b-1) + for(n=0; n<5; n++) + sum[n]=sum[n]-buf[5*(f-3)+n]+buf[5*(f+4)+n]; + } + + /* fill in the gaps */ + + /* the beginning */ + for(f=0; f<b && 
pbuf[f]==-1; f++); + + if(f==b) + { + free(buf-15); + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "%s: No telecine pattern found!\n", + ff_vf_info_divtc.name); + return 0; + } + + for(n=0; n<f; pbuf[n++]=pbuf[f]); + + /* the end */ + for(f=b-1; pbuf[f]==-1; f--); + for(n=f+1; n<b; pbuf[n++]=pbuf[f]); + + /* the rest */ + for(f=0;;) + { + while(f<b && pbuf[f]!=-1) f++; + if(f==b) break; + for(n=f; pbuf[n]==-1; n++); + + if(pbuf[f-1]==pbuf[n]) + { + /* just a gap */ + while(f<n) pbuf[f++]=pbuf[n]; + } + else + { + /* phase change, reanalyze the original data in the gap with zero + threshold for only the two phases that appear at the ends */ + + for(i=0; i<5; i++) + for(sum[i]=0, j=5*f-15; j<5*f; j+=5) + sum[i]+=buf[i+j]; + + for(i=f; i<n; i++) + { + pbuf[i]=match(p, sum, pbuf[f-1], pbuf[n], 0); + + for(j=0; j<5; j++) + sum[j]=sum[j]-buf[5*(i-3)+j]+buf[5*(i+4)+j]; + } + + /* estimate the transition point by dividing the gap + in the same proportion as the number of matches of each kind */ + + for(i=f, m=f; i<n; i++) + if(pbuf[i]==pbuf[f-1]) m++; + + /* find the transition of the right direction nearest to the + estimated point */ + + if(m>f && m<n) + { + for(j=m; j>f; j--) + if(pbuf[j-1]==pbuf[f-1] && pbuf[j]==pbuf[n]) break; + for(s=m; s<n; s++) + if(pbuf[s-1]==pbuf[f-1] && pbuf[s]==pbuf[n]) break; + + m=(s-m<m-j)?s:j; + } + + /* and rewrite the data to allow only this one transition */ + + for(i=f; i<m; i++) + pbuf[i]=pbuf[f-1]; + + for(; i<n; i++) + pbuf[i]=pbuf[n]; + + f=n; + } + } + + free(buf-15); + + return 1; + } + +static int query_format(struct vf_instance *vf, unsigned int fmt) + { + switch(fmt) + { + case IMGFMT_444P: case IMGFMT_IYUV: case IMGFMT_RGB24: + case IMGFMT_422P: case IMGFMT_UYVY: case IMGFMT_BGR24: + case IMGFMT_411P: case IMGFMT_YUY2: case IMGFMT_IF09: + case IMGFMT_YV12: case IMGFMT_I420: case IMGFMT_YVU9: + case IMGFMT_IUYV: case IMGFMT_Y800: case IMGFMT_Y8: + return ff_vf_next_query_format(vf,fmt); + } + + return 0; + } + +static void uninit(struct 
vf_instance *vf) + { + if(vf->priv) + { + if(vf->priv->file) fclose(vf->priv->file); + if(vf->priv->csdata) free(vf->priv->csdata-15); + free(vf->priv->bdata); + free(vf->priv->history); + free(vf->priv); + } + } + +static int vf_open(vf_instance_t *vf, char *args) + { + struct vf_priv_s *p; + const char *filename="framediff.log"; + char *ap, *q, *a; + + if(args && !(args=strdup(args))) + { + nomem: + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, + "%s: Not enough memory.\n", vf->info->name); + fail: + uninit(vf); + free(args); + return 0; + } + + vf->put_image=put_image; + vf->uninit=uninit; + vf->query_format=query_format; + vf->default_reqs=VFCAP_ACCEPT_STRIDE; + if(!(vf->priv=p=calloc(1, sizeof(struct vf_priv_s)))) + goto nomem; + + p->phase=5; + p->threshold=0.5; + p->window=30; + + if((ap=args)) + while(*ap) + { + q=ap; + if((ap=strchr(q, ':'))) *ap++=0; else ap=q+strlen(q); + if((a=strchr(q, '='))) *a++=0; else a=q+strlen(q); + + switch(*q) + { + case 0: break; + case 'f': filename=a; break; + case 't': p->threshold=atof(a); break; + case 'w': p->window=5*(atoi(a)+4)/5; break; + case 'd': p->deghost=atoi(a); break; + case 'p': + if(q[1]=='h') p->phase=atoi(a); + else p->pass=atoi(a); + break; + + case 'h': + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, + "\n%s options:\n\n" + "pass=1|2 - Use 2-pass mode.\n" + "file=filename - Set the 2-pass log file name " + "(default %s).\n" + "threshold=value - Set the pattern recognition " + "sensitivity (default %g).\n" + "deghost=value - Select deghosting threshold " + "(default %d).\n" + "window=numframes - Set the statistics window " + "for 1-pass mode (default %d).\n" + "phase=0|1|2|3|4 - Set the initial phase " + "for 1-pass mode (default %d).\n\n" + "The option names can be abbreviated to the shortest " + "unique prefix.\n\n", + vf->info->name, filename, p->threshold, p->deghost, + p->window, p->phase%5); + break; + + default: + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, + "%s: Unknown argument %s.\n", vf->info->name, q); + goto fail; + } + 
} + + switch(p->pass) + { + case 1: + if(!(p->file=fopen(filename, "w"))) + { + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, + "%s: Can't create file %s.\n", vf->info->name, filename); + goto fail; + } + + break; + + case 2: + if(!(p->file=fopen(filename, "r"))) + { + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, + "%s: Can't open file %s.\n", vf->info->name, filename); + goto fail; + } + + if(!analyze(p)) + goto fail; + + fclose(p->file); + p->file=0; + break; + } + + if(p->window<5) p->window=5; + if(!(p->history=calloc(sizeof *p->history, p->window))) + goto nomem; + + diff = diff_C; +#if HAVE_MMX && HAVE_EBX_AVAILABLE + if(ff_gCpuCaps.hasMMX) diff = diff_MMX; +#endif + + free(args); + return 1; + } + +const vf_info_t ff_vf_info_divtc = + { + "inverse telecine for deinterlaced video", + "divtc", + "Ville Saari", + "", + vf_open, + NULL + }; diff --git a/libavfilter/libmpcodecs/vf_down3dright.c b/libavfilter/libmpcodecs/vf_down3dright.c new file mode 100644 index 0000000..5c95ce6 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_down3dright.c @@ -0,0 +1,166 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +struct vf_priv_s { + int skipline; + int scalew; + int scaleh; +}; + +static void toright(unsigned char *dst[3], unsigned char *src[3], + int dststride[3], int srcstride[3], + int w, int h, struct vf_priv_s* p) +{ + int k; + + for (k = 0; k < 3; k++) { + unsigned char* fromL = src[k]; + unsigned char* fromR = src[k]; + unsigned char* to = dst[k]; + int src = srcstride[k]; + int dst = dststride[k]; + int ss; + unsigned int dd; + int i; + + if (k > 0) { + i = h / 4 - p->skipline / 2; + ss = src * (h / 4 + p->skipline / 2); + dd = w / 4; + } else { + i = h / 2 - p->skipline; + ss = src * (h / 2 + p->skipline); + dd = w / 2; + } + fromR += ss; + for ( ; i > 0; i--) { + int j; + unsigned char* t = to; + unsigned char* sL = fromL; + unsigned char* sR = fromR; + + if (p->scalew == 1) { + for (j = dd; j > 0; j--) { + *t++ = (sL[0] + sL[1]) / 2; + sL+=2; + } + for (j = dd ; j > 0; j--) { + *t++ = (sR[0] + sR[1]) / 2; + sR+=2; + } + } else { + for (j = dd * 2 ; j > 0; j--) + *t++ = *sL++; + for (j = dd * 2 ; j > 0; j--) + *t++ = *sR++; + } + if (p->scaleh == 1) { + fast_memcpy(to + dst, to, dst); + to += dst; + } + to += dst; + fromL += src; + fromR += src; + } + //printf("K %d %d %d %d %d \n", k, w, h, src, dst); + } +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + + // hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next, IMGFMT_YV12, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE | + ((vf->priv->scaleh == 1) ? 
MP_IMGFLAG_READABLE : 0), + mpi->w * vf->priv->scalew, + mpi->h / vf->priv->scaleh - vf->priv->skipline); + + toright(dmpi->planes, mpi->planes, dmpi->stride, + mpi->stride, mpi->w, mpi->h, vf->priv); + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + /* FIXME - also support UYVY output? */ + return ff_vf_next_config(vf, width * vf->priv->scalew, + height / vf->priv->scaleh - vf->priv->skipline, d_width, d_height, flags, IMGFMT_YV12); +} + + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* FIXME - really any YUV 4:2:0 input format should work */ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf, IMGFMT_YV12); + } + return 0; +} + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->config=config; + vf->query_format=query_format; + vf->put_image=put_image; + vf->uninit=uninit; + + vf->priv = calloc(1, sizeof (struct vf_priv_s)); + vf->priv->skipline = 0; + vf->priv->scalew = 1; + vf->priv->scaleh = 2; + if (args) sscanf(args, "%d:%d:%d", &vf->priv->skipline, &vf->priv->scalew, &vf->priv->scaleh); + + return 1; +} + +const vf_info_t ff_vf_info_down3dright = { + "convert stereo movie from top-bottom to left-right field", + "down3dright", + "Zdenek Kabelac", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_dsize.c b/libavfilter/libmpcodecs/vf_dsize.c new file mode 100644 index 0000000..21e0bf8 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_dsize.c @@ -0,0 +1,123 @@ +/* + * This file is part of MPlayer. 
+ * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +struct vf_priv_s { + int w, h; + int method; // aspect method, 0 -> downscale, 1-> upscale. +2 -> original aspect. + int round; + float aspect; +}; + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + if (vf->priv->aspect < 0.001) { // did the user input aspect or w,h params + if (vf->priv->w == 0) vf->priv->w = d_width; + if (vf->priv->h == 0) vf->priv->h = d_height; + if (vf->priv->w == -1) vf->priv->w = width; + if (vf->priv->h == -1) vf->priv->h = height; + if (vf->priv->w == -2) vf->priv->w = vf->priv->h * (double)d_width / d_height; + if (vf->priv->w == -3) vf->priv->w = vf->priv->h * (double)width / height; + if (vf->priv->h == -2) vf->priv->h = vf->priv->w * (double)d_height / d_width; + if (vf->priv->h == -3) vf->priv->h = vf->priv->w * (double)height / width; + if (vf->priv->method > -1) { + double aspect = (vf->priv->method & 2) ? 
((double)height / width) : ((double)d_height / d_width); + if ((vf->priv->h > vf->priv->w * aspect) ^ (vf->priv->method & 1)) { + vf->priv->h = vf->priv->w * aspect; + } else { + vf->priv->w = vf->priv->h / aspect; + } + } + if (vf->priv->round > 1) { // round up + vf->priv->w += (vf->priv->round - 1 - (vf->priv->w - 1) % vf->priv->round); + vf->priv->h += (vf->priv->round - 1 - (vf->priv->h - 1) % vf->priv->round); + } + d_width = vf->priv->w; + d_height = vf->priv->h; + } else { + if (vf->priv->aspect * height > width) { + d_width = height * vf->priv->aspect + .5; + d_height = height; + } else { + d_height = width / vf->priv->aspect + .5; + d_width = width; + } + } + return ff_vf_next_config(vf, width, height, d_width, d_height, flags, outfmt); +} + +static void uninit(vf_instance_t *vf) { + free(vf->priv); + vf->priv = NULL; +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->config = config; + vf->draw_slice = ff_vf_next_draw_slice; + vf->uninit = uninit; + //vf->default_caps = 0; + vf->priv = calloc(sizeof(struct vf_priv_s), 1); + vf->priv->aspect = 0.; + vf->priv->w = -1; + vf->priv->h = -1; + vf->priv->method = -1; + vf->priv->round = 1; + if (args) { + if (strchr(args, '/')) { + int w, h; + sscanf(args, "%d/%d", &w, &h); + vf->priv->aspect = (float)w/h; + } else if (strchr(args, '.')) { + sscanf(args, "%f", &vf->priv->aspect); + } else { + sscanf(args, "%d:%d:%d:%d", &vf->priv->w, &vf->priv->h, &vf->priv->method, &vf->priv->round); + } + } + if ((vf->priv->aspect < 0.) 
|| (vf->priv->w < -3) || (vf->priv->h < -3) || + ((vf->priv->w < -1) && (vf->priv->h < -1)) || + (vf->priv->method < -1) || (vf->priv->method > 3) || + (vf->priv->round < 0)) { + ff_mp_msg(MSGT_VFILTER, MSGL_ERR, "[dsize] Illegal value(s): aspect: %f w: %d h: %d aspect_method: %d round: %d\n", vf->priv->aspect, vf->priv->w, vf->priv->h, vf->priv->method, vf->priv->round); + free(vf->priv); vf->priv = NULL; + return -1; + } + return 1; +} + +const vf_info_t ff_vf_info_dsize = { + "reset displaysize/aspect", + "dsize", + "Rich Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_eq.c b/libavfilter/libmpcodecs/vf_eq.c new file mode 100644 index 0000000..4e256d9 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_eq.c @@ -0,0 +1,240 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/video_out.h" + +static struct vf_priv_s { + unsigned char *buf; + int brightness; + int contrast; +}; + +#if HAVE_MMX +static void process_MMX(unsigned char *dest, int dstride, unsigned char *src, int sstride, + int w, int h, int brightness, int contrast) +{ + int i; + int pel; + int dstep = dstride-w; + int sstep = sstride-w; + short brvec[4]; + short contvec[4]; + + contrast = ((contrast+100)*256*16)/100; + brightness = ((brightness+100)*511)/200-128 - contrast/32; + + brvec[0] = brvec[1] = brvec[2] = brvec[3] = brightness; + contvec[0] = contvec[1] = contvec[2] = contvec[3] = contrast; + + while (h--) { + __asm__ volatile ( + "movq (%5), %%mm3 \n\t" + "movq (%6), %%mm4 \n\t" + "pxor %%mm0, %%mm0 \n\t" + "movl %4, %%eax\n\t" + ASMALIGN(4) + "1: \n\t" + "movq (%0), %%mm1 \n\t" + "movq (%0), %%mm2 \n\t" + "punpcklbw %%mm0, %%mm1 \n\t" + "punpckhbw %%mm0, %%mm2 \n\t" + "psllw $4, %%mm1 \n\t" + "psllw $4, %%mm2 \n\t" + "pmulhw %%mm4, %%mm1 \n\t" + "pmulhw %%mm4, %%mm2 \n\t" + "paddw %%mm3, %%mm1 \n\t" + "paddw %%mm3, %%mm2 \n\t" + "packuswb %%mm2, %%mm1 \n\t" + "add $8, %0 \n\t" + "movq %%mm1, (%1) \n\t" + "add $8, %1 \n\t" + "decl %%eax \n\t" + "jnz 1b \n\t" + : "=r" (src), "=r" (dest) + : "0" (src), "1" (dest), "r" (w>>3), "r" (brvec), "r" (contvec) + : "%eax" + ); + + for (i = w&7; i; i--) + { + pel = ((*src++* contrast)>>12) + brightness; + if(pel&768) pel = (-pel)>>31; + *dest++ = pel; + } + + src += sstep; + dest += dstep; + } + __asm__ volatile ( "emms \n\t" ::: "memory" ); +} +#endif + +static void process_C(unsigned char *dest, int dstride, unsigned char *src, int sstride, + int w, int h, int brightness, int contrast) +{ + int i; + int pel; + int dstep = dstride-w; + int sstep = sstride-w; + + contrast = 
((contrast+100)*256*256)/100; + brightness = ((brightness+100)*511)/200-128 - contrast/512; + + while (h--) { + for (i = w; i; i--) + { + pel = ((*src++* contrast)>>16) + brightness; + if(pel&768) pel = (-pel)>>31; + *dest++ = pel; + } + src += sstep; + dest += dstep; + } +} + +static void (*process)(unsigned char *dest, int dstride, unsigned char *src, int sstride, + int w, int h, int brightness, int contrast); + +/* FIXME: add packed yuv version of process */ + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + + dmpi=ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_EXPORT, 0, + mpi->w, mpi->h); + + dmpi->stride[0] = mpi->stride[0]; + dmpi->planes[1] = mpi->planes[1]; + dmpi->planes[2] = mpi->planes[2]; + dmpi->stride[1] = mpi->stride[1]; + dmpi->stride[2] = mpi->stride[2]; + + if (!vf->priv->buf) vf->priv->buf = malloc(mpi->stride[0]*mpi->h); + + if ((vf->priv->brightness == 0) && (vf->priv->contrast == 0)) + dmpi->planes[0] = mpi->planes[0]; + else { + dmpi->planes[0] = vf->priv->buf; + process(dmpi->planes[0], dmpi->stride[0], + mpi->planes[0], mpi->stride[0], + mpi->w, mpi->h, vf->priv->brightness, + vf->priv->contrast); + } + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static int control(struct vf_instance *vf, int request, void* data) +{ + vf_equalizer_t *eq; + + switch (request) { + case VFCTRL_SET_EQUALIZER: + eq = data; + if (!strcmp(eq->item,"brightness")) { + vf->priv->brightness = eq->value; + return CONTROL_TRUE; + } + else if (!strcmp(eq->item,"contrast")) { + vf->priv->contrast = eq->value; + return CONTROL_TRUE; + } + break; + case VFCTRL_GET_EQUALIZER: + eq = data; + if (!strcmp(eq->item,"brightness")) { + eq->value = vf->priv->brightness; + return CONTROL_TRUE; + } + else if (!strcmp(eq->item,"contrast")) { + eq->value = vf->priv->contrast; + return CONTROL_TRUE; + } + break; + } + return ff_vf_next_control(vf, request, data); +} + +static int query_format(struct vf_instance *vf, 
unsigned int fmt) +{ + switch (fmt) { + case IMGFMT_YVU9: + case IMGFMT_IF09: + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_CLPL: + case IMGFMT_Y800: + case IMGFMT_Y8: + case IMGFMT_NV12: + case IMGFMT_NV21: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv->buf); + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->control=control; + vf->query_format=query_format; + vf->put_image=put_image; + vf->uninit=uninit; + + vf->priv = malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + if (args) sscanf(args, "%d:%d", &vf->priv->brightness, &vf->priv->contrast); + + process = process_C; +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) process = process_MMX; +#endif + + return 1; +} + +const vf_info_t ff_vf_info_eq = { + "soft video equalizer", + "eq", + "Richard Felker", + "", + vf_open, +}; diff --git a/libavfilter/libmpcodecs/vf_eq2.c b/libavfilter/libmpcodecs/vf_eq2.c new file mode 100644 index 0000000..7a3ef31 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_eq2.c @@ -0,0 +1,519 @@ +/* + * Software equalizer (brightness, contrast, gamma, saturation) + * + * Hampa Hug <hampa@hampa.ch> (original LUT gamma/contrast/brightness filter) + * Daniel Moreno <comac@comac.darktech.org> (saturation, R/G/B gamma support) + * Richard Felker (original MMX contrast/brightness code (vf_eq.c)) + * Michael Niedermayer <michalni@gmx.at> (LUT16) + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <math.h> +#include <inttypes.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#define LUT16 + +/* Per channel parameters */ +typedef struct eq2_param_t { + unsigned char lut[256]; +#ifdef LUT16 + uint16_t lut16[256*256]; +#endif + int lut_clean; + + void (*adjust) (struct eq2_param_t *par, unsigned char *dst, unsigned char *src, + unsigned w, unsigned h, unsigned dstride, unsigned sstride); + + double c; + double b; + double g; + double w; +} eq2_param_t; + +typedef struct vf_priv_s { + eq2_param_t param[3]; + + double contrast; + double brightness; + double saturation; + + double gamma; + double gamma_weight; + double rgamma; + double ggamma; + double bgamma; + + unsigned buf_w[3]; + unsigned buf_h[3]; + unsigned char *buf[3]; +} vf_eq2_t; + + +static +void create_lut (eq2_param_t *par) +{ + unsigned i; + double g, v; + double lw, gw; + + g = par->g; + gw = par->w; + lw = 1.0 - gw; + + if ((g < 0.001) || (g > 1000.0)) { + g = 1.0; + } + + g = 1.0 / g; + + for (i = 0; i < 256; i++) { + v = (double) i / 255.0; + v = par->c * (v - 0.5) + 0.5 + par->b; + + if (v <= 0.0) { + par->lut[i] = 0; + } + else { + v = v*lw + pow(v, g)*gw; + + if (v >= 1.0) { + par->lut[i] = 255; + } + else { + par->lut[i] = (unsigned char) (256.0 * v); + } + } + } + +#ifdef LUT16 + for(i=0; i<256*256; i++){ + par->lut16[i]= par->lut[i&0xFF] + (par->lut[i>>8]<<8); + } 
+#endif + + par->lut_clean = 1; +} + +#if HAVE_MMX +static +void affine_1d_MMX (eq2_param_t *par, unsigned char *dst, unsigned char *src, + unsigned w, unsigned h, unsigned dstride, unsigned sstride) +{ + unsigned i; + int contrast, brightness; + unsigned dstep, sstep; + int pel; + short brvec[4]; + short contvec[4]; + +// printf("\nmmx: src=%p dst=%p w=%d h=%d ds=%d ss=%d\n",src,dst,w,h,dstride,sstride); + + contrast = (int) (par->c * 256 * 16); + brightness = ((int) (100.0 * par->b + 100.0) * 511) / 200 - 128 - contrast / 32; + + brvec[0] = brvec[1] = brvec[2] = brvec[3] = brightness; + contvec[0] = contvec[1] = contvec[2] = contvec[3] = contrast; + + sstep = sstride - w; + dstep = dstride - w; + + while (h-- > 0) { + __asm__ volatile ( + "movq (%5), %%mm3 \n\t" + "movq (%6), %%mm4 \n\t" + "pxor %%mm0, %%mm0 \n\t" + "movl %4, %%eax\n\t" + ASMALIGN(4) + "1: \n\t" + "movq (%0), %%mm1 \n\t" + "movq (%0), %%mm2 \n\t" + "punpcklbw %%mm0, %%mm1 \n\t" + "punpckhbw %%mm0, %%mm2 \n\t" + "psllw $4, %%mm1 \n\t" + "psllw $4, %%mm2 \n\t" + "pmulhw %%mm4, %%mm1 \n\t" + "pmulhw %%mm4, %%mm2 \n\t" + "paddw %%mm3, %%mm1 \n\t" + "paddw %%mm3, %%mm2 \n\t" + "packuswb %%mm2, %%mm1 \n\t" + "add $8, %0 \n\t" + "movq %%mm1, (%1) \n\t" + "add $8, %1 \n\t" + "decl %%eax \n\t" + "jnz 1b \n\t" + : "=r" (src), "=r" (dst) + : "0" (src), "1" (dst), "r" (w >> 3), "r" (brvec), "r" (contvec) + : "%eax" + ); + + for (i = w & 7; i > 0; i--) { + pel = ((*src++ * contrast) >> 12) + brightness; + if (pel & 768) { + pel = (-pel) >> 31; + } + *dst++ = pel; + } + + src += sstep; + dst += dstep; + } + + __asm__ volatile ( "emms \n\t" ::: "memory" ); +} +#endif + +static +void apply_lut (eq2_param_t *par, unsigned char *dst, unsigned char *src, + unsigned w, unsigned h, unsigned dstride, unsigned sstride) +{ + unsigned i, j, w2; + unsigned char *lut; + uint16_t *lut16; + + if (!par->lut_clean) { + create_lut (par); + } + + lut = par->lut; +#ifdef LUT16 + lut16 = par->lut16; + w2= (w>>3)<<2; + for (j = 0; 
j < h; j++) { + uint16_t *src16= (uint16_t*)src; + uint16_t *dst16= (uint16_t*)dst; + for (i = 0; i < w2; i+=4) { + dst16[i+0] = lut16[src16[i+0]]; + dst16[i+1] = lut16[src16[i+1]]; + dst16[i+2] = lut16[src16[i+2]]; + dst16[i+3] = lut16[src16[i+3]]; + } + i <<= 1; +#else + w2= (w>>3)<<3; + for (j = 0; j < h; j++) { + for (i = 0; i < w2; i+=8) { + dst[i+0] = lut[src[i+0]]; + dst[i+1] = lut[src[i+1]]; + dst[i+2] = lut[src[i+2]]; + dst[i+3] = lut[src[i+3]]; + dst[i+4] = lut[src[i+4]]; + dst[i+5] = lut[src[i+5]]; + dst[i+6] = lut[src[i+6]]; + dst[i+7] = lut[src[i+7]]; + } +#endif + for (; i < w; i++) { + dst[i] = lut[src[i]]; + } + + src += sstride; + dst += dstride; + } +} + +static +int put_image (vf_instance_t *vf, mp_image_t *src, double pts) +{ + unsigned i; + vf_eq2_t *eq2; + mp_image_t *dst; + unsigned long img_n,img_c; + + eq2 = vf->priv; + + if ((eq2->buf_w[0] != src->w) || (eq2->buf_h[0] != src->h)) { + eq2->buf_w[0] = src->w; + eq2->buf_h[0] = src->h; + eq2->buf_w[1] = eq2->buf_w[2] = src->w >> src->chroma_x_shift; + eq2->buf_h[1] = eq2->buf_h[2] = src->h >> src->chroma_y_shift; + img_n = eq2->buf_w[0]*eq2->buf_h[0]; + if(src->num_planes>1){ + img_c = eq2->buf_w[1]*eq2->buf_h[1]; + eq2->buf[0] = realloc (eq2->buf[0], img_n + 2*img_c); + eq2->buf[1] = eq2->buf[0] + img_n; + eq2->buf[2] = eq2->buf[1] + img_c; + } else + eq2->buf[0] = realloc (eq2->buf[0], img_n); + } + + dst = ff_vf_get_image (vf->next, src->imgfmt, MP_IMGTYPE_EXPORT, 0, src->w, src->h); + + for (i = 0; i < ((src->num_planes>1)?3:1); i++) { + if (eq2->param[i].adjust != NULL) { + dst->planes[i] = eq2->buf[i]; + dst->stride[i] = eq2->buf_w[i]; + + eq2->param[i].adjust (&eq2->param[i], dst->planes[i], src->planes[i], + eq2->buf_w[i], eq2->buf_h[i], dst->stride[i], src->stride[i]); + } + else { + dst->planes[i] = src->planes[i]; + dst->stride[i] = src->stride[i]; + } + } + + return ff_vf_next_put_image (vf, dst, pts); +} + +static +void check_values (eq2_param_t *par) +{ + /* yuck! 
floating point comparisons... */ + + if ((par->c == 1.0) && (par->b == 0.0) && (par->g == 1.0)) { + par->adjust = NULL; + } +#if HAVE_MMX + else if (par->g == 1.0 && ff_gCpuCaps.hasMMX) { + par->adjust = &affine_1d_MMX; + } +#endif + else { + par->adjust = &apply_lut; + } +} + +static +void print_values (vf_eq2_t *eq2) +{ + ff_mp_msg (MSGT_VFILTER, MSGL_V, "vf_eq2: c=%.2f b=%.2f g=%.4f s=%.2f \n", + eq2->contrast, eq2->brightness, eq2->gamma, eq2->saturation + ); +} + +static +void set_contrast (vf_eq2_t *eq2, double c) +{ + eq2->contrast = c; + eq2->param[0].c = c; + eq2->param[0].lut_clean = 0; + check_values (&eq2->param[0]); + print_values (eq2); +} + +static +void set_brightness (vf_eq2_t *eq2, double b) +{ + eq2->brightness = b; + eq2->param[0].b = b; + eq2->param[0].lut_clean = 0; + check_values (&eq2->param[0]); + print_values (eq2); +} + +static +void set_gamma (vf_eq2_t *eq2, double g) +{ + eq2->gamma = g; + + eq2->param[0].g = eq2->gamma * eq2->ggamma; + eq2->param[1].g = sqrt (eq2->bgamma / eq2->ggamma); + eq2->param[2].g = sqrt (eq2->rgamma / eq2->ggamma); + eq2->param[0].w = eq2->param[1].w = eq2->param[2].w = eq2->gamma_weight; + + eq2->param[0].lut_clean = 0; + eq2->param[1].lut_clean = 0; + eq2->param[2].lut_clean = 0; + + check_values (&eq2->param[0]); + check_values (&eq2->param[1]); + check_values (&eq2->param[2]); + + print_values (eq2); +} + +static +void set_saturation (vf_eq2_t *eq2, double s) +{ + eq2->saturation = s; + + eq2->param[1].c = s; + eq2->param[2].c = s; + + eq2->param[1].lut_clean = 0; + eq2->param[2].lut_clean = 0; + + check_values (&eq2->param[1]); + check_values (&eq2->param[2]); + + print_values (eq2); +} + +static +int control (vf_instance_t *vf, int request, void *data) +{ + vf_equalizer_t *eq; + + switch (request) { + case VFCTRL_SET_EQUALIZER: + eq = (vf_equalizer_t *) data; + + if (strcmp (eq->item, "gamma") == 0) { + set_gamma (vf->priv, exp (log (8.0) * eq->value / 100.0)); + return CONTROL_TRUE; + } + else if (strcmp 
(eq->item, "contrast") == 0) { + set_contrast (vf->priv, (1.0 / 100.0) * (eq->value + 100)); + return CONTROL_TRUE; + } + else if (strcmp (eq->item, "brightness") == 0) { + set_brightness (vf->priv, (1.0 / 100.0) * eq->value); + return CONTROL_TRUE; + } + else if (strcmp (eq->item, "saturation") == 0) { + set_saturation (vf->priv, (double) (eq->value + 100) / 100.0); + return CONTROL_TRUE; + } + break; + + case VFCTRL_GET_EQUALIZER: + eq = (vf_equalizer_t *) data; + if (strcmp (eq->item, "gamma") == 0) { + eq->value = (int) (100.0 * log (vf->priv->gamma) / log (8.0)); + return CONTROL_TRUE; + } + else if (strcmp (eq->item, "contrast") == 0) { + eq->value = (int) (100.0 * vf->priv->contrast) - 100; + return CONTROL_TRUE; + } + else if (strcmp (eq->item, "brightness") == 0) { + eq->value = (int) (100.0 * vf->priv->brightness); + return CONTROL_TRUE; + } + else if (strcmp (eq->item, "saturation") == 0) { + eq->value = (int) (100.0 * vf->priv->saturation) - 100; + return CONTROL_TRUE; + } + break; + } + + return ff_vf_next_control (vf, request, data); +} + +static +int query_format (vf_instance_t *vf, unsigned fmt) +{ + switch (fmt) { + case IMGFMT_YVU9: + case IMGFMT_IF09: + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_Y800: + case IMGFMT_Y8: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return ff_vf_next_query_format (vf, fmt); + } + + return 0; +} + +static +void uninit (vf_instance_t *vf) +{ + if (vf->priv != NULL) { + free (vf->priv->buf[0]); + free (vf->priv); + } +} + +static +int vf_open(vf_instance_t *vf, char *args) +{ + unsigned i; + vf_eq2_t *eq2; + double par[8]; + + vf->control = control; + vf->query_format = query_format; + vf->put_image = put_image; + vf->uninit = uninit; + + vf->priv = malloc (sizeof (vf_eq2_t)); + eq2 = vf->priv; + + for (i = 0; i < 3; i++) { + eq2->buf[i] = NULL; + eq2->buf_w[i] = 0; + eq2->buf_h[i] = 0; + + eq2->param[i].adjust = NULL; + eq2->param[i].c = 1.0; + eq2->param[i].b = 0.0; + 
eq2->param[i].g = 1.0; + eq2->param[i].lut_clean = 0; + } + + eq2->contrast = 1.0; + eq2->brightness = 0.0; + eq2->saturation = 1.0; + + eq2->gamma = 1.0; + eq2->gamma_weight = 1.0; + eq2->rgamma = 1.0; + eq2->ggamma = 1.0; + eq2->bgamma = 1.0; + + if (args != NULL) { + par[0] = 1.0; + par[1] = 1.0; + par[2] = 0.0; + par[3] = 1.0; + par[4] = 1.0; + par[5] = 1.0; + par[6] = 1.0; + par[7] = 1.0; + sscanf (args, "%lf:%lf:%lf:%lf:%lf:%lf:%lf:%lf", + par, par + 1, par + 2, par + 3, par + 4, par + 5, par + 6, par + 7 + ); + + eq2->rgamma = par[4]; + eq2->ggamma = par[5]; + eq2->bgamma = par[6]; + eq2->gamma_weight = par[7]; + + set_gamma (eq2, par[0]); + set_contrast (eq2, par[1]); + set_brightness (eq2, par[2]); + set_saturation (eq2, par[3]); + } + + return 1; +} + +const vf_info_t ff_vf_info_eq2 = { + "Software equalizer", + "eq2", + "Hampa Hug, Daniel Moreno, Richard Felker", + "", + &vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_fil.c b/libavfilter/libmpcodecs/vf_fil.c new file mode 100644 index 0000000..80c6648 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_fil.c @@ -0,0 +1,116 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" + +#include "mp_image.h" +#include "vf.h" + +struct vf_priv_s { + int interleave; + int height; + int width; + int stridefactor; +}; + +//===========================================================================// + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + int pixel_stride= (width+15)&~15; //FIXME this is ust a guess ... especially for non planar its somewhat bad one + +#if 0 + if(mpi->flags&MP_IMGFLAG_PLANAR) + pixel_stride= mpi->stride[0]; + else + pixel_stride= 8*mpi->stride[0] / mpi->bpp; + +#endif + + if(vf->priv->interleave){ + vf->priv->height= 2*height; + vf->priv->width= width - (pixel_stride/2); + vf->priv->stridefactor=1; + }else{ + vf->priv->height= height/2; + vf->priv->width= width + pixel_stride; + vf->priv->stridefactor=4; + } +//printf("hX %d %d %d\n", vf->priv->width,vf->priv->height,vf->priv->stridefactor); + + return ff_vf_next_config(vf, vf->priv->width, vf->priv->height, + (d_width*vf->priv->stridefactor)>>1, 2*d_height/vf->priv->stridefactor, flags, outfmt); +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + if(mpi->flags&MP_IMGFLAG_DIRECT){ + // we've used DR, so we're ready... 
+ return ff_vf_next_put_image(vf,(mp_image_t*)mpi->priv, pts); + } + + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_EXPORT, MP_IMGFLAG_ACCEPT_STRIDE, + vf->priv->width, vf->priv->height); + + // set up mpi as a double-stride image of dmpi: + vf->dmpi->planes[0]=mpi->planes[0]; + vf->dmpi->stride[0]=(mpi->stride[0]*vf->priv->stridefactor)>>1; + if(vf->dmpi->flags&MP_IMGFLAG_PLANAR){ + vf->dmpi->planes[1]=mpi->planes[1]; + vf->dmpi->stride[1]=(mpi->stride[1]*vf->priv->stridefactor)>>1; + vf->dmpi->planes[2]=mpi->planes[2]; + vf->dmpi->stride[2]=(mpi->stride[2]*vf->priv->stridefactor)>>1; + } else + vf->dmpi->planes[1]=mpi->planes[1]; // passthru bgr8 palette!!! + + return ff_vf_next_put_image(vf,vf->dmpi, pts); +} + +//===========================================================================// + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args){ + vf->config=config; + vf->put_image=put_image; + vf->uninit=uninit; + vf->default_reqs=VFCAP_ACCEPT_STRIDE; + vf->priv=calloc(1, sizeof(struct vf_priv_s)); + vf->priv->interleave= args && (*args == 'i'); + return 1; +} + +const vf_info_t ff_vf_info_fil = { + "fast (de)interleaver", + "fil", + "Michael Niedermayer", + "", + vf_open, + NULL +}; + +//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_filmdint.c b/libavfilter/libmpcodecs/vf_filmdint.c new file mode 100644 index 0000000..93354e2 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_filmdint.c @@ -0,0 +1,1461 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/time.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vd.h" +#include "vf.h" +#include "cmmx.h" +#include "libavutil/x86/asm.h" +#include "libvo/fastmemcpy.h" + +#define NUM_STORED 4 + +enum pu_field_type_t { + PU_1ST_OF_3, + PU_2ND_OF_3, + PU_3RD_OF_3, + PU_1ST_OF_2, + PU_2ND_OF_2, + PU_INTERLACED +}; + +struct metrics { + /* This struct maps to a packed word 64-bit MMX register */ + unsigned short int even; + unsigned short int odd; + unsigned short int noise; + unsigned short int temp; +} __attribute__ ((aligned (8))); + +struct frame_stats { + struct metrics tiny, low, high, bigger, twox, max; + struct { unsigned int even, odd, noise, temp; } sad; + unsigned short interlaced_high; + unsigned short interlaced_low; + unsigned short num_blocks; +}; + +struct vf_priv_s { + unsigned long inframes; + unsigned long outframes; + enum pu_field_type_t prev_type; + unsigned swapped, chroma_swapped; + unsigned luma_only; + unsigned verbose; + unsigned fast; + unsigned long w, h, cw, ch, stride, chroma_stride, nplanes; + unsigned long sad_thres; + unsigned long dint_thres; + unsigned char *memory_allocated; + unsigned char *planes[2*NUM_STORED][4]; + unsigned char **old_planes; + unsigned long static_idx; + unsigned long temp_idx; + unsigned long crop_x, crop_y, crop_cx, crop_cy; + unsigned long export_count, merge_count; + unsigned long num_breaks; + 
unsigned long num_copies; + long in_inc, out_dec, iosync; + long num_fields; + long prev_fields; + long notout; + long mmx2; + unsigned small_bytes[2]; + unsigned mmx_temp[2]; + struct frame_stats stats[2]; + struct metrics thres; + char chflag; + double diff_time, merge_time, decode_time, vo_time, filter_time; +}; + +#define PPZ { 2000, 2000, 0, 2000 } +#define PPR { 2000, 2000, 0, 2000 } +static const struct frame_stats ppzs = {PPZ,PPZ,PPZ,PPZ,PPZ,PPZ,PPZ,0,0,9999}; +static const struct frame_stats pprs = {PPR,PPR,PPR,PPR,PPR,PPR,PPR,0,0,9999}; + +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif +#ifndef MAX +#define MAX(a,b) (((a)>(b))?(a):(b)) +#endif + +#define PDIFFUB(X,Y,T) "movq " #X "," #T "\n\t" \ + "psubusb " #Y "," #T "\n\t" \ + "psubusb " #X "," #Y "\n\t" \ + "paddusb " #Y "," #T "\n\t" + +#define PDIFFUBT(X,Y,T) "movq " #X "," #T "\n\t" \ + "psubusb " #Y "," #T "\n\t" \ + "psubusb " #X "," #Y "\n\t" \ + "paddusb " #T "," #Y "\n\t" + +#define PSUMBW(X,T,Z) "movq " #X "," #T "\n\t" \ + "punpcklbw " #Z "," #X "\n\t" \ + "punpckhbw " #Z "," #T "\n\t" \ + "paddw " #T "," #X "\n\t" \ + "movq " #X "," #T "\n\t" \ + "psllq $32, " #T "\n\t" \ + "paddw " #T "," #X "\n\t" \ + "movq " #X "," #T "\n\t" \ + "psllq $16, " #T "\n\t" \ + "paddw " #T "," #X "\n\t" \ + "psrlq $48, " #X "\n\t" + +#define PSADBW(X,Y,T,Z) PDIFFUBT(X,Y,T) PSUMBW(Y,T,Z) + +#define PMAXUB(X,Y) "psubusb " #X "," #Y "\n\tpaddusb " #X "," #Y "\n\t" +#define PMAXUW(X,Y) "psubusw " #X "," #Y "\n\tpaddusw " #X "," #Y "\n\t" +#define PMINUBT(X,Y,T) "movq " #Y "," #T "\n\t" \ + "psubusb " #X "," #T "\n\t" \ + "psubusb " #T "," #Y "\n\t" +#define PAVGB(X,Y) "pavgusb " #X "," #Y "\n\t" + +static inline void +get_metrics_c(unsigned char *a, unsigned char *b, int as, int bs, int lines, + struct metrics *m) +{ + a -= as; + b -= bs; + do { + cmmx_t old_po = *(cmmx_t*)(a ); + cmmx_t po = *(cmmx_t*)(b ); + cmmx_t e = *(cmmx_t*)(b + bs); + cmmx_t old_o = *(cmmx_t*)(a + 2*as); + cmmx_t o = 
*(cmmx_t*)(b + 2*bs); + cmmx_t ne = *(cmmx_t*)(b + 3*bs); + cmmx_t old_no = *(cmmx_t*)(a + 4*as); + cmmx_t no = *(cmmx_t*)(b + 4*bs); + + cmmx_t qup_old_odd = p31avgb(old_o, old_po); + cmmx_t qup_odd = p31avgb( o, po); + cmmx_t qdown_old_odd = p31avgb(old_o, old_no); + cmmx_t qdown_odd = p31avgb( o, no); + + cmmx_t qup_even = p31avgb(ne, e); + cmmx_t qdown_even = p31avgb(e, ne); + + cmmx_t temp_up_diff = pdiffub(qdown_even, qup_old_odd); + cmmx_t noise_up_diff = pdiffub(qdown_even, qup_odd); + cmmx_t temp_down_diff = pdiffub(qup_even, qdown_old_odd); + cmmx_t noise_down_diff = pdiffub(qup_even, qdown_odd); + + cmmx_t odd_diff = pdiffub(o, old_o); + m->odd += psumbw(odd_diff); + m->even += psadbw(e, *(cmmx_t*)(a+as)); + + temp_up_diff = pminub(temp_up_diff, temp_down_diff); + temp_up_diff = pminub(temp_up_diff, odd_diff); + m->temp += psumbw(temp_up_diff); + noise_up_diff = pminub(noise_up_diff, odd_diff); + noise_up_diff = pminub(noise_up_diff, noise_down_diff); + + m->noise += psumbw(noise_up_diff); + a += 2*as; + b += 2*bs; + } while (--lines); +} + +static inline void +get_metrics_fast_c(unsigned char *a, unsigned char *b, int as, int bs, + int lines, struct metrics *m) +{ + a -= as; + b -= bs; + do { + cmmx_t old_po = (*(cmmx_t*)(a ) >> 1) & ~SIGN_BITS; + cmmx_t po = (*(cmmx_t*)(b ) >> 1) & ~SIGN_BITS; + cmmx_t old_e = (*(cmmx_t*)(a + as) >> 1) & ~SIGN_BITS; + cmmx_t e = (*(cmmx_t*)(b + bs) >> 1) & ~SIGN_BITS; + cmmx_t old_o = (*(cmmx_t*)(a + 2*as) >> 1) & ~SIGN_BITS; + cmmx_t o = (*(cmmx_t*)(b + 2*bs) >> 1) & ~SIGN_BITS; + cmmx_t ne = (*(cmmx_t*)(b + 3*bs) >> 1) & ~SIGN_BITS; + cmmx_t old_no = (*(cmmx_t*)(a + 4*as) >> 1) & ~SIGN_BITS; + cmmx_t no = (*(cmmx_t*)(b + 4*bs) >> 1) & ~SIGN_BITS; + + cmmx_t qup_old_odd = p31avgb_s(old_o, old_po); + cmmx_t qup_odd = p31avgb_s( o, po); + cmmx_t qdown_old_odd = p31avgb_s(old_o, old_no); + cmmx_t qdown_odd = p31avgb_s( o, no); + + cmmx_t qup_even = p31avgb_s(ne, e); + cmmx_t qdown_even = p31avgb_s(e, ne); + + cmmx_t 
temp_up_diff = pdiffub_s(qdown_even, qup_old_odd); + cmmx_t noise_up_diff = pdiffub_s(qdown_even, qup_odd); + cmmx_t temp_down_diff = pdiffub_s(qup_even, qdown_old_odd); + cmmx_t noise_down_diff = pdiffub_s(qup_even, qdown_odd); + + cmmx_t odd_diff = pdiffub_s(o, old_o); + m->odd += psumbw_s(odd_diff) << 1; + m->even += psadbw_s(e, old_e) << 1; + + temp_up_diff = pminub_s(temp_up_diff, temp_down_diff); + temp_up_diff = pminub_s(temp_up_diff, odd_diff); + m->temp += psumbw_s(temp_up_diff) << 1; + noise_up_diff = pminub_s(noise_up_diff, odd_diff); + noise_up_diff = pminub_s(noise_up_diff, noise_down_diff); + + m->noise += psumbw_s(noise_up_diff) << 1; + a += 2*as; + b += 2*bs; + } while (--lines); +} + +static inline void +get_metrics_faster_c(unsigned char *a, unsigned char *b, int as, int bs, + int lines, struct metrics *m) +{ + a -= as; + b -= bs; + do { + cmmx_t old_po = (*(cmmx_t*)(a )>>1) & ~SIGN_BITS; + cmmx_t po = (*(cmmx_t*)(b )>>1) & ~SIGN_BITS; + cmmx_t old_e = (*(cmmx_t*)(a + as)>>1) & ~SIGN_BITS; + cmmx_t e = (*(cmmx_t*)(b + bs)>>1) & ~SIGN_BITS; + cmmx_t old_o = (*(cmmx_t*)(a + 2*as)>>1) & ~SIGN_BITS; + cmmx_t o = (*(cmmx_t*)(b + 2*bs)>>1) & ~SIGN_BITS; + cmmx_t ne = (*(cmmx_t*)(b + 3*bs)>>1) & ~SIGN_BITS; + + cmmx_t down_even = p31avgb_s(e, ne); + cmmx_t up_odd = p31avgb_s(o, po); + cmmx_t up_old_odd = p31avgb_s(old_o, old_po); + + cmmx_t odd_diff = pdiffub_s(o, old_o); + cmmx_t temp_diff = pdiffub_s(down_even, up_old_odd); + cmmx_t noise_diff = pdiffub_s(down_even, up_odd); + + m->even += psadbw_s(e, old_e) << 1; + m->odd += psumbw_s(odd_diff) << 1; + + temp_diff = pminub_s(temp_diff, odd_diff); + noise_diff = pminub_s(noise_diff, odd_diff); + + m->noise += psumbw_s(noise_diff) << 1; + m->temp += psumbw_s(temp_diff) << 1; + a += 2*as; + b += 2*bs; + } while (--lines); + +} + +static inline void +get_block_stats(struct metrics *m, struct vf_priv_s *p, struct frame_stats *s) +{ + unsigned two_e = m->even + MAX(m->even , p->thres.even ); + unsigned two_o 
= m->odd + MAX(m->odd , p->thres.odd ); + unsigned two_n = m->noise + MAX(m->noise, p->thres.noise); + unsigned two_t = m->temp + MAX(m->temp , p->thres.temp ); + + unsigned e_big = m->even >= (m->odd + two_o + 1)/2; + unsigned o_big = m->odd >= (m->even + two_e + 1)/2; + unsigned n_big = m->noise >= (m->temp + two_t + 1)/2; + unsigned t_big = m->temp >= (m->noise + two_n + 1)/2; + + unsigned e2x = m->even >= two_o; + unsigned o2x = m->odd >= two_e; + unsigned n2x = m->noise >= two_t; + unsigned t2x = m->temp >= two_n; + + unsigned ntiny_e = m->even > p->thres.even ; + unsigned ntiny_o = m->odd > p->thres.odd ; + unsigned ntiny_n = m->noise > p->thres.noise; + unsigned ntiny_t = m->temp > p->thres.temp ; + + unsigned nlow_e = m->even > 2*p->thres.even ; + unsigned nlow_o = m->odd > 2*p->thres.odd ; + unsigned nlow_n = m->noise > 2*p->thres.noise; + unsigned nlow_t = m->temp > 2*p->thres.temp ; + + unsigned high_e = m->even > 4*p->thres.even ; + unsigned high_o = m->odd > 4*p->thres.odd ; + unsigned high_n = m->noise > 4*p->thres.noise; + unsigned high_t = m->temp > 4*p->thres.temp ; + + unsigned low_il = !n_big && !t_big && ntiny_n && ntiny_t; + unsigned high_il = !n_big && !t_big && nlow_n && nlow_t; + + if (low_il | high_il) { + s->interlaced_low += low_il; + s->interlaced_high += high_il; + } else { + s->tiny.even += ntiny_e; + s->tiny.odd += ntiny_o; + s->tiny.noise += ntiny_n; + s->tiny.temp += ntiny_t; + + s->low .even += nlow_e ; + s->low .odd += nlow_o ; + s->low .noise += nlow_n ; + s->low .temp += nlow_t ; + + s->high.even += high_e ; + s->high.odd += high_o ; + s->high.noise += high_n ; + s->high.temp += high_t ; + + if (m->even >= p->sad_thres) s->sad.even += m->even ; + if (m->odd >= p->sad_thres) s->sad.odd += m->odd ; + if (m->noise >= p->sad_thres) s->sad.noise += m->noise; + if (m->temp >= p->sad_thres) s->sad.temp += m->temp ; + } + s->num_blocks++; + s->max.even = MAX(s->max.even , m->even ); + s->max.odd = MAX(s->max.odd , m->odd ); + 
s->max.noise = MAX(s->max.noise, m->noise); + s->max.temp = MAX(s->max.temp , m->temp ); + + s->bigger.even += e_big ; + s->bigger.odd += o_big ; + s->bigger.noise += n_big ; + s->bigger.temp += t_big ; + + s->twox.even += e2x ; + s->twox.odd += o2x ; + s->twox.noise += n2x ; + s->twox.temp += t2x ; + +} + +static inline struct metrics +block_metrics_c(unsigned char *a, unsigned char *b, int as, int bs, + int lines, struct vf_priv_s *p, struct frame_stats *s) +{ + struct metrics tm; + tm.even = tm.odd = tm.noise = tm.temp = 0; + get_metrics_c(a, b, as, bs, lines, &tm); + if (sizeof(cmmx_t) < 8) + get_metrics_c(a+4, b+4, as, bs, lines, &tm); + get_block_stats(&tm, p, s); + return tm; +} + +static inline struct metrics +block_metrics_fast_c(unsigned char *a, unsigned char *b, int as, int bs, + int lines, struct vf_priv_s *p, struct frame_stats *s) +{ + struct metrics tm; + tm.even = tm.odd = tm.noise = tm.temp = 0; + get_metrics_fast_c(a, b, as, bs, lines, &tm); + if (sizeof(cmmx_t) < 8) + get_metrics_fast_c(a+4, b+4, as, bs, lines, &tm); + get_block_stats(&tm, p, s); + return tm; +} + +static inline struct metrics +block_metrics_faster_c(unsigned char *a, unsigned char *b, int as, int bs, + int lines, struct vf_priv_s *p, struct frame_stats *s) +{ + struct metrics tm; + tm.even = tm.odd = tm.noise = tm.temp = 0; + get_metrics_faster_c(a, b, as, bs, lines, &tm); + if (sizeof(cmmx_t) < 8) + get_metrics_faster_c(a+4, b+4, as, bs, lines, &tm); + get_block_stats(&tm, p, s); + return tm; +} + +#define MEQ(X,Y) ((X).even == (Y).even && (X).odd == (Y).odd && (X).temp == (Y).temp && (X).noise == (Y).noise) + +#define BLOCK_METRICS_TEMPLATE() \ + __asm__ volatile("pxor %mm7, %mm7\n\t" /* The result is colleted in mm7 */ \ + "pxor %mm6, %mm6\n\t" /* Temp to stay at 0 */ \ + ); \ + a -= as; \ + b -= bs; \ + do { \ + __asm__ volatile( \ + "movq (%0,%2), %%mm0\n\t" \ + "movq (%1,%3), %%mm1\n\t" /* mm1 = even */ \ + PSADBW(%%mm1, %%mm0, %%mm4, %%mm6) \ + "paddusw %%mm0, %%mm7\n\t" 
/* even diff */ \ + "movq (%0,%2,2), %%mm0\n\t" /* mm0 = old odd */ \ + "movq (%1,%3,2), %%mm2\n\t" /* mm2 = odd */ \ + "movq (%0), %%mm3\n\t" \ + "psubusb %4, %%mm3\n\t" \ + PAVGB(%%mm0, %%mm3) \ + PAVGB(%%mm0, %%mm3) /* mm3 = qup old odd */ \ + "movq %%mm0, %%mm5\n\t" \ + PSADBW(%%mm2, %%mm0, %%mm4, %%mm6) \ + "psllq $16, %%mm0\n\t" \ + "paddusw %%mm0, %%mm7\n\t" \ + "movq (%1), %%mm4\n\t" \ + "lea (%0,%2,2), %0\n\t" \ + "lea (%1,%3,2), %1\n\t" \ + "psubusb %4, %%mm4\n\t" \ + PAVGB(%%mm2, %%mm4) \ + PAVGB(%%mm2, %%mm4) /* mm4 = qup odd */ \ + PDIFFUBT(%%mm5, %%mm2, %%mm0) /* mm2 =abs(oldodd-odd) */ \ + "movq (%1,%3), %%mm5\n\t" \ + "psubusb %4, %%mm5\n\t" \ + PAVGB(%%mm1, %%mm5) \ + PAVGB(%%mm5, %%mm1) /* mm1 = qdown even */ \ + PAVGB((%1,%3), %%mm5) /* mm5 = qup next even */ \ + PDIFFUBT(%%mm1, %%mm3, %%mm0) /* mm3 = abs(qupoldo-qde) */ \ + PDIFFUBT(%%mm1, %%mm4, %%mm0) /* mm4 = abs(qupodd-qde) */ \ + PMINUBT(%%mm2, %%mm3, %%mm0) /* limit temp to odd diff */ \ + PMINUBT(%%mm2, %%mm4, %%mm0) /* limit noise to odd diff */ \ + "movq (%1,%3,2), %%mm2\n\t" \ + "psubusb %4, %%mm2\n\t" \ + PAVGB((%1), %%mm2) \ + PAVGB((%1), %%mm2) /* mm2 = qdown odd */ \ + "movq (%0,%2,2), %%mm1\n\t" \ + "psubusb %4, %%mm1\n\t" \ + PAVGB((%0), %%mm1) \ + PAVGB((%0), %%mm1) /* mm1 = qdown old odd */ \ + PDIFFUBT(%%mm5, %%mm2, %%mm0) /* mm2 = abs(qdo-qune) */ \ + PDIFFUBT(%%mm5, %%mm1, %%mm0) /* mm1 = abs(qdoo-qune) */ \ + PMINUBT(%%mm4, %%mm2, %%mm0) /* current */ \ + PMINUBT(%%mm3, %%mm1, %%mm0) /* old */ \ + PSUMBW(%%mm2, %%mm0, %%mm6) \ + PSUMBW(%%mm1, %%mm0, %%mm6) \ + "psllq $32, %%mm2\n\t" \ + "psllq $48, %%mm1\n\t" \ + "paddusw %%mm2, %%mm7\n\t" \ + "paddusw %%mm1, %%mm7\n\t" \ + : "=r" (a), "=r" (b) \ + : "r"((x86_reg)as), "r"((x86_reg)bs), "m" (ones), "0"(a), "1"(b), "X"(*a), "X"(*b) \ + ); \ + } while (--lines); + +static inline struct metrics +block_metrics_3dnow(unsigned char *a, unsigned char *b, int as, int bs, + int lines, struct vf_priv_s *p, struct frame_stats *s) +{ + 
struct metrics tm; +#if !HAVE_AMD3DNOW + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "block_metrics_3dnow: internal error\n"); +#else + static const unsigned long long ones = 0x0101010101010101ull; + + BLOCK_METRICS_TEMPLATE(); + __asm__ volatile("movq %%mm7, %0\n\temms" : "=m" (tm)); + get_block_stats(&tm, p, s); +#endif + return tm; +} + +#undef PSUMBW +#undef PSADBW +#undef PMAXUB +#undef PMINUBT +#undef PAVGB + +#define PSUMBW(X,T,Z) "psadbw " #Z "," #X "\n\t" +#define PSADBW(X,Y,T,Z) "psadbw " #X "," #Y "\n\t" +#define PMAXUB(X,Y) "pmaxub " #X "," #Y "\n\t" +#define PMINUBT(X,Y,T) "pminub " #X "," #Y "\n\t" +#define PAVGB(X,Y) "pavgb " #X "," #Y "\n\t" + +static inline struct metrics +block_metrics_mmx2(unsigned char *a, unsigned char *b, int as, int bs, + int lines, struct vf_priv_s *p, struct frame_stats *s) +{ + struct metrics tm; +#if !HAVE_MMX + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "block_metrics_mmx2: internal error\n"); +#else + static const unsigned long long ones = 0x0101010101010101ull; + x86_reg interlaced; + x86_reg prefetch_line = (((long)a>>3) & 7) + 10; +#ifdef DEBUG + struct frame_stats ts = *s; +#endif + __asm__ volatile("prefetcht0 (%0,%2)\n\t" + "prefetcht0 (%1,%3)\n\t" : + : "r" (a), "r" (b), + "r" (prefetch_line * as), "r" (prefetch_line * bs)); + + BLOCK_METRICS_TEMPLATE(); + + s->num_blocks++; + __asm__ volatile( + "movq %3, %%mm0\n\t" + "movq %%mm7, %%mm1\n\t" + "psubusw %%mm0, %%mm1\n\t" + "movq %%mm1, %%mm2\n\t" + "paddusw %%mm0, %%mm2\n\t" + "paddusw %%mm7, %%mm2\n\t" + "pshufw $0xb1, %%mm2, %%mm3\n\t" + "pavgw %%mm7, %%mm2\n\t" + "pshufw $0xb1, %%mm2, %%mm2\n\t" + "psubusw %%mm7, %%mm2\n\t" + "pcmpeqw %%mm6, %%mm2\n\t" /* 1 if >= 1.5x */ + "psubusw %%mm7, %%mm3\n\t" + "pcmpeqw %%mm6, %%mm3\n\t" /* 1 if >= 2x */ + "movq %1, %%mm4\n\t" + "movq %2, %%mm5\n\t" + "psubw %%mm2, %%mm4\n\t" + "psubw %%mm3, %%mm5\n\t" + "movq %%mm4, %1\n\t" + "movq %%mm5, %2\n\t" + "pxor %%mm4, %%mm4\n\t" + "pcmpeqw %%mm1, %%mm4\n\t" /* 1 if <= t */ + "psubusw %%mm0, 
%%mm1\n\t" + "pxor %%mm5, %%mm5\n\t" + "pcmpeqw %%mm1, %%mm5\n\t" /* 1 if <= 2t */ + "psubusw %%mm0, %%mm1\n\t" + "psubusw %%mm0, %%mm1\n\t" + "pcmpeqw %%mm6, %%mm1\n\t" /* 1 if <= 4t */ + "pshufw $0xb1, %%mm2, %%mm0\n\t" + "por %%mm2, %%mm0\n\t" /* 1 if not close */ + "punpckhdq %%mm0, %%mm0\n\t" + "movq %%mm4, %%mm2\n\t" /* tttt */ + "punpckhdq %%mm5, %%mm2\n\t" /* ttll */ + "por %%mm2, %%mm0\n\t" + "pcmpeqd %%mm6, %%mm0\n\t" /* close && big */ + "psrlq $16, %%mm0\n\t" + "psrlw $15, %%mm0\n\t" + "movd %%mm0, %0\n\t" + : "=r" (interlaced), "=m" (s->bigger), "=m" (s->twox) + : "m" (p->thres) + ); + + if (interlaced) { + s->interlaced_high += interlaced >> 16; + s->interlaced_low += interlaced; + } else { + __asm__ volatile( + "pcmpeqw %%mm0, %%mm0\n\t" /* -1 */ + "psubw %%mm0, %%mm4\n\t" + "psubw %%mm0, %%mm5\n\t" + "psubw %%mm0, %%mm1\n\t" + "paddw %0, %%mm4\n\t" + "paddw %1, %%mm5\n\t" + "paddw %2, %%mm1\n\t" + "movq %%mm4, %0\n\t" + "movq %%mm5, %1\n\t" + "movq %%mm1, %2\n\t" + : "=m" (s->tiny), "=m" (s->low), "=m" (s->high) + ); + + __asm__ volatile( + "pshufw $0, %2, %%mm0\n\t" + "psubusw %%mm7, %%mm0\n\t" + "pcmpeqw %%mm6, %%mm0\n\t" /* 0 if below sad_thres */ + "pand %%mm7, %%mm0\n\t" + "movq %%mm0, %%mm1\n\t" + "punpcklwd %%mm6, %%mm0\n\t" /* sad even, odd */ + "punpckhwd %%mm6, %%mm1\n\t" /* sad noise, temp */ + "paddd %0, %%mm0\n\t" + "paddd %1, %%mm1\n\t" + "movq %%mm0, %0\n\t" + "movq %%mm1, %1\n\t" + : "=m" (s->sad.even), "=m" (s->sad.noise) + : "m" (p->sad_thres) + ); + } + + __asm__ volatile( + "movq %%mm7, (%1)\n\t" + PMAXUW((%0), %%mm7) + "movq %%mm7, (%0)\n\t" + "emms" + : : "r" (&s->max), "r" (&tm), "X" (s->max) + : "memory" + ); +#ifdef DEBUG + if (1) { + struct metrics cm; + a -= 7*as; + b -= 7*bs; + cm = block_metrics_c(a, b, as, bs, 4, p, &ts); + if (!MEQ(tm, cm)) + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, "Bad metrics\n"); + if (s) { +# define CHECK(X) if (!MEQ(s->X, ts.X)) \ + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, "Bad " #X "\n"); + CHECK(tiny); + 
CHECK(low); + CHECK(high); + CHECK(sad); + CHECK(max); + } + } +#endif +#endif + return tm; +} + +static inline int +dint_copy_line_mmx2(unsigned char *dst, unsigned char *a, long bos, + long cos, int ds, int ss, int w, int t) +{ +#if !HAVE_MMX + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, "dint_copy_line_mmx2: internal error\n"); + return 0; +#else + unsigned long len = (w+7) >> 3; + int ret; + __asm__ volatile ( + "pxor %%mm6, %%mm6 \n\t" /* deinterlaced pixel counter */ + "movd %0, %%mm7 \n\t" + "punpcklbw %%mm7, %%mm7 \n\t" + "punpcklwd %%mm7, %%mm7 \n\t" + "punpckldq %%mm7, %%mm7 \n\t" /* mm7 = threshold */ + : /* no output */ + : "rm" (t) + ); + do { + __asm__ volatile ( + "movq (%0), %%mm0\n\t" + "movq (%0,%3,2), %%mm1\n\t" + "movq %%mm0, (%2)\n\t" + "pmaxub %%mm1, %%mm0\n\t" + "pavgb (%0), %%mm1\n\t" + "psubusb %%mm1, %%mm0\n\t" + "paddusb %%mm7, %%mm0\n\t" /* mm0 = max-avg+thr */ + "movq (%0,%1), %%mm2\n\t" + "movq (%0,%5), %%mm3\n\t" + "movq %%mm2, %%mm4\n\t" + PDIFFUBT(%%mm1, %%mm2, %%mm5) + PDIFFUBT(%%mm1, %%mm3, %%mm5) + "pminub %%mm2, %%mm3\n\t" + "pcmpeqb %%mm3, %%mm2\n\t" /* b = min */ + "pand %%mm2, %%mm4\n\t" + "pandn (%0,%5), %%mm2\n\t" + "por %%mm4, %%mm2\n\t" + "pminub %%mm0, %%mm3\n\t" + "pcmpeqb %%mm0, %%mm3\n\t" /* set to 1s if >= threshold */ + "psubb %%mm3, %%mm6\n\t" /* count pixels above thr. 
*/ + "pand %%mm3, %%mm1 \n\t" + "pandn %%mm2, %%mm3 \n\t" + "por %%mm3, %%mm1 \n\t" /* avg if >= threshold */ + "movq %%mm1, (%2,%4) \n\t" + : /* no output */ + : "r" (a), "r" ((x86_reg)bos), "r" ((x86_reg)dst), "r" ((x86_reg)ss), "r" ((x86_reg)ds), "r" ((x86_reg)cos) + ); + a += 8; + dst += 8; + } while (--len); + + __asm__ volatile ("pxor %%mm7, %%mm7 \n\t" + "psadbw %%mm6, %%mm7 \n\t" + "movd %%mm7, %0 \n\t" + "emms \n\t" + : "=r" (ret) + ); + return ret; +#endif +} + +static inline int +dint_copy_line(unsigned char *dst, unsigned char *a, long bos, + long cos, int ds, int ss, int w, int t) +{ + unsigned long len = ((unsigned long)w+sizeof(cmmx_t)-1) / sizeof(cmmx_t); + cmmx_t dint_count = 0; + cmmx_t thr; + t |= t << 8; + thr = t | (t << 16); + if (sizeof(cmmx_t) > 4) + thr |= thr << (sizeof(cmmx_t)*4); + do { + cmmx_t e = *(cmmx_t*)a; + cmmx_t ne = *(cmmx_t*)(a+2*ss); + cmmx_t o = *(cmmx_t*)(a+bos); + cmmx_t oo = *(cmmx_t*)(a+cos); + cmmx_t maxe = pmaxub(e, ne); + cmmx_t avge = pavgb(e, ne); + cmmx_t max_diff = maxe - avge + thr; /* 0<=max-avg<128, thr<128 */ + cmmx_t diffo = pdiffub(avge, o); + cmmx_t diffoo = pdiffub(avge, oo); + cmmx_t diffcmp = pcmpgtub(diffo, diffoo); + cmmx_t bo = ((oo ^ o) & diffcmp) ^ o; + cmmx_t diffbo = ((diffoo ^ diffo) & diffcmp) ^ diffo; + cmmx_t above_thr = ~pcmpgtub(max_diff, diffbo); + cmmx_t bo_or_avg = ((avge ^ bo) & above_thr) ^ bo; + dint_count += above_thr & ONE_BYTES; + *(cmmx_t*)(dst) = e; + *(cmmx_t*)(dst+ds) = bo_or_avg; + a += sizeof(cmmx_t); + dst += sizeof(cmmx_t); + } while (--len); + return psumbw(dint_count); +} + +static int +dint_copy_plane(unsigned char *d, unsigned char *a, unsigned char *b, + unsigned char *c, unsigned long w, unsigned long h, + unsigned long ds, unsigned long ss, unsigned long threshold, + long field, long mmx2) +{ + unsigned long ret = 0; + long bos = b - a; + long cos = c - a; + if (field) { + fast_memcpy(d, b, w); + h--; + d += ds; + a += ss; + } + bos += ss; + cos += ss; + while (h > 2) 
{ + if (threshold >= 128) { + fast_memcpy(d, a, w); + fast_memcpy(d+ds, a+bos, w); + } else if (mmx2 == 1) { + ret += dint_copy_line_mmx2(d, a, bos, cos, ds, ss, w, threshold); + } else + ret += dint_copy_line(d, a, bos, cos, ds, ss, w, threshold); + h -= 2; + d += 2*ds; + a += 2*ss; + } + fast_memcpy(d, a, w); + if (h == 2) + fast_memcpy(d+ds, a+bos, w); + return ret; +} + +static void +copy_merge_fields(struct vf_priv_s *p, mp_image_t *dmpi, + unsigned char **old, unsigned char **new, unsigned long show) +{ + unsigned long threshold = 256; + unsigned long field = p->swapped; + unsigned long dint_pixels = 0; + unsigned char **other = old; + if (show >= 12 || !(show & 3)) + show >>= 2, other = new, new = old; + if (show <= 2) { /* Single field: de-interlace */ + threshold = p->dint_thres; + field ^= show & 1; + old = new; + } else if (show == 3) + old = new; + else + field ^= 1; + dint_pixels +=dint_copy_plane(dmpi->planes[0], old[0], new[0], + other[0], p->w, p->h, dmpi->stride[0], + p->stride, threshold, field, p->mmx2); + if (dmpi->flags & MP_IMGFLAG_PLANAR) { + if (p->luma_only) + old = new, other = new; + else + threshold = threshold/2 + 1; + field ^= p->chroma_swapped; + dint_copy_plane(dmpi->planes[1], old[1], new[1], + other[1], p->cw, p->ch, dmpi->stride[1], + p->chroma_stride, threshold, field, p->mmx2); + dint_copy_plane(dmpi->planes[2], old[2], new[2], + other[2], p->cw, p->ch, dmpi->stride[2], + p->chroma_stride, threshold, field, p->mmx2); + } + if (dint_pixels > 0 && p->verbose) + ff_mp_msg(MSGT_VFILTER,MSGL_INFO,"Deinterlaced %lu pixels\n",dint_pixels); +} + +static void diff_planes(struct vf_priv_s *p, struct frame_stats *s, + unsigned char *of, unsigned char *nf, + int w, int h, int os, int ns, int swapped) +{ + int i, y; + int align = -(long)nf & 7; + of += align; + nf += align; + w -= align; + if (swapped) + of -= os, nf -= ns; + i = (h*3 >> 7) & ~1; + of += i*os + 8; + nf += i*ns + 8; + h -= i; + w -= 16; + + memset(s, 0, sizeof(*s)); + + for 
(y = (h-8) >> 3; y; y--) { + if (p->mmx2 == 1) { + for (i = 0; i < w; i += 8) + block_metrics_mmx2(of+i, nf+i, os, ns, 4, p, s); + } else if (p->mmx2 == 2) { + for (i = 0; i < w; i += 8) + block_metrics_3dnow(of+i, nf+i, os, ns, 4, p, s); + } else if (p->fast > 3) { + for (i = 0; i < w; i += 8) + block_metrics_faster_c(of+i, nf+i, os, ns, 4, p, s); + } else if (p->fast > 1) { + for (i = 0; i < w; i += 8) + block_metrics_fast_c(of+i, nf+i, os, ns, 4, p, s); + } else { + for (i = 0; i < w; i += 8) + block_metrics_c(of+i, nf+i, os, ns, 4, p, s); + } + of += 8*os; + nf += 8*ns; + } +} + +#define METRICS(X) (X).even, (X).odd, (X).noise, (X).temp + +static void diff_fields(struct vf_priv_s *p, struct frame_stats *s, + unsigned char **old, unsigned char **new) +{ + diff_planes(p, s, old[0], new[0], p->w, p->h, + p->stride, p->stride, p->swapped); + s->sad.even = (s->sad.even * 16ul) / s->num_blocks; + s->sad.odd = (s->sad.odd * 16ul) / s->num_blocks; + s->sad.noise = (s->sad.noise * 16ul) / s->num_blocks; + s->sad.temp = (s->sad.temp * 16ul) / s->num_blocks; + if (p->verbose) + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "%lu%c M:%d/%d/%d/%d - %d, " + "t:%d/%d/%d/%d, l:%d/%d/%d/%d, h:%d/%d/%d/%d, bg:%d/%d/%d/%d, " + "2x:%d/%d/%d/%d, sad:%d/%d/%d/%d, lil:%d, hil:%d, ios:%.1f\n", + p->inframes, p->chflag, METRICS(s->max), s->num_blocks, + METRICS(s->tiny), METRICS(s->low), METRICS(s->high), + METRICS(s->bigger), METRICS(s->twox), METRICS(s->sad), + s->interlaced_low, s->interlaced_high, + p->iosync / (double) p->in_inc); +} + +static const char *parse_args(struct vf_priv_s *p, const char *args) +{ + args--; + while (args && *++args && + (sscanf(args, "io=%lu:%lu", &p->out_dec, &p->in_inc) == 2 || + sscanf(args, "diff_thres=%hu", &p->thres.even ) == 1 || + sscanf(args, "comb_thres=%hu", &p->thres.noise) == 1 || + sscanf(args, "sad_thres=%lu", &p->sad_thres ) == 1 || + sscanf(args, "dint_thres=%lu", &p->dint_thres ) == 1 || + sscanf(args, "fast=%u", &p->fast ) == 1 || + sscanf(args, 
"mmx2=%lu", &p->mmx2 ) == 1 || + sscanf(args, "luma_only=%u", &p->luma_only ) == 1 || + sscanf(args, "verbose=%u", &p->verbose ) == 1 || + sscanf(args, "crop=%lu:%lu:%lu:%lu", &p->w, + &p->h, &p->crop_x, &p->crop_y) == 4)) + args = strchr(args, '/'); + return args; +} + +static unsigned long gcd(unsigned long x, unsigned long y) +{ + unsigned long t; + if (x > y) + t = x, x = y, y = t; + + while (x) { + t = y % x; + y = x; + x = t; + } + return y; +} + +static void init(struct vf_priv_s *p, mp_image_t *mpi) +{ + unsigned long i; + unsigned long plane_size, chroma_plane_size; + unsigned char *plane; + unsigned long cos, los; + p->crop_cx = p->crop_x >> mpi->chroma_x_shift; + p->crop_cy = p->crop_y >> mpi->chroma_y_shift; + if (mpi->flags & MP_IMGFLAG_ACCEPT_STRIDE) { + p->stride = (mpi->w + 15) & ~15; + p->chroma_stride = p->stride >> mpi->chroma_x_shift; + } else { + p->stride = mpi->width; + p->chroma_stride = mpi->chroma_width; + } + p->cw = p->w >> mpi->chroma_x_shift; + p->ch = p->h >> mpi->chroma_y_shift; + p->nplanes = 1; + p->static_idx = 0; + p->temp_idx = 0; + p->old_planes = p->planes[0]; + plane_size = mpi->h * p->stride; + chroma_plane_size = mpi->flags & MP_IMGFLAG_PLANAR ? 
+ mpi->chroma_height * p->chroma_stride : 0; + p->memory_allocated = + malloc(NUM_STORED * (plane_size+2*chroma_plane_size) + + 8*p->chroma_stride + 4096); + /* align to page boundary */ + plane = p->memory_allocated + (-(long)p->memory_allocated & 4095); + memset(plane, 0, NUM_STORED * plane_size); + los = p->crop_x + p->crop_y * p->stride; + cos = p->crop_cx + p->crop_cy * p->chroma_stride; + for (i = 0; i != NUM_STORED; i++, plane += plane_size) { + p->planes[i][0] = plane; + p->planes[NUM_STORED + i][0] = plane + los; + } + if (mpi->flags & MP_IMGFLAG_PLANAR) { + p->nplanes = 3; + memset(plane, 0x80, NUM_STORED * 2 * chroma_plane_size); + for (i = 0; i != NUM_STORED; i++) { + p->planes[i][1] = plane; + p->planes[NUM_STORED + i][1] = plane + cos; + plane += chroma_plane_size; + p->planes[i][2] = plane; + p->planes[NUM_STORED + i][2] = plane + cos; + plane += chroma_plane_size; + } + } + p->out_dec <<= 2; + i = gcd(p->in_inc, p->out_dec); + p->in_inc /= i; + p->out_dec /= i; + p->iosync = 0; + p->num_fields = 3; +} + +static inline double get_time(void) +{ + struct timeval tv; + gettimeofday(&tv, 0); + return tv.tv_sec + tv.tv_usec * 1e-6; +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi) +{ + struct vf_priv_s *p = vf->priv; + static unsigned char **planes, planes_idx; + + if (mpi->type == MP_IMGTYPE_STATIC) return; + + if (!p->planes[0][0]) init(p, mpi); + + if (mpi->type == MP_IMGTYPE_TEMP || + (mpi->type == MP_IMGTYPE_IPB && !(mpi->flags & MP_IMGFLAG_READABLE))) + planes_idx = NUM_STORED/2 + (++p->temp_idx % (NUM_STORED/2)); + else + planes_idx = ++p->static_idx % (NUM_STORED/2); + planes = p->planes[planes_idx]; + mpi->priv = p->planes[NUM_STORED + planes_idx]; + if (mpi->priv == p->old_planes) { + unsigned char **old_planes = + p->planes[NUM_STORED + 2 + (++p->temp_idx & 1)]; + my_memcpy_pic(old_planes[0], p->old_planes[0], + p->w, p->h, p->stride, p->stride); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(old_planes[1], 
p->old_planes[1], + p->cw, p->ch, p->chroma_stride, p->chroma_stride); + my_memcpy_pic(old_planes[2], p->old_planes[2], + p->cw, p->ch, p->chroma_stride, p->chroma_stride); + } + p->old_planes = old_planes; + p->num_copies++; + } + mpi->planes[0] = planes[0]; + mpi->stride[0] = p->stride; + if (mpi->flags & MP_IMGFLAG_PLANAR) { + mpi->planes[1] = planes[1]; + mpi->planes[2] = planes[2]; + mpi->stride[1] = mpi->stride[2] = p->chroma_stride; + } + mpi->width = p->stride; + + mpi->flags |= MP_IMGFLAG_DIRECT; + mpi->flags &= ~MP_IMGFLAG_DRAW_CALLBACK; +} + +static inline long +cmpe(unsigned long x, unsigned long y, unsigned long err, unsigned long e) +{ + long diff = x-y; + long unit = ((x+y+err) >> e); + long ret = (diff > unit) - (diff < -unit); + unit >>= 1; + return ret + (diff > unit) - (diff < -unit); +} + +static unsigned long +find_breaks(struct vf_priv_s *p, struct frame_stats *s) +{ + struct frame_stats *ps = &p->stats[(p->inframes-1) & 1]; + long notfilm = 5*p->in_inc - p->out_dec; + unsigned long n = s->num_blocks >> 8; + unsigned long sad_comb_cmp = cmpe(s->sad.temp, s->sad.noise, 512, 1); + unsigned long ret = 8; + + if (cmpe(s->sad.temp, s->sad.even, 512, 1) > 0) + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, + "@@@@@@@@ Bottom-first field??? 
@@@@@@@@\n"); + if (s->sad.temp > 1000 && s->sad.noise > 1000) + return 3; + if (s->interlaced_high >= 2*n && s->sad.temp > 256 && s->sad.noise > 256) + return 3; + if (s->high.noise > s->num_blocks/4 && s->sad.noise > 10000 && + s->sad.noise > 2*s->sad.even && s->sad.noise > 2*ps->sad.odd) { + // Mid-frame scene change + if (s->tiny.temp + s->interlaced_low < n || + s->low.temp + s->interlaced_high < n/4 || + s->high.temp + s->interlaced_high < n/8 || + s->sad.temp < 160) + return 1; + return 3; + } + if (s->high.temp > s->num_blocks/4 && s->sad.temp > 10000 && + s->sad.temp > 2*ps->sad.odd && s->sad.temp > 2*ps->sad.even) { + // Start frame scene change + if (s->tiny.noise + s->interlaced_low < n || + s->low.noise + s->interlaced_high < n/4 || + s->high.noise + s->interlaced_high < n/8 || + s->sad.noise < 160) + return 2; + return 3; + } + if (sad_comb_cmp == 2) + return 2; + if (sad_comb_cmp == -2) + return 1; + + if (s->tiny.odd > 3*MAX(n,s->tiny.even) + s->interlaced_low) + return 1; + if (s->tiny.even > 3*MAX(n,s->tiny.odd)+s->interlaced_low && + (!sad_comb_cmp || (s->low.noise <= n/4 && s->low.temp <= n/4))) + return 4; + + if (s->sad.noise < 64 && s->sad.temp < 64 && + s->low.noise <= n/2 && s->high.noise <= n/4 && + s->low.temp <= n/2 && s->high.temp <= n/4) + goto still; + + if (s->tiny.temp > 3*MAX(n,s->tiny.noise) + s->interlaced_low) + return 2; + if (s->tiny.noise > 3*MAX(n,s->tiny.temp) + s->interlaced_low) + return 1; + + if (s->low.odd > 3*MAX(n/4,s->low.even) + s->interlaced_high) + return 1; + if (s->low.even > 3*MAX(n/4,s->low.odd)+s->interlaced_high && + s->sad.even > 2*s->sad.odd && + (!sad_comb_cmp || (s->low.noise <= n/4 && s->low.temp <= n/4))) + return 4; + + if (s->low.temp > 3*MAX(n/4,s->low.noise) + s->interlaced_high) + return 2; + if (s->low.noise > 3*MAX(n/4,s->low.temp) + s->interlaced_high) + return 1; + + if (sad_comb_cmp == 1 && s->sad.noise < 64) + return 2; + if (sad_comb_cmp == -1 && s->sad.temp < 64) + return 1; + + if 
(s->tiny.odd <= n || (s->tiny.noise <= n/2 && s->tiny.temp <= n/2)) { + if (s->interlaced_low <= n) { + if (p->num_fields == 1) + goto still; + if (s->tiny.even <= n || ps->tiny.noise <= n/2) + /* Still frame */ + goto still; + if (s->bigger.even >= 2*MAX(n,s->bigger.odd) + s->interlaced_low) + return 4; + if (s->low.even >= 2*n + s->interlaced_low) + return 4; + goto still; + } + } + if (s->low.odd <= n/4) { + if (s->interlaced_high <= n/4) { + if (p->num_fields == 1) + goto still; + if (s->low.even <= n/4) + /* Still frame */ + goto still; + if (s->bigger.even >= 2*MAX(n/4,s->bigger.odd)+s->interlaced_high) + return 4; + if (s->low.even >= n/2 + s->interlaced_high) + return 4; + goto still; + } + } + if (s->bigger.temp > 2*MAX(n,s->bigger.noise) + s->interlaced_low) + return 2; + if (s->bigger.noise > 2*MAX(n,s->bigger.temp) + s->interlaced_low) + return 1; + if (s->bigger.temp > 2*MAX(n,s->bigger.noise) + s->interlaced_high) + return 2; + if (s->bigger.noise > 2*MAX(n,s->bigger.temp) + s->interlaced_high) + return 1; + if (s->twox.temp > 2*MAX(n,s->twox.noise) + s->interlaced_high) + return 2; + if (s->twox.noise > 2*MAX(n,s->twox.temp) + s->interlaced_high) + return 1; + if (s->bigger.even > 2*MAX(n,s->bigger.odd) + s->interlaced_low && + s->bigger.temp < n && s->bigger.noise < n) + return 4; + if (s->interlaced_low > MIN(2*n, s->tiny.odd)) + return 3; + ret = 8 + (1 << (s->sad.temp > s->sad.noise)); + still: + if (p->num_fields == 1 && p->prev_fields == 3 && notfilm >= 0 && + (s->tiny.temp <= s->tiny.noise || s->sad.temp < s->sad.noise+16)) + return 1; + if (p->notout < p->num_fields && p->iosync > 2*p->in_inc && notfilm < 0) + notfilm = 0; + if (p->num_fields < 2 || + (p->num_fields == 2 && p->prev_fields == 2 && notfilm < 0)) + return ret; + if (!notfilm && (p->prev_fields&~1) == 2) { + if (p->prev_fields + p->num_fields == 5) { + if (s->tiny.noise <= s->tiny.temp || + s->low.noise == 0 || s->low.noise < s->low.temp || + s->sad.noise < s->sad.temp+16) + 
return 2; + } + if (p->prev_fields + p->num_fields == 4) { + if (s->tiny.temp <= s->tiny.noise || + s->low.temp == 0 || s->low.temp < s->low.noise || + s->sad.temp < s->sad.noise+16) + return 1; + } + } + if (p->num_fields > 2 && + ps->sad.noise > s->sad.noise && ps->sad.noise > s->sad.temp) + return 4; + return 2 >> (s->sad.noise > s->sad.temp); +} + +#define ITOC(X) (!(X) ? ' ' : (X) + ((X)>9 ? 'a'-10 : '0')) + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + struct vf_priv_s *p = vf->priv; + unsigned char **planes, **old_planes; + struct frame_stats *s = &p->stats[p->inframes & 1]; + struct frame_stats *ps = &p->stats[(p->inframes-1) & 1]; + int swapped = 0; + const int flags = mpi->fields; + int breaks, prev; + int show_fields = 0; + int dropped_fields = 0; + double start_time, diff_time; + char prev_chflag = p->chflag; + int keep_rate; + + if (!p->planes[0][0]) init(p, mpi); + + old_planes = p->old_planes; + + if ((mpi->flags & MP_IMGFLAG_DIRECT) && mpi->priv) { + planes = mpi->priv; + mpi->priv = 0; + } else { + planes = p->planes[2 + (++p->temp_idx & 1)]; + my_memcpy_pic(planes[0], + mpi->planes[0] + p->crop_x + p->crop_y * mpi->stride[0], + p->w, p->h, p->stride, mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(planes[1], + mpi->planes[1] + p->crop_cx + p->crop_cy * mpi->stride[1], + p->cw, p->ch, p->chroma_stride, mpi->stride[1]); + my_memcpy_pic(planes[2], + mpi->planes[2] + p->crop_cx + p->crop_cy * mpi->stride[2], + p->cw, p->ch, p->chroma_stride, mpi->stride[2]); + p->num_copies++; + } + } + + p->old_planes = planes; + p->chflag = ';'; + if (flags & MP_IMGFIELD_ORDERED) { + swapped = !(flags & MP_IMGFIELD_TOP_FIRST); + p->chflag = (flags & MP_IMGFIELD_REPEAT_FIRST ? '|' : + flags & MP_IMGFIELD_TOP_FIRST ? 
':' : '.'); + } + p->swapped = swapped; + + start_time = get_time(); + if (p->chflag == '|') { + *s = ppzs; + p->iosync += p->in_inc; + } else if ((p->fast & 1) && prev_chflag == '|') + *s = pprs; + else + diff_fields(p, s, old_planes, planes); + diff_time = get_time(); + p->diff_time += diff_time - start_time; + breaks = p->inframes ? find_breaks(p, s) : 2; + p->inframes++; + keep_rate = 4*p->in_inc == p->out_dec; + + switch (breaks) { + case 0: + case 8: + case 9: + case 10: + if (!keep_rate && p->notout < p->num_fields && p->iosync < 2*p->in_inc) + break; + if (p->notout < p->num_fields) + dropped_fields = -2; + case 4: + if (keep_rate || p->iosync >= -2*p->in_inc) + show_fields = (4<<p->num_fields)-1; + break; + case 3: + if (keep_rate) + show_fields = 2; + else if (p->iosync > 0) { + if (p->notout >= p->num_fields && p->iosync > 2*p->in_inc) { + show_fields = 4; /* prev odd only */ + if (p->num_fields > 1) + show_fields |= 8; /* + prev even */ + } else { + show_fields = 2; /* even only */ + if (p->notout >= p->num_fields) + dropped_fields += p->num_fields; + } + } + break; + case 2: + if (p->iosync <= -3*p->in_inc) { + if (p->notout >= p->num_fields) + dropped_fields = p->num_fields; + break; + } + if (p->num_fields == 1) { + int prevbreak = ps->sad.noise >= 128; + if (p->iosync < 4*p->in_inc) { + show_fields = 3; + dropped_fields = prevbreak; + } else { + show_fields = 4 | (!prevbreak << 3); + if (p->notout < 1 + p->prev_fields) + dropped_fields = -!prevbreak; + } + break; + } + default: + if (keep_rate) + show_fields = 3 << (breaks & 1); + else if (p->notout >= p->num_fields && + p->iosync >= (breaks == 1 ? 
-p->in_inc : + p->in_inc << (p->num_fields == 1))) { + show_fields = (1 << (2 + p->num_fields)) - (1<<breaks); + } else { + if (p->notout >= p->num_fields) + dropped_fields += p->num_fields + 2 - breaks; + if (breaks == 1) { + if (p->iosync >= 4*p->in_inc) + show_fields = 6; + } else if (p->iosync > -3*p->in_inc) + show_fields = 3; /* odd+even */ + } + break; + } + + show_fields &= 15; + prev = p->prev_fields; + if (breaks < 8) { + if (p->num_fields == 1) + breaks &= ~4; + if (breaks) + p->num_breaks++; + if (breaks == 3) + p->prev_fields = p->num_fields = 1; + else if (breaks) { + p->prev_fields = p->num_fields + (breaks==1) - (breaks==4); + p->num_fields = breaks - (breaks == 4) + (p->chflag == '|'); + } else + p->num_fields += 2; + } else + p->num_fields += 2; + + p->iosync += 4 * p->in_inc; + if (p->chflag == '|') + p->iosync += p->in_inc; + + if (show_fields) { + p->iosync -= p->out_dec; + p->notout = !(show_fields & 1) + !(show_fields & 3); + if (((show_fields & 3) == 3 && + (s->low.noise + s->interlaced_low < (s->num_blocks>>8) || + s->sad.noise < 160)) || + ((show_fields & 12) == 12 && + (ps->low.noise + ps->interlaced_low < (s->num_blocks>>8) || + ps->sad.noise < 160))) { + p->export_count++; + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, MP_IMGTYPE_EXPORT, + MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE, + p->w, p->h); + if ((show_fields & 3) != 3) planes = old_planes; + dmpi->planes[0] = planes[0]; + dmpi->stride[0] = p->stride; + dmpi->width = mpi->width; + if (mpi->flags & MP_IMGFLAG_PLANAR) { + dmpi->planes[1] = planes[1]; + dmpi->planes[2] = planes[2]; + dmpi->stride[1] = p->chroma_stride; + dmpi->stride[2] = p->chroma_stride; + } + } else { + p->merge_count++; + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + p->w, p->h); + copy_merge_fields(p, dmpi, old_planes, planes, show_fields); + } + p->outframes++; + } else + p->notout += 2; + + if (p->verbose) + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "%lu %lu: %x %c %c 
%lu%s%s%c%s\n", + p->inframes, p->outframes, + breaks, breaks<8 && breaks>0 ? (int) p->prev_fields+'0' : ' ', + ITOC(show_fields), + p->num_breaks, 5*p->in_inc == p->out_dec && breaks<8 && + breaks>0 && ((prev&~1)!=2 || prev+p->prev_fields!=5) ? + " ######## bad telecine ########" : "", + dropped_fields ? " ======== dropped ":"", ITOC(dropped_fields), + !show_fields || (show_fields & (show_fields-1)) ? + "" : " @@@@@@@@@@@@@@@@@"); + + p->merge_time += get_time() - diff_time; + return show_fields ? ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE) : 0; +} + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* FIXME - support more formats */ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + case IMGFMT_411P: + case IMGFMT_422P: + case IMGFMT_444P: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + unsigned long cxm = 0; + unsigned long cym = 0; + struct vf_priv_s *p = vf->priv; + // rounding: + if(!IMGFMT_IS_RGB(outfmt) && !IMGFMT_IS_BGR(outfmt)){ + switch(outfmt){ + case IMGFMT_444P: + case IMGFMT_Y800: + case IMGFMT_Y8: + break; + case IMGFMT_YVU9: + case IMGFMT_IF09: + cym = 3; + case IMGFMT_411P: + cxm = 3; + break; + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + cym = 1; + default: + cxm = 1; + } + } + p->chroma_swapped = !!(p->crop_y & (cym+1)); + if (p->w) p->w += p->crop_x & cxm; + if (p->h) p->h += p->crop_y & cym; + p->crop_x &= ~cxm; + p->crop_y &= ~cym; + if (!p->w || p->w > width ) p->w = width; + if (!p->h || p->h > height) p->h = height; + if (p->crop_x + p->w > width ) p->crop_x = 0; + if (p->crop_y + p->h > height) p->crop_y = 0; + + if(!opt_screen_size_x && !opt_screen_size_y){ + d_width = d_width * p->w/width; + d_height = d_height * p->h/height; + } + return ff_vf_next_config(vf, p->w, p->h, d_width, d_height, flags, outfmt); +} + 
+static void uninit(struct vf_instance *vf) +{ + struct vf_priv_s *p = vf->priv; + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "diff_time: %.3f, merge_time: %.3f, " + "export: %lu, merge: %lu, copy: %lu\n", p->diff_time, p->merge_time, + p->export_count, p->merge_count, p->num_copies); + free(p->memory_allocated); + free(p); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + struct vf_priv_s *p; + vf->get_image = get_image; + vf->put_image = put_image; + vf->config = config; + vf->query_format = query_format; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + vf->priv = p = calloc(1, sizeof(struct vf_priv_s)); + p->out_dec = 5; + p->in_inc = 4; + p->thres.noise = 128; + p->thres.even = 128; + p->sad_thres = 64; + p->dint_thres = 4; + p->luma_only = 0; + p->fast = 3; + p->mmx2 = ff_gCpuCaps.hasMMX2 ? 1 : ff_gCpuCaps.has3DNow ? 2 : 0; + if (args) { + const char *args_remain = parse_args(p, args); + if (args_remain) { + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, + "filmdint: unknown suboption: %s\n", args_remain); + return 0; + } + if (p->out_dec < p->in_inc) { + ff_mp_msg(MSGT_VFILTER, MSGL_FATAL, + "filmdint: increasing the frame rate is not supported\n"); + return 0; + } + } + if (p->mmx2 > 2) + p->mmx2 = 0; +#if !HAVE_MMX + p->mmx2 = 0; +#endif +#if !HAVE_AMD3DNOW + p->mmx2 &= 1; +#endif + p->thres.odd = p->thres.even; + p->thres.temp = p->thres.noise; + p->diff_time = 0; + p->merge_time = 0; + return 1; +} + +const vf_info_t ff_vf_info_filmdint = { + "Advanced inverse telecine filer", + "filmdint", + "Zoltan Hidvegi", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_fspp.c b/libavfilter/libmpcodecs/vf_fspp.c new file mode 100644 index 0000000..a8a33e2 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_fspp.c @@ -0,0 +1,2118 @@ +/* + * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at> + * Copyright (C) 2005 Nikolaj Poroshin <porosh3@psu.ru> + * + * This file is part of MPlayer. 
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * This implementation is based on an algorithm described in
+ * "Aria Nosratinia Embedded Post-Processing for
+ * Enhancement of Compressed Images (1999)"
+ * (http://citeseer.nj.nec.com/nosratinia99embedded.html)
+ * Further, with splitting (i)dct into hor/ver passes, one of them can be
+ * performed once per block, not pixel. This allows for much better speed.
+ */ + +/* + Heavily optimized version of SPP filter by Nikolaj + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "config.h" + +#include "mp_msg.h" +#include "cpudetect.h" +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "av_helpers.h" +#include "libvo/fastmemcpy.h" + +#include "libavutil/internal.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/mem.h" +#include "libavutil/x86/asm.h" +#include "libavcodec/avcodec.h" +#include "libavcodec/dsputil.h" + +#undef free +#undef malloc + +//===========================================================================// +#define BLOCKSZ 12 + +static const short custom_threshold[64]= +// values (296) can't be too high +// -it causes too big quant dependence +// or maybe overflow(check), which results in some flashing +{ 71, 296, 295, 237, 71, 40, 38, 19, + 245, 193, 185, 121, 102, 73, 53, 27, + 158, 129, 141, 107, 97, 73, 50, 26, + 102, 116, 109, 98, 82, 66, 45, 23, + 71, 94, 95, 81, 70, 56, 38, 20, + 56, 77, 74, 66, 56, 44, 30, 15, + 38, 53, 50, 45, 38, 30, 21, 11, + 20, 27, 26, 23, 20, 15, 11, 5 +}; + +static const uint8_t __attribute__((aligned(32))) dither[8][8]={ + { 0, 48, 12, 60, 3, 51, 15, 63, }, + { 32, 16, 44, 28, 35, 19, 47, 31, }, + { 8, 56, 4, 52, 11, 59, 7, 55, }, + { 40, 24, 36, 20, 43, 27, 39, 23, }, + { 2, 50, 14, 62, 1, 49, 13, 61, }, + { 34, 18, 46, 30, 33, 17, 45, 29, }, + { 10, 58, 6, 54, 9, 57, 5, 53, }, + { 42, 26, 38, 22, 41, 25, 37, 21, }, +}; + +struct vf_priv_s { //align 16 ! 
+ uint64_t threshold_mtx_noq[8*2]; + uint64_t threshold_mtx[8*2];//used in both C & MMX (& later SSE2) versions + + int log2_count; + int temp_stride; + int qp; + int mpeg2; + int prev_q; + uint8_t *src; + int16_t *temp; + int bframes; + char *non_b_qp; +}; + + +#if !HAVE_MMX + +//This func reads from 1 slice, 1 and clears 0 & 1 +static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale) +{int y, x; +#define STORE(pos) \ + temp= (src[x + pos] + (d[pos]>>log2_scale))>>(6-log2_scale); \ + src[x + pos]=src[x + pos - 8*src_stride]=0; \ + if(temp & 0x100) temp= ~(temp>>31); \ + dst[x + pos]= temp; + + for(y=0; y<height; y++){ + const uint8_t *d= dither[y]; + for(x=0; x<width; x+=8){ + int temp; + STORE(0); + STORE(1); + STORE(2); + STORE(3); + STORE(4); + STORE(5); + STORE(6); + STORE(7); + } + src+=src_stride; + dst+=dst_stride; + } +} + +//This func reads from 2 slices, 0 & 2 and clears 2-nd +static void store_slice2_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale) +{int y, x; +#define STORE2(pos) \ + temp= (src[x + pos] + src[x + pos + 16*src_stride] + (d[pos]>>log2_scale))>>(6-log2_scale); \ + src[x + pos + 16*src_stride]=0; \ + if(temp & 0x100) temp= ~(temp>>31); \ + dst[x + pos]= temp; + + for(y=0; y<height; y++){ + const uint8_t *d= dither[y]; + for(x=0; x<width; x+=8){ + int temp; + STORE2(0); + STORE2(1); + STORE2(2); + STORE2(3); + STORE2(4); + STORE2(5); + STORE2(6); + STORE2(7); + } + src+=src_stride; + dst+=dst_stride; + } +} + +static void mul_thrmat_c(struct vf_priv_s *p,int q) +{ + int a; + for(a=0;a<64;a++) + ((short*)p->threshold_mtx)[a]=q * ((short*)p->threshold_mtx_noq)[a];//ints faster in C +} + +static void column_fidct_c(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt); +static void row_idct_c(int16_t* workspace, + int16_t* output_adr, int output_stride, int cnt); +static void row_fdct_c(int16_t *data, const uint8_t 
*pixels, int line_size, int cnt); + +//this is rather ugly, but there is no need for function pointers +#define store_slice_s store_slice_c +#define store_slice2_s store_slice2_c +#define mul_thrmat_s mul_thrmat_c +#define column_fidct_s column_fidct_c +#define row_idct_s row_idct_c +#define row_fdct_s row_fdct_c + +#else /* HAVE_MMX */ + +//This func reads from 1 slice, 1 and clears 0 & 1 +static void store_slice_mmx(uint8_t *dst, int16_t *src, long dst_stride, long src_stride, long width, long height, long log2_scale) +{ + const uint8_t *od=&dither[0][0]; + const uint8_t *end=&dither[height][0]; + width = (width+7)&~7; + dst_stride-=width; + //src_stride=(src_stride-width)*2; + __asm__ volatile( + "mov %5, %%"REG_d" \n\t" + "mov %6, %%"REG_S" \n\t" + "mov %7, %%"REG_D" \n\t" + "mov %1, %%"REG_a" \n\t" + "movd %%"REG_d", %%mm5 \n\t" + "xor $-1, %%"REG_d" \n\t" + "mov %%"REG_a", %%"REG_c" \n\t" + "add $7, %%"REG_d" \n\t" + "neg %%"REG_a" \n\t" + "sub %0, %%"REG_c" \n\t" + "add %%"REG_c", %%"REG_c" \n\t" + "movd %%"REG_d", %%mm2 \n\t" + "mov %%"REG_c", %1 \n\t" + "mov %2, %%"REG_d" \n\t" + "shl $4, %%"REG_a" \n\t" + + "2: \n\t" + "movq (%%"REG_d"), %%mm3 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pxor %%mm7, %%mm7 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "punpckhbw %%mm7, %%mm4 \n\t" + "mov %0, %%"REG_c" \n\t" + "psraw %%mm5, %%mm3 \n\t" + "psraw %%mm5, %%mm4 \n\t" + "1: \n\t" + "movq %%mm7, (%%"REG_S",%%"REG_a",) \n\t" + "movq (%%"REG_S"), %%mm0 \n\t" + "movq 8(%%"REG_S"), %%mm1 \n\t" + + "movq %%mm7, 8(%%"REG_S",%%"REG_a",) \n\t" + "paddw %%mm3, %%mm0 \n\t" + "paddw %%mm4, %%mm1 \n\t" + + "movq %%mm7, (%%"REG_S") \n\t" + "psraw %%mm2, %%mm0 \n\t" + "psraw %%mm2, %%mm1 \n\t" + + "movq %%mm7, 8(%%"REG_S") \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "add $16, %%"REG_S" \n\t" + + "movq %%mm0, (%%"REG_D") \n\t" + "add $8, %%"REG_D" \n\t" + "sub $8, %%"REG_c" \n\t" + "jg 1b \n\t" + "add %1, %%"REG_S" \n\t" + "add $8, %%"REG_d" \n\t" + "add %3, %%"REG_D" \n\t" + "cmp %4, %%"REG_d" 
\n\t" + "jl 2b \n\t" + + : + : "m" (width), "m" (src_stride), "erm" (od), "m" (dst_stride), "erm" (end), + "m" (log2_scale), "m" (src), "m" (dst) //input + : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D + ); +} + +//This func reads from 2 slices, 0 & 2 and clears 2-nd +static void store_slice2_mmx(uint8_t *dst, int16_t *src, long dst_stride, long src_stride, long width, long height, long log2_scale) +{ + const uint8_t *od=&dither[0][0]; + const uint8_t *end=&dither[height][0]; + width = (width+7)&~7; + dst_stride-=width; + //src_stride=(src_stride-width)*2; + __asm__ volatile( + "mov %5, %%"REG_d" \n\t" + "mov %6, %%"REG_S" \n\t" + "mov %7, %%"REG_D" \n\t" + "mov %1, %%"REG_a" \n\t" + "movd %%"REG_d", %%mm5 \n\t" + "xor $-1, %%"REG_d" \n\t" + "mov %%"REG_a", %%"REG_c" \n\t" + "add $7, %%"REG_d" \n\t" + "sub %0, %%"REG_c" \n\t" + "add %%"REG_c", %%"REG_c" \n\t" + "movd %%"REG_d", %%mm2 \n\t" + "mov %%"REG_c", %1 \n\t" + "mov %2, %%"REG_d" \n\t" + "shl $5, %%"REG_a" \n\t" + + "2: \n\t" + "movq (%%"REG_d"), %%mm3 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pxor %%mm7, %%mm7 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "punpckhbw %%mm7, %%mm4 \n\t" + "mov %0, %%"REG_c" \n\t" + "psraw %%mm5, %%mm3 \n\t" + "psraw %%mm5, %%mm4 \n\t" + "1: \n\t" + "movq (%%"REG_S"), %%mm0 \n\t" + "movq 8(%%"REG_S"), %%mm1 \n\t" + "paddw %%mm3, %%mm0 \n\t" + + "paddw (%%"REG_S",%%"REG_a",), %%mm0 \n\t" + "paddw %%mm4, %%mm1 \n\t" + "movq 8(%%"REG_S",%%"REG_a",), %%mm6 \n\t" + + "movq %%mm7, (%%"REG_S",%%"REG_a",) \n\t" + "psraw %%mm2, %%mm0 \n\t" + "paddw %%mm6, %%mm1 \n\t" + + "movq %%mm7, 8(%%"REG_S",%%"REG_a",) \n\t" + "psraw %%mm2, %%mm1 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + + "movq %%mm0, (%%"REG_D") \n\t" + "add $16, %%"REG_S" \n\t" + "add $8, %%"REG_D" \n\t" + "sub $8, %%"REG_c" \n\t" + "jg 1b \n\t" + "add %1, %%"REG_S" \n\t" + "add $8, %%"REG_d" \n\t" + "add %3, %%"REG_D" \n\t" + "cmp %4, %%"REG_d" \n\t" + "jl 2b \n\t" + + : + : "m" (width), "m" (src_stride), "erm" (od), "m" (dst_stride), 
"erm" (end), + "m" (log2_scale), "m" (src), "m" (dst) //input + : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_D, "%"REG_S + ); +} + +static void mul_thrmat_mmx(struct vf_priv_s *p, int q) +{ + uint64_t *adr=&p->threshold_mtx_noq[0]; + __asm__ volatile( + "movd %0, %%mm7 \n\t" + "add $8*8*2, %%"REG_D" \n\t" + "movq 0*8(%%"REG_S"), %%mm0 \n\t" + "punpcklwd %%mm7, %%mm7 \n\t" + "movq 1*8(%%"REG_S"), %%mm1 \n\t" + "punpckldq %%mm7, %%mm7 \n\t" + "pmullw %%mm7, %%mm0 \n\t" + + "movq 2*8(%%"REG_S"), %%mm2 \n\t" + "pmullw %%mm7, %%mm1 \n\t" + + "movq 3*8(%%"REG_S"), %%mm3 \n\t" + "pmullw %%mm7, %%mm2 \n\t" + + "movq %%mm0, 0*8(%%"REG_D") \n\t" + "movq 4*8(%%"REG_S"), %%mm4 \n\t" + "pmullw %%mm7, %%mm3 \n\t" + + "movq %%mm1, 1*8(%%"REG_D") \n\t" + "movq 5*8(%%"REG_S"), %%mm5 \n\t" + "pmullw %%mm7, %%mm4 \n\t" + + "movq %%mm2, 2*8(%%"REG_D") \n\t" + "movq 6*8(%%"REG_S"), %%mm6 \n\t" + "pmullw %%mm7, %%mm5 \n\t" + + "movq %%mm3, 3*8(%%"REG_D") \n\t" + "movq 7*8+0*8(%%"REG_S"), %%mm0 \n\t" + "pmullw %%mm7, %%mm6 \n\t" + + "movq %%mm4, 4*8(%%"REG_D") \n\t" + "movq 7*8+1*8(%%"REG_S"), %%mm1 \n\t" + "pmullw %%mm7, %%mm0 \n\t" + + "movq %%mm5, 5*8(%%"REG_D") \n\t" + "movq 7*8+2*8(%%"REG_S"), %%mm2 \n\t" + "pmullw %%mm7, %%mm1 \n\t" + + "movq %%mm6, 6*8(%%"REG_D") \n\t" + "movq 7*8+3*8(%%"REG_S"), %%mm3 \n\t" + "pmullw %%mm7, %%mm2 \n\t" + + "movq %%mm0, 7*8+0*8(%%"REG_D") \n\t" + "movq 7*8+4*8(%%"REG_S"), %%mm4 \n\t" + "pmullw %%mm7, %%mm3 \n\t" + + "movq %%mm1, 7*8+1*8(%%"REG_D") \n\t" + "movq 7*8+5*8(%%"REG_S"), %%mm5 \n\t" + "pmullw %%mm7, %%mm4 \n\t" + + "movq %%mm2, 7*8+2*8(%%"REG_D") \n\t" + "movq 7*8+6*8(%%"REG_S"), %%mm6 \n\t" + "pmullw %%mm7, %%mm5 \n\t" + + "movq %%mm3, 7*8+3*8(%%"REG_D") \n\t" + "movq 14*8+0*8(%%"REG_S"), %%mm0 \n\t" + "pmullw %%mm7, %%mm6 \n\t" + + "movq %%mm4, 7*8+4*8(%%"REG_D") \n\t" + "movq 14*8+1*8(%%"REG_S"), %%mm1 \n\t" + "pmullw %%mm7, %%mm0 \n\t" + + "movq %%mm5, 7*8+5*8(%%"REG_D") \n\t" + "pmullw %%mm7, %%mm1 \n\t" + + "movq %%mm6, 
7*8+6*8(%%"REG_D") \n\t" + "movq %%mm0, 14*8+0*8(%%"REG_D") \n\t" + "movq %%mm1, 14*8+1*8(%%"REG_D") \n\t" + + : "+g" (q), "+S" (adr), "+D" (adr) + : + ); +} + +static void column_fidct_mmx(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt); +static void row_idct_mmx(int16_t* workspace, + int16_t* output_adr, int output_stride, int cnt); +static void row_fdct_mmx(int16_t *data, const uint8_t *pixels, int line_size, int cnt); + +#define store_slice_s store_slice_mmx +#define store_slice2_s store_slice2_mmx +#define mul_thrmat_s mul_thrmat_mmx +#define column_fidct_s column_fidct_mmx +#define row_idct_s row_idct_mmx +#define row_fdct_s row_fdct_mmx +#endif // HAVE_MMX + +static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, + int dst_stride, int src_stride, + int width, int height, + uint8_t *qp_store, int qp_stride, int is_luma) +{ + int x, x0, y, es, qy, t; + const int stride= is_luma ? p->temp_stride : (width+16);//((width+16+15)&(~15)) + const int step=6-p->log2_count; + const int qps= 3 + is_luma; + int32_t __attribute__((aligned(32))) block_align[4*8*BLOCKSZ+ 4*8*BLOCKSZ]; + int16_t *block= (int16_t *)block_align; + int16_t *block3=(int16_t *)(block_align+4*8*BLOCKSZ); + + memset(block3, 0, 4*8*BLOCKSZ); + + //p->src=src-src_stride*8-8;//! 
+ if (!src || !dst) return; // HACK avoid crash for Y8 colourspace + for(y=0; y<height; y++){ + int index= 8 + 8*stride + y*stride; + fast_memcpy(p->src + index, src + y*src_stride, width);//this line can be avoided by using DR & user fr.buffers + for(x=0; x<8; x++){ + p->src[index - x - 1]= p->src[index + x ]; + p->src[index + width + x ]= p->src[index + width - x - 1]; + } + } + for(y=0; y<8; y++){ + fast_memcpy(p->src + ( 7-y)*stride, p->src + ( y+8)*stride, stride); + fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride); + } + //FIXME (try edge emu) + + for(y=8; y<24; y++) + memset(p->temp+ 8 +y*stride, 0,width*sizeof(int16_t)); + + for(y=step; y<height+8; y+=step){ //step= 1,2 + qy=y-4; + if (qy>height-1) qy=height-1; + if (qy<0) qy=0; + qy=(qy>>qps)*qp_stride; + row_fdct_s(block, p->src + y*stride +2-(y&1), stride, 2); + for(x0=0; x0<width+8-8*(BLOCKSZ-1); x0+=8*(BLOCKSZ-1)){ + row_fdct_s(block+8*8, p->src + y*stride+8+x0 +2-(y&1), stride, 2*(BLOCKSZ-1)); + if(p->qp) + column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block+0*8, block3+0*8, 8*(BLOCKSZ-1)); //yes, this is a HOTSPOT + else + for (x=0; x<8*(BLOCKSZ-1); x+=8) { + t=x+x0-2; //correct t=x+x0-2-(y&1), but its the same + if (t<0) t=0;//t always < width-2 + t=qp_store[qy+(t>>qps)]; + t=norm_qscale(t, p->mpeg2); + if (t!=p->prev_q) p->prev_q=t, mul_thrmat_s(p, t); + column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block+x*8, block3+x*8, 8); //yes, this is a HOTSPOT + } + row_idct_s(block3+0*8, p->temp + (y&15)*stride+x0+2-(y&1), stride, 2*(BLOCKSZ-1)); + memmove(block, block+(BLOCKSZ-1)*64, 8*8*sizeof(int16_t)); //cycling + memmove(block3, block3+(BLOCKSZ-1)*64, 6*8*sizeof(int16_t)); + } + // + es=width+8-x0; // 8, ... 
+ if (es>8) + row_fdct_s(block+8*8, p->src + y*stride+8+x0 +2-(y&1), stride, (es-4)>>2); + column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block, block3, es&(~1)); + row_idct_s(block3+0*8, p->temp + (y&15)*stride+x0+2-(y&1), stride, es>>2); + {const int y1=y-8+step;//l5-7 l4-6 + if (!(y1&7) && y1) { + if (y1&8) store_slice_s(dst + (y1-8)*dst_stride, p->temp+ 8 +8*stride, + dst_stride, stride, width, 8, 5-p->log2_count); + else store_slice2_s(dst + (y1-8)*dst_stride, p->temp+ 8 +0*stride, + dst_stride, stride, width, 8, 5-p->log2_count); + } } + } + + if (y&7) { // == height & 7 + if (y&8) store_slice_s(dst + ((y-8)&~7)*dst_stride, p->temp+ 8 +8*stride, + dst_stride, stride, width, y&7, 5-p->log2_count); + else store_slice2_s(dst + ((y-8)&~7)*dst_stride, p->temp+ 8 +0*stride, + dst_stride, stride, width, y&7, 5-p->log2_count); + } +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + int h= (height+16+15)&(~15); + + vf->priv->temp_stride= (width+16+15)&(~15); + vf->priv->temp= (int16_t*)av_mallocz(vf->priv->temp_stride*3*8*sizeof(int16_t)); + //this can also be avoided, see above + vf->priv->src = (uint8_t*)av_malloc(vf->priv->temp_stride*h*sizeof(uint8_t)); + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi) +{ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change + // ok, we can do pp in-place (or pp disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + mpi->type, mpi->flags, mpi->width, mpi->height); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct 
vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // no DR, so get a new image! hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, + MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE, + mpi->width,mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + }else{ + dmpi=vf->dmpi; + } + + vf->priv->mpeg2= mpi->qscale_type; + if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){ + int w = mpi->qstride; + int h = (mpi->h + 15) >> 4; + if (!w) { + w = (mpi->w + 15) >> 4; + h = 1; + } + if(!vf->priv->non_b_qp) + vf->priv->non_b_qp= malloc(w*h); + fast_memcpy(vf->priv->non_b_qp, mpi->qscale, w*h); + } + if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){ + char *qp_tab= vf->priv->non_b_qp; + if(vf->priv->bframes || !qp_tab) + qp_tab= mpi->qscale; + + if(qp_tab || vf->priv->qp){ + filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], + mpi->w, mpi->h, qp_tab, mpi->qstride, 1); + filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], + mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0); + filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], + mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0); + }else{ + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]); + memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]); + } + } + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t"); +#endif +#if HAVE_MMX2 + if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t"); +#endif + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct 
vf_instance *vf) +{ + if(!vf->priv) return; + + av_free(vf->priv->temp); + vf->priv->temp= NULL; + av_free(vf->priv->src); + vf->priv->src= NULL; + //free(vf->priv->avctx); + //vf->priv->avctx= NULL; + free(vf->priv->non_b_qp); + vf->priv->non_b_qp= NULL; + + av_free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + switch(fmt){ + case IMGFMT_YVU9: + case IMGFMT_IF09: + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_CLPL: + case IMGFMT_Y800: + case IMGFMT_Y8: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return ff_vf_next_query_format(vf,fmt); + } + return 0; +} + +static int control(struct vf_instance *vf, int request, void* data) +{ + switch(request){ + case VFCTRL_QUERY_MAX_PP_LEVEL: + return 5; + case VFCTRL_SET_PP_LEVEL: + vf->priv->log2_count= *((unsigned int*)data); + if (vf->priv->log2_count < 4) vf->priv->log2_count=4; + return CONTROL_TRUE; + } + return ff_vf_next_control(vf,request,data); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + int i=0, bias; + int custom_threshold_m[64]; + int log2c=-1; + + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->control= control; + vf->priv=av_mallocz(sizeof(struct vf_priv_s));//assumes align 16 ! 
+ + ff_init_avcodec(); + + //vf->priv->avctx= avcodec_alloc_context(); + //dsputil_init(&vf->priv->dsp, vf->priv->avctx); + + vf->priv->log2_count= 4; + vf->priv->bframes = 0; + + if (args) sscanf(args, "%d:%d:%d:%d", &log2c, &vf->priv->qp, &i, &vf->priv->bframes); + + if( log2c >=4 && log2c <=5 ) + vf->priv->log2_count = log2c; + else if( log2c >= 6 ) + vf->priv->log2_count = 5; + + if(vf->priv->qp < 0) + vf->priv->qp = 0; + + if (i < -15) i = -15; + if (i > 32) i = 32; + + bias= (1<<4)+i; //regulable + vf->priv->prev_q=0; + // + for(i=0;i<64;i++) //FIXME: tune custom_threshold[] and remove this ! + custom_threshold_m[i]=(int)(custom_threshold[i]*(bias/71.)+ 0.5); + for(i=0;i<8;i++){ + vf->priv->threshold_mtx_noq[2*i]=(uint64_t)custom_threshold_m[i*8+2] + |(((uint64_t)custom_threshold_m[i*8+6])<<16) + |(((uint64_t)custom_threshold_m[i*8+0])<<32) + |(((uint64_t)custom_threshold_m[i*8+4])<<48); + vf->priv->threshold_mtx_noq[2*i+1]=(uint64_t)custom_threshold_m[i*8+5] + |(((uint64_t)custom_threshold_m[i*8+3])<<16) + |(((uint64_t)custom_threshold_m[i*8+1])<<32) + |(((uint64_t)custom_threshold_m[i*8+7])<<48); + } + + if (vf->priv->qp) vf->priv->prev_q=vf->priv->qp, mul_thrmat_s(vf->priv, vf->priv->qp); + + return 1; +} + +const vf_info_t ff_vf_info_fspp = { + "fast simple postprocess", + "fspp", + "Michael Niedermayer, Nikolaj Poroshin", + "", + vf_open, + NULL +}; + +//==================================================================== +//Specific spp's dct, idct and threshold functions +//I'd prefer to have them in the separate file. + +//#define MANGLE(a) #a + +//typedef int16_t int16_t; //! 
only int16_t + +#define DCTSIZE 8 +#define DCTSIZE_S "8" + +#define FIX(x,s) ((int) ((x) * (1<<s) + 0.5)&0xffff) +#define C64(x) ((uint64_t)((x)|(x)<<16))<<32 | (uint64_t)(x) | (uint64_t)(x)<<16 +#define FIX64(x,s) C64(FIX(x,s)) + +#define MULTIPLY16H(x,k) (((x)*(k))>>16) +#define THRESHOLD(r,x,t) if(((unsigned)((x)+t))>t*2) r=(x);else r=0; +#define DESCALE(x,n) (((x) + (1 << ((n)-1))) >> n) + +#if HAVE_MMX + +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_382683433)=FIX64(0.382683433, 14); +DECLARE_ALIGNED(8, uint64_t, ff_MM_FIX_0_541196100)=FIX64(0.541196100, 14); +DECLARE_ALIGNED(8, uint64_t, ff_MM_FIX_0_707106781)=FIX64(0.707106781, 14); +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_306562965)=FIX64(1.306562965, 14); + +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_414213562_A)=FIX64(1.414213562, 14); + +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_847759065)=FIX64(1.847759065, 13); +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_2_613125930)=FIX64(-2.613125930, 13); //- +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_414213562)=FIX64(1.414213562, 13); +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_082392200)=FIX64(1.082392200, 13); +//for t3,t5,t7 == 0 shortcut +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_847759065)=FIX64(0.847759065, 14); +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_566454497)=FIX64(0.566454497, 14); +DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_198912367)=FIX64(0.198912367, 14); + +DECLARE_ASM_CONST(8, uint64_t, MM_DESCALE_RND)=C64(4); +DECLARE_ASM_CONST(8, uint64_t, MM_2)=C64(2); + +#else /* !HAVE_MMX */ + +typedef int32_t int_simd16_t; +static const int16_t FIX_0_382683433=FIX(0.382683433, 14); +static const int16_t FIX_0_541196100=FIX(0.541196100, 14); +static const int16_t FIX_0_707106781=FIX(0.707106781, 14); +static const int16_t FIX_1_306562965=FIX(1.306562965, 14); +static const int16_t FIX_1_414213562_A=FIX(1.414213562, 14); +static const int16_t FIX_1_847759065=FIX(1.847759065, 13); +static const int16_t FIX_2_613125930=FIX(-2.613125930, 13); //- +static const int16_t 
FIX_1_414213562=FIX(1.414213562, 13); +static const int16_t FIX_1_082392200=FIX(1.082392200, 13); + +#endif + +#if !HAVE_MMX + +static void column_fidct_c(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt) +{ + int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_simd16_t tmp10, tmp11, tmp12, tmp13; + int_simd16_t z1,z2,z3,z4,z5, z10, z11, z12, z13; + int_simd16_t d0, d1, d2, d3, d4, d5, d6, d7; + + int16_t* dataptr; + int16_t* wsptr; + int16_t *threshold; + int ctr; + + dataptr = data; + wsptr = output; + + for (; cnt > 0; cnt-=2) { //start positions + threshold=(int16_t*)thr_adr;//threshold_mtx + for (ctr = DCTSIZE; ctr > 0; ctr--) { + // Process columns from input, add to output. + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; + tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; + + tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; + tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; + + tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; + + tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; + tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; + + // Even part of FDCT + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + + d0 = tmp10 + tmp11; + d4 = tmp10 - tmp11; + + z1 = MULTIPLY16H((tmp12 + tmp13) <<2, FIX_0_707106781); + d2 = tmp13 + z1; + d6 = tmp13 - z1; + + // Even part of IDCT + + THRESHOLD(tmp0, d0, threshold[0*8]); + THRESHOLD(tmp1, d2, threshold[2*8]); + THRESHOLD(tmp2, d4, threshold[4*8]); + THRESHOLD(tmp3, d6, threshold[6*8]); + tmp0+=2; + tmp10 = (tmp0 + tmp2)>>2; + tmp11 = (tmp0 - tmp2)>>2; + + tmp13 = (tmp1 + tmp3)>>2; //+2 ! 
(psnr decides) + tmp12 = MULTIPLY16H((tmp1 - tmp3), FIX_1_414213562_A) - tmp13; //<<2 + + tmp0 = tmp10 + tmp13; //->temps + tmp3 = tmp10 - tmp13; //->temps + tmp1 = tmp11 + tmp12; //->temps + tmp2 = tmp11 - tmp12; //->temps + + // Odd part of FDCT + + tmp10 = tmp4 + tmp5; + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + z5 = MULTIPLY16H((tmp10 - tmp12)<<2, FIX_0_382683433); + z2 = MULTIPLY16H(tmp10 <<2, FIX_0_541196100) + z5; + z4 = MULTIPLY16H(tmp12 <<2, FIX_1_306562965) + z5; + z3 = MULTIPLY16H(tmp11 <<2, FIX_0_707106781); + + z11 = tmp7 + z3; + z13 = tmp7 - z3; + + d5 = z13 + z2; + d3 = z13 - z2; + d1 = z11 + z4; + d7 = z11 - z4; + + // Odd part of IDCT + + THRESHOLD(tmp4, d1, threshold[1*8]); + THRESHOLD(tmp5, d3, threshold[3*8]); + THRESHOLD(tmp6, d5, threshold[5*8]); + THRESHOLD(tmp7, d7, threshold[7*8]); + + //Simd version uses here a shortcut for the tmp5,tmp6,tmp7 == 0 + z13 = tmp6 + tmp5; + z10 = (tmp6 - tmp5)<<1; + z11 = tmp4 + tmp7; + z12 = (tmp4 - tmp7)<<1; + + tmp7 = (z11 + z13)>>2; //+2 ! + tmp11 = MULTIPLY16H((z11 - z13)<<1, FIX_1_414213562); + z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065); + tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5; + tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - !! 
+ + tmp6 = tmp12 - tmp7; + tmp5 = tmp11 - tmp6; + tmp4 = tmp10 + tmp5; + + wsptr[DCTSIZE*0]+= (tmp0 + tmp7); + wsptr[DCTSIZE*1]+= (tmp1 + tmp6); + wsptr[DCTSIZE*2]+= (tmp2 + tmp5); + wsptr[DCTSIZE*3]+= (tmp3 - tmp4); + wsptr[DCTSIZE*4]+= (tmp3 + tmp4); + wsptr[DCTSIZE*5]+= (tmp2 - tmp5); + wsptr[DCTSIZE*6]= (tmp1 - tmp6); + wsptr[DCTSIZE*7]= (tmp0 - tmp7); + // + dataptr++; //next column + wsptr++; + threshold++; + } + dataptr+=8; //skip each second start pos + wsptr +=8; + } +} + +#else /* HAVE_MMX */ + +static void column_fidct_mmx(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt) +{ + uint64_t __attribute__((aligned(8))) temps[4]; + __asm__ volatile( + ASMALIGN(4) + "1: \n\t" + "movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm1 \n\t" + // + "movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm7 \n\t" + "movq %%mm1, %%mm0 \n\t" + + "paddw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm1 \n\t" //t0 + "movq %%mm7, %%mm3 \n\t" + + "paddw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm7 \n\t" //t3 + "movq %%mm1, %%mm5 \n\t" + + "movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm6 \n\t" + "psubw %%mm7, %%mm1 \n\t" //t13 + + "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t" + "movq %%mm6, %%mm4 \n\t" + + "paddw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm6 \n\t" //t1 + "paddw %%mm7, %%mm5 \n\t" //t10 + + "paddw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t2 + "movq %%mm6, %%mm7 \n\t" + + "paddw %%mm2, %%mm6 \n\t" //t11 + "psubw %%mm2, %%mm7 \n\t" //t12 + + "movq %%mm5, %%mm2 \n\t" + "paddw %%mm6, %%mm5 \n\t" //d0 + // i0 t13 t12 i3 i1 d0 - d4 + "psubw %%mm6, %%mm2 \n\t" //d4 + "paddw %%mm1, %%mm7 \n\t" + + "movq 4*16(%%"REG_d"), %%mm6 \n\t" + "psllw $2, %%mm7 \n\t" + + "psubw 0*16(%%"REG_d"), %%mm5 \n\t" + "psubw %%mm6, %%mm2 \n\t" + + "paddusw 0*16(%%"REG_d"), %%mm5 \n\t" + "paddusw %%mm6, %%mm2 \n\t" + + "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm7 \n\t" + // + "paddw 0*16(%%"REG_d"), %%mm5 \n\t" + "paddw %%mm6, %%mm2 \n\t" + + "psubusw 0*16(%%"REG_d"), %%mm5 \n\t" + "psubusw %%mm6, %%mm2 \n\t" + +//This func is totally compute-bound, 
operates at huge speed. So, DC shortcut +// at this place isn't worthwhile due to BTB miss penalty (checked on Pent. 3). +//However, typical numbers: nondc - 29%%, dc - 46%%, zero - 25%%. All <> 0 case is very rare. + "paddw "MANGLE(MM_2)", %%mm5 \n\t" + "movq %%mm2, %%mm6 \n\t" + + "paddw %%mm5, %%mm2 \n\t" + "psubw %%mm6, %%mm5 \n\t" + + "movq %%mm1, %%mm6 \n\t" + "paddw %%mm7, %%mm1 \n\t" //d2 + + "psubw 2*16(%%"REG_d"), %%mm1 \n\t" + "psubw %%mm7, %%mm6 \n\t" //d6 + + "movq 6*16(%%"REG_d"), %%mm7 \n\t" + "psraw $2, %%mm5 \n\t" + + "paddusw 2*16(%%"REG_d"), %%mm1 \n\t" + "psubw %%mm7, %%mm6 \n\t" + // t7 d2 /t11 t4 t6 - d6 /t10 + + "paddw 2*16(%%"REG_d"), %%mm1 \n\t" + "paddusw %%mm7, %%mm6 \n\t" + + "psubusw 2*16(%%"REG_d"), %%mm1 \n\t" + "paddw %%mm7, %%mm6 \n\t" + + "psubw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm3 \n\t" + "psubusw %%mm7, %%mm6 \n\t" + + //movq [edi+"DCTSIZE_S"*2*2], mm1 + //movq [edi+"DCTSIZE_S"*6*2], mm6 + "movq %%mm1, %%mm7 \n\t" + "psraw $2, %%mm2 \n\t" + + "psubw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm4 \n\t" + "psubw %%mm6, %%mm1 \n\t" + + "psubw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm0 \n\t" + "paddw %%mm7, %%mm6 \n\t" //'t13 + + "psraw $2, %%mm6 \n\t" //paddw mm6, MM_2 !! --- + "movq %%mm2, %%mm7 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm1 \n\t" + "paddw %%mm6, %%mm2 \n\t" //'t0 + + "movq %%mm2, 0*8+%3 \n\t" //! 
+ "psubw %%mm6, %%mm7 \n\t" //'t3 + + "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t" + "psubw %%mm6, %%mm1 \n\t" //'t12 + + "psubw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t5 + "movq %%mm5, %%mm6 \n\t" + + "movq %%mm7, 3*8+%3 \n\t" + "paddw %%mm2, %%mm3 \n\t" //t10 + + "paddw %%mm4, %%mm2 \n\t" //t11 + "paddw %%mm0, %%mm4 \n\t" //t12 + + "movq %%mm3, %%mm7 \n\t" + "psubw %%mm4, %%mm3 \n\t" + + "psllw $2, %%mm3 \n\t" + "psllw $2, %%mm7 \n\t" //opt for P6 + + "pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t" + "psllw $2, %%mm4 \n\t" + + "pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm7 \n\t" + "psllw $2, %%mm2 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm4 \n\t" + "paddw %%mm1, %%mm5 \n\t" //'t1 + + "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm2 \n\t" + "psubw %%mm1, %%mm6 \n\t" //'t2 + // t7 't12 't11 t4 t6 - 't13 't10 --- + + "paddw %%mm3, %%mm7 \n\t" //z2 + + "movq %%mm5, 1*8+%3 \n\t" + "paddw %%mm3, %%mm4 \n\t" //z4 + + "movq 3*16(%%"REG_d"), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + + "movq %%mm6, 2*8+%3 \n\t" + "psubw %%mm2, %%mm1 \n\t" //z13 + +//=== + "paddw %%mm2, %%mm0 \n\t" //z11 + "movq %%mm1, %%mm5 \n\t" + + "movq 5*16(%%"REG_d"), %%mm2 \n\t" + "psubw %%mm7, %%mm1 \n\t" //d3 + + "paddw %%mm7, %%mm5 \n\t" //d5 + "psubw %%mm3, %%mm1 \n\t" + + "movq 1*16(%%"REG_d"), %%mm7 \n\t" + "psubw %%mm2, %%mm5 \n\t" + + "movq %%mm0, %%mm6 \n\t" + "paddw %%mm4, %%mm0 \n\t" //d1 + + "paddusw %%mm3, %%mm1 \n\t" + "psubw %%mm4, %%mm6 \n\t" //d7 + + // d1 d3 - - - d5 d7 - + "movq 7*16(%%"REG_d"), %%mm4 \n\t" + "psubw %%mm7, %%mm0 \n\t" + + "psubw %%mm4, %%mm6 \n\t" + "paddusw %%mm2, %%mm5 \n\t" + + "paddusw %%mm4, %%mm6 \n\t" + "paddw %%mm3, %%mm1 \n\t" + + "paddw %%mm2, %%mm5 \n\t" + "paddw %%mm4, %%mm6 \n\t" + + "psubusw %%mm3, %%mm1 \n\t" + "psubusw %%mm2, %%mm5 \n\t" + + "psubusw %%mm4, %%mm6 \n\t" + "movq %%mm1, %%mm4 \n\t" + + "por %%mm5, %%mm4 \n\t" + "paddusw %%mm7, %%mm0 \n\t" + + "por %%mm6, %%mm4 \n\t" + "paddw %%mm7, %%mm0 \n\t" + + "packssdw %%mm4, %%mm4 
\n\t" + "psubusw %%mm7, %%mm0 \n\t" + + "movd %%mm4, %%"REG_a" \n\t" + "or %%"REG_a", %%"REG_a" \n\t" + "jnz 2f \n\t" + //movq [edi+"DCTSIZE_S"*3*2], mm1 + //movq [edi+"DCTSIZE_S"*5*2], mm5 + //movq [edi+"DCTSIZE_S"*1*2], mm0 + //movq [edi+"DCTSIZE_S"*7*2], mm6 + // t4 t5 - - - t6 t7 - + //--- t4 (mm0) may be <>0; mm1, mm5, mm6 == 0 +//Typical numbers: nondc - 19%%, dc - 26%%, zero - 55%%. zero case alone isn't worthwhile + "movq 0*8+%3, %%mm4 \n\t" + "movq %%mm0, %%mm1 \n\t" + + "pmulhw "MANGLE(MM_FIX_0_847759065)", %%mm0 \n\t" //tmp6 + "movq %%mm1, %%mm2 \n\t" + + "movq "DCTSIZE_S"*0*2(%%"REG_D"), %%mm5 \n\t" + "movq %%mm2, %%mm3 \n\t" + + "pmulhw "MANGLE(MM_FIX_0_566454497)", %%mm1 \n\t" //tmp5 + "paddw %%mm4, %%mm5 \n\t" + + "movq 1*8+%3, %%mm6 \n\t" + //paddw mm3, MM_2 + "psraw $2, %%mm3 \n\t" //tmp7 + + "pmulhw "MANGLE(MM_FIX_0_198912367)", %%mm2 \n\t" //-tmp4 + "psubw %%mm3, %%mm4 \n\t" + + "movq "DCTSIZE_S"*1*2(%%"REG_D"), %%mm7 \n\t" + "paddw %%mm3, %%mm5 \n\t" + + "movq %%mm4, "DCTSIZE_S"*7*2(%%"REG_D") \n\t" + "paddw %%mm6, %%mm7 \n\t" + + "movq 2*8+%3, %%mm3 \n\t" + "psubw %%mm0, %%mm6 \n\t" + + "movq "DCTSIZE_S"*2*2(%%"REG_D"), %%mm4 \n\t" + "paddw %%mm0, %%mm7 \n\t" + + "movq %%mm5, "DCTSIZE_S"*0*2(%%"REG_D") \n\t" + "paddw %%mm3, %%mm4 \n\t" + + "movq %%mm6, "DCTSIZE_S"*6*2(%%"REG_D") \n\t" + "psubw %%mm1, %%mm3 \n\t" + + "movq "DCTSIZE_S"*5*2(%%"REG_D"), %%mm5 \n\t" + "paddw %%mm1, %%mm4 \n\t" + + "movq "DCTSIZE_S"*3*2(%%"REG_D"), %%mm6 \n\t" + "paddw %%mm3, %%mm5 \n\t" + + "movq 3*8+%3, %%mm0 \n\t" + "add $8, %%"REG_S" \n\t" + + "movq %%mm7, "DCTSIZE_S"*1*2(%%"REG_D") \n\t" + "paddw %%mm0, %%mm6 \n\t" + + "movq %%mm4, "DCTSIZE_S"*2*2(%%"REG_D") \n\t" + "psubw %%mm2, %%mm0 \n\t" + + "movq "DCTSIZE_S"*4*2(%%"REG_D"), %%mm7 \n\t" + "paddw %%mm2, %%mm6 \n\t" + + "movq %%mm5, "DCTSIZE_S"*5*2(%%"REG_D") \n\t" + "paddw %%mm0, %%mm7 \n\t" + + "movq %%mm6, "DCTSIZE_S"*3*2(%%"REG_D") \n\t" + + "movq %%mm7, "DCTSIZE_S"*4*2(%%"REG_D") \n\t" + "add $8, 
%%"REG_D" \n\t" + "jmp 4f \n\t" + + "2: \n\t" + //--- non DC2 + //psraw mm1, 2 w/o it -> offset. thr1, thr1, thr1 (actually thr1, thr1, thr1-1) + //psraw mm5, 2 + //psraw mm0, 2 + //psraw mm6, 2 + "movq %%mm5, %%mm3 \n\t" + "psubw %%mm1, %%mm5 \n\t" + + "psllw $1, %%mm5 \n\t" //'z10 + "paddw %%mm1, %%mm3 \n\t" //'z13 + + "movq %%mm0, %%mm2 \n\t" + "psubw %%mm6, %%mm0 \n\t" + + "movq %%mm5, %%mm1 \n\t" + "psllw $1, %%mm0 \n\t" //'z12 + + "pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm1 \n\t" //- + "paddw %%mm0, %%mm5 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm5 \n\t" //'z5 + "paddw %%mm6, %%mm2 \n\t" //'z11 + + "pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm0 \n\t" + "movq %%mm2, %%mm7 \n\t" + + //--- + "movq 0*8+%3, %%mm4 \n\t" + "psubw %%mm3, %%mm2 \n\t" + + "psllw $1, %%mm2 \n\t" + "paddw %%mm3, %%mm7 \n\t" //'t7 + + "pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //'t11 + "movq %%mm4, %%mm6 \n\t" + //paddw mm7, MM_2 + "psraw $2, %%mm7 \n\t" + + "paddw "DCTSIZE_S"*0*2(%%"REG_D"), %%mm4 \n\t" + "psubw %%mm7, %%mm6 \n\t" + + "movq 1*8+%3, %%mm3 \n\t" + "paddw %%mm7, %%mm4 \n\t" + + "movq %%mm6, "DCTSIZE_S"*7*2(%%"REG_D") \n\t" + "paddw %%mm5, %%mm1 \n\t" //'t12 + + "movq %%mm4, "DCTSIZE_S"*0*2(%%"REG_D") \n\t" + "psubw %%mm7, %%mm1 \n\t" //'t6 + + "movq 2*8+%3, %%mm7 \n\t" + "psubw %%mm5, %%mm0 \n\t" //'t10 + + "movq 3*8+%3, %%mm6 \n\t" + "movq %%mm3, %%mm5 \n\t" + + "paddw "DCTSIZE_S"*1*2(%%"REG_D"), %%mm3 \n\t" + "psubw %%mm1, %%mm5 \n\t" + + "psubw %%mm1, %%mm2 \n\t" //'t5 + "paddw %%mm1, %%mm3 \n\t" + + "movq %%mm5, "DCTSIZE_S"*6*2(%%"REG_D") \n\t" + "movq %%mm7, %%mm4 \n\t" + + "paddw "DCTSIZE_S"*2*2(%%"REG_D"), %%mm7 \n\t" + "psubw %%mm2, %%mm4 \n\t" + + "paddw "DCTSIZE_S"*5*2(%%"REG_D"), %%mm4 \n\t" + "paddw %%mm2, %%mm7 \n\t" + + "movq %%mm3, "DCTSIZE_S"*1*2(%%"REG_D") \n\t" + "paddw %%mm2, %%mm0 \n\t" //'t4 + + // 't4 't6 't5 - - - - 't7 + "movq %%mm7, "DCTSIZE_S"*2*2(%%"REG_D") \n\t" + "movq %%mm6, %%mm1 \n\t" + + "paddw "DCTSIZE_S"*4*2(%%"REG_D"), %%mm6 
\n\t" + "psubw %%mm0, %%mm1 \n\t" + + "paddw "DCTSIZE_S"*3*2(%%"REG_D"), %%mm1 \n\t" + "paddw %%mm0, %%mm6 \n\t" + + "movq %%mm4, "DCTSIZE_S"*5*2(%%"REG_D") \n\t" + "add $8, %%"REG_S" \n\t" + + "movq %%mm6, "DCTSIZE_S"*4*2(%%"REG_D") \n\t" + + "movq %%mm1, "DCTSIZE_S"*3*2(%%"REG_D") \n\t" + "add $8, %%"REG_D" \n\t" + + "4: \n\t" +//=part 2 (the same)=========================================================== + "movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm1 \n\t" + // + "movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm7 \n\t" + "movq %%mm1, %%mm0 \n\t" + + "paddw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm1 \n\t" //t0 + "movq %%mm7, %%mm3 \n\t" + + "paddw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm7 \n\t" //t3 + "movq %%mm1, %%mm5 \n\t" + + "movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm6 \n\t" + "psubw %%mm7, %%mm1 \n\t" //t13 + + "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t" + "movq %%mm6, %%mm4 \n\t" + + "paddw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm6 \n\t" //t1 + "paddw %%mm7, %%mm5 \n\t" //t10 + + "paddw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t2 + "movq %%mm6, %%mm7 \n\t" + + "paddw %%mm2, %%mm6 \n\t" //t11 + "psubw %%mm2, %%mm7 \n\t" //t12 + + "movq %%mm5, %%mm2 \n\t" + "paddw %%mm6, %%mm5 \n\t" //d0 + // i0 t13 t12 i3 i1 d0 - d4 + "psubw %%mm6, %%mm2 \n\t" //d4 + "paddw %%mm1, %%mm7 \n\t" + + "movq 1*8+4*16(%%"REG_d"), %%mm6 \n\t" + "psllw $2, %%mm7 \n\t" + + "psubw 1*8+0*16(%%"REG_d"), %%mm5 \n\t" + "psubw %%mm6, %%mm2 \n\t" + + "paddusw 1*8+0*16(%%"REG_d"), %%mm5 \n\t" + "paddusw %%mm6, %%mm2 \n\t" + + "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm7 \n\t" + // + "paddw 1*8+0*16(%%"REG_d"), %%mm5 \n\t" + "paddw %%mm6, %%mm2 \n\t" + + "psubusw 1*8+0*16(%%"REG_d"), %%mm5 \n\t" + "psubusw %%mm6, %%mm2 \n\t" + +//This func is totally compute-bound, operates at huge speed. So, DC shortcut +// at this place isn't worthwhile due to BTB miss penalty (checked on Pent. 3). +//However, typical numbers: nondc - 29%%, dc - 46%%, zero - 25%%. All <> 0 case is very rare. 
+ "paddw "MANGLE(MM_2)", %%mm5 \n\t" + "movq %%mm2, %%mm6 \n\t" + + "paddw %%mm5, %%mm2 \n\t" + "psubw %%mm6, %%mm5 \n\t" + + "movq %%mm1, %%mm6 \n\t" + "paddw %%mm7, %%mm1 \n\t" //d2 + + "psubw 1*8+2*16(%%"REG_d"), %%mm1 \n\t" + "psubw %%mm7, %%mm6 \n\t" //d6 + + "movq 1*8+6*16(%%"REG_d"), %%mm7 \n\t" + "psraw $2, %%mm5 \n\t" + + "paddusw 1*8+2*16(%%"REG_d"), %%mm1 \n\t" + "psubw %%mm7, %%mm6 \n\t" + // t7 d2 /t11 t4 t6 - d6 /t10 + + "paddw 1*8+2*16(%%"REG_d"), %%mm1 \n\t" + "paddusw %%mm7, %%mm6 \n\t" + + "psubusw 1*8+2*16(%%"REG_d"), %%mm1 \n\t" + "paddw %%mm7, %%mm6 \n\t" + + "psubw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm3 \n\t" + "psubusw %%mm7, %%mm6 \n\t" + + //movq [edi+"DCTSIZE_S"*2*2], mm1 + //movq [edi+"DCTSIZE_S"*6*2], mm6 + "movq %%mm1, %%mm7 \n\t" + "psraw $2, %%mm2 \n\t" + + "psubw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm4 \n\t" + "psubw %%mm6, %%mm1 \n\t" + + "psubw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm0 \n\t" + "paddw %%mm7, %%mm6 \n\t" //'t13 + + "psraw $2, %%mm6 \n\t" //paddw mm6, MM_2 !! --- + "movq %%mm2, %%mm7 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm1 \n\t" + "paddw %%mm6, %%mm2 \n\t" //'t0 + + "movq %%mm2, 0*8+%3 \n\t" //! 
+ "psubw %%mm6, %%mm7 \n\t" //'t3 + + "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t" + "psubw %%mm6, %%mm1 \n\t" //'t12 + + "psubw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t5 + "movq %%mm5, %%mm6 \n\t" + + "movq %%mm7, 3*8+%3 \n\t" + "paddw %%mm2, %%mm3 \n\t" //t10 + + "paddw %%mm4, %%mm2 \n\t" //t11 + "paddw %%mm0, %%mm4 \n\t" //t12 + + "movq %%mm3, %%mm7 \n\t" + "psubw %%mm4, %%mm3 \n\t" + + "psllw $2, %%mm3 \n\t" + "psllw $2, %%mm7 \n\t" //opt for P6 + + "pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t" + "psllw $2, %%mm4 \n\t" + + "pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm7 \n\t" + "psllw $2, %%mm2 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm4 \n\t" + "paddw %%mm1, %%mm5 \n\t" //'t1 + + "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm2 \n\t" + "psubw %%mm1, %%mm6 \n\t" //'t2 + // t7 't12 't11 t4 t6 - 't13 't10 --- + + "paddw %%mm3, %%mm7 \n\t" //z2 + + "movq %%mm5, 1*8+%3 \n\t" + "paddw %%mm3, %%mm4 \n\t" //z4 + + "movq 1*8+3*16(%%"REG_d"), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + + "movq %%mm6, 2*8+%3 \n\t" + "psubw %%mm2, %%mm1 \n\t" //z13 + +//=== + "paddw %%mm2, %%mm0 \n\t" //z11 + "movq %%mm1, %%mm5 \n\t" + + "movq 1*8+5*16(%%"REG_d"), %%mm2 \n\t" + "psubw %%mm7, %%mm1 \n\t" //d3 + + "paddw %%mm7, %%mm5 \n\t" //d5 + "psubw %%mm3, %%mm1 \n\t" + + "movq 1*8+1*16(%%"REG_d"), %%mm7 \n\t" + "psubw %%mm2, %%mm5 \n\t" + + "movq %%mm0, %%mm6 \n\t" + "paddw %%mm4, %%mm0 \n\t" //d1 + + "paddusw %%mm3, %%mm1 \n\t" + "psubw %%mm4, %%mm6 \n\t" //d7 + + // d1 d3 - - - d5 d7 - + "movq 1*8+7*16(%%"REG_d"), %%mm4 \n\t" + "psubw %%mm7, %%mm0 \n\t" + + "psubw %%mm4, %%mm6 \n\t" + "paddusw %%mm2, %%mm5 \n\t" + + "paddusw %%mm4, %%mm6 \n\t" + "paddw %%mm3, %%mm1 \n\t" + + "paddw %%mm2, %%mm5 \n\t" + "paddw %%mm4, %%mm6 \n\t" + + "psubusw %%mm3, %%mm1 \n\t" + "psubusw %%mm2, %%mm5 \n\t" + + "psubusw %%mm4, %%mm6 \n\t" + "movq %%mm1, %%mm4 \n\t" + + "por %%mm5, %%mm4 \n\t" + "paddusw %%mm7, %%mm0 \n\t" + + "por %%mm6, %%mm4 \n\t" + "paddw %%mm7, %%mm0 \n\t" + + "packssdw 
%%mm4, %%mm4 \n\t" + "psubusw %%mm7, %%mm0 \n\t" + + "movd %%mm4, %%"REG_a" \n\t" + "or %%"REG_a", %%"REG_a" \n\t" + "jnz 3f \n\t" + //movq [edi+"DCTSIZE_S"*3*2], mm1 + //movq [edi+"DCTSIZE_S"*5*2], mm5 + //movq [edi+"DCTSIZE_S"*1*2], mm0 + //movq [edi+"DCTSIZE_S"*7*2], mm6 + // t4 t5 - - - t6 t7 - + //--- t4 (mm0) may be <>0; mm1, mm5, mm6 == 0 +//Typical numbers: nondc - 19%%, dc - 26%%, zero - 55%%. zero case alone isn't worthwhile + "movq 0*8+%3, %%mm4 \n\t" + "movq %%mm0, %%mm1 \n\t" + + "pmulhw "MANGLE(MM_FIX_0_847759065)", %%mm0 \n\t" //tmp6 + "movq %%mm1, %%mm2 \n\t" + + "movq "DCTSIZE_S"*0*2(%%"REG_D"), %%mm5 \n\t" + "movq %%mm2, %%mm3 \n\t" + + "pmulhw "MANGLE(MM_FIX_0_566454497)", %%mm1 \n\t" //tmp5 + "paddw %%mm4, %%mm5 \n\t" + + "movq 1*8+%3, %%mm6 \n\t" + //paddw mm3, MM_2 + "psraw $2, %%mm3 \n\t" //tmp7 + + "pmulhw "MANGLE(MM_FIX_0_198912367)", %%mm2 \n\t" //-tmp4 + "psubw %%mm3, %%mm4 \n\t" + + "movq "DCTSIZE_S"*1*2(%%"REG_D"), %%mm7 \n\t" + "paddw %%mm3, %%mm5 \n\t" + + "movq %%mm4, "DCTSIZE_S"*7*2(%%"REG_D") \n\t" + "paddw %%mm6, %%mm7 \n\t" + + "movq 2*8+%3, %%mm3 \n\t" + "psubw %%mm0, %%mm6 \n\t" + + "movq "DCTSIZE_S"*2*2(%%"REG_D"), %%mm4 \n\t" + "paddw %%mm0, %%mm7 \n\t" + + "movq %%mm5, "DCTSIZE_S"*0*2(%%"REG_D") \n\t" + "paddw %%mm3, %%mm4 \n\t" + + "movq %%mm6, "DCTSIZE_S"*6*2(%%"REG_D") \n\t" + "psubw %%mm1, %%mm3 \n\t" + + "movq "DCTSIZE_S"*5*2(%%"REG_D"), %%mm5 \n\t" + "paddw %%mm1, %%mm4 \n\t" + + "movq "DCTSIZE_S"*3*2(%%"REG_D"), %%mm6 \n\t" + "paddw %%mm3, %%mm5 \n\t" + + "movq 3*8+%3, %%mm0 \n\t" + "add $24, %%"REG_S" \n\t" + + "movq %%mm7, "DCTSIZE_S"*1*2(%%"REG_D") \n\t" + "paddw %%mm0, %%mm6 \n\t" + + "movq %%mm4, "DCTSIZE_S"*2*2(%%"REG_D") \n\t" + "psubw %%mm2, %%mm0 \n\t" + + "movq "DCTSIZE_S"*4*2(%%"REG_D"), %%mm7 \n\t" + "paddw %%mm2, %%mm6 \n\t" + + "movq %%mm5, "DCTSIZE_S"*5*2(%%"REG_D") \n\t" + "paddw %%mm0, %%mm7 \n\t" + + "movq %%mm6, "DCTSIZE_S"*3*2(%%"REG_D") \n\t" + + "movq %%mm7, "DCTSIZE_S"*4*2(%%"REG_D") \n\t" + 
"add $24, %%"REG_D" \n\t" + "sub $2, %%"REG_c" \n\t" + "jnz 1b \n\t" + "jmp 5f \n\t" + + "3: \n\t" + //--- non DC2 + //psraw mm1, 2 w/o it -> offset. thr1, thr1, thr1 (actually thr1, thr1, thr1-1) + //psraw mm5, 2 + //psraw mm0, 2 + //psraw mm6, 2 + "movq %%mm5, %%mm3 \n\t" + "psubw %%mm1, %%mm5 \n\t" + + "psllw $1, %%mm5 \n\t" //'z10 + "paddw %%mm1, %%mm3 \n\t" //'z13 + + "movq %%mm0, %%mm2 \n\t" + "psubw %%mm6, %%mm0 \n\t" + + "movq %%mm5, %%mm1 \n\t" + "psllw $1, %%mm0 \n\t" //'z12 + + "pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm1 \n\t" //- + "paddw %%mm0, %%mm5 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm5 \n\t" //'z5 + "paddw %%mm6, %%mm2 \n\t" //'z11 + + "pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm0 \n\t" + "movq %%mm2, %%mm7 \n\t" + + //--- + "movq 0*8+%3, %%mm4 \n\t" + "psubw %%mm3, %%mm2 \n\t" + + "psllw $1, %%mm2 \n\t" + "paddw %%mm3, %%mm7 \n\t" //'t7 + + "pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //'t11 + "movq %%mm4, %%mm6 \n\t" + //paddw mm7, MM_2 + "psraw $2, %%mm7 \n\t" + + "paddw "DCTSIZE_S"*0*2(%%"REG_D"), %%mm4 \n\t" + "psubw %%mm7, %%mm6 \n\t" + + "movq 1*8+%3, %%mm3 \n\t" + "paddw %%mm7, %%mm4 \n\t" + + "movq %%mm6, "DCTSIZE_S"*7*2(%%"REG_D") \n\t" + "paddw %%mm5, %%mm1 \n\t" //'t12 + + "movq %%mm4, "DCTSIZE_S"*0*2(%%"REG_D") \n\t" + "psubw %%mm7, %%mm1 \n\t" //'t6 + + "movq 2*8+%3, %%mm7 \n\t" + "psubw %%mm5, %%mm0 \n\t" //'t10 + + "movq 3*8+%3, %%mm6 \n\t" + "movq %%mm3, %%mm5 \n\t" + + "paddw "DCTSIZE_S"*1*2(%%"REG_D"), %%mm3 \n\t" + "psubw %%mm1, %%mm5 \n\t" + + "psubw %%mm1, %%mm2 \n\t" //'t5 + "paddw %%mm1, %%mm3 \n\t" + + "movq %%mm5, "DCTSIZE_S"*6*2(%%"REG_D") \n\t" + "movq %%mm7, %%mm4 \n\t" + + "paddw "DCTSIZE_S"*2*2(%%"REG_D"), %%mm7 \n\t" + "psubw %%mm2, %%mm4 \n\t" + + "paddw "DCTSIZE_S"*5*2(%%"REG_D"), %%mm4 \n\t" + "paddw %%mm2, %%mm7 \n\t" + + "movq %%mm3, "DCTSIZE_S"*1*2(%%"REG_D") \n\t" + "paddw %%mm2, %%mm0 \n\t" //'t4 + + // 't4 't6 't5 - - - - 't7 + "movq %%mm7, "DCTSIZE_S"*2*2(%%"REG_D") \n\t" + "movq %%mm6, 
%%mm1 \n\t" + + "paddw "DCTSIZE_S"*4*2(%%"REG_D"), %%mm6 \n\t" + "psubw %%mm0, %%mm1 \n\t" + + "paddw "DCTSIZE_S"*3*2(%%"REG_D"), %%mm1 \n\t" + "paddw %%mm0, %%mm6 \n\t" + + "movq %%mm4, "DCTSIZE_S"*5*2(%%"REG_D") \n\t" + "add $24, %%"REG_S" \n\t" + + "movq %%mm6, "DCTSIZE_S"*4*2(%%"REG_D") \n\t" + + "movq %%mm1, "DCTSIZE_S"*3*2(%%"REG_D") \n\t" + "add $24, %%"REG_D" \n\t" + "sub $2, %%"REG_c" \n\t" + "jnz 1b \n\t" + "5: \n\t" + + : "+S"(data), "+D"(output), "+c"(cnt), "=o"(temps) + : "d"(thr_adr) + : "%"REG_a + ); +} + +#endif // HAVE_MMX + +#if !HAVE_MMX + +static void row_idct_c(int16_t* workspace, + int16_t* output_adr, int output_stride, int cnt) +{ + int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_simd16_t tmp10, tmp11, tmp12, tmp13; + int_simd16_t z5, z10, z11, z12, z13; + int16_t* outptr; + int16_t* wsptr; + + cnt*=4; + wsptr = workspace; + outptr = output_adr; + for (; cnt > 0; cnt--) { + // Even part + //Simd version reads 4x4 block and transposes it + tmp10 = ( wsptr[2] + wsptr[3]); + tmp11 = ( wsptr[2] - wsptr[3]); + + tmp13 = ( wsptr[0] + wsptr[1]); + tmp12 = (MULTIPLY16H( wsptr[0] - wsptr[1], FIX_1_414213562_A)<<2) - tmp13;//this shift order to avoid overflow + + tmp0 = tmp10 + tmp13; //->temps + tmp3 = tmp10 - tmp13; //->temps + tmp1 = tmp11 + tmp12; + tmp2 = tmp11 - tmp12; + + // Odd part + //Also transpose, with previous: + // ---- ---- |||| + // ---- ---- idct |||| + // ---- ---- ---> |||| + // ---- ---- |||| + z13 = wsptr[4] + wsptr[5]; + z10 = wsptr[4] - wsptr[5]; + z11 = wsptr[6] + wsptr[7]; + z12 = wsptr[6] - wsptr[7]; + + tmp7 = z11 + z13; + tmp11 = MULTIPLY16H(z11 - z13, FIX_1_414213562); + + z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065); + tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5; + tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - FIX_ + + tmp6 = (tmp12<<3) - tmp7; + tmp5 = (tmp11<<3) - tmp6; + tmp4 = (tmp10<<3) + tmp5; + + // Final output stage: descale and write column + outptr[0*output_stride]+= DESCALE(tmp0 + 
tmp7, 3); + outptr[1*output_stride]+= DESCALE(tmp1 + tmp6, 3); + outptr[2*output_stride]+= DESCALE(tmp2 + tmp5, 3); + outptr[3*output_stride]+= DESCALE(tmp3 - tmp4, 3); + outptr[4*output_stride]+= DESCALE(tmp3 + tmp4, 3); + outptr[5*output_stride]+= DESCALE(tmp2 - tmp5, 3); + outptr[6*output_stride]+= DESCALE(tmp1 - tmp6, 3); //no += ? + outptr[7*output_stride]+= DESCALE(tmp0 - tmp7, 3); //no += ? + outptr++; + + wsptr += DCTSIZE; // advance pointer to next row + } +} + +#else /* HAVE_MMX */ + +static void row_idct_mmx (int16_t* workspace, + int16_t* output_adr, int output_stride, int cnt) +{ + uint64_t __attribute__((aligned(8))) temps[4]; + __asm__ volatile( + "lea (%%"REG_a",%%"REG_a",2), %%"REG_d" \n\t" + "1: \n\t" + "movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm0 \n\t" + // + + "movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm1 \n\t" + "movq %%mm0, %%mm4 \n\t" + + "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t" + "punpcklwd %%mm1, %%mm0 \n\t" + + "movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm3 \n\t" + "punpckhwd %%mm1, %%mm4 \n\t" + + //transpose 4x4 + "movq %%mm2, %%mm7 \n\t" + "punpcklwd %%mm3, %%mm2 \n\t" + + "movq %%mm0, %%mm6 \n\t" + "punpckldq %%mm2, %%mm0 \n\t" //0 + + "punpckhdq %%mm2, %%mm6 \n\t" //1 + "movq %%mm0, %%mm5 \n\t" + + "punpckhwd %%mm3, %%mm7 \n\t" + "psubw %%mm6, %%mm0 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm0 \n\t" + "movq %%mm4, %%mm2 \n\t" + + "punpckldq %%mm7, %%mm4 \n\t" //2 + "paddw %%mm6, %%mm5 \n\t" + + "punpckhdq %%mm7, %%mm2 \n\t" //3 + "movq %%mm4, %%mm1 \n\t" + + "psllw $2, %%mm0 \n\t" + "paddw %%mm2, %%mm4 \n\t" //t10 + + "movq "DCTSIZE_S"*0*2+"DCTSIZE_S"(%%"REG_S"), %%mm3 \n\t" + "psubw %%mm2, %%mm1 \n\t" //t11 + + "movq "DCTSIZE_S"*1*2+"DCTSIZE_S"(%%"REG_S"), %%mm2 \n\t" + "psubw %%mm5, %%mm0 \n\t" + + "movq %%mm4, %%mm6 \n\t" + "paddw %%mm5, %%mm4 \n\t" //t0 + + "psubw %%mm5, %%mm6 \n\t" //t3 + "movq %%mm1, %%mm7 \n\t" + + "movq "DCTSIZE_S"*2*2+"DCTSIZE_S"(%%"REG_S"), %%mm5 \n\t" + "paddw %%mm0, %%mm1 \n\t" //t1 + + "movq %%mm4, 0*8+%3 
\n\t" //t0 + "movq %%mm3, %%mm4 \n\t" + + "movq %%mm6, 1*8+%3 \n\t" //t3 + "punpcklwd %%mm2, %%mm3 \n\t" + + //transpose 4x4 + "movq "DCTSIZE_S"*3*2+"DCTSIZE_S"(%%"REG_S"), %%mm6 \n\t" + "punpckhwd %%mm2, %%mm4 \n\t" + + "movq %%mm5, %%mm2 \n\t" + "punpcklwd %%mm6, %%mm5 \n\t" + + "psubw %%mm0, %%mm7 \n\t" //t2 + "punpckhwd %%mm6, %%mm2 \n\t" + + "movq %%mm3, %%mm0 \n\t" + "punpckldq %%mm5, %%mm3 \n\t" //4 + + "punpckhdq %%mm5, %%mm0 \n\t" //5 + "movq %%mm4, %%mm5 \n\t" + + // + "movq %%mm3, %%mm6 \n\t" + "punpckldq %%mm2, %%mm4 \n\t" //6 + + "psubw %%mm0, %%mm3 \n\t" //z10 + "punpckhdq %%mm2, %%mm5 \n\t" //7 + + "paddw %%mm0, %%mm6 \n\t" //z13 + "movq %%mm4, %%mm2 \n\t" + + "movq %%mm3, %%mm0 \n\t" + "psubw %%mm5, %%mm4 \n\t" //z12 + + "pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm0 \n\t" //- + "paddw %%mm4, %%mm3 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm3 \n\t" //z5 + "paddw %%mm5, %%mm2 \n\t" //z11 > + + "pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + + "psubw %%mm6, %%mm2 \n\t" + "paddw %%mm6, %%mm5 \n\t" //t7 + + "pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //t11 + "paddw %%mm3, %%mm0 \n\t" //t12 + + "psllw $3, %%mm0 \n\t" + "psubw %%mm3, %%mm4 \n\t" //t10 + + "movq 0*8+%3, %%mm6 \n\t" + "movq %%mm1, %%mm3 \n\t" + + "psllw $3, %%mm4 \n\t" + "psubw %%mm5, %%mm0 \n\t" //t6 + + "psllw $3, %%mm2 \n\t" + "paddw %%mm0, %%mm1 \n\t" //d1 + + "psubw %%mm0, %%mm2 \n\t" //t5 + "psubw %%mm0, %%mm3 \n\t" //d6 + + "paddw %%mm2, %%mm4 \n\t" //t4 + "movq %%mm7, %%mm0 \n\t" + + "paddw %%mm2, %%mm7 \n\t" //d2 + "psubw %%mm2, %%mm0 \n\t" //d5 + + "movq "MANGLE(MM_DESCALE_RND)", %%mm2 \n\t" //4 + "psubw %%mm5, %%mm6 \n\t" //d7 + + "paddw 0*8+%3, %%mm5 \n\t" //d0 + "paddw %%mm2, %%mm1 \n\t" + + "paddw %%mm2, %%mm5 \n\t" + "psraw $3, %%mm1 \n\t" + + "paddw %%mm2, %%mm7 \n\t" + "psraw $3, %%mm5 \n\t" + + "paddw (%%"REG_D"), %%mm5 \n\t" + "psraw $3, %%mm7 \n\t" + + "paddw (%%"REG_D",%%"REG_a",), %%mm1 \n\t" + "paddw %%mm2, %%mm0 \n\t" + + 
"paddw (%%"REG_D",%%"REG_a",2), %%mm7 \n\t" + "paddw %%mm2, %%mm3 \n\t" + + "movq %%mm5, (%%"REG_D") \n\t" + "paddw %%mm2, %%mm6 \n\t" + + "movq %%mm1, (%%"REG_D",%%"REG_a",) \n\t" + "psraw $3, %%mm0 \n\t" + + "movq %%mm7, (%%"REG_D",%%"REG_a",2) \n\t" + "add %%"REG_d", %%"REG_D" \n\t" //3*ls + + "movq 1*8+%3, %%mm5 \n\t" //t3 + "psraw $3, %%mm3 \n\t" + + "paddw (%%"REG_D",%%"REG_a",2), %%mm0 \n\t" + "psubw %%mm4, %%mm5 \n\t" //d3 + + "paddw (%%"REG_D",%%"REG_d",), %%mm3 \n\t" + "psraw $3, %%mm6 \n\t" + + "paddw 1*8+%3, %%mm4 \n\t" //d4 + "paddw %%mm2, %%mm5 \n\t" + + "paddw (%%"REG_D",%%"REG_a",4), %%mm6 \n\t" + "paddw %%mm2, %%mm4 \n\t" + + "movq %%mm0, (%%"REG_D",%%"REG_a",2) \n\t" + "psraw $3, %%mm5 \n\t" + + "paddw (%%"REG_D"), %%mm5 \n\t" + "psraw $3, %%mm4 \n\t" + + "paddw (%%"REG_D",%%"REG_a",), %%mm4 \n\t" + "add $"DCTSIZE_S"*2*4, %%"REG_S" \n\t" //4 rows + + "movq %%mm3, (%%"REG_D",%%"REG_d",) \n\t" + "movq %%mm6, (%%"REG_D",%%"REG_a",4) \n\t" + "movq %%mm5, (%%"REG_D") \n\t" + "movq %%mm4, (%%"REG_D",%%"REG_a",) \n\t" + + "sub %%"REG_d", %%"REG_D" \n\t" + "add $8, %%"REG_D" \n\t" + "dec %%"REG_c" \n\t" + "jnz 1b \n\t" + + : "+S"(workspace), "+D"(output_adr), "+c"(cnt), "=o"(temps) + : "a"(output_stride*sizeof(short)) + : "%"REG_d + ); +} + +#endif // HAVE_MMX + +#if !HAVE_MMX + +static void row_fdct_c(int16_t *data, const uint8_t *pixels, int line_size, int cnt) +{ + int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_simd16_t tmp10, tmp11, tmp12, tmp13; + int_simd16_t z1, z2, z3, z4, z5, z11, z13; + int16_t *dataptr; + + cnt*=4; + // Pass 1: process rows. 
+ + dataptr = data; + for (; cnt > 0; cnt--) { + tmp0 = pixels[line_size*0] + pixels[line_size*7]; + tmp7 = pixels[line_size*0] - pixels[line_size*7]; + tmp1 = pixels[line_size*1] + pixels[line_size*6]; + tmp6 = pixels[line_size*1] - pixels[line_size*6]; + tmp2 = pixels[line_size*2] + pixels[line_size*5]; + tmp5 = pixels[line_size*2] - pixels[line_size*5]; + tmp3 = pixels[line_size*3] + pixels[line_size*4]; + tmp4 = pixels[line_size*3] - pixels[line_size*4]; + + // Even part + + tmp10 = tmp0 + tmp3; + tmp13 = tmp0 - tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + //Even columns are written first, this leads to different order of columns + //in column_fidct(), but they are processed independently, so all ok. + //Later in the row_idct() columns readed at the same order. + dataptr[2] = tmp10 + tmp11; + dataptr[3] = tmp10 - tmp11; + + z1 = MULTIPLY16H((tmp12 + tmp13)<<2, FIX_0_707106781); + dataptr[0] = tmp13 + z1; + dataptr[1] = tmp13 - z1; + + // Odd part + + tmp10 = (tmp4 + tmp5) <<2; + tmp11 = (tmp5 + tmp6) <<2; + tmp12 = (tmp6 + tmp7) <<2; + + z5 = MULTIPLY16H(tmp10 - tmp12, FIX_0_382683433); + z2 = MULTIPLY16H(tmp10, FIX_0_541196100) + z5; + z4 = MULTIPLY16H(tmp12, FIX_1_306562965) + z5; + z3 = MULTIPLY16H(tmp11, FIX_0_707106781); + + z11 = tmp7 + z3; + z13 = tmp7 - z3; + + dataptr[4] = z13 + z2; + dataptr[5] = z13 - z2; + dataptr[6] = z11 + z4; + dataptr[7] = z11 - z4; + + pixels++; // advance pointer to next column + dataptr += DCTSIZE; + } +} + +#else /* HAVE_MMX */ + +static void row_fdct_mmx(int16_t *data, const uint8_t *pixels, int line_size, int cnt) +{ + uint64_t __attribute__((aligned(8))) temps[4]; + __asm__ volatile( + "lea (%%"REG_a",%%"REG_a",2), %%"REG_d" \n\t" + "6: \n\t" + "movd (%%"REG_S"), %%mm0 \n\t" + "pxor %%mm7, %%mm7 \n\t" + + "movd (%%"REG_S",%%"REG_a",), %%mm1 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + + "movd (%%"REG_S",%%"REG_a",2), %%mm2 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + + "punpcklbw %%mm7, %%mm2 \n\t" + "add %%"REG_d", 
%%"REG_S" \n\t" + + "movq %%mm0, %%mm5 \n\t" + // + + "movd (%%"REG_S",%%"REG_a",4), %%mm3 \n\t" //7 ;prefetch! + "movq %%mm1, %%mm6 \n\t" + + "movd (%%"REG_S",%%"REG_d",), %%mm4 \n\t" //6 + "punpcklbw %%mm7, %%mm3 \n\t" + + "psubw %%mm3, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + + "paddw %%mm3, %%mm0 \n\t" + "psubw %%mm4, %%mm6 \n\t" + + "movd (%%"REG_S",%%"REG_a",2), %%mm3 \n\t" //5 + "paddw %%mm4, %%mm1 \n\t" + + "movq %%mm5, 0*8+%3 \n\t" //t7 + "punpcklbw %%mm7, %%mm3 \n\t" + + "movq %%mm6, 1*8+%3 \n\t" //t6 + "movq %%mm2, %%mm4 \n\t" + + "movd (%%"REG_S"), %%mm5 \n\t" //3 + "paddw %%mm3, %%mm2 \n\t" + + "movd (%%"REG_S",%%"REG_a",), %%mm6 \n\t" //4 + "punpcklbw %%mm7, %%mm5 \n\t" + + "psubw %%mm3, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm6 \n\t" + + "movq %%mm5, %%mm3 \n\t" + "paddw %%mm6, %%mm5 \n\t" //t3 + + "psubw %%mm6, %%mm3 \n\t" //t4 ; t0 t1 t2 t4 t5 t3 - - + "movq %%mm0, %%mm6 \n\t" + + "movq %%mm1, %%mm7 \n\t" + "psubw %%mm5, %%mm0 \n\t" //t13 + + "psubw %%mm2, %%mm1 \n\t" + "paddw %%mm2, %%mm7 \n\t" //t11 + + "paddw %%mm0, %%mm1 \n\t" + "movq %%mm7, %%mm2 \n\t" + + "psllw $2, %%mm1 \n\t" + "paddw %%mm5, %%mm6 \n\t" //t10 + + "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm1 \n\t" + "paddw %%mm6, %%mm7 \n\t" //d2 + + "psubw %%mm2, %%mm6 \n\t" //d3 + "movq %%mm0, %%mm5 \n\t" + + //transpose 4x4 + "movq %%mm7, %%mm2 \n\t" + "punpcklwd %%mm6, %%mm7 \n\t" + + "paddw %%mm1, %%mm0 \n\t" //d0 + "punpckhwd %%mm6, %%mm2 \n\t" + + "psubw %%mm1, %%mm5 \n\t" //d1 + "movq %%mm0, %%mm6 \n\t" + + "movq 1*8+%3, %%mm1 \n\t" + "punpcklwd %%mm5, %%mm0 \n\t" + + "punpckhwd %%mm5, %%mm6 \n\t" + "movq %%mm0, %%mm5 \n\t" + + "punpckldq %%mm7, %%mm0 \n\t" //0 + "paddw %%mm4, %%mm3 \n\t" + + "punpckhdq %%mm7, %%mm5 \n\t" //1 + "movq %%mm6, %%mm7 \n\t" + + "movq %%mm0, "DCTSIZE_S"*0*2(%%"REG_D") \n\t" + "punpckldq %%mm2, %%mm6 \n\t" //2 + + "movq %%mm5, "DCTSIZE_S"*1*2(%%"REG_D") \n\t" + "punpckhdq %%mm2, %%mm7 \n\t" //3 + + "movq %%mm6, "DCTSIZE_S"*2*2(%%"REG_D") \n\t" + "paddw 
%%mm1, %%mm4 \n\t" + + "movq %%mm7, "DCTSIZE_S"*3*2(%%"REG_D") \n\t" + "psllw $2, %%mm3 \n\t" //t10 + + "movq 0*8+%3, %%mm2 \n\t" + "psllw $2, %%mm4 \n\t" //t11 + + "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm4 \n\t" //z3 + "paddw %%mm2, %%mm1 \n\t" + + "psllw $2, %%mm1 \n\t" //t12 + "movq %%mm3, %%mm0 \n\t" + + "pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm0 \n\t" + "psubw %%mm1, %%mm3 \n\t" + + "pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t" //z5 + "movq %%mm2, %%mm5 \n\t" + + "pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm1 \n\t" + "psubw %%mm4, %%mm2 \n\t" //z13 + + "paddw %%mm4, %%mm5 \n\t" //z11 + "movq %%mm2, %%mm6 \n\t" + + "paddw %%mm3, %%mm0 \n\t" //z2 + "movq %%mm5, %%mm7 \n\t" + + "paddw %%mm0, %%mm2 \n\t" //d4 + "psubw %%mm0, %%mm6 \n\t" //d5 + + "movq %%mm2, %%mm4 \n\t" + "paddw %%mm3, %%mm1 \n\t" //z4 + + //transpose 4x4 + "punpcklwd %%mm6, %%mm2 \n\t" + "paddw %%mm1, %%mm5 \n\t" //d6 + + "punpckhwd %%mm6, %%mm4 \n\t" + "psubw %%mm1, %%mm7 \n\t" //d7 + + "movq %%mm5, %%mm6 \n\t" + "punpcklwd %%mm7, %%mm5 \n\t" + + "punpckhwd %%mm7, %%mm6 \n\t" + "movq %%mm2, %%mm7 \n\t" + + "punpckldq %%mm5, %%mm2 \n\t" //4 + "sub %%"REG_d", %%"REG_S" \n\t" + + "punpckhdq %%mm5, %%mm7 \n\t" //5 + "movq %%mm4, %%mm5 \n\t" + + "movq %%mm2, "DCTSIZE_S"*0*2+"DCTSIZE_S"(%%"REG_D") \n\t" + "punpckldq %%mm6, %%mm4 \n\t" //6 + + "movq %%mm7, "DCTSIZE_S"*1*2+"DCTSIZE_S"(%%"REG_D") \n\t" + "punpckhdq %%mm6, %%mm5 \n\t" //7 + + "movq %%mm4, "DCTSIZE_S"*2*2+"DCTSIZE_S"(%%"REG_D") \n\t" + "add $4, %%"REG_S" \n\t" + + "movq %%mm5, "DCTSIZE_S"*3*2+"DCTSIZE_S"(%%"REG_D") \n\t" + "add $"DCTSIZE_S"*2*4, %%"REG_D" \n\t" //4 rows + "dec %%"REG_c" \n\t" + "jnz 6b \n\t" + + : "+S"(pixels), "+D"(data), "+c"(cnt), "=o"(temps) + : "a"(line_size) + : "%"REG_d); +} + +#endif // HAVE_MMX diff --git a/libavfilter/libmpcodecs/vf_harddup.c b/libavfilter/libmpcodecs/vf_harddup.c new file mode 100644 index 0000000..7ba62d4 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_harddup.c @@ -0,0 +1,92 @@ +/* + 
* This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +struct vf_priv_s { + mp_image_t *last_mpi; +}; + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + + vf->priv->last_mpi = mpi; + + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_EXPORT, 0, mpi->width, mpi->height); + + dmpi->planes[0] = mpi->planes[0]; + dmpi->stride[0] = mpi->stride[0]; + if (dmpi->flags&MP_IMGFLAG_PLANAR) { + dmpi->planes[1] = mpi->planes[1]; + dmpi->stride[1] = mpi->stride[1]; + dmpi->planes[2] = mpi->planes[2]; + dmpi->stride[2] = mpi->stride[2]; + } + + return ff_vf_next_put_image(vf, dmpi, pts); +} + +static int control(struct vf_instance *vf, int request, void* data) +{ + switch (request) { + case VFCTRL_DUPLICATE_FRAME: + if (!vf->priv->last_mpi) break; + // This is a huge hack. We assume nothing + // has been called earlier in the filter chain + // since the last put_image. This is reasonable + // because we're handling a duplicate frame! 
+ if (put_image(vf, vf->priv->last_mpi, MP_NOPTS_VALUE)) + return CONTROL_TRUE; + break; + } + return ff_vf_next_control(vf, request, data); +} + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->put_image = put_image; + vf->control = control; + vf->uninit = uninit; + vf->priv = calloc(1, sizeof(struct vf_priv_s)); + return 1; +} + +const vf_info_t ff_vf_info_harddup = { + "resubmit duplicate frames for encoding", + "harddup", + "Rich Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_il.c b/libavfilter/libmpcodecs/vf_il.c new file mode 100644 index 0000000..ee10d7b --- /dev/null +++ b/libavfilter/libmpcodecs/vf_il.c @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <assert.h> + +#include "mp_msg.h" +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libvo/fastmemcpy.h" + + +//===========================================================================// + +typedef struct FilterParam{ + int interleave; + int swap; +}FilterParam; + +struct vf_priv_s { + FilterParam lumaParam; + FilterParam chromaParam; +}; + +/***************************************************************************/ + +static void interleave(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, int interleave, int swap){ + const int a= swap; + const int b= 1-a; + const int m= h>>1; + int y; + + switch(interleave){ + case -1: + for(y=0; y < m; y++){ + fast_memcpy(dst + dstStride* y , src + srcStride*(y*2 + a), w); + fast_memcpy(dst + dstStride*(y + m), src + srcStride*(y*2 + b), w); + } + break; + case 0: + for(y=0; y < m; y++){ + fast_memcpy(dst + dstStride* y*2 , src + srcStride*(y*2 + a), w); + fast_memcpy(dst + dstStride*(y*2+1), src + srcStride*(y*2 + b), w); + } + break; + case 1: + for(y=0; y < m; y++){ + fast_memcpy(dst + dstStride*(y*2+a), src + srcStride* y , w); + fast_memcpy(dst + dstStride*(y*2+b), src + srcStride*(y + m), w); + } + break; + } +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + int w; + FilterParam *luma = &vf->priv->lumaParam; + FilterParam *chroma= &vf->priv->chromaParam; + + mp_image_t *dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w,mpi->h); + + if(mpi->flags&MP_IMGFLAG_PLANAR) + w= mpi->w; + else + w= mpi->w * mpi->bpp/8; + + interleave(dmpi->planes[0], mpi->planes[0], + w, mpi->h, dmpi->stride[0], mpi->stride[0], luma->interleave, luma->swap); + + if(mpi->flags&MP_IMGFLAG_PLANAR){ + int cw= mpi->w >> mpi->chroma_x_shift; + int ch= mpi->h >> mpi->chroma_y_shift; + + interleave(dmpi->planes[1], 
mpi->planes[1], cw,ch, + dmpi->stride[1], mpi->stride[1], chroma->interleave, luma->swap); + interleave(dmpi->planes[2], mpi->planes[2], cw,ch, + dmpi->stride[2], mpi->stride[2], chroma->interleave, luma->swap); + } + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +//===========================================================================// + +static void parse(FilterParam *fp, char* args){ + char *pos; + char *max= strchr(args, ':'); + + if(!max) max= args + strlen(args); + + pos= strchr(args, 's'); + if(pos && pos<max) fp->swap=1; + pos= strchr(args, 'i'); + if(pos && pos<max) fp->interleave=1; + pos= strchr(args, 'd'); + if(pos && pos<max) fp->interleave=-1; +} + +static int vf_open(vf_instance_t *vf, char *args){ + + vf->put_image=put_image; +// vf->get_image=get_image; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + if(args) + { + char *arg2= strchr(args,':'); + if(arg2) parse(&vf->priv->chromaParam, arg2+1); + parse(&vf->priv->lumaParam, args); + } + + return 1; +} + +const vf_info_t ff_vf_info_il = { + "(de)interleave", + "il", + "Michael Niedermayer", + "", + vf_open, + NULL +}; + +//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_ilpack.c b/libavfilter/libmpcodecs/vf_ilpack.c new file mode 100644 index 0000000..4db6c0a --- /dev/null +++ b/libavfilter/libmpcodecs/vf_ilpack.c @@ -0,0 +1,458 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libavutil/attributes.h" +#include "libavutil/x86/asm.h" + +typedef void (pack_func_t)(unsigned char *dst, unsigned char *y, + unsigned char *u, unsigned char *v, int w, int us, int vs); + +struct vf_priv_s { + int mode; + pack_func_t *pack[2]; +}; + +static void pack_nn_C(unsigned char *dst, unsigned char *y, + unsigned char *u, unsigned char *v, int w, + int av_unused us, int av_unused vs) +{ + int j; + for (j = w/2; j; j--) { + *dst++ = *y++; + *dst++ = *u++; + *dst++ = *y++; + *dst++ = *v++; + } +} + +static void pack_li_0_C(unsigned char *dst, unsigned char *y, + unsigned char *u, unsigned char *v, int w, int us, int vs) +{ + int j; + for (j = w/2; j; j--) { + *dst++ = *y++; + *dst++ = (u[us+us] + 7*u[0])>>3; + *dst++ = *y++; + *dst++ = (v[vs+vs] + 7*v[0])>>3; + u++; v++; + } +} + +static void pack_li_1_C(unsigned char *dst, unsigned char *y, + unsigned char *u, unsigned char *v, int w, int us, int vs) +{ + int j; + for (j = w/2; j; j--) { + *dst++ = *y++; + *dst++ = (3*u[us+us] + 5*u[0])>>3; + *dst++ = *y++; + *dst++ = (3*v[vs+vs] + 5*v[0])>>3; + u++; v++; + } +} + +#if HAVE_MMX +static void pack_nn_MMX(unsigned char *dst, unsigned char *y, + unsigned char *u, unsigned char *v, int w, + int av_unused us, int av_unused vs) +{ + __asm__ volatile ("" + ASMALIGN(4) + "1: \n\t" + "movq (%0), %%mm1 \n\t" + "movq (%0), %%mm2 \n\t" + "movq (%1), %%mm4 \n\t" + "movq (%2), %%mm6 \n\t" + "punpcklbw %%mm6, %%mm4 \n\t" + "punpcklbw %%mm4, %%mm1 \n\t" + "punpckhbw %%mm4, %%mm2 \n\t" + + "add $8, %0 \n\t" + "add $4, %1 \n\t" + 
"add $4, %2 \n\t" + "movq %%mm1, (%3) \n\t" + "movq %%mm2, 8(%3) \n\t" + "add $16, %3 \n\t" + "decl %4 \n\t" + "jnz 1b \n\t" + "emms \n\t" + : + : "r" (y), "r" (u), "r" (v), "r" (dst), "r" (w/8) + : "memory" + ); + pack_nn_C(dst, y, u, v, (w&7), 0, 0); +} + +#if HAVE_EBX_AVAILABLE +static void pack_li_0_MMX(unsigned char *dst, unsigned char *y, + unsigned char *u, unsigned char *v, int w, int us, int vs) +{ + __asm__ volatile ("" + "push %%"REG_BP" \n\t" +#if ARCH_X86_64 + "mov %6, %%"REG_BP" \n\t" +#else + "movl 4(%%"REG_d"), %%"REG_BP" \n\t" + "movl (%%"REG_d"), %%"REG_d" \n\t" +#endif + "pxor %%mm0, %%mm0 \n\t" + + ASMALIGN(4) + ".Lli0: \n\t" + "movq (%%"REG_S"), %%mm1 \n\t" + "movq (%%"REG_S"), %%mm2 \n\t" + + "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t" + "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t" + "punpcklbw %%mm0, %%mm4 \n\t" + "punpcklbw %%mm0, %%mm6 \n\t" + "movq (%%"REG_a"), %%mm3 \n\t" + "movq (%%"REG_b"), %%mm5 \n\t" + "punpcklbw %%mm0, %%mm3 \n\t" + "punpcklbw %%mm0, %%mm5 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "psrlw $3, %%mm4 \n\t" + "psrlw $3, %%mm6 \n\t" + "packuswb %%mm4, %%mm4 \n\t" + "packuswb %%mm6, %%mm6 \n\t" + "punpcklbw %%mm6, %%mm4 \n\t" + "punpcklbw %%mm4, %%mm1 \n\t" + "punpckhbw %%mm4, %%mm2 \n\t" + + "movq %%mm1, (%%"REG_D") \n\t" + "movq %%mm2, 8(%%"REG_D") \n\t" + + "movq 8(%%"REG_S"), %%mm1 \n\t" + "movq 8(%%"REG_S"), %%mm2 \n\t" + + "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t" + "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t" + "punpckhbw %%mm0, %%mm4 \n\t" + "punpckhbw %%mm0, %%mm6 \n\t" + "movq (%%"REG_a"), %%mm3 \n\t" + "movq (%%"REG_b"), %%mm5 \n\t" + "punpckhbw %%mm0, %%mm3 
\n\t" + "punpckhbw %%mm0, %%mm5 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "psrlw $3, %%mm4 \n\t" + "psrlw $3, %%mm6 \n\t" + "packuswb %%mm4, %%mm4 \n\t" + "packuswb %%mm6, %%mm6 \n\t" + "punpcklbw %%mm6, %%mm4 \n\t" + "punpcklbw %%mm4, %%mm1 \n\t" + "punpckhbw %%mm4, %%mm2 \n\t" + + "add $16, %%"REG_S" \n\t" + "add $8, %%"REG_a" \n\t" + "add $8, %%"REG_b" \n\t" + + "movq %%mm1, 16(%%"REG_D") \n\t" + "movq %%mm2, 24(%%"REG_D") \n\t" + "add $32, %%"REG_D" \n\t" + + "decl %%ecx \n\t" + "jnz .Lli0 \n\t" + "emms \n\t" + "pop %%"REG_BP" \n\t" + : + : "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16), +#if ARCH_X86_64 + "d" ((x86_reg)us), "r" ((x86_reg)vs) +#else + "d" (&us) +#endif + : "memory" + ); + pack_li_0_C(dst, y, u, v, (w&15), us, vs); +} + +static void pack_li_1_MMX(unsigned char *dst, unsigned char *y, + unsigned char *u, unsigned char *v, int w, int us, int vs) +{ + __asm__ volatile ("" + "push %%"REG_BP" \n\t" +#if ARCH_X86_64 + "mov %6, %%"REG_BP" \n\t" +#else + "movl 4(%%"REG_d"), %%"REG_BP" \n\t" + "movl (%%"REG_d"), %%"REG_d" \n\t" +#endif + "pxor %%mm0, %%mm0 \n\t" + + ASMALIGN(4) + ".Lli1: \n\t" + "movq (%%"REG_S"), %%mm1 \n\t" + "movq (%%"REG_S"), %%mm2 \n\t" + + "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t" + "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t" + "punpcklbw %%mm0, %%mm4 \n\t" + "punpcklbw %%mm0, %%mm6 \n\t" + "movq (%%"REG_a"), %%mm3 \n\t" + "movq (%%"REG_b"), %%mm5 \n\t" + "punpcklbw %%mm0, %%mm3 \n\t" + "punpcklbw %%mm0, %%mm5 \n\t" + "movq %%mm4, %%mm7 \n\t" + "paddw %%mm4, %%mm4 \n\t" + "paddw %%mm7, %%mm4 \n\t" + "movq %%mm6, %%mm7 \n\t" + "paddw %%mm6, %%mm6 \n\t" + "paddw %%mm7, %%mm6 
\n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "psrlw $3, %%mm4 \n\t" + "psrlw $3, %%mm6 \n\t" + "packuswb %%mm4, %%mm4 \n\t" + "packuswb %%mm6, %%mm6 \n\t" + "punpcklbw %%mm6, %%mm4 \n\t" + "punpcklbw %%mm4, %%mm1 \n\t" + "punpckhbw %%mm4, %%mm2 \n\t" + + "movq %%mm1, (%%"REG_D") \n\t" + "movq %%mm2, 8(%%"REG_D") \n\t" + + "movq 8(%%"REG_S"), %%mm1 \n\t" + "movq 8(%%"REG_S"), %%mm2 \n\t" + + "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t" + "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t" + "punpckhbw %%mm0, %%mm4 \n\t" + "punpckhbw %%mm0, %%mm6 \n\t" + "movq (%%"REG_a"), %%mm3 \n\t" + "movq (%%"REG_b"), %%mm5 \n\t" + "punpckhbw %%mm0, %%mm3 \n\t" + "punpckhbw %%mm0, %%mm5 \n\t" + "movq %%mm4, %%mm7 \n\t" + "paddw %%mm4, %%mm4 \n\t" + "paddw %%mm7, %%mm4 \n\t" + "movq %%mm6, %%mm7 \n\t" + "paddw %%mm6, %%mm6 \n\t" + "paddw %%mm7, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "paddw %%mm3, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "psrlw $3, %%mm4 \n\t" + "psrlw $3, %%mm6 \n\t" + "packuswb %%mm4, %%mm4 \n\t" + "packuswb %%mm6, %%mm6 \n\t" + "punpcklbw %%mm6, %%mm4 \n\t" + "punpcklbw %%mm4, %%mm1 \n\t" + "punpckhbw %%mm4, %%mm2 \n\t" + + "add $16, %%"REG_S" \n\t" + "add $8, %%"REG_a" \n\t" + "add $8, %%"REG_b" \n\t" + + "movq %%mm1, 16(%%"REG_D") \n\t" + "movq %%mm2, 24(%%"REG_D") \n\t" + "add $32, %%"REG_D" \n\t" + + "decl %%ecx \n\t" + "jnz .Lli1 \n\t" + "emms \n\t" + "pop %%"REG_BP" \n\t" + : + : "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16), +#if ARCH_X86_64 + "d" ((x86_reg)us), "r" ((x86_reg)vs) +#else + "d" (&us) +#endif + : "memory" + ); + 
pack_li_1_C(dst, y, u, v, (w&15), us, vs); +} +#endif /* HAVE_EBX_AVAILABLE */ +#endif + +static pack_func_t *pack_nn; +static pack_func_t *pack_li_0; +static pack_func_t *pack_li_1; + +static void ilpack(unsigned char *dst, unsigned char *src[3], + int dststride, int srcstride[3], int w, int h, pack_func_t *pack[2]) +{ + int i; + unsigned char *y, *u, *v; + int ys = srcstride[0], us = srcstride[1], vs = srcstride[2]; + int a, b; + + y = src[0]; + u = src[1]; + v = src[2]; + + pack_nn(dst, y, u, v, w, 0, 0); + y += ys; dst += dststride; + pack_nn(dst, y, u+us, v+vs, w, 0, 0); + y += ys; dst += dststride; + for (i=2; i<h-2; i++) { + a = (i&2) ? 1 : -1; + b = (i&1) ^ ((i&2)>>1); + pack[b](dst, y, u, v, w, us*a, vs*a); + y += ys; + if ((i&3) == 1) { + u -= us; + v -= vs; + } else { + u += us; + v += vs; + } + dst += dststride; + } + pack_nn(dst, y, u, v, w, 0, 0); + y += ys; dst += dststride; u += us; v += vs; + pack_nn(dst, y, u, v, w, 0, 0); +} + + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + + // hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next, IMGFMT_YUY2, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w, mpi->h); + + ilpack(dmpi->planes[0], mpi->planes, dmpi->stride[0], mpi->stride, mpi->w, mpi->h, vf->priv->pack); + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + /* FIXME - also support UYVY output? 
*/ + return ff_vf_next_config(vf, width, height, d_width, d_height, flags, IMGFMT_YUY2); +} + + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* FIXME - really any YUV 4:2:0 input format should work */ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf,IMGFMT_YUY2); + } + return 0; +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->config=config; + vf->query_format=query_format; + vf->put_image=put_image; + vf->priv = calloc(1, sizeof(struct vf_priv_s)); + vf->priv->mode = 1; + if (args) sscanf(args, "%d", &vf->priv->mode); + + pack_nn = pack_nn_C; + pack_li_0 = pack_li_0_C; + pack_li_1 = pack_li_1_C; +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) { + pack_nn = pack_nn_MMX; +#if HAVE_EBX_AVAILABLE + pack_li_0 = pack_li_0_MMX; + pack_li_1 = pack_li_1_MMX; +#endif + } +#endif + + switch(vf->priv->mode) { + case 0: + vf->priv->pack[0] = vf->priv->pack[1] = pack_nn; + break; + default: + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, + "ilpack: unknown mode %d (fallback to linear)\n", + vf->priv->mode); + /* Fallthrough */ + case 1: + vf->priv->pack[0] = pack_li_0; + vf->priv->pack[1] = pack_li_1; + break; + } + + return 1; +} + +const vf_info_t ff_vf_info_ilpack = { + "4:2:0 planar -> 4:2:2 packed reinterlacer", + "ilpack", + "Richard Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_ivtc.c b/libavfilter/libmpcodecs/vf_ivtc.c new file mode 100644 index 0000000..8a47a57 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_ivtc.c @@ -0,0 +1,550 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libavutil/x86/asm.h" +#include "libvo/fastmemcpy.h" + + +struct metrics { + /* difference: total, even lines, odd lines */ + int d, e, o; + /* noise: temporal, spacial (current), spacial (past) */ + int t, s, p; +}; + +struct frameinfo { + /* peak, relative, mean */ + struct metrics p, r, m; +}; + +struct vf_priv_s { + struct frameinfo fi[2]; + mp_image_t *dmpi; + int first; + int drop, lastdrop, dropnext; + int inframes, outframes; +}; + +enum { + F_DROP, + F_MERGE, + F_NEXT, + F_SHOW +}; + +#if HAVE_MMX && HAVE_EBX_AVAILABLE +static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char *new, int os, int ns) +{ + int i; + short out[24]; // output buffer for the partial metrics from the mmx code + + __asm__ ( + "movl $4, %%ecx \n\t" + "pxor %%mm4, %%mm4 \n\t" // 4 even difference sums + "pxor %%mm5, %%mm5 \n\t" // 4 odd difference sums + "pxor %%mm7, %%mm7 \n\t" // all zeros + + ASMALIGN(4) + "1: \n\t" + + // Even difference + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_S"), %%mm2 \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "movq (%%"REG_D"), %%mm1 \n\t" + "add %%"REG_b", %%"REG_D" \n\t" + "psubusb %%mm1, %%mm2 \n\t" + "psubusb %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm0 \n\t" + "movq %%mm1, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpckhbw 
%%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddw %%mm0, %%mm4 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm2, %%mm4 \n\t" + "paddw %%mm3, %%mm4 \n\t" + + // Odd difference + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_S"), %%mm2 \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "movq (%%"REG_D"), %%mm1 \n\t" + "add %%"REG_b", %%"REG_D" \n\t" + "psubusb %%mm1, %%mm2 \n\t" + "psubusb %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm0 \n\t" + "movq %%mm1, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddw %%mm0, %%mm5 \n\t" + "paddw %%mm1, %%mm5 \n\t" + "paddw %%mm2, %%mm5 \n\t" + "paddw %%mm3, %%mm5 \n\t" + + "decl %%ecx \n\t" + "jnz 1b \n\t" + "movq %%mm4, (%%"REG_d") \n\t" + "movq %%mm5, 8(%%"REG_d") \n\t" + : + : "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out) + : "memory" + ); + m->e = out[0]+out[1]+out[2]+out[3]; + m->o = out[4]+out[5]+out[6]+out[7]; + m->d = m->e + m->o; + + __asm__ ( + // First loop to measure first four columns + "movl $4, %%ecx \n\t" + "pxor %%mm4, %%mm4 \n\t" // Past spacial noise + "pxor %%mm5, %%mm5 \n\t" // Temporal noise + "pxor %%mm6, %%mm6 \n\t" // Current spacial noise + + ASMALIGN(4) + "2: \n\t" + + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "movq (%%"REG_D"), %%mm2 \n\t" + "movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t" + "add %%"REG_b", %%"REG_D" \n\t" + "add %%"REG_b", %%"REG_D" \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm1 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm3 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm1, %%mm5 \n\t" + "paddw %%mm3, %%mm6 \n\t" + "psubw %%mm0, %%mm4 \n\t" + "psubw %%mm2, %%mm5 \n\t" + "psubw %%mm2, %%mm6 \n\t" + + "decl %%ecx \n\t" + "jnz 2b \n\t" + + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "pcmpgtw %%mm4, %%mm1 \n\t" + "pcmpgtw %%mm5, 
%%mm2 \n\t" + "pcmpgtw %%mm6, %%mm3 \n\t" + "pxor %%mm1, %%mm4 \n\t" + "pxor %%mm2, %%mm5 \n\t" + "pxor %%mm3, %%mm6 \n\t" + "psubw %%mm1, %%mm4 \n\t" + "psubw %%mm2, %%mm5 \n\t" + "psubw %%mm3, %%mm6 \n\t" + "movq %%mm4, (%%"REG_d") \n\t" + "movq %%mm5, 16(%%"REG_d") \n\t" + "movq %%mm6, 32(%%"REG_d") \n\t" + + "mov %%"REG_a", %%"REG_c" \n\t" + "shl $3, %%"REG_c" \n\t" + "sub %%"REG_c", %%"REG_S" \n\t" + "mov %%"REG_b", %%"REG_c" \n\t" + "shl $3, %%"REG_c" \n\t" + "sub %%"REG_c", %%"REG_D" \n\t" + + // Second loop for the last four columns + "movl $4, %%ecx \n\t" + "pxor %%mm4, %%mm4 \n\t" + "pxor %%mm5, %%mm5 \n\t" + "pxor %%mm6, %%mm6 \n\t" + + ASMALIGN(4) + "3: \n\t" + + "movq (%%"REG_S"), %%mm0 \n\t" + "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "add %%"REG_a", %%"REG_S" \n\t" + "movq (%%"REG_D"), %%mm2 \n\t" + "movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t" + "add %%"REG_b", %%"REG_D" \n\t" + "add %%"REG_b", %%"REG_D" \n\t" + "punpckhbw %%mm7, %%mm0 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddw %%mm1, %%mm4 \n\t" + "paddw %%mm1, %%mm5 \n\t" + "paddw %%mm3, %%mm6 \n\t" + "psubw %%mm0, %%mm4 \n\t" + "psubw %%mm2, %%mm5 \n\t" + "psubw %%mm2, %%mm6 \n\t" + + "decl %%ecx \n\t" + "jnz 3b \n\t" + + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "pcmpgtw %%mm4, %%mm1 \n\t" + "pcmpgtw %%mm5, %%mm2 \n\t" + "pcmpgtw %%mm6, %%mm3 \n\t" + "pxor %%mm1, %%mm4 \n\t" + "pxor %%mm2, %%mm5 \n\t" + "pxor %%mm3, %%mm6 \n\t" + "psubw %%mm1, %%mm4 \n\t" + "psubw %%mm2, %%mm5 \n\t" + "psubw %%mm3, %%mm6 \n\t" + "movq %%mm4, 8(%%"REG_d") \n\t" + "movq %%mm5, 24(%%"REG_d") \n\t" + "movq %%mm6, 40(%%"REG_d") \n\t" + + "emms \n\t" + : + : "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out) + : "memory" + ); + m->p = m->t = m->s = 0; + for (i=0; i<8; i++) { + m->p += out[i]; + m->t += out[8+i]; + m->s += out[16+i]; + } + //printf("e=%d o=%d d=%d p=%d 
t=%d s=%d\n", m->e, m->o, m->d, m->p, m->t, m->s); +} +#endif + +//#define MAG(a) ((a)*(a)) +//#define MAG(a) (abs(a)) +#define MAG(a) (((a)^((a)>>31))-((a)>>31)) + +//#define LOWPASS(s) (((s)[-2] + 4*(s)[-1] + 6*(s)[0] + 4*(s)[1] + (s)[2])>>4) +//#define LOWPASS(s) (((s)[-1] + 2*(s)[0] + (s)[1])>>2) +#define LOWPASS(s) ((s)[0]) + + +static void block_diffs_C(struct metrics *m, unsigned char *old, unsigned char *new, int os, int ns) +{ + int x, y, e=0, o=0, s=0, p=0, t=0; + unsigned char *oldp, *newp; + m->s = m->p = m->t = 0; + for (x = 8; x; x--) { + oldp = old++; + newp = new++; + s = p = t = 0; + for (y = 4; y; y--) { + e += MAG(newp[0]-oldp[0]); + o += MAG(newp[ns]-oldp[os]); + s += newp[ns]-newp[0]; + p += oldp[os]-oldp[0]; + t += oldp[os]-newp[0]; + oldp += os<<1; + newp += ns<<1; + } + m->s += MAG(s); + m->p += MAG(p); + m->t += MAG(t); + } + m->e = e; + m->o = o; + m->d = e+o; +} + +static void (*block_diffs)(struct metrics *, unsigned char *, unsigned char *, int, int); + +#define MAXUP(a,b) ((a) = ((a)>(b)) ? 
(a) : (b)) + +static void diff_planes(struct frameinfo *fi, + unsigned char *old, unsigned char *new, int w, int h, int os, int ns) +{ + int x, y; + struct metrics l; + struct metrics *peak=&fi->p, *rel=&fi->r, *mean=&fi->m; + memset(peak, 0, sizeof(struct metrics)); + memset(rel, 0, sizeof(struct metrics)); + memset(mean, 0, sizeof(struct metrics)); + for (y = 0; y < h-7; y += 8) { + for (x = 8; x < w-8-7; x += 8) { + block_diffs(&l, old+x+y*os, new+x+y*ns, os, ns); + mean->d += l.d; + mean->e += l.e; + mean->o += l.o; + mean->s += l.s; + mean->p += l.p; + mean->t += l.t; + MAXUP(peak->d, l.d); + MAXUP(peak->e, l.e); + MAXUP(peak->o, l.o); + MAXUP(peak->s, l.s); + MAXUP(peak->p, l.p); + MAXUP(peak->t, l.t); + MAXUP(rel->e, l.e-l.o); + MAXUP(rel->o, l.o-l.e); + MAXUP(rel->s, l.s-l.t); + MAXUP(rel->p, l.p-l.t); + MAXUP(rel->t, l.t-l.p); + MAXUP(rel->d, l.t-l.s); /* hack */ + } + } + x = (w/8-2)*(h/8); + mean->d /= x; + mean->e /= x; + mean->o /= x; + mean->s /= x; + mean->p /= x; + mean->t /= x; +} + +static void diff_fields(struct frameinfo *fi, mp_image_t *old, mp_image_t *new) +{ + diff_planes(fi, old->planes[0], new->planes[0], + new->w, new->h, old->stride[0], new->stride[0]); +} + +static void stats(struct frameinfo *f) +{ + ff_mp_msg(MSGT_VFILTER, MSGL_V, " pd=%d re=%d ro=%d rp=%d rt=%d rs=%d rd=%d pp=%d pt=%d ps=%d\r", + f->p.d, f->r.e, f->r.o, f->r.p, f->r.t, f->r.s, f->r.d, f->p.p, f->p.t, f->p.s); +} + +static int foo(struct vf_priv_s *p, mp_image_t *new, mp_image_t *cur) +{ + struct frameinfo *f = p->fi; + + f[0] = f[1]; + diff_fields(&f[1], cur, new); + stats(&f[1]); + + // Immediately drop this frame if it's already been used. + if (p->dropnext) { + p->dropnext = 0; + return F_DROP; + } + + // Sometimes a pulldown frame comes all by itself, so both + // its top and bottom field are duplicates from the adjacent + // two frames. 
We can just drop such a frame, but we + // immediately show the next frame instead to keep the frame + // drops evenly spaced during normal 3:2 pulldown sequences. + if ((3*f[1].r.o < f[1].r.e) && (f[1].r.s < f[1].r.d)) { + p->dropnext = 1; + return F_NEXT; + } + + // If none of these conditions hold, we will consider the frame + // progressive and just show it as-is. + if (!( (3*f[0].r.e < f[0].r.o) || + ((2*f[0].r.d < f[0].r.s) && (f[0].r.s > 1200)) || + ((2*f[1].r.t < f[1].r.p) && (f[1].r.p > 1200)) )) + return F_SHOW; + + // Otherwise, we have to decide whether to merge or drop. + // If the noise metric only increases minimally, we're off + // to a good start... + if (((2*f[1].r.t < 3*f[1].r.p) && (f[1].r.t < 3600)) || + (f[1].r.t < 900) || (f[1].r.d < 900)) { + // ...and if noise decreases or the duplicate even field + // is detected, we go ahead with the merge. + if ((3*f[0].r.e < f[0].r.o) || (2*f[1].r.t < f[1].r.p)) { + p->dropnext = 1; + return F_MERGE; + } + } + return F_DROP; +} + + + +static void copy_image(mp_image_t *dmpi, mp_image_t *mpi, int field) +{ + switch (field) { + case 0: + my_memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + break; + case 1: + my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0], + mpi->planes[0]+mpi->stride[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1], + mpi->planes[1]+mpi->stride[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2], + mpi->planes[2]+mpi->stride[2], + mpi->chroma_width, 
mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + break; + case 2: + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, + dmpi->stride[0], mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(dmpi->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[2], mpi->stride[2]); + } + break; + } +} + +static int do_put_image(struct vf_instance *vf, mp_image_t *dmpi) +{ + struct vf_priv_s *p = vf->priv; + int dropflag=0; + + if (!p->dropnext) switch (p->drop) { + case 0: + dropflag = 0; + break; + case 1: + dropflag = (++p->lastdrop >= 5); + break; + case 2: + dropflag = (++p->lastdrop >= 5) && (4*p->inframes <= 5*p->outframes); + break; + } + + if (dropflag) { + //ff_mp_msg(MSGT_VFILTER, MSGL_V, "drop! [%d/%d=%g]\n", + // p->outframes, p->inframes, (float)p->outframes/p->inframes); + ff_mp_msg(MSGT_VFILTER, MSGL_V, "!"); + p->lastdrop = 0; + return 0; + } + + p->outframes++; + return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + int ret=0; + struct vf_priv_s *p = vf->priv; + + p->inframes++; + + if (p->first) { /* hack */ + p->first = 0; + return 1; + } + + if (!p->dmpi) p->dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE | MP_IMGFLAG_READABLE, + mpi->width, mpi->height); + /* FIXME -- not correct, off by one frame! 
*/ + p->dmpi->qscale = mpi->qscale; + p->dmpi->qstride = mpi->qstride; + p->dmpi->qscale_type = mpi->qscale_type; + + switch (foo(p, mpi, p->dmpi)) { + case F_DROP: + copy_image(p->dmpi, mpi, 2); + ret = 0; + p->lastdrop = 0; + ff_mp_msg(MSGT_VFILTER, MSGL_V, "DROP\n"); + break; + case F_MERGE: + copy_image(p->dmpi, mpi, 0); + ret = do_put_image(vf, p->dmpi); + copy_image(p->dmpi, mpi, 1); + ff_mp_msg(MSGT_VFILTER, MSGL_V, "MERGE\n"); + p->dmpi = NULL; + break; + case F_NEXT: + copy_image(p->dmpi, mpi, 2); + ret = do_put_image(vf, p->dmpi); + ff_mp_msg(MSGT_VFILTER, MSGL_V, "NEXT\n"); + p->dmpi = NULL; + break; + case F_SHOW: + ret = do_put_image(vf, p->dmpi); + copy_image(p->dmpi, mpi, 2); + ff_mp_msg(MSGT_VFILTER, MSGL_V, "OK\n"); + p->dmpi = NULL; + break; + } + return ret; +} + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + struct vf_priv_s *p; + vf->put_image = put_image; + vf->query_format = query_format; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + vf->priv = p = calloc(1, sizeof(struct vf_priv_s)); + p->drop = 0; + p->first = 1; + if (args) sscanf(args, "%d", &p->drop); + block_diffs = block_diffs_C; +#if HAVE_MMX && HAVE_EBX_AVAILABLE + if(ff_gCpuCaps.hasMMX) block_diffs = block_diffs_MMX; +#endif + return 1; +} + +const vf_info_t ff_vf_info_ivtc = { + "inverse telecine, take 2", + "ivtc", + "Rich Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_kerndeint.c b/libavfilter/libmpcodecs/vf_kerndeint.c new file mode 100644 index 0000000..fca1ff1 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_kerndeint.c @@ -0,0 +1,345 @@ +/* + * Original AVISynth Filter Copyright (C) 2003 Donald A. 
Graft + * Adapted to MPlayer by Tobias Diedrich + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "mp_msg.h" +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libvo/fastmemcpy.h" + +//===========================================================================// + +struct vf_priv_s { + int frame; + int map; + int order; + int thresh; + int sharp; + int twoway; + int do_deinterlace; +}; + + +/***************************************************************************/ + + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static inline int IsRGB(mp_image_t *mpi) +{ + return mpi->imgfmt == IMGFMT_RGB; +} + +static inline int IsYUY2(mp_image_t *mpi) +{ + return mpi->imgfmt == IMGFMT_YUY2; +} + +#define PLANAR_Y 0 +#define PLANAR_U 1 +#define PLANAR_V 2 + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + int cw= mpi->w >> mpi->chroma_x_shift; + int ch= mpi->h >> 
mpi->chroma_y_shift; + int W = mpi->w, H = mpi->h; + const unsigned char *prvp, *prvpp, *prvpn, *prvpnn, *prvppp, *prvp4p, *prvp4n; + const unsigned char *srcp_saved; + const unsigned char *srcp, *srcpp, *srcpn, *srcpnn, *srcppp, *srcp3p, *srcp3n, *srcp4p, *srcp4n; + unsigned char *dstp, *dstp_saved; + int src_pitch; + int psrc_pitch; + int dst_pitch; + int x, y, z; + int n = vf->priv->frame++; + int val, hi, lo, w, h; + double valf; + int plane; + int threshold = vf->priv->thresh; + int order = vf->priv->order; + int map = vf->priv->map; + int sharp = vf->priv->sharp; + int twoway = vf->priv->twoway; + mp_image_t *dmpi, *pmpi; + + if(!vf->priv->do_deinterlace) + return ff_vf_next_put_image(vf, mpi, pts); + + dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_IP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w,mpi->h); + pmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w,mpi->h); + if(!dmpi) return 0; + + for (z=0; z<mpi->num_planes; z++) { + if (z == 0) plane = PLANAR_Y; + else if (z == 1) plane = PLANAR_U; + else plane = PLANAR_V; + + h = plane == PLANAR_Y ? H : ch; + w = plane == PLANAR_Y ? W : cw; + + srcp = srcp_saved = mpi->planes[z]; + src_pitch = mpi->stride[z]; + psrc_pitch = pmpi->stride[z]; + dstp = dstp_saved = dmpi->planes[z]; + dst_pitch = dmpi->stride[z]; + srcp = srcp_saved + (1-order) * src_pitch; + dstp = dstp_saved + (1-order) * dst_pitch; + + for (y=0; y<h; y+=2) { + fast_memcpy(dstp, srcp, w); + srcp += 2*src_pitch; + dstp += 2*dst_pitch; + } + + // Copy through the lines that will be missed below. 
+ fast_memcpy(dstp_saved + order*dst_pitch, srcp_saved + (1-order)*src_pitch, w); + fast_memcpy(dstp_saved + (2+order)*dst_pitch, srcp_saved + (3-order)*src_pitch, w); + fast_memcpy(dstp_saved + (h-2+order)*dst_pitch, srcp_saved + (h-1-order)*src_pitch, w); + fast_memcpy(dstp_saved + (h-4+order)*dst_pitch, srcp_saved + (h-3-order)*src_pitch, w); + /* For the other field choose adaptively between using the previous field + or the interpolant from the current field. */ + + prvp = pmpi->planes[z] + 5*psrc_pitch - (1-order)*psrc_pitch; + prvpp = prvp - psrc_pitch; + prvppp = prvp - 2*psrc_pitch; + prvp4p = prvp - 4*psrc_pitch; + prvpn = prvp + psrc_pitch; + prvpnn = prvp + 2*psrc_pitch; + prvp4n = prvp + 4*psrc_pitch; + srcp = srcp_saved + 5*src_pitch - (1-order)*src_pitch; + srcpp = srcp - src_pitch; + srcppp = srcp - 2*src_pitch; + srcp3p = srcp - 3*src_pitch; + srcp4p = srcp - 4*src_pitch; + srcpn = srcp + src_pitch; + srcpnn = srcp + 2*src_pitch; + srcp3n = srcp + 3*src_pitch; + srcp4n = srcp + 4*src_pitch; + dstp = dstp_saved + 5*dst_pitch - (1-order)*dst_pitch; + for (y = 5 - (1-order); y <= h - 5 - (1-order); y+=2) + { + for (x = 0; x < w; x++) + { + if ((threshold == 0) || (n == 0) || + (abs((int)prvp[x] - (int)srcp[x]) > threshold) || + (abs((int)prvpp[x] - (int)srcpp[x]) > threshold) || + (abs((int)prvpn[x] - (int)srcpn[x]) > threshold)) + { + if (map == 1) + { + int g = x & ~3; + if (IsRGB(mpi) == 1) + { + dstp[g++] = 255; + dstp[g++] = 255; + dstp[g++] = 255; + dstp[g] = 255; + x = g; + } + else if (IsYUY2(mpi) == 1) + { + dstp[g++] = 235; + dstp[g++] = 128; + dstp[g++] = 235; + dstp[g] = 128; + x = g; + } + else + { + if (plane == PLANAR_Y) dstp[x] = 235; + else dstp[x] = 128; + } + } + else + { + if (IsRGB(mpi)) + { + hi = 255; + lo = 0; + } + else if (IsYUY2(mpi)) + { + hi = (x & 1) ? 240 : 235; + lo = 16; + } + else + { + hi = (plane == PLANAR_Y) ? 
235 : 240; + lo = 16; + } + + if (sharp == 1) + { + if (twoway == 1) + valf = + 0.526*((int)srcpp[x] + (int)srcpn[x]) + + 0.170*((int)srcp[x] + (int)prvp[x]) + - 0.116*((int)srcppp[x] + (int)srcpnn[x] + (int)prvppp[x] + (int)prvpnn[x]) + - 0.026*((int)srcp3p[x] + (int)srcp3n[x]) + + 0.031*((int)srcp4p[x] + (int)srcp4n[x] + (int)prvp4p[x] + (int)prvp4n[x]); + else + valf = + 0.526*((int)srcpp[x] + (int)srcpn[x]) + + 0.170*((int)prvp[x]) + - 0.116*((int)prvppp[x] + (int)prvpnn[x]) + - 0.026*((int)srcp3p[x] + (int)srcp3n[x]) + + 0.031*((int)prvp4p[x] + (int)prvp4p[x]); + if (valf > hi) valf = hi; + else if (valf < lo) valf = lo; + dstp[x] = (int) valf; + } + else + { + if (twoway == 1) + val = (8*((int)srcpp[x] + (int)srcpn[x]) + 2*((int)srcp[x] + (int)prvp[x]) - + (int)(srcppp[x]) - (int)(srcpnn[x]) - + (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4; + else + val = (8*((int)srcpp[x] + (int)srcpn[x]) + 2*((int)prvp[x]) - + (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4; + if (val > hi) val = hi; + else if (val < lo) val = lo; + dstp[x] = (int) val; + } + } + } + else + { + dstp[x] = srcp[x]; + } + } + prvp += 2*psrc_pitch; + prvpp += 2*psrc_pitch; + prvppp += 2*psrc_pitch; + prvpn += 2*psrc_pitch; + prvpnn += 2*psrc_pitch; + prvp4p += 2*psrc_pitch; + prvp4n += 2*psrc_pitch; + srcp += 2*src_pitch; + srcpp += 2*src_pitch; + srcppp += 2*src_pitch; + srcp3p += 2*src_pitch; + srcp4p += 2*src_pitch; + srcpn += 2*src_pitch; + srcpnn += 2*src_pitch; + srcp3n += 2*src_pitch; + srcp4n += 2*src_pitch; + dstp += 2*dst_pitch; + } + + srcp = mpi->planes[z]; + dstp = pmpi->planes[z]; + for (y=0; y<h; y++) { + fast_memcpy(dstp, srcp, w); + srcp += src_pitch; + dstp += psrc_pitch; + } + } + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +//===========================================================================// + +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt) + { + case IMGFMT_YV12: + case IMGFMT_RGB: + case IMGFMT_YUY2: + return 
ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int control(struct vf_instance *vf, int request, void* data){ + switch (request) + { + case VFCTRL_GET_DEINTERLACE: + *(int*)data = vf->priv->do_deinterlace; + return CONTROL_OK; + case VFCTRL_SET_DEINTERLACE: + vf->priv->do_deinterlace = *(int*)data; + return CONTROL_OK; + } + return ff_vf_next_control (vf, request, data); +} + +static int vf_open(vf_instance_t *vf, char *args){ + + vf->control=control; + vf->config=config; + vf->put_image=put_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + vf->priv->frame = 0; + + vf->priv->map = 0; + vf->priv->order = 0; + vf->priv->thresh = 10; + vf->priv->sharp = 0; + vf->priv->twoway = 0; + vf->priv->do_deinterlace=1; + + if (args) + { + sscanf(args, "%d:%d:%d:%d:%d", + &vf->priv->thresh, &vf->priv->map, + &vf->priv->order, &vf->priv->sharp, + &vf->priv->twoway); + } + if (vf->priv->order > 1) vf->priv->order = 1; + + return 1; +} + +const vf_info_t ff_vf_info_kerndeint = { + "Kernel Deinterlacer", + "kerndeint", + "Donald Graft", + "", + vf_open, + NULL +}; + +//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_mcdeint.c b/libavfilter/libmpcodecs/vf_mcdeint.c new file mode 100644 index 0000000..b9ffaf2 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_mcdeint.c @@ -0,0 +1,340 @@ +/* + * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + + +/* +Known Issues: +* The motion estimation is somewhat at the mercy of the input, if the input + frames are created purely based on spatial interpolation then for example + a thin black line or another random and not interpolateable pattern + will cause problems + Note: completly ignoring the "unavailable" lines during motion estimation + didnt look any better, so the most obvious solution would be to improve + tfields or penalize problematic motion vectors ... + +* If non iterative ME is used then snow currently ignores the OBMC window + and as a result sometimes creates artifacts + +* only past frames are used, we should ideally use future frames too, something + like filtering the whole movie in forward and then backward direction seems + like a interresting idea but the current filter framework is FAR from + supporting such things + +* combining the motion compensated image with the input image also isnt + as trivial as it seems, simple blindly taking even lines from one and + odd ones from the other doesnt work at all as ME/MC sometimes simple + has nothing in the previous frames which matches the current, the current + algo has been found by trial and error and almost certainly can be + improved ... 
+*/ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "mp_msg.h" +#include "cpudetect.h" + +#include "libavutil/common.h" +#include "libavutil/internal.h" +#include "libavutil/intreadwrite.h" +#include "libavcodec/avcodec.h" +#include "libavcodec/dsputil.h" + +#undef fprintf +#undef free +#undef malloc + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "av_helpers.h" + +#define MIN(a,b) ((a) > (b) ? (b) : (a)) +#define MAX(a,b) ((a) < (b) ? (b) : (a)) +#define ABS(a) ((a) > 0 ? (a) : (-(a))) + +//===========================================================================// + +struct vf_priv_s { + int mode; + int qp; + int parity; +#if 0 + int temp_stride[3]; + uint8_t *src[3]; + int16_t *temp[3]; +#endif + int outbuf_size; + uint8_t *outbuf; + AVCodecContext *avctx_enc; + AVFrame *frame; + AVFrame *frame_dec; +}; + +static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){ + int x, y, i; + + for(i=0; i<3; i++){ + p->frame->data[i]= src[i]; + p->frame->linesize[i]= src_stride[i]; + } + + p->avctx_enc->me_cmp= + p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? 
FF_CMP_ODD : FF_CMP_EVEN)*/; + p->frame->quality= p->qp*FF_QP2LAMBDA; + avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame); + p->frame_dec = p->avctx_enc->coded_frame; + + for(i=0; i<3; i++){ + int is_chroma= !!i; + int w= width >>is_chroma; + int h= height>>is_chroma; + int fils= p->frame_dec->linesize[i]; + int srcs= src_stride[i]; + + for(y=0; y<h; y++){ + if((y ^ p->parity) & 1){ + for(x=0; x<w; x++){ + if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this + uint8_t *filp= &p->frame_dec->data[i][x + y*fils]; + uint8_t *srcp= &src[i][x + y*srcs]; + int diff0= filp[-fils] - srcp[-srcs]; + int diff1= filp[+fils] - srcp[+srcs]; + int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1]) + +ABS(srcp[-srcs ] - srcp[+srcs ]) + +ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1; + int temp= filp[0]; + +#define CHECK(j)\ + { int score= ABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])\ + + ABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])\ + + ABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)]);\ + if(score < spatial_score){\ + spatial_score= score;\ + diff0= filp[-fils+(j)] - srcp[-srcs+(j)];\ + diff1= filp[+fils-(j)] - srcp[+srcs-(j)]; + + CHECK(-1) CHECK(-2) }} }} + CHECK( 1) CHECK( 2) }} }} +#if 0 + if((diff0 ^ diff1) > 0){ + int mindiff= ABS(diff0) > ABS(diff1) ? diff1 : diff0; + temp-= mindiff; + } +#elif 1 + if(diff0 + diff1 > 0) + temp-= (diff0 + diff1 - ABS( ABS(diff0) - ABS(diff1) )/2)/2; + else + temp-= (diff0 + diff1 + ABS( ABS(diff0) - ABS(diff1) )/2)/2; +#else + temp-= (diff0 + diff1)/2; +#endif +#if 1 + filp[0]= + dst[i][x + y*dst_stride[i]]= temp > 255U ? ~(temp>>31) : temp; +#else + dst[i][x + y*dst_stride[i]]= filp[0]; + filp[0]= temp > 255U ? 
~(temp>>31) : temp; +#endif + }else + dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils]; + } + } + } + for(y=0; y<h; y++){ + if(!((y ^ p->parity) & 1)){ + for(x=0; x<w; x++){ +#if 1 + p->frame_dec->data[i][x + y*fils]= + dst[i][x + y*dst_stride[i]]= src[i][x + y*srcs]; +#else + dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils]; + p->frame_dec->data[i][x + y*fils]= src[i][x + y*srcs]; +#endif + } + } + } + } + p->parity ^= 1; + +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + int i; + AVCodec *enc= avcodec_find_encoder(AV_CODEC_ID_SNOW); + + for(i=0; i<3; i++){ + AVCodecContext *avctx_enc; + AVDictionary *opts = NULL; +#if 0 + int is_chroma= !!i; + int w= ((width + 31) & (~31))>>is_chroma; + int h= ((height + 31) & (~31))>>is_chroma; + + vf->priv->temp_stride[i]= w; + vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t)); + vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t)); +#endif + avctx_enc= + vf->priv->avctx_enc= avcodec_alloc_context3(enc); + avctx_enc->width = width; + avctx_enc->height = height; + avctx_enc->time_base= (AVRational){1,25}; // meaningless + avctx_enc->gop_size = 300; + avctx_enc->max_b_frames= 0; + avctx_enc->pix_fmt = AV_PIX_FMT_YUV420P; + avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY; + avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; + avctx_enc->global_quality= 1; + av_dict_set(&opts, "memc_only", "1", 0); + avctx_enc->me_cmp= + avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE; + avctx_enc->mb_cmp= FF_CMP_SSE; + + switch(vf->priv->mode){ + case 3: + avctx_enc->refs= 3; + case 2: + avctx_enc->me_method= ME_ITER; + case 1: + avctx_enc->flags |= CODEC_FLAG_4MV; + avctx_enc->dia_size=2; +// avctx_enc->mb_decision = MB_DECISION_RD; + case 0: + avctx_enc->flags |= CODEC_FLAG_QPEL; + } + + avcodec_open2(avctx_enc, enc, &opts); + av_dict_free(&opts); + + } + 
vf->priv->frame= avcodec_alloc_frame(); + + vf->priv->outbuf_size= width*height*10; + vf->priv->outbuf= malloc(vf->priv->outbuf_size); + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi){ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change +return; //caused problems, dunno why + // ok, we can do pp in-place (or pp disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + mp_image_t *dmpi; + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // no DR, so get a new image! 
hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, + MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE, + mpi->width,mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + }else{ + dmpi=vf->dmpi; + } + + filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h); + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct vf_instance *vf){ + if(!vf->priv) return; + +#if 0 + for(i=0; i<3; i++){ + free(vf->priv->temp[i]); + vf->priv->temp[i]= NULL; + free(vf->priv->src[i]); + vf->priv->src[i]= NULL; + } +#endif + if (vf->priv->avctx_enc) { + avcodec_close(vf->priv->avctx_enc); + av_freep(&vf->priv->avctx_enc); + } + + free(vf->priv->outbuf); + free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt){ + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_Y800: + case IMGFMT_Y8: + return ff_vf_next_query_format(vf,fmt); + } + return 0; +} + +static int vf_open(vf_instance_t *vf, char *args){ + + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + ff_init_avcodec(); + + vf->priv->mode=0; + vf->priv->parity= -1; + vf->priv->qp=1; + + if (args) sscanf(args, "%d:%d:%d", &vf->priv->mode, &vf->priv->parity, &vf->priv->qp); + + return 1; +} + +const vf_info_t ff_vf_info_mcdeint = { + "motion compensating deinterlacer", + "mcdeint", + "Michael Niedermayer", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_noise.c b/libavfilter/libmpcodecs/vf_noise.c new file mode 100644 index 0000000..3b946e9 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_noise.c @@ -0,0 +1,475 @@ +/* + * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at> + * 
+ * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#if HAVE_MALLOC_H +#include <malloc.h> +#endif + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libvo/fastmemcpy.h" +#include "libavutil/mem.h" +#include "libavutil/x86/asm.h" + +#define MAX_NOISE 4096 +#define MAX_SHIFT 1024 +#define MAX_RES (MAX_NOISE-MAX_SHIFT) + +//===========================================================================// + +static inline void lineNoise_C(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift); +static inline void lineNoiseAvg_C(uint8_t *dst, uint8_t *src, int len, int8_t **shift); + +static void (*lineNoise)(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift)= lineNoise_C; +static void (*lineNoiseAvg)(uint8_t *dst, uint8_t *src, int len, int8_t **shift)= lineNoiseAvg_C; + +typedef struct FilterParam{ + int strength; + int uniform; + int temporal; + int quality; + int averaged; + int pattern; + int shiftptr; + int8_t *noise; + int8_t *prev_shift[MAX_RES][3]; +}FilterParam; + +struct vf_priv_s { + FilterParam lumaParam; + FilterParam chromaParam; + unsigned int 
outfmt; +}; + +static int nonTempRandShift_init; +static int nonTempRandShift[MAX_RES]; + +static int patt[4] = { + -1,0,1,0 +}; + +#define RAND_N(range) ((int) ((double)range*rand()/(RAND_MAX+1.0))) +static int8_t *initNoise(FilterParam *fp){ + int strength= fp->strength; + int uniform= fp->uniform; + int averaged= fp->averaged; + int pattern= fp->pattern; + int8_t *noise= av_malloc(MAX_NOISE*sizeof(int8_t)); + int i, j; + + srand(123457); + + for(i=0,j=0; i<MAX_NOISE; i++,j++) + { + if(uniform) { + if (averaged) { + if (pattern) { + noise[i]= (RAND_N(strength) - strength/2)/6 + +patt[j%4]*strength*0.25/3; + } else { + noise[i]= (RAND_N(strength) - strength/2)/3; + } + } else { + if (pattern) { + noise[i]= (RAND_N(strength) - strength/2)/2 + + patt[j%4]*strength*0.25; + } else { + noise[i]= RAND_N(strength) - strength/2; + } + } + } else { + double x1, x2, w, y1; + do { + x1 = 2.0 * rand()/(float)RAND_MAX - 1.0; + x2 = 2.0 * rand()/(float)RAND_MAX - 1.0; + w = x1 * x1 + x2 * x2; + } while ( w >= 1.0 ); + + w = sqrt( (-2.0 * log( w ) ) / w ); + y1= x1 * w; + y1*= strength / sqrt(3.0); + if (pattern) { + y1 /= 2; + y1 += patt[j%4]*strength*0.35; + } + if (y1<-128) y1=-128; + else if(y1> 127) y1= 127; + if (averaged) y1 /= 3.0; + noise[i]= (int)y1; + } + if (RAND_N(6) == 0) j--; + } + + + for (i = 0; i < MAX_RES; i++) + for (j = 0; j < 3; j++) + fp->prev_shift[i][j] = noise + (rand()&(MAX_SHIFT-1)); + + if(!nonTempRandShift_init){ + for(i=0; i<MAX_RES; i++){ + nonTempRandShift[i]= rand()&(MAX_SHIFT-1); + } + nonTempRandShift_init = 1; + } + + fp->noise= noise; + fp->shiftptr= 0; + return noise; +} + +/***************************************************************************/ + +#if HAVE_MMX +static inline void lineNoise_MMX(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){ + x86_reg mmx_len= len&(~7); + noise+=shift; + + __asm__ volatile( + "mov %3, %%"REG_a" \n\t" + "pcmpeqb %%mm7, %%mm7 \n\t" + "psllw $15, %%mm7 \n\t" + "packsswb %%mm7, %%mm7 \n\t" 
+ ASMALIGN(4) + "1: \n\t" + "movq (%0, %%"REG_a"), %%mm0 \n\t" + "movq (%1, %%"REG_a"), %%mm1 \n\t" + "pxor %%mm7, %%mm0 \n\t" + "paddsb %%mm1, %%mm0 \n\t" + "pxor %%mm7, %%mm0 \n\t" + "movq %%mm0, (%2, %%"REG_a") \n\t" + "add $8, %%"REG_a" \n\t" + " js 1b \n\t" + :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len) + : "%"REG_a + ); + if(mmx_len!=len) + lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0); +} +#endif + +//duplicate of previous except movntq +#if HAVE_MMX2 +static inline void lineNoise_MMX2(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){ + x86_reg mmx_len= len&(~7); + noise+=shift; + + __asm__ volatile( + "mov %3, %%"REG_a" \n\t" + "pcmpeqb %%mm7, %%mm7 \n\t" + "psllw $15, %%mm7 \n\t" + "packsswb %%mm7, %%mm7 \n\t" + ASMALIGN(4) + "1: \n\t" + "movq (%0, %%"REG_a"), %%mm0 \n\t" + "movq (%1, %%"REG_a"), %%mm1 \n\t" + "pxor %%mm7, %%mm0 \n\t" + "paddsb %%mm1, %%mm0 \n\t" + "pxor %%mm7, %%mm0 \n\t" + "movntq %%mm0, (%2, %%"REG_a") \n\t" + "add $8, %%"REG_a" \n\t" + " js 1b \n\t" + :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len) + : "%"REG_a + ); + if(mmx_len!=len) + lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0); +} +#endif + +static inline void lineNoise_C(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){ + int i; + noise+= shift; + for(i=0; i<len; i++) + { + int v= src[i]+ noise[i]; + if(v>255) dst[i]=255; //FIXME optimize + else if(v<0) dst[i]=0; + else dst[i]=v; + } +} + +/***************************************************************************/ + +#if HAVE_MMX +static inline void lineNoiseAvg_MMX(uint8_t *dst, uint8_t *src, int len, int8_t **shift){ + x86_reg mmx_len= len&(~7); + + __asm__ volatile( + "mov %5, %%"REG_a" \n\t" + ASMALIGN(4) + "1: \n\t" + "movq (%1, %%"REG_a"), %%mm1 \n\t" + "movq (%0, %%"REG_a"), %%mm0 \n\t" + "paddb (%2, %%"REG_a"), %%mm1 \n\t" + "paddb (%3, %%"REG_a"), %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" 
+ "movq %%mm1, %%mm3 \n\t" + "punpcklbw %%mm0, %%mm0 \n\t" + "punpckhbw %%mm2, %%mm2 \n\t" + "punpcklbw %%mm1, %%mm1 \n\t" + "punpckhbw %%mm3, %%mm3 \n\t" + "pmulhw %%mm0, %%mm1 \n\t" + "pmulhw %%mm2, %%mm3 \n\t" + "paddw %%mm1, %%mm1 \n\t" + "paddw %%mm3, %%mm3 \n\t" + "paddw %%mm0, %%mm1 \n\t" + "paddw %%mm2, %%mm3 \n\t" + "psrlw $8, %%mm1 \n\t" + "psrlw $8, %%mm3 \n\t" + "packuswb %%mm3, %%mm1 \n\t" + "movq %%mm1, (%4, %%"REG_a") \n\t" + "add $8, %%"REG_a" \n\t" + " js 1b \n\t" + :: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len), + "r" (dst+mmx_len), "g" (-mmx_len) + : "%"REG_a + ); + + if(mmx_len!=len){ + int8_t *shift2[3]={shift[0]+mmx_len, shift[1]+mmx_len, shift[2]+mmx_len}; + lineNoiseAvg_C(dst+mmx_len, src+mmx_len, len-mmx_len, shift2); + } +} +#endif + +static inline void lineNoiseAvg_C(uint8_t *dst, uint8_t *src, int len, int8_t **shift){ + int i; + int8_t *src2= (int8_t*)src; + + for(i=0; i<len; i++) + { + const int n= shift[0][i] + shift[1][i] + shift[2][i]; + dst[i]= src2[i]+((n*src2[i])>>7); + } +} + +/***************************************************************************/ + +static void noise(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int width, int height, FilterParam *fp){ + int8_t *noise= fp->noise; + int y; + int shift=0; + + if(!noise) + { + if(src==dst) return; + + if(dstStride==srcStride) fast_memcpy(dst, src, srcStride*height); + else + { + for(y=0; y<height; y++) + { + fast_memcpy(dst, src, width); + dst+= dstStride; + src+= srcStride; + } + } + return; + } + + for(y=0; y<height; y++) + { + if(fp->temporal) shift= rand()&(MAX_SHIFT -1); + else shift= nonTempRandShift[y]; + + if(fp->quality==0) shift&= ~7; + if (fp->averaged) { + lineNoiseAvg(dst, src, width, fp->prev_shift[y]); + fp->prev_shift[y][fp->shiftptr] = noise + shift; + } else { + lineNoise(dst, src, noise, width, shift); + } + dst+= dstStride; + src+= srcStride; + } + fp->shiftptr++; + if (fp->shiftptr == 3) 
fp->shiftptr = 0; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi){ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change + if(mpi->imgfmt!=vf->priv->outfmt) return; // colorspace differ + // ok, we can do pp in-place (or pp disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + mpi->type, mpi->flags, mpi->w, mpi->h); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + mp_image_t *dmpi; + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // no DR, so get a new image! 
hope we'll get DR buffer: + vf->dmpi=ff_vf_get_image(vf->next,vf->priv->outfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w,mpi->h); +//printf("nodr\n"); + } +//else printf("dr\n"); + dmpi= vf->dmpi; + + noise(dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, &vf->priv->lumaParam); + noise(dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w/2, mpi->h/2, &vf->priv->chromaParam); + noise(dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w/2, mpi->h/2, &vf->priv->chromaParam); + + ff_vf_clone_mpi_attributes(dmpi, mpi); + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t"); +#endif +#if HAVE_MMX2 + if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t"); +#endif + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct vf_instance *vf){ + if(!vf->priv) return; + + av_free(vf->priv->chromaParam.noise); + vf->priv->chromaParam.noise= NULL; + + av_free(vf->priv->lumaParam.noise); + vf->priv->lumaParam.noise= NULL; + + free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// + +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt) + { + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + return ff_vf_next_query_format(vf,vf->priv->outfmt); + } + return 0; +} + +static void parse(FilterParam *fp, char* args){ + char *pos; + char *max= strchr(args, ':'); + + if(!max) max= args + strlen(args); + + fp->strength= atoi(args); + pos= strchr(args, 'u'); + if(pos && pos<max) fp->uniform=1; + pos= strchr(args, 't'); + if(pos && pos<max) fp->temporal=1; + pos= strchr(args, 'h'); + if(pos && pos<max) fp->quality=1; + pos= strchr(args, 'p'); + if(pos && pos<max) fp->pattern=1; + pos= strchr(args, 'a'); + if(pos && pos<max) { + fp->temporal=1; + fp->averaged=1; + } + + if(fp->strength) initNoise(fp); +} + +static const unsigned int fmt_list[]={ + IMGFMT_YV12, + 
IMGFMT_I420, + IMGFMT_IYUV, + 0 +}; + +static int vf_open(vf_instance_t *vf, char *args){ + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + if(args) + { + char *arg2= strchr(args,':'); + if(arg2) parse(&vf->priv->chromaParam, arg2+1); + parse(&vf->priv->lumaParam, args); + } + + // check csp: + vf->priv->outfmt=ff_vf_match_csp(&vf->next,fmt_list,IMGFMT_YV12); + if(!vf->priv->outfmt) + { + uninit(vf); + return 0; // no csp match :( + } + + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX){ + lineNoise= lineNoise_MMX; + lineNoiseAvg= lineNoiseAvg_MMX; + } +#endif +#if HAVE_MMX2 + if(ff_gCpuCaps.hasMMX2) lineNoise= lineNoise_MMX2; +// if(ff_gCpuCaps.hasMMX) lineNoiseAvg= lineNoiseAvg_MMX2; +#endif + + return 1; +} + +const vf_info_t ff_vf_info_noise = { + "noise generator", + "noise", + "Michael Niedermayer", + "", + vf_open, + NULL +}; + +//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_ow.c b/libavfilter/libmpcodecs/vf_ow.c new file mode 100644 index 0000000..69b07ef --- /dev/null +++ b/libavfilter/libmpcodecs/vf_ow.c @@ -0,0 +1,322 @@ +/* + * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @todo try to change to int + * @todo try lifting based implementation + * @todo optimize optimize optimize + * @todo hard tresholding + * @todo use QP to decide filter strength + * @todo wavelet normalization / least squares optimal signal vs. noise thresholds + */ + +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "mp_msg.h" +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +//===========================================================================// +static const uint8_t __attribute__((aligned(8))) dither[8][8]={ +{ 0, 48, 12, 60, 3, 51, 15, 63, }, +{ 32, 16, 44, 28, 35, 19, 47, 31, }, +{ 8, 56, 4, 52, 11, 59, 7, 55, }, +{ 40, 24, 36, 20, 43, 27, 39, 23, }, +{ 2, 50, 14, 62, 1, 49, 13, 61, }, +{ 34, 18, 46, 30, 33, 17, 45, 29, }, +{ 10, 58, 6, 54, 9, 57, 5, 53, }, +{ 42, 26, 38, 22, 41, 25, 37, 21, }, +}; +//FIXME the above is duplicated in many filters + +struct vf_priv_s { + float strength[2]; + float delta; + int mode; + int depth; + float *plane[16][4]; + int stride; +}; + +#define S 1.41421356237 //sqrt(2) + +static const double coeff[2][5]={ + { + 0.6029490182363579 *S, + 0.2668641184428723 *S, + -0.07822326652898785 *S, + -0.01686411844287495 *S, + 0.02674875741080976 *S + },{ + 1.115087052456994 /S, + -0.5912717631142470 /S, + -0.05754352622849957 /S, + 0.09127176311424948 /S + } +}; + +static const double icoeff[2][5]={ + { + 1.115087052456994 /S, + 0.5912717631142470 /S, + -0.05754352622849957 /S, + -0.09127176311424948 /S + },{ + 0.6029490182363579 *S, + -0.2668641184428723 *S, + -0.07822326652898785 *S, + 0.01686411844287495 *S, + 0.02674875741080976 *S + } +}; +#undef S + +static inline int mirror(int x, int w){ + while((unsigned)x > (unsigned)w){ + x=-x; + 
if(x<0) x+= 2*w; + } + return x; +} + +static inline void decompose(float *dstL, float *dstH, float *src, int stride, int w){ + int x, i; + for(x=0; x<w; x++){ + double sumL= src[x*stride] * coeff[0][0]; + double sumH= src[x*stride] * coeff[1][0]; + for(i=1; i<=4; i++){ + double s= (src[mirror(x-i, w-1)*stride] + src[mirror(x+i, w-1)*stride]); + + sumL+= coeff[0][i]*s; + sumH+= coeff[1][i]*s; + } + dstL[x*stride]= sumL; + dstH[x*stride]= sumH; + } +} + +static inline void compose(float *dst, float *srcL, float *srcH, int stride, int w){ + int x, i; + for(x=0; x<w; x++){ + double sumL= srcL[x*stride] * icoeff[0][0]; + double sumH= srcH[x*stride] * icoeff[1][0]; + for(i=1; i<=4; i++){ + int x0= mirror(x-i, w-1)*stride; + int x1= mirror(x+i, w-1)*stride; + + sumL+= icoeff[0][i]*(srcL[x0] + srcL[x1]); + sumH+= icoeff[1][i]*(srcH[x0] + srcH[x1]); + } + dst[x*stride]= (sumL + sumH)*0.5; + } +} + +static inline void decompose2D(float *dstL, float *dstH, float *src, int xstride, int ystride, int step, int w, int h){ + int y, x; + for(y=0; y<h; y++) + for(x=0; x<step; x++) + decompose(dstL + ystride*y + xstride*x, dstH + ystride*y + xstride*x, src + ystride*y + xstride*x, step*xstride, (w-x+step-1)/step); +} + +static inline void compose2D(float *dst, float *srcL, float *srcH, int xstride, int ystride, int step, int w, int h){ + int y, x; + for(y=0; y<h; y++) + for(x=0; x<step; x++) + compose(dst + ystride*y + xstride*x, srcL + ystride*y + xstride*x, srcH + ystride*y + xstride*x, step*xstride, (w-x+step-1)/step); +} + +static void decompose2D2(float *dst[4], float *src, float *temp[2], int stride, int step, int w, int h){ + decompose2D(temp[0], temp[1], src , 1, stride, step , w, h); + decompose2D( dst[0], dst[1], temp[0], stride, 1, step , h, w); + decompose2D( dst[2], dst[3], temp[1], stride, 1, step , h, w); +} + +static void compose2D2(float *dst, float *src[4], float *temp[2], int stride, int step, int w, int h){ + compose2D(temp[0], src[0], src[1], stride, 1, step , 
h, w); + compose2D(temp[1], src[2], src[3], stride, 1, step , h, w); + compose2D(dst , temp[0], temp[1], 1, stride, step , w, h); +} + +static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, int is_luma){ + int x,y, i, j; +// double sum=0; + double s= p->strength[!is_luma]; + int depth= p->depth; + + while(1<<depth > width || 1<<depth > height) + depth--; + + for(y=0; y<height; y++) + for(x=0; x<width; x++) + p->plane[0][0][x + y*p->stride]= src[x + y*src_stride]; + + for(i=0; i<depth; i++){ + decompose2D2(p->plane[i+1], p->plane[i][0], p->plane[0]+1,p->stride, 1<<i, width, height); + } + for(i=0; i<depth; i++){ + for(j=1; j<4; j++){ + for(y=0; y<height; y++){ + for(x=0; x<width; x++){ + double v= p->plane[i+1][j][x + y*p->stride]; + if (v> s) v-=s; + else if(v<-s) v+=s; + else v =0; + p->plane[i+1][j][x + y*p->stride]= v; + } + } + } + } + for(i=depth-1; i>=0; i--){ + compose2D2(p->plane[i][0], p->plane[i+1], p->plane[0]+1, p->stride, 1<<i, width, height); + } + + for(y=0; y<height; y++) + for(x=0; x<width; x++){ + i= p->plane[0][0][x + y*p->stride] + dither[x&7][y&7]*(1.0/64) + 1.0/128; //yes the rounding is insane but optimal :) +// double e= i - src[x + y*src_stride]; +// sum += e*e; + if((unsigned)i > 255U) i= ~(i>>31); + dst[x + y*dst_stride]= i; + } + +// printf("%f\n", sum/height/width); +} + +static int config(struct vf_instance *vf, int width, int height, int d_width, int d_height, unsigned int flags, unsigned int outfmt){ + int h= (height+15)&(~15); + int i,j; + + vf->priv->stride= (width+15)&(~15); + for(j=0; j<4; j++){ + for(i=0; i<=vf->priv->depth; i++) + vf->priv->plane[i][j]= malloc(vf->priv->stride*h*sizeof(vf->priv->plane[0][0][0])); + } + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi){ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change + // ok, we can do pp in-place (or pp 
disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + mp_image_t *dmpi; + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // no DR, so get a new image! hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, + MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE, + mpi->width,mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + }else{ + dmpi=vf->dmpi; + } + + filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, 1); + filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, 0); + filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, 0); + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct vf_instance *vf){ + int i,j; + if(!vf->priv) return; + + for(j=0; j<4; j++){ + for(i=0; i<16; i++){ + free(vf->priv->plane[i][j]); + vf->priv->plane[i][j]= NULL; + } + } + + free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt){ + case IMGFMT_YVU9: + case IMGFMT_IF09: + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_CLPL: + case IMGFMT_Y800: + case IMGFMT_Y8: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return 
ff_vf_next_query_format(vf,fmt); + } + return 0; +} + + +static int vf_open(vf_instance_t *vf, char *args){ + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + vf->priv->depth= 8; + vf->priv->strength[0]= 1.0; + vf->priv->strength[1]= 1.0; + vf->priv->delta= 1.0; + + if (args) sscanf(args, "%d:%f:%f:%d:%f", &vf->priv->depth, + &vf->priv->strength[0], + &vf->priv->strength[1], + &vf->priv->mode, + &vf->priv->delta); + + return 1; +} + +const vf_info_t ff_vf_info_ow = { + "overcomplete wavelet denoiser", + "ow", + "Michael Niedermayer", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_perspective.c b/libavfilter/libmpcodecs/vf_perspective.c new file mode 100644 index 0000000..aed5c4d --- /dev/null +++ b/libavfilter/libmpcodecs/vf_perspective.c @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <assert.h> +#include <math.h> + +#include "config.h" +#include "mp_msg.h" + +#if HAVE_MALLOC_H +#include <malloc.h> +#endif + +#include "libavutil/mem.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#define SUB_PIXEL_BITS 8 +#define SUB_PIXELS (1<<SUB_PIXEL_BITS) +#define COEFF_BITS 11 + +//===========================================================================// + +struct vf_priv_s { + double ref[4][2]; + int32_t coeff[1<<SUB_PIXEL_BITS][4]; + int32_t (*pv)[2]; + int pvStride; + int cubic; +}; + + +/***************************************************************************/ + +static void initPv(struct vf_priv_s *priv, int W, int H){ + double a,b,c,d,e,f,g,h,D; + double (*ref)[2]= priv->ref; + int x,y; + + g= ( (ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0])*(ref[2][1] - ref[3][1]) + - (ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1])*(ref[2][0] - ref[3][0]))*H; + h= ( (ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1])*(ref[1][0] - ref[3][0]) + - (ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0])*(ref[1][1] - ref[3][1]))*W; + D= (ref[1][0] - ref[3][0])*(ref[2][1] - ref[3][1]) + - (ref[2][0] - ref[3][0])*(ref[1][1] - ref[3][1]); + + a= D*(ref[1][0] - ref[0][0])*H + g*ref[1][0]; + b= D*(ref[2][0] - ref[0][0])*W + h*ref[2][0]; + c= D*ref[0][0]*W*H; + d= D*(ref[1][1] - ref[0][1])*H + g*ref[1][1]; + e= D*(ref[2][1] - ref[0][1])*W + h*ref[2][1]; + f= D*ref[0][1]*W*H; + + for(y=0; y<H; y++){ + for(x=0; x<W; x++){ + int u, v; + + u= (int)floor( SUB_PIXELS*(a*x + b*y + c)/(g*x + h*y + D*W*H) + 0.5); + v= (int)floor( SUB_PIXELS*(d*x + e*y + f)/(g*x + h*y + D*W*H) + 0.5); + + priv->pv[x + y*W][0]= u; + priv->pv[x + y*W][1]= v; + } + } +} + +static double getCoeff(double d){ + double A= -0.60; + double coeff; + + d= fabs(d); + + // Equation is from VirtualDub + if(d<1.0) + coeff = (1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d); + else if(d<2.0) + coeff = (-4.0*A + 
8.0*A*d - 5.0*A*d*d + A*d*d*d); + else + coeff=0.0; + + return coeff; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + int i, j; + + vf->priv->pvStride= width; + vf->priv->pv= av_malloc(width*height*2*sizeof(int32_t)); + initPv(vf->priv, width, height); + + for(i=0; i<SUB_PIXELS; i++){ + double d= i/(double)SUB_PIXELS; + double temp[4]; + double sum=0; + + for(j=0; j<4; j++) + temp[j]= getCoeff(j - d - 1); + + for(j=0; j<4; j++) + sum+= temp[j]; + + for(j=0; j<4; j++) + vf->priv->coeff[i][j]= (int)floor((1<<COEFF_BITS)*temp[j]/sum + 0.5); + } + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void uninit(struct vf_instance *vf){ + if(!vf->priv) return; + + av_free(vf->priv->pv); + vf->priv->pv= NULL; + + free(vf->priv); + vf->priv=NULL; +} + +static inline void resampleCubic(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, struct vf_priv_s *privParam, int xShift, int yShift){ + int x, y; + struct vf_priv_s priv= *privParam; + + for(y=0; y<h; y++){ + for(x=0; x<w; x++){ + int u, v, subU, subV, sum, sx, sy; + + sx= x << xShift; + sy= y << yShift; + u= priv.pv[sx + sy*priv.pvStride][0]>>xShift; + v= priv.pv[sx + sy*priv.pvStride][1]>>yShift; + subU= u & (SUB_PIXELS-1); + subV= v & (SUB_PIXELS-1); + u >>= SUB_PIXEL_BITS; + v >>= SUB_PIXEL_BITS; + + if(u>0 && v>0 && u<w-2 && v<h-2){ + const int index= u + v*srcStride; + const int a= priv.coeff[subU][0]; + const int b= priv.coeff[subU][1]; + const int c= priv.coeff[subU][2]; + const int d= priv.coeff[subU][3]; + + sum= + priv.coeff[subV][0]*( a*src[index - 1 - srcStride] + b*src[index - 0 - srcStride] + + c*src[index + 1 - srcStride] + d*src[index + 2 - srcStride]) + +priv.coeff[subV][1]*( a*src[index - 1 ] + b*src[index - 0 ] + + c*src[index + 1 ] + d*src[index + 2 ]) + +priv.coeff[subV][2]*( a*src[index - 1 + srcStride] + b*src[index - 0 + srcStride] + + 
c*src[index + 1 + srcStride] + d*src[index + 2 + srcStride]) + +priv.coeff[subV][3]*( a*src[index - 1+2*srcStride] + b*src[index - 0+2*srcStride] + + c*src[index + 1+2*srcStride] + d*src[index + 2+2*srcStride]); + }else{ + int dx, dy; + sum=0; + + for(dy=0; dy<4; dy++){ + int iy= v + dy - 1; + if (iy< 0) iy=0; + else if(iy>=h) iy=h-1; + for(dx=0; dx<4; dx++){ + int ix= u + dx - 1; + if (ix< 0) ix=0; + else if(ix>=w) ix=w-1; + + sum+= priv.coeff[subU][dx]*priv.coeff[subV][dy] + *src[ ix + iy*srcStride]; + } + } + } + sum= (sum + (1<<(COEFF_BITS*2-1)) ) >> (COEFF_BITS*2); + if(sum&~255){ + if(sum<0) sum=0; + else sum=255; + } + dst[ x + y*dstStride]= sum; + } + } +} + +static inline void resampleLinear(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, + struct vf_priv_s *privParam, int xShift, int yShift){ + int x, y; + struct vf_priv_s priv= *privParam; + + for(y=0; y<h; y++){ + for(x=0; x<w; x++){ + int u, v, subU, subV, sum, sx, sy, index, subUI, subVI; + + sx= x << xShift; + sy= y << yShift; + u= priv.pv[sx + sy*priv.pvStride][0]>>xShift; + v= priv.pv[sx + sy*priv.pvStride][1]>>yShift; + subU= u & (SUB_PIXELS-1); + subV= v & (SUB_PIXELS-1); + u >>= SUB_PIXEL_BITS; + v >>= SUB_PIXEL_BITS; + index= u + v*srcStride; + subUI= SUB_PIXELS - subU; + subVI= SUB_PIXELS - subV; + + if((unsigned)u < (unsigned)(w - 1)){ + if((unsigned)v < (unsigned)(h - 1)){ + sum= subVI*(subUI*src[index ] + subU*src[index +1]) + +subV *(subUI*src[index+srcStride] + subU*src[index+srcStride+1]); + sum= (sum + (1<<(SUB_PIXEL_BITS*2-1)) ) >> (SUB_PIXEL_BITS*2); + }else{ + if(v<0) v= 0; + else v= h-1; + index= u + v*srcStride; + sum= subUI*src[index] + subU*src[index+1]; + sum= (sum + (1<<(SUB_PIXEL_BITS-1)) ) >> SUB_PIXEL_BITS; + } + }else{ + if((unsigned)v < (unsigned)(h - 1)){ + if(u<0) u= 0; + else u= w-1; + index= u + v*srcStride; + sum= subVI*src[index] + subV*src[index+srcStride]; + sum= (sum + (1<<(SUB_PIXEL_BITS-1)) ) >> SUB_PIXEL_BITS; + }else{ + if(u<0) u= 0; + 
else u= w-1; + if(v<0) v= 0; + else v= h-1; + index= u + v*srcStride; + sum= src[index]; + } + } + if(sum&~255){ + if(sum<0) sum=0; + else sum=255; + } + dst[ x + y*dstStride]= sum; + } + } +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + int cw= mpi->w >> mpi->chroma_x_shift; + int ch= mpi->h >> mpi->chroma_y_shift; + + mp_image_t *dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w,mpi->h); + + assert(mpi->flags&MP_IMGFLAG_PLANAR); + + if(vf->priv->cubic){ + resampleCubic(dmpi->planes[0], mpi->planes[0], mpi->w,mpi->h, dmpi->stride[0], mpi->stride[0], + vf->priv, 0, 0); + resampleCubic(dmpi->planes[1], mpi->planes[1], cw , ch , dmpi->stride[1], mpi->stride[1], + vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift); + resampleCubic(dmpi->planes[2], mpi->planes[2], cw , ch , dmpi->stride[2], mpi->stride[2], + vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift); + }else{ + resampleLinear(dmpi->planes[0], mpi->planes[0], mpi->w,mpi->h, dmpi->stride[0], mpi->stride[0], + vf->priv, 0, 0); + resampleLinear(dmpi->planes[1], mpi->planes[1], cw , ch , dmpi->stride[1], mpi->stride[1], + vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift); + resampleLinear(dmpi->planes[2], mpi->planes[2], cw , ch , dmpi->stride[2], mpi->stride[2], + vf->priv, mpi->chroma_x_shift, mpi->chroma_y_shift); + } + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +//===========================================================================// + +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt) + { + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_YVU9: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int vf_open(vf_instance_t *vf, char *args){ + int e; + + vf->config=config; + vf->put_image=put_image; +// vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + 
vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + if(args==NULL) return 0; + + e=sscanf(args, "%lf:%lf:%lf:%lf:%lf:%lf:%lf:%lf:%d", + &vf->priv->ref[0][0], &vf->priv->ref[0][1], + &vf->priv->ref[1][0], &vf->priv->ref[1][1], + &vf->priv->ref[2][0], &vf->priv->ref[2][1], + &vf->priv->ref[3][0], &vf->priv->ref[3][1], + &vf->priv->cubic + ); + + if(e!=9) + return 0; + + return 1; +} + +const vf_info_t ff_vf_info_perspective = { + "perspective correcture", + "perspective", + "Michael Niedermayer", + "", + vf_open, + NULL +}; + +//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_phase.c b/libavfilter/libmpcodecs/vf_phase.c new file mode 100644 index 0000000..25abc5b --- /dev/null +++ b/libavfilter/libmpcodecs/vf_phase.c @@ -0,0 +1,303 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <limits.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +enum mode { PROGRESSIVE, TOP_FIRST, BOTTOM_FIRST, + TOP_FIRST_ANALYZE, BOTTOM_FIRST_ANALYZE, + ANALYZE, FULL_ANALYZE, AUTO, AUTO_ANALYZE }; + +#define fixed_mode(p) ((p)<=BOTTOM_FIRST) + +struct vf_priv_s + { + enum mode mode; + int verbose; + unsigned char *buf[3]; + }; + +/* + * Copy fields from either current or buffered previous frame to the + * output and store the current frame unmodified to the buffer. + */ + +static void do_plane(unsigned char *to, unsigned char *from, + int w, int h, int ts, int fs, + unsigned char **bufp, enum mode mode) + { + unsigned char *buf, *end; + int top; + + if(!*bufp) + { + mode=PROGRESSIVE; + if(!(*bufp=malloc(h*w))) return; + } + + for(end=to+h*ts, buf=*bufp, top=1; to<end; from+=fs, to+=ts, buf+=w, top^=1) + { + fast_memcpy(to, mode==(top?BOTTOM_FIRST:TOP_FIRST)?buf:from, w); + fast_memcpy(buf, from, w); + } + } + +/* + * This macro interpolates the value of both fields at a point halfway + * between lines and takes the squared difference. In field resolution + * the point is a quarter pixel below a line in one field and a quarter + * pixel above a line in other. + * + * (the result is actually multiplied by 25) + */ + +#define diff(a, as, b, bs) (t=((*a-b[bs])<<2)+a[as<<1]-b[-bs], t*t) + +/* + * Find which field combination has the smallest average squared difference + * between the fields. + */ + +static enum mode analyze_plane(unsigned char *old, unsigned char *new, + int w, int h, int os, int ns, enum mode mode, + int verbose, int fields) + { + double bdiff, pdiff, tdiff, scale; + int bdif, tdif, pdif; + int top, t; + unsigned char *end, *rend; + + if(mode==AUTO) + mode=fields&MP_IMGFIELD_ORDERED?fields&MP_IMGFIELD_TOP_FIRST? 
+ TOP_FIRST:BOTTOM_FIRST:PROGRESSIVE; + else if(mode==AUTO_ANALYZE) + mode=fields&MP_IMGFIELD_ORDERED?fields&MP_IMGFIELD_TOP_FIRST? + TOP_FIRST_ANALYZE:BOTTOM_FIRST_ANALYZE:FULL_ANALYZE; + + if(fixed_mode(mode)) + bdiff=pdiff=tdiff=65536.0; + else + { + bdiff=pdiff=tdiff=0.0; + + for(end=new+(h-2)*ns, new+=ns, old+=os, top=0; + new<end; new+=ns-w, old+=os-w, top^=1) + { + pdif=tdif=bdif=0; + + switch(mode) + { + case TOP_FIRST_ANALYZE: + if(top) + for(rend=new+w; new<rend; new++, old++) + pdif+=diff(new, ns, new, ns), + tdif+=diff(new, ns, old, os); + else + for(rend=new+w; new<rend; new++, old++) + pdif+=diff(new, ns, new, ns), + tdif+=diff(old, os, new, ns); + break; + + case BOTTOM_FIRST_ANALYZE: + if(top) + for(rend=new+w; new<rend; new++, old++) + pdif+=diff(new, ns, new, ns), + bdif+=diff(old, os, new, ns); + else + for(rend=new+w; new<rend; new++, old++) + pdif+=diff(new, ns, new, ns), + bdif+=diff(new, ns, old, os); + break; + + case ANALYZE: + if(top) + for(rend=new+w; new<rend; new++, old++) + tdif+=diff(new, ns, old, os), + bdif+=diff(old, os, new, ns); + else + for(rend=new+w; new<rend; new++, old++) + bdif+=diff(new, ns, old, os), + tdif+=diff(old, os, new, ns); + break; + + default: /* FULL_ANALYZE */ + if(top) + for(rend=new+w; new<rend; new++, old++) + pdif+=diff(new, ns, new, ns), + tdif+=diff(new, ns, old, os), + bdif+=diff(old, os, new, ns); + else + for(rend=new+w; new<rend; new++, old++) + pdif+=diff(new, ns, new, ns), + bdif+=diff(new, ns, old, os), + tdif+=diff(old, os, new, ns); + } + + pdiff+=(double)pdif; + tdiff+=(double)tdif; + bdiff+=(double)bdif; + } + + scale=1.0/(w*(h-3))/25.0; + pdiff*=scale; + tdiff*=scale; + bdiff*=scale; + + if(mode==TOP_FIRST_ANALYZE) + bdiff=65536.0; + else if(mode==BOTTOM_FIRST_ANALYZE) + tdiff=65536.0; + else if(mode==ANALYZE) + pdiff=65536.0; + + if(bdiff<pdiff && bdiff<tdiff) + mode=BOTTOM_FIRST; + else if(tdiff<pdiff && tdiff<bdiff) + mode=TOP_FIRST; + else + mode=PROGRESSIVE; + } + + if( 
ff_mp_msg_test(MSGT_VFILTER,MSGL_V) ) + { + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "%c", mode==BOTTOM_FIRST?'b':mode==TOP_FIRST?'t':'p'); + if(tdiff==65536.0) ff_mp_msg(MSGT_VFILTER, MSGL_INFO," N/A "); else ff_mp_msg(MSGT_VFILTER, MSGL_INFO," %8.2f", tdiff); + if(bdiff==65536.0) ff_mp_msg(MSGT_VFILTER, MSGL_INFO," N/A "); else ff_mp_msg(MSGT_VFILTER, MSGL_INFO," %8.2f", bdiff); + if(pdiff==65536.0) ff_mp_msg(MSGT_VFILTER, MSGL_INFO," N/A "); else ff_mp_msg(MSGT_VFILTER, MSGL_INFO," %8.2f", pdiff); + ff_mp_msg(MSGT_VFILTER, MSGL_INFO," \n"); + } + + return mode; + } + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) + { + mp_image_t *dmpi; + int w; + enum mode mode; + + if(!(dmpi=ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w, mpi->h))) + return 0; + + w=dmpi->w; + if(!(dmpi->flags&MP_IMGFLAG_PLANAR)) + w*=dmpi->bpp/8; + + mode=vf->priv->mode; + + if(!vf->priv->buf[0]) + mode=PROGRESSIVE; + else + mode=analyze_plane(vf->priv->buf[0], mpi->planes[0], + w, dmpi->h, w, mpi->stride[0], mode, + vf->priv->verbose, mpi->fields); + + do_plane(dmpi->planes[0], mpi->planes[0], + w, dmpi->h, + dmpi->stride[0], mpi->stride[0], + &vf->priv->buf[0], mode); + + if(dmpi->flags&MP_IMGFLAG_PLANAR) + { + do_plane(dmpi->planes[1], mpi->planes[1], + dmpi->chroma_width, dmpi->chroma_height, + dmpi->stride[1], mpi->stride[1], + &vf->priv->buf[1], mode); + do_plane(dmpi->planes[2], mpi->planes[2], + dmpi->chroma_width, dmpi->chroma_height, + dmpi->stride[2], mpi->stride[2], + &vf->priv->buf[2], mode); + } + + return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + } + +static void uninit(struct vf_instance *vf) + { + if (!vf->priv) + return; + free(vf->priv->buf[0]); + free(vf->priv->buf[1]); + free(vf->priv->buf[2]); + free(vf->priv); + } + +static int vf_open(vf_instance_t *vf, char *args) + { + vf->put_image = put_image; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + + if(!(vf->priv = calloc(1, 
sizeof(struct vf_priv_s)))) + { + uninit(vf); + return 0; + } + + vf->priv->mode=AUTO_ANALYZE; + vf->priv->verbose=0; + + while(args && *args) + { + switch(*args) + { + case 't': vf->priv->mode=TOP_FIRST; break; + case 'a': vf->priv->mode=AUTO; break; + case 'b': vf->priv->mode=BOTTOM_FIRST; break; + case 'u': vf->priv->mode=ANALYZE; break; + case 'T': vf->priv->mode=TOP_FIRST_ANALYZE; break; + case 'A': vf->priv->mode=AUTO_ANALYZE; break; + case 'B': vf->priv->mode=BOTTOM_FIRST_ANALYZE; break; + case 'U': vf->priv->mode=FULL_ANALYZE; break; + case 'p': vf->priv->mode=PROGRESSIVE; break; + case 'v': vf->priv->verbose=1; break; + case ':': break; + + default: + uninit(vf); + return 0; /* bad args */ + } + + if( (args=strchr(args, ':')) ) args++; + } + + return 1; + } + +const vf_info_t ff_vf_info_phase = + { + "phase shift fields", + "phase", + "Ville Saari", + "", + vf_open, + NULL + }; diff --git a/libavfilter/libmpcodecs/vf_pp7.c b/libavfilter/libmpcodecs/vf_pp7.c new file mode 100644 index 0000000..30f9530 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_pp7.c @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "config.h" + +#include "mp_msg.h" +#include "cpudetect.h" + +#if HAVE_MALLOC_H +#include <malloc.h> +#endif + +#include "libavutil/mem.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libvo/fastmemcpy.h" + +#define XMIN(a,b) ((a) < (b) ? (a) : (b)) +#define XMAX(a,b) ((a) > (b) ? (a) : (b)) + +//===========================================================================// +static const uint8_t __attribute__((aligned(8))) dither[8][8]={ +{ 0, 48, 12, 60, 3, 51, 15, 63, }, +{ 32, 16, 44, 28, 35, 19, 47, 31, }, +{ 8, 56, 4, 52, 11, 59, 7, 55, }, +{ 40, 24, 36, 20, 43, 27, 39, 23, }, +{ 2, 50, 14, 62, 1, 49, 13, 61, }, +{ 34, 18, 46, 30, 33, 17, 45, 29, }, +{ 10, 58, 6, 54, 9, 57, 5, 53, }, +{ 42, 26, 38, 22, 41, 25, 37, 21, }, +}; + +struct vf_priv_s { + int qp; + int mode; + int mpeg2; + int temp_stride; + uint8_t *src; +}; +#if 0 +static inline void dct7_c(int16_t *dst, int s0, int s1, int s2, int s3, int step){ + int s, d; + int dst2[64]; +//#define S0 (1024/0.37796447300922719759) +#define C0 ((int)(1024*0.37796447300922719759+0.5)) //sqrt(1/7) +#define C1 ((int)(1024*0.53452248382484879308/6+0.5)) //sqrt(2/7)/6 + +#define C2 ((int)(1024*0.45221175985034745004/2+0.5)) +#define C3 ((int)(1024*0.36264567479870879474/2+0.5)) + +//0.1962505182412941918 0.0149276808419397944-0.2111781990832339584 +#define C4 ((int)(1024*0.1962505182412941918+0.5)) +#define C5 ((int)(1024*0.0149276808419397944+0.5)) +//#define C6 ((int)(1024*0.2111781990832339584+0.5)) +#if 0 + s= s0 + s1 + s2; + dst[0*step] = ((s + s3)*C0 + 512) >> 10; + s= (s - 6*s3)*C1 + 512; + d= (s0-s2)*C4 + (s1-s2)*C5; + dst[1*step] = (s + 2*d)>>10; + s -= d; + d= (s1-s0)*C2 + (s1-s2)*C3; + dst[2*step] = (s + d)>>10; + dst[3*step] = (s - d)>>10; +#elif 1 + s = s3+s3; + s3= s-s0; + s0= s+s0; + s = s2+s1; + s2= s2-s1; + dst[0*step]= s0 + s; + dst[2*step]= s0 - 
s; + dst[1*step]= 2*s3 + s2; + dst[3*step]= s3 - 2*s2; +#else + int i,j,n=7; + for(i=0; i<7; i+=2){ + dst2[i*step/2]= 0; + for(j=0; j<4; j++) + dst2[i*step/2] += src[j*step] * cos(i*M_PI/n*(j+0.5)) * sqrt((i?2.0:1.0)/n); + if(fabs(dst2[i*step/2] - dst[i*step/2]) > 20) + printf("%d %d %d (%d %d %d %d) -> (%d %d %d %d)\n", i,dst2[i*step/2], dst[i*step/2],src[0*step], src[1*step], src[2*step], src[3*step], dst[0*step], dst[1*step],dst[2*step],dst[3*step]); + } +#endif +} +#endif + +static inline void dctA_c(int16_t *dst, uint8_t *src, int stride){ + int i; + + for(i=0; i<4; i++){ + int s0= src[0*stride] + src[6*stride]; + int s1= src[1*stride] + src[5*stride]; + int s2= src[2*stride] + src[4*stride]; + int s3= src[3*stride]; + int s= s3+s3; + s3= s-s0; + s0= s+s0; + s = s2+s1; + s2= s2-s1; + dst[0]= s0 + s; + dst[2]= s0 - s; + dst[1]= 2*s3 + s2; + dst[3]= s3 - 2*s2; + src++; + dst+=4; + } +} + +static void dctB_c(int16_t *dst, int16_t *src){ + int i; + + for(i=0; i<4; i++){ + int s0= src[0*4] + src[6*4]; + int s1= src[1*4] + src[5*4]; + int s2= src[2*4] + src[4*4]; + int s3= src[3*4]; + int s= s3+s3; + s3= s-s0; + s0= s+s0; + s = s2+s1; + s2= s2-s1; + dst[0*4]= s0 + s; + dst[2*4]= s0 - s; + dst[1*4]= 2*s3 + s2; + dst[3*4]= s3 - 2*s2; + src++; + dst++; + } +} + +#if HAVE_MMX +static void dctB_mmx(int16_t *dst, int16_t *src){ + __asm__ volatile ( + "movq (%0), %%mm0 \n\t" + "movq 1*4*2(%0), %%mm1 \n\t" + "paddw 6*4*2(%0), %%mm0 \n\t" + "paddw 5*4*2(%0), %%mm1 \n\t" + "movq 2*4*2(%0), %%mm2 \n\t" + "movq 3*4*2(%0), %%mm3 \n\t" + "paddw 4*4*2(%0), %%mm2 \n\t" + "paddw %%mm3, %%mm3 \n\t" //s + "movq %%mm3, %%mm4 \n\t" //s + "psubw %%mm0, %%mm3 \n\t" //s-s0 + "paddw %%mm0, %%mm4 \n\t" //s+s0 + "movq %%mm2, %%mm0 \n\t" //s2 + "psubw %%mm1, %%mm2 \n\t" //s2-s1 + "paddw %%mm1, %%mm0 \n\t" //s2+s1 + "movq %%mm4, %%mm1 \n\t" //s0' + "psubw %%mm0, %%mm4 \n\t" //s0'-s' + "paddw %%mm0, %%mm1 \n\t" //s0'+s' + "movq %%mm3, %%mm0 \n\t" //s3' + "psubw %%mm2, %%mm3 \n\t" + "psubw %%mm2, 
%%mm3 \n\t" + "paddw %%mm0, %%mm2 \n\t" + "paddw %%mm0, %%mm2 \n\t" + "movq %%mm1, (%1) \n\t" + "movq %%mm4, 2*4*2(%1) \n\t" + "movq %%mm2, 1*4*2(%1) \n\t" + "movq %%mm3, 3*4*2(%1) \n\t" + :: "r" (src), "r"(dst) + ); +} +#endif + +static void (*dctB)(int16_t *dst, int16_t *src)= dctB_c; + +#define N0 4 +#define N1 5 +#define N2 10 +#define SN0 2 +#define SN1 2.2360679775 +#define SN2 3.16227766017 +#define N (1<<16) + +static const int factor[16]={ + N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2), + N/(N1*N0), N/(N1*N1), N/(N1*N0),N/(N1*N2), + N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2), + N/(N2*N0), N/(N2*N1), N/(N2*N0),N/(N2*N2), +}; + +static const int thres[16]={ + N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2), + N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2), + N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2), + N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2), +}; + +static int thres2[99][16]; + +static void init_thres2(void){ + int qp, i; + int bias= 0; //FIXME + + for(qp=0; qp<99; qp++){ + for(i=0; i<16; i++){ + thres2[qp][i]= ((i&1)?SN2:SN0) * ((i&4)?SN2:SN0) * XMAX(1,qp) * (1<<2) - 1 - bias; + } + } +} + +static int hardthresh_c(int16_t *src, int qp){ + int i; + int a; + + a= src[0] * factor[0]; + for(i=1; i<16; i++){ + unsigned int threshold1= thres2[qp][i]; + unsigned int threshold2= (threshold1<<1); + int level= src[i]; + if(((unsigned)(level+threshold1))>threshold2){ + a += level * factor[i]; + } + } + return (a + (1<<11))>>12; +} + +static int mediumthresh_c(int16_t *src, int qp){ + int i; + int a; + + a= src[0] * factor[0]; + for(i=1; i<16; i++){ + unsigned int threshold1= thres2[qp][i]; + unsigned int threshold2= (threshold1<<1); + int level= src[i]; + if(((unsigned)(level+threshold1))>threshold2){ + if(((unsigned)(level+2*threshold1))>2*threshold2){ + a += level * factor[i]; + }else{ + if(level>0) a+= 2*(level - (int)threshold1)*factor[i]; + else a+= 2*(level + (int)threshold1)*factor[i]; + } + } + } + return (a + (1<<11))>>12; +} + +static int 
softthresh_c(int16_t *src, int qp){ + int i; + int a; + + a= src[0] * factor[0]; + for(i=1; i<16; i++){ + unsigned int threshold1= thres2[qp][i]; + unsigned int threshold2= (threshold1<<1); + int level= src[i]; + if(((unsigned)(level+threshold1))>threshold2){ + if(level>0) a+= (level - (int)threshold1)*factor[i]; + else a+= (level + (int)threshold1)*factor[i]; + } + } + return (a + (1<<11))>>12; +} + +static int (*requantize)(int16_t *src, int qp)= hardthresh_c; + +static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){ + int x, y; + const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15)); + uint8_t *p_src= p->src + 8*stride; + int16_t *block= (int16_t *)p->src; + int16_t *temp= (int16_t *)(p->src + 32); + + if (!src || !dst) return; // HACK avoid crash for Y8 colourspace + for(y=0; y<height; y++){ + int index= 8 + 8*stride + y*stride; + fast_memcpy(p_src + index, src + y*src_stride, width); + for(x=0; x<8; x++){ + p_src[index - x - 1]= p_src[index + x ]; + p_src[index + width + x ]= p_src[index + width - x - 1]; + } + } + for(y=0; y<8; y++){ + fast_memcpy(p_src + ( 7-y)*stride, p_src + ( y+8)*stride, stride); + fast_memcpy(p_src + (height+8+y)*stride, p_src + (height-y+7)*stride, stride); + } + //FIXME (try edge emu) + + for(y=0; y<height; y++){ + for(x=-8; x<0; x+=4){ + const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset + uint8_t *src = p_src + index; + int16_t *tp= temp+4*x; + + dctA_c(tp+4*8, src, stride); + } + for(x=0; x<width; ){ + const int qps= 3 + is_luma; + int qp; + int end= XMIN(x+8, width); + + if(p->qp) + qp= p->qp; + else{ + qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride]; + qp=norm_qscale(qp, p->mpeg2); + } + for(; x<end; x++){ + const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset + uint8_t *src = p_src + index; + int16_t *tp= temp+4*x; + int v; + + 
if((x&3)==0) + dctA_c(tp+4*8, src, stride); + + dctB(block, tp); + + v= requantize(block, qp); + v= (v + dither[y&7][x&7])>>6; + if((unsigned)v > 255) + v= (-v)>>31; + dst[x + y*dst_stride]= v; + } + } + } +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + int h= (height+16+15)&(~15); + + vf->priv->temp_stride= (width+16+15)&(~15); + vf->priv->src = av_malloc(vf->priv->temp_stride*(h+8)*sizeof(uint8_t)); + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi){ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change + // ok, we can do pp in-place (or pp disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + mp_image_t *dmpi; + + if(mpi->flags&MP_IMGFLAG_DIRECT){ + dmpi=vf->dmpi; + }else{ + // no DR, so get a new image! 
hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, + MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE, + mpi->width,mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + } + + vf->priv->mpeg2= mpi->qscale_type; + if(mpi->qscale || vf->priv->qp){ + filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, mpi->qscale, mpi->qstride, 1); + filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0); + filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0); + }else{ + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]); + memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]); + } + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t"); +#endif +#if HAVE_MMX2 + if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t"); +#endif + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct vf_instance *vf){ + if(!vf->priv) return; + + av_free(vf->priv->src); + vf->priv->src= NULL; + + free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt){ + case IMGFMT_YVU9: + case IMGFMT_IF09: + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_CLPL: + case IMGFMT_Y800: + case IMGFMT_Y8: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return ff_vf_next_query_format(vf,fmt); + } + return 0; +} + +static int 
control(struct vf_instance *vf, int request, void* data){ + return ff_vf_next_control(vf,request,data); +} + +static int vf_open(vf_instance_t *vf, char *args){ + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->control= control; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + if (args) sscanf(args, "%d:%d", &vf->priv->qp, &vf->priv->mode); + + if(vf->priv->qp < 0) + vf->priv->qp = 0; + + init_thres2(); + + switch(vf->priv->mode){ + case 0: requantize= hardthresh_c; break; + case 1: requantize= softthresh_c; break; + default: + case 2: requantize= mediumthresh_c; break; + } + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX){ + dctB= dctB_mmx; + } +#endif +#if 0 + if(ff_gCpuCaps.hasMMX){ + switch(vf->priv->mode){ + case 0: requantize= hardthresh_mmx; break; + case 1: requantize= softthresh_mmx; break; + } + } +#endif + + return 1; +} + +const vf_info_t ff_vf_info_pp7 = { + "postprocess 7", + "pp7", + "Michael Niedermayer", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_pullup.c b/libavfilter/libmpcodecs/vf_pullup.c new file mode 100644 index 0000000..e4a28c4 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_pullup.c @@ -0,0 +1,316 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +#include "pullup.h" + +#undef MAX +#define MAX(a,b) ((a)>(b)?(a):(b)) + +struct vf_priv_s { + struct pullup_context *ctx; + int init; + int fakecount; + char *qbuf; +}; + +static void init_pullup(struct vf_instance *vf, mp_image_t *mpi) +{ + struct pullup_context *c = vf->priv->ctx; + + if (mpi->flags & MP_IMGFLAG_PLANAR) { + c->format = PULLUP_FMT_Y; + c->nplanes = 4; + ff_pullup_preinit_context(c); + c->bpp[0] = c->bpp[1] = c->bpp[2] = 8; + c->w[0] = mpi->w; + c->h[0] = mpi->h; + c->w[1] = c->w[2] = mpi->chroma_width; + c->h[1] = c->h[2] = mpi->chroma_height; + c->w[3] = ((mpi->w+15)/16) * ((mpi->h+15)/16); + c->h[3] = 2; + c->stride[0] = mpi->width; + c->stride[1] = c->stride[2] = mpi->chroma_width; + c->stride[3] = c->w[3]; + c->background[1] = c->background[2] = 128; + } + + if (ff_gCpuCaps.hasMMX) c->cpu |= PULLUP_CPU_MMX; + if (ff_gCpuCaps.hasMMX2) c->cpu |= PULLUP_CPU_MMX2; + if (ff_gCpuCaps.has3DNow) c->cpu |= PULLUP_CPU_3DNOW; + if (ff_gCpuCaps.has3DNowExt) c->cpu |= PULLUP_CPU_3DNOWEXT; + if (ff_gCpuCaps.hasSSE) c->cpu |= PULLUP_CPU_SSE; + if (ff_gCpuCaps.hasSSE2) c->cpu |= PULLUP_CPU_SSE2; + + ff_pullup_init_context(c); + + vf->priv->init = 1; + vf->priv->qbuf = malloc(c->w[3]); +} + + +#if 0 +static void get_image(struct vf_instance *vf, mp_image_t *mpi) +{ + struct pullup_context *c = vf->priv->ctx; + struct pullup_buffer *b; + + if (mpi->type == MP_IMGTYPE_STATIC) return; + + if (!vf->priv->init) init_pullup(vf, mpi); + + b = ff_pullup_get_buffer(c, 2); + if (!b) return; /* shouldn't happen... 
*/ + + mpi->priv = b; + + mpi->planes[0] = b->planes[0]; + mpi->planes[1] = b->planes[1]; + mpi->planes[2] = b->planes[2]; + mpi->stride[0] = c->stride[0]; + mpi->stride[1] = c->stride[1]; + mpi->stride[2] = c->stride[2]; + + mpi->flags |= MP_IMGFLAG_DIRECT; + mpi->flags &= ~MP_IMGFLAG_DRAW_CALLBACK; +} +#endif + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + struct pullup_context *c = vf->priv->ctx; + struct pullup_buffer *b; + struct pullup_frame *f; + mp_image_t *dmpi; + int ret; + int p; + int i; + + if (!vf->priv->init) init_pullup(vf, mpi); + + if (mpi->flags & MP_IMGFLAG_DIRECT) { + b = mpi->priv; + mpi->priv = 0; + } else { + b = ff_pullup_get_buffer(c, 2); + if (!b) { + ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"Could not get buffer from pullup!\n"); + f = ff_pullup_get_frame(c); + ff_pullup_release_frame(f); + return 0; + } + memcpy_pic(b->planes[0], mpi->planes[0], mpi->w, mpi->h, + c->stride[0], mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(b->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height, + c->stride[1], mpi->stride[1]); + memcpy_pic(b->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height, + c->stride[2], mpi->stride[2]); + } + } + if (mpi->qscale) { + fast_memcpy(b->planes[3], mpi->qscale, c->w[3]); + fast_memcpy(b->planes[3]+c->w[3], mpi->qscale, c->w[3]); + } + + p = mpi->fields & MP_IMGFIELD_TOP_FIRST ? 0 : + (mpi->fields & MP_IMGFIELD_ORDERED ? 1 : 0); + ff_pullup_submit_field(c, b, p); + ff_pullup_submit_field(c, b, p^1); + if (mpi->fields & MP_IMGFIELD_REPEAT_FIRST) + ff_pullup_submit_field(c, b, p); + + ff_pullup_release_buffer(b, 2); + + f = ff_pullup_get_frame(c); + + /* Fake yes for first few frames (buffer depth) to keep from + * breaking A/V sync with G1's bad architecture... */ + if (!f) return vf->priv->fakecount ? 
(--vf->priv->fakecount,1) : 0; + + if (f->length < 2) { + ff_pullup_release_frame(f); + f = ff_pullup_get_frame(c); + if (!f) return 0; + if (f->length < 2) { + ff_pullup_release_frame(f); + if (!(mpi->fields & MP_IMGFIELD_REPEAT_FIRST)) + return 0; + f = ff_pullup_get_frame(c); + if (!f) return 0; + if (f->length < 2) { + ff_pullup_release_frame(f); + return 0; + } + } + } + +#if 0 + /* Average qscale tables from both frames. */ + if (mpi->qscale) { + for (i=0; i<c->w[3]; i++) { + vf->priv->qbuf[i] = (f->ofields[0]->planes[3][i] + + f->ofields[1]->planes[3][i+c->w[3]])>>1; + } + } +#else + /* Take worst of qscale tables from both frames. */ + if (mpi->qscale) { + for (i=0; i<c->w[3]; i++) { + vf->priv->qbuf[i] = MAX(f->ofields[0]->planes[3][i], f->ofields[1]->planes[3][i+c->w[3]]); + } + } +#endif + + /* If the frame isn't already exportable... */ + while (!f->buffer) { + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->width, mpi->height); + /* FIXME: Is it ok to discard dmpi if it's not direct? 
*/ + if (!(dmpi->flags & MP_IMGFLAG_DIRECT)) { + ff_pullup_pack_frame(c, f); + break; + } + /* Direct render fields into output buffer */ + my_memcpy_pic(dmpi->planes[0], f->ofields[0]->planes[0], + mpi->w, mpi->h/2, dmpi->stride[0]*2, c->stride[0]*2); + my_memcpy_pic(dmpi->planes[0] + dmpi->stride[0], + f->ofields[1]->planes[0] + c->stride[0], + mpi->w, mpi->h/2, dmpi->stride[0]*2, c->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1], f->ofields[0]->planes[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, c->stride[1]*2); + my_memcpy_pic(dmpi->planes[1] + dmpi->stride[1], + f->ofields[1]->planes[1] + c->stride[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, c->stride[1]*2); + my_memcpy_pic(dmpi->planes[2], f->ofields[0]->planes[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, c->stride[2]*2); + my_memcpy_pic(dmpi->planes[2] + dmpi->stride[2], + f->ofields[1]->planes[2] + c->stride[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, c->stride[2]*2); + } + ff_pullup_release_frame(f); + if (mpi->qscale) { + dmpi->qscale = vf->priv->qbuf; + dmpi->qstride = mpi->qstride; + dmpi->qscale_type = mpi->qscale_type; + } + return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + } + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_EXPORT, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->width, mpi->height); + + dmpi->planes[0] = f->buffer->planes[0]; + dmpi->planes[1] = f->buffer->planes[1]; + dmpi->planes[2] = f->buffer->planes[2]; + + dmpi->stride[0] = c->stride[0]; + dmpi->stride[1] = c->stride[1]; + dmpi->stride[2] = c->stride[2]; + + if (mpi->qscale) { + dmpi->qscale = vf->priv->qbuf; + dmpi->qstride = mpi->qstride; + dmpi->qscale_type = mpi->qscale_type; + } + ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + ff_pullup_release_frame(f); + return ret; +} + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* FIXME - support more formats */ + 
switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + if (height&3) { + ff_mp_msg(MSGT_VFILTER, MSGL_ERR, "height must be divisible by four\n"); + return 0; + } + return ff_vf_next_config(vf, width, height, d_width, d_height, flags, outfmt); +} + +static void uninit(struct vf_instance *vf) +{ + ff_pullup_free_context(vf->priv->ctx); + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + struct vf_priv_s *p; + struct pullup_context *c; + //vf->get_image = get_image; + vf->put_image = put_image; + vf->config = config; + vf->query_format = query_format; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + vf->priv = p = calloc(1, sizeof(struct vf_priv_s)); + p->ctx = c = ff_pullup_alloc_context(); + p->fakecount = 1; + c->junk_left = c->junk_right = 1; + c->junk_top = c->junk_bottom = 4; + c->strict_breaks = 0; + c->metric_plane = 0; + if (args) { + sscanf(args, "%d:%d:%d:%d:%d:%d", &c->junk_left, &c->junk_right, &c->junk_top, &c->junk_bottom, &c->strict_breaks, &c->metric_plane); + } + return 1; +} + +const vf_info_t ff_vf_info_pullup = { + "pullup (from field sequence to frames)", + "pullup", + "Rich Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_qp.c b/libavfilter/libmpcodecs/vf_qp.c new file mode 100644 index 0000000..579ec1c --- /dev/null +++ b/libavfilter/libmpcodecs/vf_qp.c @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <math.h> +#include <inttypes.h> + +#include "mp_msg.h" +#include "cpudetect.h" +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libvo/fastmemcpy.h" + +#include "libavcodec/avcodec.h" +#include "libavutil/eval.h" +#include "libavutil/mem.h" + + +struct vf_priv_s { + char eq[200]; + int8_t *qp; + int8_t lut[257]; + int qp_stride; +}; + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + int h= (height+15)>>4; + int i; + + vf->priv->qp_stride= (width+15)>>4; + vf->priv->qp= av_malloc(vf->priv->qp_stride*h*sizeof(int8_t)); + + for(i=-129; i<128; i++){ + double const_values[]={ + M_PI, + M_E, + i != -129, + i, + 0 + }; + static const char *const_names[]={ + "PI", + "E", + "known", + "qp", + NULL + }; + double temp_val; + int res; + + res= av_expr_parse_and_eval(&temp_val, vf->priv->eq, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL); + + if (res < 0){ + ff_mp_msg(MSGT_VFILTER, MSGL_ERR, "qp: Error evaluating \"%s\" \n", vf->priv->eq); + return 0; + } + vf->priv->lut[i+129]= lrintf(temp_val); + } + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi){ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change + // ok, we can do pp in-place (or pp disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + 
mpi->type, mpi->flags, mpi->w, mpi->h); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + mp_image_t *dmpi; + int x,y; + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // no DR, so get a new image! hope we'll get DR buffer: + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE, + mpi->w,mpi->h); + } + + dmpi= vf->dmpi; + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]); + if(mpi->flags&MP_IMGFLAG_PLANAR){ + memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]); + } + } + ff_vf_clone_mpi_attributes(dmpi, mpi); + + dmpi->qscale = vf->priv->qp; + dmpi->qstride= vf->priv->qp_stride; + if(mpi->qscale){ + for(y=0; y<((dmpi->h+15)>>4); y++){ + for(x=0; x<vf->priv->qp_stride; x++){ + dmpi->qscale[x + dmpi->qstride*y]= + vf->priv->lut[ 129 + ((int8_t)mpi->qscale[x + mpi->qstride*y]) ]; + } + } + }else{ + int qp= vf->priv->lut[0]; + for(y=0; y<((dmpi->h+15)>>4); y++){ + for(x=0; x<vf->priv->qp_stride; x++){ + dmpi->qscale[x + dmpi->qstride*y]= qp; + } + } + } + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct vf_instance *vf){ + if(!vf->priv) return; + + av_free(vf->priv->qp); + vf->priv->qp= NULL; + + av_free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// 
+static int vf_open(vf_instance_t *vf, char *args){ + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->uninit=uninit; + vf->priv=av_malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + +// avcodec_init(); + + if (args) strncpy(vf->priv->eq, args, 199); + + return 1; +} + +const vf_info_t ff_vf_info_qp = { + "QP changer", + "qp", + "Michael Niedermayer", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_sab.c b/libavfilter/libmpcodecs/vf_sab.c new file mode 100644 index 0000000..2928a85 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_sab.c @@ -0,0 +1,324 @@ +/* + * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <assert.h> + +#include "config.h" +#include "mp_msg.h" + +#if HAVE_MALLOC_H +#include <malloc.h> +#endif + +#include "libavutil/avutil.h" +#include "libavutil/mem.h" +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libswscale/swscale.h" +#include "vf_scale.h" + + +//===========================================================================// + +typedef struct FilterParam{ + float radius; + float preFilterRadius; + float strength; + float quality; + struct SwsContext *preFilterContext; + uint8_t *preFilterBuf; + int preFilterStride; + int distWidth; + int distStride; + int *distCoeff; + int colorDiffCoeff[512]; +}FilterParam; + +struct vf_priv_s { + FilterParam luma; + FilterParam chroma; +}; + + +/***************************************************************************/ + +//FIXME stupid code duplication +static void getSubSampleFactors(int *h, int *v, int format){ + switch(format){ + default: + assert(0); + case IMGFMT_YV12: + case IMGFMT_I420: + *h=1; + *v=1; + break; + case IMGFMT_YVU9: + *h=2; + *v=2; + break; + case IMGFMT_444P: + *h=0; + *v=0; + break; + case IMGFMT_422P: + *h=1; + *v=0; + break; + case IMGFMT_411P: + *h=2; + *v=0; + break; + } +} + +static int allocStuff(FilterParam *f, int width, int height){ + int stride= (width+7)&~7; + SwsVector *vec; + SwsFilter swsF; + int i,x,y; + f->preFilterBuf= av_malloc(stride*height); + f->preFilterStride= stride; + + vec = sws_getGaussianVec(f->preFilterRadius, f->quality); + swsF.lumH= swsF.lumV= vec; + swsF.chrH= swsF.chrV= NULL; + f->preFilterContext= sws_getContext( + width, height, AV_PIX_FMT_GRAY8, width, height, AV_PIX_FMT_GRAY8, SWS_POINT, &swsF, NULL, NULL); + + sws_freeVec(vec); + vec = sws_getGaussianVec(f->strength, 5.0); + for(i=0; i<512; i++){ + double d; + int index= i-256 + vec->length/2; + + if(index<0 || index>=vec->length) d= 0.0; + else d= vec->coeff[index]; + + 
f->colorDiffCoeff[i]= (int)(d/vec->coeff[vec->length/2]*(1<<12) + 0.5); + } + sws_freeVec(vec); + vec = sws_getGaussianVec(f->radius, f->quality); + f->distWidth= vec->length; + f->distStride= (vec->length+7)&~7; + f->distCoeff= av_malloc(f->distWidth*f->distStride*sizeof(int32_t)); + + for(y=0; y<vec->length; y++){ + for(x=0; x<vec->length; x++){ + double d= vec->coeff[x] * vec->coeff[y]; + + f->distCoeff[x + y*f->distStride]= (int)(d*(1<<10) + 0.5); +// if(y==vec->length/2) +// printf("%6d ", f->distCoeff[x + y*f->distStride]); + } + } + sws_freeVec(vec); + + return 0; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + + int sw, sh; +//__asm__ volatile("emms\n\t"); + allocStuff(&vf->priv->luma, width, height); + + getSubSampleFactors(&sw, &sh, outfmt); + allocStuff(&vf->priv->chroma, width>>sw, height>>sh); + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void freeBuffers(FilterParam *f){ + if(f->preFilterContext) sws_freeContext(f->preFilterContext); + f->preFilterContext=NULL; + + av_free(f->preFilterBuf); + f->preFilterBuf=NULL; + + av_free(f->distCoeff); + f->distCoeff=NULL; +} + +static void uninit(struct vf_instance *vf){ + if(!vf->priv) return; + + freeBuffers(&vf->priv->luma); + freeBuffers(&vf->priv->chroma); + + free(vf->priv); + vf->priv=NULL; +} + +static inline void blur(uint8_t *dst, uint8_t *src, int w, int h, int dstStride, int srcStride, FilterParam *fp){ + int x, y; + FilterParam f= *fp; + const int radius= f.distWidth/2; + const uint8_t* const srcArray[MP_MAX_PLANES] = {src}; + uint8_t *dstArray[MP_MAX_PLANES]= {f.preFilterBuf}; + int srcStrideArray[MP_MAX_PLANES]= {srcStride}; + int dstStrideArray[MP_MAX_PLANES]= {f.preFilterStride}; + +// f.preFilterContext->swScale(f.preFilterContext, srcArray, srcStrideArray, 0, h, dstArray, dstStrideArray); + sws_scale(f.preFilterContext, srcArray, srcStrideArray, 0, h, 
dstArray, dstStrideArray); + + for(y=0; y<h; y++){ + for(x=0; x<w; x++){ + int sum=0; + int div=0; + int dy; + const int preVal= f.preFilterBuf[x + y*f.preFilterStride]; +#if 0 + const int srcVal= src[x + y*srcStride]; +if((x/32)&1){ + dst[x + y*dstStride]= srcVal; + if(y%32==0) dst[x + y*dstStride]= 0; + continue; +} +#endif + if(x >= radius && x < w - radius){ + for(dy=0; dy<radius*2+1; dy++){ + int dx; + int iy= y+dy - radius; + if (iy<0) iy= -iy; + else if(iy>=h) iy= h+h-iy-1; + + for(dx=0; dx<radius*2+1; dx++){ + const int ix= x+dx - radius; + int factor; + + factor= f.colorDiffCoeff[256+preVal - f.preFilterBuf[ix + iy*f.preFilterStride] ] + *f.distCoeff[dx + dy*f.distStride]; + sum+= src[ix + iy*srcStride] *factor; + div+= factor; + } + } + }else{ + for(dy=0; dy<radius*2+1; dy++){ + int dx; + int iy= y+dy - radius; + if (iy<0) iy= -iy; + else if(iy>=h) iy= h+h-iy-1; + + for(dx=0; dx<radius*2+1; dx++){ + int ix= x+dx - radius; + int factor; + if (ix<0) ix= -ix; + else if(ix>=w) ix= w+w-ix-1; + + factor= f.colorDiffCoeff[256+preVal - f.preFilterBuf[ix + iy*f.preFilterStride] ] + *f.distCoeff[dx + dy*f.distStride]; + sum+= src[ix + iy*srcStride] *factor; + div+= factor; + } + } + } + dst[x + y*dstStride]= (sum + div/2)/div; + } + } +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + int cw= mpi->w >> mpi->chroma_x_shift; + int ch= mpi->h >> mpi->chroma_y_shift; + + mp_image_t *dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->w,mpi->h); + + assert(mpi->flags&MP_IMGFLAG_PLANAR); + + blur(dmpi->planes[0], mpi->planes[0], mpi->w,mpi->h, dmpi->stride[0], mpi->stride[0], &vf->priv->luma); + blur(dmpi->planes[1], mpi->planes[1], cw , ch , dmpi->stride[1], mpi->stride[1], &vf->priv->chroma); + blur(dmpi->planes[2], mpi->planes[2], cw , ch , dmpi->stride[2], mpi->stride[2], &vf->priv->chroma); + + return ff_vf_next_put_image(vf,dmpi, pts); +} + 
+//===========================================================================// + +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt) + { + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_YVU9: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int vf_open(vf_instance_t *vf, char *args){ + int e; + + vf->config=config; + vf->put_image=put_image; +// vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + if(args==NULL) return 0; + + e=sscanf(args, "%f:%f:%f:%f:%f:%f", + &vf->priv->luma.radius, + &vf->priv->luma.preFilterRadius, + &vf->priv->luma.strength, + &vf->priv->chroma.radius, + &vf->priv->chroma.preFilterRadius, + &vf->priv->chroma.strength + ); + + vf->priv->luma.quality = vf->priv->chroma.quality= 3.0; + + if(e==3){ + vf->priv->chroma.radius= vf->priv->luma.radius; + vf->priv->chroma.preFilterRadius = vf->priv->luma.preFilterRadius; + vf->priv->chroma.strength= vf->priv->luma.strength; + }else if(e!=6) + return 0; + +// if(vf->priv->luma.radius < 0) return 0; +// if(vf->priv->chroma.radius < 0) return 0; + + return 1; +} + +const vf_info_t ff_vf_info_sab = { + "shape adaptive blur", + "sab", + "Michael Niedermayer", + "", + vf_open, + NULL +}; + +//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_scale.h b/libavfilter/libmpcodecs/vf_scale.h new file mode 100644 index 0000000..177fbe5 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_scale.h @@ -0,0 +1,34 @@ +/* + * This file is part of MPlayer. 
+ * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_VF_SCALE_H +#define MPLAYER_VF_SCALE_H + +extern int ff_sws_chr_vshift; +extern int ff_sws_chr_hshift; + +extern float ff_sws_chr_gblur; +extern float ff_sws_lum_gblur; +extern float ff_sws_chr_sharpen; +extern float ff_sws_lum_sharpen; + +extern int ff_sws_flags; + +struct SwsContext *ff_sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat); + +#endif /* MPLAYER_VF_SCALE_H */ diff --git a/libavfilter/libmpcodecs/vf_softpulldown.c b/libavfilter/libmpcodecs/vf_softpulldown.c new file mode 100644 index 0000000..556374e --- /dev/null +++ b/libavfilter/libmpcodecs/vf_softpulldown.c @@ -0,0 +1,163 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +struct vf_priv_s { + int state; + long long in; + long long out; +}; + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + int ret = 0; + int flags = mpi->fields; + int state = vf->priv->state; + + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE, mpi->width, mpi->height); + + vf->priv->in++; + + if ((state == 0 && + !(flags & MP_IMGFIELD_TOP_FIRST)) || + (state == 1 && + flags & MP_IMGFIELD_TOP_FIRST)) { + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, + "softpulldown: Unexpected field flags: state=%d top_field_first=%d repeat_first_field=%d\n", + state, + (flags & MP_IMGFIELD_TOP_FIRST) != 0, + (flags & MP_IMGFIELD_REPEAT_FIRST) != 0); + state ^= 1; + } + + if (state == 0) { + ret = ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE); + vf->priv->out++; + if (flags & MP_IMGFIELD_REPEAT_FIRST) { + my_memcpy_pic(dmpi->planes[0], + mpi->planes[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1], + mpi->planes[1], + mpi->chroma_width, + mpi->chroma_height/2, + dmpi->stride[1]*2, + mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2], + mpi->planes[2], + mpi->chroma_width, + mpi->chroma_height/2, + dmpi->stride[2]*2, + mpi->stride[2]*2); + } + state=1; + } + } else { + my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0], + mpi->planes[0]+mpi->stride[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + 
my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1], + mpi->planes[1]+mpi->stride[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2], + mpi->planes[2]+mpi->stride[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + vf->priv->out++; + if (flags & MP_IMGFIELD_REPEAT_FIRST) { + ret |= ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE); + vf->priv->out++; + state=0; + } else { + my_memcpy_pic(dmpi->planes[0], + mpi->planes[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1], + mpi->planes[1], + mpi->chroma_width, + mpi->chroma_height/2, + dmpi->stride[1]*2, + mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2], + mpi->planes[2], + mpi->chroma_width, + mpi->chroma_height/2, + dmpi->stride[2]*2, + mpi->stride[2]*2); + } + } + } + + vf->priv->state = state; + + return ret; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void uninit(struct vf_instance *vf) +{ + ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "softpulldown: %lld frames in, %lld frames out\n", vf->priv->in, vf->priv->out); + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->config = config; + vf->put_image = put_image; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + vf->priv = calloc(1, sizeof(struct vf_priv_s)); + vf->priv->state = 0; + return 1; +} + +const vf_info_t ff_vf_info_softpulldown = { + "mpeg2 soft 3:2 pulldown", + "softpulldown", + "Tobias Diedrich <ranma+mplayer@tdiedrich.de>", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_softskip.c b/libavfilter/libmpcodecs/vf_softskip.c new file mode 100644 index 
0000000..085f921 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_softskip.c @@ -0,0 +1,102 @@ +/* + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +struct vf_priv_s { + int skipflag; +}; + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + + if (vf->priv->skipflag) + return vf->priv->skipflag = 0; + + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_EXPORT, 0, mpi->width, mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + + dmpi->planes[0] = mpi->planes[0]; + dmpi->stride[0] = mpi->stride[0]; + if (dmpi->flags&MP_IMGFLAG_PLANAR) { + dmpi->planes[1] = mpi->planes[1]; + dmpi->stride[1] = mpi->stride[1]; + dmpi->planes[2] = mpi->planes[2]; + dmpi->stride[2] = mpi->stride[2]; + } + + return ff_vf_next_put_image(vf, dmpi, pts); +} + +static int control(struct vf_instance *vf, int request, void* data) +{ + switch (request) { + case VFCTRL_SKIP_NEXT_FRAME: + vf->priv->skipflag = 1; + return CONTROL_TRUE; + } + return ff_vf_next_control(vf, request, data); +} + +#if 0 +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* 
FIXME - figure out which other formats work */ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} +#endif + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->put_image = put_image; + vf->control = control; + vf->uninit = uninit; + vf->priv = calloc(1, sizeof(struct vf_priv_s)); + return 1; +} + +const vf_info_t ff_vf_info_softskip = { + "soft (post-filter) frame skipping for encoding", + "softskip", + "Rich Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_spp.c b/libavfilter/libmpcodecs/vf_spp.c new file mode 100644 index 0000000..75ede23 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_spp.c @@ -0,0 +1,621 @@ +/* + * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +/* + * This implementation is based on an algorithm described in + * "Aria Nosratinia Embedded Post-Processing for + * Enhancement of Compressed Images (1999)" + * (http://citeseer.nj.nec.com/nosratinia99embedded.html) + */ + + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "config.h" + +#include "mp_msg.h" +#include "cpudetect.h" + +#include "libavutil/common.h" +#include "libavutil/internal.h" +#include "libavutil/intreadwrite.h" +#include "libavcodec/avcodec.h" +#include "libavcodec/dsputil.h" + +#undef fprintf +#undef free +#undef malloc + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "av_helpers.h" +#include "libvo/fastmemcpy.h" + +#define XMIN(a,b) ((a) < (b) ? (a) : (b)) + +//===========================================================================// +static const uint8_t __attribute__((aligned(8))) dither[8][8]={ +{ 0, 48, 12, 60, 3, 51, 15, 63, }, +{ 32, 16, 44, 28, 35, 19, 47, 31, }, +{ 8, 56, 4, 52, 11, 59, 7, 55, }, +{ 40, 24, 36, 20, 43, 27, 39, 23, }, +{ 2, 50, 14, 62, 1, 49, 13, 61, }, +{ 34, 18, 46, 30, 33, 17, 45, 29, }, +{ 10, 58, 6, 54, 9, 57, 5, 53, }, +{ 42, 26, 38, 22, 41, 25, 37, 21, }, +}; + +static const uint8_t offset[127][2]= { +{0,0}, +{0,0}, {4,4}, +{0,0}, {2,2}, {6,4}, {4,6}, +{0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7}, + +{0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3}, +{0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7}, + +{0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7}, +{2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7}, +{4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7}, +{6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7}, + +{0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2}, +{0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0}, +{1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3}, +{1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1}, +{0,1}, {4,5}, {0,5}, {4,1}, 
{2,3}, {6,7}, {2,7}, {6,3}, +{0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1}, +{1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2}, +{1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0}, +}; + +struct vf_priv_s { + int log2_count; + int qp; + int mode; + int mpeg2; + int temp_stride; + uint8_t *src; + int16_t *temp; + AVCodecContext *avctx; + DSPContext dsp; + char *non_b_qp; +}; + +#define SHIFT 22 + +static void hardthresh_c(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){ + int i; + int bias= 0; //FIXME + unsigned int threshold1, threshold2; + + threshold1= qp*((1<<4) - bias) - 1; + threshold2= (threshold1<<1); + + memset(dst, 0, 64*sizeof(int16_t)); + dst[0]= (src[0] + 4)>>3; + + for(i=1; i<64; i++){ + int level= src[i]; + if(((unsigned)(level+threshold1))>threshold2){ + const int j= permutation[i]; + dst[j]= (level + 4)>>3; + } + } +} + +static void softthresh_c(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){ + int i; + int bias= 0; //FIXME + unsigned int threshold1, threshold2; + + threshold1= qp*((1<<4) - bias) - 1; + threshold2= (threshold1<<1); + + memset(dst, 0, 64*sizeof(int16_t)); + dst[0]= (src[0] + 4)>>3; + + for(i=1; i<64; i++){ + int level= src[i]; + if(((unsigned)(level+threshold1))>threshold2){ + const int j= permutation[i]; + if(level>0) + dst[j]= (level - threshold1 + 4)>>3; + else + dst[j]= (level + threshold1 + 4)>>3; + } + } +} + +#if HAVE_MMX +static void hardthresh_mmx(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){ + int bias= 0; //FIXME + unsigned int threshold1; + + threshold1= qp*((1<<4) - bias) - 1; + + __asm__ volatile( +#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \ + "movq " #src0 ", %%mm0 \n\t"\ + "movq " #src1 ", %%mm1 \n\t"\ + "movq " #src2 ", %%mm2 \n\t"\ + "movq " #src3 ", %%mm3 \n\t"\ + "psubw %%mm4, %%mm0 \n\t"\ + "psubw %%mm4, %%mm1 \n\t"\ + "psubw %%mm4, %%mm2 \n\t"\ + "psubw %%mm4, %%mm3 \n\t"\ + "paddusw %%mm5, %%mm0 \n\t"\ + "paddusw 
%%mm5, %%mm1 \n\t"\ + "paddusw %%mm5, %%mm2 \n\t"\ + "paddusw %%mm5, %%mm3 \n\t"\ + "paddw %%mm6, %%mm0 \n\t"\ + "paddw %%mm6, %%mm1 \n\t"\ + "paddw %%mm6, %%mm2 \n\t"\ + "paddw %%mm6, %%mm3 \n\t"\ + "psubusw %%mm6, %%mm0 \n\t"\ + "psubusw %%mm6, %%mm1 \n\t"\ + "psubusw %%mm6, %%mm2 \n\t"\ + "psubusw %%mm6, %%mm3 \n\t"\ + "psraw $3, %%mm0 \n\t"\ + "psraw $3, %%mm1 \n\t"\ + "psraw $3, %%mm2 \n\t"\ + "psraw $3, %%mm3 \n\t"\ +\ + "movq %%mm0, %%mm7 \n\t"\ + "punpcklwd %%mm2, %%mm0 \n\t" /*A*/\ + "punpckhwd %%mm2, %%mm7 \n\t" /*C*/\ + "movq %%mm1, %%mm2 \n\t"\ + "punpcklwd %%mm3, %%mm1 \n\t" /*B*/\ + "punpckhwd %%mm3, %%mm2 \n\t" /*D*/\ + "movq %%mm0, %%mm3 \n\t"\ + "punpcklwd %%mm1, %%mm0 \n\t" /*A*/\ + "punpckhwd %%mm7, %%mm3 \n\t" /*C*/\ + "punpcklwd %%mm2, %%mm7 \n\t" /*B*/\ + "punpckhwd %%mm2, %%mm1 \n\t" /*D*/\ +\ + "movq %%mm0, " #dst0 " \n\t"\ + "movq %%mm7, " #dst1 " \n\t"\ + "movq %%mm3, " #dst2 " \n\t"\ + "movq %%mm1, " #dst3 " \n\t" + + "movd %2, %%mm4 \n\t" + "movd %3, %%mm5 \n\t" + "movd %4, %%mm6 \n\t" + "packssdw %%mm4, %%mm4 \n\t" + "packssdw %%mm5, %%mm5 \n\t" + "packssdw %%mm6, %%mm6 \n\t" + "packssdw %%mm4, %%mm4 \n\t" + "packssdw %%mm5, %%mm5 \n\t" + "packssdw %%mm6, %%mm6 \n\t" + REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0)) + REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0)) + REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0)) + REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0)) + : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate then needed? 
+ ); + dst[0]= (src[0] + 4)>>3; +} + +static void softthresh_mmx(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation){ + int bias= 0; //FIXME + unsigned int threshold1; + + threshold1= qp*((1<<4) - bias) - 1; + + __asm__ volatile( +#undef REQUANT_CORE +#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \ + "movq " #src0 ", %%mm0 \n\t"\ + "movq " #src1 ", %%mm1 \n\t"\ + "pxor %%mm6, %%mm6 \n\t"\ + "pxor %%mm7, %%mm7 \n\t"\ + "pcmpgtw %%mm0, %%mm6 \n\t"\ + "pcmpgtw %%mm1, %%mm7 \n\t"\ + "pxor %%mm6, %%mm0 \n\t"\ + "pxor %%mm7, %%mm1 \n\t"\ + "psubusw %%mm4, %%mm0 \n\t"\ + "psubusw %%mm4, %%mm1 \n\t"\ + "pxor %%mm6, %%mm0 \n\t"\ + "pxor %%mm7, %%mm1 \n\t"\ + "movq " #src2 ", %%mm2 \n\t"\ + "movq " #src3 ", %%mm3 \n\t"\ + "pxor %%mm6, %%mm6 \n\t"\ + "pxor %%mm7, %%mm7 \n\t"\ + "pcmpgtw %%mm2, %%mm6 \n\t"\ + "pcmpgtw %%mm3, %%mm7 \n\t"\ + "pxor %%mm6, %%mm2 \n\t"\ + "pxor %%mm7, %%mm3 \n\t"\ + "psubusw %%mm4, %%mm2 \n\t"\ + "psubusw %%mm4, %%mm3 \n\t"\ + "pxor %%mm6, %%mm2 \n\t"\ + "pxor %%mm7, %%mm3 \n\t"\ +\ + "paddsw %%mm5, %%mm0 \n\t"\ + "paddsw %%mm5, %%mm1 \n\t"\ + "paddsw %%mm5, %%mm2 \n\t"\ + "paddsw %%mm5, %%mm3 \n\t"\ + "psraw $3, %%mm0 \n\t"\ + "psraw $3, %%mm1 \n\t"\ + "psraw $3, %%mm2 \n\t"\ + "psraw $3, %%mm3 \n\t"\ +\ + "movq %%mm0, %%mm7 \n\t"\ + "punpcklwd %%mm2, %%mm0 \n\t" /*A*/\ + "punpckhwd %%mm2, %%mm7 \n\t" /*C*/\ + "movq %%mm1, %%mm2 \n\t"\ + "punpcklwd %%mm3, %%mm1 \n\t" /*B*/\ + "punpckhwd %%mm3, %%mm2 \n\t" /*D*/\ + "movq %%mm0, %%mm3 \n\t"\ + "punpcklwd %%mm1, %%mm0 \n\t" /*A*/\ + "punpckhwd %%mm7, %%mm3 \n\t" /*C*/\ + "punpcklwd %%mm2, %%mm7 \n\t" /*B*/\ + "punpckhwd %%mm2, %%mm1 \n\t" /*D*/\ +\ + "movq %%mm0, " #dst0 " \n\t"\ + "movq %%mm7, " #dst1 " \n\t"\ + "movq %%mm3, " #dst2 " \n\t"\ + "movq %%mm1, " #dst3 " \n\t" + + "movd %2, %%mm4 \n\t" + "movd %3, %%mm5 \n\t" + "packssdw %%mm4, %%mm4 \n\t" + "packssdw %%mm5, %%mm5 \n\t" + "packssdw %%mm4, %%mm4 \n\t" + "packssdw %%mm5, %%mm5 \n\t" + REQUANT_CORE( (%1), 
8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0)) + REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0)) + REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0)) + REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0)) + : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate then needed? + ); + + dst[0]= (src[0] + 4)>>3; +} +#endif + +static inline void add_block(int16_t *dst, int stride, int16_t block[64]){ + int y; + + for(y=0; y<8; y++){ + *(uint32_t*)&dst[0 + y*stride]+= *(uint32_t*)&block[0 + y*8]; + *(uint32_t*)&dst[2 + y*stride]+= *(uint32_t*)&block[2 + y*8]; + *(uint32_t*)&dst[4 + y*stride]+= *(uint32_t*)&block[4 + y*8]; + *(uint32_t*)&dst[6 + y*stride]+= *(uint32_t*)&block[6 + y*8]; + } +} + +static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){ + int y, x; + +#define STORE(pos) \ + temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>6;\ + if(temp & 0x100) temp= ~(temp>>31);\ + dst[x + y*dst_stride + pos]= temp; + + for(y=0; y<height; y++){ + const uint8_t *d= dither[y]; + for(x=0; x<width; x+=8){ + int temp; + STORE(0); + STORE(1); + STORE(2); + STORE(3); + STORE(4); + STORE(5); + STORE(6); + STORE(7); + } + } +} + +#if HAVE_MMX +static void store_slice_mmx(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){ + int y; + + for(y=0; y<height; y++){ + uint8_t *dst1= dst; + int16_t *src1= src; + __asm__ volatile( + "movq (%3), %%mm3 \n\t" + "movq (%3), %%mm4 \n\t" + "movd %4, %%mm2 \n\t" + "pxor %%mm0, %%mm0 \n\t" + "punpcklbw %%mm0, %%mm3 \n\t" + "punpckhbw %%mm0, %%mm4 \n\t" + "psraw %%mm2, %%mm3 \n\t" + "psraw %%mm2, %%mm4 \n\t" + "movd %5, %%mm2 \n\t" + "1: \n\t" + "movq (%0), %%mm0 \n\t" + "movq 8(%0), %%mm1 \n\t" + "paddw %%mm3, %%mm0 \n\t" + "paddw %%mm4, %%mm1 \n\t" + "psraw %%mm2, %%mm0 \n\t" + "psraw %%mm2, %%mm1 \n\t" + "packuswb 
%%mm1, %%mm0 \n\t" + "movq %%mm0, (%1) \n\t" + "add $16, %0 \n\t" + "add $8, %1 \n\t" + "cmp %2, %1 \n\t" + " jb 1b \n\t" + : "+r" (src1), "+r"(dst1) + : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(6-log2_scale) + ); + src += src_stride; + dst += dst_stride; + } +// if(width != mmxw) +// store_slice_c(dst + mmxw, src + mmxw, dst_stride, src_stride, width - mmxw, log2_scale); +} +#endif + +static void (*store_slice)(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)= store_slice_c; + +static void (*requantize)(int16_t dst[64], int16_t src[64], int qp, uint8_t *permutation)= hardthresh_c; + +static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){ + int x, y, i; + const int count= 1<<p->log2_count; + const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15)); + uint64_t __attribute__((aligned(16))) block_align[32]; + int16_t *block = (int16_t *)block_align; + int16_t *block2= (int16_t *)(block_align+16); + + if (!src || !dst) return; // HACK avoid crash for Y8 colourspace + for(y=0; y<height; y++){ + int index= 8 + 8*stride + y*stride; + fast_memcpy(p->src + index, src + y*src_stride, width); + for(x=0; x<8; x++){ + p->src[index - x - 1]= p->src[index + x ]; + p->src[index + width + x ]= p->src[index + width - x - 1]; + } + } + for(y=0; y<8; y++){ + fast_memcpy(p->src + ( 7-y)*stride, p->src + ( y+8)*stride, stride); + fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride); + } + //FIXME (try edge emu) + + for(y=0; y<height+8; y+=8){ + memset(p->temp + (8+y)*stride, 0, 8*stride*sizeof(int16_t)); + for(x=0; x<width+8; x+=8){ + const int qps= 3 + is_luma; + int qp; + + if(p->qp) + qp= p->qp; + else{ + qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride]; + qp = FFMAX(1, norm_qscale(qp, p->mpeg2)); + } + for(i=0; i<count; i++){ + const int x1= 
x + offset[i+count-1][0]; + const int y1= y + offset[i+count-1][1]; + const int index= x1 + y1*stride; + p->dsp.get_pixels(block, p->src + index, stride); + p->dsp.fdct(block); + requantize(block2, block, qp, p->dsp.idct_permutation); + p->dsp.idct(block2); + add_block(p->temp + index, stride, block2); + } + } + if(y) + store_slice(dst + (y-8)*dst_stride, p->temp + 8 + y*stride, dst_stride, stride, width, XMIN(8, height+8-y), 6-p->log2_count); + } +#if 0 + for(y=0; y<height; y++){ + for(x=0; x<width; x++){ + if((((x>>6) ^ (y>>6)) & 1) == 0) + dst[x + y*dst_stride]= p->src[8 + 8*stride + x + y*stride]; + if((x&63) == 0 || (y&63)==0) + dst[x + y*dst_stride] += 128; + } + } +#endif + //FIXME reorder for better caching +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + int h= (height+16+15)&(~15); + + vf->priv->temp_stride= (width+16+15)&(~15); + vf->priv->temp= malloc(vf->priv->temp_stride*h*sizeof(int16_t)); + vf->priv->src = malloc(vf->priv->temp_stride*h*sizeof(uint8_t)); + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi){ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change + // ok, we can do pp in-place (or pp disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + mp_image_t *dmpi; + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // no DR, so get a new image! 
hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, + MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE, + mpi->width,mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + }else{ + dmpi=vf->dmpi; + } + + vf->priv->mpeg2= mpi->qscale_type; + if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){ + int w = mpi->qstride; + int h = (mpi->h + 15) >> 4; + if (!w) { + w = (mpi->w + 15) >> 4; + h = 1; + } + if(!vf->priv->non_b_qp) + vf->priv->non_b_qp= malloc(w*h); + fast_memcpy(vf->priv->non_b_qp, mpi->qscale, w*h); + } + if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){ + char *qp_tab= vf->priv->non_b_qp; + if((vf->priv->mode&4) || !qp_tab) + qp_tab= mpi->qscale; + + if(qp_tab || vf->priv->qp){ + filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, qp_tab, mpi->qstride, 1); + filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0); + filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0); + }else{ + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]); + memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]); + } + } + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t"); +#endif +#if HAVE_MMX2 + if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t"); +#endif + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct vf_instance *vf){ + if(!vf->priv) return; + + free(vf->priv->temp); + vf->priv->temp= NULL; + free(vf->priv->src); + vf->priv->src= NULL; + 
free(vf->priv->avctx); + vf->priv->avctx= NULL; + free(vf->priv->non_b_qp); + vf->priv->non_b_qp= NULL; + + free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt){ + case IMGFMT_YVU9: + case IMGFMT_IF09: + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_CLPL: + case IMGFMT_Y800: + case IMGFMT_Y8: + case IMGFMT_444P: + case IMGFMT_422P: + case IMGFMT_411P: + return ff_vf_next_query_format(vf,fmt); + } + return 0; +} + +static int control(struct vf_instance *vf, int request, void* data){ + switch(request){ + case VFCTRL_QUERY_MAX_PP_LEVEL: + return 6; + case VFCTRL_SET_PP_LEVEL: + vf->priv->log2_count= *((unsigned int*)data); + return CONTROL_TRUE; + } + return ff_vf_next_control(vf,request,data); +} + +static int vf_open(vf_instance_t *vf, char *args){ + + int log2c=-1; + + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->control= control; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + ff_init_avcodec(); + + vf->priv->avctx= avcodec_alloc_context3(NULL); + ff_dsputil_init(&vf->priv->dsp, vf->priv->avctx); + + vf->priv->log2_count= 3; + + if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode); + + if( log2c >=0 && log2c <=6 ) + vf->priv->log2_count = log2c; + + if(vf->priv->qp < 0) + vf->priv->qp = 0; + + switch(vf->priv->mode&3){ + default: + case 0: requantize= hardthresh_c; break; + case 1: requantize= softthresh_c; break; + } + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX){ + store_slice= store_slice_mmx; + switch(vf->priv->mode&3){ + case 0: requantize= hardthresh_mmx; break; + case 1: requantize= softthresh_mmx; break; + } + } +#endif + + return 1; +} + +const vf_info_t ff_vf_info_spp = { + "simple postprocess", + "spp", + "Michael Niedermayer", + 
"", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_stereo3d.c b/libavfilter/libmpcodecs/vf_stereo3d.c new file mode 100644 index 0000000..fe75bd0 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_stereo3d.c @@ -0,0 +1,540 @@ +/* + * Copyright (C) 2010 Gordon Schmidt <gordon.schmidt <at> s2000.tu-chemnitz.de> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +//==includes==// +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" +#include "help_mp.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libavutil/common.h" +#include "libvo/fastmemcpy.h" + +//==types==// +typedef enum stereo_code { + ANAGLYPH_RC_GRAY, //anaglyph red/cyan gray + ANAGLYPH_RC_HALF, //anaglyph red/cyan half colored + ANAGLYPH_RC_COLOR, //anaglyph red/cyan colored + ANAGLYPH_RC_DUBOIS, //anaglyph red/cyan dubois + ANAGLYPH_GM_GRAY, //anaglyph green/magenta gray + ANAGLYPH_GM_HALF, //anaglyph green/magenta half colored + ANAGLYPH_GM_COLOR, //anaglyph green/magenta colored + ANAGLYPH_GM_DUBOIS, //anaglyph green/magenta dubois + ANAGLYPH_YB_GRAY, //anaglyph yellow/blue gray + ANAGLYPH_YB_HALF, //anaglyph yellow/blue half colored + ANAGLYPH_YB_COLOR, //anaglyph yellow/blue colored + ANAGLYPH_YB_DUBOIS, //anaglyph yellow/blue dubois + MONO_L, //mono output for debugging (left eye only) + MONO_R, //mono output for debugging (right eye only) + SIDE_BY_SIDE_LR, //side by side parallel (left eye left, right eye right) + SIDE_BY_SIDE_RL, //side by side crosseye (right eye left, left eye right) + SIDE_BY_SIDE_2_LR, //side by side parallel with half width resolution + SIDE_BY_SIDE_2_RL, //side by side crosseye with half width resolution + ABOVE_BELOW_LR, //above-below (left eye above, right eye below) + ABOVE_BELOW_RL, //above-below (right eye above, left eye below) + ABOVE_BELOW_2_LR, //above-below with half height resolution + ABOVE_BELOW_2_RL, //above-below with half height resolution + INTERLEAVE_ROWS_LR, //row-interleave (left eye has top row) + INTERLEAVE_ROWS_RL, //row-interleave (right eye has top row) + STEREO_CODE_COUNT //no value set - TODO: needs autodetection +} stereo_code; + +typedef struct component { + stereo_code fmt; + unsigned int width; + unsigned int height; + unsigned int off_left; + unsigned int off_right; + unsigned int row_left; + unsigned int 
row_right; +} component; + +//==global variables==// +static const int ana_coeff[][3][6] = { + [ANAGLYPH_RC_GRAY] = + {{19595, 38470, 7471, 0, 0, 0}, + { 0, 0, 0, 19595, 38470, 7471}, + { 0, 0, 0, 19595, 38470, 7471}}, + [ANAGLYPH_RC_HALF] = + {{19595, 38470, 7471, 0, 0, 0}, + { 0, 0, 0, 0, 65536, 0}, + { 0, 0, 0, 0, 0, 65536}}, + [ANAGLYPH_RC_COLOR] = + {{65536, 0, 0, 0, 0, 0}, + { 0, 0, 0, 0, 65536, 0}, + { 0, 0, 0, 0, 0, 65536}}, + [ANAGLYPH_RC_DUBOIS] = + {{29891, 32800, 11559, -2849, -5763, -102}, + {-2627, -2479, -1033, 24804, 48080, -1209}, + { -997, -1350, -358, -4729, -7403, 80373}}, + [ANAGLYPH_GM_GRAY] = + {{ 0, 0, 0, 19595, 38470, 7471}, + {19595, 38470, 7471, 0, 0, 0}, + { 0, 0, 0, 19595, 38470, 7471}}, + [ANAGLYPH_GM_HALF] = + {{ 0, 0, 0, 65536, 0, 0}, + {19595, 38470, 7471, 0, 0, 0}, + { 0, 0, 0, 0, 0, 65536}}, + [ANAGLYPH_GM_COLOR] = + {{ 0, 0, 0, 65536, 0, 0}, + { 0, 65536, 0, 0, 0, 0}, + { 0, 0, 0, 0, 0, 65536}}, + [ANAGLYPH_GM_DUBOIS] = + {{-4063,-10354, -2556, 34669, 46203, 1573}, + {18612, 43778, 9372, -1049, -983, -4260}, + { -983, -1769, 1376, 590, 4915, 61407}}, + [ANAGLYPH_YB_GRAY] = + {{ 0, 0, 0, 19595, 38470, 7471}, + { 0, 0, 0, 19595, 38470, 7471}, + {19595, 38470, 7471, 0, 0, 0}}, + [ANAGLYPH_YB_HALF] = + {{ 0, 0, 0, 65536, 0, 0}, + { 0, 0, 0, 0, 65536, 0}, + {19595, 38470, 7471, 0, 0, 0}}, + [ANAGLYPH_YB_COLOR] = + {{ 0, 0, 0, 65536, 0, 0}, + { 0, 0, 0, 0, 65536, 0}, + { 0, 0, 65536, 0, 0, 0}}, + [ANAGLYPH_YB_DUBOIS] = + {{65535,-12650,18451, -987, -7590, -1049}, + {-1604, 56032, 4196, 370, 3826, -1049}, + {-2345,-10676, 1358, 5801, 11416, 56217}}, +}; + +struct vf_priv_s { + component in; + component out; + int ana_matrix[3][6]; + unsigned int width; + unsigned int height; + unsigned int row_step; +} const ff_vf_priv_default = { + {SIDE_BY_SIDE_LR}, + {ANAGLYPH_RC_DUBOIS} +}; + +//==functions==// +static inline uint8_t ana_convert(int coeff[6], uint8_t left[3], uint8_t right[3]) +{ + int sum; + + sum = coeff[0] * left[0] + coeff[3] * 
right[0]; //red in + sum += coeff[1] * left[1] + coeff[4] * right[1]; //green in + sum += coeff[2] * left[2] + coeff[5] * right[2]; //blue in + return av_clip_uint8(sum >> 16); +} + +static int config(struct vf_instance *vf, int width, int height, int d_width, + int d_height, unsigned int flags, unsigned int outfmt) +{ + if ((width & 1) || (height & 1)) { + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, "[stereo3d] invalid height or width\n"); + return 0; + } + //default input values + vf->priv->width = width; + vf->priv->height = height; + vf->priv->row_step = 1; + vf->priv->in.width = width; + vf->priv->in.height = height; + vf->priv->in.off_left = 0; + vf->priv->in.off_right = 0; + vf->priv->in.row_left = 0; + vf->priv->in.row_right = 0; + + //check input format + switch (vf->priv->in.fmt) { + case SIDE_BY_SIDE_2_LR: + d_width *= 2; + case SIDE_BY_SIDE_LR: + vf->priv->width = width / 2; + vf->priv->in.off_right = vf->priv->width * 3; + break; + case SIDE_BY_SIDE_2_RL: + d_width *= 2; + case SIDE_BY_SIDE_RL: + vf->priv->width = width / 2; + vf->priv->in.off_left = vf->priv->width * 3; + break; + case ABOVE_BELOW_2_LR: + d_height *= 2; + case ABOVE_BELOW_LR: + vf->priv->height = height / 2; + vf->priv->in.row_right = vf->priv->height; + break; + case ABOVE_BELOW_2_RL: + d_height *= 2; + case ABOVE_BELOW_RL: + vf->priv->height = height / 2; + vf->priv->in.row_left = vf->priv->height; + break; + default: + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, + "[stereo3d] stereo format of input is not supported\n"); + return 0; + break; + } + //default output values + vf->priv->out.width = vf->priv->width; + vf->priv->out.height = vf->priv->height; + vf->priv->out.off_left = 0; + vf->priv->out.off_right = 0; + vf->priv->out.row_left = 0; + vf->priv->out.row_right = 0; + + //check output format + switch (vf->priv->out.fmt) { + case ANAGLYPH_RC_GRAY: + case ANAGLYPH_RC_HALF: + case ANAGLYPH_RC_COLOR: + case ANAGLYPH_RC_DUBOIS: + case ANAGLYPH_GM_GRAY: + case ANAGLYPH_GM_HALF: + case 
ANAGLYPH_GM_COLOR: + case ANAGLYPH_GM_DUBOIS: + case ANAGLYPH_YB_GRAY: + case ANAGLYPH_YB_HALF: + case ANAGLYPH_YB_COLOR: + case ANAGLYPH_YB_DUBOIS: + memcpy(vf->priv->ana_matrix, ana_coeff[vf->priv->out.fmt], + sizeof(vf->priv->ana_matrix)); + break; + case SIDE_BY_SIDE_2_LR: + d_width /= 2; + case SIDE_BY_SIDE_LR: + vf->priv->out.width = vf->priv->width * 2; + vf->priv->out.off_right = vf->priv->width * 3; + break; + case SIDE_BY_SIDE_2_RL: + d_width /= 2; + case SIDE_BY_SIDE_RL: + vf->priv->out.width = vf->priv->width * 2; + vf->priv->out.off_left = vf->priv->width * 3; + break; + case ABOVE_BELOW_2_LR: + d_height /= 2; + case ABOVE_BELOW_LR: + vf->priv->out.height = vf->priv->height * 2; + vf->priv->out.row_right = vf->priv->height; + break; + case ABOVE_BELOW_2_RL: + d_height /= 2; + case ABOVE_BELOW_RL: + vf->priv->out.height = vf->priv->height * 2; + vf->priv->out.row_left = vf->priv->height; + break; + case INTERLEAVE_ROWS_LR: + vf->priv->row_step = 2; + vf->priv->height = vf->priv->height / 2; + vf->priv->out.off_right = vf->priv->width * 3; + vf->priv->in.off_right += vf->priv->in.width * 3; + break; + case INTERLEAVE_ROWS_RL: + vf->priv->row_step = 2; + vf->priv->height = vf->priv->height / 2; + vf->priv->out.off_left = vf->priv->width * 3; + vf->priv->in.off_left += vf->priv->in.width * 3; + break; + case MONO_R: + //same as MONO_L only needs switching of input offsets + vf->priv->in.off_left = vf->priv->in.off_right; + vf->priv->in.row_left = vf->priv->in.row_right; + //nobreak; + case MONO_L: + //use default settings + break; + default: + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, + "[stereo3d] stereo format of output is not supported\n"); + return 0; + break; + } +// if (!opt_screen_size_x && !opt_screen_size_y) { + d_width = d_width * vf->priv->out.width / width; + d_height = d_height * vf->priv->out.height / height; +// } + + return ff_vf_next_config(vf, vf->priv->out.width, vf->priv->out.height, + d_width, d_height, flags, outfmt); +} + +static int 
put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + if (vf->priv->in.fmt == vf->priv->out.fmt) { //nothing to do + dmpi = mpi; + } else { + int out_off_left, out_off_right; + int in_off_left = vf->priv->in.row_left * mpi->stride[0] + + vf->priv->in.off_left; + int in_off_right = vf->priv->in.row_right * mpi->stride[0] + + vf->priv->in.off_right; + + dmpi = ff_vf_get_image(vf->next, IMGFMT_RGB24, MP_IMGTYPE_TEMP, + MP_IMGFLAG_ACCEPT_STRIDE, + vf->priv->out.width, vf->priv->out.height); + out_off_left = vf->priv->out.row_left * dmpi->stride[0] + + vf->priv->out.off_left; + out_off_right = vf->priv->out.row_right * dmpi->stride[0] + + vf->priv->out.off_right; + + switch (vf->priv->out.fmt) { + case SIDE_BY_SIDE_LR: + case SIDE_BY_SIDE_RL: + case SIDE_BY_SIDE_2_LR: + case SIDE_BY_SIDE_2_RL: + case ABOVE_BELOW_LR: + case ABOVE_BELOW_RL: + case ABOVE_BELOW_2_LR: + case ABOVE_BELOW_2_RL: + case INTERLEAVE_ROWS_LR: + case INTERLEAVE_ROWS_RL: + memcpy_pic2(dmpi->planes[0] + out_off_left, + mpi->planes[0] + in_off_left, + 3 * vf->priv->width, + vf->priv->height, + dmpi->stride[0] * vf->priv->row_step, + mpi->stride[0] * vf->priv->row_step, + vf->priv->row_step != 1); + memcpy_pic2(dmpi->planes[0] + out_off_right, + mpi->planes[0] + in_off_right, + 3 * vf->priv->width, + vf->priv->height, + dmpi->stride[0] * vf->priv->row_step, + mpi->stride[0] * vf->priv->row_step, + vf->priv->row_step != 1); + break; + case MONO_L: + case MONO_R: + memcpy_pic(dmpi->planes[0], + mpi->planes[0] + in_off_left, + 3 * vf->priv->width, + vf->priv->height, + dmpi->stride[0], + mpi->stride[0]); + break; + case ANAGLYPH_RC_GRAY: + case ANAGLYPH_RC_HALF: + case ANAGLYPH_RC_COLOR: + case ANAGLYPH_RC_DUBOIS: + case ANAGLYPH_GM_GRAY: + case ANAGLYPH_GM_HALF: + case ANAGLYPH_GM_COLOR: + case ANAGLYPH_GM_DUBOIS: + case ANAGLYPH_YB_GRAY: + case ANAGLYPH_YB_HALF: + case ANAGLYPH_YB_COLOR: + case ANAGLYPH_YB_DUBOIS: { + int i,x,y,il,ir,o; + unsigned char *source = 
mpi->planes[0]; + unsigned char *dest = dmpi->planes[0]; + unsigned int out_width = vf->priv->out.width; + int *ana_matrix[3]; + + for(i = 0; i < 3; i++) + ana_matrix[i] = vf->priv->ana_matrix[i]; + + for (y = 0; y < vf->priv->out.height; y++) { + o = dmpi->stride[0] * y; + il = in_off_left + y * mpi->stride[0]; + ir = in_off_right + y * mpi->stride[0]; + for (x = 0; x < out_width; x++) { + dest[o ] = ana_convert( + ana_matrix[0], source + il, source + ir); //red out + dest[o + 1] = ana_convert( + ana_matrix[1], source + il, source + ir); //green out + dest[o + 2] = ana_convert( + ana_matrix[2], source + il, source + ir); //blue out + il += 3; + ir += 3; + o += 3; + } + } + break; + } + default: + ff_mp_msg(MSGT_VFILTER, MSGL_WARN, + "[stereo3d] stereo format of output is not supported\n"); + return 0; + break; + } + } + return ff_vf_next_put_image(vf, dmpi, pts); +} + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + switch (fmt) + case IMGFMT_RGB24: + return ff_vf_next_query_format(vf, fmt); + return 0; +} + +static void uninit(vf_instance_t *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + vf->config = config; + vf->uninit = uninit; + vf->put_image = put_image; + vf->query_format = query_format; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + vf->priv->in.fmt = SIDE_BY_SIDE_LR; + vf->priv->out.fmt= ANAGLYPH_RC_DUBOIS; + if (args) sscanf(args, "%d:%d", &vf->priv->in.fmt, &vf->priv->out.fmt); + + return 1; +} +#if 0 +///Presets usage +static const struct format_preset { + char* name; + stereo_code scode; +} vf_format_presets_defs[] = { + {"arcg", ANAGLYPH_RC_GRAY}, + {"anaglyph_red_cyan_gray", ANAGLYPH_RC_GRAY}, + {"arch", ANAGLYPH_RC_HALF}, + {"anaglyph_red_cyan_half_color", ANAGLYPH_RC_HALF}, + {"arcc", ANAGLYPH_RC_COLOR}, + {"anaglyph_red_cyan_color", ANAGLYPH_RC_COLOR}, + {"arcd", ANAGLYPH_RC_DUBOIS}, + {"anaglyph_red_cyan_dubios", ANAGLYPH_RC_DUBOIS}, 
+ {"agmg", ANAGLYPH_GM_GRAY}, + {"anaglyph_green_magenta_gray", ANAGLYPH_GM_GRAY}, + {"agmh", ANAGLYPH_GM_HALF}, + {"anaglyph_green_magenta_half_color",ANAGLYPH_GM_HALF}, + {"agmc", ANAGLYPH_GM_COLOR}, + {"anaglyph_green_magenta_color", ANAGLYPH_GM_COLOR}, + {"agmd", ANAGLYPH_GM_DUBOIS}, + {"anaglyph_green_magenta_dubois", ANAGLYPH_GM_DUBOIS}, + {"aybg", ANAGLYPH_YB_GRAY}, + {"anaglyph_yellow_blue_gray", ANAGLYPH_YB_GRAY}, + {"aybh", ANAGLYPH_YB_HALF}, + {"anaglyph_yellow_blue_half_color", ANAGLYPH_YB_HALF}, + {"aybc", ANAGLYPH_YB_COLOR}, + {"anaglyph_yellow_blue_color", ANAGLYPH_YB_COLOR}, + {"aybd", ANAGLYPH_YB_DUBOIS}, + {"anaglyph_yellow_blue_dubois", ANAGLYPH_YB_DUBOIS}, + {"ml", MONO_L}, + {"mono_left", MONO_L}, + {"mr", MONO_R}, + {"mono_right", MONO_R}, + {"sbsl", SIDE_BY_SIDE_LR}, + {"side_by_side_left_first", SIDE_BY_SIDE_LR}, + {"sbsr", SIDE_BY_SIDE_RL}, + {"side_by_side_right_first", SIDE_BY_SIDE_RL}, + {"sbs2l", SIDE_BY_SIDE_2_LR}, + {"side_by_side_half_width_left_first", SIDE_BY_SIDE_2_LR}, + {"sbs2r", SIDE_BY_SIDE_2_RL}, + {"side_by_side_half_width_right_first",SIDE_BY_SIDE_2_RL}, + {"abl", ABOVE_BELOW_LR}, + {"above_below_left_first", ABOVE_BELOW_LR}, + {"abr", ABOVE_BELOW_RL}, + {"above_below_right_first", ABOVE_BELOW_RL}, + {"ab2l", ABOVE_BELOW_2_LR}, + {"above_below_half_height_left_first", ABOVE_BELOW_2_LR}, + {"ab2r", ABOVE_BELOW_2_RL}, + {"above_below_half_height_right_first",ABOVE_BELOW_2_RL}, + {"irl", INTERLEAVE_ROWS_LR}, + {"interleave_rows_left_first", INTERLEAVE_ROWS_LR}, + {"irr", INTERLEAVE_ROWS_RL}, + {"interleave_rows_right_first", INTERLEAVE_ROWS_RL}, + { NULL, 0} +}; + +#define ST_OFF(f) M_ST_OFF(struct format_preset,f) +static const m_option_t vf_format_preset_fields_in[] = { + {"in", ST_OFF(scode), CONF_TYPE_INT, 0,0,0, NULL}, + { NULL, NULL, 0, 0, 0, 0, NULL } +}; +static const m_option_t vf_format_preset_fields_out[] = { + {"out", ST_OFF(scode), CONF_TYPE_INT, 0,0,0, NULL}, + { NULL, NULL, 0, 0, 0, 0, NULL } +}; + +static const 
m_struct_t vf_format_preset_in = { + "stereo_format_preset_in", + sizeof(struct format_preset), + NULL, + vf_format_preset_fields_in +}; +static const m_struct_t vf_format_preset_out = { + "stereo_format_preset_out", + sizeof(struct format_preset), + NULL, + vf_format_preset_fields_out +}; + +static const m_struct_t vf_opts; +static const m_obj_presets_t format_preset_in = { + (struct m_struct_st*)&vf_format_preset_in, + (struct m_struct_st*)&vf_opts, + (struct format_preset*)vf_format_presets_defs, + ST_OFF(name) +}; +static const m_obj_presets_t format_preset_out = { + (struct m_struct_st*)&vf_format_preset_out, + (struct m_struct_st*)&vf_opts, + (struct format_preset*)vf_format_presets_defs, + ST_OFF(name) +}; + +/// Now the options +#undef ST_OFF +#define ST_OFF(f) M_ST_OFF(struct vf_priv_s,f) +static const m_option_t vf_opts_fields[] = { + {"stereo_in", 0, CONF_TYPE_OBJ_PRESETS, 0, 0, 0, + (m_obj_presets_t*)&format_preset_in}, + {"stereo_out", 0, CONF_TYPE_OBJ_PRESETS, 0, 0, 0, + (m_obj_presets_t*)&format_preset_out}, + {"in", ST_OFF(in.fmt), CONF_TYPE_INT, 0,0,0, NULL}, + {"out", ST_OFF(out.fmt), CONF_TYPE_INT, 0,0,0, NULL}, + { NULL, NULL, 0, 0, 0, 0, NULL } +}; + +static const m_struct_t vf_opts = { + "stereo3d", + sizeof(struct vf_priv_s), + &ff_vf_priv_default, + vf_opts_fields +}; +#endif + +//==info struct==// +const vf_info_t ff_vf_info_stereo3d = { + "stereoscopic 3d view", + "stereo3d", + "Gordon Schmidt", + "view stereoscopic videos", + vf_open, +// &vf_opts +}; diff --git a/libavfilter/libmpcodecs/vf_telecine.c b/libavfilter/libmpcodecs/vf_telecine.c new file mode 100644 index 0000000..77f75f0 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_telecine.c @@ -0,0 +1,158 @@ +/* + * This file is part of MPlayer. 
+ * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +struct vf_priv_s { + int frame; +}; + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + mp_image_t *dmpi; + int ret; + int w = (IMGFMT_IS_YUVP16(mpi->imgfmt) ? 2 : 1) * mpi->w; + int chroma_width = (IMGFMT_IS_YUVP16(mpi->imgfmt) ? 
2 : 1) * mpi->chroma_width; + + vf->priv->frame = (vf->priv->frame+1)%4; + + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE, mpi->width, mpi->height); + + ret = 0; + // 0/0 1/1 2/2 2/3 3/0 + switch (vf->priv->frame) { + case 0: + my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0], + mpi->planes[0]+mpi->stride[0], w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1], + mpi->planes[1]+mpi->stride[1], + chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2], + mpi->planes[2]+mpi->stride[2], + chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + /* Fallthrough */ + case 1: + case 2: + memcpy_pic(dmpi->planes[0], mpi->planes[0], w, mpi->h, + dmpi->stride[0], mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(dmpi->planes[1], mpi->planes[1], + chroma_width, mpi->chroma_height, + dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], + chroma_width, mpi->chroma_height, + dmpi->stride[2], mpi->stride[2]); + } + return ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE) || ret; + case 3: + my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0], + mpi->planes[0]+mpi->stride[0], w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1], + mpi->planes[1]+mpi->stride[1], + chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2], + mpi->planes[2]+mpi->stride[2], + chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + my_memcpy_pic(dmpi->planes[0], mpi->planes[0], w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + 
if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1], mpi->planes[1], + chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2], mpi->planes[2], + chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + return ret; + } + return 0; +} + +#if 0 +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* FIXME - figure out which other formats work */ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} +#endif + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + //vf->config = config; + vf->put_image = put_image; + //vf->query_format = query_format; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + vf->priv = calloc(1, sizeof(struct vf_priv_s)); + vf->priv->frame = 1; + if (args) sscanf(args, "%d", &vf->priv->frame); + vf->priv->frame--; + return 1; +} + +const vf_info_t ff_vf_info_telecine = { + "telecine filter", + "telecine", + "Rich Felker", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_tinterlace.c b/libavfilter/libmpcodecs/vf_tinterlace.c new file mode 100644 index 0000000..6c7dbab --- /dev/null +++ b/libavfilter/libmpcodecs/vf_tinterlace.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2003 Michael Zucchi <notzed@ximian.com> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "config.h" +#include "mp_msg.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" + +#include "libvo/fastmemcpy.h" + +struct vf_priv_s { + int mode; + int frame; + mp_image_t *dmpi; +}; + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts) +{ + int ret = 0; + mp_image_t *dmpi; + + switch (vf->priv->mode) { + case 0: + dmpi = vf->priv->dmpi; + if (dmpi == NULL) { + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE, + mpi->width, mpi->height*2); + + vf->priv->dmpi = dmpi; + + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, + dmpi->stride[0]*2, mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(dmpi->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[1]*2, mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[2]*2, mpi->stride[2]); + } + } else { + vf->priv->dmpi = NULL; + + memcpy_pic(dmpi->planes[0]+dmpi->stride[0], mpi->planes[0], mpi->w, mpi->h, + dmpi->stride[0]*2, mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(dmpi->planes[1]+dmpi->stride[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[1]*2, mpi->stride[1]); + memcpy_pic(dmpi->planes[2]+dmpi->stride[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[2]*2, mpi->stride[2]); + 
} + ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + } + break; + case 1: + if (vf->priv->frame & 1) + ret = ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE); + break; + case 2: + if ((vf->priv->frame & 1) == 0) + ret = ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE); + break; + case 3: + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, + mpi->width, mpi->height*2); + /* fixme, just clear alternate lines */ + ff_vf_mpi_clear(dmpi, 0, 0, dmpi->w, dmpi->h); + if ((vf->priv->frame & 1) == 0) { + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, + dmpi->stride[0]*2, mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(dmpi->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[1]*2, mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[2]*2, mpi->stride[2]); + } + } else { + memcpy_pic(dmpi->planes[0]+dmpi->stride[0], mpi->planes[0], mpi->w, mpi->h, + dmpi->stride[0]*2, mpi->stride[0]); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + memcpy_pic(dmpi->planes[1]+dmpi->stride[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[1]*2, mpi->stride[1]); + memcpy_pic(dmpi->planes[2]+dmpi->stride[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height, + dmpi->stride[2]*2, mpi->stride[2]); + } + } + ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + break; + case 4: + // Interleave even lines (only) from Frame 'i' with odd + // lines (only) from Frame 'i+1', halving the Frame + // rate and preserving image height. + + dmpi = vf->priv->dmpi; + + // @@ Need help: Should I set dmpi->fields to indicate + // that the (new) frame will be interlaced!? E.g. ... + // dmpi->fields |= MP_IMGFIELD_INTERLACED; + // dmpi->fields |= MP_IMGFIELD_TOP_FIRST; + // etc. 
+ + if (dmpi == NULL) { + dmpi = ff_vf_get_image(vf->next, mpi->imgfmt, + MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE | + MP_IMGFLAG_PRESERVE, + mpi->width, mpi->height); + + vf->priv->dmpi = dmpi; + + my_memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1], mpi->planes[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2], mpi->planes[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + } else { + vf->priv->dmpi = NULL; + + my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0], + mpi->planes[0]+mpi->stride[0], + mpi->w, mpi->h/2, + dmpi->stride[0]*2, mpi->stride[0]*2); + if (mpi->flags & MP_IMGFLAG_PLANAR) { + my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1], + mpi->planes[1]+mpi->stride[1], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[1]*2, mpi->stride[1]*2); + my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2], + mpi->planes[2]+mpi->stride[2], + mpi->chroma_width, mpi->chroma_height/2, + dmpi->stride[2]*2, mpi->stride[2]*2); + } + ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE); + } + break; + } + + vf->priv->frame++; + + return ret; +} + +static int query_format(struct vf_instance *vf, unsigned int fmt) +{ + /* FIXME - figure out which other formats work */ + switch (fmt) { + case IMGFMT_YV12: + case IMGFMT_IYUV: + case IMGFMT_I420: + return ff_vf_next_query_format(vf, fmt); + } + return 0; +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt) +{ + switch (vf->priv->mode) { + case 0: + case 3: + return ff_vf_next_config(vf,width,height*2,d_width,d_height*2,flags,outfmt); + case 1: /* odd frames */ + case 2: /* even frames */ + case 4: /* alternate frame (height-preserving) interlacing */ + return 
ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); + } + return 0; +} + +static void uninit(struct vf_instance *vf) +{ + free(vf->priv); +} + +static int vf_open(vf_instance_t *vf, char *args) +{ + struct vf_priv_s *p; + vf->config = config; + vf->put_image = put_image; + vf->query_format = query_format; + vf->uninit = uninit; + vf->default_reqs = VFCAP_ACCEPT_STRIDE; + vf->priv = p = calloc(1, sizeof(struct vf_priv_s)); + p->mode = 0; + if (args) + sscanf(args, "%d", &p->mode); + p->frame = 0; + return 1; +} + +const vf_info_t ff_vf_info_tinterlace = { + "temporal field interlacing", + "tinterlace", + "Michael Zucchi", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vf_unsharp.c b/libavfilter/libmpcodecs/vf_unsharp.c new file mode 100644 index 0000000..89eddec --- /dev/null +++ b/libavfilter/libmpcodecs/vf_unsharp.c @@ -0,0 +1,325 @@ +/* + * Copyright (C) 2002 Remi Guyomarch <rguyom@pobox.com> + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> + +#include "config.h" +#include "mp_msg.h" +#include "cpudetect.h" + +#if HAVE_MALLOC_H +#include <malloc.h> +#endif + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "libvo/fastmemcpy.h" +#include "libavutil/common.h" + +//===========================================================================// + +#define MIN_MATRIX_SIZE 3 +#define MAX_MATRIX_SIZE 63 + +typedef struct FilterParam { + int msizeX, msizeY; + double amount; + uint32_t *SC[MAX_MATRIX_SIZE-1]; +} FilterParam; + +struct vf_priv_s { + FilterParam lumaParam; + FilterParam chromaParam; + unsigned int outfmt; +}; + + +//===========================================================================// + +/* This code is based on : + +An Efficient algorithm for Gaussian blur using finite-state machines +Frederick M. Waltz and John W. V. Miller + +SPIE Conf. on Machine Vision Systems for Inspection and Metrology VII +Originally published Boston, Nov 98 + +*/ + +static void unsharp( uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int width, int height, FilterParam *fp ) { + + uint32_t **SC = fp->SC; + uint32_t SR[MAX_MATRIX_SIZE-1], Tmp1, Tmp2; + uint8_t* src2 = src; // avoid gcc warning + + int32_t res; + int x, y, z; + int amount = fp->amount * 65536.0; + int stepsX = fp->msizeX/2; + int stepsY = fp->msizeY/2; + int scalebits = (stepsX+stepsY)*2; + int32_t halfscale = 1 << ((stepsX+stepsY)*2-1); + + if( !fp->amount ) { + if( src == dst ) + return; + if( dstStride == srcStride ) + fast_memcpy( dst, src, srcStride*height ); + else + for( y=0; y<height; y++, dst+=dstStride, src+=srcStride ) + fast_memcpy( dst, src, width ); + return; + } + + for( y=0; y<2*stepsY; y++ ) + memset( SC[y], 0, sizeof(SC[y][0]) * (width+2*stepsX) ); + + for( y=-stepsY; y<height+stepsY; y++ ) { + if( y < height ) src2 = src; + memset( SR, 0, sizeof(SR[0]) * (2*stepsX-1) ); + for( 
x=-stepsX; x<width+stepsX; x++ ) { + Tmp1 = x<=0 ? src2[0] : x>=width ? src2[width-1] : src2[x]; + for( z=0; z<stepsX*2; z+=2 ) { + Tmp2 = SR[z+0] + Tmp1; SR[z+0] = Tmp1; + Tmp1 = SR[z+1] + Tmp2; SR[z+1] = Tmp2; + } + for( z=0; z<stepsY*2; z+=2 ) { + Tmp2 = SC[z+0][x+stepsX] + Tmp1; SC[z+0][x+stepsX] = Tmp1; + Tmp1 = SC[z+1][x+stepsX] + Tmp2; SC[z+1][x+stepsX] = Tmp2; + } + if( x>=stepsX && y>=stepsY ) { + uint8_t* srx = src - stepsY*srcStride + x - stepsX; + uint8_t* dsx = dst - stepsY*dstStride + x - stepsX; + + res = (int32_t)*srx + ( ( ( (int32_t)*srx - (int32_t)((Tmp1+halfscale) >> scalebits) ) * amount ) >> 16 ); + *dsx = res>255 ? 255 : res<0 ? 0 : (uint8_t)res; + } + } + if( y >= 0 ) { + dst += dstStride; + src += srcStride; + } + } +} + +//===========================================================================// + +static int config( struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt ) { + + int z, stepsX, stepsY; + FilterParam *fp; + const char *effect; + + // allocate buffers + + fp = &vf->priv->lumaParam; + effect = fp->amount == 0 ? "don't touch" : fp->amount < 0 ? "blur" : "sharpen"; + ff_mp_msg( MSGT_VFILTER, MSGL_INFO, "unsharp: %dx%d:%0.2f (%s luma) \n", fp->msizeX, fp->msizeY, fp->amount, effect ); + memset( fp->SC, 0, sizeof( fp->SC ) ); + stepsX = fp->msizeX/2; + stepsY = fp->msizeY/2; + for( z=0; z<2*stepsY; z++ ) + fp->SC[z] = av_malloc(sizeof(*(fp->SC[z])) * (width+2*stepsX)); + + fp = &vf->priv->chromaParam; + effect = fp->amount == 0 ? "don't touch" : fp->amount < 0 ? 
"blur" : "sharpen"; + ff_mp_msg( MSGT_VFILTER, MSGL_INFO, "unsharp: %dx%d:%0.2f (%s chroma)\n", fp->msizeX, fp->msizeY, fp->amount, effect ); + memset( fp->SC, 0, sizeof( fp->SC ) ); + stepsX = fp->msizeX/2; + stepsY = fp->msizeY/2; + for( z=0; z<2*stepsY; z++ ) + fp->SC[z] = av_malloc(sizeof(*(fp->SC[z])) * (width+2*stepsX)); + + return ff_vf_next_config( vf, width, height, d_width, d_height, flags, outfmt ); +} + +//===========================================================================// + +static void get_image( struct vf_instance *vf, mp_image_t *mpi ) { + if( mpi->flags & MP_IMGFLAG_PRESERVE ) + return; // don't change + if( mpi->imgfmt!=vf->priv->outfmt ) + return; // colorspace differ + + mpi->priv = + vf->dmpi = ff_vf_get_image( vf->next, mpi->imgfmt, mpi->type, mpi->flags, mpi->width, mpi->height ); + mpi->planes[0] = vf->dmpi->planes[0]; + mpi->stride[0] = vf->dmpi->stride[0]; + mpi->width = vf->dmpi->width; + if( mpi->flags & MP_IMGFLAG_PLANAR ) { + mpi->planes[1] = vf->dmpi->planes[1]; + mpi->planes[2] = vf->dmpi->planes[2]; + mpi->stride[1] = vf->dmpi->stride[1]; + mpi->stride[2] = vf->dmpi->stride[2]; + } + mpi->flags |= MP_IMGFLAG_DIRECT; +} + +static int put_image( struct vf_instance *vf, mp_image_t *mpi, double pts) { + mp_image_t *dmpi = mpi->priv; + mpi->priv = NULL; + + if( !(mpi->flags & MP_IMGFLAG_DIRECT) ) + // no DR, so get a new image! 
hope we'll get DR buffer: + dmpi = vf->dmpi = ff_vf_get_image( vf->next,vf->priv->outfmt, MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, mpi->width, mpi->height); + + unsharp( dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, &vf->priv->lumaParam ); + unsharp( dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w/2, mpi->h/2, &vf->priv->chromaParam ); + unsharp( dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w/2, mpi->h/2, &vf->priv->chromaParam ); + + ff_vf_clone_mpi_attributes(dmpi, mpi); + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) + __asm__ volatile ("emms\n\t"); +#endif +#if HAVE_MMX2 + if(ff_gCpuCaps.hasMMX2) + __asm__ volatile ("sfence\n\t"); +#endif + + return ff_vf_next_put_image( vf, dmpi, pts); +} + +static void uninit( struct vf_instance *vf ) { + unsigned int z; + FilterParam *fp; + + if( !vf->priv ) return; + + fp = &vf->priv->lumaParam; + for( z=0; z<sizeof(fp->SC)/sizeof(fp->SC[0]); z++ ) { + av_free( fp->SC[z] ); + fp->SC[z] = NULL; + } + fp = &vf->priv->chromaParam; + for( z=0; z<sizeof(fp->SC)/sizeof(fp->SC[0]); z++ ) { + av_free( fp->SC[z] ); + fp->SC[z] = NULL; + } + + free( vf->priv ); + vf->priv = NULL; +} + +//===========================================================================// + +static int query_format( struct vf_instance *vf, unsigned int fmt ) { + switch(fmt) { + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + return ff_vf_next_query_format( vf, vf->priv->outfmt ); + } + return 0; +} + +//===========================================================================// + +static void parse( FilterParam *fp, char* args ) { + + // l7x5:0.8:c3x3:-0.2 + + char *z; + char *pos = args; + char *max = args + strlen(args); + + // parse matrix sizes + fp->msizeX = ( pos && pos+1<max ) ? atoi( pos+1 ) : 0; + z = strchr( pos+1, 'x' ); + fp->msizeY = ( z && z+1<max ) ? 
atoi( pos=z+1 ) : fp->msizeX; + + // min/max & odd + fp->msizeX = 1 | av_clip(fp->msizeX, MIN_MATRIX_SIZE, MAX_MATRIX_SIZE); + fp->msizeY = 1 | av_clip(fp->msizeY, MIN_MATRIX_SIZE, MAX_MATRIX_SIZE); + + // parse amount + pos = strchr( pos+1, ':' ); + fp->amount = ( pos && pos+1<max ) ? atof( pos+1 ) : 0; +} + +//===========================================================================// + +static const unsigned int fmt_list[] = { + IMGFMT_YV12, + IMGFMT_I420, + IMGFMT_IYUV, + 0 +}; + +static int vf_open( vf_instance_t *vf, char *args ) { + vf->config = config; + vf->put_image = put_image; + vf->get_image = get_image; + vf->query_format = query_format; + vf->uninit = uninit; + vf->priv = malloc( sizeof(struct vf_priv_s) ); + memset( vf->priv, 0, sizeof(struct vf_priv_s) ); + + if( args ) { + char *args2 = strchr( args, 'l' ); + if( args2 ) + parse( &vf->priv->lumaParam, args2 ); + else { + vf->priv->lumaParam.amount = + vf->priv->lumaParam.msizeX = + vf->priv->lumaParam.msizeY = 0; + } + + args2 = strchr( args, 'c' ); + if( args2 ) + parse( &vf->priv->chromaParam, args2 ); + else { + vf->priv->chromaParam.amount = + vf->priv->chromaParam.msizeX = + vf->priv->chromaParam.msizeY = 0; + } + + if( !vf->priv->lumaParam.msizeX && !vf->priv->chromaParam.msizeX ) + return 0; // nothing to do + } + + // check csp: + vf->priv->outfmt = ff_vf_match_csp( &vf->next, fmt_list, IMGFMT_YV12 ); + if( !vf->priv->outfmt ) { + uninit( vf ); + return 0; // no csp match :( + } + + return 1; +} + +const vf_info_t ff_vf_info_unsharp = { + "unsharp mask & gaussian blur", + "unsharp", + "Remi Guyomarch", + "", + vf_open, + NULL +}; + +//===========================================================================// diff --git a/libavfilter/libmpcodecs/vf_uspp.c b/libavfilter/libmpcodecs/vf_uspp.c new file mode 100644 index 0000000..54cc0f9 --- /dev/null +++ b/libavfilter/libmpcodecs/vf_uspp.c @@ -0,0 +1,393 @@ +/* + * Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at> + * + * This 
file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <inttypes.h> +#include <math.h> +#include <assert.h> + +#include "config.h" + +#include "mp_msg.h" +#include "cpudetect.h" + +#include "libavutil/mem.h" +#include "libavcodec/avcodec.h" + +#include "img_format.h" +#include "mp_image.h" +#include "vf.h" +#include "av_helpers.h" +#include "libvo/fastmemcpy.h" + +#define XMIN(a,b) ((a) < (b) ? 
(a) : (b)) + +#define BLOCK 16 + +//===========================================================================// +static const uint8_t __attribute__((aligned(8))) dither[8][8]={ +{ 0*4, 48*4, 12*4, 60*4, 3*4, 51*4, 15*4, 63*4, }, +{ 32*4, 16*4, 44*4, 28*4, 35*4, 19*4, 47*4, 31*4, }, +{ 8*4, 56*4, 4*4, 52*4, 11*4, 59*4, 7*4, 55*4, }, +{ 40*4, 24*4, 36*4, 20*4, 43*4, 27*4, 39*4, 23*4, }, +{ 2*4, 50*4, 14*4, 62*4, 1*4, 49*4, 13*4, 61*4, }, +{ 34*4, 18*4, 46*4, 30*4, 33*4, 17*4, 45*4, 29*4, }, +{ 10*4, 58*4, 6*4, 54*4, 9*4, 57*4, 5*4, 53*4, }, +{ 42*4, 26*4, 38*4, 22*4, 41*4, 25*4, 37*4, 21*4, }, +}; + +static const uint8_t offset[511][2]= { +{ 0, 0}, +{ 0, 0}, { 8, 8}, +{ 0, 0}, { 4, 4}, {12, 8}, { 8,12}, +{ 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14}, + +{ 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14}, +{ 5, 1}, {15, 3}, { 9, 5}, { 3, 7}, {13, 9}, { 7,11}, { 1,13}, {11,15}, + +{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9}, +{ 2, 2}, {10, 2}, { 2,10}, {10,10}, { 7, 3}, {15, 3}, { 7,11}, {15,11}, +{ 4, 4}, {12, 4}, { 4,12}, {12,12}, { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, +{ 6, 6}, {14, 6}, { 6,14}, {14,14}, { 3, 7}, {11, 7}, { 3,15}, {11,15}, + +{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8}, +{ 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9}, +{ 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 2}, {15, 2}, { 7,10}, {15,10}, +{ 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 3}, {14, 3}, { 6,11}, {14,11}, +{ 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 4}, {12, 4}, { 4,12}, {12,12}, +{ 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 5}, {13, 5}, { 5,13}, {13,13}, +{ 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 6}, {15, 6}, { 7,14}, {15,14}, +{ 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 7}, {14, 7}, { 6,15}, {14,15}, + +{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 0, 2}, { 8, 2}, { 0,10}, { 8,10}, +{ 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 0, 6}, { 8, 6}, { 0,14}, { 8,14}, +{ 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, 
{ 1, 3}, { 9, 3}, { 1,11}, { 9,11}, +{ 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 1, 7}, { 9, 7}, { 1,15}, { 9,15}, +{ 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 2, 2}, {10, 2}, { 2,10}, {10,10}, +{ 2, 4}, {10, 4}, { 2,12}, {10,12}, { 2, 6}, {10, 6}, { 2,14}, {10,14}, +{ 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 3, 3}, {11, 3}, { 3,11}, {11,11}, +{ 3, 5}, {11, 5}, { 3,13}, {11,13}, { 3, 7}, {11, 7}, { 3,15}, {11,15}, +{ 4, 0}, {12, 0}, { 4, 8}, {12, 8}, { 4, 2}, {12, 2}, { 4,10}, {12,10}, +{ 4, 4}, {12, 4}, { 4,12}, {12,12}, { 4, 6}, {12, 6}, { 4,14}, {12,14}, +{ 5, 1}, {13, 1}, { 5, 9}, {13, 9}, { 5, 3}, {13, 3}, { 5,11}, {13,11}, +{ 5, 5}, {13, 5}, { 5,13}, {13,13}, { 5, 7}, {13, 7}, { 5,15}, {13,15}, +{ 6, 0}, {14, 0}, { 6, 8}, {14, 8}, { 6, 2}, {14, 2}, { 6,10}, {14,10}, +{ 6, 4}, {14, 4}, { 6,12}, {14,12}, { 6, 6}, {14, 6}, { 6,14}, {14,14}, +{ 7, 1}, {15, 1}, { 7, 9}, {15, 9}, { 7, 3}, {15, 3}, { 7,11}, {15,11}, +{ 7, 5}, {15, 5}, { 7,13}, {15,13}, { 7, 7}, {15, 7}, { 7,15}, {15,15}, + +{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 4}, {12, 4}, { 4,12}, {12,12}, { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8}, { 2, 2}, {10, 2}, { 2,10}, {10,10}, { 6, 6}, {14, 6}, { 6,14}, {14,14}, { 2, 6}, {10, 6}, { 2,14}, {10,14}, { 6, 2}, {14, 2}, { 6,10}, {14,10}, { 0, 2}, { 8, 2}, { 0,10}, { 8,10}, { 4, 6}, {12, 6}, { 4,14}, {12,14}, { 0, 6}, { 8, 6}, { 0,14}, { 8,14}, { 4, 2}, {12, 2}, { 4,10}, {12,10}, { 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 6, 4}, {14, 4}, { 6,12}, {14,12}, { 2, 4}, {10, 4}, { 2,12}, {10,12}, { 6, 0}, {14, 0}, { 6, 8}, {14, 8}, { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 5}, {13, 5}, { 5,13}, {13,13}, { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9}, { 3, 3}, {11, 3}, { 3,11}, {11,11}, { 7, 7}, {15, 7}, { 7,15}, {15,15}, { 3, 7}, {11, 7}, { 3,15}, {11,15}, { 7, 3}, {15, 3}, { 7,11}, {15,11}, { 1, 3}, { 9, 3}, { 1,11}, { 9,11}, { 5, 7}, {13, 7}, { 5,15}, {13,15}, { 1, 7}, { 9, 7}, { 1,15}, { 9,15}, { 5, 3}, {13, 3}, { 5,11}, {13,11}, { 
3, 1}, {11, 1} +, { 3, 9}, {11, 9}, { 7, 5}, {15, 5}, { 7,13}, {15,13}, { 3, 5}, {11, 5}, { 3,13}, {11,13}, { 7, 1}, {15, 1}, { 7, 9}, {15, 9}, { 0, 1}, { 8, 1}, { 0, 9}, { 8, 9}, { 4, 5}, {12, 5}, { 4,13}, {12,13}, { 0, 5}, { 8, 5}, { 0,13}, { 8,13}, { 4, 1}, {12, 1}, { 4, 9}, {12, 9}, { 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 7}, {14, 7}, { 6,15}, {14,15}, { 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 3}, {14, 3}, { 6,11}, {14,11}, { 0, 3}, { 8, 3}, { 0,11}, { 8,11}, { 4, 7}, {12, 7}, { 4,15}, {12,15}, { 0, 7}, { 8, 7}, { 0,15}, { 8,15}, { 4, 3}, {12, 3}, { 4,11}, {12,11}, { 2, 1}, {10, 1}, { 2, 9}, {10, 9}, { 6, 5}, {14, 5}, { 6,13}, {14,13}, { 2, 5}, {10, 5}, { 2,13}, {10,13}, { 6, 1}, {14, 1}, { 6, 9}, {14, 9}, { 1, 0}, { 9, 0}, { 1, 8}, { 9, 8}, { 5, 4}, {13, 4}, { 5,12}, {13,12}, { 1, 4}, { 9, 4}, { 1,12}, { 9,12}, { 5, 0}, {13, 0}, { 5, 8}, {13, 8}, { 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 6}, {15, 6}, { 7,14}, {15,14}, { 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 2}, {15, 2}, { 7,10}, {15,10}, { 1, 2}, { 9, 2}, { 1,10}, { 9, +10}, { 5, 6}, {13, 6}, { 5,14}, {13,14}, { 1, 6}, { 9, 6}, { 1,14}, { 9,14}, { 5, 2}, {13, 2}, { 5,10}, {13,10}, { 3, 0}, {11, 0}, { 3, 8}, {11, 8}, { 7, 4}, {15, 4}, { 7,12}, {15,12}, { 3, 4}, {11, 4}, { 3,12}, {11,12}, { 7, 0}, {15, 0}, { 7, 8}, {15, 8}, +}; + +struct vf_priv_s { + int log2_count; + int qp; + int mode; + int mpeg2; + int temp_stride[3]; + uint8_t *src[3]; + int16_t *temp[3]; + int outbuf_size; + uint8_t *outbuf; + AVCodecContext *avctx_enc[BLOCK*BLOCK]; + AVFrame *frame; + AVFrame *frame_dec; +}; + +static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){ + int y, x; + +#define STORE(pos) \ + temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>8;\ + if(temp & 0x100) temp= ~(temp>>31);\ + dst[x + y*dst_stride + pos]= temp; + + for(y=0; y<height; y++){ + const uint8_t *d= dither[y&7]; + for(x=0; x<width; x+=8){ + int temp; + STORE(0); + STORE(1); + 
STORE(2); + STORE(3); + STORE(4); + STORE(5); + STORE(6); + STORE(7); + } + } +} + +static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height, uint8_t *qp_store, int qp_stride){ + int x, y, i, j; + const int count= 1<<p->log2_count; + + for(i=0; i<3; i++){ + int is_chroma= !!i; + int w= width >>is_chroma; + int h= height>>is_chroma; + int stride= p->temp_stride[i]; + int block= BLOCK>>is_chroma; + + if (!src[i] || !dst[i]) + continue; // HACK avoid crash for Y8 colourspace + for(y=0; y<h; y++){ + int index= block + block*stride + y*stride; + fast_memcpy(p->src[i] + index, src[i] + y*src_stride[i], w); + for(x=0; x<block; x++){ + p->src[i][index - x - 1]= p->src[i][index + x ]; + p->src[i][index + w + x ]= p->src[i][index + w - x - 1]; + } + } + for(y=0; y<block; y++){ + fast_memcpy(p->src[i] + ( block-1-y)*stride, p->src[i] + ( y+block )*stride, stride); + fast_memcpy(p->src[i] + (h+block +y)*stride, p->src[i] + (h-y+block-1)*stride, stride); + } + + p->frame->linesize[i]= stride; + memset(p->temp[i], 0, (h+2*block)*stride*sizeof(int16_t)); + } + + if(p->qp) + p->frame->quality= p->qp * FF_QP2LAMBDA; + else + p->frame->quality= norm_qscale(qp_store[0], p->mpeg2) * FF_QP2LAMBDA; +// init per MB qscale stuff FIXME + + for(i=0; i<count; i++){ + const int x1= offset[i+count-1][0]; + const int y1= offset[i+count-1][1]; + int offset; + p->frame->data[0]= p->src[0] + x1 + y1 * p->frame->linesize[0]; + p->frame->data[1]= p->src[1] + x1/2 + y1/2 * p->frame->linesize[1]; + p->frame->data[2]= p->src[2] + x1/2 + y1/2 * p->frame->linesize[2]; + + avcodec_encode_video(p->avctx_enc[i], p->outbuf, p->outbuf_size, p->frame); + p->frame_dec = p->avctx_enc[i]->coded_frame; + + offset= (BLOCK-x1) + (BLOCK-y1)*p->frame_dec->linesize[0]; + //FIXME optimize + for(y=0; y<height; y++){ + for(x=0; x<width; x++){ + p->temp[0][ x + y*p->temp_stride[0] ] += p->frame_dec->data[0][ x + y*p->frame_dec->linesize[0] + 
offset ]; + } + } + offset= (BLOCK/2-x1/2) + (BLOCK/2-y1/2)*p->frame_dec->linesize[1]; + for(y=0; y<height/2; y++){ + for(x=0; x<width/2; x++){ + p->temp[1][ x + y*p->temp_stride[1] ] += p->frame_dec->data[1][ x + y*p->frame_dec->linesize[1] + offset ]; + p->temp[2][ x + y*p->temp_stride[2] ] += p->frame_dec->data[2][ x + y*p->frame_dec->linesize[2] + offset ]; + } + } + } + + for(j=0; j<3; j++){ + int is_chroma= !!j; + if (!dst[j]) + continue; // HACK avoid crash for Y8 colourspace + store_slice_c(dst[j], p->temp[j], dst_stride[j], p->temp_stride[j], width>>is_chroma, height>>is_chroma, 8-p->log2_count); + } +} + +static int config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int flags, unsigned int outfmt){ + int i; + AVCodec *enc= avcodec_find_encoder(AV_CODEC_ID_SNOW); + + for(i=0; i<3; i++){ + int is_chroma= !!i; + int w= ((width + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma; + int h= ((height + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma; + + vf->priv->temp_stride[i]= w; + vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t)); + vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t)); + } + for(i=0; i< (1<<vf->priv->log2_count); i++){ + AVCodecContext *avctx_enc; + AVDictionary *opts = NULL; + + avctx_enc= + vf->priv->avctx_enc[i]= avcodec_alloc_context3(NULL); + avctx_enc->width = width + BLOCK; + avctx_enc->height = height + BLOCK; + avctx_enc->time_base= (AVRational){1,25}; // meaningless + avctx_enc->gop_size = 300; + avctx_enc->max_b_frames= 0; + avctx_enc->pix_fmt = AV_PIX_FMT_YUV420P; + avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY; + avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; + avctx_enc->global_quality= 123; + av_dict_set(&opts, "no_bitstream", "1", 0); + avcodec_open2(avctx_enc, enc, &opts); + av_dict_free(&opts); + assert(avctx_enc->codec); + } + vf->priv->frame= avcodec_alloc_frame(); + vf->priv->frame_dec= avcodec_alloc_frame(); + + 
vf->priv->outbuf_size= (width + BLOCK)*(height + BLOCK)*10; + vf->priv->outbuf= malloc(vf->priv->outbuf_size); + + return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt); +} + +static void get_image(struct vf_instance *vf, mp_image_t *mpi){ + if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change + // ok, we can do pp in-place (or pp disabled): + vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height); + mpi->planes[0]=vf->dmpi->planes[0]; + mpi->stride[0]=vf->dmpi->stride[0]; + mpi->width=vf->dmpi->width; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + mpi->planes[1]=vf->dmpi->planes[1]; + mpi->planes[2]=vf->dmpi->planes[2]; + mpi->stride[1]=vf->dmpi->stride[1]; + mpi->stride[2]=vf->dmpi->stride[2]; + } + mpi->flags|=MP_IMGFLAG_DIRECT; +} + +static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){ + mp_image_t *dmpi; + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // no DR, so get a new image! hope we'll get DR buffer: + dmpi=ff_vf_get_image(vf->next,mpi->imgfmt, + MP_IMGTYPE_TEMP, + MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE, + mpi->width,mpi->height); + ff_vf_clone_mpi_attributes(dmpi, mpi); + }else{ + dmpi=vf->dmpi; + } + + vf->priv->mpeg2= mpi->qscale_type; + if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){ + if(mpi->qscale || vf->priv->qp){ + filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h, mpi->qscale, mpi->qstride); + }else{ + memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]); + memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]); + memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]); + } + } + +#if HAVE_MMX + if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t"); +#endif +#if HAVE_MMX2 + 
if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t"); +#endif + + return ff_vf_next_put_image(vf,dmpi, pts); +} + +static void uninit(struct vf_instance *vf){ + int i; + if(!vf->priv) return; + + for(i=0; i<3; i++){ + free(vf->priv->temp[i]); + vf->priv->temp[i]= NULL; + free(vf->priv->src[i]); + vf->priv->src[i]= NULL; + } + for(i=0; i<BLOCK*BLOCK; i++){ + av_freep(&vf->priv->avctx_enc[i]); + } + + free(vf->priv); + vf->priv=NULL; +} + +//===========================================================================// +static int query_format(struct vf_instance *vf, unsigned int fmt){ + switch(fmt){ + case IMGFMT_YV12: + case IMGFMT_I420: + case IMGFMT_IYUV: + case IMGFMT_Y800: + case IMGFMT_Y8: + return ff_vf_next_query_format(vf,fmt); + } + return 0; +} + +static int control(struct vf_instance *vf, int request, void* data){ + switch(request){ + case VFCTRL_QUERY_MAX_PP_LEVEL: + return 8; + case VFCTRL_SET_PP_LEVEL: + vf->priv->log2_count= *((unsigned int*)data); + //FIXME we have to realloc a few things here + return CONTROL_TRUE; + } + return ff_vf_next_control(vf,request,data); +} + +static int vf_open(vf_instance_t *vf, char *args){ + + int log2c=-1; + + vf->config=config; + vf->put_image=put_image; + vf->get_image=get_image; + vf->query_format=query_format; + vf->uninit=uninit; + vf->control= control; + vf->priv=malloc(sizeof(struct vf_priv_s)); + memset(vf->priv, 0, sizeof(struct vf_priv_s)); + + ff_init_avcodec(); + + vf->priv->log2_count= 4; + + if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode); + + if( log2c >=0 && log2c <=8 ) + vf->priv->log2_count = log2c; + + if(vf->priv->qp < 0) + vf->priv->qp = 0; + +// #if HAVE_MMX +// if(ff_gCpuCaps.hasMMX){ +// store_slice= store_slice_mmx; +// } +// #endif + + return 1; +} + +const vf_info_t ff_vf_info_uspp = { + "ultra simple/slow postprocess", + "uspp", + "Michael Niedermayer", + "", + vf_open, + NULL +}; diff --git a/libavfilter/libmpcodecs/vfcap.h b/libavfilter/libmpcodecs/vfcap.h 
new file mode 100644 index 0000000..611d642 --- /dev/null +++ b/libavfilter/libmpcodecs/vfcap.h @@ -0,0 +1,56 @@ +/* VFCAP_* values: they are flags, returned by query_format(): + * + * This file is part of MPlayer. + * + * MPlayer is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * MPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with MPlayer; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MPLAYER_VFCAP_H +#define MPLAYER_VFCAP_H + +// set, if the given colorspace is supported (with or without conversion) +#define VFCAP_CSP_SUPPORTED 0x1 +// set, if the given colorspace is supported _without_ conversion +#define VFCAP_CSP_SUPPORTED_BY_HW 0x2 +// set if the driver/filter can draw OSD +#define VFCAP_OSD 0x4 +// set if the driver/filter can handle compressed SPU stream +#define VFCAP_SPU 0x8 +// scaling up/down by hardware, or software: +#define VFCAP_HWSCALE_UP 0x10 +#define VFCAP_HWSCALE_DOWN 0x20 +#define VFCAP_SWSCALE 0x40 +// driver/filter can do vertical flip (upside-down) +#define VFCAP_FLIP 0x80 + +// driver/hardware handles timing (blocking) +#define VFCAP_TIMER 0x100 +// driver _always_ flip image upside-down (for ve_vfw) +#define VFCAP_FLIPPED 0x200 +// vf filter: accepts stride (put_image) +// vo driver: has draw_slice() support for the given csp +#define VFCAP_ACCEPT_STRIDE 0x400 +// filter does postprocessing (so you shouldn't scale/filter image before it) +#define VFCAP_POSTPROC 0x800 +// filter cannot be 
reconfigured to different size & format +#define VFCAP_CONSTANT 0x1000 +// filter can draw EOSD +#define VFCAP_EOSD 0x2000 +// filter will draw EOSD at screen resolution (without scaling) +#define VFCAP_EOSD_UNSCALED 0x4000 +// used by libvo and vf_vo, indicates the VO does not support draw_slice for this format +#define VOCAP_NOSLICES 0x8000 + +#endif /* MPLAYER_VFCAP_H */ diff --git a/libavfilter/lswsutils.c b/libavfilter/lswsutils.c new file mode 100644 index 0000000..6902ee9 --- /dev/null +++ b/libavfilter/lswsutils.c @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/imgutils.h" +#include "lswsutils.h" + +int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4], + int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt, + uint8_t * const src_data[4], int src_linesize[4], + int src_w, int src_h, enum AVPixelFormat src_pix_fmt, + void *log_ctx) +{ + int ret; + struct SwsContext *sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt, + dst_w, dst_h, dst_pix_fmt, + SWS_BILINEAR, NULL, NULL, NULL); + if (!sws_ctx) { + av_log(log_ctx, AV_LOG_ERROR, + "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", + av_get_pix_fmt_name(src_pix_fmt), src_w, src_h, + av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h); + ret = AVERROR(EINVAL); + goto end; + } + + if ((ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pix_fmt, 16)) < 0) + goto end; + ret = 0; + sws_scale(sws_ctx, (const uint8_t * const*)src_data, src_linesize, 0, src_h, dst_data, dst_linesize); + +end: + sws_freeContext(sws_ctx); + return ret; +} diff --git a/libavfilter/lswsutils.h b/libavfilter/lswsutils.h new file mode 100644 index 0000000..f5f5320 --- /dev/null +++ b/libavfilter/lswsutils.h @@ -0,0 +1,38 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Miscellaneous utilities which make use of the libswscale library + */ + +#ifndef AVFILTER_LSWSUTILS_H +#define AVFILTER_LSWSUTILS_H + +#include "libswscale/swscale.h" + +/** + * Scale image using libswscale. + */ +int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4], + int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt, + uint8_t *const src_data[4], int src_linesize[4], + int src_w, int src_h, enum AVPixelFormat src_pix_fmt, + void *log_ctx); + +#endif /* AVFILTER_LSWSUTILS_H */ diff --git a/libavfilter/sink_buffer.c b/libavfilter/sink_buffer.c new file mode 100644 index 0000000..0edf9c5 --- /dev/null +++ b/libavfilter/sink_buffer.c @@ -0,0 +1,485 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * buffer sink + */ + +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/fifo.h" +#include "avfilter.h" +#include "buffersink.h" +#include "audio.h" +#include "internal.h" + +AVBufferSinkParams *av_buffersink_params_alloc(void) +{ + static const int pixel_fmts[] = { AV_PIX_FMT_NONE }; + AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams)); + if (!params) + return NULL; + + params->pixel_fmts = pixel_fmts; + return params; +} + +AVABufferSinkParams *av_abuffersink_params_alloc(void) +{ + AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams)); + + if (!params) + return NULL; + return params; +} + +typedef struct { + AVFifoBuffer *fifo; ///< FIFO buffer of video frame references + unsigned warning_limit; + + /* only used for video */ + enum AVPixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1 + + /* only used for audio */ + enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE + int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1 + int all_channel_counts; +} BufferSinkContext; + +#define FIFO_INIT_SIZE 8 + +static av_cold int common_init(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + + buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *)); + if (!buf->fifo) { + av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n"); + return AVERROR(ENOMEM); + } + buf->warning_limit = 100; + return 0; +} + +static av_cold void common_uninit(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + AVFilterBufferRef *picref; + + if (buf->fifo) { + while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) { + 
av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL); + avfilter_unref_buffer(picref); + } + av_fifo_free(buf->fifo); + buf->fifo = NULL; + } +} + +static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref) +{ + BufferSinkContext *buf = ctx->priv; + + if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) { + /* realloc fifo size */ + if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) { + av_log(ctx, AV_LOG_ERROR, + "Cannot buffer more frames. Consume some available frames " + "before adding new ones.\n"); + return AVERROR(ENOMEM); + } + } + + /* cache frame */ + av_fifo_generic_write(buf->fifo, &ref, sizeof(AVFilterBufferRef *), NULL); + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) +{ + AVFilterContext *ctx = inlink->dst; + BufferSinkContext *buf = inlink->dst->priv; + int ret; + + if ((ret = add_buffer_ref(ctx, ref)) < 0) + return ret; + if (buf->warning_limit && + av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) { + av_log(ctx, AV_LOG_WARNING, + "%d buffers queued in %s, something may be wrong.\n", + buf->warning_limit, + (char *)av_x_if_null(ctx->name, ctx->filter->name)); + buf->warning_limit *= 10; + } + return 0; +} + +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size) +{ + AVFilterLink *inlink = ctx->inputs[0]; + + inlink->min_samples = inlink->max_samples = + inlink->partial_buf_size = frame_size; +} + +int av_buffersink_get_buffer_ref(AVFilterContext *ctx, + AVFilterBufferRef **bufref, int flags) +{ + BufferSinkContext *buf = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + int ret; + *bufref = NULL; + + av_assert0( !strcmp(ctx->filter->name, "buffersink") + || !strcmp(ctx->filter->name, "abuffersink") + || !strcmp(ctx->filter->name, "ffbuffersink") + || !strcmp(ctx->filter->name, "ffabuffersink")); + + /* no picref available, fetch it from the filterchain */ + if (!av_fifo_size(buf->fifo)) { + if (flags & 
AV_BUFFERSINK_FLAG_NO_REQUEST) + return AVERROR(EAGAIN); + if ((ret = ff_request_frame(inlink)) < 0) + return ret; + } + + if (!av_fifo_size(buf->fifo)) + return AVERROR(EINVAL); + + if (flags & AV_BUFFERSINK_FLAG_PEEK) + *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0)); + else + av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL); + + return 0; +} + +AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx) +{ + av_assert0( !strcmp(ctx->filter->name, "buffersink") + || !strcmp(ctx->filter->name, "ffbuffersink")); + + return ctx->inputs[0]->frame_rate; +} + +int av_buffersink_poll_frame(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + + av_assert0( !strcmp(ctx->filter->name, "buffersink") + || !strcmp(ctx->filter->name, "abuffersink") + || !strcmp(ctx->filter->name, "ffbuffersink") + || !strcmp(ctx->filter->name, "ffabuffersink")); + + return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink); +} + +static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque) +{ + BufferSinkContext *buf = ctx->priv; + AVBufferSinkParams *params = opaque; + + if (params && params->pixel_fmts) { + const int *pixel_fmts = params->pixel_fmts; + + buf->pixel_fmts = ff_copy_int_list(pixel_fmts); + if (!buf->pixel_fmts) + return AVERROR(ENOMEM); + } + + return common_init(ctx); +} + +static av_cold void vsink_uninit(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + av_freep(&buf->pixel_fmts); + common_uninit(ctx); +} + +static int vsink_query_formats(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + + if (buf->pixel_fmts) + ff_set_common_formats(ctx, ff_make_format_list(buf->pixel_fmts)); + else + ff_default_query_formats(ctx); + + return 0; +} + +static const AVFilterPad ffbuffersink_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | 
AV_PERM_PRESERVE, + }, + { NULL }, +}; + +AVFilter avfilter_vsink_ffbuffersink = { + .name = "ffbuffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."), + .priv_size = sizeof(BufferSinkContext), + .init_opaque = vsink_init, + .uninit = vsink_uninit, + + .query_formats = vsink_query_formats, + .inputs = ffbuffersink_inputs, + .outputs = NULL, +}; + +static const AVFilterPad buffersink_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }, + { NULL }, +}; + +AVFilter avfilter_vsink_buffersink = { + .name = "buffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."), + .priv_size = sizeof(BufferSinkContext), + .init_opaque = vsink_init, + .uninit = vsink_uninit, + + .query_formats = vsink_query_formats, + .inputs = buffersink_inputs, + .outputs = NULL, +}; + +static int64_t *concat_channels_lists(const int64_t *layouts, const int *counts) +{ + int nb_layouts = 0, nb_counts = 0, i; + int64_t *list; + + if (layouts) + for (; layouts[nb_layouts] != -1; nb_layouts++); + if (counts) + for (; counts[nb_counts] != -1; nb_counts++); + if (nb_counts > INT_MAX - 1 - nb_layouts) + return NULL; + if (!(list = av_calloc(nb_layouts + nb_counts + 1, sizeof(*list)))) + return NULL; + for (i = 0; i < nb_layouts; i++) + list[i] = layouts[i]; + for (i = 0; i < nb_counts; i++) + list[nb_layouts + i] = FF_COUNT2LAYOUT(counts[i]); + list[nb_layouts + nb_counts] = -1; + return list; +} + +static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque) +{ + BufferSinkContext *buf = ctx->priv; + AVABufferSinkParams *params = opaque; + + if (params && params->sample_fmts) { + buf->sample_fmts = ff_copy_int_list (params->sample_fmts); + if (!buf->sample_fmts) + return AVERROR(ENOMEM); + } + if (params && (params->channel_layouts || 
params->channel_counts)) { + if (params->all_channel_counts) { + av_log(ctx, AV_LOG_ERROR, + "Conflicting all_channel_counts and list in parameters\n"); + return AVERROR(EINVAL); + } + buf->channel_layouts = concat_channels_lists(params->channel_layouts, + params->channel_counts); + if (!buf->channel_layouts) + return AVERROR(ENOMEM); + } + if (params) + buf->all_channel_counts = params->all_channel_counts; + return common_init(ctx); +} + +static av_cold void asink_uninit(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + + av_freep(&buf->sample_fmts); + av_freep(&buf->channel_layouts); + common_uninit(ctx); +} + +static int asink_query_formats(AVFilterContext *ctx) +{ + BufferSinkContext *buf = ctx->priv; + AVFilterFormats *formats = NULL; + AVFilterChannelLayouts *layouts = NULL; + + if (buf->sample_fmts) { + if (!(formats = ff_make_format_list(buf->sample_fmts))) + return AVERROR(ENOMEM); + ff_set_common_formats(ctx, formats); + } + + if (buf->channel_layouts || buf->all_channel_counts) { + layouts = buf->all_channel_counts ? 
ff_all_channel_counts() : + avfilter_make_format64_list(buf->channel_layouts); + if (!layouts) + return AVERROR(ENOMEM); + ff_set_common_channel_layouts(ctx, layouts); + } + + return 0; +} + +static const AVFilterPad ffabuffersink_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }, + { NULL }, +}; + +AVFilter avfilter_asink_ffabuffersink = { + .name = "ffabuffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."), + .init_opaque = asink_init, + .uninit = asink_uninit, + .priv_size = sizeof(BufferSinkContext), + .query_formats = asink_query_formats, + .inputs = ffabuffersink_inputs, + .outputs = NULL, +}; + +static const AVFilterPad abuffersink_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }, + { NULL }, +}; + +AVFilter avfilter_asink_abuffersink = { + .name = "abuffersink", + .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."), + .init_opaque = asink_init, + .uninit = asink_uninit, + .priv_size = sizeof(BufferSinkContext), + .query_formats = asink_query_formats, + .inputs = abuffersink_inputs, + .outputs = NULL, +}; + +/* Libav compatibility API */ + +extern AVFilter avfilter_vsink_buffer; +extern AVFilter avfilter_asink_abuffer; + +int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) +{ + AVFilterBufferRef *tbuf; + int ret; + + if (ctx->filter-> inputs[0].start_frame == + avfilter_vsink_buffer. inputs[0].start_frame || + ctx->filter-> inputs[0].filter_frame == + avfilter_asink_abuffer.inputs[0].filter_frame) + return ff_buffersink_read_compat(ctx, buf); + av_assert0(ctx->filter-> inputs[0].end_frame == + avfilter_vsink_ffbuffersink. 
inputs[0].end_frame || + ctx->filter-> inputs[0].filter_frame == + avfilter_asink_ffabuffersink.inputs[0].filter_frame); + + ret = av_buffersink_get_buffer_ref(ctx, &tbuf, + buf ? 0 : AV_BUFFERSINK_FLAG_PEEK); + if (!buf) + return ret >= 0; + if (ret < 0) + return ret; + *buf = tbuf; + return 0; +} + +int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, + int nb_samples) +{ + BufferSinkContext *sink = ctx->priv; + int ret = 0, have_samples = 0, need_samples; + AVFilterBufferRef *tbuf, *in_buf; + AVFilterLink *link = ctx->inputs[0]; + int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout); + + if (ctx->filter-> inputs[0].filter_frame == + avfilter_asink_abuffer.inputs[0].filter_frame) + return ff_buffersink_read_samples_compat(ctx, buf, nb_samples); + av_assert0(ctx->filter-> inputs[0].filter_frame == + avfilter_asink_ffabuffersink.inputs[0].filter_frame); + + tbuf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples); + if (!tbuf) + return AVERROR(ENOMEM); + + while (have_samples < nb_samples) { + ret = av_buffersink_get_buffer_ref(ctx, &in_buf, + AV_BUFFERSINK_FLAG_PEEK); + if (ret < 0) { + if (ret == AVERROR_EOF && have_samples) { + nb_samples = have_samples; + ret = 0; + } + break; + } + + need_samples = FFMIN(in_buf->audio->nb_samples, + nb_samples - have_samples); + av_samples_copy(tbuf->extended_data, in_buf->extended_data, + have_samples, 0, need_samples, + nb_channels, in_buf->format); + have_samples += need_samples; + if (need_samples < in_buf->audio->nb_samples) { + in_buf->audio->nb_samples -= need_samples; + av_samples_copy(in_buf->extended_data, in_buf->extended_data, + 0, need_samples, in_buf->audio->nb_samples, + nb_channels, in_buf->format); + } else { + av_buffersink_get_buffer_ref(ctx, &in_buf, 0); + avfilter_unref_buffer(in_buf); + } + } + tbuf->audio->nb_samples = have_samples; + + if (ret < 0) { + av_assert0(!av_fifo_size(sink->fifo)); + if (have_samples) + add_buffer_ref(ctx, tbuf); + else + 
avfilter_unref_buffer(tbuf); + return ret; + } + + *buf = tbuf; + return 0; +} diff --git a/libavfilter/split.c b/libavfilter/split.c index c1e1669..a54bef9 100644 --- a/libavfilter/split.c +++ b/libavfilter/split.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -52,6 +52,7 @@ static int split_init(AVFilterContext *ctx, const char *args) snprintf(name, sizeof(name), "output%d", i); pad.type = ctx->filter->inputs[0].type; pad.name = av_strdup(name); + pad.rej_perms = AV_PERM_WRITE; ff_insert_outpad(ctx, i, &pad); } @@ -70,10 +71,14 @@ static void split_uninit(AVFilterContext *ctx) static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) { AVFilterContext *ctx = inlink->dst; - int i, ret = 0; + int i, ret = AVERROR_EOF; for (i = 0; i < ctx->nb_outputs; i++) { - AVFilterBufferRef *buf_out = avfilter_ref_buffer(frame, ~AV_PERM_WRITE); + AVFilterBufferRef *buf_out; + + if (ctx->outputs[i]->closed) + continue; + buf_out = avfilter_ref_buffer(frame, ~AV_PERM_WRITE); if (!buf_out) { ret = 
AVERROR(ENOMEM); break; @@ -99,7 +104,7 @@ static const AVFilterPad avfilter_vf_split_inputs[] = { AVFilter avfilter_vf_split = { .name = "split", - .description = NULL_IF_CONFIG_SMALL("Pass on the input to two outputs."), + .description = NULL_IF_CONFIG_SMALL("Pass on the input video to N outputs."), .init = split_init, .uninit = split_uninit, diff --git a/libavfilter/src_buffer.c b/libavfilter/src_buffer.c new file mode 100644 index 0000000..a997034 --- /dev/null +++ b/libavfilter/src_buffer.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2008 Vitor Sessak + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram + * Copyright (c) 2011 Mina Nagy Zaki + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * memory buffer source filter + */ + +#include "avfilter.h" +#include "internal.h" +#include "audio.h" +#include "avcodec.h" +#include "buffersrc.h" +#include "asrc_abuffer.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/fifo.h" +#include "libavutil/imgutils.h" + +typedef struct { + AVFifoBuffer *fifo; + AVRational time_base; ///< time_base to set in the output link + int eof; + unsigned nb_failed_requests; + + /* Video only */ + AVFilterContext *scale; + int h, w; + enum AVPixelFormat pix_fmt; + AVRational sample_aspect_ratio; + char sws_param[256]; + + /* Audio only */ + // Audio format of incoming buffers + int sample_rate; + unsigned int sample_format; + int64_t channel_layout; + + // Normalization filters + AVFilterContext *aconvert; + AVFilterContext *aresample; +} BufferSourceContext; + +static void buf_free(AVFilterBuffer *ptr) +{ + av_free(ptr); + return; +} + +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx, + AVFilterBufferRef *samplesref, + int av_unused flags) +{ + return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY); +} + +int av_asrc_buffer_add_samples(AVFilterContext *ctx, + uint8_t *data[8], int linesize[8], + int nb_samples, int sample_rate, + int sample_fmt, int64_t channel_layout, int planar, + int64_t pts, int av_unused flags) +{ + AVFilterBufferRef *samplesref; + + if (!channel_layout) + return AVERROR(EINVAL); + samplesref = avfilter_get_audio_buffer_ref_from_arrays( + data, linesize[0], AV_PERM_WRITE, + nb_samples, + sample_fmt, channel_layout); + if (!samplesref) + return AVERROR(ENOMEM); + + samplesref->buf->free = buf_free; + samplesref->pts = pts; + samplesref->audio->sample_rate = sample_rate; + + AV_NOWARN_DEPRECATED( 
+ return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0); + ) +} + +int av_asrc_buffer_add_buffer(AVFilterContext *ctx, + uint8_t *buf, int buf_size, int sample_rate, + int sample_fmt, int64_t channel_layout, int planar, + int64_t pts, int av_unused flags) +{ + uint8_t *data[8] = {0}; + int linesize[8]; + int nb_channels = av_get_channel_layout_nb_channels(channel_layout), + nb_samples = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt); + + av_samples_fill_arrays(data, linesize, + buf, nb_channels, nb_samples, + sample_fmt, 16); + + AV_NOWARN_DEPRECATED( + return av_asrc_buffer_add_samples(ctx, + data, linesize, nb_samples, + sample_rate, + sample_fmt, channel_layout, planar, + pts, flags); + ) +} diff --git a/libavfilter/src_movie.c b/libavfilter/src_movie.c new file mode 100644 index 0000000..bd45766 --- /dev/null +++ b/libavfilter/src_movie.c @@ -0,0 +1,644 @@ +/* + * Copyright (c) 2010 Stefano Sabatini + * Copyright (c) 2008 Victor Paesa + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * movie video source + * + * @todo use direct rendering (no allocation of a new frame) + * @todo support a PTS correction mechanism + */ + +/* #define DEBUG */ + +#include <float.h> +#include "libavutil/avstring.h" +#include "libavutil/avassert.h" +#include "libavutil/opt.h" +#include "libavutil/imgutils.h" +#include "libavutil/timestamp.h" +#include "libavformat/avformat.h" +#include "audio.h" +#include "avcodec.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +typedef struct { + AVStream *st; + int done; +} MovieStream; + +typedef struct { + /* common A/V fields */ + const AVClass *class; + int64_t seek_point; ///< seekpoint in microseconds + double seek_point_d; + char *format_name; + char *file_name; + char *stream_specs; /**< user-provided list of streams, separated by + */ + int stream_index; /**< for compatibility */ + int loop_count; + + AVFormatContext *format_ctx; + int eof; + AVPacket pkt, pkt0; + AVFrame *frame; ///< video frame to store the decoded images in + + int max_stream_index; /**< max stream # actually used for output */ + MovieStream *st; /**< array of all streams, one per output */ + int *out_index; /**< stream number -> output number map, or -1 */ +} MovieContext; + +#define OFFSET(x) offsetof(MovieContext, x) +#define F AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption movie_options[]= { +{"format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX, F }, +{"f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX, F }, +{"streams", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, F }, +{"s", "set streams", OFFSET(stream_specs), 
AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, F }, +{"si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, F }, +{"stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, F }, +{"seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000, F }, +{"sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000, F }, +{"loop", "set loop count", OFFSET(loop_count), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, F }, +{NULL}, +}; + +static int movie_config_output_props(AVFilterLink *outlink); +static int movie_request_frame(AVFilterLink *outlink); + +static AVStream *find_stream(void *log, AVFormatContext *avf, const char *spec) +{ + int i, ret, already = 0, stream_id = -1; + char type_char, dummy; + AVStream *found = NULL; + enum AVMediaType type; + + ret = sscanf(spec, "d%[av]%d%c", &type_char, &stream_id, &dummy); + if (ret >= 1 && ret <= 2) { + type = type_char == 'v' ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO; + ret = av_find_best_stream(avf, type, stream_id, -1, NULL, 0); + if (ret < 0) { + av_log(log, AV_LOG_ERROR, "No %s stream with index '%d' found\n", + av_get_media_type_string(type), stream_id); + return NULL; + } + return avf->streams[ret]; + } + for (i = 0; i < avf->nb_streams; i++) { + ret = avformat_match_stream_specifier(avf, avf->streams[i], spec); + if (ret < 0) { + av_log(log, AV_LOG_ERROR, + "Invalid stream specifier \"%s\"\n", spec); + return NULL; + } + if (!ret) + continue; + if (avf->streams[i]->discard != AVDISCARD_ALL) { + already++; + continue; + } + if (found) { + av_log(log, AV_LOG_WARNING, + "Ambiguous stream specifier \"%s\", using #%d\n", spec, i); + break; + } + found = avf->streams[i]; + } + if (!found) { + av_log(log, AV_LOG_WARNING, "Stream specifier \"%s\" %s\n", spec, + already ? 
"matched only already used streams" : + "did not match any stream"); + return NULL; + } + if (found->codec->codec_type != AVMEDIA_TYPE_VIDEO && + found->codec->codec_type != AVMEDIA_TYPE_AUDIO) { + av_log(log, AV_LOG_ERROR, "Stream specifier \"%s\" matched a %s stream," + "currently unsupported by libavfilter\n", spec, + av_get_media_type_string(found->codec->codec_type)); + return NULL; + } + return found; +} + +static int open_stream(void *log, MovieStream *st) +{ + AVCodec *codec; + int ret; + + codec = avcodec_find_decoder(st->st->codec->codec_id); + if (!codec) { + av_log(log, AV_LOG_ERROR, "Failed to find any codec\n"); + return AVERROR(EINVAL); + } + + if ((ret = avcodec_open2(st->st->codec, codec, NULL)) < 0) { + av_log(log, AV_LOG_ERROR, "Failed to open codec\n"); + return ret; + } + + return 0; +} + +static int guess_channel_layout(MovieStream *st, int st_index, void *log_ctx) +{ + AVCodecContext *dec_ctx = st->st->codec; + char buf[256]; + int64_t chl = av_get_default_channel_layout(dec_ctx->channels); + + if (!chl) { + av_log(log_ctx, AV_LOG_ERROR, + "Channel layout is not set in stream %d, and could not " + "be guessed from the number of channels (%d)\n", + st_index, dec_ctx->channels); + return AVERROR(EINVAL); + } + + av_get_channel_layout_string(buf, sizeof(buf), dec_ctx->channels, chl); + av_log(log_ctx, AV_LOG_WARNING, + "Channel layout is not set in output stream %d, " + "guessed channel layout is '%s'\n", + st_index, buf); + dec_ctx->channel_layout = chl; + return 0; +} + +static av_cold int movie_common_init(AVFilterContext *ctx, const char *args, const AVClass *class) +{ + MovieContext *movie = ctx->priv; + AVInputFormat *iformat = NULL; + int64_t timestamp; + int nb_streams, ret, i; + char default_streams[16], *stream_specs, *spec, *cursor; + char name[16]; + AVStream *st; + + movie->class = class; + av_opt_set_defaults(movie); + + if (args) { + movie->file_name = av_get_token(&args, ":"); + if (!movie->file_name) + return AVERROR(ENOMEM); + 
} + if (!args || !*movie->file_name) { + av_log(ctx, AV_LOG_ERROR, "No filename provided!\n"); + return AVERROR(EINVAL); + } + + if (*args++ == ':' && (ret = av_set_options_string(movie, args, "=", ":")) < 0) + return ret; + + movie->seek_point = movie->seek_point_d * 1000000 + 0.5; + + stream_specs = movie->stream_specs; + if (!stream_specs) { + snprintf(default_streams, sizeof(default_streams), "d%c%d", + !strcmp(ctx->filter->name, "amovie") ? 'a' : 'v', + movie->stream_index); + stream_specs = default_streams; + } + for (cursor = stream_specs, nb_streams = 1; *cursor; cursor++) + if (*cursor == '+') + nb_streams++; + + if (movie->loop_count != 1 && nb_streams != 1) { + av_log(ctx, AV_LOG_ERROR, + "Loop with several streams is currently unsupported\n"); + return AVERROR_PATCHWELCOME; + } + + av_register_all(); + + // Try to find the movie format (container) + iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL; + + movie->format_ctx = NULL; + if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) { + av_log(ctx, AV_LOG_ERROR, + "Failed to avformat_open_input '%s'\n", movie->file_name); + return ret; + } + if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0) + av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n"); + + // if seeking requested, we execute it + if (movie->seek_point > 0) { + timestamp = movie->seek_point; + // add the stream start time, should it exist + if (movie->format_ctx->start_time != AV_NOPTS_VALUE) { + if (timestamp > INT64_MAX - movie->format_ctx->start_time) { + av_log(ctx, AV_LOG_ERROR, + "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n", + movie->file_name, movie->format_ctx->start_time, movie->seek_point); + return AVERROR(EINVAL); + } + timestamp += movie->format_ctx->start_time; + } + if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) { + av_log(ctx, AV_LOG_ERROR, "%s: could not seek to 
position %"PRId64"\n", + movie->file_name, timestamp); + return ret; + } + } + + for (i = 0; i < movie->format_ctx->nb_streams; i++) + movie->format_ctx->streams[i]->discard = AVDISCARD_ALL; + + movie->st = av_calloc(nb_streams, sizeof(*movie->st)); + if (!movie->st) + return AVERROR(ENOMEM); + + for (i = 0; i < nb_streams; i++) { + spec = av_strtok(stream_specs, "+", &cursor); + if (!spec) + return AVERROR_BUG; + stream_specs = NULL; /* for next strtok */ + st = find_stream(ctx, movie->format_ctx, spec); + if (!st) + return AVERROR(EINVAL); + st->discard = AVDISCARD_DEFAULT; + movie->st[i].st = st; + movie->max_stream_index = FFMAX(movie->max_stream_index, st->index); + } + if (av_strtok(NULL, "+", &cursor)) + return AVERROR_BUG; + + movie->out_index = av_calloc(movie->max_stream_index + 1, + sizeof(*movie->out_index)); + if (!movie->out_index) + return AVERROR(ENOMEM); + for (i = 0; i <= movie->max_stream_index; i++) + movie->out_index[i] = -1; + for (i = 0; i < nb_streams; i++) + movie->out_index[movie->st[i].st->index] = i; + + for (i = 0; i < nb_streams; i++) { + AVFilterPad pad = { 0 }; + snprintf(name, sizeof(name), "out%d", i); + pad.type = movie->st[i].st->codec->codec_type; + pad.name = av_strdup(name); + pad.config_props = movie_config_output_props; + pad.request_frame = movie_request_frame; + ff_insert_outpad(ctx, i, &pad); + ret = open_stream(ctx, &movie->st[i]); + if (ret < 0) + return ret; + if ( movie->st[i].st->codec->codec->type == AVMEDIA_TYPE_AUDIO && + !movie->st[i].st->codec->channel_layout) { + ret = guess_channel_layout(&movie->st[i], i, ctx); + if (ret < 0) + return ret; + } + } + + if (!(movie->frame = avcodec_alloc_frame()) ) { + av_log(log, AV_LOG_ERROR, "Failed to alloc frame\n"); + return AVERROR(ENOMEM); + } + + av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n", + movie->seek_point, movie->format_name, movie->file_name, + movie->stream_index); + + return 0; +} + +static av_cold void 
movie_uninit(AVFilterContext *ctx) +{ + MovieContext *movie = ctx->priv; + int i; + + for (i = 0; i < ctx->nb_outputs; i++) { + av_freep(&ctx->output_pads[i].name); + if (movie->st[i].st) + avcodec_close(movie->st[i].st->codec); + } + av_opt_free(movie); + av_freep(&movie->file_name); + av_freep(&movie->st); + av_freep(&movie->out_index); + avcodec_free_frame(&movie->frame); + if (movie->format_ctx) + avformat_close_input(&movie->format_ctx); +} + +static int movie_query_formats(AVFilterContext *ctx) +{ + MovieContext *movie = ctx->priv; + int list[] = { 0, -1 }; + int64_t list64[] = { 0, -1 }; + int i; + + for (i = 0; i < ctx->nb_outputs; i++) { + MovieStream *st = &movie->st[i]; + AVCodecContext *c = st->st->codec; + AVFilterLink *outlink = ctx->outputs[i]; + + switch (c->codec_type) { + case AVMEDIA_TYPE_VIDEO: + list[0] = c->pix_fmt; + ff_formats_ref(ff_make_format_list(list), &outlink->in_formats); + break; + case AVMEDIA_TYPE_AUDIO: + list[0] = c->sample_fmt; + ff_formats_ref(ff_make_format_list(list), &outlink->in_formats); + list[0] = c->sample_rate; + ff_formats_ref(ff_make_format_list(list), &outlink->in_samplerates); + list64[0] = c->channel_layout; + ff_channel_layouts_ref(avfilter_make_format64_list(list64), + &outlink->in_channel_layouts); + break; + } + } + + return 0; +} + +static int movie_config_output_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + MovieContext *movie = ctx->priv; + unsigned out_id = FF_OUTLINK_IDX(outlink); + MovieStream *st = &movie->st[out_id]; + AVCodecContext *c = st->st->codec; + + outlink->time_base = st->st->time_base; + + switch (c->codec_type) { + case AVMEDIA_TYPE_VIDEO: + outlink->w = c->width; + outlink->h = c->height; + outlink->frame_rate = st->st->r_frame_rate; + break; + case AVMEDIA_TYPE_AUDIO: + break; + } + + return 0; +} + +static AVFilterBufferRef *frame_to_buf(enum AVMediaType type, AVFrame *frame, + AVFilterLink *outlink) +{ + AVFilterBufferRef *buf, *copy; + + buf = 
avfilter_get_buffer_ref_from_frame(type, frame, + AV_PERM_WRITE | + AV_PERM_PRESERVE | + AV_PERM_REUSE2); + if (!buf) + return NULL; + buf->pts = av_frame_get_best_effort_timestamp(frame); + copy = ff_copy_buffer_ref(outlink, buf); + if (!copy) + return NULL; + buf->buf->data[0] = NULL; /* it belongs to the frame */ + avfilter_unref_buffer(buf); + return copy; +} + +static char *describe_bufref_to_str(char *dst, size_t dst_size, + AVFilterBufferRef *buf, + AVFilterLink *link) +{ + switch (buf->type) { + case AVMEDIA_TYPE_VIDEO: + snprintf(dst, dst_size, + "video pts:%s time:%s pos:%"PRId64" size:%dx%d aspect:%d/%d", + av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base), + buf->pos, buf->video->w, buf->video->h, + buf->video->sample_aspect_ratio.num, + buf->video->sample_aspect_ratio.den); + break; + case AVMEDIA_TYPE_AUDIO: + snprintf(dst, dst_size, + "audio pts:%s time:%s pos:%"PRId64" samples:%d", + av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base), + buf->pos, buf->audio->nb_samples); + break; + default: + snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(buf->type)); + break; + } + return dst; +} + +#define describe_bufref(buf, link) \ + describe_bufref_to_str((char[1024]){0}, 1024, buf, link) + +static int rewind_file(AVFilterContext *ctx) +{ + MovieContext *movie = ctx->priv; + int64_t timestamp = movie->seek_point; + int ret, i; + + if (movie->format_ctx->start_time != AV_NOPTS_VALUE) + timestamp += movie->format_ctx->start_time; + ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Unable to loop: %s\n", av_err2str(ret)); + movie->loop_count = 1; /* do not try again */ + return ret; + } + + for (i = 0; i < ctx->nb_outputs; i++) { + avcodec_flush_buffers(movie->st[i].st->codec); + movie->st[i].done = 0; + } + movie->eof = 0; + return 0; +} + +/** + * Try to push a frame to the requested output. 
+ * + * @param ctx filter context + * @param out_id number of output where a frame is wanted; + * if the frame is read from file, used to set the return value; + * if the codec is being flushed, flush the corresponding stream + * @return 1 if a frame was pushed on the requested output, + * 0 if another attempt is possible, + * <0 AVERROR code + */ +static int movie_push_frame(AVFilterContext *ctx, unsigned out_id) +{ + MovieContext *movie = ctx->priv; + AVPacket *pkt = &movie->pkt; + MovieStream *st; + int ret, got_frame = 0, pkt_out_id; + AVFilterLink *outlink; + AVFilterBufferRef *buf; + + if (!pkt->size) { + if (movie->eof) { + if (movie->st[out_id].done) { + if (movie->loop_count != 1) { + ret = rewind_file(ctx); + if (ret < 0) + return ret; + movie->loop_count -= movie->loop_count > 1; + av_log(ctx, AV_LOG_VERBOSE, "Stream finished, looping.\n"); + return 0; /* retry */ + } + return AVERROR_EOF; + } + pkt->stream_index = movie->st[out_id].st->index; + /* packet is already ready for flushing */ + } else { + ret = av_read_frame(movie->format_ctx, &movie->pkt0); + if (ret < 0) { + av_init_packet(&movie->pkt0); /* ready for flushing */ + *pkt = movie->pkt0; + if (ret == AVERROR_EOF) { + movie->eof = 1; + return 0; /* start flushing */ + } + return ret; + } + *pkt = movie->pkt0; + } + } + + pkt_out_id = pkt->stream_index > movie->max_stream_index ? 
-1 : + movie->out_index[pkt->stream_index]; + if (pkt_out_id < 0) { + av_free_packet(&movie->pkt0); + pkt->size = 0; /* ready for next run */ + pkt->data = NULL; + return 0; + } + st = &movie->st[pkt_out_id]; + outlink = ctx->outputs[pkt_out_id]; + + switch (st->st->codec->codec_type) { + case AVMEDIA_TYPE_VIDEO: + ret = avcodec_decode_video2(st->st->codec, movie->frame, &got_frame, pkt); + break; + case AVMEDIA_TYPE_AUDIO: + ret = avcodec_decode_audio4(st->st->codec, movie->frame, &got_frame, pkt); + break; + default: + ret = AVERROR(ENOSYS); + break; + } + if (ret < 0) { + av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret)); + return 0; + } + if (!ret) + ret = pkt->size; + + pkt->data += ret; + pkt->size -= ret; + if (pkt->size <= 0) { + av_free_packet(&movie->pkt0); + pkt->size = 0; /* ready for next run */ + pkt->data = NULL; + } + if (!got_frame) { + if (!ret) + st->done = 1; + return 0; + } + + buf = frame_to_buf(st->st->codec->codec_type, movie->frame, outlink); + if (!buf) + return AVERROR(ENOMEM); + av_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name, + describe_bufref(buf, outlink)); + switch (st->st->codec->codec_type) { + case AVMEDIA_TYPE_VIDEO: + if (!movie->frame->sample_aspect_ratio.num) + buf->video->sample_aspect_ratio = st->st->sample_aspect_ratio; + /* Fall through */ + case AVMEDIA_TYPE_AUDIO: + ff_filter_frame(outlink, buf); + break; + } + + return pkt_out_id == out_id; +} + +static int movie_request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + unsigned out_id = FF_OUTLINK_IDX(outlink); + int ret; + + while (1) { + ret = movie_push_frame(ctx, out_id); + if (ret) + return FFMIN(ret, 0); + } +} + +#if CONFIG_MOVIE_FILTER + +AVFILTER_DEFINE_CLASS(movie); + +static av_cold int movie_init(AVFilterContext *ctx, const char *args) +{ + return movie_common_init(ctx, args, &movie_class); +} + +AVFilter avfilter_avsrc_movie = { + .name = "movie", + .description = NULL_IF_CONFIG_SMALL("Read from a 
movie source."), + .priv_size = sizeof(MovieContext), + .init = movie_init, + .uninit = movie_uninit, + .query_formats = movie_query_formats, + + .inputs = NULL, + .outputs = NULL, + .priv_class = &movie_class, +}; + +#endif /* CONFIG_MOVIE_FILTER */ + +#if CONFIG_AMOVIE_FILTER + +#define amovie_options movie_options +AVFILTER_DEFINE_CLASS(amovie); + +static av_cold int amovie_init(AVFilterContext *ctx, const char *args) +{ + return movie_common_init(ctx, args, &amovie_class); +} + +AVFilter avfilter_avsrc_amovie = { + .name = "amovie", + .description = NULL_IF_CONFIG_SMALL("Read audio from a movie source."), + .priv_size = sizeof(MovieContext), + .init = amovie_init, + .uninit = movie_uninit, + .query_formats = movie_query_formats, + + .inputs = NULL, + .outputs = NULL, + .priv_class = &amovie_class, +}; + +#endif /* CONFIG_AMOVIE_FILTER */ diff --git a/libavfilter/transform.c b/libavfilter/transform.c new file mode 100644 index 0000000..1db8c08 --- /dev/null +++ b/libavfilter/transform.c @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2010 Georg Martius <georg.martius@web.de> + * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * transform input video + */ + +#include "libavutil/common.h" +#include "libavutil/avassert.h" + +#include "transform.h" + +#define INTERPOLATE_METHOD(name) \ + static uint8_t name(float x, float y, const uint8_t *src, \ + int width, int height, int stride, uint8_t def) + +#define PIXEL(img, x, y, w, h, stride, def) \ + ((x) < 0 || (y) < 0) ? (def) : \ + (((x) >= (w) || (y) >= (h)) ? (def) : \ + img[(x) + (y) * (stride)]) + +/** + * Nearest neighbor interpolation + */ +INTERPOLATE_METHOD(interpolate_nearest) +{ + return PIXEL(src, (int)(x + 0.5), (int)(y + 0.5), width, height, stride, def); +} + +/** + * Bilinear interpolation + */ +INTERPOLATE_METHOD(interpolate_bilinear) +{ + int x_c, x_f, y_c, y_f; + int v1, v2, v3, v4; + + if (x < -1 || x > width || y < -1 || y > height) { + return def; + } else { + x_f = (int)x; + x_c = x_f + 1; + + y_f = (int)y; + y_c = y_f + 1; + + v1 = PIXEL(src, x_c, y_c, width, height, stride, def); + v2 = PIXEL(src, x_c, y_f, width, height, stride, def); + v3 = PIXEL(src, x_f, y_c, width, height, stride, def); + v4 = PIXEL(src, x_f, y_f, width, height, stride, def); + + return (v1*(x - x_f)*(y - y_f) + v2*((x - x_f)*(y_c - y)) + + v3*(x_c - x)*(y - y_f) + v4*((x_c - x)*(y_c - y))); + } +} + +/** + * Biquadratic interpolation + */ +INTERPOLATE_METHOD(interpolate_biquadratic) +{ + int x_c, x_f, y_c, y_f; + uint8_t v1, v2, v3, v4; + float f1, f2, f3, f4; + + if (x < - 1 || x > width || y < -1 || y > height) + return def; + else { + x_f = (int)x; + x_c = x_f + 1; + y_f = (int)y; + y_c = y_f + 1; + + v1 = PIXEL(src, x_c, y_c, width, height, stride, def); + v2 = PIXEL(src, x_c, y_f, width, height, stride, def); + v3 = PIXEL(src, x_f, y_c, width, height, stride, def); + v4 = PIXEL(src, x_f, y_f, width, 
height, stride, def); + + f1 = 1 - sqrt((x_c - x) * (y_c - y)); + f2 = 1 - sqrt((x_c - x) * (y - y_f)); + f3 = 1 - sqrt((x - x_f) * (y_c - y)); + f4 = 1 - sqrt((x - x_f) * (y - y_f)); + return (v1 * f1 + v2 * f2 + v3 * f3 + v4 * f4) / (f1 + f2 + f3 + f4); + } +} + +void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix) { + matrix[0] = zoom * cos(angle); + matrix[1] = -sin(angle); + matrix[2] = x_shift; + matrix[3] = -matrix[1]; + matrix[4] = matrix[0]; + matrix[5] = y_shift; + matrix[6] = 0; + matrix[7] = 0; + matrix[8] = 1; +} + +void avfilter_add_matrix(const float *m1, const float *m2, float *result) +{ + int i; + for (i = 0; i < 9; i++) + result[i] = m1[i] + m2[i]; +} + +void avfilter_sub_matrix(const float *m1, const float *m2, float *result) +{ + int i; + for (i = 0; i < 9; i++) + result[i] = m1[i] - m2[i]; +} + +void avfilter_mul_matrix(const float *m1, float scalar, float *result) +{ + int i; + for (i = 0; i < 9; i++) + result[i] = m1[i] * scalar; +} + +static inline int mirror(int v, int m) +{ + while ((unsigned)v > (unsigned)m) { + v = -v; + if (v < 0) + v += 2 * m; + } + return v; +} + +int avfilter_transform(const uint8_t *src, uint8_t *dst, + int src_stride, int dst_stride, + int width, int height, const float *matrix, + enum InterpolateMethod interpolate, + enum FillMethod fill) +{ + int x, y; + float x_s, y_s; + uint8_t def = 0; + uint8_t (*func)(float, float, const uint8_t *, int, int, int, uint8_t) = NULL; + + switch(interpolate) { + case INTERPOLATE_NEAREST: + func = interpolate_nearest; + break; + case INTERPOLATE_BILINEAR: + func = interpolate_bilinear; + break; + case INTERPOLATE_BIQUADRATIC: + func = interpolate_biquadratic; + break; + default: + return AVERROR(EINVAL); + } + + for (y = 0; y < height; y++) { + for(x = 0; x < width; x++) { + x_s = x * matrix[0] + y * matrix[1] + matrix[2]; + y_s = x * matrix[3] + y * matrix[4] + matrix[5]; + + switch(fill) { + case FILL_ORIGINAL: + def = src[y * 
src_stride + x]; + break; + case FILL_CLAMP: + y_s = av_clipf(y_s, 0, height - 1); + x_s = av_clipf(x_s, 0, width - 1); + def = src[(int)y_s * src_stride + (int)x_s]; + break; + case FILL_MIRROR: + x_s = mirror(x_s, width-1); + y_s = mirror(y_s, height-1); + + av_assert2(x_s >= 0 && y_s >= 0); + av_assert2(x_s < width && y_s < height); + def = src[(int)y_s * src_stride + (int)x_s]; + } + + dst[y * dst_stride + x] = func(x_s, y_s, src, width, height, src_stride, def); + } + } + return 0; +} + diff --git a/libavfilter/transform.h b/libavfilter/transform.h new file mode 100644 index 0000000..07436bf --- /dev/null +++ b/libavfilter/transform.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2010 Georg Martius <georg.martius@web.de> + * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_TRANSFORM_H +#define AVFILTER_TRANSFORM_H + +#include <stdint.h> + +/** + * @file + * transform input video + * + * All matrices are defined as a single 9-item block of contiguous memory. 
For + * example, the identity matrix would be: + * + * float *matrix = {1, 0, 0, + * 0, 1, 0, + * 0, 0, 1}; + */ + +enum InterpolateMethod { + INTERPOLATE_NEAREST, //< Nearest-neighbor (fast) + INTERPOLATE_BILINEAR, //< Bilinear + INTERPOLATE_BIQUADRATIC, //< Biquadratic (best) + INTERPOLATE_COUNT, //< Number of interpolation methods +}; + +// Shortcuts for the fastest and best interpolation methods +#define INTERPOLATE_DEFAULT INTERPOLATE_BILINEAR +#define INTERPOLATE_FAST INTERPOLATE_NEAREST +#define INTERPOLATE_BEST INTERPOLATE_BIQUADRATIC + +enum FillMethod { + FILL_BLANK, //< Fill zeroes at blank locations + FILL_ORIGINAL, //< Original image at blank locations + FILL_CLAMP, //< Extruded edge value at blank locations + FILL_MIRROR, //< Mirrored edge at blank locations + FILL_COUNT, //< Number of edge fill methods +}; + +// Shortcuts for fill methods +#define FILL_DEFAULT FILL_ORIGINAL + +/** + * Get an affine transformation matrix from a given translation, rotation, and + * zoom factor. The matrix will look like: + * + * [ zoom * cos(angle), -sin(angle), x_shift, + * sin(angle), zoom * cos(angle), y_shift, + * 0, 0, 1 ] + * + * @param x_shift horizontal translation + * @param y_shift vertical translation + * @param angle rotation in radians + * @param zoom scale percent (1.0 = 100%) + * @param matrix 9-item affine transformation matrix + */ +void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix); + +/** + * Add two matrices together. result = m1 + m2. + * + * @param m1 9-item transformation matrix + * @param m2 9-item transformation matrix + * @param result 9-item transformation matrix + */ +void avfilter_add_matrix(const float *m1, const float *m2, float *result); + +/** + * Subtract one matrix from another. result = m1 - m2. 
+ * + * @param m1 9-item transformation matrix + * @param m2 9-item transformation matrix + * @param result 9-item transformation matrix + */ +void avfilter_sub_matrix(const float *m1, const float *m2, float *result); + +/** + * Multiply a matrix by a scalar value. result = m1 * scalar. + * + * @param m1 9-item transformation matrix + * @param scalar a number + * @param result 9-item transformation matrix + */ +void avfilter_mul_matrix(const float *m1, float scalar, float *result); + +/** + * Do an affine transformation with the given interpolation method. This + * multiplies each vector [x,y,1] by the matrix and then interpolates to + * get the final value. + * + * @param src source image + * @param dst destination image + * @param src_stride source image line size in bytes + * @param dst_stride destination image line size in bytes + * @param width image width in pixels + * @param height image height in pixels + * @param matrix 9-item affine transformation matrix + * @param interpolate pixel interpolation method + * @param fill edge fill method + * @return negative on error + */ +int avfilter_transform(const uint8_t *src, uint8_t *dst, + int src_stride, int dst_stride, + int width, int height, const float *matrix, + enum InterpolateMethod interpolate, + enum FillMethod fill); + +#endif /* AVFILTER_TRANSFORM_H */ diff --git a/libavfilter/version.h b/libavfilter/version.h index c09d44b..f90d3f2 100644 --- a/libavfilter/version.h +++ b/libavfilter/version.h @@ -1,20 +1,20 @@ /* * Version macros. * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -29,8 +29,8 @@ #include "libavutil/avutil.h" #define LIBAVFILTER_VERSION_MAJOR 3 -#define LIBAVFILTER_VERSION_MINOR 3 -#define LIBAVFILTER_VERSION_MICRO 0 +#define LIBAVFILTER_VERSION_MINOR 35 +#define LIBAVFILTER_VERSION_MICRO 101 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ LIBAVFILTER_VERSION_MINOR, \ @@ -40,6 +40,8 @@ LIBAVFILTER_VERSION_MICRO) #define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT +#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION) + /** * FF_API_* defines may be placed below to indicate public API that will be * dropped at a future version bump. The defines themselves are not part of @@ -52,5 +54,11 @@ #ifndef FF_API_FOO_COUNT #define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4) #endif +#ifndef FF_API_FILL_FRAME +#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_BUFFERSRC_BUFFER +#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif #endif /* AVFILTER_VERSION_H */ diff --git a/libavfilter/vf_alphaextract.c b/libavfilter/vf_alphaextract.c new file mode 100644 index 0000000..45d3dd4 --- /dev/null +++ b/libavfilter/vf_alphaextract.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2012 Steven Robertson + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple channel-swapping filter to get at the alpha component + */ + +#include <string.h> + +#include "libavutil/pixfmt.h" +#include "avfilter.h" +#include "drawutils.h" +#include "internal.h" +#include "formats.h" +#include "video.h" + +enum { Y, U, V, A }; + +typedef struct { + int is_packed_rgb; + uint8_t rgba_map[4]; +} AlphaExtractContext; + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat in_fmts[] = { + AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, + AV_PIX_FMT_NONE + }; + static const enum AVPixelFormat out_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; + ff_formats_ref(ff_make_format_list(in_fmts), &ctx->inputs[0]->out_formats); + ff_formats_ref(ff_make_format_list(out_fmts), &ctx->outputs[0]->in_formats); + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AlphaExtractContext *extract = inlink->dst->priv; + extract->is_packed_rgb = + ff_fill_rgba_map(extract->rgba_map, inlink->format) >= 0; + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur_buf) +{ + AlphaExtractContext *extract = inlink->dst->priv; + AVFilterLink *outlink = 
inlink->dst->outputs[0]; + AVFilterBufferRef *out_buf = + ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + int ret; + + if (!out_buf) { + ret = AVERROR(ENOMEM); + goto end; + } + avfilter_copy_buffer_ref_props(out_buf, cur_buf); + + if (extract->is_packed_rgb) { + int x, y; + uint8_t *pcur, *pout; + for (y = 0; y < outlink->h; y++) { + pcur = cur_buf->data[0] + y * cur_buf->linesize[0] + extract->rgba_map[A]; + pout = out_buf->data[0] + y * out_buf->linesize[0]; + for (x = 0; x < outlink->w; x++) { + *pout = *pcur; + pout += 1; + pcur += 4; + } + } + } else { + const int linesize = abs(FFMIN(out_buf->linesize[Y], cur_buf->linesize[A])); + int y; + for (y = 0; y < outlink->h; y++) { + memcpy(out_buf->data[Y] + y * out_buf->linesize[Y], + cur_buf->data[A] + y * cur_buf->linesize[A], + linesize); + } + } + + ret = ff_filter_frame(outlink, out_buf); + +end: + avfilter_unref_buffer(cur_buf); + return ret; +} + +static const AVFilterPad alphaextract_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad alphaextract_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_alphaextract = { + .name = "alphaextract", + .description = NULL_IF_CONFIG_SMALL("Extract an alpha channel as a " + "grayscale image component."), + .priv_size = sizeof(AlphaExtractContext), + .query_formats = query_formats, + .inputs = alphaextract_inputs, + .outputs = alphaextract_outputs, +}; diff --git a/libavfilter/vf_alphamerge.c b/libavfilter/vf_alphamerge.c new file mode 100644 index 0000000..99fd61e --- /dev/null +++ b/libavfilter/vf_alphamerge.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2012 Steven Robertson + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * copy an alpha component from another video's luma + */ + +#include <string.h> + +#include "libavutil/pixfmt.h" +#include "avfilter.h" +#include "bufferqueue.h" +#include "drawutils.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +enum { Y, U, V, A }; + +typedef struct { + int frame_requested; + int is_packed_rgb; + uint8_t rgba_map[4]; + struct FFBufQueue queue_main; + struct FFBufQueue queue_alpha; +} AlphaMergeContext; + +static av_cold void uninit(AVFilterContext *ctx) +{ + AlphaMergeContext *merge = ctx->priv; + ff_bufqueue_discard_all(&merge->queue_main); + ff_bufqueue_discard_all(&merge->queue_alpha); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat main_fmts[] = { + AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, + AV_PIX_FMT_NONE + }; + static const enum AVPixelFormat alpha_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; + AVFilterFormats *main_formats = ff_make_format_list(main_fmts); + AVFilterFormats *alpha_formats = ff_make_format_list(alpha_fmts); + ff_formats_ref(main_formats, &ctx->inputs[0]->out_formats); + ff_formats_ref(alpha_formats, 
&ctx->inputs[1]->out_formats); + ff_formats_ref(main_formats, &ctx->outputs[0]->in_formats); + return 0; +} + +static int config_input_main(AVFilterLink *inlink) +{ + AlphaMergeContext *merge = inlink->dst->priv; + merge->is_packed_rgb = + ff_fill_rgba_map(merge->rgba_map, inlink->format) >= 0; + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *mainlink = ctx->inputs[0]; + AVFilterLink *alphalink = ctx->inputs[1]; + if (mainlink->w != alphalink->w || mainlink->h != alphalink->h) { + av_log(ctx, AV_LOG_ERROR, + "Input frame sizes do not match (%dx%d vs %dx%d).\n", + mainlink->w, mainlink->h, + alphalink->w, alphalink->h); + return AVERROR(EINVAL); + } + + outlink->w = mainlink->w; + outlink->h = mainlink->h; + outlink->time_base = mainlink->time_base; + outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio; + outlink->frame_rate = mainlink->frame_rate; + return 0; +} + +static void draw_frame(AVFilterContext *ctx, + AVFilterBufferRef *main_buf, + AVFilterBufferRef *alpha_buf) +{ + AlphaMergeContext *merge = ctx->priv; + int h = main_buf->video->h; + + if (merge->is_packed_rgb) { + int x, y; + uint8_t *pin, *pout; + for (y = 0; y < h; y++) { + pin = alpha_buf->data[0] + y * alpha_buf->linesize[0]; + pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A]; + for (x = 0; x < main_buf->video->w; x++) { + *pout = *pin; + pin += 1; + pout += 4; + } + } + } else { + int y; + const int main_linesize = main_buf->linesize[A]; + const int alpha_linesize = alpha_buf->linesize[Y]; + for (y = 0; y < h && y < alpha_buf->video->h; y++) { + memcpy(main_buf->data[A] + y * main_linesize, + alpha_buf->data[Y] + y * alpha_linesize, + FFMIN(main_linesize, alpha_linesize)); + } + } +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) +{ + AVFilterContext *ctx = inlink->dst; + AlphaMergeContext *merge = ctx->priv; + + int is_alpha = (inlink == ctx->inputs[1]); + struct 
FFBufQueue *queue = + (is_alpha ? &merge->queue_alpha : &merge->queue_main); + ff_bufqueue_add(ctx, queue, buf); + + while (1) { + AVFilterBufferRef *main_buf, *alpha_buf; + + if (!ff_bufqueue_peek(&merge->queue_main, 0) || + !ff_bufqueue_peek(&merge->queue_alpha, 0)) break; + + main_buf = ff_bufqueue_get(&merge->queue_main); + alpha_buf = ff_bufqueue_get(&merge->queue_alpha); + + merge->frame_requested = 0; + draw_frame(ctx, main_buf, alpha_buf); + ff_filter_frame(ctx->outputs[0], main_buf); + avfilter_unref_buffer(alpha_buf); + } + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AlphaMergeContext *merge = ctx->priv; + int in, ret; + + merge->frame_requested = 1; + while (merge->frame_requested) { + in = ff_bufqueue_peek(&merge->queue_main, 0) ? 1 : 0; + ret = ff_request_frame(ctx->inputs[in]); + if (ret < 0) + return ret; + } + return 0; +} + +static const AVFilterPad alphamerge_inputs[] = { + { + .name = "main", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input_main, + .get_video_buffer = ff_null_get_video_buffer, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE, + },{ + .name = "alpha", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }, + { NULL } +}; + +static const AVFilterPad alphamerge_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_alphamerge = { + .name = "alphamerge", + .description = NULL_IF_CONFIG_SMALL("Copy the luma value of the second " + "input into the alpha channel of the first input."), + .uninit = uninit, + .priv_size = sizeof(AlphaMergeContext), + .query_formats = query_formats, + .inputs = alphamerge_inputs, + .outputs = alphamerge_outputs, +}; diff --git a/libavfilter/vf_aspect.c b/libavfilter/vf_aspect.c index d7e851c..7869d22 100644 
--- a/libavfilter/vf_aspect.c +++ b/libavfilter/vf_aspect.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2010 Bobby Bingham - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -24,44 +24,59 @@ */ #include "libavutil/common.h" +#include "libavutil/opt.h" #include "libavutil/mathematics.h" +#include "libavutil/parseutils.h" #include "avfilter.h" #include "internal.h" #include "video.h" typedef struct { - AVRational aspect; + const AVClass *class; + AVRational ratio; + char *ratio_str; + int max; } AspectContext; -static av_cold int init(AVFilterContext *ctx, const char *args) +#define OFFSET(x) offsetof(AspectContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption options[] = { + {"max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS }, + {"ratio", "set ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS }, + {"r", "set ratio", OFFSET(ratio_str), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS }, + {NULL} +}; + +static av_cold int 
init(AVFilterContext *ctx, const char *args, const AVClass *class) { AspectContext *aspect = ctx->priv; - double ratio; - int64_t gcd; - char c = 0; - - if (args) { - if (sscanf(args, "%d:%d%c", &aspect->aspect.num, &aspect->aspect.den, &c) != 2) - if (sscanf(args, "%lf%c", &ratio, &c) == 1) - aspect->aspect = av_d2q(ratio, 100); + static const char *shorthand[] = { "ratio", "max", NULL }; + char c; + int ret; + AVRational q; + + aspect->class = class; + av_opt_set_defaults(aspect); + + if (sscanf(args, "%d:%d%c", &q.num, &q.den, &c) == 2) { + aspect->ratio_str = av_strdup(args); + av_log(ctx, AV_LOG_WARNING, + "num:den syntax is deprecated, please use num/den or named options instead\n"); + } else if ((ret = av_opt_set_from_string(aspect, args, shorthand, "=", ":")) < 0) { + return ret; + } - if (c || aspect->aspect.num <= 0 || aspect->aspect.den <= 0) { + if (aspect->ratio_str) { + ret = av_parse_ratio(&aspect->ratio, aspect->ratio_str, aspect->max, 0, ctx); + if (ret < 0 || aspect->ratio.num < 0 || aspect->ratio.den <= 0) { av_log(ctx, AV_LOG_ERROR, - "Invalid string '%s' for aspect ratio.\n", args); + "Invalid string '%s' for aspect ratio\n", args); return AVERROR(EINVAL); } - - gcd = av_gcd(FFABS(aspect->aspect.num), FFABS(aspect->aspect.den)); - if (gcd) { - aspect->aspect.num /= gcd; - aspect->aspect.den /= gcd; - } } - if (aspect->aspect.den == 0) - aspect->aspect = (AVRational) {0, 1}; - - av_log(ctx, AV_LOG_VERBOSE, "a:%d/%d\n", aspect->aspect.num, aspect->aspect.den); + av_log(ctx, AV_LOG_VERBOSE, "a:%d/%d\n", aspect->ratio.num, aspect->ratio.den); return 0; } @@ -69,25 +84,40 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) { AspectContext *aspect = link->dst->priv; - frame->video->pixel_aspect = aspect->aspect; + frame->video->sample_aspect_ratio = aspect->ratio; return ff_filter_frame(link->dst->outputs[0], frame); } +static av_cold void uninit(AVFilterContext *ctx) +{ + AspectContext *aspect = ctx->priv; + + 
av_opt_free(aspect); +} + #if CONFIG_SETDAR_FILTER -/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */ + +#define setdar_options options +AVFILTER_DEFINE_CLASS(setdar); + +static av_cold int setdar_init(AVFilterContext *ctx, const char *args) +{ + return init(ctx, args, &setdar_class); +} + static int setdar_config_props(AVFilterLink *inlink) { AspectContext *aspect = inlink->dst->priv; - AVRational dar = aspect->aspect; + AVRational dar = aspect->ratio; - av_reduce(&aspect->aspect.num, &aspect->aspect.den, - aspect->aspect.num * inlink->h, - aspect->aspect.den * inlink->w, 100); + av_reduce(&aspect->ratio.num, &aspect->ratio.den, + aspect->ratio.num * inlink->h, + aspect->ratio.den * inlink->w, 100); av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d -> dar:%d/%d sar:%d/%d\n", - inlink->w, inlink->h, dar.num, dar.den, aspect->aspect.num, aspect->aspect.den); + inlink->w, inlink->h, dar.num, dar.den, aspect->ratio.num, aspect->ratio.den); - inlink->sample_aspect_ratio = aspect->aspect; + inlink->sample_aspect_ratio = aspect->ratio; return 0; } @@ -115,23 +145,34 @@ AVFilter avfilter_vf_setdar = { .name = "setdar", .description = NULL_IF_CONFIG_SMALL("Set the frame display aspect ratio."), - .init = init, + .init = setdar_init, + .uninit = uninit, .priv_size = sizeof(AspectContext), .inputs = avfilter_vf_setdar_inputs, .outputs = avfilter_vf_setdar_outputs, + .priv_class = &setdar_class, }; + #endif /* CONFIG_SETDAR_FILTER */ #if CONFIG_SETSAR_FILTER -/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */ + +#define setsar_options options +AVFILTER_DEFINE_CLASS(setsar); + +static av_cold int setsar_init(AVFilterContext *ctx, const char *args) +{ + return init(ctx, args, &setsar_class); +} + static int setsar_config_props(AVFilterLink *inlink) { AspectContext *aspect = inlink->dst->priv; - inlink->sample_aspect_ratio = aspect->aspect; + inlink->sample_aspect_ratio = aspect->ratio; return 0; } @@ -159,12 +200,15 @@ 
AVFilter avfilter_vf_setsar = { .name = "setsar", .description = NULL_IF_CONFIG_SMALL("Set the pixel sample aspect ratio."), - .init = init, + .init = setsar_init, + .uninit = uninit, .priv_size = sizeof(AspectContext), .inputs = avfilter_vf_setsar_inputs, .outputs = avfilter_vf_setsar_outputs, + .priv_class = &setsar_class, }; + #endif /* CONFIG_SETSAR_FILTER */ diff --git a/libavfilter/vf_ass.c b/libavfilter/vf_ass.c new file mode 100644 index 0000000..ade1b37 --- /dev/null +++ b/libavfilter/vf_ass.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2011 Baptiste Coudurier + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2012 Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Libass subtitles burning filter. 
+ * + * @see{http://www.matroska.org/technical/specs/subtitles/ssa.html} + */ + +#include <ass/ass.h> + +#include "config.h" +#if CONFIG_SUBTITLES_FILTER +# include "libavcodec/avcodec.h" +# include "libavformat/avformat.h" +#endif +#include "libavutil/avstring.h" +#include "libavutil/imgutils.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "drawutils.h" +#include "avfilter.h" +#include "internal.h" +#include "formats.h" +#include "video.h" + +typedef struct { + const AVClass *class; + ASS_Library *library; + ASS_Renderer *renderer; + ASS_Track *track; + char *filename; + uint8_t rgba_map[4]; + int pix_step[4]; ///< steps per pixel for each plane of the main output + int original_w, original_h; + FFDrawContext draw; +} AssContext; + +#define OFFSET(x) offsetof(AssContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption options[] = { + {"filename", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + {"f", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + {"original_size", "set the size of the original video (used to scale fonts)", OFFSET(original_w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + {NULL}, +}; + +/* libass supports a log level ranging from 0 to 7 */ +static const int ass_libavfilter_log_level_map[] = { + AV_LOG_QUIET, /* 0 */ + AV_LOG_PANIC, /* 1 */ + AV_LOG_FATAL, /* 2 */ + AV_LOG_ERROR, /* 3 */ + AV_LOG_WARNING, /* 4 */ + AV_LOG_INFO, /* 5 */ + AV_LOG_VERBOSE, /* 6 */ + AV_LOG_DEBUG, /* 7 */ +}; + +static void ass_log(int ass_level, const char *fmt, va_list args, void *ctx) +{ + int level = ass_libavfilter_log_level_map[ass_level]; + + av_vlog(ctx, level, fmt, args); + av_log(ctx, level, "\n"); +} + +static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *class) +{ + AssContext *ass = ctx->priv; 
+ static const char *shorthand[] = { "filename", NULL }; + int ret; + + ass->class = class; + av_opt_set_defaults(ass); + + if ((ret = av_opt_set_from_string(ass, args, shorthand, "=", ":")) < 0) + return ret; + + if (!ass->filename) { + av_log(ctx, AV_LOG_ERROR, "No filename provided!\n"); + return AVERROR(EINVAL); + } + + ass->library = ass_library_init(); + if (!ass->library) { + av_log(ctx, AV_LOG_ERROR, "Could not initialize libass.\n"); + return AVERROR(EINVAL); + } + ass_set_message_cb(ass->library, ass_log, ctx); + + ass->renderer = ass_renderer_init(ass->library); + if (!ass->renderer) { + av_log(ctx, AV_LOG_ERROR, "Could not initialize libass renderer.\n"); + return AVERROR(EINVAL); + } + + ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1); + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + AssContext *ass = ctx->priv; + + av_opt_free(ass); + if (ass->track) + ass_free_track(ass->track); + if (ass->renderer) + ass_renderer_done(ass->renderer); + if (ass->library) + ass_library_done(ass->library); +} + +static int query_formats(AVFilterContext *ctx) +{ + ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0)); + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AssContext *ass = inlink->dst->priv; + + ff_draw_init(&ass->draw, inlink->format, 0); + + ass_set_frame_size (ass->renderer, inlink->w, inlink->h); + if (ass->original_w && ass->original_h) + ass_set_aspect_ratio(ass->renderer, (double)inlink->w / inlink->h, + (double)ass->original_w / ass->original_h); + + return 0; +} + +/* libass stores an RGBA color in the format RRGGBBTT, where TT is the transparency level */ +#define AR(c) ( (c)>>24) +#define AG(c) (((c)>>16)&0xFF) +#define AB(c) (((c)>>8) &0xFF) +#define AA(c) ((0xFF-c) &0xFF) + +static void overlay_ass_image(AssContext *ass, AVFilterBufferRef *picref, + const ASS_Image *image) +{ + for (; image; image = image->next) { + uint8_t rgba_color[] = {AR(image->color), AG(image->color), 
AB(image->color), AA(image->color)}; + FFDrawColor color; + ff_draw_color(&ass->draw, &color, rgba_color); + ff_blend_mask(&ass->draw, &color, + picref->data, picref->linesize, + picref->video->w, picref->video->h, + image->bitmap, image->stride, image->w, image->h, + 3, 0, image->dst_x, image->dst_y); + } +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) +{ + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + AssContext *ass = ctx->priv; + int detect_change = 0; + double time_ms = picref->pts * av_q2d(inlink->time_base) * 1000; + ASS_Image *image = ass_render_frame(ass->renderer, ass->track, + time_ms, &detect_change); + + if (detect_change) + av_log(ctx, AV_LOG_DEBUG, "Change happened at time ms:%f\n", time_ms); + + overlay_ass_image(ass, picref, image); + + return ff_filter_frame(outlink, picref); +} + +static const AVFilterPad ass_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .config_props = config_input, + .min_perms = AV_PERM_READ | AV_PERM_WRITE, + }, + { NULL } +}; + +static const AVFilterPad ass_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +#if CONFIG_ASS_FILTER + +#define ass_options options +AVFILTER_DEFINE_CLASS(ass); + +static av_cold int init_ass(AVFilterContext *ctx, const char *args) +{ + AssContext *ass = ctx->priv; + int ret = init(ctx, args, &ass_class); + + if (ret < 0) + return ret; + + ass->track = ass_read_file(ass->library, ass->filename, NULL); + if (!ass->track) { + av_log(ctx, AV_LOG_ERROR, + "Could not create a libass track when reading file '%s'\n", + ass->filename); + return AVERROR(EINVAL); + } + return 0; +} + +AVFilter avfilter_vf_ass = { + .name = "ass", + .description = NULL_IF_CONFIG_SMALL("Render subtitles onto input video using the libass library."), + .priv_size = sizeof(AssContext), + .init = init_ass, + .uninit = uninit, + .query_formats = query_formats, + .inputs = 
ass_inputs, + .outputs = ass_outputs, + .priv_class = &ass_class, +}; +#endif + +#if CONFIG_SUBTITLES_FILTER + +#define subtitles_options options +AVFILTER_DEFINE_CLASS(subtitles); + +static av_cold int init_subtitles(AVFilterContext *ctx, const char *args) +{ + int ret, sid; + AVFormatContext *fmt = NULL; + AVCodecContext *dec_ctx = NULL; + AVCodec *dec = NULL; + AVStream *st; + AVPacket pkt; + AssContext *ass = ctx->priv; + + /* Init libass */ + ret = init(ctx, args, &subtitles_class); + if (ret < 0) + return ret; + ass->track = ass_new_track(ass->library); + if (!ass->track) { + av_log(ctx, AV_LOG_ERROR, "Could not create a libass track\n"); + return AVERROR(EINVAL); + } + + /* Open subtitles file */ + ret = avformat_open_input(&fmt, ass->filename, NULL, NULL); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Unable to open %s\n", ass->filename); + goto end; + } + ret = avformat_find_stream_info(fmt, NULL); + if (ret < 0) + goto end; + + /* Locate subtitles stream */ + ret = av_find_best_stream(fmt, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Unable to locate subtitle stream in %s\n", + ass->filename); + goto end; + } + sid = ret; + st = fmt->streams[sid]; + + /* Open decoder */ + dec_ctx = st->codec; + dec = avcodec_find_decoder(dec_ctx->codec_id); + if (!dec) { + av_log(ctx, AV_LOG_ERROR, "Failed to find subtitle codec %s\n", + avcodec_get_name(dec_ctx->codec_id)); + return AVERROR(EINVAL); + } + ret = avcodec_open2(dec_ctx, dec, NULL); + if (ret < 0) + goto end; + + /* Decode subtitles and push them into the renderer (libass) */ + if (dec_ctx->subtitle_header) + ass_process_codec_private(ass->track, + dec_ctx->subtitle_header, + dec_ctx->subtitle_header_size); + av_init_packet(&pkt); + pkt.data = NULL; + pkt.size = 0; + while (av_read_frame(fmt, &pkt) >= 0) { + int i, got_subtitle; + AVSubtitle sub; + + if (pkt.stream_index == sid) { + ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_subtitle, &pkt); + if (ret < 0 || 
!got_subtitle) + break; + for (i = 0; i < sub.num_rects; i++) { + char *ass_line = sub.rects[i]->ass; + if (!ass_line) + break; + ass_process_data(ass->track, ass_line, strlen(ass_line)); + } + } + av_free_packet(&pkt); + avsubtitle_free(&sub); + } + +end: + if (dec_ctx) + avcodec_close(dec_ctx); + if (fmt) + avformat_close_input(&fmt); + return ret; +} + +AVFilter avfilter_vf_subtitles = { + .name = "subtitles", + .description = NULL_IF_CONFIG_SMALL("Render subtitles onto input video using the libass library."), + .priv_size = sizeof(AssContext), + .init = init_subtitles, + .uninit = uninit, + .query_formats = query_formats, + .inputs = ass_inputs, + .outputs = ass_outputs, + .priv_class = &subtitles_class, +}; +#endif diff --git a/libavfilter/vf_bbox.c b/libavfilter/vf_bbox.c new file mode 100644 index 0000000..33b96b5 --- /dev/null +++ b/libavfilter/vf_bbox.c @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * bounding box detection filter + */ + +#include "libavutil/pixdesc.h" +#include "libavutil/timestamp.h" +#include "avfilter.h" +#include "bbox.h" +#include "internal.h" + +typedef struct { + unsigned int frame; + int vsub, hsub; +} BBoxContext; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + BBoxContext *bbox = ctx->priv; + bbox->frame = 0; + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_YUV444P, + AV_PIX_FMT_YUV440P, + AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV411P, + AV_PIX_FMT_NONE, + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) +{ + AVFilterContext *ctx = inlink->dst; + BBoxContext *bbox = ctx->priv; + FFBoundingBox box; + int has_bbox, w, h; + + has_bbox = + ff_calculate_bounding_box(&box, + picref->data[0], picref->linesize[0], + inlink->w, inlink->h, 16); + w = box.x2 - box.x1 + 1; + h = box.y2 - box.y1 + 1; + + av_log(ctx, AV_LOG_INFO, + "n:%d pts:%s pts_time:%s", bbox->frame, + av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base)); + + if (has_bbox) { + av_log(ctx, AV_LOG_INFO, + " x1:%d x2:%d y1:%d y2:%d w:%d h:%d" + " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d", + box.x1, box.x2, box.y1, box.y2, w, h, + w, h, box.x1, box.y1, /* crop params */ + box.x1, box.y1, w, h); /* drawbox params */ + } + av_log(ctx, AV_LOG_INFO, "\n"); + + bbox->frame++; + return ff_filter_frame(inlink->dst->outputs[0], picref); +} + +static const AVFilterPad bbox_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .filter_frame = 
filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad bbox_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_bbox = { + .name = "bbox", + .description = NULL_IF_CONFIG_SMALL("Compute bounding box for each frame."), + .priv_size = sizeof(BBoxContext), + .query_formats = query_formats, + .init = init, + .inputs = bbox_inputs, + .outputs = bbox_outputs, +}; diff --git a/libavfilter/vf_blackdetect.c b/libavfilter/vf_blackdetect.c new file mode 100644 index 0000000..f2f2cfa --- /dev/null +++ b/libavfilter/vf_blackdetect.c @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Video black detector, loosely based on blackframe with extended + * syntax and features + */ + +#include <float.h> +#include "libavutil/opt.h" +#include "libavutil/timestamp.h" +#include "avfilter.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + double black_min_duration_time; ///< minimum duration of detected black, in seconds + int64_t black_min_duration; ///< minimum duration of detected black, expressed in timebase units + int64_t black_start; ///< pts start time of the first black picture + int64_t black_end; ///< pts end time of the last black picture + int64_t last_picref_pts; ///< pts of the last input picture + int black_started; + + double picture_black_ratio_th; + double pixel_black_th; + unsigned int pixel_black_th_i; + + unsigned int frame_count; ///< frame number + unsigned int nb_black_pixels; ///< number of black pixels counted so far +} BlackDetectContext; + +#define OFFSET(x) offsetof(BlackDetectContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption blackdetect_options[] = { + { "d", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS }, + { "black_min_duration", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS }, + { "picture_black_ratio_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS }, + { "pic_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS }, + { "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), 
AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS }, + { "pix_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(blackdetect); + +#define YUVJ_FORMATS \ + AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P + +static enum AVPixelFormat yuvj_formats[] = { + YUVJ_FORMATS, AV_PIX_FMT_NONE +}; + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NV12, + AV_PIX_FMT_NV21, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P, + YUVJ_FORMATS, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + BlackDetectContext *blackdetect = ctx->priv; + + blackdetect->class = &blackdetect_class; + av_opt_set_defaults(blackdetect); + + if ((ret = av_set_options_string(blackdetect, args, "=", ":")) < 0) + return ret; + + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + BlackDetectContext *blackdetect = ctx->priv; + + blackdetect->black_min_duration = + blackdetect->black_min_duration_time / av_q2d(inlink->time_base); + + blackdetect->pixel_black_th_i = ff_fmt_is_in(inlink->format, yuvj_formats) ? 
+ // luminance_minimum_value + pixel_black_th * luminance_range_size + blackdetect->pixel_black_th * 255 : + 16 + blackdetect->pixel_black_th * (235 - 16); + + av_log(blackdetect, AV_LOG_VERBOSE, + "black_min_duration:%s pixel_black_th:%f pixel_black_th_i:%d picture_black_ratio_th:%f\n", + av_ts2timestr(blackdetect->black_min_duration, &inlink->time_base), + blackdetect->pixel_black_th, blackdetect->pixel_black_th_i, + blackdetect->picture_black_ratio_th); + return 0; +} + +static void check_black_end(AVFilterContext *ctx) +{ + BlackDetectContext *blackdetect = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + + if ((blackdetect->black_end - blackdetect->black_start) >= blackdetect->black_min_duration) { + av_log(blackdetect, AV_LOG_INFO, + "black_start:%s black_end:%s black_duration:%s\n", + av_ts2timestr(blackdetect->black_start, &inlink->time_base), + av_ts2timestr(blackdetect->black_end, &inlink->time_base), + av_ts2timestr(blackdetect->black_end - blackdetect->black_start, &inlink->time_base)); + } +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + BlackDetectContext *blackdetect = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + int ret = ff_request_frame(inlink); + + if (ret == AVERROR_EOF && blackdetect->black_started) { + // FIXME: black_end should be set to last_picref_pts + last_picref_duration + blackdetect->black_end = blackdetect->last_picref_pts; + check_black_end(ctx); + } + return ret; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) +{ + AVFilterContext *ctx = inlink->dst; + BlackDetectContext *blackdetect = ctx->priv; + double picture_black_ratio = 0; + const uint8_t *p = picref->data[0]; + int x, i; + + for (i = 0; i < inlink->h; i++) { + for (x = 0; x < inlink->w; x++) + blackdetect->nb_black_pixels += p[x] <= blackdetect->pixel_black_th_i; + p += picref->linesize[0]; + } + + picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h); 
+ + av_log(ctx, AV_LOG_DEBUG, + "frame:%u picture_black_ratio:%f pos:%"PRId64" pts:%s t:%s type:%c\n", + blackdetect->frame_count, picture_black_ratio, + picref->pos, av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base), + av_get_picture_type_char(picref->video->pict_type)); + + if (picture_black_ratio >= blackdetect->picture_black_ratio_th) { + if (!blackdetect->black_started) { + /* black starts here */ + blackdetect->black_started = 1; + blackdetect->black_start = picref->pts; + } + } else if (blackdetect->black_started) { + /* black ends here */ + blackdetect->black_started = 0; + blackdetect->black_end = picref->pts; + check_black_end(ctx); + } + + blackdetect->last_picref_pts = picref->pts; + blackdetect->frame_count++; + blackdetect->nb_black_pixels = 0; + return ff_filter_frame(inlink->dst->outputs[0], picref); +} + +static const AVFilterPad blackdetect_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + .get_video_buffer = ff_null_get_video_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad blackdetect_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_blackdetect = { + .name = "blackdetect", + .description = NULL_IF_CONFIG_SMALL("Detect video intervals that are (almost) black."), + .priv_size = sizeof(BlackDetectContext), + .init = init, + .query_formats = query_formats, + .inputs = blackdetect_inputs, + .outputs = blackdetect_outputs, + .priv_class = &blackdetect_class, +}; diff --git a/libavfilter/vf_blackframe.c b/libavfilter/vf_blackframe.c index 275ebb2..cf1bcd3 100644 --- a/libavfilter/vf_blackframe.c +++ b/libavfilter/vf_blackframe.c @@ -4,20 +4,20 @@ * Copyright (c) 2006 Julian Hall * Copyright (c) 2002-2003 Brian J. Murrell * - * This file is part of Libav. + * This file is part of FFmpeg. 
* - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ @@ -32,6 +32,7 @@ #include "libavutil/internal.h" #include "avfilter.h" +#include "internal.h" #include "formats.h" #include "internal.h" #include "video.h" @@ -41,6 +42,7 @@ typedef struct { unsigned int bthresh; ///< black threshold unsigned int frame; ///< frame number unsigned int nblack; ///< number of black pixels counted so far + unsigned int last_keyframe; ///< frame number of the last received key-frame } BlackFrameContext; static int query_formats(AVFilterContext *ctx) @@ -63,6 +65,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) blackframe->bthresh = 32; blackframe->nblack = 0; blackframe->frame = 0; + blackframe->last_keyframe = 0; if (args) sscanf(args, "%u:%u", &blackframe->bamount, &blackframe->bthresh); @@ -92,11 +95,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) p += frame->linesize[0]; } + if (frame->video->key_frame) + blackframe->last_keyframe = blackframe->frame; + pblack = blackframe->nblack * 100 / (inlink->w * inlink->h); if (pblack >= blackframe->bamount) - av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" 
pts:%"PRId64" t:%f\n", + av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f " + "type:%c last_keyframe:%d\n", blackframe->frame, pblack, frame->pos, frame->pts, - frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base)); + frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base), + av_get_picture_type_char(frame->video->pict_type), blackframe->last_keyframe); blackframe->frame++; blackframe->nblack = 0; diff --git a/libavfilter/vf_boxblur.c b/libavfilter/vf_boxblur.c index d72c602..9ee3ea5 100644 --- a/libavfilter/vf_boxblur.c +++ b/libavfilter/vf_boxblur.c @@ -2,20 +2,20 @@ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> * Copyright (c) 2011 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ @@ -124,7 +124,7 @@ static av_cold void uninit(AVFilterContext *ctx) static int query_formats(AVFilterContext *ctx) { - enum AVPixelFormat pix_fmts[] = { + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8, @@ -148,14 +148,9 @@ static int config_input(AVFilterLink *inlink) char *expr; int ret; - av_freep(&boxblur->temp[0]); - av_freep(&boxblur->temp[1]); - if (!(boxblur->temp[0] = av_malloc(FFMAX(w, h)))) - return AVERROR(ENOMEM); - if (!(boxblur->temp[1] = av_malloc(FFMAX(w, h)))) { - av_freep(&boxblur->temp[0]); + if (!(boxblur->temp[0] = av_malloc(FFMAX(w, h))) || + !(boxblur->temp[1] = av_malloc(FFMAX(w, h)))) return AVERROR(ENOMEM); - } boxblur->hsub = desc->log2_chroma_w; boxblur->vsub = desc->log2_chroma_h; @@ -181,7 +176,7 @@ static int config_input(AVFilterLink *inlink) EVAL_RADIUS_EXPR(chroma); EVAL_RADIUS_EXPR(alpha); - av_log(ctx, AV_LOG_DEBUG, + av_log(ctx, AV_LOG_VERBOSE, "luma_radius:%d luma_power:%d " "chroma_radius:%d chroma_power:%d " "alpha_radius:%d alpha_power:%d " diff --git a/libavfilter/vf_colormatrix.c b/libavfilter/vf_colormatrix.c new file mode 100644 index 0000000..571b9d4 --- /dev/null +++ b/libavfilter/vf_colormatrix.c @@ -0,0 +1,388 @@ +/* + * ColorMatrix v2.2 for Avisynth 2.5.x + * + * Copyright (C) 2006-2007 Kevin Stone + * + * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + * License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/** + * @file + * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert + * Dijkhof. It adds the ability to convert between any of: Rec.709, FCC, + * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional, + * adds an option to use scaled or non-scaled coefficients, and more... + */ + +#include <float.h> +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" +#include "libavutil/pixdesc.h" +#include "libavutil/avstring.h" + +#define NS(n) n < 0 ? (int)(n*65536.0-0.5+DBL_EPSILON) : (int)(n*65536.0+0.5) +#define CB(n) av_clip_uint8(n) + +static const double yuv_coeff[4][3][3] = { + { { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0) + { -0.3850, +0.5000, -0.1150 }, + { -0.4540, -0.0460, +0.5000 } }, + { { +0.5900, +0.1100, +0.3000 }, // FCC (1) + { -0.3310, +0.5000, -0.1690 }, + { -0.4210, -0.0790, +0.5000 } }, + { { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2) + { -0.3313, +0.5000, -0.1687 }, + { -0.4187, -0.0813, +0.5000 } }, + { { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3) + { -0.3840, +0.5000, -0.1160 }, + { -0.4450, -0.0550, +0.5000 } }, +}; + +typedef struct { + int yuv_convert[16][3][3]; + int interlaced; + int source, dest, mode; + char src[256]; + char dst[256]; + int hsub, vsub; +} ColorMatrixContext; + +#define ma m[0][0] +#define mb m[0][1] +#define mc m[0][2] +#define md m[1][0] +#define me m[1][1] +#define mf m[1][2] +#define mg m[2][0] +#define mh m[2][1] +#define mi m[2][2] + +#define ima im[0][0] +#define imb im[0][1] +#define imc im[0][2] +#define imd im[1][0] +#define ime im[1][1] +#define imf im[1][2] +#define img im[2][0] +#define imh im[2][1] +#define imi im[2][2] + +static void inverse3x3(double im[3][3], const double m[3][3]) +{ + 
double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg); + det = 1.0 / det; + ima = det * (me * mi - mf * mh); + imb = det * (mc * mh - mb * mi); + imc = det * (mb * mf - mc * me); + imd = det * (mf * mg - md * mi); + ime = det * (ma * mi - mc * mg); + imf = det * (mc * md - ma * mf); + img = det * (md * mh - me * mg); + imh = det * (mb * mg - ma * mh); + imi = det * (ma * me - mb * md); +} + +static void solve_coefficients(double cm[3][3], double rgb[3][3], const double yuv[3][3]) +{ + int i, j; + for (i = 0; i < 3; i++) + for (j = 0; j < 3; j++) + cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j]; +} + +static void calc_coefficients(AVFilterContext *ctx) +{ + ColorMatrixContext *color = ctx->priv; + double rgb_coeffd[4][3][3]; + double yuv_convertd[16][3][3]; + int v = 0; + int i, j, k; + + for (i = 0; i < 4; i++) + inverse3x3(rgb_coeffd[i], yuv_coeff[i]); + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]); + for (k = 0; k < 3; k++) { + color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]); + color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]); + color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]); + } + if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 || + color->yuv_convert[v][2][0] != 0) { + av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n"); + } + v++; + } + } +} + +static const char *color_modes[] = {"bt709", "FCC", "bt601", "smpte240m"}; + +static int get_color_mode_index(const char *name) +{ + int i; + + for (i = 0; i < FF_ARRAY_ELEMS(color_modes); i++) + if (!av_strcasecmp(color_modes[i], name)) + return i; + return -1; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ColorMatrixContext *color = ctx->priv; + + if (!args) + goto usage; + if (sscanf(args, "%255[^:]:%255[^:]", color->src, color->dst) != 2) { + usage: + av_log(ctx, 
AV_LOG_ERROR, "usage: <src>:<dst>\n"); + av_log(ctx, AV_LOG_ERROR, "possible options: bt709,bt601,smpte240m,fcc\n"); + return -1; + } + + color->source = get_color_mode_index(color->src); + if (color->source < 0) { + av_log(ctx, AV_LOG_ERROR, "unknown color space %s\n", color->src); + return AVERROR(EINVAL); + } + + color->dest = get_color_mode_index(color->dst); + if (color->dest < 0) { + av_log(ctx, AV_LOG_ERROR, "unknown color space %s\n", color->dst); + return AVERROR(EINVAL); + } + + if (color->source == color->dest) { + av_log(ctx, AV_LOG_ERROR, "source and destination color space are identical\n"); + return AVERROR(EINVAL); + } + + color->mode = color->source * 4 + color->dest; + + calc_coefficients(ctx); + + return 0; +} + +static void process_frame_uyvy422(ColorMatrixContext *color, + AVFilterBufferRef *dst, AVFilterBufferRef *src) +{ + const unsigned char *srcp = src->data[0]; + const int src_pitch = src->linesize[0]; + const int height = src->video->h; + const int width = src->video->w*2; + unsigned char *dstp = dst->data[0]; + const int dst_pitch = dst->linesize[0]; + const int c2 = color->yuv_convert[color->mode][0][1]; + const int c3 = color->yuv_convert[color->mode][0][2]; + const int c4 = color->yuv_convert[color->mode][1][1]; + const int c5 = color->yuv_convert[color->mode][1][2]; + const int c6 = color->yuv_convert[color->mode][2][1]; + const int c7 = color->yuv_convert[color->mode][2][2]; + int x, y; + + for (y = 0; y < height; y++) { + for (x = 0; x < width; x += 4) { + const int u = srcp[x + 0] - 128; + const int v = srcp[x + 2] - 128; + const int uvval = c2 * u + c3 * v + 1081344; + dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16); + dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16); + dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16); + dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16); + } + srcp += src_pitch; + dstp += dst_pitch; + } +} + +static void process_frame_yuv422p(ColorMatrixContext *color, + 
AVFilterBufferRef *dst, AVFilterBufferRef *src) +{ + const unsigned char *srcpU = src->data[1]; + const unsigned char *srcpV = src->data[2]; + const unsigned char *srcpY = src->data[0]; + const int src_pitchY = src->linesize[0]; + const int src_pitchUV = src->linesize[1]; + const int height = src->video->h; + const int width = src->video->w; + unsigned char *dstpU = dst->data[1]; + unsigned char *dstpV = dst->data[2]; + unsigned char *dstpY = dst->data[0]; + const int dst_pitchY = dst->linesize[0]; + const int dst_pitchUV = dst->linesize[1]; + const int c2 = color->yuv_convert[color->mode][0][1]; + const int c3 = color->yuv_convert[color->mode][0][2]; + const int c4 = color->yuv_convert[color->mode][1][1]; + const int c5 = color->yuv_convert[color->mode][1][2]; + const int c6 = color->yuv_convert[color->mode][2][1]; + const int c7 = color->yuv_convert[color->mode][2][2]; + int x, y; + + for (y = 0; y < height; y++) { + for (x = 0; x < width; x += 2) { + const int u = srcpU[x >> 1] - 128; + const int v = srcpV[x >> 1] - 128; + const int uvval = c2 * u + c3 * v + 1081344; + dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16); + dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16); + dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16); + dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16); + } + srcpY += src_pitchY; + dstpY += dst_pitchY; + srcpU += src_pitchUV; + srcpV += src_pitchUV; + dstpU += dst_pitchUV; + dstpV += dst_pitchUV; + } +} + +static void process_frame_yuv420p(ColorMatrixContext *color, + AVFilterBufferRef *dst, AVFilterBufferRef *src) +{ + const unsigned char *srcpU = src->data[1]; + const unsigned char *srcpV = src->data[2]; + const unsigned char *srcpY = src->data[0]; + const unsigned char *srcpN = src->data[0] + src->linesize[0]; + const int src_pitchY = src->linesize[0]; + const int src_pitchUV = src->linesize[1]; + const int height = src->video->h; + const int width = src->video->w; + unsigned char *dstpU = dst->data[1]; 
+ unsigned char *dstpV = dst->data[2]; + unsigned char *dstpY = dst->data[0]; + unsigned char *dstpN = dst->data[0] + dst->linesize[0]; + const int dst_pitchY = dst->linesize[0]; + const int dst_pitchUV = dst->linesize[1]; + const int c2 = color->yuv_convert[color->mode][0][1]; + const int c3 = color->yuv_convert[color->mode][0][2]; + const int c4 = color->yuv_convert[color->mode][1][1]; + const int c5 = color->yuv_convert[color->mode][1][2]; + const int c6 = color->yuv_convert[color->mode][2][1]; + const int c7 = color->yuv_convert[color->mode][2][2]; + int x, y; + + for (y = 0; y < height; y += 2) { + for (x = 0; x < width; x += 2) { + const int u = srcpU[x >> 1] - 128; + const int v = srcpV[x >> 1] - 128; + const int uvval = c2 * u + c3 * v + 1081344; + dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16); + dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16); + dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16); + dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16); + dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16); + dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16); + } + srcpY += src_pitchY << 1; + dstpY += dst_pitchY << 1; + srcpN += src_pitchY << 1; + dstpN += dst_pitchY << 1; + srcpU += src_pitchUV; + srcpV += src_pitchUV; + dstpU += dst_pitchUV; + dstpV += dst_pitchUV; + } +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + ColorMatrixContext *color = ctx->priv; + const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); + + color->hsub = pix_desc->log2_chroma_w; + color->vsub = pix_desc->log2_chroma_h; + + av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n", color->src, color->dst); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_UYVY422, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + + 
return 0; +} + +static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) +{ + AVFilterContext *ctx = link->dst; + ColorMatrixContext *color = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterBufferRef *out; + + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!out) { + avfilter_unref_bufferp(&in); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(out, in); + + if (in->format == AV_PIX_FMT_YUV422P) + process_frame_yuv422p(color, out, in); + else if (in->format == AV_PIX_FMT_YUV420P) + process_frame_yuv420p(color, out, in); + else + process_frame_uyvy422(color, out, in); + + avfilter_unref_bufferp(&in); + return ff_filter_frame(outlink, out); +} + +static const AVFilterPad colormatrix_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + .min_perms = AV_PERM_READ, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad colormatrix_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_colormatrix = { + .name = "colormatrix", + .description = NULL_IF_CONFIG_SMALL("Color matrix conversion"), + + .priv_size = sizeof(ColorMatrixContext), + .init = init, + .query_formats = query_formats, + .inputs = colormatrix_inputs, + .outputs = colormatrix_outputs, +}; diff --git a/libavfilter/vf_copy.c b/libavfilter/vf_copy.c index 8ece5cf..a25e282 100644 --- a/libavfilter/vf_copy.c +++ b/libavfilter/vf_copy.c @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c index 981dfd6..8df9595 100644 --- a/libavfilter/vf_crop.c +++ b/libavfilter/vf_crop.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -37,15 +37,18 @@ #include "libavutil/libm.h" #include "libavutil/imgutils.h" #include "libavutil/mathematics.h" +#include "libavutil/opt.h" static const char *const var_names[] = { - "E", - "PHI", - "PI", "in_w", "iw", ///< width of the input video "in_h", "ih", ///< height of the input video "out_w", "ow", ///< width of the cropped video "out_h", "oh", ///< height of the cropped video + "a", + "sar", + "dar", + "hsub", + "vsub", "x", "y", "n", ///< number of frame @@ -55,13 +58,15 @@ static const char *const var_names[] = { }; enum var_name { - VAR_E, - VAR_PHI, - VAR_PI, VAR_IN_W, VAR_IW, VAR_IN_H, VAR_IH, VAR_OUT_W, VAR_OW, VAR_OUT_H, VAR_OH, + VAR_A, + VAR_SAR, + VAR_DAR, + VAR_HSUB, + VAR_VSUB, VAR_X, VAR_Y, VAR_N, @@ -71,18 +76,58 @@ enum var_name { }; typedef struct { + const AVClass *class; int x; ///< x offset of the non-cropped area with respect to the input area int y; ///< y offset of the non-cropped area with respect to the input area int w; ///< width of the cropped area int h; ///< height of the cropped area + AVRational out_sar; ///< output sample aspect ratio + int keep_aspect; ///< keep display aspect ratio when cropping + int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes int hsub, vsub; ///< chroma subsampling - char x_expr[256], y_expr[256], ow_expr[256], oh_expr[256]; + char *x_expr, *y_expr, *w_expr, *h_expr; AVExpr *x_pexpr, *y_pexpr; /* parsed expressions for x and y */ double var_values[VAR_VARS_NB]; } CropContext; +#define OFFSET(x) offsetof(CropContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption crop_options[] = { + { "x", "set the x crop area expression", OFFSET(x_expr), 
AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + {NULL} +}; + +AVFILTER_DEFINE_CLASS(crop); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + CropContext *crop = ctx->priv; + static const char *shorthand[] = { "w", "h", "x", "y", "keep_aspect", NULL }; + + crop->class = &crop_class; + av_opt_set_defaults(crop); + + return av_opt_set_from_string(crop, args, shorthand, "=", ":"); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + CropContext *crop = ctx->priv; + + av_expr_free(crop->x_pexpr); crop->x_pexpr = NULL; + av_expr_free(crop->y_pexpr); crop->y_pexpr = NULL; + av_opt_free(crop); +} + static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pix_fmts[] = { @@ -116,29 +161,6 @@ static int query_formats(AVFilterContext *ctx) return 0; } -static av_cold int init(AVFilterContext *ctx, const char *args) -{ - CropContext *crop = ctx->priv; - - av_strlcpy(crop->ow_expr, "iw", sizeof(crop->ow_expr)); - av_strlcpy(crop->oh_expr, "ih", sizeof(crop->oh_expr)); - av_strlcpy(crop->x_expr, "(in_w-out_w)/2", sizeof(crop->x_expr)); - av_strlcpy(crop->y_expr, "(in_h-out_h)/2", sizeof(crop->y_expr)); - - if (args) - sscanf(args, 
"%255[^:]:%255[^:]:%255[^:]:%255[^:]", crop->ow_expr, crop->oh_expr, crop->x_expr, crop->y_expr); - - return 0; -} - -static av_cold void uninit(AVFilterContext *ctx) -{ - CropContext *crop = ctx->priv; - - av_expr_free(crop->x_pexpr); crop->x_pexpr = NULL; - av_expr_free(crop->y_pexpr); crop->y_pexpr = NULL; -} - static inline int normalize_double(int *n, double d) { int ret = 0; @@ -163,11 +185,13 @@ static int config_input(AVFilterLink *link) const char *expr; double res; - crop->var_values[VAR_E] = M_E; - crop->var_values[VAR_PHI] = M_PHI; - crop->var_values[VAR_PI] = M_PI; crop->var_values[VAR_IN_W] = crop->var_values[VAR_IW] = ctx->inputs[0]->w; crop->var_values[VAR_IN_H] = crop->var_values[VAR_IH] = ctx->inputs[0]->h; + crop->var_values[VAR_A] = (float) link->w / link->h; + crop->var_values[VAR_SAR] = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1; + crop->var_values[VAR_DAR] = crop->var_values[VAR_A] * crop->var_values[VAR_SAR]; + crop->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w; + crop->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h; crop->var_values[VAR_X] = NAN; crop->var_values[VAR_Y] = NAN; crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = NAN; @@ -180,16 +204,16 @@ static int config_input(AVFilterLink *link) crop->hsub = pix_desc->log2_chroma_w; crop->vsub = pix_desc->log2_chroma_h; - if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr), + if ((ret = av_expr_parse_and_eval(&res, (expr = crop->w_expr), var_names, crop->var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr; crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res; - if ((ret = av_expr_parse_and_eval(&res, (expr = crop->oh_expr), + if ((ret = av_expr_parse_and_eval(&res, (expr = crop->h_expr), var_names, crop->var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr; crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = res; /* evaluate again ow as it may depend on oh */ - if ((ret = 
av_expr_parse_and_eval(&res, (expr = crop->ow_expr), + if ((ret = av_expr_parse_and_eval(&res, (expr = crop->w_expr), var_names, crop->var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr; crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res; @@ -198,7 +222,7 @@ static int config_input(AVFilterLink *link) av_log(ctx, AV_LOG_ERROR, "Too big value or invalid expression for out_w/ow or out_h/oh. " "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n", - crop->ow_expr, crop->oh_expr); + crop->w_expr, crop->h_expr); return AVERROR(EINVAL); } crop->w &= ~((1 << crop->hsub) - 1); @@ -210,8 +234,17 @@ static int config_input(AVFilterLink *link) NULL, NULL, NULL, NULL, 0, ctx)) < 0) return AVERROR(EINVAL); - av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n", - link->w, link->h, crop->w, crop->h); + if (crop->keep_aspect) { + AVRational dar = av_mul_q(link->sample_aspect_ratio, + (AVRational){ link->w, link->h }); + av_reduce(&crop->out_sar.num, &crop->out_sar.den, + dar.num * crop->h, dar.den * crop->w, INT_MAX); + } else + crop->out_sar = link->sample_aspect_ratio; + + av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n", + link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den, + crop->w, crop->h, crop->out_sar.num, crop->out_sar.den); if (crop->w <= 0 || crop->h <= 0 || crop->w > link->w || crop->h > link->h) { @@ -239,6 +272,7 @@ static int config_output(AVFilterLink *link) link->w = crop->w; link->h = crop->h; + link->sample_aspect_ratio = crop->out_sar; return 0; } @@ -329,4 +363,5 @@ AVFilter avfilter_vf_crop = { .inputs = avfilter_vf_crop_inputs, .outputs = avfilter_vf_crop_outputs, + .priv_class = &crop_class, }; diff --git a/libavfilter/vf_cropdetect.c b/libavfilter/vf_cropdetect.c index eebd8bc..f91c522 100644 --- a/libavfilter/vf_cropdetect.c +++ b/libavfilter/vf_cropdetect.c @@ -1,19 +1,19 @@ /* * Copyright (c) 2002 A'rpi - * This file is part of Libav. 
+ * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ diff --git a/libavfilter/vf_decimate.c b/libavfilter/vf_decimate.c new file mode 100644 index 0000000..0d89b81 --- /dev/null +++ b/libavfilter/vf_decimate.c @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2003 Rich Felker + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +/** + * @file decimate filter, ported from libmpcodecs/vf_decimate.c by + * Rich Felker. + */ + +#include "libavutil/pixdesc.h" +#include "libavutil/timestamp.h" +#include "libavcodec/dsputil.h" +#include "avfilter.h" +#include "internal.h" +#include "formats.h" +#include "video.h" + +typedef struct { + int lo, hi; ///< lower and higher threshold number of differences + ///< values for 8x8 blocks + + float frac; ///< threshold of changed pixels over the total fraction + + int max_drop_count; ///< if positive: maximum number of sequential frames to drop + ///< if negative: minimum number of frames between two drops + + int drop_count; ///< if positive: number of frames sequentially dropped + ///< if negative: number of sequential frames which were not dropped + + int hsub, vsub; ///< chroma subsampling values + AVFilterBufferRef *ref; ///< reference picture + DSPContext dspctx; ///< context providing optimized diff routines + AVCodecContext *avctx; ///< codec context required for the DSPContext +} DecimateContext; + +/** + * Return 1 if the two planes are different, 0 otherwise. + */ +static int diff_planes(AVFilterContext *ctx, + uint8_t *cur, uint8_t *ref, int linesize, + int w, int h) +{ + DecimateContext *decimate = ctx->priv; + DSPContext *dspctx = &decimate->dspctx; + + int x, y; + int d, c = 0; + int t = (w/16)*(h/16)*decimate->frac; + int16_t block[8*8]; + + /* compute difference for blocks of 8x8 bytes */ + for (y = 0; y < h-7; y += 4) { + for (x = 8; x < w-7; x += 4) { + dspctx->diff_pixels(block, + cur+x+y*linesize, + ref+x+y*linesize, linesize); + d = dspctx->sum_abs_dctelem(block); + if (d > decimate->hi) + return 1; + if (d > decimate->lo) { + c++; + if (c > t) + return 1; + } + } + } + return 0; +} + +/** + * Tell if the frame should be decimated, for example if it is no much + * different with respect to the reference frame ref. 
+ */ +static int decimate_frame(AVFilterContext *ctx, + AVFilterBufferRef *cur, AVFilterBufferRef *ref) +{ + DecimateContext *decimate = ctx->priv; + int plane; + + if (decimate->max_drop_count > 0 && + decimate->drop_count >= decimate->max_drop_count) + return 0; + if (decimate->max_drop_count < 0 && + (decimate->drop_count-1) > decimate->max_drop_count) + return 0; + + for (plane = 0; ref->data[plane] && ref->linesize[plane]; plane++) { + int vsub = plane == 1 || plane == 2 ? decimate->vsub : 0; + int hsub = plane == 1 || plane == 2 ? decimate->hsub : 0; + if (diff_planes(ctx, + cur->data[plane], ref->data[plane], ref->linesize[plane], + ref->video->w>>hsub, ref->video->h>>vsub)) + return 0; + } + + return 1; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + DecimateContext *decimate = ctx->priv; + + /* set default values */ + decimate->drop_count = decimate->max_drop_count = 0; + decimate->lo = 64*5; + decimate->hi = 64*12; + decimate->frac = 0.33; + + if (args) { + char c1, c2, c3, c4; + int n = sscanf(args, "%d%c%d%c%d%c%f%c", + &decimate->max_drop_count, &c1, + &decimate->hi, &c2, &decimate->lo, &c3, + &decimate->frac, &c4); + if (n != 1 && + (n != 3 || c1 != ':') && + (n != 5 || c1 != ':' || c2 != ':') && + (n != 7 || c1 != ':' || c2 != ':' || c3 != ':')) { + av_log(ctx, AV_LOG_ERROR, + "Invalid syntax for argument '%s': " + "must be in the form 'max:hi:lo:frac'\n", args); + return AVERROR(EINVAL); + } + } + + av_log(ctx, AV_LOG_VERBOSE, "max_drop_count:%d hi:%d lo:%d frac:%f\n", + decimate->max_drop_count, decimate->hi, decimate->lo, decimate->frac); + + decimate->avctx = avcodec_alloc_context3(NULL); + if (!decimate->avctx) + return AVERROR(ENOMEM); + dsputil_init(&decimate->dspctx, decimate->avctx); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + DecimateContext *decimate = ctx->priv; + avfilter_unref_bufferp(&decimate->ref); + avcodec_close(decimate->avctx); + av_freep(&decimate->avctx); +} + +static int 
query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, + AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, + AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, + AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P, + AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + DecimateContext *decimate = ctx->priv; + const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); + decimate->hsub = pix_desc->log2_chroma_w; + decimate->vsub = pix_desc->log2_chroma_h; + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur) +{ + DecimateContext *decimate = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + int ret; + + if (decimate->ref && decimate_frame(inlink->dst, cur, decimate->ref)) { + decimate->drop_count = FFMAX(1, decimate->drop_count+1); + } else { + avfilter_unref_buffer(decimate->ref); + decimate->ref = cur; + decimate->drop_count = FFMIN(-1, decimate->drop_count-1); + + if (ret = ff_filter_frame(outlink, avfilter_ref_buffer(cur, ~AV_PERM_WRITE)) < 0) + return ret; + } + + av_log(inlink->dst, AV_LOG_DEBUG, + "%s pts:%s pts_time:%s drop_count:%d\n", + decimate->drop_count > 0 ? 
"drop" : "keep", + av_ts2str(cur->pts), av_ts2timestr(cur->pts, &inlink->time_base), + decimate->drop_count); + + if (decimate->drop_count > 0) + avfilter_unref_buffer(cur); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + DecimateContext *decimate = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + do { + ret = ff_request_frame(inlink); + } while (decimate->drop_count > 0 && ret >= 0); + + return ret; +} + +static const AVFilterPad decimate_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .config_props = config_input, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }, + { NULL } +}; + +static const AVFilterPad decimate_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_decimate = { + .name = "decimate", + .description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."), + .init = init, + .uninit = uninit, + + .priv_size = sizeof(DecimateContext), + .query_formats = query_formats, + .inputs = decimate_inputs, + .outputs = decimate_outputs, +}; diff --git a/libavfilter/vf_delogo.c b/libavfilter/vf_delogo.c index af479c2..bf0ac62 100644 --- a/libavfilter/vf_delogo.c +++ b/libavfilter/vf_delogo.c @@ -2,20 +2,20 @@ * Copyright (c) 2002 Jindrich Makovicka <makovick@gmail.com> * Copyright (c) 2011 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ @@ -90,18 +90,22 @@ static void apply_delogo(uint8_t *dst, int dst_linesize, for (x = logo_x1+1, xdst = dst+logo_x1+1, xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) { - interp = (topleft[src_linesize*(y-logo_y -yclipt)] + - topleft[src_linesize*(y-logo_y-1-yclipt)] + - topleft[src_linesize*(y-logo_y+1-yclipt)]) * (logo_w-(x-logo_x))/logo_w - + (topright[src_linesize*(y-logo_y-yclipt)] + - topright[src_linesize*(y-logo_y-1-yclipt)] + - topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w - + (topleft[x-logo_x-xclipl] + - topleft[x-logo_x-1-xclipl] + - topleft[x-logo_x+1-xclipl]) * (logo_h-(y-logo_y))/logo_h - + (botleft[x-logo_x-xclipl] + - botleft[x-logo_x-1-xclipl] + - botleft[x-logo_x+1-xclipl]) * (y-logo_y)/logo_h; + interp = + (topleft[src_linesize*(y-logo_y -yclipt)] + + topleft[src_linesize*(y-logo_y-1-yclipt)] + + topleft[src_linesize*(y-logo_y+1-yclipt)]) * (logo_w-(x-logo_x))/logo_w + + + (topright[src_linesize*(y-logo_y-yclipt)] + + topright[src_linesize*(y-logo_y-1-yclipt)] + + topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w + + + (topleft[x-logo_x-xclipl] + + topleft[x-logo_x-1-xclipl] + + topleft[x-logo_x+1-xclipl]) * (logo_h-(y-logo_y))/logo_h + + + (botleft[x-logo_x-xclipl] + + botleft[x-logo_x-1-xclipl] + + botleft[x-logo_x+1-xclipl]) * (y-logo_y)/logo_h; interp /= 6; if (y >= logo_y+band && y < logo_y+logo_h-band && @@ -136,32 +140,24 @@ 
typedef struct { } DelogoContext; #define OFFSET(x) offsetof(DelogoContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM static const AVOption delogo_options[]= { - {"x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX }, - {"y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX }, - {"w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX }, - {"h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX }, - {"band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, {.i64 = 4}, -1, INT_MAX }, - {"t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, {.i64 = 4}, -1, INT_MAX }, - {"show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1 }, + {"x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS}, + {"y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS}, + {"w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS}, + {"h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS}, + {"band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, {.i64 = 4}, -1, INT_MAX, FLAGS}, + {"t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, {.i64 = 4}, -1, INT_MAX, FLAGS}, + {"show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS}, {NULL}, }; -static const char *delogo_get_name(void *ctx) -{ - return "delogo"; -} - -static const AVClass delogo_class = { - .class_name = "DelogoContext", - .item_name = delogo_get_name, - .option = delogo_options, -}; +AVFILTER_DEFINE_CLASS(delogo); static int query_formats(AVFilterContext *ctx) { - enum AVPixelFormat pix_fmts[] = { + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVA420P, 
AV_PIX_FMT_GRAY8, @@ -186,10 +182,8 @@ static av_cold int init(AVFilterContext *ctx, const char *args) if (ret == 5) { if (delogo->band < 0) delogo->show = 1; - } else if ((ret = (av_set_options_string(delogo, args, "=", ":"))) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args); + } else if ((ret = (av_set_options_string(delogo, args, "=", ":"))) < 0) return ret; - } #define CHECK_UNSET_OPT(opt) \ if (delogo->opt == -1) { \ @@ -204,7 +198,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) if (delogo->show) delogo->band = 4; - av_log(ctx, AV_LOG_DEBUG, "x:%d y:%d, w:%d h:%d band:%d show:%d\n", + av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d, w:%d h:%d band:%d show:%d\n", delogo->x, delogo->y, delogo->w, delogo->h, delogo->band, delogo->show); delogo->w += delogo->band*2; @@ -226,7 +220,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) int direct = 0; int plane; - if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) { + if (in->perms & AV_PERM_WRITE) { direct = 1; out = in; } else { @@ -235,10 +229,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) avfilter_unref_bufferp(&in); return AVERROR(ENOMEM); } - avfilter_copy_buffer_ref_props(out, in); - out->video->w = outlink->w; - out->video->h = outlink->h; } for (plane = 0; plane < 4 && in->data[plane]; plane++) { @@ -267,7 +258,6 @@ static const AVFilterPad avfilter_vf_delogo_inputs[] = { .get_video_buffer = ff_null_get_video_buffer, .filter_frame = filter_frame, .min_perms = AV_PERM_WRITE | AV_PERM_READ, - .rej_perms = AV_PERM_PRESERVE }, { NULL } }; @@ -289,4 +279,5 @@ AVFilter avfilter_vf_delogo = { .inputs = avfilter_vf_delogo_inputs, .outputs = avfilter_vf_delogo_outputs, + .priv_class = &delogo_class, }; diff --git a/libavfilter/vf_deshake.c b/libavfilter/vf_deshake.c new file mode 100644 index 0000000..c03919c --- /dev/null +++ b/libavfilter/vf_deshake.c @@ -0,0 +1,568 @@ +/* + * Copyright (C) 2010 Georg 
Martius <georg.martius@web.de> + * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * fast deshake / depan video filter + * + * SAD block-matching motion compensation to fix small changes in + * horizontal and/or vertical shift. This filter helps remove camera shake + * from hand-holding a camera, bumping a tripod, moving on a vehicle, etc. + * + * Algorithm: + * - For each frame with one previous reference frame + * - For each block in the frame + * - If contrast > threshold then find likely motion vector + * - For all found motion vectors + * - Find most common, store as global motion vector + * - Find most likely rotation angle + * - Transform image along global motion + * + * TODO: + * - Fill frame edges based on previous/next reference frames + * - Fill frame edges by stretching image near the edges? + * - Can this be done quickly and look decent? 
+ * + * Dark Shikari links to http://wiki.videolan.org/SoC_x264_2010#GPU_Motion_Estimation_2 + * for an algorithm similar to what could be used here to get the gmv + * It requires only a couple diamond searches + fast downscaling + * + * Special thanks to Jason Kotenko for his help with the algorithm and my + * inability to see simple errors in C code. + */ + +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" +#include "libavutil/common.h" +#include "libavutil/mem.h" +#include "libavutil/pixdesc.h" +#include "libavcodec/dsputil.h" + +#include "transform.h" + +#define CHROMA_WIDTH(link) -((-link->w) >> av_pix_fmt_desc_get(link->format)->log2_chroma_w) +#define CHROMA_HEIGHT(link) -((-link->h) >> av_pix_fmt_desc_get(link->format)->log2_chroma_h) + +enum SearchMethod { + EXHAUSTIVE, ///< Search all possible positions + SMART_EXHAUSTIVE, ///< Search most possible positions (faster) + SEARCH_COUNT +}; + +typedef struct { + int x; ///< Horizontal shift + int y; ///< Vertical shift +} IntMotionVector; + +typedef struct { + double x; ///< Horizontal shift + double y; ///< Vertical shift +} MotionVector; + +typedef struct { + MotionVector vector; ///< Motion vector + double angle; ///< Angle of rotation + double zoom; ///< Zoom percentage +} Transform; + +typedef struct { + AVClass av_class; + AVFilterBufferRef *ref; ///< Previous frame + int rx; ///< Maximum horizontal shift + int ry; ///< Maximum vertical shift + int edge; ///< Edge fill method + int blocksize; ///< Size of blocks to compare + int contrast; ///< Contrast threshold + int search; ///< Motion search method + AVCodecContext *avctx; + DSPContext c; ///< Context providing optimized SAD methods + Transform last; ///< Transform from last frame + int refcount; ///< Number of reference frames (defines averaging window) + FILE *fp; + Transform avg; + int cw; ///< Crop motion search to this box + int ch; + int cx; + int cy; +} DeshakeContext; + +static int cmp(const double *a, 
const double *b) +{ + return *a < *b ? -1 : ( *a > *b ? 1 : 0 ); +} + +/** + * Cleaned mean (cuts off 20% of values to remove outliers and then averages) + */ +static double clean_mean(double *values, int count) +{ + double mean = 0; + int cut = count / 5; + int x; + + qsort(values, count, sizeof(double), (void*)cmp); + + for (x = cut; x < count - cut; x++) { + mean += values[x]; + } + + return mean / (count - cut * 2); +} + +/** + * Find the most likely shift in motion between two frames for a given + * macroblock. Test each block against several shifts given by the rx + * and ry attributes. Searches using a simple matrix of those shifts and + * chooses the most likely shift by the smallest difference in blocks. + */ +static void find_block_motion(DeshakeContext *deshake, uint8_t *src1, + uint8_t *src2, int cx, int cy, int stride, + IntMotionVector *mv) +{ + int x, y; + int diff; + int smallest = INT_MAX; + int tmp, tmp2; + + #define CMP(i, j) deshake->c.sad[0](deshake, src1 + cy * stride + cx, \ + src2 + (j) * stride + (i), stride, \ + deshake->blocksize) + + if (deshake->search == EXHAUSTIVE) { + // Compare every possible position - this is sloooow! 
+ for (y = -deshake->ry; y <= deshake->ry; y++) { + for (x = -deshake->rx; x <= deshake->rx; x++) { + diff = CMP(cx - x, cy - y); + if (diff < smallest) { + smallest = diff; + mv->x = x; + mv->y = y; + } + } + } + } else if (deshake->search == SMART_EXHAUSTIVE) { + // Compare every other possible position and find the best match + for (y = -deshake->ry + 1; y < deshake->ry - 2; y += 2) { + for (x = -deshake->rx + 1; x < deshake->rx - 2; x += 2) { + diff = CMP(cx - x, cy - y); + if (diff < smallest) { + smallest = diff; + mv->x = x; + mv->y = y; + } + } + } + + // Hone in on the specific best match around the match we found above + tmp = mv->x; + tmp2 = mv->y; + + for (y = tmp2 - 1; y <= tmp2 + 1; y++) { + for (x = tmp - 1; x <= tmp + 1; x++) { + if (x == tmp && y == tmp2) + continue; + + diff = CMP(cx - x, cy - y); + if (diff < smallest) { + smallest = diff; + mv->x = x; + mv->y = y; + } + } + } + } + + if (smallest > 512) { + mv->x = -1; + mv->y = -1; + } + emms_c(); + //av_log(NULL, AV_LOG_ERROR, "%d\n", smallest); + //av_log(NULL, AV_LOG_ERROR, "Final: (%d, %d) = %d x %d\n", cx, cy, mv->x, mv->y); +} + +/** + * Find the contrast of a given block. When searching for global motion we + * really only care about the high contrast blocks, so using this method we + * can actually skip blocks we don't care much about. + */ +static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize) +{ + int highest = 0; + int lowest = 0; + int i, j, pos; + + for (i = 0; i <= blocksize * 2; i++) { + // We use a width of 16 here to match the libavcodec sad functions + for (j = 0; i <= 15; i++) { + pos = (y - i) * stride + (x - j); + if (src[pos] < lowest) + lowest = src[pos]; + else if (src[pos] > highest) { + highest = src[pos]; + } + } + } + + return highest - lowest; +} + +/** + * Find the rotation for a given block. 
+ */ +static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift) +{ + double a1, a2, diff; + + a1 = atan2(y - cy, x - cx); + a2 = atan2(y - cy + shift->y, x - cx + shift->x); + + diff = a2 - a1; + + return (diff > M_PI) ? diff - 2 * M_PI : + (diff < -M_PI) ? diff + 2 * M_PI : + diff; +} + +/** + * Find the estimated global motion for a scene given the most likely shift + * for each block in the frame. The global motion is estimated to be the + * same as the motion from most blocks in the frame, so if most blocks + * move one pixel to the right and two pixels down, this would yield a + * motion vector (1, -2). + */ +static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2, + int width, int height, int stride, Transform *t) +{ + int x, y; + IntMotionVector mv = {0, 0}; + int counts[128][128]; + int count_max_value = 0; + int contrast; + + int pos; + double *angles = av_malloc(sizeof(*angles) * width * height / (16 * deshake->blocksize)); + int center_x = 0, center_y = 0; + double p_x, p_y; + + // Reset counts to zero + for (x = 0; x < deshake->rx * 2 + 1; x++) { + for (y = 0; y < deshake->ry * 2 + 1; y++) { + counts[x][y] = 0; + } + } + + pos = 0; + // Find motion for every block and store the motion vector in the counts + for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) { + // We use a width of 16 here to match the libavcodec sad functions + for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) { + // If the contrast is too low, just skip this block as it probably + // won't be very useful to us. 
+ contrast = block_contrast(src2, x, y, stride, deshake->blocksize); + if (contrast > deshake->contrast) { + //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast); + find_block_motion(deshake, src1, src2, x, y, stride, &mv); + if (mv.x != -1 && mv.y != -1) { + counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1; + if (x > deshake->rx && y > deshake->ry) + angles[pos++] = block_angle(x, y, 0, 0, &mv); + + center_x += mv.x; + center_y += mv.y; + } + } + } + } + + if (pos) { + center_x /= pos; + center_y /= pos; + t->angle = clean_mean(angles, pos); + if (t->angle < 0.001) + t->angle = 0; + } else { + t->angle = 0; + } + + // Find the most common motion vector in the frame and use it as the gmv + for (y = deshake->ry * 2; y >= 0; y--) { + for (x = 0; x < deshake->rx * 2 + 1; x++) { + //av_log(NULL, AV_LOG_ERROR, "%5d ", counts[x][y]); + if (counts[x][y] > count_max_value) { + t->vector.x = x - deshake->rx; + t->vector.y = y - deshake->ry; + count_max_value = counts[x][y]; + } + } + //av_log(NULL, AV_LOG_ERROR, "\n"); + } + + p_x = (center_x - width / 2); + p_y = (center_y - height / 2); + t->vector.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y; + t->vector.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y; + + // Clamp max shift & rotation? 
+ t->vector.x = av_clipf(t->vector.x, -deshake->rx * 2, deshake->rx * 2); + t->vector.y = av_clipf(t->vector.y, -deshake->ry * 2, deshake->ry * 2); + t->angle = av_clipf(t->angle, -0.1, 0.1); + + //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y); + av_free(angles); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + DeshakeContext *deshake = ctx->priv; + char filename[256] = {0}; + + deshake->rx = 16; + deshake->ry = 16; + deshake->edge = FILL_MIRROR; + deshake->blocksize = 8; + deshake->contrast = 125; + deshake->search = EXHAUSTIVE; + deshake->refcount = 20; + + deshake->cw = -1; + deshake->ch = -1; + deshake->cx = -1; + deshake->cy = -1; + + if (args) { + sscanf(args, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%255s", + &deshake->cx, &deshake->cy, &deshake->cw, &deshake->ch, + &deshake->rx, &deshake->ry, &deshake->edge, + &deshake->blocksize, &deshake->contrast, &deshake->search, filename); + + deshake->blocksize /= 2; + + deshake->rx = av_clip(deshake->rx, 0, 64); + deshake->ry = av_clip(deshake->ry, 0, 64); + deshake->edge = av_clip(deshake->edge, FILL_BLANK, FILL_COUNT - 1); + deshake->blocksize = av_clip(deshake->blocksize, 4, 128); + deshake->contrast = av_clip(deshake->contrast, 1, 255); + deshake->search = av_clip(deshake->search, EXHAUSTIVE, SEARCH_COUNT - 1); + + } + if (*filename) + deshake->fp = fopen(filename, "w"); + if (deshake->fp) + fwrite("Ori x, Avg x, Fin x, Ori y, Avg y, Fin y, Ori angle, Avg angle, Fin angle, Ori zoom, Avg zoom, Fin zoom\n", sizeof(char), 104, deshake->fp); + + // Quadword align left edge of box for MMX code, adjust width if necessary + // to keep right margin + if (deshake->cx > 0) { + deshake->cw += deshake->cx - (deshake->cx & ~15); + deshake->cx &= ~15; + } + + av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n", + deshake->cx, deshake->cy, deshake->cw, deshake->ch, + deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 
2, deshake->contrast, deshake->search); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, + AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, + AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + + return 0; +} + +static int config_props(AVFilterLink *link) +{ + DeshakeContext *deshake = link->dst->priv; + + deshake->ref = NULL; + deshake->last.vector.x = 0; + deshake->last.vector.y = 0; + deshake->last.angle = 0; + deshake->last.zoom = 0; + + deshake->avctx = avcodec_alloc_context3(NULL); + dsputil_init(&deshake->c, deshake->avctx); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + DeshakeContext *deshake = ctx->priv; + + avfilter_unref_buffer(deshake->ref); + if (deshake->fp) + fclose(deshake->fp); + if (deshake->avctx) + avcodec_close(deshake->avctx); + av_freep(&deshake->avctx); +} + +static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) +{ + DeshakeContext *deshake = link->dst->priv; + AVFilterLink *outlink = link->dst->outputs[0]; + AVFilterBufferRef *out; + Transform t = {{0},0}, orig = {{0},0}; + float matrix[9]; + float alpha = 2.0 / deshake->refcount; + char tmp[256]; + + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!out) { + avfilter_unref_bufferp(&in); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(out, in); + + if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) { + // Find the most likely global motion for the current frame + find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t); + } else { + uint8_t *src1 = (deshake->ref == NULL) ? 
in->data[0] : deshake->ref->data[0]; + uint8_t *src2 = in->data[0]; + + deshake->cx = FFMIN(deshake->cx, link->w); + deshake->cy = FFMIN(deshake->cy, link->h); + + if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx; + if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy; + + // Quadword align right margin + deshake->cw &= ~15; + + src1 += deshake->cy * in->linesize[0] + deshake->cx; + src2 += deshake->cy * in->linesize[0] + deshake->cx; + + find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t); + } + + + // Copy transform so we can output it later to compare to the smoothed value + orig.vector.x = t.vector.x; + orig.vector.y = t.vector.y; + orig.angle = t.angle; + orig.zoom = t.zoom; + + // Generate a one-sided moving exponential average + deshake->avg.vector.x = alpha * t.vector.x + (1.0 - alpha) * deshake->avg.vector.x; + deshake->avg.vector.y = alpha * t.vector.y + (1.0 - alpha) * deshake->avg.vector.y; + deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle; + deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom; + + // Remove the average from the current motion to detect the motion that + // is not on purpose, just as jitter from bumping the camera + t.vector.x -= deshake->avg.vector.x; + t.vector.y -= deshake->avg.vector.y; + t.angle -= deshake->avg.angle; + t.zoom -= deshake->avg.zoom; + + // Invert the motion to undo it + t.vector.x *= -1; + t.vector.y *= -1; + t.angle *= -1; + + // Write statistics to file + if (deshake->fp) { + snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vector.x, deshake->avg.vector.x, t.vector.x, orig.vector.y, deshake->avg.vector.y, t.vector.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom); + fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp); + } + + // Turn relative current frame motion into absolute by adding it to 
the + // last absolute motion + t.vector.x += deshake->last.vector.x; + t.vector.y += deshake->last.vector.y; + t.angle += deshake->last.angle; + t.zoom += deshake->last.zoom; + + // Shrink motion by 10% to keep things centered in the camera frame + t.vector.x *= 0.9; + t.vector.y *= 0.9; + t.angle *= 0.9; + + // Store the last absolute motion information + deshake->last.vector.x = t.vector.x; + deshake->last.vector.y = t.vector.y; + deshake->last.angle = t.angle; + deshake->last.zoom = t.zoom; + + // Generate a luma transformation matrix + avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix); + + // Transform the luma plane + avfilter_transform(in->data[0], out->data[0], in->linesize[0], out->linesize[0], link->w, link->h, matrix, INTERPOLATE_BILINEAR, deshake->edge); + + // Generate a chroma transformation matrix + avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)), t.vector.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix); + + // Transform the chroma planes + avfilter_transform(in->data[1], out->data[1], in->linesize[1], out->linesize[1], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge); + avfilter_transform(in->data[2], out->data[2], in->linesize[2], out->linesize[2], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge); + + // Cleanup the old reference frame + avfilter_unref_buffer(deshake->ref); + + // Store the current frame as the reference frame for calculating the + // motion of the next frame + deshake->ref = in; + + return ff_filter_frame(outlink, out); +} + +static const AVFilterPad deshake_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .config_props = config_props, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, + }, + { NULL } +}; + +static const AVFilterPad deshake_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + 
+AVFilter avfilter_vf_deshake = { + .name = "deshake", + .description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."), + .priv_size = sizeof(DeshakeContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = deshake_inputs, + .outputs = deshake_outputs, +}; diff --git a/libavfilter/vf_drawbox.c b/libavfilter/vf_drawbox.c index c47422e..af8eca4 100644 --- a/libavfilter/vf_drawbox.c +++ b/libavfilter/vf_drawbox.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia) * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -26,6 +26,7 @@ #include "libavutil/colorspace.h" #include "libavutil/common.h" +#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/parseutils.h" #include "avfilter.h" @@ -36,37 +37,70 @@ enum { Y, U, V, A }; typedef struct { - int x, y, w, h; + const AVClass *class; + int x, y, w, h, thickness; + char *color_str; unsigned char yuv_color[4]; + int invert_color; ///< invert luma color int vsub, hsub; ///< chroma subsampling } DrawBoxContext; +#define OFFSET(x) offsetof(DrawBoxContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption drawbox_options[] = { + { "x", "set the box top-left corner x position", OFFSET(x), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS }, + { "y", "set the box top-left corner y position", OFFSET(y), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS }, + { "width", "set the box width", OFFSET(w), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, + { "w", "set the box width", OFFSET(w), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, + { "height", "set the box height", OFFSET(h), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, + { "h", "set the box height", OFFSET(h), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, + { "color", "set the box edge color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "c", "set the box edge color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "thickness", "set the box maximum thickness", OFFSET(thickness), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX, FLAGS }, + { "t", "set the box maximum thickness", OFFSET(thickness), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX, FLAGS }, + {NULL}, +}; + 
+AVFILTER_DEFINE_CLASS(drawbox); + static av_cold int init(AVFilterContext *ctx, const char *args) { - DrawBoxContext *drawbox= ctx->priv; - char color_str[1024] = "black"; + DrawBoxContext *drawbox = ctx->priv; uint8_t rgba_color[4]; + static const char *shorthand[] = { "x", "y", "w", "h", "color", "thickness", NULL }; + int ret; - drawbox->x = drawbox->y = drawbox->w = drawbox->h = 0; + drawbox->class = &drawbox_class; + av_opt_set_defaults(drawbox); - if (args) - sscanf(args, "%d:%d:%d:%d:%s", - &drawbox->x, &drawbox->y, &drawbox->w, &drawbox->h, color_str); + if ((ret = av_opt_set_from_string(drawbox, args, shorthand, "=", ":")) < 0) + return ret; - if (av_parse_color(rgba_color, color_str, -1, ctx) < 0) + if (!strcmp(drawbox->color_str, "invert")) + drawbox->invert_color = 1; + else if (av_parse_color(rgba_color, drawbox->color_str, -1, ctx) < 0) return AVERROR(EINVAL); - drawbox->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]); - drawbox->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); - drawbox->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); - drawbox->yuv_color[A] = rgba_color[3]; + if (!drawbox->invert_color) { + drawbox->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]); + drawbox->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); + drawbox->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); + drawbox->yuv_color[A] = rgba_color[3]; + } return 0; } +static av_cold void uninit(AVFilterContext *ctx) +{ + DrawBoxContext *drawbox = ctx->priv; + av_opt_free(drawbox); +} + static int query_formats(AVFilterContext *ctx) { - enum AVPixelFormat pix_fmts[] = { + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, @@ -90,7 +124,7 @@ static int 
config_input(AVFilterLink *inlink) if (drawbox->h == 0) drawbox->h = inlink->h; av_log(inlink->dst, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n", - drawbox->w, drawbox->y, drawbox->w, drawbox->h, + drawbox->x, drawbox->y, drawbox->w, drawbox->h, drawbox->yuv_color[Y], drawbox->yuv_color[U], drawbox->yuv_color[V], drawbox->yuv_color[A]); return 0; @@ -109,14 +143,21 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) row[plane] = frame->data[plane] + frame->linesize[plane] * (y >> drawbox->vsub); - for (x = FFMAX(xb, 0); x < (xb + drawbox->w) && x < frame->video->w; x++) { - double alpha = (double)drawbox->yuv_color[A] / 255; - - if ((y - yb < 3) || (yb + drawbox->h - y < 4) || - (x - xb < 3) || (xb + drawbox->w - x < 4)) { - row[0][x ] = (1 - alpha) * row[0][x ] + alpha * drawbox->yuv_color[Y]; - row[1][x >> drawbox->hsub] = (1 - alpha) * row[1][x >> drawbox->hsub] + alpha * drawbox->yuv_color[U]; - row[2][x >> drawbox->hsub] = (1 - alpha) * row[2][x >> drawbox->hsub] + alpha * drawbox->yuv_color[V]; + if (drawbox->invert_color) { + for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->video->w; x++) + if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) || + (x - xb < drawbox->thickness-1) || (xb + drawbox->w - x < drawbox->thickness)) + row[0][x] = 0xff - row[0][x]; + } else { + for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->video->w; x++) { + double alpha = (double)drawbox->yuv_color[A] / 255; + + if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) || + (x - xb < drawbox->thickness-1) || (xb + drawbox->w - x < drawbox->thickness)) { + row[0][x ] = (1 - alpha) * row[0][x ] + alpha * drawbox->yuv_color[Y]; + row[1][x >> drawbox->hsub] = (1 - alpha) * row[1][x >> drawbox->hsub] + alpha * drawbox->yuv_color[U]; + row[2][x >> drawbox->hsub] = (1 - alpha) * row[2][x >> drawbox->hsub] + alpha * drawbox->yuv_color[V]; + } } } } @@ -132,7 +173,6 @@ static 
const AVFilterPad avfilter_vf_drawbox_inputs[] = { .get_video_buffer = ff_null_get_video_buffer, .filter_frame = filter_frame, .min_perms = AV_PERM_WRITE | AV_PERM_READ, - .rej_perms = AV_PERM_PRESERVE }, { NULL } }; @@ -150,8 +190,10 @@ AVFilter avfilter_vf_drawbox = { .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."), .priv_size = sizeof(DrawBoxContext), .init = init, + .uninit = uninit, .query_formats = query_formats, .inputs = avfilter_vf_drawbox_inputs, .outputs = avfilter_vf_drawbox_outputs, + .priv_class = &drawbox_class, }; diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c index f8800d2..2358e35 100644 --- a/libavfilter/vf_drawtext.c +++ b/libavfilter/vf_drawtext.c @@ -3,20 +3,20 @@ * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram * Copyright (c) 2003 Gustavo Sverzut Barbieri <gsbarbieri@yahoo.com.br> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -29,15 +29,16 @@ #include <sys/time.h> #include <time.h> -#include "libavutil/colorspace.h" +#include "config.h" +#include "libavutil/avstring.h" +#include "libavutil/bprint.h" #include "libavutil/common.h" #include "libavutil/file.h" #include "libavutil/eval.h" #include "libavutil/opt.h" -#include "libavutil/mathematics.h" #include "libavutil/random_seed.h" #include "libavutil/parseutils.h" -#include "libavutil/pixdesc.h" +#include "libavutil/timecode.h" #include "libavutil/tree.h" #include "libavutil/lfg.h" #include "avfilter.h" @@ -50,19 +51,27 @@ #include <freetype/config/ftheader.h> #include FT_FREETYPE_H #include FT_GLYPH_H +#if CONFIG_FONTCONFIG +#include <fontconfig/fontconfig.h> +#endif static const char *const var_names[] = { - "E", - "PHI", - "PI", - "main_w", "W", ///< width of the main video - "main_h", "H", ///< height of the main video - "text_w", "w", ///< width of the overlay text - "text_h", "h", ///< height of the overlay text + "dar", + "hsub", "vsub", + "line_h", "lh", ///< line height, same as max_glyph_h + "main_h", "h", "H", ///< height of the input video + "main_w", "w", "W", ///< width of the input video + "max_glyph_a", "ascent", ///< max glyph ascent + "max_glyph_d", "descent", ///< min glyph descent + "max_glyph_h", ///< max glyph height + "max_glyph_w", ///< max glyph width + "n", ///< number of frame + "sar", + "t", ///< timestamp expressed in seconds + "text_h", "th", ///< height of the rendered text + "text_w", "tw", ///< width of the rendered text "x", "y", - "n", ///< number of processed frames - "t", ///< timestamp expressed in seconds NULL }; @@ -83,116 +92,136 @@ static const eval_func2 fun2[] = { }; enum var_name { - VAR_E, - VAR_PHI, - VAR_PI, - 
VAR_MAIN_W, VAR_MW, - VAR_MAIN_H, VAR_MH, - VAR_TEXT_W, VAR_TW, + VAR_DAR, + VAR_HSUB, VAR_VSUB, + VAR_LINE_H, VAR_LH, + VAR_MAIN_H, VAR_h, VAR_H, + VAR_MAIN_W, VAR_w, VAR_W, + VAR_MAX_GLYPH_A, VAR_ASCENT, + VAR_MAX_GLYPH_D, VAR_DESCENT, + VAR_MAX_GLYPH_H, + VAR_MAX_GLYPH_W, + VAR_N, + VAR_SAR, + VAR_T, VAR_TEXT_H, VAR_TH, + VAR_TEXT_W, VAR_TW, VAR_X, VAR_Y, - VAR_N, - VAR_T, VAR_VARS_NB }; +enum expansion_mode { + EXP_NONE, + EXP_NORMAL, + EXP_STRFTIME, +}; + typedef struct { const AVClass *class; + enum expansion_mode exp_mode; ///< expansion mode to use for the text + int reinit; ///< tells if the filter is being reinited uint8_t *fontfile; ///< font to be used uint8_t *text; ///< text to be drawn - uint8_t *expanded_text; ///< used to contain the strftime()-expanded text - size_t expanded_text_size; ///< size in bytes of the expanded_text buffer + AVBPrint expanded_text; ///< used to contain the expanded text int ft_load_flags; ///< flags used for loading fonts, see FT_LOAD_* FT_Vector *positions; ///< positions for each element in the text size_t nb_positions; ///< number of elements of positions array char *textfile; ///< file with text to be drawn - int x, y; ///< position to start drawing text - int w, h; ///< dimension of the text block + int x; ///< x position to start drawing text + int y; ///< y position to start drawing text + int max_glyph_w; ///< max glyph width + int max_glyph_h; ///< max glyph height int shadowx, shadowy; unsigned int fontsize; ///< font size to use char *fontcolor_string; ///< font color as string char *boxcolor_string; ///< box color as string char *shadowcolor_string; ///< shadow color as string - uint8_t fontcolor[4]; ///< foreground color - uint8_t boxcolor[4]; ///< background color - uint8_t shadowcolor[4]; ///< shadow color - uint8_t fontcolor_rgba[4]; ///< foreground color in RGBA - uint8_t boxcolor_rgba[4]; ///< background color in RGBA - uint8_t shadowcolor_rgba[4]; ///< shadow color in RGBA short int draw_box; ///< draw 
box around text - true or false int use_kerning; ///< font kerning is used - true/false int tabsize; ///< tab size int fix_bounds; ///< do we let it go out of frame bounds - t/f + FFDrawContext dc; + FFDrawColor fontcolor; ///< foreground color + FFDrawColor shadowcolor; ///< shadow color + FFDrawColor boxcolor; ///< background color + FT_Library library; ///< freetype font library handle FT_Face face; ///< freetype font face handle struct AVTreeNode *glyphs; ///< rendered glyphs, stored using the UTF-32 char code - int hsub, vsub; ///< chroma subsampling values - int is_packed_rgb; - int pixel_step[4]; ///< distance in bytes between the component of each pixel - uint8_t rgba_map[4]; ///< map RGBA offsets to the positions in the packed RGBA format - uint8_t *box_line[4]; ///< line used for filling the box background - char *x_expr, *y_expr; + char *x_expr; ///< expression for x position + char *y_expr; ///< expression for y position AVExpr *x_pexpr, *y_pexpr; ///< parsed expressions for x and y + int64_t basetime; ///< base pts time in the real world for display double var_values[VAR_VARS_NB]; - char *d_expr; - AVExpr *d_pexpr; + char *draw_expr; ///< expression for draw + AVExpr *draw_pexpr; ///< parsed expression for draw int draw; ///< set to zero to prevent drawing AVLFG prng; ///< random + char *tc_opt_string; ///< specified timecode option string + AVRational tc_rate; ///< frame rate for timecode + AVTimecode tc; ///< timecode context + int tc24hmax; ///< 1 if timecode is wrapped to 24 hours, 0 otherwise + int frame_id; + int reload; ///< reload text file for each frame } DrawTextContext; #define OFFSET(x) offsetof(DrawTextContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM static const AVOption drawtext_options[]= { -{"fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX }, -{"text", "set text", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX }, -{"textfile", "set text 
file", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX }, -{"fontcolor","set foreground color", OFFSET(fontcolor_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX }, -{"boxcolor", "set box color", OFFSET(boxcolor_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX }, -{"shadowcolor", "set shadow color", OFFSET(shadowcolor_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX }, -{"box", "set box", OFFSET(draw_box), AV_OPT_TYPE_INT, {.i64=0}, 0, 1 }, -{"fontsize", "set font size", OFFSET(fontsize), AV_OPT_TYPE_INT, {.i64=16}, 1, 72 }, -{"x", "set x", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX }, -{"y", "set y", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX }, -{"shadowx", "set x", OFFSET(shadowx), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX }, -{"shadowy", "set y", OFFSET(shadowy), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX }, -{"tabsize", "set tab size", OFFSET(tabsize), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX }, -{"draw", "if false do not draw", OFFSET(d_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX }, -{"fix_bounds", "if true, check and fix text coords to avoid clipping", - OFFSET(fix_bounds), AV_OPT_TYPE_INT, {.i64=1}, 0, 1 }, +{"fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"text", "set text", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"textfile", "set text file", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"fontcolor", "set foreground color", OFFSET(fontcolor_string), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"boxcolor", "set box color", OFFSET(boxcolor_string), AV_OPT_TYPE_STRING, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"shadowcolor", "set shadow color", OFFSET(shadowcolor_string), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"box", "set box", OFFSET(draw_box), 
AV_OPT_TYPE_INT, {.i64=0}, 0, 1 , FLAGS}, +{"fontsize", "set font size", OFFSET(fontsize), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX , FLAGS}, +{"x", "set x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"y", "set y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"shadowx", "set x", OFFSET(shadowx), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS}, +{"shadowy", "set y", OFFSET(shadowy), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS}, +{"tabsize", "set tab size", OFFSET(tabsize), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX , FLAGS}, +{"basetime", "set base time", OFFSET(basetime), AV_OPT_TYPE_INT64, {.i64=AV_NOPTS_VALUE}, INT64_MIN, INT64_MAX , FLAGS}, +{"draw", "if false do not draw", OFFSET(draw_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS}, + +{"expansion","set the expansion mode", OFFSET(exp_mode), AV_OPT_TYPE_INT, {.i64=EXP_NORMAL}, 0, 2, FLAGS, "expansion"}, +{"none", "set no expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NONE}, 0, 0, FLAGS, "expansion"}, +{"normal", "set normal expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NORMAL}, 0, 0, FLAGS, "expansion"}, +{"strftime", "set strftime expansion (deprecated)", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_STRFTIME}, 0, 0, FLAGS, "expansion"}, + +{"timecode", "set initial timecode", OFFSET(tc_opt_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS}, +{"tc24hmax", "set 24 hours max (timecode only)", OFFSET(tc24hmax), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS}, +{"timecode_rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS}, +{"r", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS}, +{"rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS}, +{"reload", "reload text file for each frame", OFFSET(reload), AV_OPT_TYPE_INT, 
{.i64=0}, 0, 1, FLAGS}, +{"fix_bounds", "if true, check and fix text coords to avoid clipping", OFFSET(fix_bounds), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS}, /* FT_LOAD_* flags */ -{"ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, {.i64=FT_LOAD_DEFAULT|FT_LOAD_RENDER}, 0, INT_MAX, 0, "ft_load_flags" }, -{"default", "set default", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_DEFAULT}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"no_scale", "set no_scale", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_NO_SCALE}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"no_hinting", "set no_hinting", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_NO_HINTING}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"render", "set render", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_RENDER}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"no_bitmap", "set no_bitmap", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_NO_BITMAP}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"vertical_layout", "set vertical_layout", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_VERTICAL_LAYOUT}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"force_autohint", "set force_autohint", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_FORCE_AUTOHINT}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"crop_bitmap", "set crop_bitmap", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_CROP_BITMAP}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"pedantic", "set pedantic", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_PEDANTIC}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"ignore_global_advance_width", "set ignore_global_advance_width", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"no_recurse", "set no_recurse", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_NO_RECURSE}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"ignore_transform", "set ignore_transform", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_IGNORE_TRANSFORM}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"monochrome", "set monochrome", 0, AV_OPT_TYPE_CONST, {.i64 = 
FT_LOAD_MONOCHROME}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"linear_design", "set linear_design", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_LINEAR_DESIGN}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, -{"no_autohint", "set no_autohint", 0, AV_OPT_TYPE_CONST, {.i64 = FT_LOAD_NO_AUTOHINT}, INT_MIN, INT_MAX, 0, "ft_load_flags" }, +{"ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, {.i64=FT_LOAD_DEFAULT|FT_LOAD_RENDER}, 0, INT_MAX, FLAGS, "ft_load_flags"}, +{"default", "set default", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_DEFAULT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"no_scale", "set no_scale", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_SCALE}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"no_hinting", "set no_hinting", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_HINTING}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"render", "set render", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_RENDER}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"no_bitmap", "set no_bitmap", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_BITMAP}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"vertical_layout", "set vertical_layout", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_VERTICAL_LAYOUT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"force_autohint", "set force_autohint", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_FORCE_AUTOHINT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"crop_bitmap", "set crop_bitmap", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_CROP_BITMAP}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"pedantic", "set pedantic", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_PEDANTIC}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"ignore_global_advance_width", "set ignore_global_advance_width", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"no_recurse", "set no_recurse", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_RECURSE}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"ignore_transform", "set ignore_transform", 0, 
AV_OPT_TYPE_CONST, {.i64=FT_LOAD_IGNORE_TRANSFORM}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"monochrome", "set monochrome", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_MONOCHROME}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"linear_design", "set linear_design", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_LINEAR_DESIGN}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, +{"no_autohint", "set no_autohint", 0, AV_OPT_TYPE_CONST, {.i64=FT_LOAD_NO_AUTOHINT}, INT_MIN, INT_MAX, FLAGS, "ft_load_flags"}, {NULL}, }; -static const char *drawtext_get_name(void *ctx) -{ - return "drawtext"; -} - -static const AVClass drawtext_class = { - "DrawTextContext", - drawtext_get_name, - drawtext_options -}; +AVFILTER_DEFINE_CLASS(drawtext); #undef __FTERRORS_H__ #define FT_ERROR_START_LIST { @@ -279,6 +308,114 @@ error: return ret; } +static int load_font_file(AVFilterContext *ctx, const char *path, int index, + const char **error) +{ + DrawTextContext *dtext = ctx->priv; + int err; + + err = FT_New_Face(dtext->library, path, index, &dtext->face); + if (err) { + *error = FT_ERRMSG(err); + return AVERROR(EINVAL); + } + return 0; +} + +#if CONFIG_FONTCONFIG +static int load_font_fontconfig(AVFilterContext *ctx, const char **error) +{ + DrawTextContext *dtext = ctx->priv; + FcConfig *fontconfig; + FcPattern *pattern, *fpat; + FcResult result = FcResultMatch; + FcChar8 *filename; + int err, index; + double size; + + fontconfig = FcInitLoadConfigAndFonts(); + if (!fontconfig) { + *error = "impossible to init fontconfig\n"; + return AVERROR(EINVAL); + } + pattern = FcNameParse(dtext->fontfile ? 
dtext->fontfile : + (uint8_t *)(intptr_t)"default"); + if (!pattern) { + *error = "could not parse fontconfig pattern"; + return AVERROR(EINVAL); + } + if (!FcConfigSubstitute(fontconfig, pattern, FcMatchPattern)) { + *error = "could not substitute fontconfig options"; /* very unlikely */ + return AVERROR(EINVAL); + } + FcDefaultSubstitute(pattern); + fpat = FcFontMatch(fontconfig, pattern, &result); + if (!fpat || result != FcResultMatch) { + *error = "impossible to find a matching font"; + return AVERROR(EINVAL); + } + if (FcPatternGetString (fpat, FC_FILE, 0, &filename) != FcResultMatch || + FcPatternGetInteger(fpat, FC_INDEX, 0, &index ) != FcResultMatch || + FcPatternGetDouble (fpat, FC_SIZE, 0, &size ) != FcResultMatch) { + *error = "impossible to find font information"; + return AVERROR(EINVAL); + } + av_log(ctx, AV_LOG_INFO, "Using \"%s\"\n", filename); + if (!dtext->fontsize) + dtext->fontsize = size + 0.5; + err = load_font_file(ctx, filename, index, error); + if (err) + return err; + FcPatternDestroy(fpat); + FcPatternDestroy(pattern); + FcConfigDestroy(fontconfig); + return 0; +} +#endif + +static int load_font(AVFilterContext *ctx) +{ + DrawTextContext *dtext = ctx->priv; + int err; + const char *error = "unknown error\n"; + + /* load the face, and set up the encoding, which is by default UTF-8 */ + err = load_font_file(ctx, dtext->fontfile, 0, &error); + if (!err) + return 0; +#if CONFIG_FONTCONFIG + err = load_font_fontconfig(ctx, &error); + if (!err) + return 0; +#endif + av_log(ctx, AV_LOG_ERROR, "Could not load font \"%s\": %s\n", + dtext->fontfile, error); + return err; +} + +static int load_textfile(AVFilterContext *ctx) +{ + DrawTextContext *dtext = ctx->priv; + int err; + uint8_t *textbuf; + size_t textbuf_size; + + if ((err = av_file_map(dtext->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) { + av_log(ctx, AV_LOG_ERROR, + "The text file '%s' could not be read or is empty\n", + dtext->textfile); + return err; + } + + if (!(dtext->text = 
av_realloc(dtext->text, textbuf_size + 1))) + return AVERROR(ENOMEM); + memcpy(dtext->text, textbuf, textbuf_size); + dtext->text[textbuf_size] = 0; + av_file_unmap(textbuf, textbuf_size); + + return 0; +} + static av_cold int init(AVFilterContext *ctx, const char *args) { int err; @@ -287,62 +424,58 @@ static av_cold int init(AVFilterContext *ctx, const char *args) dtext->class = &drawtext_class; av_opt_set_defaults(dtext); - dtext->fontcolor_string = av_strdup("black"); - dtext->boxcolor_string = av_strdup("white"); - dtext->shadowcolor_string = av_strdup("black"); - if ((err = (av_set_options_string(dtext, args, "=", ":"))) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args); + if ((err = av_set_options_string(dtext, args, "=", ":")) < 0) return err; - } - if (!dtext->fontfile) { + if (!dtext->fontfile && !CONFIG_FONTCONFIG) { av_log(ctx, AV_LOG_ERROR, "No font filename provided\n"); return AVERROR(EINVAL); } if (dtext->textfile) { - uint8_t *textbuf; - size_t textbuf_size; - if (dtext->text) { av_log(ctx, AV_LOG_ERROR, "Both text and text file provided. 
Please provide only one\n"); return AVERROR(EINVAL); } - if ((err = av_file_map(dtext->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) { - av_log(ctx, AV_LOG_ERROR, - "The text file '%s' could not be read or is empty\n", - dtext->textfile); + if ((err = load_textfile(ctx)) < 0) return err; - } + } - if (!(dtext->text = av_malloc(textbuf_size+1))) - return AVERROR(ENOMEM); - memcpy(dtext->text, textbuf, textbuf_size); - dtext->text[textbuf_size] = 0; - av_file_unmap(textbuf, textbuf_size); + if (dtext->reload && !dtext->textfile) + av_log(ctx, AV_LOG_WARNING, "No file to reload\n"); + + if (dtext->tc_opt_string) { + int ret = av_timecode_init_from_string(&dtext->tc, dtext->tc_rate, + dtext->tc_opt_string, ctx); + if (ret < 0) + return ret; + if (dtext->tc24hmax) + dtext->tc.flags |= AV_TIMECODE_FLAG_24HOURSMAX; + if (!dtext->text) + dtext->text = av_strdup(""); } if (!dtext->text) { av_log(ctx, AV_LOG_ERROR, - "Either text or a valid file must be provided\n"); + "Either text, a valid file or a timecode must be provided\n"); return AVERROR(EINVAL); } - if ((err = av_parse_color(dtext->fontcolor_rgba, dtext->fontcolor_string, -1, ctx))) { + if ((err = av_parse_color(dtext->fontcolor.rgba, dtext->fontcolor_string, -1, ctx))) { av_log(ctx, AV_LOG_ERROR, "Invalid font color '%s'\n", dtext->fontcolor_string); return err; } - if ((err = av_parse_color(dtext->boxcolor_rgba, dtext->boxcolor_string, -1, ctx))) { + if ((err = av_parse_color(dtext->boxcolor.rgba, dtext->boxcolor_string, -1, ctx))) { av_log(ctx, AV_LOG_ERROR, "Invalid box color '%s'\n", dtext->boxcolor_string); return err; } - if ((err = av_parse_color(dtext->shadowcolor_rgba, dtext->shadowcolor_string, -1, ctx))) { + if ((err = av_parse_color(dtext->shadowcolor.rgba, dtext->shadowcolor_string, -1, ctx))) { av_log(ctx, AV_LOG_ERROR, "Invalid shadow color '%s'\n", dtext->shadowcolor_string); return err; @@ -354,12 +487,11 @@ static av_cold int init(AVFilterContext *ctx, const char *args) return AVERROR(EINVAL); 
} - /* load the face, and set up the encoding, which is by default UTF-8 */ - if ((err = FT_New_Face(dtext->library, dtext->fontfile, 0, &dtext->face))) { - av_log(ctx, AV_LOG_ERROR, "Could not load fontface from file '%s': %s\n", - dtext->fontfile, FT_ERRMSG(err)); - return AVERROR(EINVAL); - } + err = load_font(ctx); + if (err) + return err; + if (!dtext->fontsize) + dtext->fontsize = 16; if ((err = FT_Set_Pixel_Sizes(dtext->face, 0, dtext->fontsize))) { av_log(ctx, AV_LOG_ERROR, "Could not set font size to %d pixels: %s\n", dtext->fontsize, FT_ERRMSG(err)); @@ -372,37 +504,33 @@ static av_cold int init(AVFilterContext *ctx, const char *args) load_glyph(ctx, NULL, 0); /* set the tabsize in pixels */ - if ((err = load_glyph(ctx, &glyph, ' ') < 0)) { + if ((err = load_glyph(ctx, &glyph, ' ')) < 0) { av_log(ctx, AV_LOG_ERROR, "Could not set tabsize.\n"); return err; } dtext->tabsize *= glyph->advance; -#if !HAVE_LOCALTIME_R - av_log(ctx, AV_LOG_WARNING, "strftime() expansion unavailable!\n"); -#endif + if (dtext->exp_mode == EXP_STRFTIME && + (strchr(dtext->text, '%') || strchr(dtext->text, '\\'))) + av_log(ctx, AV_LOG_WARNING, "expansion=strftime is deprecated.\n"); + + av_bprint_init(&dtext->expanded_text, 0, AV_BPRINT_SIZE_UNLIMITED); return 0; } static int query_formats(AVFilterContext *ctx) { - static const enum AVPixelFormat pix_fmts[] = { - AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, - AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, - AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, - AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, - AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P, - AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, - AV_PIX_FMT_NONE - }; - - ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0)); return 0; } static int glyph_enu_free(void *opaque, void *elem) { + Glyph *glyph = elem; + + FT_Done_Glyph(*glyph->glyph); + av_freep(&glyph->glyph); av_free(elem); return 0; } @@ -410,26 +538,23 @@ static int glyph_enu_free(void *opaque, void 
*elem) static av_cold void uninit(AVFilterContext *ctx) { DrawTextContext *dtext = ctx->priv; - int i; - av_freep(&dtext->fontfile); - av_freep(&dtext->text); - av_freep(&dtext->expanded_text); - av_freep(&dtext->fontcolor_string); - av_freep(&dtext->boxcolor_string); + av_expr_free(dtext->x_pexpr); dtext->x_pexpr = NULL; + av_expr_free(dtext->y_pexpr); dtext->y_pexpr = NULL; + av_expr_free(dtext->draw_pexpr); dtext->draw_pexpr = NULL; + av_opt_free(dtext); + av_freep(&dtext->positions); - av_freep(&dtext->shadowcolor_string); + dtext->nb_positions = 0; + av_tree_enumerate(dtext->glyphs, NULL, NULL, glyph_enu_free); av_tree_destroy(dtext->glyphs); - dtext->glyphs = 0; + dtext->glyphs = NULL; + FT_Done_Face(dtext->face); FT_Done_FreeType(dtext->library); - for (i = 0; i < 4; i++) { - av_freep(&dtext->box_line[i]); - dtext->pixel_step[i] = 0; - } - + av_bprint_finalize(&dtext->expanded_text, NULL); } static inline int is_newline(uint32_t c) @@ -437,295 +562,234 @@ static inline int is_newline(uint32_t c) return c == '\n' || c == '\r' || c == '\f' || c == '\v'; } -static int dtext_prepare_text(AVFilterContext *ctx) +static int config_input(AVFilterLink *inlink) { + AVFilterContext *ctx = inlink->dst; DrawTextContext *dtext = ctx->priv; - uint32_t code = 0, prev_code = 0; - int x = 0, y = 0, i = 0, ret; - int text_height, baseline; - char *text = dtext->text; - uint8_t *p; - int str_w = 0, len; - int y_min = 32000, y_max = -32000; - FT_Vector delta; - Glyph *glyph = NULL, *prev_glyph = NULL; - Glyph dummy = { 0 }; - int width = ctx->inputs[0]->w; - int height = ctx->inputs[0]->h; - -#if HAVE_LOCALTIME_R - time_t now = time(0); - struct tm ltime; - uint8_t *buf = dtext->expanded_text; - int buf_size = dtext->expanded_text_size; - - if (!buf) - buf_size = 2*strlen(dtext->text)+1; + int ret; - localtime_r(&now, <ime); + ff_draw_init(&dtext->dc, inlink->format, 0); + ff_draw_color(&dtext->dc, &dtext->fontcolor, dtext->fontcolor.rgba); + ff_draw_color(&dtext->dc, 
&dtext->shadowcolor, dtext->shadowcolor.rgba); + ff_draw_color(&dtext->dc, &dtext->boxcolor, dtext->boxcolor.rgba); + + dtext->var_values[VAR_w] = dtext->var_values[VAR_W] = dtext->var_values[VAR_MAIN_W] = inlink->w; + dtext->var_values[VAR_h] = dtext->var_values[VAR_H] = dtext->var_values[VAR_MAIN_H] = inlink->h; + dtext->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1; + dtext->var_values[VAR_DAR] = (double)inlink->w / inlink->h * dtext->var_values[VAR_SAR]; + dtext->var_values[VAR_HSUB] = 1 << dtext->dc.hsub_max; + dtext->var_values[VAR_VSUB] = 1 << dtext->dc.vsub_max; + dtext->var_values[VAR_X] = NAN; + dtext->var_values[VAR_Y] = NAN; + if (!dtext->reinit) + dtext->var_values[VAR_N] = 0; + dtext->var_values[VAR_T] = NAN; - while ((buf = av_realloc(buf, buf_size))) { - *buf = 1; - if (strftime(buf, buf_size, dtext->text, <ime) != 0 || *buf == 0) - break; - buf_size *= 2; - } + av_lfg_init(&dtext->prng, av_get_random_seed()); - if (!buf) - return AVERROR(ENOMEM); - text = dtext->expanded_text = buf; - dtext->expanded_text_size = buf_size; -#endif + if ((ret = av_expr_parse(&dtext->x_pexpr, dtext->x_expr, var_names, + NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 || + (ret = av_expr_parse(&dtext->y_pexpr, dtext->y_expr, var_names, + NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 || + (ret = av_expr_parse(&dtext->draw_pexpr, dtext->draw_expr, var_names, + NULL, NULL, fun2_names, fun2, 0, ctx)) < 0) - if ((len = strlen(text)) > dtext->nb_positions) { - FT_Vector *p = av_realloc(dtext->positions, - len * sizeof(*dtext->positions)); - if (!p) { - av_freep(dtext->positions); - dtext->nb_positions = 0; - return AVERROR(ENOMEM); - } else { - dtext->positions = p; - dtext->nb_positions = len; - } - } + return AVERROR(EINVAL); - /* load and cache glyphs */ - for (i = 0, p = text; *p; i++) { - GET_UTF8(code, *p++, continue;); + return 0; +} - /* get glyph */ - dummy.code = code; - glyph = av_tree_find(dtext->glyphs, &dummy, 
glyph_cmp, NULL); - if (!glyph) { - ret = load_glyph(ctx, &glyph, code); - if (ret) - return ret; - } +static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags) +{ + DrawTextContext *dtext = ctx->priv; - y_min = FFMIN(glyph->bbox.yMin, y_min); - y_max = FFMAX(glyph->bbox.yMax, y_max); + if (!strcmp(cmd, "reinit")) { + int ret; + uninit(ctx); + dtext->reinit = 1; + if ((ret = init(ctx, arg)) < 0) + return ret; + return config_input(ctx->inputs[0]); } - text_height = y_max - y_min; - baseline = y_max; - - /* compute and save position for each glyph */ - glyph = NULL; - for (i = 0, p = text; *p; i++) { - GET_UTF8(code, *p++, continue;); - - /* skip the \n in the sequence \r\n */ - if (prev_code == '\r' && code == '\n') - continue; - - prev_code = code; - if (is_newline(code)) { - str_w = FFMAX(str_w, x - dtext->x); - y += text_height; - x = 0; - continue; - } - /* get glyph */ - prev_glyph = glyph; - dummy.code = code; - glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL); + return AVERROR(ENOSYS); +} - /* kerning */ - if (dtext->use_kerning && prev_glyph && glyph->code) { - FT_Get_Kerning(dtext->face, prev_glyph->code, glyph->code, - ft_kerning_default, &delta); - x += delta.x >> 6; - } +static int func_pts(AVFilterContext *ctx, AVBPrint *bp, + char *fct, unsigned argc, char **argv, int tag) +{ + DrawTextContext *dtext = ctx->priv; - if (x + glyph->bbox.xMax >= width) { - str_w = FFMAX(str_w, x); - y += text_height; - x = 0; - } + av_bprintf(bp, "%.6f", dtext->var_values[VAR_T]); + return 0; +} - /* save position */ - dtext->positions[i].x = x + glyph->bitmap_left; - dtext->positions[i].y = y - glyph->bitmap_top + baseline; - if (code == '\t') x = (x / dtext->tabsize + 1)*dtext->tabsize; - else x += glyph->advance; - } +static int func_frame_num(AVFilterContext *ctx, AVBPrint *bp, + char *fct, unsigned argc, char **argv, int tag) +{ + DrawTextContext *dtext = ctx->priv; - str_w = FFMIN(width - 1, 
FFMAX(str_w, x)); - y = FFMIN(y + text_height, height - 1); + av_bprintf(bp, "%d", (int)dtext->var_values[VAR_N]); + return 0; +} - dtext->w = str_w; - dtext->var_values[VAR_TEXT_W] = dtext->var_values[VAR_TW] = dtext->w; - dtext->h = y; - dtext->var_values[VAR_TEXT_H] = dtext->var_values[VAR_TH] = dtext->h; +#if !HAVE_LOCALTIME_R +static void localtime_r(const time_t *t, struct tm *tm) +{ + *tm = *localtime(t); +} +#endif +static int func_strftime(AVFilterContext *ctx, AVBPrint *bp, + char *fct, unsigned argc, char **argv, int tag) +{ + const char *fmt = argc ? argv[0] : "%Y-%m-%d %H:%M:%S"; + time_t now; + struct tm tm; + + time(&now); + if (tag == 'L') + localtime_r(&now, &tm); + else + tm = *gmtime(&now); + av_bprint_strftime(bp, fmt, &tm); return 0; } - -static int config_input(AVFilterLink *inlink) +static int func_eval_expr(AVFilterContext *ctx, AVBPrint *bp, + char *fct, unsigned argc, char **argv, int tag) { - AVFilterContext *ctx = inlink->dst; DrawTextContext *dtext = ctx->priv; - const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); + double res; int ret; - dtext->hsub = pix_desc->log2_chroma_w; - dtext->vsub = pix_desc->log2_chroma_h; - - dtext->var_values[VAR_E ] = M_E; - dtext->var_values[VAR_PHI] = M_PHI; - dtext->var_values[VAR_PI ] = M_PI; - - dtext->var_values[VAR_MAIN_W] = - dtext->var_values[VAR_MW] = ctx->inputs[0]->w; - dtext->var_values[VAR_MAIN_H] = - dtext->var_values[VAR_MH] = ctx->inputs[0]->h; - - dtext->var_values[VAR_X] = 0; - dtext->var_values[VAR_Y] = 0; - dtext->var_values[VAR_N] = 0; - dtext->var_values[VAR_T] = NAN; - - av_lfg_init(&dtext->prng, av_get_random_seed()); - - if ((ret = av_expr_parse(&dtext->x_pexpr, dtext->x_expr, var_names, - NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 || - (ret = av_expr_parse(&dtext->y_pexpr, dtext->y_expr, var_names, - NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 || - (ret = av_expr_parse(&dtext->d_pexpr, dtext->d_expr, var_names, - NULL, NULL, fun2_names, fun2, 0, ctx)) < 0) - 
return AVERROR(EINVAL); - - if ((ret = - ff_fill_line_with_color(dtext->box_line, dtext->pixel_step, - inlink->w, dtext->boxcolor, - inlink->format, dtext->boxcolor_rgba, - &dtext->is_packed_rgb, dtext->rgba_map)) < 0) - return ret; - - if (!dtext->is_packed_rgb) { - uint8_t *rgba = dtext->fontcolor_rgba; - dtext->fontcolor[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]); - dtext->fontcolor[1] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0); - dtext->fontcolor[2] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0); - dtext->fontcolor[3] = rgba[3]; - rgba = dtext->shadowcolor_rgba; - dtext->shadowcolor[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]); - dtext->shadowcolor[1] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0); - dtext->shadowcolor[2] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0); - dtext->shadowcolor[3] = rgba[3]; - } - - dtext->draw = 1; + ret = av_expr_parse_and_eval(&res, argv[0], var_names, dtext->var_values, + NULL, NULL, fun2_names, fun2, + &dtext->prng, 0, ctx); + if (ret < 0) + av_log(ctx, AV_LOG_ERROR, + "Expression '%s' for the expr text expansion function is not valid\n", + argv[0]); + else + av_bprintf(bp, "%f", res); - return dtext_prepare_text(ctx); + return ret; } -#define GET_BITMAP_VAL(r, c) \ - bitmap->pixel_mode == FT_PIXEL_MODE_MONO ? 
\ - (bitmap->buffer[(r) * bitmap->pitch + ((c)>>3)] & (0x80 >> ((c)&7))) * 255 : \ - bitmap->buffer[(r) * bitmap->pitch + (c)] - -#define SET_PIXEL_YUV(picref, yuva_color, val, x, y, hsub, vsub) { \ - luma_pos = ((x) ) + ((y) ) * picref->linesize[0]; \ - alpha = yuva_color[3] * (val) * 129; \ - picref->data[0][luma_pos] = (alpha * yuva_color[0] + (255*255*129 - alpha) * picref->data[0][luma_pos] ) >> 23; \ - if (((x) & ((1<<(hsub)) - 1)) == 0 && ((y) & ((1<<(vsub)) - 1)) == 0) {\ - chroma_pos1 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[1]; \ - chroma_pos2 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[2]; \ - picref->data[1][chroma_pos1] = (alpha * yuva_color[1] + (255*255*129 - alpha) * picref->data[1][chroma_pos1]) >> 23; \ - picref->data[2][chroma_pos2] = (alpha * yuva_color[2] + (255*255*129 - alpha) * picref->data[2][chroma_pos2]) >> 23; \ - }\ -} +static const struct drawtext_function { + const char *name; + unsigned argc_min, argc_max; + int tag; /** opaque argument to func */ + int (*func)(AVFilterContext *, AVBPrint *, char *, unsigned, char **, int); +} functions[] = { + { "expr", 1, 1, 0, func_eval_expr }, + { "e", 1, 1, 0, func_eval_expr }, + { "pts", 0, 0, 0, func_pts }, + { "gmtime", 0, 1, 'G', func_strftime }, + { "localtime", 0, 1, 'L', func_strftime }, + { "frame_num", 0, 0, 0, func_frame_num }, + { "n", 0, 0, 0, func_frame_num }, +}; -static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, unsigned int x, - unsigned int y, unsigned int width, unsigned int height, - const uint8_t yuva_color[4], int hsub, int vsub) +static int eval_function(AVFilterContext *ctx, AVBPrint *bp, char *fct, + unsigned argc, char **argv) { - int r, c, alpha; - unsigned int luma_pos, chroma_pos1, chroma_pos2; - uint8_t src_val; - - for (r = 0; r < bitmap->rows && r+y < height; r++) { - for (c = 0; c < bitmap->width && c+x < width; c++) { - /* get intensity value in the glyph bitmap (source) */ - src_val = GET_BITMAP_VAL(r, c); - 
if (!src_val) - continue; - - SET_PIXEL_YUV(picref, yuva_color, src_val, c+x, y+r, hsub, vsub); + unsigned i; + + for (i = 0; i < FF_ARRAY_ELEMS(functions); i++) { + if (strcmp(fct, functions[i].name)) + continue; + if (argc < functions[i].argc_min) { + av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at least %d arguments\n", + fct, functions[i].argc_min); + return AVERROR(EINVAL); } + if (argc > functions[i].argc_max) { + av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at most %d arguments\n", + fct, functions[i].argc_max); + return AVERROR(EINVAL); + } + break; } - - return 0; -} - -#define SET_PIXEL_RGB(picref, rgba_color, val, x, y, pixel_step, r_off, g_off, b_off, a_off) { \ - p = picref->data[0] + (x) * pixel_step + ((y) * picref->linesize[0]); \ - alpha = rgba_color[3] * (val) * 129; \ - *(p+r_off) = (alpha * rgba_color[0] + (255*255*129 - alpha) * *(p+r_off)) >> 23; \ - *(p+g_off) = (alpha * rgba_color[1] + (255*255*129 - alpha) * *(p+g_off)) >> 23; \ - *(p+b_off) = (alpha * rgba_color[2] + (255*255*129 - alpha) * *(p+b_off)) >> 23; \ + if (i >= FF_ARRAY_ELEMS(functions)) { + av_log(ctx, AV_LOG_ERROR, "%%{%s} is not known\n", fct); + return AVERROR(EINVAL); + } + return functions[i].func(ctx, bp, fct, argc, argv, functions[i].tag); } -static inline int draw_glyph_rgb(AVFilterBufferRef *picref, FT_Bitmap *bitmap, - unsigned int x, unsigned int y, - unsigned int width, unsigned int height, int pixel_step, - const uint8_t rgba_color[4], const uint8_t rgba_map[4]) +static int expand_function(AVFilterContext *ctx, AVBPrint *bp, char **rtext) { - int r, c, alpha; - uint8_t *p; - uint8_t src_val; - - for (r = 0; r < bitmap->rows && r+y < height; r++) { - for (c = 0; c < bitmap->width && c+x < width; c++) { - /* get intensity value in the glyph bitmap (source) */ - src_val = GET_BITMAP_VAL(r, c); - if (!src_val) - continue; + const char *text = *rtext; + char *argv[16] = { NULL }; + unsigned argc = 0, i; + int ret; - SET_PIXEL_RGB(picref, rgba_color, src_val, c+x, y+r, 
pixel_step, - rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]); + if (*text != '{') { + av_log(ctx, AV_LOG_ERROR, "Stray %% near '%s'\n", text); + return AVERROR(EINVAL); + } + text++; + while (1) { + if (!(argv[argc++] = av_get_token(&text, ":}"))) { + ret = AVERROR(ENOMEM); + goto end; } + if (!*text) { + av_log(ctx, AV_LOG_ERROR, "Unterminated %%{} near '%s'\n", *rtext); + ret = AVERROR(EINVAL); + goto end; + } + if (argc == FF_ARRAY_ELEMS(argv)) + av_freep(&argv[--argc]); /* error will be caught later */ + if (*text == '}') + break; + text++; } - return 0; + if ((ret = eval_function(ctx, bp, argv[0], argc - 1, argv + 1)) < 0) + goto end; + ret = 0; + *rtext = (char *)text + 1; + +end: + for (i = 0; i < argc; i++) + av_freep(&argv[i]); + return ret; } -static inline void drawbox(AVFilterBufferRef *picref, unsigned int x, unsigned int y, - unsigned int width, unsigned int height, - uint8_t *line[4], int pixel_step[4], uint8_t color[4], - int hsub, int vsub, int is_rgba_packed, uint8_t rgba_map[4]) +static int expand_text(AVFilterContext *ctx) { - int i, j, alpha; - - if (color[3] != 0xFF) { - if (is_rgba_packed) { - uint8_t *p; - for (j = 0; j < height; j++) - for (i = 0; i < width; i++) - SET_PIXEL_RGB(picref, color, 255, i+x, y+j, pixel_step[0], - rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]); + DrawTextContext *dtext = ctx->priv; + char *text = dtext->text; + AVBPrint *bp = &dtext->expanded_text; + int ret; + + av_bprint_clear(bp); + while (*text) { + if (*text == '\\' && text[1]) { + av_bprint_chars(bp, text[1], 1); + text += 2; + } else if (*text == '%') { + text++; + if ((ret = expand_function(ctx, bp, &text)) < 0) + return ret; } else { - unsigned int luma_pos, chroma_pos1, chroma_pos2; - for (j = 0; j < height; j++) - for (i = 0; i < width; i++) - SET_PIXEL_YUV(picref, color, 255, i+x, y+j, hsub, vsub); + av_bprint_chars(bp, *text, 1); + text++; } - } else { - ff_draw_rectangle(picref->data, picref->linesize, - line, pixel_step, hsub, vsub, - x, 
y, width, height); } + if (!av_bprint_is_complete(bp)) + return AVERROR(ENOMEM); + return 0; } static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref, - int width, int height, const uint8_t rgbcolor[4], const uint8_t yuvcolor[4], int x, int y) + int width, int height, const uint8_t rgbcolor[4], FFDrawColor *color, int x, int y) { - char *text = HAVE_LOCALTIME_R ? dtext->expanded_text : dtext->text; + char *text = dtext->expanded_text.str; uint32_t code = 0; - int i; + int i, x1, y1; uint8_t *p; Glyph *glyph = NULL; @@ -744,15 +808,15 @@ static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref, glyph->bitmap.pixel_mode != FT_PIXEL_MODE_GRAY) return AVERROR(EINVAL); - if (dtext->is_packed_rgb) { - draw_glyph_rgb(picref, &glyph->bitmap, - dtext->positions[i].x+x, dtext->positions[i].y+y, width, height, - dtext->pixel_step[0], rgbcolor, dtext->rgba_map); - } else { - draw_glyph_yuv(picref, &glyph->bitmap, - dtext->positions[i].x+x, dtext->positions[i].y+y, width, height, - yuvcolor, dtext->hsub, dtext->vsub); - } + x1 = dtext->positions[i].x+dtext->x+x; + y1 = dtext->positions[i].y+dtext->y+y; + + ff_blend_mask(&dtext->dc, color, + picref->data, picref->linesize, width, height, + glyph->bitmap.buffer, glyph->bitmap.pitch, + glyph->bitmap.width, glyph->bitmap.rows, + glyph->bitmap.pixel_mode == FT_PIXEL_MODE_MONO ? 
0 : 3, + 0, x1, y1); } return 0; @@ -762,97 +826,182 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref, int width, int height) { DrawTextContext *dtext = ctx->priv; - int ret; + uint32_t code = 0, prev_code = 0; + int x = 0, y = 0, i = 0, ret; + int max_text_line_w = 0, len; + int box_w, box_h; + char *text = dtext->text; + uint8_t *p; + int y_min = 32000, y_max = -32000; + int x_min = 32000, x_max = -32000; + FT_Vector delta; + Glyph *glyph = NULL, *prev_glyph = NULL; + Glyph dummy = { 0 }; + + time_t now = time(0); + struct tm ltime; + AVBPrint *bp = &dtext->expanded_text; + + av_bprint_clear(bp); + + if(dtext->basetime != AV_NOPTS_VALUE) + now= picref->pts*av_q2d(ctx->inputs[0]->time_base) + dtext->basetime/1000000; + + switch (dtext->exp_mode) { + case EXP_NONE: + av_bprintf(bp, "%s", dtext->text); + break; + case EXP_NORMAL: + if ((ret = expand_text(ctx)) < 0) + return ret; + break; + case EXP_STRFTIME: + localtime_r(&now, <ime); + av_bprint_strftime(bp, dtext->text, <ime); + break; + } + + if (dtext->tc_opt_string) { + char tcbuf[AV_TIMECODE_STR_SIZE]; + av_timecode_make_string(&dtext->tc, tcbuf, dtext->frame_id++); + av_bprint_clear(bp); + av_bprintf(bp, "%s%s", dtext->text, tcbuf); + } + + if (!av_bprint_is_complete(bp)) + return AVERROR(ENOMEM); + text = dtext->expanded_text.str; + if ((len = dtext->expanded_text.len) > dtext->nb_positions) { + if (!(dtext->positions = + av_realloc(dtext->positions, len*sizeof(*dtext->positions)))) + return AVERROR(ENOMEM); + dtext->nb_positions = len; + } + + x = 0; + y = 0; + + /* load and cache glyphs */ + for (i = 0, p = text; *p; i++) { + GET_UTF8(code, *p++, continue;); + + /* get glyph */ + dummy.code = code; + glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL); + if (!glyph) { + load_glyph(ctx, &glyph, code); + } + + y_min = FFMIN(glyph->bbox.yMin, y_min); + y_max = FFMAX(glyph->bbox.yMax, y_max); + x_min = FFMIN(glyph->bbox.xMin, x_min); + x_max = FFMAX(glyph->bbox.xMax, x_max); + } 
+ dtext->max_glyph_h = y_max - y_min; + dtext->max_glyph_w = x_max - x_min; + + /* compute and save position for each glyph */ + glyph = NULL; + for (i = 0, p = text; *p; i++) { + GET_UTF8(code, *p++, continue;); + + /* skip the \n in the sequence \r\n */ + if (prev_code == '\r' && code == '\n') + continue; + + prev_code = code; + if (is_newline(code)) { + max_text_line_w = FFMAX(max_text_line_w, x); + y += dtext->max_glyph_h; + x = 0; + continue; + } + + /* get glyph */ + prev_glyph = glyph; + dummy.code = code; + glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL); + + /* kerning */ + if (dtext->use_kerning && prev_glyph && glyph->code) { + FT_Get_Kerning(dtext->face, prev_glyph->code, glyph->code, + ft_kerning_default, &delta); + x += delta.x >> 6; + } + + /* save position */ + dtext->positions[i].x = x + glyph->bitmap_left; + dtext->positions[i].y = y - glyph->bitmap_top + y_max; + if (code == '\t') x = (x / dtext->tabsize + 1)*dtext->tabsize; + else x += glyph->advance; + } + + max_text_line_w = FFMAX(x, max_text_line_w); + + dtext->var_values[VAR_TW] = dtext->var_values[VAR_TEXT_W] = max_text_line_w; + dtext->var_values[VAR_TH] = dtext->var_values[VAR_TEXT_H] = y + dtext->max_glyph_h; + + dtext->var_values[VAR_MAX_GLYPH_W] = dtext->max_glyph_w; + dtext->var_values[VAR_MAX_GLYPH_H] = dtext->max_glyph_h; + dtext->var_values[VAR_MAX_GLYPH_A] = dtext->var_values[VAR_ASCENT ] = y_max; + dtext->var_values[VAR_MAX_GLYPH_D] = dtext->var_values[VAR_DESCENT] = y_min; + + dtext->var_values[VAR_LINE_H] = dtext->var_values[VAR_LH] = dtext->max_glyph_h; + + dtext->x = dtext->var_values[VAR_X] = av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng); + dtext->y = dtext->var_values[VAR_Y] = av_expr_eval(dtext->y_pexpr, dtext->var_values, &dtext->prng); + dtext->x = dtext->var_values[VAR_X] = av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng); + dtext->draw = av_expr_eval(dtext->draw_pexpr, dtext->var_values, &dtext->prng); + + if(!dtext->draw) + 
return 0; + + box_w = FFMIN(width - 1 , max_text_line_w); + box_h = FFMIN(height - 1, y + dtext->max_glyph_h); /* draw box */ if (dtext->draw_box) - drawbox(picref, dtext->x, dtext->y, dtext->w, dtext->h, - dtext->box_line, dtext->pixel_step, dtext->boxcolor, - dtext->hsub, dtext->vsub, dtext->is_packed_rgb, - dtext->rgba_map); + ff_blend_rectangle(&dtext->dc, &dtext->boxcolor, + picref->data, picref->linesize, width, height, + dtext->x, dtext->y, box_w, box_h); if (dtext->shadowx || dtext->shadowy) { - if ((ret = draw_glyphs(dtext, picref, width, height, - dtext->shadowcolor_rgba, - dtext->shadowcolor, - dtext->x + dtext->shadowx, - dtext->y + dtext->shadowy)) < 0) + if ((ret = draw_glyphs(dtext, picref, width, height, dtext->shadowcolor.rgba, + &dtext->shadowcolor, dtext->shadowx, dtext->shadowy)) < 0) return ret; } - if ((ret = draw_glyphs(dtext, picref, width, height, - dtext->fontcolor_rgba, - dtext->fontcolor, - dtext->x, - dtext->y)) < 0) + if ((ret = draw_glyphs(dtext, picref, width, height, dtext->fontcolor.rgba, + &dtext->fontcolor, 0, 0)) < 0) return ret; return 0; } -static inline int normalize_double(int *n, double d) -{ - int ret = 0; - - if (isnan(d)) { - ret = AVERROR(EINVAL); - } else if (d > INT_MAX || d < INT_MIN) { - *n = d > INT_MAX ? INT_MAX : INT_MIN; - ret = AVERROR(EINVAL); - } else - *n = round(d); - - return ret; -} - static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) { AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; DrawTextContext *dtext = ctx->priv; - int ret = 0; + int ret; - if ((ret = dtext_prepare_text(ctx)) < 0) { - av_log(ctx, AV_LOG_ERROR, "Can't draw text\n"); - avfilter_unref_bufferp(&frame); - return ret; - } + if (dtext->reload) + if ((ret = load_textfile(ctx)) < 0) + return ret; dtext->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ? 
NAN : frame->pts * av_q2d(inlink->time_base); - dtext->var_values[VAR_X] = - av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng); - dtext->var_values[VAR_Y] = - av_expr_eval(dtext->y_pexpr, dtext->var_values, &dtext->prng); - dtext->var_values[VAR_X] = - av_expr_eval(dtext->x_pexpr, dtext->var_values, &dtext->prng); - - dtext->draw = av_expr_eval(dtext->d_pexpr, dtext->var_values, &dtext->prng); - - normalize_double(&dtext->x, dtext->var_values[VAR_X]); - normalize_double(&dtext->y, dtext->var_values[VAR_Y]); - - if (dtext->fix_bounds) { - if (dtext->x < 0) dtext->x = 0; - if (dtext->y < 0) dtext->y = 0; - if ((unsigned)dtext->x + (unsigned)dtext->w > inlink->w) - dtext->x = inlink->w - dtext->w; - if ((unsigned)dtext->y + (unsigned)dtext->h > inlink->h) - dtext->y = inlink->h - dtext->h; - } - - dtext->x &= ~((1 << dtext->hsub) - 1); - dtext->y &= ~((1 << dtext->vsub) - 1); - av_dlog(ctx, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n", - (int)dtext->var_values[VAR_N], dtext->var_values[VAR_T], - dtext->x, dtext->y, dtext->x+dtext->w, dtext->y+dtext->h); + draw_text(ctx, frame, frame->video->w, frame->video->h); - if (dtext->draw) - draw_text(inlink->dst, frame, frame->video->w, frame->video->h); + av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n", + (int)dtext->var_values[VAR_N], dtext->var_values[VAR_T], + (int)dtext->var_values[VAR_TEXT_W], (int)dtext->var_values[VAR_TEXT_H], + dtext->x, dtext->y); dtext->var_values[VAR_N] += 1.0; - return ff_filter_frame(inlink->dst->outputs[0], frame); + return ff_filter_frame(outlink, frame); } static const AVFilterPad avfilter_vf_drawtext_inputs[] = { @@ -864,7 +1013,6 @@ static const AVFilterPad avfilter_vf_drawtext_inputs[] = { .config_props = config_input, .min_perms = AV_PERM_WRITE | AV_PERM_READ, - .rej_perms = AV_PERM_PRESERVE }, { NULL } }; @@ -887,4 +1035,6 @@ AVFilter avfilter_vf_drawtext = { .inputs = avfilter_vf_drawtext_inputs, .outputs = avfilter_vf_drawtext_outputs, + .process_command = 
command, + .priv_class = &drawtext_class, }; diff --git a/libavfilter/vf_edgedetect.c b/libavfilter/vf_edgedetect.c new file mode 100644 index 0000000..5837ccc --- /dev/null +++ b/libavfilter/vf_edgedetect.c @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2012 Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Edge detection filter + * + * @see https://en.wikipedia.org/wiki/Canny_edge_detector + */ + +#include "libavutil/opt.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +typedef struct { + const AVClass *class; + uint8_t *tmpbuf; + uint16_t *gradients; + char *directions; + double low, high; + uint8_t low_u8, high_u8; +} EdgeDetectContext; + +#define OFFSET(x) offsetof(EdgeDetectContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +static const AVOption edgedetect_options[] = { + { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS }, + { "low", "set low threshold", OFFSET(low), AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(edgedetect); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + int ret; + EdgeDetectContext *edgedetect = ctx->priv; + + 
edgedetect->class = &edgedetect_class; + av_opt_set_defaults(edgedetect); + + if ((ret = av_set_options_string(edgedetect, args, "=", ":")) < 0) + return ret; + + edgedetect->low_u8 = edgedetect->low * 255. + .5; + edgedetect->high_u8 = edgedetect->high * 255. + .5; + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE}; + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int config_props(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + EdgeDetectContext *edgedetect = ctx->priv; + + edgedetect->tmpbuf = av_malloc(inlink->w * inlink->h); + edgedetect->gradients = av_calloc(inlink->w * inlink->h, sizeof(*edgedetect->gradients)); + edgedetect->directions = av_malloc(inlink->w * inlink->h); + if (!edgedetect->tmpbuf || !edgedetect->gradients || !edgedetect->directions) + return AVERROR(ENOMEM); + return 0; +} + +static void gaussian_blur(AVFilterContext *ctx, int w, int h, + uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize) +{ + int i, j; + + memcpy(dst, src, w); dst += dst_linesize; src += src_linesize; + memcpy(dst, src, w); dst += dst_linesize; src += src_linesize; + for (j = 2; j < h - 2; j++) { + dst[0] = src[0]; + dst[1] = src[1]; + for (i = 2; i < w - 2; i++) { + /* Gaussian mask of size 5x5 with sigma = 1.4 */ + dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2 + + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4 + + (src[-2*src_linesize + i ] + src[2*src_linesize + i ]) * 5 + + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4 + + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2 + + + (src[ -src_linesize + i-2] + src[ src_linesize + i-2]) * 4 + + (src[ -src_linesize + i-1] + src[ src_linesize + i-1]) * 9 + + (src[ -src_linesize + i ] + src[ src_linesize + i ]) * 12 + + (src[ -src_linesize + i+1] + src[ src_linesize + i+1]) * 9 + + (src[ 
-src_linesize + i+2] + src[ src_linesize + i+2]) * 4 + + + src[i-2] * 5 + + src[i-1] * 12 + + src[i ] * 15 + + src[i+1] * 12 + + src[i+2] * 5) / 159; + } + dst[i ] = src[i ]; + dst[i + 1] = src[i + 1]; + + dst += dst_linesize; + src += src_linesize; + } + memcpy(dst, src, w); dst += dst_linesize; src += src_linesize; + memcpy(dst, src, w); +} + +enum { + DIRECTION_45UP, + DIRECTION_45DOWN, + DIRECTION_HORIZONTAL, + DIRECTION_VERTICAL, +}; + +static int get_rounded_direction(int gx, int gy) +{ + /* reference angles: + * tan( pi/8) = sqrt(2)-1 + * tan(3pi/8) = sqrt(2)+1 + * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against + * <ref-angle>, or more simply Gy against <ref-angle>*Gx + * + * Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic: + * round((sqrt(2)-1) * (1<<16)) = 27146 + * round((sqrt(2)+1) * (1<<16)) = 158218 + */ + if (gx) { + int tanpi8gx, tan3pi8gx; + + if (gx < 0) + gx = -gx, gy = -gy; + gy <<= 16; + tanpi8gx = 27146 * gx; + tan3pi8gx = 158218 * gx; + if (gy > -tan3pi8gx && gy < -tanpi8gx) return DIRECTION_45UP; + if (gy > -tanpi8gx && gy < tanpi8gx) return DIRECTION_HORIZONTAL; + if (gy > tanpi8gx && gy < tan3pi8gx) return DIRECTION_45DOWN; + } + return DIRECTION_VERTICAL; +} + +static void sobel(AVFilterContext *ctx, int w, int h, + uint16_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize) +{ + int i, j; + EdgeDetectContext *edgedetect = ctx->priv; + + for (j = 1; j < h - 1; j++) { + dst += dst_linesize; + src += src_linesize; + for (i = 1; i < w - 1; i++) { + const int gx = + -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1] + -2*src[ i-1] + 2*src[ i+1] + -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1]; + const int gy = + -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1] + -2*src[-src_linesize + i ] + 2*src[ src_linesize + i ] + -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1]; + + dst[i] = FFABS(gx) + FFABS(gy); + edgedetect->directions[j*w + i] = get_rounded_direction(gx, 
gy); + } + } +} + +static void non_maximum_suppression(AVFilterContext *ctx, int w, int h, + uint8_t *dst, int dst_linesize, + const uint16_t *src, int src_linesize) +{ + int i, j; + EdgeDetectContext *edgedetect = ctx->priv; + +#define COPY_MAXIMA(ay, ax, by, bx) do { \ + if (src[i] > src[(ay)*src_linesize + i+(ax)] && \ + src[i] > src[(by)*src_linesize + i+(bx)]) \ + dst[i] = av_clip_uint8(src[i]); \ +} while (0) + + for (j = 1; j < h - 1; j++) { + dst += dst_linesize; + src += src_linesize; + for (i = 1; i < w - 1; i++) { + switch (edgedetect->directions[j*w + i]) { + case DIRECTION_45UP: COPY_MAXIMA( 1, -1, -1, 1); break; + case DIRECTION_45DOWN: COPY_MAXIMA(-1, -1, 1, 1); break; + case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1, 0, 1); break; + case DIRECTION_VERTICAL: COPY_MAXIMA(-1, 0, 1, 0); break; + } + } + } +} + +static void double_threshold(AVFilterContext *ctx, int w, int h, + uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize) +{ + int i, j; + EdgeDetectContext *edgedetect = ctx->priv; + const int low = edgedetect->low_u8; + const int high = edgedetect->high_u8; + + for (j = 0; j < h; j++) { + for (i = 0; i < w; i++) { + if (src[i] > high) { + dst[i] = src[i]; + continue; + } + + if ((!i || i == w - 1 || !j || j == h - 1) && + src[i] > low && + (src[-src_linesize + i-1] > high || + src[-src_linesize + i ] > high || + src[-src_linesize + i+1] > high || + src[ i-1] > high || + src[ i+1] > high || + src[ src_linesize + i-1] > high || + src[ src_linesize + i ] > high || + src[ src_linesize + i+1] > high)) + dst[i] = src[i]; + else + dst[i] = 0; + } + dst += dst_linesize; + src += src_linesize; + } +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) +{ + AVFilterContext *ctx = inlink->dst; + EdgeDetectContext *edgedetect = ctx->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + uint8_t *tmpbuf = edgedetect->tmpbuf; + uint16_t *gradients = edgedetect->gradients; + AVFilterBufferRef *out; + + out = 
ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!out) { + avfilter_unref_bufferp(&in); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(out, in); + + /* gaussian filter to reduce noise */ + gaussian_blur(ctx, inlink->w, inlink->h, + tmpbuf, inlink->w, + in->data[0], in->linesize[0]); + + /* compute the 16-bits gradients and directions for the next step */ + sobel(ctx, inlink->w, inlink->h, + gradients, inlink->w, + tmpbuf, inlink->w); + + /* non_maximum_suppression() will actually keep & clip what's necessary and + * ignore the rest, so we need a clean output buffer */ + memset(tmpbuf, 0, inlink->w * inlink->h); + non_maximum_suppression(ctx, inlink->w, inlink->h, + tmpbuf, inlink->w, + gradients, inlink->w); + + /* keep high values, or low values surrounded by high values */ + double_threshold(ctx, inlink->w, inlink->h, + out->data[0], out->linesize[0], + tmpbuf, inlink->w); + + avfilter_unref_bufferp(&in); + return ff_filter_frame(outlink, out); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + EdgeDetectContext *edgedetect = ctx->priv; + av_freep(&edgedetect->tmpbuf); + av_freep(&edgedetect->gradients); + av_freep(&edgedetect->directions); +} + +static const AVFilterPad edgedetect_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_props, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad edgedetect_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_edgedetect = { + .name = "edgedetect", + .description = NULL_IF_CONFIG_SMALL("Detect and draw edge."), + .priv_size = sizeof(EdgeDetectContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = edgedetect_inputs, + .outputs = edgedetect_outputs, + .priv_class = &edgedetect_class, +}; diff --git a/libavfilter/vf_fade.c b/libavfilter/vf_fade.c index f609db1..df22274 100644 --- 
a/libavfilter/vf_fade.c +++ b/libavfilter/vf_fade.c @@ -2,20 +2,20 @@ * Copyright (c) 2010 Brandon Mintern * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -25,52 +25,94 @@ * based heavily on vf_negate.c by Bobby Bingham */ +#include "libavutil/avstring.h" #include "libavutil/common.h" +#include "libavutil/eval.h" +#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" +#include "drawutils.h" +#include "internal.h" #include "formats.h" #include "internal.h" #include "video.h" +#define R 0 +#define G 1 +#define B 2 +#define A 3 + +#define Y 0 +#define U 1 +#define V 2 + typedef struct { + const AVClass *class; int factor, fade_per_frame; - unsigned int frame_index, start_frame, stop_frame; + unsigned int frame_index, start_frame, stop_frame, nb_frames; int hsub, vsub, bpp; + unsigned int black_level, black_level_scaled; + uint8_t is_packed_rgb; + uint8_t rgba_map[4]; + int alpha; + + char *type; } FadeContext; +#define OFFSET(x) offsetof(FadeContext, x) +#define FLAGS 
AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption fade_options[] = { + { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_STRING, {.str = "in" }, CHAR_MIN, CHAR_MAX, FLAGS }, + { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_STRING, {.str = "in" }, CHAR_MIN, CHAR_MAX, FLAGS }, + { "start_frame", "set expression of frame to start fading", OFFSET(start_frame), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, FLAGS }, + { "s", "set expression of frame to start fading", OFFSET(start_frame), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, FLAGS }, + { "nb_frames", "set expression for fade duration in frames", OFFSET(nb_frames), AV_OPT_TYPE_INT, {.i64 = 25 }, 0, INT_MAX, FLAGS }, + { "n", "set expression for fade duration in frames", OFFSET(nb_frames), AV_OPT_TYPE_INT, {.i64 = 25 }, 0, INT_MAX, FLAGS }, + { "alpha", "fade alpha if it is available on the input", OFFSET(alpha), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS }, + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(fade); + static av_cold int init(AVFilterContext *ctx, const char *args) { FadeContext *fade = ctx->priv; - unsigned int nb_frames; - char in_out[4]; + static const char *shorthand[] = { "type", "start_frame", "nb_frames", NULL }; + int ret; - if (!args || - sscanf(args, " %3[^:]:%u:%u", in_out, &fade->start_frame, &nb_frames) != 3) { - av_log(ctx, AV_LOG_ERROR, - "Expected 3 arguments '(in|out):#:#':'%s'\n", args); - return AVERROR(EINVAL); - } + fade->class = &fade_class; + av_opt_set_defaults(fade); + + if ((ret = av_opt_set_from_string(fade, args, shorthand, "=", ":")) < 0) + return ret; - nb_frames = nb_frames ? 
nb_frames : 1; - fade->fade_per_frame = (1 << 16) / nb_frames; - if (!strcmp(in_out, "in")) + fade->fade_per_frame = (1 << 16) / fade->nb_frames; + if (!strcmp(fade->type, "in")) fade->factor = 0; - else if (!strcmp(in_out, "out")) { + else if (!strcmp(fade->type, "out")) { fade->fade_per_frame = -fade->fade_per_frame; fade->factor = (1 << 16); } else { av_log(ctx, AV_LOG_ERROR, - "first argument must be 'in' or 'out':'%s'\n", in_out); + "Type argument must be 'in' or 'out' but '%s' was specified\n", fade->type); return AVERROR(EINVAL); } - fade->stop_frame = fade->start_frame + nb_frames; + fade->stop_frame = fade->start_frame + fade->nb_frames; av_log(ctx, AV_LOG_VERBOSE, - "type:%s start_frame:%d nb_frames:%d\n", - in_out, fade->start_frame, nb_frames); + "type:%s start_frame:%d nb_frames:%d alpha:%d\n", + fade->type, fade->start_frame, fade->nb_frames, fade->alpha); return 0; } +static av_cold void uninit(AVFilterContext *ctx) +{ + FadeContext *fade = ctx->priv; + + av_opt_free(fade); +} + static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pix_fmts[] = { @@ -78,7 +120,10 @@ static int query_formats(AVFilterContext *ctx) AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P, + AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, + AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE }; @@ -86,6 +131,13 @@ static int query_formats(AVFilterContext *ctx) return 0; } +const static enum AVPixelFormat studio_level_pix_fmts[] = { + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, + AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, + AV_PIX_FMT_YUV440P, + AV_PIX_FMT_NONE +}; + static int config_props(AVFilterLink *inlink) { FadeContext *fade = inlink->dst->priv; @@ -95,9 +147,37 @@ static int config_props(AVFilterLink *inlink) fade->vsub = pixdesc->log2_chroma_h; fade->bpp = 
av_get_bits_per_pixel(pixdesc) >> 3; + fade->alpha &= pixdesc->flags & PIX_FMT_ALPHA; + fade->is_packed_rgb = ff_fill_rgba_map(fade->rgba_map, inlink->format) >= 0; + + /* use CCIR601/709 black level for studio-level pixel non-alpha components */ + fade->black_level = + ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !fade->alpha ? 16 : 0; + /* 32768 = 1 << 15, it is an integer representation + * of 0.5 and is for rounding. */ + fade->black_level_scaled = (fade->black_level << 16) + 32768; return 0; } +static void fade_plane(int y, int h, int w, + int fade_factor, int black_level, int black_level_scaled, + uint8_t offset, uint8_t step, int bytes_per_plane, + uint8_t *data, int line_size) +{ + uint8_t *p; + int i, j; + + /* luma, alpha or rgb plane */ + for (i = 0; i < h; i++) { + p = data + offset + (y+i) * line_size; + for (j = 0; j < w * bytes_per_plane; j++) { + /* fade->factor is using 16 lower-order bits for decimal places. */ + *p = ((*p - black_level) * fade_factor + black_level_scaled) >> 16; + p+=step; + } + } +} + static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) { FadeContext *fade = inlink->dst->priv; @@ -105,29 +185,33 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) int i, j, plane; if (fade->factor < UINT16_MAX) { - /* luma or rgb plane */ - for (i = 0; i < frame->video->h; i++) { - p = frame->data[0] + i * frame->linesize[0]; - for (j = 0; j < inlink->w * fade->bpp; j++) { - /* fade->factor is using 16 lower-order bits for decimal - * places. 32768 = 1 << 15, it is an integer representation - * of 0.5 and is for rounding. */ - *p = (*p * fade->factor + 32768) >> 16; - p++; - } - } - - if (frame->data[1] && frame->data[2]) { - /* chroma planes */ - for (plane = 1; plane < 3; plane++) { - for (i = 0; i < frame->video->h; i++) { - p = frame->data[plane] + (i >> fade->vsub) * frame->linesize[plane]; - for (j = 0; j < inlink->w >> fade->hsub; j++) { - /* 8421367 = ((128 << 1) + 1) << 15. 
It is an integer - * representation of 128.5. The .5 is for rounding - * purposes. */ - *p = ((*p - 128) * fade->factor + 8421367) >> 16; - p++; + if (fade->alpha) { + // alpha only + plane = fade->is_packed_rgb ? 0 : A; // alpha is on plane 0 for packed formats + // or plane 3 for planar formats + fade_plane(0, frame->video->h, inlink->w, + fade->factor, fade->black_level, fade->black_level_scaled, + fade->is_packed_rgb ? fade->rgba_map[A] : 0, // alpha offset for packed formats + fade->is_packed_rgb ? 4 : 1, // pixstep for 8 bit packed formats + 1, frame->data[plane], frame->linesize[plane]); + } else { + /* luma or rgb plane */ + fade_plane(0, frame->video->h, inlink->w, + fade->factor, fade->black_level, fade->black_level_scaled, + 0, 1, // offset & pixstep for Y plane or RGB packed format + fade->bpp, frame->data[0], frame->linesize[0]); + if (frame->data[1] && frame->data[2]) { + /* chroma planes */ + for (plane = 1; plane < 3; plane++) { + for (i = 0; i < frame->video->h; i++) { + p = frame->data[plane] + (i >> fade->vsub) * frame->linesize[plane]; + for (j = 0; j < inlink->w >> fade->hsub; j++) { + /* 8421367 = ((128 << 1) + 1) << 15. It is an integer + * representation of 128.5. The .5 is for rounding + * purposes. 
*/ + *p = ((*p - 128) * fade->factor + 8421367) >> 16; + p++; + } } } } @@ -151,7 +235,6 @@ static const AVFilterPad avfilter_vf_fade_inputs[] = { .get_video_buffer = ff_null_get_video_buffer, .filter_frame = filter_frame, .min_perms = AV_PERM_READ | AV_PERM_WRITE, - .rej_perms = AV_PERM_PRESERVE, }, { NULL } }; @@ -166,11 +249,13 @@ static const AVFilterPad avfilter_vf_fade_outputs[] = { AVFilter avfilter_vf_fade = { .name = "fade", - .description = NULL_IF_CONFIG_SMALL("Fade in/out input video"), + .description = NULL_IF_CONFIG_SMALL("Fade in/out input video."), .init = init, + .uninit = uninit, .priv_size = sizeof(FadeContext), .query_formats = query_formats, .inputs = avfilter_vf_fade_inputs, .outputs = avfilter_vf_fade_outputs, + .priv_class = &fade_class, }; diff --git a/libavfilter/vf_field.c b/libavfilter/vf_field.c new file mode 100644 index 0000000..67c0025 --- /dev/null +++ b/libavfilter/vf_field.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2003 Rich Felker + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * field filter, based on libmpcodecs/vf_field.c by Rich Felker + */ + +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "avfilter.h" +#include "internal.h" + +enum FieldType { FIELD_TYPE_TOP = 0, FIELD_TYPE_BOTTOM }; + +typedef struct { + const AVClass *class; + enum FieldType type; + int nb_planes; ///< number of planes of the current format +} FieldContext; + +#define OFFSET(x) offsetof(FieldContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption field_options[] = { + {"type", "set field type (top or bottom)", OFFSET(type), AV_OPT_TYPE_INT, {.i64=FIELD_TYPE_TOP}, 0, 1, FLAGS, "field_type" }, + {"top", "select top field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_TOP}, INT_MIN, INT_MAX, FLAGS, "field_type"}, + {"bottom", "select bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field_type"}, + + {NULL} +}; + +AVFILTER_DEFINE_CLASS(field); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + FieldContext *field = ctx->priv; + static const char *shorthand[] = { "type", NULL }; + + field->class = &field_class; + av_opt_set_defaults(field); + + return av_opt_set_from_string(field, args, shorthand, "=", ":"); +} + +static int config_props_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + FieldContext *field = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + int i; + + for (i = 0; i < desc->nb_components; i++) + field->nb_planes = FFMAX(field->nb_planes, desc->comp[i].plane); + field->nb_planes++; + + outlink->w = inlink->w; + outlink->h = (inlink->h + (field->type == FIELD_TYPE_TOP)) / 2; + + av_log(ctx, 
AV_LOG_VERBOSE, "w:%d h:%d type:%s -> w:%d h:%d\n", + inlink->w, inlink->h, field->type == FIELD_TYPE_BOTTOM ? "bottom" : "top", + outlink->w, outlink->h); + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) +{ + FieldContext *field = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + int i; + + inpicref->video->h = outlink->h; + inpicref->video->interlaced = 0; + + for (i = 0; i < field->nb_planes; i++) { + if (field->type == FIELD_TYPE_BOTTOM) + inpicref->data[i] = inpicref->data[i] + inpicref->linesize[i]; + inpicref->linesize[i] = 2 * inpicref->linesize[i]; + } + return ff_filter_frame(outlink, inpicref); +} + +static const AVFilterPad field_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad field_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_props_output, + }, + { NULL } +}; + +AVFilter avfilter_vf_field = { + .name = "field", + .description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."), + + .priv_size = sizeof(FieldContext), + .init = init, + + .inputs = field_inputs, + .outputs = field_outputs, + .priv_class = &field_class, +}; diff --git a/libavfilter/vf_fieldorder.c b/libavfilter/vf_fieldorder.c index 5f0cc3b..06e0369 100644 --- a/libavfilter/vf_fieldorder.c +++ b/libavfilter/vf_fieldorder.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2011 Mark Himsley * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -185,7 +185,6 @@ static const AVFilterPad avfilter_vf_fieldorder_inputs[] = { .get_video_buffer = get_video_buffer, .filter_frame = filter_frame, .min_perms = AV_PERM_READ | AV_PERM_WRITE, - .rej_perms = AV_PERM_REUSE2 | AV_PERM_PRESERVE, }, { NULL } }; diff --git a/libavfilter/vf_format.c b/libavfilter/vf_format.c index 7e4a26e..df3c77a 100644 --- a/libavfilter/vf_format.c +++ b/libavfilter/vf_format.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -29,6 +29,7 @@ #include "libavutil/mem.h" #include "libavutil/pixdesc.h" #include "avfilter.h" +#include "internal.h" #include "formats.h" #include "internal.h" #include "video.h" @@ -48,7 +49,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) FormatContext *format = ctx->priv; const char *cur, *sep; char pix_fmt_name[AV_PIX_FMT_NAME_MAXSIZE]; - int pix_fmt_name_len; + int pix_fmt_name_len, ret; enum AVPixelFormat pix_fmt; /* parse the list of formats */ @@ -64,12 +65,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args) memcpy(pix_fmt_name, cur, pix_fmt_name_len); pix_fmt_name[pix_fmt_name_len] = 0; - pix_fmt = av_get_pix_fmt(pix_fmt_name); - if (pix_fmt == AV_PIX_FMT_NONE) { - av_log(ctx, AV_LOG_ERROR, "Unknown pixel format: %s\n", pix_fmt_name); - return -1; - } + if ((ret = ff_parse_pixel_format(&pix_fmt, pix_fmt_name, ctx)) < 0) + return ret; format->listed_pix_fmt_flags[pix_fmt] = 1; } diff --git a/libavfilter/vf_fps.c b/libavfilter/vf_fps.c index 8fd51bd..29eedc7 100644 --- a/libavfilter/vf_fps.c +++ b/libavfilter/vf_fps.c @@ -1,18 +1,22 @@ /* - * This file is part of Libav. + * Copyright 2007 Bobby Bingham + * Copyright 2012 Robert Nagy <ronag89 gmail com> + * Copyright 2012 Anton Khirnov <anton khirnov net> * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -42,6 +46,7 @@ typedef struct FPSContext { AVRational framerate; ///< target framerate char *fps; ///< a string describing target framerate + int rounding; ///< AVRounding method for timestamps /* statistics */ int frames_in; ///< number of frames on input @@ -52,31 +57,31 @@ typedef struct FPSContext { #define OFFSET(x) offsetof(FPSContext, x) #define V AV_OPT_FLAG_VIDEO_PARAM -static const AVOption options[] = { - { "fps", "A string describing desired output framerate", OFFSET(fps), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = V }, +#define F AV_OPT_FLAG_FILTERING_PARAM +static const AVOption fps_options[] = { + { "fps", "A string describing desired output framerate", OFFSET(fps), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = V|F }, + { "round", "set rounding method for timestamps", OFFSET(rounding), AV_OPT_TYPE_INT, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" }, + { "zero", "round towards 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_ZERO }, 0, 5, V|F, "round" }, + { "inf", "round away from 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_INF }, 0, 5, V|F, "round" }, + { "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" }, + { "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" }, + { "near", "round to nearest", OFFSET(rounding), 
AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" }, { NULL }, }; -static const AVClass class = { - .class_name = "FPS filter", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; +AVFILTER_DEFINE_CLASS(fps); static av_cold int init(AVFilterContext *ctx, const char *args) { FPSContext *s = ctx->priv; + const char *shorthand[] = { "fps", "round", NULL }; int ret; - s->class = &class; + s->class = &fps_class; av_opt_set_defaults(s); - if ((ret = av_set_options_string(s, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing the options string %s.\n", - args); + if ((ret = av_opt_set_from_string(s, args, shorthand, "=", ":")) < 0) return ret; - } if ((ret = av_parse_video_rate(&s->framerate, s->fps)) < 0) { av_log(ctx, AV_LOG_ERROR, "Error parsing framerate %s.\n", s->fps); @@ -117,7 +122,8 @@ static int config_props(AVFilterLink* link) { FPSContext *s = link->src->priv; - link->time_base = (AVRational){ s->framerate.den, s->framerate.num }; + link->time_base = av_inv_q(s->framerate); + link->frame_rate= s->framerate; link->w = link->src->inputs[0]->w; link->h = link->src->inputs[0]->h; s->pts = AV_NOPTS_VALUE; @@ -202,8 +208,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) } /* number of output frames */ - delta = av_rescale_q(buf->pts - s->pts, inlink->time_base, - outlink->time_base); + delta = av_rescale_q_rnd(buf->pts - s->pts, inlink->time_base, + outlink->time_base, s->rounding); if (delta < 1) { /* drop the frame and everything buffered except the first */ @@ -228,7 +234,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) /* duplicate the frame if needed */ if (!av_fifo_size(s->fifo) && i < delta - 1) { - AVFilterBufferRef *dup = avfilter_ref_buffer(buf_out, AV_PERM_READ); + AVFilterBufferRef *dup = avfilter_ref_buffer(buf_out, ~0); av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n"); if (dup) @@ -267,6 +273,7 @@ static const AVFilterPad 
avfilter_vf_fps_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, .filter_frame = filter_frame, }, { NULL } @@ -276,6 +283,7 @@ static const AVFilterPad avfilter_vf_fps_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, + .rej_perms = AV_PERM_WRITE, .request_frame = request_frame, .config_props = config_props }, @@ -293,4 +301,5 @@ AVFilter avfilter_vf_fps = { .inputs = avfilter_vf_fps_inputs, .outputs = avfilter_vf_fps_outputs, + .priv_class = &fps_class, }; diff --git a/libavfilter/vf_framestep.c b/libavfilter/vf_framestep.c new file mode 100644 index 0000000..f848196 --- /dev/null +++ b/libavfilter/vf_framestep.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file framestep filter, inspired on libmpcodecs/vf_framestep.c by + * Daniele Fornighieri <guru AT digitalfantasy it>. 
+ */ + +#include "avfilter.h" +#include "internal.h" +#include "video.h" + +typedef struct { + int frame_step, frame_count, frame_selected; +} FrameStepContext; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + FrameStepContext *framestep = ctx->priv; + char *tailptr; + long int n = 1; + + if (args) { + n = strtol(args, &tailptr, 10); + if (*tailptr || n <= 0 || n >= INT_MAX) { + av_log(ctx, AV_LOG_ERROR, + "Invalid argument '%s', must be a positive integer <= INT_MAX\n", args); + return AVERROR(EINVAL); + } + } + + framestep->frame_step = n; + return 0; +} + +static int config_output_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + FrameStepContext *framestep = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + + outlink->frame_rate = + av_div_q(inlink->frame_rate, (AVRational){framestep->frame_step, 1}); + + av_log(ctx, AV_LOG_VERBOSE, "step:%d frame_rate:%d/%d(%f) -> frame_rate:%d/%d(%f)\n", + framestep->frame_step, + inlink->frame_rate.num, inlink->frame_rate.den, av_q2d(inlink->frame_rate), + outlink->frame_rate.num, outlink->frame_rate.den, av_q2d(outlink->frame_rate)); + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) +{ + FrameStepContext *framestep = inlink->dst->priv; + + if (!(framestep->frame_count++ % framestep->frame_step)) { + framestep->frame_selected = 1; + return ff_filter_frame(inlink->dst->outputs[0], ref); + } else { + framestep->frame_selected = 0; + avfilter_unref_buffer(ref); + return 0; + } +} + +static int request_frame(AVFilterLink *outlink) +{ + FrameStepContext *framestep = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + int ret; + + framestep->frame_selected = 0; + do { + ret = ff_request_frame(inlink); + } while (!framestep->frame_selected && ret >= 0); + + return ret; +} + +static const AVFilterPad framestep_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + 
.filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad framestep_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_framestep = { + .name = "framestep", + .description = NULL_IF_CONFIG_SMALL("Select one frame every N frames."), + .init = init, + .priv_size = sizeof(FrameStepContext), + .inputs = framestep_inputs, + .outputs = framestep_outputs, +}; diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c index 955d0b9..7ed78fa 100644 --- a/libavfilter/vf_frei0r.c +++ b/libavfilter/vf_frei0r.c @@ -1,19 +1,19 @@ /* * Copyright (c) 2010 Stefano Sabatini - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -204,13 +204,15 @@ static int set_params(AVFilterContext *ctx, const char *params) return 0; } -static void *load_path(AVFilterContext *ctx, const char *prefix, const char *name) +static int load_path(AVFilterContext *ctx, void **handle_ptr, const char *prefix, const char *name) { - char path[1024]; - - snprintf(path, sizeof(path), "%s%s%s", prefix, name, SLIBSUF); + char *path = av_asprintf("%s%s%s", prefix, name, SLIBSUF); + if (!path) + return AVERROR(ENOMEM); av_log(ctx, AV_LOG_DEBUG, "Looking for frei0r effect in '%s'\n", path); - return dlopen(path, RTLD_NOW|RTLD_LOCAL); + *handle_ptr = dlopen(path, RTLD_NOW|RTLD_LOCAL); + av_free(path); + return 0; } static av_cold int frei0r_init(AVFilterContext *ctx, @@ -221,24 +223,55 @@ static av_cold int frei0r_init(AVFilterContext *ctx, f0r_get_plugin_info_f f0r_get_plugin_info; f0r_plugin_info_t *pi; char *path; + int ret = 0; - /* see: http://piksel.org/frei0r/1.2/spec/1.2/spec/group__pluglocations.html */ + /* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */ if ((path = av_strdup(getenv("FREI0R_PATH")))) { +#ifdef _WIN32 + const char *separator = ";"; +#else + const char *separator = ":"; +#endif char *p, *ptr = NULL; - for (p = path; p = strtok_r(p, ":", &ptr); p = NULL) - if (frei0r->dl_handle = load_path(ctx, p, dl_name)) + for (p = path; p = av_strtok(p, separator, &ptr); p = NULL) { + /* add additional trailing slash in case it is missing */ + char *p1 = av_asprintf("%s/", p); + if (!p1) { + ret = AVERROR(ENOMEM); + goto check_path_end; + } + ret = load_path(ctx, &frei0r->dl_handle, p1, dl_name); + av_free(p1); + if (ret < 0) + goto check_path_end; + if (frei0r->dl_handle) break; + } + + check_path_end: 
av_free(path); + if (ret < 0) + return ret; } if (!frei0r->dl_handle && (path = getenv("HOME"))) { - char prefix[1024]; - snprintf(prefix, sizeof(prefix), "%s/.frei0r-1/lib/", path); - frei0r->dl_handle = load_path(ctx, prefix, dl_name); + char *prefix = av_asprintf("%s/.frei0r-1/lib/", path); + if (!prefix) + return AVERROR(ENOMEM); + ret = load_path(ctx, &frei0r->dl_handle, prefix, dl_name); + av_free(prefix); + if (ret < 0) + return ret; + } + if (!frei0r->dl_handle) { + ret = load_path(ctx, &frei0r->dl_handle, "/usr/local/lib/frei0r-1/", dl_name); + if (ret < 0) + return ret; + } + if (!frei0r->dl_handle) { + ret = load_path(ctx, &frei0r->dl_handle, "/usr/lib/frei0r-1/", dl_name); + if (ret < 0) + return ret; } - if (!frei0r->dl_handle) - frei0r->dl_handle = load_path(ctx, "/usr/local/lib/frei0r-1/", dl_name); - if (!frei0r->dl_handle) - frei0r->dl_handle = load_path(ctx, "/usr/lib/frei0r-1/", dl_name); if (!frei0r->dl_handle) { av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'\n", dl_name); return AVERROR(EINVAL); @@ -256,7 +289,7 @@ static av_cold int frei0r_init(AVFilterContext *ctx, return AVERROR(EINVAL); if (f0r_init() < 0) { - av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module"); + av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module\n"); return AVERROR(EINVAL); } @@ -316,7 +349,7 @@ static int config_input_props(AVFilterLink *inlink) Frei0rContext *frei0r = ctx->priv; if (!(frei0r->instance = frei0r->construct(inlink->w, inlink->h))) { - av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance"); + av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n"); return AVERROR(EINVAL); } @@ -421,8 +454,7 @@ static av_cold int source_init(AVFilterContext *ctx, const char *args) return AVERROR(EINVAL); } - if (av_parse_video_rate(&frame_rate_q, frame_rate) < 0 || - frame_rate_q.den <= 0 || frame_rate_q.num <= 0) { + if (av_parse_video_rate(&frame_rate_q, frame_rate) < 0) { av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", 
frame_rate); return AVERROR(EINVAL); } @@ -442,9 +474,10 @@ static int source_config_props(AVFilterLink *outlink) outlink->w = frei0r->w; outlink->h = frei0r->h; outlink->time_base = frei0r->time_base; + outlink->sample_aspect_ratio = (AVRational){1,1}; if (!(frei0r->instance = frei0r->construct(outlink->w, outlink->h))) { - av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance"); + av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n"); return AVERROR(EINVAL); } @@ -459,7 +492,7 @@ static int source_request_frame(AVFilterLink *outlink) if (!picref) return AVERROR(ENOMEM); - picref->video->pixel_aspect = (AVRational) {1, 1}; + picref->video->sample_aspect_ratio = (AVRational) {1, 1}; picref->pts = frei0r->pts++; picref->pos = -1; diff --git a/libavfilter/vf_geq.c b/libavfilter/vf_geq.c new file mode 100644 index 0000000..2f7b50b --- /dev/null +++ b/libavfilter/vf_geq.c @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at> + * Copyright (C) 2012 Clément Bœsch <ubitux@gmail.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * Generic equation change filter + * Originally written by Michael Niedermayer for the MPlayer project, and + * ported by Clément Bœsch for FFmpeg. 
+ */ + +#include "libavutil/avstring.h" +#include "libavutil/eval.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + AVExpr *e[3]; ///< expressions for each plane + char *expr_str[3]; ///< expression strings for each plane + int framenum; ///< frame counter + AVFilterBufferRef *picref; ///< current input buffer + int hsub, vsub; ///< chroma subsampling +} GEQContext; + +#define OFFSET(x) offsetof(GEQContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption geq_options[] = { + { "lum_expr", "set luminance expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "cb_expr", "set chroma blue expression", OFFSET(expr_str) + sizeof(char*), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "cr_expr", "set chroma red expression", OFFSET(expr_str) + 2*sizeof(char*), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(geq); + +static inline double getpix(void *priv, double x, double y, int plane) +{ + int xi, yi; + GEQContext *geq = priv; + AVFilterBufferRef *picref = geq->picref; + const uint8_t *src = picref->data[plane]; + const int linesize = picref->linesize[plane]; + const int w = picref->video->w >> (plane ? geq->hsub : 0); + const int h = picref->video->h >> (plane ? 
geq->vsub : 0); + + xi = x = av_clipf(x, 0, w - 2); + yi = y = av_clipf(y, 0, h - 2); + + x -= xi; + y -= yi; + + return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize]) + + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]); +} + +//TODO: cubic interpolate +//TODO: keep the last few frames +static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); } +static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1); } +static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); } + +static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL }; +enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_T, VAR_VARS_NB }; + +static av_cold int geq_init(AVFilterContext *ctx, const char *args) +{ + GEQContext *geq = ctx->priv; + int plane, ret = 0; + static const char *shorthand[] = { "lum_expr", "cb_expr", "cr_expr", NULL }; + + geq->class = &geq_class; + av_opt_set_defaults(geq); + + if ((ret = av_opt_set_from_string(geq, args, shorthand, "=", ":")) < 0) + return ret; + + if (!geq->expr_str[0]) { + av_log(ctx, AV_LOG_ERROR, "Luminance expression is mandatory\n"); + ret = AVERROR(EINVAL); + goto end; + } + + if (!geq->expr_str[1] && !geq->expr_str[2]) { + /* No chroma at all: fallback on luma */ + geq->expr_str[1] = av_strdup(geq->expr_str[0]); + geq->expr_str[2] = av_strdup(geq->expr_str[0]); + } else { + /* One chroma unspecified, fallback on the other */ + if (!geq->expr_str[1]) geq->expr_str[1] = av_strdup(geq->expr_str[2]); + if (!geq->expr_str[2]) geq->expr_str[2] = av_strdup(geq->expr_str[1]); + } + + if (!geq->expr_str[1] || !geq->expr_str[2]) { + ret = AVERROR(ENOMEM); + goto end; + } + + for (plane = 0; plane < 3; plane++) { + static double (*p[])(void *, double, double) = { lum, cb, cr }; + static const char *const func2_names[] = { "lum", "cb", "cr", "p", NULL }; + double (*func2[])(void *, double, double) = { lum, cb, 
cr, p[plane], NULL }; + + ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane], var_names, + NULL, NULL, func2_names, func2, 0, ctx); + if (ret < 0) + break; + } + +end: + return ret; +} + +static int geq_query_formats(AVFilterContext *ctx) +{ + static const enum PixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, + AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, + AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_NONE + }; + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int geq_config_props(AVFilterLink *inlink) +{ + GEQContext *geq = inlink->dst->priv; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + + geq->hsub = desc->log2_chroma_w; + geq->vsub = desc->log2_chroma_h; + return 0; +} + +static int geq_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) +{ + int plane; + GEQContext *geq = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *out; + double values[VAR_VARS_NB] = { + [VAR_N] = geq->framenum++, + [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base), + }; + + geq->picref = in; + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!out) { + avfilter_unref_bufferp(&in); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(out, in); + + for (plane = 0; plane < 3; plane++) { + int x, y; + uint8_t *dst = out->data[plane]; + const int linesize = out->linesize[plane]; + const int w = inlink->w >> (plane ? geq->hsub : 0); + const int h = inlink->h >> (plane ? 
geq->vsub : 0); + + values[VAR_W] = w; + values[VAR_H] = h; + values[VAR_SW] = w / (double)inlink->w; + values[VAR_SH] = h / (double)inlink->h; + + for (y = 0; y < h; y++) { + values[VAR_Y] = y; + for (x = 0; x < w; x++) { + values[VAR_X] = x; + dst[x] = av_expr_eval(geq->e[plane], values, geq); + } + dst += linesize; + } + } + + avfilter_unref_bufferp(&geq->picref); + return ff_filter_frame(outlink, out); +} + +static av_cold void geq_uninit(AVFilterContext *ctx) +{ + int i; + GEQContext *geq = ctx->priv; + + for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++) + av_expr_free(geq->e[i]); + av_opt_free(geq); +} + +static const AVFilterPad geq_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = geq_config_props, + .filter_frame = geq_filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad geq_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_geq = { + .name = "geq", + .description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."), + .priv_size = sizeof(GEQContext), + .init = geq_init, + .uninit = geq_uninit, + .query_formats = geq_query_formats, + .inputs = geq_inputs, + .outputs = geq_outputs, + .priv_class = &geq_class, +}; diff --git a/libavfilter/vf_gradfun.c b/libavfilter/vf_gradfun.c index 2c9a976..13154f0 100644 --- a/libavfilter/vf_gradfun.c +++ b/libavfilter/vf_gradfun.c @@ -2,20 +2,20 @@ * Copyright (c) 2010 Nolan Lum <nol888@gmail.com> * Copyright (c) 2009 Loren Merritt <lorenm@u.washignton.edu> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -36,12 +36,24 @@ #include "libavutil/common.h" #include "libavutil/cpu.h" #include "libavutil/pixdesc.h" +#include "libavutil/opt.h" #include "avfilter.h" #include "formats.h" #include "gradfun.h" #include "internal.h" #include "video.h" +#define OFFSET(x) offsetof(GradFunContext, x) +#define F AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption gradfun_options[] = { + { "strength", "set the maximum amount by which the filter will change any one pixel", OFFSET(strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.2}, 0.51, 64, F }, + { "radius", "set the neighborhood to fit the gradient to", OFFSET(radius), AV_OPT_TYPE_INT, {.i64 = 16}, 4, 32, F }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(gradfun); + DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = { {0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E}, {0x40,0x20,0x58,0x38,0x46,0x26,0x5E,0x3E}, @@ -53,10 +65,10 @@ DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = { {0x54,0x34,0x4C,0x2C,0x52,0x32,0x4A,0x2A}, }; -void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers) +void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers) { int x; - for (x = 0; x < width; x++, dc += x & 1) { + for (x = 0; x < width; dc += x & 1, x++) { int pix = src[x] << 7; int delta = dc[0] - pix; int m = 
abs(delta) * thresh >> 16; @@ -67,7 +79,7 @@ void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int widt } } -void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width) +void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width) { int x, v, old; for (x = 0; x < width; x++) { @@ -78,7 +90,7 @@ void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t } } -static void filter(GradFunContext *ctx, uint8_t *dst, uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r) +static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r) { int bstride = FFALIGN(width, 16) / 2; int y; @@ -121,24 +133,26 @@ static void filter(GradFunContext *ctx, uint8_t *dst, uint8_t *src, int width, i static av_cold int init(AVFilterContext *ctx, const char *args) { + int ret; GradFunContext *gf = ctx->priv; - float thresh = 1.2; - int radius = 16; + static const char *shorthand[] = { "strength", "radius", NULL }; + + gf->class = &gradfun_class; + av_opt_set_defaults(gf); - if (args) - sscanf(args, "%f:%d", &thresh, &radius); + if ((ret = av_opt_set_from_string(gf, args, shorthand, "=", ":")) < 0) + return ret; - thresh = av_clipf(thresh, 0.51, 255); - gf->thresh = (1 << 15) / thresh; - gf->radius = av_clip((radius + 1) & ~1, 4, 32); + gf->thresh = (1 << 15) / gf->strength; + gf->radius = av_clip((gf->radius + 1) & ~1, 4, 32); - gf->blur_line = ff_gradfun_blur_line_c; + gf->blur_line = ff_gradfun_blur_line_c; gf->filter_line = ff_gradfun_filter_line_c; if (ARCH_X86) ff_gradfun_init_x86(gf); - av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", thresh, gf->radius); + av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", gf->strength, gf->radius); return 0; } @@ -156,6 +170,7 @@ static int query_formats(AVFilterContext 
*ctx) AV_PIX_FMT_GRAY8, AV_PIX_FMT_NV12, AV_PIX_FMT_NV21, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P, + AV_PIX_FMT_YUV440P, AV_PIX_FMT_NONE }; @@ -187,9 +202,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) GradFunContext *gf = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; AVFilterBufferRef *out; - int p, direct; + int p, direct = 0; - if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) { + if (in->perms & AV_PERM_WRITE) { direct = 1; out = in; } else { @@ -198,10 +213,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) avfilter_unref_bufferp(&in); return AVERROR(ENOMEM); } - avfilter_copy_buffer_ref_props(out, in); - out->video->w = outlink->w; - out->video->h = outlink->h; } for (p = 0; p < 4 && in->data[p]; p++) { @@ -252,7 +264,7 @@ AVFilter avfilter_vf_gradfun = { .init = init, .uninit = uninit, .query_formats = query_formats, - - .inputs = avfilter_vf_gradfun_inputs, - .outputs = avfilter_vf_gradfun_outputs, + .inputs = avfilter_vf_gradfun_inputs, + .outputs = avfilter_vf_gradfun_outputs, + .priv_class = &gradfun_class, }; diff --git a/libavfilter/vf_hflip.c b/libavfilter/vf_hflip.c index 85a1d92..c583ffd 100644 --- a/libavfilter/vf_hflip.c +++ b/libavfilter/vf_hflip.c @@ -2,20 +2,20 @@ * Copyright (c) 2007 Benoit Fouet * Copyright (c) 2010 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -50,8 +50,10 @@ static int query_formats(AVFilterContext *ctx) AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE, AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE, + AV_PIX_FMT_RGB444BE, AV_PIX_FMT_RGB444LE, AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE, AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE, + AV_PIX_FMT_BGR444BE, AV_PIX_FMT_BGR444LE, AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE, AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE, @@ -100,6 +102,10 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) } avfilter_copy_buffer_ref_props(out, in); + /* copy palette if required */ + if (av_pix_fmt_desc_get(inlink->format)->flags & PIX_FMT_PAL) + memcpy(out->data[1], in->data[1], AVPALETTE_SIZE); + for (plane = 0; plane < 4 && in->data[plane]; plane++) { step = flip->max_step[plane]; hsub = (plane == 1 || plane == 2) ? flip->hsub : 0; diff --git a/libavfilter/vf_histeq.c b/libavfilter/vf_histeq.c new file mode 100644 index 0000000..556680c --- /dev/null +++ b/libavfilter/vf_histeq.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2012 Jeremy Tran + * Copyright (c) 2001 Donald A. Graft + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * Histogram equalization filter, based on the VirtualDub filter by + * Donald A. Graft <neuron2 AT home DOT com>. + * Implements global automatic contrast adjustment by means of + * histogram equalization. + */ + +#include "libavutil/common.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" + +#include "avfilter.h" +#include "drawutils.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +// #define DEBUG + +// Linear Congruential Generator, see "Numerical Recipes" +#define LCG_A 4096 +#define LCG_C 150889 +#define LCG_M 714025 +#define LCG(x) (((x) * LCG_A + LCG_C) % LCG_M) +#define LCG_SEED 739187 + +enum HisteqAntibanding { + HISTEQ_ANTIBANDING_NONE = 0, + HISTEQ_ANTIBANDING_WEAK = 1, + HISTEQ_ANTIBANDING_STRONG = 2, + HISTEQ_ANTIBANDING_NB, +}; + +typedef struct { + const AVClass *class; + float strength; + float intensity; + enum HisteqAntibanding antibanding; + char* antibanding_str; + int in_histogram [256]; ///< input histogram + int out_histogram[256]; ///< output histogram + int LUT[256]; ///< lookup table derived from histogram[] + uint8_t rgba_map[4]; ///< components position + int bpp; ///< bytes per pixel +} HisteqContext; + +#define OFFSET(x) offsetof(HisteqContext, x) +#define FLAGS 
AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit } + +static const AVOption histeq_options[] = { + { "strength", "set the strength", OFFSET(strength), AV_OPT_TYPE_FLOAT, {.dbl=0.2}, 0, 1, FLAGS }, + { "intensity", "set the intensity", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0.21}, 0, 1, FLAGS }, + { "antibanding", "set the antibanding level", OFFSET(antibanding), AV_OPT_TYPE_INT, {.i64=HISTEQ_ANTIBANDING_NONE}, 0, HISTEQ_ANTIBANDING_NB-1, FLAGS, "antibanding" }, + CONST("none", "apply no antibanding", HISTEQ_ANTIBANDING_NONE, "antibanding"), + CONST("weak", "apply weak antibanding", HISTEQ_ANTIBANDING_WEAK, "antibanding"), + CONST("strong", "apply strong antibanding", HISTEQ_ANTIBANDING_STRONG, "antibanding"), + { NULL } +}; + +AVFILTER_DEFINE_CLASS(histeq); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + HisteqContext *histeq = ctx->priv; + const char *shorthand[] = { "strength", "intensity", "antibanding", NULL }; + int ret; + + histeq->class = &histeq_class; + av_opt_set_defaults(histeq); + + if ((ret = av_opt_set_from_string(histeq, args, shorthand, "=", ":")) < 0) + return ret; + + av_log(ctx, AV_LOG_VERBOSE, + "strength:%0.3f intensity:%0.3f antibanding:%d\n", + histeq->strength, histeq->intensity, histeq->antibanding); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + HisteqContext *histeq = ctx->priv; + av_opt_free(histeq); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum PixelFormat pix_fmts[] = { + AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, + AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + HisteqContext *histeq = ctx->priv; + const AVPixFmtDescriptor 
*pix_desc = av_pix_fmt_desc_get(inlink->format); + + histeq->bpp = av_get_bits_per_pixel(pix_desc) / 8; + ff_fill_rgba_map(histeq->rgba_map, inlink->format); + + return 0; +} + +#define R 0 +#define G 1 +#define B 2 +#define A 3 + +#define GET_RGB_VALUES(r, g, b, src, map) do { \ + r = src[x + map[R]]; \ + g = src[x + map[G]]; \ + b = src[x + map[B]]; \ +} while (0) + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) +{ + AVFilterContext *ctx = inlink->dst; + HisteqContext *histeq = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + int strength = histeq->strength * 1000; + int intensity = histeq->intensity * 1000; + int x, y, i, luthi, lutlo, lut, luma, oluma, m; + AVFilterBufferRef *outpic; + unsigned int r, g, b, jran; + uint8_t *src, *dst; + + outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); + if (!outpic) { + avfilter_unref_bufferp(&inpic); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(outpic, inpic); + + /* Seed random generator for antibanding. */ + jran = LCG_SEED; + + /* Calculate and store the luminance and calculate the global histogram + based on the luminance. */ + memset(histeq->in_histogram, 0, sizeof(histeq->in_histogram)); + src = inpic->data[0]; + dst = outpic->data[0]; + for (y = 0; y < inlink->h; y++) { + for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) { + GET_RGB_VALUES(r, g, b, src, histeq->rgba_map); + luma = (55 * r + 182 * g + 19 * b) >> 8; + dst[x + histeq->rgba_map[A]] = luma; + histeq->in_histogram[luma]++; + } + src += inpic->linesize[0]; + dst += outpic->linesize[0]; + } + +#ifdef DEBUG + for (x = 0; x < 256; x++) + av_dlog(ctx, "in[%d]: %u\n", x, histeq->in_histogram[x]); +#endif + + /* Calculate the lookup table. 
*/ + histeq->LUT[0] = histeq->in_histogram[0]; + /* Accumulate */ + for (x = 1; x < 256; x++) + histeq->LUT[x] = histeq->LUT[x-1] + histeq->in_histogram[x]; + + /* Normalize */ + for (x = 0; x < 256; x++) + histeq->LUT[x] = (histeq->LUT[x] * intensity) / (inlink->h * inlink->w); + + /* Adjust the LUT based on the selected strength. This is an alpha + mix of the calculated LUT and a linear LUT with gain 1. */ + for (x = 0; x < 256; x++) + histeq->LUT[x] = (strength * histeq->LUT[x]) / 255 + + ((255 - strength) * x) / 255; + + /* Output the equalized frame. */ + memset(histeq->out_histogram, 0, sizeof(histeq->out_histogram)); + + src = inpic->data[0]; + dst = outpic->data[0]; + for (y = 0; y < inlink->h; y++) { + for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) { + luma = dst[x + histeq->rgba_map[A]]; + if (luma == 0) { + for (i = 0; i < histeq->bpp; ++i) + dst[x + i] = 0; + histeq->out_histogram[0]++; + } else { + lut = histeq->LUT[luma]; + if (histeq->antibanding != HISTEQ_ANTIBANDING_NONE) { + if (luma > 0) { + lutlo = histeq->antibanding == HISTEQ_ANTIBANDING_WEAK ? + (histeq->LUT[luma] + histeq->LUT[luma - 1]) / 2 : + histeq->LUT[luma - 1]; + } else + lutlo = lut; + + if (luma < 255) { + luthi = (histeq->antibanding == HISTEQ_ANTIBANDING_WEAK) ? 
+ (histeq->LUT[luma] + histeq->LUT[luma + 1]) / 2 : + histeq->LUT[luma + 1]; + } else + luthi = lut; + + if (lutlo != luthi) { + jran = LCG(jran); + lut = lutlo + ((luthi - lutlo + 1) * jran) / LCG_M; + } + } + + GET_RGB_VALUES(r, g, b, src, histeq->rgba_map); + if (((m = FFMAX3(r, g, b)) * lut) / luma > 255) { + r = (r * 255) / m; + g = (g * 255) / m; + b = (b * 255) / m; + } else { + r = (r * lut) / luma; + g = (g * lut) / luma; + b = (b * lut) / luma; + } + dst[x + histeq->rgba_map[R]] = r; + dst[x + histeq->rgba_map[G]] = g; + dst[x + histeq->rgba_map[B]] = b; + oluma = (55 * r + 182 * g + 19 * b) >> 8; + histeq->out_histogram[oluma]++; + } + } + src += inpic->linesize[0]; + dst += outpic->linesize[0]; + } +#ifdef DEBUG + for (x = 0; x < 256; x++) + av_dlog(ctx, "out[%d]: %u\n", x, histeq->out_histogram[x]); +#endif + + avfilter_unref_bufferp(&inpic); + return ff_filter_frame(outlink, outpic); +} + +static const AVFilterPad histeq_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad histeq_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_histeq = { + .name = "histeq", + .description = NULL_IF_CONFIG_SMALL("Apply global color histogram equalization."), + .priv_size = sizeof(HisteqContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + + .inputs = histeq_inputs, + .outputs = histeq_outputs, + .priv_class = &histeq_class, +}; diff --git a/libavfilter/vf_hqdn3d.c b/libavfilter/vf_hqdn3d.c index 6161b5e..4381586 100644 --- a/libavfilter/vf_hqdn3d.c +++ b/libavfilter/vf_hqdn3d.c @@ -3,20 +3,20 @@ * Copyright (c) 2010 Baptiste Coudurier * Copyright (c) 2012 Loren Merritt * - * This file is part of Libav, ported from MPlayer. + * This file is part of FFmpeg, ported from MPlayer. 
* - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ @@ -37,10 +37,9 @@ #include "vf_hqdn3d.h" #define LUT_BITS (depth==16 ? 8 : 4) -#define RIGHTSHIFT(a,b) (((a)+(((1<<(b))-1)>>1))>>(b)) -#define LOAD(x) ((depth==8 ? src[x] : AV_RN16A(src+(x)*2)) << (16-depth)) -#define STORE(x,val) (depth==8 ? dst[x] = RIGHTSHIFT(val, 16-depth)\ - : AV_WN16A(dst+(x)*2, RIGHTSHIFT(val, 16-depth))) +#define LOAD(x) (((depth==8 ? src[x] : AV_RN16A(src+(x)*2)) << (16-depth)) + (((1<<(16-depth))-1)>>1)) +#define STORE(x,val) (depth==8 ? 
dst[x] = (val) >> (16-depth)\ + : AV_WN16A(dst+(x)*2, (val) >> (16-depth))) av_always_inline static uint32_t lowpass(int prev, int cur, int16_t *coef, int depth) @@ -309,10 +308,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) { HQDN3DContext *hqdn3d = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *out; - int direct, c; + int direct = 0, c; - if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) { + if (in->perms & AV_PERM_WRITE) { direct = 1; out = in; } else { @@ -321,10 +321,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) avfilter_unref_bufferp(&in); return AVERROR(ENOMEM); } - avfilter_copy_buffer_ref_props(out, in); - out->video->w = outlink->w; - out->video->h = outlink->h; } for (c = 0; c < 3; c++) { @@ -352,6 +349,7 @@ static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = { { NULL } }; + static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = { { .name = "default", diff --git a/libavfilter/vf_hqdn3d.h b/libavfilter/vf_hqdn3d.h index 7350f74..dfc69e1 100644 --- a/libavfilter/vf_hqdn3d.h +++ b/libavfilter/vf_hqdn3d.h @@ -1,18 +1,22 @@ /* - * This file is part of Libav. + * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org> + * Copyright (c) 2010 Baptiste Coudurier + * Copyright (c) 2012 Loren Merritt * - * Libav is free software; you can redistribute it and/or modify + * This file is part of FFmpeg, ported from MPlayer. + * + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c new file mode 100644 index 0000000..7bc3d37 --- /dev/null +++ b/libavfilter/vf_hue.c @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2003 Michael Niedermayer + * Copyright (c) 2012 Jeremy Tran + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * Apply a hue/saturation filter to the input video + * Ported from MPlayer libmpcodecs/vf_hue.c. 
+ */ + +#include <float.h> +#include "libavutil/eval.h" +#include "libavutil/imgutils.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" + +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +#define HUE_DEFAULT_VAL 0 +#define SAT_DEFAULT_VAL 1 + +#define HUE_DEFAULT_VAL_STRING AV_STRINGIFY(HUE_DEFAULT_VAL) +#define SAT_DEFAULT_VAL_STRING AV_STRINGIFY(SAT_DEFAULT_VAL) + +#define SAT_MIN_VAL -10 +#define SAT_MAX_VAL 10 + +static const char *const var_names[] = { + "n", // frame count + "pts", // presentation timestamp expressed in AV_TIME_BASE units + "r", // frame rate + "t", // timestamp expressed in seconds + "tb", // timebase + NULL +}; + +enum var_name { + VAR_N, + VAR_PTS, + VAR_R, + VAR_T, + VAR_TB, + VAR_NB +}; + +typedef struct { + const AVClass *class; + float hue_deg; /* hue expressed in degrees */ + float hue; /* hue expressed in radians */ + char *hue_deg_expr; + char *hue_expr; + AVExpr *hue_deg_pexpr; + AVExpr *hue_pexpr; + float saturation; + char *saturation_expr; + AVExpr *saturation_pexpr; + int hsub; + int vsub; + int32_t hue_sin; + int32_t hue_cos; + int flat_syntax; + double var_values[VAR_NB]; +} HueContext; + +#define OFFSET(x) offsetof(HueContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +static const AVOption hue_options[] = { + { "h", "set the hue angle degrees expression", OFFSET(hue_deg_expr), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS }, + { "H", "set the hue angle radians expression", OFFSET(hue_expr), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS }, + { "s", "set the saturation expression", OFFSET(saturation_expr), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(hue); + +static inline void compute_sin_and_cos(HueContext *hue) +{ + /* + * Scale the value to the norm of the resulting (U,V) vector, that is + * the saturation. + * This will be useful in the process_chrominance function. 
+ */ + hue->hue_sin = rint(sin(hue->hue) * (1 << 16) * hue->saturation); + hue->hue_cos = rint(cos(hue->hue) * (1 << 16) * hue->saturation); +} + +#define SET_EXPRESSION(attr, name) do { \ + if (hue->attr##_expr) { \ + if ((ret = av_expr_parse(&hue->attr##_pexpr, hue->attr##_expr, var_names, \ + NULL, NULL, NULL, NULL, 0, ctx)) < 0) { \ + av_log(ctx, AV_LOG_ERROR, \ + "Parsing failed for expression " #name "='%s'", \ + hue->attr##_expr); \ + hue->attr##_expr = old_##attr##_expr; \ + hue->attr##_pexpr = old_##attr##_pexpr; \ + return AVERROR(EINVAL); \ + } else if (old_##attr##_pexpr) { \ + av_freep(&old_##attr##_expr); \ + av_expr_free(old_##attr##_pexpr); \ + old_##attr##_pexpr = NULL; \ + } \ + } else { \ + hue->attr##_expr = old_##attr##_expr; \ + } \ +} while (0) + +static inline int set_options(AVFilterContext *ctx, const char *args) +{ + HueContext *hue = ctx->priv; + int n, ret; + char c1 = 0, c2 = 0; + char *old_hue_expr, *old_hue_deg_expr, *old_saturation_expr; + AVExpr *old_hue_pexpr, *old_hue_deg_pexpr, *old_saturation_pexpr; + + if (args) { + /* named options syntax */ + if (strchr(args, '=')) { + old_hue_expr = hue->hue_expr; + old_hue_deg_expr = hue->hue_deg_expr; + old_saturation_expr = hue->saturation_expr; + + old_hue_pexpr = hue->hue_pexpr; + old_hue_deg_pexpr = hue->hue_deg_pexpr; + old_saturation_pexpr = hue->saturation_pexpr; + + hue->hue_expr = NULL; + hue->hue_deg_expr = NULL; + hue->saturation_expr = NULL; + + if ((ret = av_set_options_string(hue, args, "=", ":")) < 0) + return ret; + if (hue->hue_expr && hue->hue_deg_expr) { + av_log(ctx, AV_LOG_ERROR, + "H and h options are incompatible and cannot be specified " + "at the same time\n"); + hue->hue_expr = old_hue_expr; + hue->hue_deg_expr = old_hue_deg_expr; + + return AVERROR(EINVAL); + } + + SET_EXPRESSION(hue_deg, h); + SET_EXPRESSION(hue, H); + SET_EXPRESSION(saturation, s); + + hue->flat_syntax = 0; + + av_log(ctx, AV_LOG_VERBOSE, + "H_expr:%s h_deg_expr:%s s_expr:%s\n", + 
hue->hue_expr, hue->hue_deg_expr, hue->saturation_expr); + + /* compatibility h:s syntax */ + } else { + n = sscanf(args, "%f%c%f%c", &hue->hue_deg, &c1, &hue->saturation, &c2); + if (n != 1 && (n != 3 || c1 != ':')) { + av_log(ctx, AV_LOG_ERROR, + "Invalid syntax for argument '%s': " + "must be in the form 'hue[:saturation]'\n", args); + return AVERROR(EINVAL); + } + + if (hue->saturation < SAT_MIN_VAL || hue->saturation > SAT_MAX_VAL) { + av_log(ctx, AV_LOG_ERROR, + "Invalid value for saturation %0.1f: " + "must be included between range %d and +%d\n", + hue->saturation, SAT_MIN_VAL, SAT_MAX_VAL); + return AVERROR(EINVAL); + } + + hue->hue = hue->hue_deg * M_PI / 180; + hue->flat_syntax = 1; + + av_log(ctx, AV_LOG_VERBOSE, + "H:%0.1f h:%0.1f s:%0.1f\n", + hue->hue, hue->hue_deg, hue->saturation); + } + } + + compute_sin_and_cos(hue); + + return 0; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + HueContext *hue = ctx->priv; + + hue->class = &hue_class; + av_opt_set_defaults(hue); + + hue->saturation = SAT_DEFAULT_VAL; + hue->hue = HUE_DEFAULT_VAL; + hue->hue_deg_pexpr = NULL; + hue->hue_pexpr = NULL; + hue->flat_syntax = 1; + + return set_options(ctx, args); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + HueContext *hue = ctx->priv; + + av_opt_free(hue); + + av_free(hue->hue_deg_expr); + av_expr_free(hue->hue_deg_pexpr); + av_free(hue->hue_expr); + av_expr_free(hue->hue_pexpr); + av_free(hue->saturation_expr); + av_expr_free(hue->saturation_pexpr); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, + AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, + AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + + return 0; +} + +static int config_props(AVFilterLink *inlink) +{ + HueContext *hue = inlink->dst->priv; + const AVPixFmtDescriptor *desc = 
av_pix_fmt_desc_get(inlink->format); + + hue->hsub = desc->log2_chroma_w; + hue->vsub = desc->log2_chroma_h; + + hue->var_values[VAR_N] = 0; + hue->var_values[VAR_TB] = av_q2d(inlink->time_base); + hue->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ? + NAN : av_q2d(inlink->frame_rate); + + return 0; +} + +static void process_chrominance(uint8_t *udst, uint8_t *vdst, const int dst_linesize, + uint8_t *usrc, uint8_t *vsrc, const int src_linesize, + int w, int h, + const int32_t c, const int32_t s) +{ + int32_t u, v, new_u, new_v; + int i; + + /* + * If we consider U and V as the components of a 2D vector then its angle + * is the hue and the norm is the saturation + */ + while (h--) { + for (i = 0; i < w; i++) { + /* Normalize the components from range [16;140] to [-112;112] */ + u = usrc[i] - 128; + v = vsrc[i] - 128; + /* + * Apply the rotation of the vector : (c * u) - (s * v) + * (s * u) + (c * v) + * De-normalize the components (without forgetting to scale 128 + * by << 16) + * Finally scale back the result by >> 16 + */ + new_u = ((c * u) - (s * v) + (1 << 15) + (128 << 16)) >> 16; + new_v = ((s * u) + (c * v) + (1 << 15) + (128 << 16)) >> 16; + + /* Prevent a potential overflow */ + udst[i] = av_clip_uint8_c(new_u); + vdst[i] = av_clip_uint8_c(new_v); + } + + usrc += src_linesize; + vsrc += src_linesize; + udst += dst_linesize; + vdst += dst_linesize; + } +} + +#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) +#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? 
NAN : (double)(ts) * av_q2d(tb)) + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) +{ + HueContext *hue = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outpic; + + outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!outpic) { + avfilter_unref_bufferp(&inpic); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(outpic, inpic); + + if (!hue->flat_syntax) { + hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base); + hue->var_values[VAR_PTS] = TS2D(inpic->pts); + + if (hue->saturation_expr) { + hue->saturation = av_expr_eval(hue->saturation_pexpr, hue->var_values, NULL); + + if (hue->saturation < SAT_MIN_VAL || hue->saturation > SAT_MAX_VAL) { + hue->saturation = av_clip(hue->saturation, SAT_MIN_VAL, SAT_MAX_VAL); + av_log(inlink->dst, AV_LOG_WARNING, + "Saturation value not in range [%d,%d]: clipping value to %0.1f\n", + SAT_MIN_VAL, SAT_MAX_VAL, hue->saturation); + } + } + + if (hue->hue_deg_expr) { + hue->hue_deg = av_expr_eval(hue->hue_deg_pexpr, hue->var_values, NULL); + hue->hue = hue->hue_deg * M_PI / 180; + } else if (hue->hue_expr) { + hue->hue = av_expr_eval(hue->hue_pexpr, hue->var_values, NULL); + } + + av_log(inlink->dst, AV_LOG_DEBUG, + "H:%0.1f s:%0.f t:%0.1f n:%d\n", + hue->hue, hue->saturation, + hue->var_values[VAR_T], (int)hue->var_values[VAR_N]); + + compute_sin_and_cos(hue); + } + + hue->var_values[VAR_N] += 1; + + av_image_copy_plane(outpic->data[0], outpic->linesize[0], + inpic->data[0], inpic->linesize[0], + inlink->w, inlink->h); + + process_chrominance(outpic->data[1], outpic->data[2], outpic->linesize[1], + inpic->data[1], inpic->data[2], inpic->linesize[1], + inlink->w >> hue->hsub, inlink->h >> hue->vsub, + hue->hue_cos, hue->hue_sin); + + avfilter_unref_bufferp(&inpic); + return ff_filter_frame(outlink, outpic); +} + +static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, + char *res, int 
res_len, int flags) +{ + if (!strcmp(cmd, "reinit")) + return set_options(ctx, args); + else + return AVERROR(ENOSYS); +} + +static const AVFilterPad hue_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .config_props = config_props, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad hue_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_hue = { + .name = "hue", + .description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."), + + .priv_size = sizeof(HueContext), + + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .process_command = process_command, + .inputs = hue_inputs, + .outputs = hue_outputs, + .priv_class = &hue_class, +}; diff --git a/libavfilter/vf_idet.c b/libavfilter/vf_idet.c new file mode 100644 index 0000000..513a8e3 --- /dev/null +++ b/libavfilter/vf_idet.c @@ -0,0 +1,326 @@ +/* + * Copyright (C) 2012 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <float.h> /* FLT_MAX */ + +#include "libavutil/cpu.h" +#include "libavutil/common.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "avfilter.h" +#include "internal.h" + +#define HIST_SIZE 4 + +typedef enum { + TFF, + BFF, + PROGRSSIVE, + UNDETERMINED, +} Type; + +typedef struct { + const AVClass *class; + float interlace_threshold; + float progressive_threshold; + + Type last_type; + int prestat[4]; + int poststat[4]; + + uint8_t history[HIST_SIZE]; + + AVFilterBufferRef *cur; + AVFilterBufferRef *next; + AVFilterBufferRef *prev; + int (*filter_line)(const uint8_t *prev, const uint8_t *cur, const uint8_t *next, int w); + + const AVPixFmtDescriptor *csp; +} IDETContext; + +#define OFFSET(x) offsetof(IDETContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption idet_options[] = { + { "intl_thres", "set interlacing threshold", OFFSET(interlace_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 1.01}, -1, FLT_MAX, FLAGS }, + { "prog_thres", "set progressive threshold", OFFSET(progressive_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 2.5}, -1, FLT_MAX, FLAGS }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(idet); + +static const char *type2str(Type type) +{ + switch(type) { + case TFF : return "Top Field First "; + case BFF : return "Bottom Field First"; + case PROGRSSIVE : return "Progressive "; + case UNDETERMINED: return "Undetermined "; + } + return NULL; +} + +static int filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w) +{ + int x; + int ret=0; + + for(x=0; x<w; x++){ + ret += FFABS((*a++ + *c++) - 2 * *b++); + } + + return ret; +} + +static int filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w) +{ + int x; + int ret=0; + + 
for(x=0; x<w; x++){ + ret += FFABS((*a++ + *c++) - 2 * *b++); + } + + return ret; +} + +static void filter(AVFilterContext *ctx) +{ + IDETContext *idet = ctx->priv; + int y, i; + int64_t alpha[2]={0}; + int64_t delta=0; + Type type, best_type; + int match = 0; + + for (i = 0; i < idet->csp->nb_components; i++) { + int w = idet->cur->video->w; + int h = idet->cur->video->h; + int refs = idet->cur->linesize[i]; + + if (i && i<3) { + w >>= idet->csp->log2_chroma_w; + h >>= idet->csp->log2_chroma_h; + } + + for (y = 2; y < h - 2; y++) { + uint8_t *prev = &idet->prev->data[i][y*refs]; + uint8_t *cur = &idet->cur ->data[i][y*refs]; + uint8_t *next = &idet->next->data[i][y*refs]; + alpha[ y &1] += idet->filter_line(cur-refs, prev, cur+refs, w); + alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w); + delta += idet->filter_line(cur-refs, cur, cur+refs, w); + } + } + + if (alpha[0] > idet->interlace_threshold * alpha[1]){ + type = TFF; + }else if(alpha[1] > idet->interlace_threshold * alpha[0]){ + type = BFF; + }else if(alpha[1] > idet->progressive_threshold * delta){ + type = PROGRSSIVE; + }else{ + type = UNDETERMINED; + } + + memmove(idet->history+1, idet->history, HIST_SIZE-1); + idet->history[0] = type; + best_type = UNDETERMINED; + for(i=0; i<HIST_SIZE; i++){ + if(idet->history[i] != UNDETERMINED){ + if(best_type == UNDETERMINED) + best_type = idet->history[i]; + + if(idet->history[i] == best_type) { + match++; + }else{ + match=0; + break; + } + } + } + if(idet->last_type == UNDETERMINED){ + if(match ) idet->last_type = best_type; + }else{ + if(match>2) idet->last_type = best_type; + } + + if (idet->last_type == TFF){ + idet->cur->video->top_field_first = 1; + idet->cur->video->interlaced = 1; + }else if(idet->last_type == BFF){ + idet->cur->video->top_field_first = 0; + idet->cur->video->interlaced = 1; + }else if(idet->last_type == PROGRSSIVE){ + idet->cur->video->interlaced = 0; + } + + idet->prestat [ type] ++; + idet->poststat[idet->last_type] ++; + 
av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type)); +} + +static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) +{ + AVFilterContext *ctx = link->dst; + IDETContext *idet = ctx->priv; + + if (idet->prev) + avfilter_unref_buffer(idet->prev); + idet->prev = idet->cur; + idet->cur = idet->next; + idet->next = picref; + + if (!idet->cur) + return 0; + + if (!idet->prev) + idet->prev = avfilter_ref_buffer(idet->cur, ~0); + + if (!idet->csp) + idet->csp = av_pix_fmt_desc_get(link->format); + if (idet->csp->comp[0].depth_minus1 / 8 == 1) + idet->filter_line = (void*)filter_line_c_16bit; + + filter(ctx); + + return ff_filter_frame(ctx->outputs[0], avfilter_ref_buffer(idet->cur, ~0)); +} + +static int request_frame(AVFilterLink *link) +{ + AVFilterContext *ctx = link->src; + IDETContext *idet = ctx->priv; + + do { + int ret; + + if ((ret = ff_request_frame(link->src->inputs[0]))) + return ret; + } while (!idet->cur); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + IDETContext *idet = ctx->priv; + + av_log(ctx, AV_LOG_INFO, "Single frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n", + idet->prestat[TFF], + idet->prestat[BFF], + idet->prestat[PROGRSSIVE], + idet->prestat[UNDETERMINED] + ); + av_log(ctx, AV_LOG_INFO, "Multi frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n", + idet->poststat[TFF], + idet->poststat[BFF], + idet->poststat[PROGRSSIVE], + idet->poststat[UNDETERMINED] + ); + + avfilter_unref_bufferp(&idet->prev); + avfilter_unref_bufferp(&idet->cur ); + avfilter_unref_bufferp(&idet->next); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV444P, + AV_PIX_FMT_YUV410P, + AV_PIX_FMT_YUV411P, + AV_PIX_FMT_GRAY8, + AV_PIX_FMT_YUVJ420P, + AV_PIX_FMT_YUVJ422P, + AV_PIX_FMT_YUVJ444P, + AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ), + 
AV_PIX_FMT_YUV440P, + AV_PIX_FMT_YUVJ440P, + AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ), + AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ), + AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ), + AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ), + AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ), + AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ), + AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + + return 0; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + IDETContext *idet = ctx->priv; + static const char *shorthand[] = { "intl_thres", "prog_thres", NULL }; + int ret; + + idet->class = &idet_class; + av_opt_set_defaults(idet); + + if ((ret = av_opt_set_from_string(idet, args, shorthand, "=", ":")) < 0) + return ret; + + idet->last_type = UNDETERMINED; + memset(idet->history, UNDETERMINED, HIST_SIZE); + + idet->filter_line = filter_line_c; + + return 0; +} + + +static const AVFilterPad idet_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_PRESERVE, + }, + { NULL } +}; + +static const AVFilterPad idet_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .rej_perms = AV_PERM_WRITE, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_idet = { + .name = "idet", + .description = NULL_IF_CONFIG_SMALL("Interlace detect Filter."), + + .priv_size = sizeof(IDETContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = idet_inputs, + .outputs = idet_outputs, + .priv_class = &idet_class, +}; diff --git a/libavfilter/vf_kerndeint.c b/libavfilter/vf_kerndeint.c new file mode 100644 index 0000000..382b8a7 --- /dev/null +++ b/libavfilter/vf_kerndeint.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2012 Jeremy Tran + * Copyright (c) 2004 Tobias Diedrich + * Copyright (c) 2003 Donald A. 
Graft + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * Kernel Deinterlacer + * Ported from MPlayer libmpcodecs/vf_kerndeint.c. + */ + +#include "libavutil/imgutils.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" + +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + int frame; ///< frame count, starting from 0 + int thresh, map, order, sharp, twoway; + int vsub; + uint8_t *tmp_data [4]; ///< temporary plane data buffer + int tmp_bwidth[4]; ///< temporary plane byte width +} KerndeintContext; + +#define OFFSET(x) offsetof(KerndeintContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +static const AVOption kerndeint_options[] = { + { "thresh", "set the threshold", OFFSET(thresh), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS }, + { "map", "set the map", OFFSET(map), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + { "order", "set the order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + { "sharp", "enable sharpening", OFFSET(sharp), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + { "twoway", "enable twoway", OFFSET(twoway), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(kerndeint); 
+ +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + KerndeintContext *kerndeint = ctx->priv; + const char const * shorthand[] = { "thresh", "map", "order", "sharp", "twoway", NULL }; + + kerndeint->class = &kerndeint_class; + av_opt_set_defaults(kerndeint); + + return av_opt_set_from_string(kerndeint, args, shorthand, "=", ":"); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + KerndeintContext *kerndeint = ctx->priv; + + av_free(kerndeint->tmp_data[0]); + av_opt_free(kerndeint); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum PixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_YUYV422, + AV_PIX_FMT_ARGB, AV_PIX_FMT_0RGB, + AV_PIX_FMT_ABGR, AV_PIX_FMT_0BGR, + AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB0, + AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + + return 0; +} + +static int config_props(AVFilterLink *inlink) +{ + KerndeintContext *kerndeint = inlink->dst->priv; + const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format]; + int ret; + + kerndeint->vsub = desc->log2_chroma_h; + + ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_bwidth, + inlink->w, inlink->h, inlink->format, 1); + if (ret < 0) + return ret; + memset(kerndeint->tmp_data[0], 0, ret); + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) +{ + KerndeintContext *kerndeint = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outpic; + const uint8_t *prvp; ///< Previous field's pixel line number n + const uint8_t *prvpp; ///< Previous field's pixel line number (n - 1) + const uint8_t *prvpn; ///< Previous field's pixel line number (n + 1) + const uint8_t *prvppp; ///< Previous field's pixel line number (n - 2) + const uint8_t *prvpnn; ///< Previous field's pixel line number (n + 2) + const uint8_t *prvp4p; ///< Previous field's pixel line number (n - 4) + const uint8_t 
*prvp4n; ///< Previous field's pixel line number (n + 4) + + const uint8_t *srcp; ///< Current field's pixel line number n + const uint8_t *srcpp; ///< Current field's pixel line number (n - 1) + const uint8_t *srcpn; ///< Current field's pixel line number (n + 1) + const uint8_t *srcppp; ///< Current field's pixel line number (n - 2) + const uint8_t *srcpnn; ///< Current field's pixel line number (n + 2) + const uint8_t *srcp3p; ///< Current field's pixel line number (n - 3) + const uint8_t *srcp3n; ///< Current field's pixel line number (n + 3) + const uint8_t *srcp4p; ///< Current field's pixel line number (n - 4) + const uint8_t *srcp4n; ///< Current field's pixel line number (n + 4) + + uint8_t *dstp, *dstp_saved; + const uint8_t *srcp_saved; + + int src_linesize, psrc_linesize, dst_linesize, bwidth; + int x, y, plane, val, hi, lo, g, h, n = kerndeint->frame++; + double valf; + + const int thresh = kerndeint->thresh; + const int order = kerndeint->order; + const int map = kerndeint->map; + const int sharp = kerndeint->sharp; + const int twoway = kerndeint->twoway; + + const int is_packed_rgb = av_pix_fmt_desc_get(inlink->format)->flags & PIX_FMT_RGB; + + outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); + if (!outpic) { + avfilter_unref_bufferp(&inpic); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(outpic, inpic); + outpic->video->interlaced = 0; + + for (plane = 0; inpic->data[plane] && plane < 4; plane++) { + h = plane == 0 ? 
inlink->h : inlink->h >> kerndeint->vsub; + bwidth = kerndeint->tmp_bwidth[plane]; + + srcp = srcp_saved = inpic->data[plane]; + src_linesize = inpic->linesize[plane]; + psrc_linesize = kerndeint->tmp_bwidth[plane]; + dstp = dstp_saved = outpic->data[plane]; + dst_linesize = outpic->linesize[plane]; + srcp = srcp_saved + (1 - order) * src_linesize; + dstp = dstp_saved + (1 - order) * dst_linesize; + + for (y = 0; y < h; y += 2) { + memcpy(dstp, srcp, bwidth); + srcp += 2 * src_linesize; + dstp += 2 * dst_linesize; + } + + // Copy through the lines that will be missed below. + memcpy(dstp_saved + order * dst_linesize, srcp_saved + (1 - order) * src_linesize, bwidth); + memcpy(dstp_saved + (2 + order ) * dst_linesize, srcp_saved + (3 - order) * src_linesize, bwidth); + memcpy(dstp_saved + (h - 2 + order) * dst_linesize, srcp_saved + (h - 1 - order) * src_linesize, bwidth); + memcpy(dstp_saved + (h - 4 + order) * dst_linesize, srcp_saved + (h - 3 - order) * src_linesize, bwidth); + + /* For the other field choose adaptively between using the previous field + or the interpolant from the current field. 
*/ + prvp = kerndeint->tmp_data[plane] + 5 * psrc_linesize - (1 - order) * psrc_linesize; + prvpp = prvp - psrc_linesize; + prvppp = prvp - 2 * psrc_linesize; + prvp4p = prvp - 4 * psrc_linesize; + prvpn = prvp + psrc_linesize; + prvpnn = prvp + 2 * psrc_linesize; + prvp4n = prvp + 4 * psrc_linesize; + + srcp = srcp_saved + 5 * src_linesize - (1 - order) * src_linesize; + srcpp = srcp - src_linesize; + srcppp = srcp - 2 * src_linesize; + srcp3p = srcp - 3 * src_linesize; + srcp4p = srcp - 4 * src_linesize; + + srcpn = srcp + src_linesize; + srcpnn = srcp + 2 * src_linesize; + srcp3n = srcp + 3 * src_linesize; + srcp4n = srcp + 4 * src_linesize; + + dstp = dstp_saved + 5 * dst_linesize - (1 - order) * dst_linesize; + + for (y = 5 - (1 - order); y <= h - 5 - (1 - order); y += 2) { + for (x = 0; x < bwidth; x++) { + if (thresh == 0 || n == 0 || + (abs((int)prvp[x] - (int)srcp[x]) > thresh) || + (abs((int)prvpp[x] - (int)srcpp[x]) > thresh) || + (abs((int)prvpn[x] - (int)srcpn[x]) > thresh)) { + if (map) { + g = x & ~3; + + if (is_packed_rgb) { + AV_WB32(dstp + g, 0xffffffff); + x = g + 3; + } else if (inlink->format == AV_PIX_FMT_YUYV422) { + // y <- 235, u <- 128, y <- 235, v <- 128 + AV_WB32(dstp + g, 0xeb80eb80); + x = g + 3; + } else { + dstp[x] = plane == 0 ? 235 : 128; + } + } else { + if (is_packed_rgb) { + hi = 255; + lo = 0; + } else if (inlink->format == AV_PIX_FMT_YUYV422) { + hi = x & 1 ? 240 : 235; + lo = 16; + } else { + hi = plane == 0 ? 
235 : 240; + lo = 16; + } + + if (sharp) { + if (twoway) { + valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x]) + + 0.170 * ((int)srcp[x] + (int)prvp[x]) + - 0.116 * ((int)srcppp[x] + (int)srcpnn[x] + (int)prvppp[x] + (int)prvpnn[x]) + - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x]) + + 0.031 * ((int)srcp4p[x] + (int)srcp4n[x] + (int)prvp4p[x] + (int)prvp4n[x]); + } else { + valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x]) + + 0.170 * ((int)prvp[x]) + - 0.116 * ((int)prvppp[x] + (int)prvpnn[x]) + - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x]) + + 0.031 * ((int)prvp4p[x] + (int)prvp4p[x]); + } + dstp[x] = av_clip(valf, lo, hi); + } else { + if (twoway) { + val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)srcp[x] + (int)prvp[x]) + - (int)(srcppp[x]) - (int)(srcpnn[x]) + - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4; + } else { + val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)prvp[x]) + - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4; + } + dstp[x] = av_clip(val, lo, hi); + } + } + } else { + dstp[x] = srcp[x]; + } + } + prvp += 2 * psrc_linesize; + prvpp += 2 * psrc_linesize; + prvppp += 2 * psrc_linesize; + prvpn += 2 * psrc_linesize; + prvpnn += 2 * psrc_linesize; + prvp4p += 2 * psrc_linesize; + prvp4n += 2 * psrc_linesize; + srcp += 2 * src_linesize; + srcpp += 2 * src_linesize; + srcppp += 2 * src_linesize; + srcp3p += 2 * src_linesize; + srcp4p += 2 * src_linesize; + srcpn += 2 * src_linesize; + srcpnn += 2 * src_linesize; + srcp3n += 2 * src_linesize; + srcp4n += 2 * src_linesize; + dstp += 2 * dst_linesize; + } + + srcp = inpic->data[plane]; + dstp = kerndeint->tmp_data[plane]; + av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h); + } + + avfilter_unref_buffer(inpic); + return ff_filter_frame(outlink, outpic); +} + +static const AVFilterPad kerndeint_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .config_props = config_props, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static 
const AVFilterPad kerndeint_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_kerndeint = { + .name = "kerndeint", + .description = NULL_IF_CONFIG_SMALL("Apply kernel deinterlacing to the input."), + .priv_size = sizeof(KerndeintContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + + .inputs = kerndeint_inputs, + .outputs = kerndeint_outputs, + + .priv_class = &kerndeint_class, +}; diff --git a/libavfilter/vf_libopencv.c b/libavfilter/vf_libopencv.c index e558a4a..1b8a5dc 100644 --- a/libavfilter/vf_libopencv.c +++ b/libavfilter/vf_libopencv.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2010 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/vf_lut.c b/libavfilter/vf_lut.c index f265795..73f909d 100644 --- a/libavfilter/vf_lut.c +++ b/libavfilter/vf_lut.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2011 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. 
* - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -26,18 +26,15 @@ #include "libavutil/common.h" #include "libavutil/eval.h" -#include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" +#include "drawutils.h" #include "formats.h" #include "internal.h" #include "video.h" static const char *const var_names[] = { - "E", - "PHI", - "PI", "w", ///< width of the input video "h", ///< height of the input video "val", ///< input value for the pixel @@ -49,9 +46,6 @@ static const char *const var_names[] = { }; enum var_name { - VAR_E, - VAR_PHI, - VAR_PI, VAR_W, VAR_H, VAR_VAL, @@ -70,7 +64,6 @@ typedef struct { int hsub, vsub; double var_values[VAR_VARS_NB]; int is_rgb, is_yuv; - int rgba_map[4]; int step; int negate_alpha; /* only used by negate */ } LutContext; @@ -84,53 +77,23 @@ typedef struct { #define A 3 #define OFFSET(x) offsetof(LutContext, x) - -static const AVOption lut_options[] = { - {"c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"c1", "set component #1 expression", OFFSET(comp_expr_str[1]), 
AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, - {"a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX}, +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption options[] = { + {"c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, {.str="val"}, 
CHAR_MIN, CHAR_MAX, FLAGS}, + {"r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, + {"a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, {.str="val"}, CHAR_MIN, CHAR_MAX, FLAGS}, {NULL}, }; -static const char *lut_get_name(void *ctx) -{ - return "lut"; -} - -static const AVClass lut_class = { - "LutContext", - lut_get_name, - lut_options -}; - -static int init(AVFilterContext *ctx, const char *args) -{ - LutContext *lut = ctx->priv; - int ret; - - lut->class = &lut_class; - av_opt_set_defaults(lut); - - lut->var_values[VAR_PHI] = M_PHI; - lut->var_values[VAR_PI] = M_PI; - lut->var_values[VAR_E ] = M_E; - - lut->is_rgb = !strcmp(ctx->filter->name, "lutrgb"); - lut->is_yuv = !strcmp(ctx->filter->name, "lutyuv"); - if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0) - return ret; - - return 0; -} - static av_cold void uninit(AVFilterContext *ctx) { LutContext *lut = ctx->priv; @@ -155,16 +118,16 @@ static av_cold void uninit(AVFilterContext *ctx) AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, \ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24 -static enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE }; -static enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE }; -static enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, AV_PIX_FMT_NONE }; +static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE }; +static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE }; +static const enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, AV_PIX_FMT_NONE }; static int query_formats(AVFilterContext *ctx) { LutContext *lut = ctx->priv; - enum AVPixelFormat *pix_fmts = lut->is_rgb ? 
rgb_pix_fmts : - lut->is_yuv ? yuv_pix_fmts : all_pix_fmts; + const enum AVPixelFormat *pix_fmts = lut->is_rgb ? rgb_pix_fmts : + lut->is_yuv ? yuv_pix_fmts : all_pix_fmts; ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); return 0; @@ -197,8 +160,8 @@ static double compute_gammaval(void *opaque, double gamma) } static double (* const funcs1[])(void *, double) = { - clip, - compute_gammaval, + (void *)clip, + (void *)compute_gammaval, NULL }; @@ -213,6 +176,7 @@ static int config_props(AVFilterLink *inlink) AVFilterContext *ctx = inlink->dst; LutContext *lut = ctx->priv; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + uint8_t rgba_map[4]; /* component index -> RGBA color index map */ int min[4], max[4]; int val, comp, ret; @@ -245,49 +209,43 @@ static int config_props(AVFilterLink *inlink) else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1; if (lut->is_rgb) { - switch (inlink->format) { - case AV_PIX_FMT_ARGB: lut->rgba_map[A] = 0; lut->rgba_map[R] = 1; lut->rgba_map[G] = 2; lut->rgba_map[B] = 3; break; - case AV_PIX_FMT_ABGR: lut->rgba_map[A] = 0; lut->rgba_map[B] = 1; lut->rgba_map[G] = 2; lut->rgba_map[R] = 3; break; - case AV_PIX_FMT_RGBA: - case AV_PIX_FMT_RGB24: lut->rgba_map[R] = 0; lut->rgba_map[G] = 1; lut->rgba_map[B] = 2; lut->rgba_map[A] = 3; break; - case AV_PIX_FMT_BGRA: - case AV_PIX_FMT_BGR24: lut->rgba_map[B] = 0; lut->rgba_map[G] = 1; lut->rgba_map[R] = 2; lut->rgba_map[A] = 3; break; - } + ff_fill_rgba_map(rgba_map, inlink->format); lut->step = av_get_bits_per_pixel(desc) >> 3; } for (comp = 0; comp < desc->nb_components; comp++) { double res; + int color = lut->is_rgb ? 
rgba_map[comp] : comp; /* create the parsed expression */ - ret = av_expr_parse(&lut->comp_expr[comp], lut->comp_expr_str[comp], + ret = av_expr_parse(&lut->comp_expr[color], lut->comp_expr_str[color], var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx); if (ret < 0) { av_log(ctx, AV_LOG_ERROR, - "Error when parsing the expression '%s' for the component %d.\n", - lut->comp_expr_str[comp], comp); + "Error when parsing the expression '%s' for the component %d and color %d.\n", + lut->comp_expr_str[comp], comp, color); return AVERROR(EINVAL); } /* compute the lut */ - lut->var_values[VAR_MAXVAL] = max[comp]; - lut->var_values[VAR_MINVAL] = min[comp]; + lut->var_values[VAR_MAXVAL] = max[color]; + lut->var_values[VAR_MINVAL] = min[color]; for (val = 0; val < 256; val++) { lut->var_values[VAR_VAL] = val; - lut->var_values[VAR_CLIPVAL] = av_clip(val, min[comp], max[comp]); + lut->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]); lut->var_values[VAR_NEGVAL] = - av_clip(min[comp] + max[comp] - lut->var_values[VAR_VAL], - min[comp], max[comp]); + av_clip(min[color] + max[color] - lut->var_values[VAR_VAL], + min[color], max[color]); - res = av_expr_eval(lut->comp_expr[comp], lut->var_values, lut); + res = av_expr_eval(lut->comp_expr[color], lut->var_values, lut); if (isnan(res)) { av_log(ctx, AV_LOG_ERROR, - "Error when evaluating the expression '%s' for the value %d for the component #%d.\n", - lut->comp_expr_str[comp], val, comp); + "Error when evaluating the expression '%s' for the value %d for the component %d.\n", + lut->comp_expr_str[color], val, comp); return AVERROR(EINVAL); } - lut->lut[comp][val] = av_clip((int)res, min[comp], max[comp]); + lut->lut[comp][val] = av_clip((int)res, min[color], max[color]); av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, lut->lut[comp][val]); } } @@ -302,7 +260,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) AVFilterLink *outlink = ctx->outputs[0]; AVFilterBufferRef *out; uint8_t 
*inrow, *outrow, *inrow0, *outrow0; - int i, j, k, plane; + int i, j, plane; out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); if (!out) { @@ -317,11 +275,21 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) outrow0 = out->data[0]; for (i = 0; i < in->video->h; i ++) { + int w = inlink->w; + const uint8_t (*tab)[256] = (const uint8_t (*)[256])lut->lut; inrow = inrow0; outrow = outrow0; - for (j = 0; j < inlink->w; j++) { - for (k = 0; k < lut->step; k++) - outrow[k] = lut->lut[lut->rgba_map[k]][inrow[k]]; + for (j = 0; j < w; j++) { + outrow[0] = tab[0][inrow[0]]; + if (lut->step>1) { + outrow[1] = tab[1][inrow[1]]; + if (lut->step>2) { + outrow[2] = tab[2][inrow[2]]; + if (lut->step>3) { + outrow[3] = tab[3][inrow[3]]; + } + } + } outrow += lut->step; inrow += lut->step; } @@ -337,9 +305,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) inrow = in ->data[plane]; outrow = out->data[plane]; - for (i = 0; i < in->video->h >> vsub; i ++) { - for (j = 0; j < inlink->w>>hsub; j++) - outrow[j] = lut->lut[plane][inrow[j]]; + for (i = 0; i < (in->video->h + (1<<vsub) - 1)>>vsub; i ++) { + const uint8_t *tab = lut->lut[plane]; + int w = (inlink->w + (1<<hsub) - 1)>>hsub; + for (j = 0; j < w; j++) + outrow[j] = tab[inrow[j]]; inrow += in ->linesize[plane]; outrow += out->linesize[plane]; } @@ -363,32 +333,94 @@ static const AVFilterPad outputs[] = { .type = AVMEDIA_TYPE_VIDEO, }, { .name = NULL} }; -#define DEFINE_LUT_FILTER(name_, description_, init_) \ +#define DEFINE_LUT_FILTER(name_, description_) \ AVFilter avfilter_vf_##name_ = { \ .name = #name_, \ .description = NULL_IF_CONFIG_SMALL(description_), \ .priv_size = sizeof(LutContext), \ \ - .init = init_, \ + .init = name_##_init, \ .uninit = uninit, \ .query_formats = query_formats, \ \ .inputs = inputs, \ .outputs = outputs, \ + .priv_class = &name_##_class, \ } #if CONFIG_LUT_FILTER -DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the 
RGB/YUV input video.", init); + +#define lut_options options +AVFILTER_DEFINE_CLASS(lut); + +static int lut_init(AVFilterContext *ctx, const char *args) +{ + LutContext *lut = ctx->priv; + int ret; + + lut->class = &lut_class; + av_opt_set_defaults(lut); + + if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0) + return ret; + + return 0; +} + +DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video."); #endif + #if CONFIG_LUTYUV_FILTER -DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.", init); + +#define lutyuv_options options +AVFILTER_DEFINE_CLASS(lutyuv); + +static int lutyuv_init(AVFilterContext *ctx, const char *args) +{ + LutContext *lut = ctx->priv; + int ret; + + lut->class = &lutyuv_class; + lut->is_yuv = 1; + av_opt_set_defaults(lut); + + if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0) + return ret; + + return 0; +} + +DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video."); #endif + #if CONFIG_LUTRGB_FILTER -DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.", init); + +#define lutrgb_options options +AVFILTER_DEFINE_CLASS(lutrgb); + +static int lutrgb_init(AVFilterContext *ctx, const char *args) +{ + LutContext *lut = ctx->priv; + int ret; + + lut->class = &lutrgb_class; + lut->is_rgb = 1; + av_opt_set_defaults(lut); + + if (args && (ret = av_set_options_string(lut, args, "=", ":")) < 0) + return ret; + + return 0; +} + +DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video."); #endif #if CONFIG_NEGATE_FILTER +#define negate_options options +AVFILTER_DEFINE_CLASS(negate); + static int negate_init(AVFilterContext *ctx, const char *args) { LutContext *lut = ctx->priv; @@ -402,9 +434,12 @@ static int negate_init(AVFilterContext *ctx, const char *args) snprintf(lut_params, sizeof(lut_params), "c0=negval:c1=negval:c2=negval:a=%s", lut->negate_alpha ? 
"negval" : "val"); - return init(ctx, lut_params); + lut->class = &negate_class; + av_opt_set_defaults(lut); + + return av_set_options_string(lut, lut_params, "=", ":"); } -DEFINE_LUT_FILTER(negate, "Negate input video.", negate_init); +DEFINE_LUT_FILTER(negate, "Negate input video."); #endif diff --git a/libavfilter/vf_mp.c b/libavfilter/vf_mp.c new file mode 100644 index 0000000..e057d62 --- /dev/null +++ b/libavfilter/vf_mp.c @@ -0,0 +1,863 @@ +/* + * Copyright (c) 2011 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Parts of this file have been stolen from mplayer + */ + +/** + * @file + */ + +#include "avfilter.h" +#include "video.h" +#include "formats.h" +#include "internal.h" +#include "libavutil/avassert.h" +#include "libavutil/pixdesc.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/imgutils.h" + +#include "libmpcodecs/vf.h" +#include "libmpcodecs/img_format.h" +#include "libmpcodecs/cpudetect.h" +#include "libmpcodecs/av_helpers.h" +#include "libmpcodecs/vf_scale.h" +#include "libmpcodecs/libvo/fastmemcpy.h" + +#include "libswscale/swscale.h" + + +//FIXME maybe link the orig in +//XXX: identical pix_fmt must be following with each others +static const struct { + int fmt; + enum AVPixelFormat pix_fmt; +} conversion_map[] = { + {IMGFMT_ARGB, AV_PIX_FMT_ARGB}, + {IMGFMT_BGRA, AV_PIX_FMT_BGRA}, + {IMGFMT_BGR24, AV_PIX_FMT_BGR24}, + {IMGFMT_BGR16BE, AV_PIX_FMT_RGB565BE}, + {IMGFMT_BGR16LE, AV_PIX_FMT_RGB565LE}, + {IMGFMT_BGR15BE, AV_PIX_FMT_RGB555BE}, + {IMGFMT_BGR15LE, AV_PIX_FMT_RGB555LE}, + {IMGFMT_BGR12BE, AV_PIX_FMT_RGB444BE}, + {IMGFMT_BGR12LE, AV_PIX_FMT_RGB444LE}, + {IMGFMT_BGR8, AV_PIX_FMT_RGB8}, + {IMGFMT_BGR4, AV_PIX_FMT_RGB4}, + {IMGFMT_BGR1, AV_PIX_FMT_MONOBLACK}, + {IMGFMT_RGB1, AV_PIX_FMT_MONOBLACK}, + {IMGFMT_RG4B, AV_PIX_FMT_BGR4_BYTE}, + {IMGFMT_BG4B, AV_PIX_FMT_RGB4_BYTE}, + {IMGFMT_RGB48LE, AV_PIX_FMT_RGB48LE}, + {IMGFMT_RGB48BE, AV_PIX_FMT_RGB48BE}, + {IMGFMT_ABGR, AV_PIX_FMT_ABGR}, + {IMGFMT_RGBA, AV_PIX_FMT_RGBA}, + {IMGFMT_RGB24, AV_PIX_FMT_RGB24}, + {IMGFMT_RGB16BE, AV_PIX_FMT_BGR565BE}, + {IMGFMT_RGB16LE, AV_PIX_FMT_BGR565LE}, + {IMGFMT_RGB15BE, AV_PIX_FMT_BGR555BE}, + {IMGFMT_RGB15LE, AV_PIX_FMT_BGR555LE}, + {IMGFMT_RGB12BE, AV_PIX_FMT_BGR444BE}, + {IMGFMT_RGB12LE, AV_PIX_FMT_BGR444LE}, + {IMGFMT_RGB8, AV_PIX_FMT_BGR8}, 
+ {IMGFMT_RGB4, AV_PIX_FMT_BGR4}, + {IMGFMT_BGR8, AV_PIX_FMT_PAL8}, + {IMGFMT_YUY2, AV_PIX_FMT_YUYV422}, + {IMGFMT_UYVY, AV_PIX_FMT_UYVY422}, + {IMGFMT_NV12, AV_PIX_FMT_NV12}, + {IMGFMT_NV21, AV_PIX_FMT_NV21}, + {IMGFMT_Y800, AV_PIX_FMT_GRAY8}, + {IMGFMT_Y8, AV_PIX_FMT_GRAY8}, + {IMGFMT_YVU9, AV_PIX_FMT_YUV410P}, + {IMGFMT_IF09, AV_PIX_FMT_YUV410P}, + {IMGFMT_YV12, AV_PIX_FMT_YUV420P}, + {IMGFMT_I420, AV_PIX_FMT_YUV420P}, + {IMGFMT_IYUV, AV_PIX_FMT_YUV420P}, + {IMGFMT_411P, AV_PIX_FMT_YUV411P}, + {IMGFMT_422P, AV_PIX_FMT_YUV422P}, + {IMGFMT_444P, AV_PIX_FMT_YUV444P}, + {IMGFMT_440P, AV_PIX_FMT_YUV440P}, + + {IMGFMT_420A, AV_PIX_FMT_YUVA420P}, + + {IMGFMT_420P16_LE, AV_PIX_FMT_YUV420P16LE}, + {IMGFMT_420P16_BE, AV_PIX_FMT_YUV420P16BE}, + {IMGFMT_422P16_LE, AV_PIX_FMT_YUV422P16LE}, + {IMGFMT_422P16_BE, AV_PIX_FMT_YUV422P16BE}, + {IMGFMT_444P16_LE, AV_PIX_FMT_YUV444P16LE}, + {IMGFMT_444P16_BE, AV_PIX_FMT_YUV444P16BE}, + + // YUVJ are YUV formats that use the full Y range and not just + // 16 - 235 (see colorspaces.txt). + // Currently they are all treated the same way. 
+ {IMGFMT_YV12, AV_PIX_FMT_YUVJ420P}, + {IMGFMT_422P, AV_PIX_FMT_YUVJ422P}, + {IMGFMT_444P, AV_PIX_FMT_YUVJ444P}, + {IMGFMT_440P, AV_PIX_FMT_YUVJ440P}, + + {IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC}, + {IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT}, + {IMGFMT_VDPAU_MPEG1, AV_PIX_FMT_VDPAU_MPEG1}, + {IMGFMT_VDPAU_MPEG2, AV_PIX_FMT_VDPAU_MPEG2}, + {IMGFMT_VDPAU_H264, AV_PIX_FMT_VDPAU_H264}, + {IMGFMT_VDPAU_WMV3, AV_PIX_FMT_VDPAU_WMV3}, + {IMGFMT_VDPAU_VC1, AV_PIX_FMT_VDPAU_VC1}, + {IMGFMT_VDPAU_MPEG4, AV_PIX_FMT_VDPAU_MPEG4}, + {0, AV_PIX_FMT_NONE} +}; + +extern const vf_info_t ff_vf_info_detc; +extern const vf_info_t ff_vf_info_dint; +extern const vf_info_t ff_vf_info_divtc; +extern const vf_info_t ff_vf_info_down3dright; +extern const vf_info_t ff_vf_info_dsize; +extern const vf_info_t ff_vf_info_eq2; +extern const vf_info_t ff_vf_info_eq; +extern const vf_info_t ff_vf_info_fil; +//extern const vf_info_t ff_vf_info_filmdint; +extern const vf_info_t ff_vf_info_fspp; +extern const vf_info_t ff_vf_info_harddup; +extern const vf_info_t ff_vf_info_il; +extern const vf_info_t ff_vf_info_ilpack; +extern const vf_info_t ff_vf_info_ivtc; +extern const vf_info_t ff_vf_info_kerndeint; +extern const vf_info_t ff_vf_info_mcdeint; +extern const vf_info_t ff_vf_info_noise; +extern const vf_info_t ff_vf_info_ow; +extern const vf_info_t ff_vf_info_perspective; +extern const vf_info_t ff_vf_info_phase; +extern const vf_info_t ff_vf_info_pp7; +extern const vf_info_t ff_vf_info_pullup; +extern const vf_info_t ff_vf_info_qp; +extern const vf_info_t ff_vf_info_sab; +extern const vf_info_t ff_vf_info_softpulldown; +extern const vf_info_t ff_vf_info_softskip; +extern const vf_info_t ff_vf_info_spp; +extern const vf_info_t ff_vf_info_stereo3d; +extern const vf_info_t ff_vf_info_telecine; +extern const vf_info_t ff_vf_info_tinterlace; +extern const vf_info_t ff_vf_info_unsharp; +extern const vf_info_t ff_vf_info_uspp; + + +static const vf_info_t* const filters[]={ + 
&ff_vf_info_detc, + &ff_vf_info_dint, + &ff_vf_info_divtc, + &ff_vf_info_down3dright, + &ff_vf_info_dsize, + &ff_vf_info_eq2, + &ff_vf_info_eq, + &ff_vf_info_fil, +// &ff_vf_info_filmdint, cmmx.h vd.h ‘opt_screen_size_x’ + &ff_vf_info_fspp, + &ff_vf_info_harddup, + &ff_vf_info_il, + &ff_vf_info_ilpack, + &ff_vf_info_ivtc, + &ff_vf_info_kerndeint, + &ff_vf_info_mcdeint, + &ff_vf_info_noise, + &ff_vf_info_ow, + &ff_vf_info_perspective, + &ff_vf_info_phase, + &ff_vf_info_pp7, + &ff_vf_info_pullup, + &ff_vf_info_qp, + &ff_vf_info_sab, + &ff_vf_info_softpulldown, + &ff_vf_info_softskip, + &ff_vf_info_spp, + &ff_vf_info_stereo3d, + &ff_vf_info_telecine, + &ff_vf_info_tinterlace, + &ff_vf_info_unsharp, + &ff_vf_info_uspp, + + NULL +}; + +/* +Unsupported filters +1bpp +ass +bmovl +crop +dvbscale +flip +expand +format +halfpack +lavc +lavcdeint +noformat +pp +scale +tfields +vo +yadif +zrmjpeg +*/ + +CpuCaps ff_gCpuCaps; //FIXME initialize this so optims work + + +static void ff_sws_getFlagsAndFilterFromCmdLine(int *flags, SwsFilter **srcFilterParam, SwsFilter **dstFilterParam) +{ + static int firstTime=1; + *flags=0; + +#if ARCH_X86 + if(ff_gCpuCaps.hasMMX) + __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions) +#endif + if(firstTime) + { + firstTime=0; + *flags= SWS_PRINT_INFO; + } + else if( ff_mp_msg_test(MSGT_VFILTER,MSGL_DBG2) ) *flags= SWS_PRINT_INFO; + + switch(SWS_BILINEAR) + { + case 0: *flags|= SWS_FAST_BILINEAR; break; + case 1: *flags|= SWS_BILINEAR; break; + case 2: *flags|= SWS_BICUBIC; break; + case 3: *flags|= SWS_X; break; + case 4: *flags|= SWS_POINT; break; + case 5: *flags|= SWS_AREA; break; + case 6: *flags|= SWS_BICUBLIN; break; + case 7: *flags|= SWS_GAUSS; break; + case 8: *flags|= SWS_SINC; break; + case 9: *flags|= SWS_LANCZOS; break; + case 10:*flags|= SWS_SPLINE; break; + default:*flags|= SWS_BILINEAR; break; + } + + *srcFilterParam= NULL; + *dstFilterParam= NULL; +} + +//exact copy 
from vf_scale.c +// will use sws_flags & src_filter (from cmd line) +struct SwsContext *ff_sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat) +{ + int flags, i; + SwsFilter *dstFilterParam, *srcFilterParam; + enum AVPixelFormat dfmt, sfmt; + + for(i=0; conversion_map[i].fmt && dstFormat != conversion_map[i].fmt; i++); + dfmt= conversion_map[i].pix_fmt; + for(i=0; conversion_map[i].fmt && srcFormat != conversion_map[i].fmt; i++); + sfmt= conversion_map[i].pix_fmt; + + if (srcFormat == IMGFMT_RGB8 || srcFormat == IMGFMT_BGR8) sfmt = AV_PIX_FMT_PAL8; + ff_sws_getFlagsAndFilterFromCmdLine(&flags, &srcFilterParam, &dstFilterParam); + + return sws_getContext(srcW, srcH, sfmt, dstW, dstH, dfmt, flags , srcFilterParam, dstFilterParam, NULL); +} + +typedef struct { + vf_instance_t vf; + vf_instance_t next_vf; + AVFilterContext *avfctx; + int frame_returned; +} MPContext; + +void ff_mp_msg(int mod, int lev, const char *format, ... ){ + va_list va; + va_start(va, format); + //FIXME convert lev/mod + av_vlog(NULL, AV_LOG_DEBUG, format, va); + va_end(va); +} + +int ff_mp_msg_test(int mod, int lev){ + return 123; +} + +void ff_init_avcodec(void) +{ + //we maybe should init but its kinda 1. unneeded 2. 
a bit inpolite from here +} + +//Exact copy of vf.c +void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){ + dst->pict_type= src->pict_type; + dst->fields = src->fields; + dst->qscale_type= src->qscale_type; + if(dst->width == src->width && dst->height == src->height){ + dst->qstride= src->qstride; + dst->qscale= src->qscale; + } +} + +//Exact copy of vf.c +void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){ + if (vf->next->draw_slice) { + vf->next->draw_slice(vf->next,src,stride,w,h,x,y); + return; + } + if (!vf->dmpi) { + ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name); + return; + } + if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) { + memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x, + src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]); + return; + } + memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0], + w, h, vf->dmpi->stride[0], stride[0]); + memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift), + src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]); + memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift), + src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]); +} + +//Exact copy of vf.c +void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){ + int y; + if(mpi->flags&MP_IMGFLAG_PLANAR){ + y0&=~1;h+=h&1; + if(x0==0 && w==mpi->width){ + // full width clear: + memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h); + memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift)); + memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift)); + } else + for(y=y0;y<y0+h;y+=2){ + 
memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w); + memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w); + memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift)); + memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift)); + } + return; + } + // packed: + for(y=y0;y<y0+h;y++){ + unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0; + if(mpi->flags&MP_IMGFLAG_YUV){ + unsigned int* p=(unsigned int*) dst; + int size=(mpi->bpp>>3)*w/4; + int i; +#if HAVE_BIGENDIAN +#define CLEAR_PACKEDYUV_PATTERN 0x00800080 +#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000 +#else +#define CLEAR_PACKEDYUV_PATTERN 0x80008000 +#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080 +#endif + if(mpi->flags&MP_IMGFLAG_SWAPPED){ + for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED; + for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED; + } else { + for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN; + for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN; + } + } else + memset(dst,0,(mpi->bpp>>3)*w); + } +} + +int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){ + return 1; +} + +//used by delogo +unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){ + return preferred; +} + +mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){ + MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf)); + mp_image_t* mpi=NULL; + int w2; + int number = mp_imgtype >> 16; + + av_assert0(vf->next == NULL); // all existing filters call this just on next + + //vf_dint needs these as it calls ff_vf_get_image() before configuring the output + if(vf->w==0 && w>0) vf->w=w; + if(vf->h==0 && h>0) vf->h=h; + + av_assert0(w == -1 || w >= vf->w); + av_assert0(h == -1 || h >= vf->h); + av_assert0(vf->w > 0); 
+ av_assert0(vf->h > 0); + + av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h); + + if (w == -1) w = vf->w; + if (h == -1) h = vf->h; + + w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w; + + // Note: we should call libvo first to check if it supports direct rendering + // and if not, then fallback to software buffers: + switch(mp_imgtype & 0xff){ + case MP_IMGTYPE_EXPORT: + if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h); + mpi=vf->imgctx.export_images[0]; + break; + case MP_IMGTYPE_STATIC: + if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h); + mpi=vf->imgctx.static_images[0]; + break; + case MP_IMGTYPE_TEMP: + if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h); + mpi=vf->imgctx.temp_images[0]; + break; + case MP_IMGTYPE_IPB: + if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame: + if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h); + mpi=vf->imgctx.temp_images[0]; + break; + } + case MP_IMGTYPE_IP: + if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h); + mpi=vf->imgctx.static_images[vf->imgctx.static_idx]; + vf->imgctx.static_idx^=1; + break; + case MP_IMGTYPE_NUMBERED: + if (number == -1) { + int i; + for (i = 0; i < NUM_NUMBERED_MPI; i++) + if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count) + break; + number = i; + } + if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL; + if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h); + mpi = vf->imgctx.numbered_images[number]; + mpi->number = number; + break; + } + if(mpi){ + mpi->type=mp_imgtype; + mpi->w=vf->w; mpi->h=vf->h; + // keep buffer allocation status & color flags only: +// mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT); + 
mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS; + // accept restrictions, draw_slice and palette flags only: + mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE); + if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK; + if(mpi->width!=w2 || mpi->height!=h){ +// printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h); + if(mpi->flags&MP_IMGFLAG_ALLOCATED){ + if(mpi->width<w2 || mpi->height<h){ + // need to re-allocate buffer memory: + av_free(mpi->planes[0]); + mpi->flags&=~MP_IMGFLAG_ALLOCATED; + ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n"); + } +// } else { + } { + mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift; + mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift; + } + } + if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt); + if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){ + + av_assert0(!vf->get_image); + // check libvo first! + if(vf->get_image) vf->get_image(vf,mpi); + + if(!(mpi->flags&MP_IMGFLAG_DIRECT)){ + // non-direct and not yet allocated image. allocate it! + if (!mpi->bpp) { // no way we can allocate this + ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL, + "ff_vf_get_image: Tried to allocate a format that can not be allocated!\n"); + return NULL; + } + + // check if codec prefer aligned stride: + if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){ + int align=(mpi->flags&MP_IMGFLAG_PLANAR && + mpi->flags&MP_IMGFLAG_YUV) ? + (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME + w2=((w+align)&(~align)); + if(mpi->width!=w2){ +#if 0 + // we have to change width... check if we CAN co it: + int flags=vf->query_format(vf,outfmt); // should not fail + if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? 
ff_vf_get_image{vf->query_format(outfmt)} failed!\n"); +// printf("query -> 0x%X \n",flags); + if(flags&VFCAP_ACCEPT_STRIDE){ +#endif + mpi->width=w2; + mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift; +// } + } + } + + ff_mp_image_alloc_planes(mpi); +// printf("clearing img!\n"); + ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height); + } + } + av_assert0(!vf->start_slice); + if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK) + if(vf->start_slice) vf->start_slice(vf,mpi); + if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){ + ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n", + "NULL"/*vf->info->name*/, + (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting": + ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"), + (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"", + mpi->width,mpi->height,mpi->bpp, + (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"), + (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed", + mpi->bpp*mpi->width*mpi->height/8); + ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n", + mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2], + mpi->stride[0], mpi->stride[1], mpi->stride[2], + mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift); + mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED; + } + + mpi->qscale = NULL; + mpi->usage_count++; + } +// printf("\rVF_MPI: %p %p %p %d %d %d \n", +// mpi->planes[0],mpi->planes[1],mpi->planes[2], +// mpi->stride[0],mpi->stride[1],mpi->stride[2]); + return mpi; +} + + +int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){ + MPContext *m= (void*)vf; + AVFilterLink *outlink = m->avfctx->outputs[0]; + AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer)); + AVFilterBufferRef *picref = av_mallocz(sizeof(AVFilterBufferRef)); + int i; + + av_assert0(vf->next); + + av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n"); + 
+ if (!pic || !picref) + goto fail; + + picref->buf = pic; + picref->buf->free= (void*)av_free; + if (!(picref->video = av_mallocz(sizeof(AVFilterBufferRefVideoProps)))) + goto fail; + + pic->w = picref->video->w = mpi->w; + pic->h = picref->video->h = mpi->h; + + /* make sure the buffer gets read permission or it's useless for output */ + picref->perms = AV_PERM_READ | AV_PERM_REUSE2; +// av_assert0(mpi->flags&MP_IMGFLAG_READABLE); + if(!(mpi->flags&MP_IMGFLAG_PRESERVE)) + picref->perms |= AV_PERM_WRITE; + + pic->refcount = 1; + picref->type = AVMEDIA_TYPE_VIDEO; + + for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++); + pic->format = picref->format = conversion_map[i].pix_fmt; + + memcpy(pic->data, mpi->planes, FFMIN(sizeof(pic->data) , sizeof(mpi->planes))); + memcpy(pic->linesize, mpi->stride, FFMIN(sizeof(pic->linesize), sizeof(mpi->stride))); + memcpy(picref->data, pic->data, sizeof(picref->data)); + memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize)); + + if(pts != MP_NOPTS_VALUE) + picref->pts= pts * av_q2d(outlink->time_base); + + ff_filter_frame(outlink, picref); + m->frame_returned++; + + return 1; +fail: + if (picref && picref->video) + av_free(picref->video); + av_free(picref); + av_free(pic); + return 0; +} + +int ff_vf_next_config(struct vf_instance *vf, + int width, int height, int d_width, int d_height, + unsigned int voflags, unsigned int outfmt){ + + av_assert0(width>0 && height>0); + vf->next->w = width; vf->next->h = height; + + return 1; +#if 0 + int flags=vf->next->query_format(vf->next,outfmt); + if(!flags){ + // hmm. colorspace mismatch!!! + //this is fatal for us ATM + return 0; + } + ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X req=0x%X \n",flags,vf->default_reqs); + miss=vf->default_reqs - (flags&vf->default_reqs); + if(miss&VFCAP_ACCEPT_STRIDE){ + // vf requires stride support but vf->next doesn't support it! 
+ // let's insert the 'expand' filter, it does the job for us: + vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL); + if(!vf2) return 0; // shouldn't happen! + vf->next=vf2; + } + vf->next->w = width; vf->next->h = height; + return 1; +#endif +} + +int ff_vf_next_control(struct vf_instance *vf, int request, void* data){ + MPContext *m= (void*)vf; + av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request); + return 0; +} + +static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){ + MPContext *m= (void*)vf; + int i; + av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt); + + for(i=0; conversion_map[i].fmt; i++){ + if(fmt==conversion_map[i].fmt) + return 1; //we suport all + } + return 0; +} + + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + MPContext *m = ctx->priv; + char name[256]; + int i; + + m->avfctx= ctx; + + if(!args || 1!=sscanf(args, "%255[^:=]", name)){ + av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n"); + return AVERROR(EINVAL); + } + args += strlen(name); + if (args[0] == '=') + args++; + + for(i=0; ;i++){ + if(!filters[i] || !strcmp(name, filters[i]->name)) + break; + } + + if(!filters[i]){ + av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name); + return AVERROR(EINVAL); + } + + av_log(ctx, AV_LOG_WARNING, + "'%s' is a wrapped MPlayer filter (libmpcodecs). 
This filter may be removed\n" + "once it has been ported to a native libavfilter.\n", name); + + memset(&m->vf,0,sizeof(m->vf)); + m->vf.info= filters[i]; + + m->vf.next = &m->next_vf; + m->vf.put_image = ff_vf_next_put_image; + m->vf.config = ff_vf_next_config; + m->vf.query_format= vf_default_query_format; + m->vf.control = ff_vf_next_control; + m->vf.default_caps=VFCAP_ACCEPT_STRIDE; + m->vf.default_reqs=0; + if(m->vf.info->opts) + av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n"); +#if 0 + if(vf->info->opts) { // vf_vo get some special argument + const m_struct_t* st = vf->info->opts; + void* vf_priv = m_struct_alloc(st); + int n; + for(n = 0 ; args && args[2*n] ; n++) + m_struct_set(st,vf_priv,args[2*n],args[2*n+1]); + vf->priv = vf_priv; + args = NULL; + } else // Otherwise we should have the '_oldargs_' + if(args && !strcmp(args[0],"_oldargs_")) + args = (char**)args[1]; + else + args = NULL; +#endif + if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){ + av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args); + return -1; + } + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + MPContext *m = ctx->priv; + vf_instance_t *vf = &m->vf; + + while(vf){ + vf_instance_t *next = vf->next; + if(vf->uninit) + vf->uninit(vf); + ff_free_mp_image(vf->imgctx.static_images[0]); + ff_free_mp_image(vf->imgctx.static_images[1]); + ff_free_mp_image(vf->imgctx.temp_images[0]); + ff_free_mp_image(vf->imgctx.export_images[0]); + vf = next; + } +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *avfmts=NULL; + MPContext *m = ctx->priv; + enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE; + int i; + + for(i=0; conversion_map[i].fmt; i++){ + av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt); + if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){ + av_log(ctx, AV_LOG_DEBUG, "supported,adding\n"); + if (conversion_map[i].pix_fmt != lastpixfmt) { + ff_add_format(&avfmts, 
conversion_map[i].pix_fmt); + lastpixfmt = conversion_map[i].pix_fmt; + } + } + } + + if (!avfmts) + return -1; + + //We assume all allowed input formats are also allowed output formats + ff_set_common_formats(ctx, avfmts); + return 0; +} + +static int config_inprops(AVFilterLink *inlink) +{ + MPContext *m = inlink->dst->priv; + int i; + for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++); + + av_assert0(conversion_map[i].fmt && inlink->w && inlink->h); + + m->vf.fmt.have_configured = 1; + m->vf.fmt.orig_height = inlink->h; + m->vf.fmt.orig_width = inlink->w; + m->vf.fmt.orig_fmt = conversion_map[i].fmt; + + if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0) + return -1; + + return 0; +} + +static int config_outprops(AVFilterLink *outlink) +{ + MPContext *m = outlink->src->priv; + + outlink->w = m->next_vf.w; + outlink->h = m->next_vf.h; + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + MPContext *m = outlink->src->priv; + int ret; + + av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n"); + + for(m->frame_returned=0; !m->frame_returned;){ + ret=ff_request_frame(outlink->src->inputs[0]); + if(ret<0) + break; + } + + av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret); + return ret; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) +{ + MPContext *m = inlink->dst->priv; + int i; + double pts= MP_NOPTS_VALUE; + mp_image_t* mpi = ff_new_mp_image(inpic->video->w, inpic->video->h); + + if(inpic->pts != AV_NOPTS_VALUE) + pts= inpic->pts / av_q2d(inlink->time_base); + + for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++); + ff_mp_image_setfmt(mpi,conversion_map[i].fmt); + + memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes))); + memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride))); + + //FIXME pass interleced & tff flags around + + 
// mpi->flags|=MP_IMGFLAG_ALLOCATED; ? + mpi->flags |= MP_IMGFLAG_READABLE; + if(!(inpic->perms & AV_PERM_WRITE)) + mpi->flags |= MP_IMGFLAG_PRESERVE; + if(m->vf.put_image(&m->vf, mpi, pts) == 0){ + av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n"); + }else{ + avfilter_unref_buffer(inpic); + } + ff_free_mp_image(mpi); + return 0; +} + +static const AVFilterPad mp_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .config_props = config_inprops, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad mp_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = config_outprops, + }, + { NULL } +}; + +AVFilter avfilter_vf_mp = { + .name = "mp", + .description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."), + .init = init, + .uninit = uninit, + .priv_size = sizeof(MPContext), + .query_formats = query_formats, + .inputs = mp_inputs, + .outputs = mp_outputs, +}; diff --git a/libavfilter/vf_null.c b/libavfilter/vf_null.c index a7abb7a..eafa268 100644 --- a/libavfilter/vf_null.c +++ b/libavfilter/vf_null.c @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 8741d48..d44677f 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -3,20 +3,20 @@ * Copyright (c) 2010 Baptiste Coudurier * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -25,22 +25,24 @@ * overlay one video on top of another */ +/* #define DEBUG */ + #include "avfilter.h" #include "formats.h" #include "libavutil/common.h" #include "libavutil/eval.h" #include "libavutil/avstring.h" -#include "libavutil/avassert.h" +#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/imgutils.h" #include "libavutil/mathematics.h" +#include "libavutil/timestamp.h" #include "internal.h" +#include "bufferqueue.h" +#include "drawutils.h" #include "video.h" static const char *const var_names[] = { - "E", - "PHI", - "PI", "main_w", "W", ///< width of the main video "main_h", "H", ///< height of the main video "overlay_w", "w", ///< width of the overlay video @@ -49,9 +51,6 @@ static const char *const var_names[] = { }; enum var_name { - VAR_E, - VAR_PHI, - VAR_PI, VAR_MAIN_W, VAR_MW, VAR_MAIN_H, VAR_MH, VAR_OVERLAY_W, VAR_OW, @@ -62,63 +61,129 @@ enum var_name { #define MAIN 0 #define OVERLAY 1 +#define R 0 +#define G 1 +#define B 2 +#define A 3 + +#define Y 0 +#define U 1 +#define V 2 + typedef struct { + const AVClass *class; int x, y; ///< position of overlayed picture - int max_plane_step[4]; ///< steps per pixel for each plane + int allow_packed_rgb; + uint8_t frame_requested; + uint8_t overlay_eof; + uint8_t main_is_packed_rgb; + uint8_t main_rgba_map[4]; + uint8_t main_has_alpha; + uint8_t overlay_is_packed_rgb; + uint8_t overlay_rgba_map[4]; + uint8_t overlay_has_alpha; + + AVFilterBufferRef *overpicref; + struct FFBufQueue queue_main; + struct FFBufQueue queue_over; + + int main_pix_step[4]; ///< steps per pixel for each plane of the main output + int overlay_pix_step[4]; ///< steps per pixel for each plane of the overlay int hsub, vsub; ///< 
chroma subsampling values - char x_expr[256], y_expr[256]; - - AVFilterBufferRef *main; - AVFilterBufferRef *over_prev, *over_next; + char *x_expr, *y_expr; } OverlayContext; +#define OFFSET(x) offsetof(OverlayContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption overlay_options[] = { + { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + {"rgb", "force packed RGB in input and output", OFFSET(allow_packed_rgb), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(overlay); + static av_cold int init(AVFilterContext *ctx, const char *args) { OverlayContext *over = ctx->priv; + static const char *shorthand[] = { "x", "y", NULL }; - av_strlcpy(over->x_expr, "0", sizeof(over->x_expr)); - av_strlcpy(over->y_expr, "0", sizeof(over->y_expr)); + over->class = &overlay_class; + av_opt_set_defaults(over); - if (args) - sscanf(args, "%255[^:]:%255[^:]", over->x_expr, over->y_expr); - - return 0; + return av_opt_set_from_string(over, args, shorthand, "=", ":"); } static av_cold void uninit(AVFilterContext *ctx) { - OverlayContext *s = ctx->priv; + OverlayContext *over = ctx->priv; + + av_opt_free(over); - avfilter_unref_bufferp(&s->main); - avfilter_unref_bufferp(&s->over_prev); - avfilter_unref_bufferp(&s->over_next); + avfilter_unref_bufferp(&over->overpicref); + ff_bufqueue_discard_all(&over->queue_main); + ff_bufqueue_discard_all(&over->queue_over); } static int query_formats(AVFilterContext *ctx) { - const enum AVPixelFormat inout_pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; - const enum AVPixelFormat blend_pix_fmts[] = { AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE }; - AVFilterFormats *inout_formats = ff_make_format_list(inout_pix_fmts); - AVFilterFormats *blend_formats = ff_make_format_list(blend_pix_fmts); + OverlayContext 
*over = ctx->priv; + + /* overlay formats contains alpha, for avoiding conversion with alpha information loss */ + static const enum AVPixelFormat main_pix_fmts_yuv[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE }; + static const enum AVPixelFormat overlay_pix_fmts_yuv[] = { AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE }; + static const enum AVPixelFormat main_pix_fmts_rgb[] = { + AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, + AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, + AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_NONE + }; + static const enum AVPixelFormat overlay_pix_fmts_rgb[] = { + AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, + AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, + AV_PIX_FMT_NONE + }; + + AVFilterFormats *main_formats; + AVFilterFormats *overlay_formats; + + if (over->allow_packed_rgb) { + main_formats = ff_make_format_list(main_pix_fmts_rgb); + overlay_formats = ff_make_format_list(overlay_pix_fmts_rgb); + } else { + main_formats = ff_make_format_list(main_pix_fmts_yuv); + overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv); + } - ff_formats_ref(inout_formats, &ctx->inputs [MAIN ]->out_formats); - ff_formats_ref(blend_formats, &ctx->inputs [OVERLAY]->out_formats); - ff_formats_ref(inout_formats, &ctx->outputs[MAIN ]->in_formats ); + ff_formats_ref(main_formats, &ctx->inputs [MAIN ]->out_formats); + ff_formats_ref(overlay_formats, &ctx->inputs [OVERLAY]->out_formats); + ff_formats_ref(main_formats, &ctx->outputs[MAIN ]->in_formats ); return 0; } +static const enum AVPixelFormat alpha_pix_fmts[] = { + AV_PIX_FMT_YUVA420P, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, + AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE +}; + static int config_input_main(AVFilterLink *inlink) { OverlayContext *over = inlink->dst->priv; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); - av_image_fill_max_pixsteps(over->max_plane_step, NULL, pix_desc); + av_image_fill_max_pixsteps(over->main_pix_step, NULL, pix_desc); + over->hsub = pix_desc->log2_chroma_w; over->vsub = 
pix_desc->log2_chroma_h; + over->main_is_packed_rgb = + ff_fill_rgba_map(over->main_rgba_map, inlink->format) >= 0; + over->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts); return 0; } @@ -129,13 +194,12 @@ static int config_input_overlay(AVFilterLink *inlink) char *expr; double var_values[VAR_VARS_NB], res; int ret; + const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); + + av_image_fill_max_pixsteps(over->overlay_pix_step, NULL, pix_desc); /* Finish the configuration by evaluating the expressions now when both inputs are configured. */ - var_values[VAR_E ] = M_E; - var_values[VAR_PHI] = M_PHI; - var_values[VAR_PI ] = M_PI; - var_values[VAR_MAIN_W ] = var_values[VAR_MW] = ctx->inputs[MAIN ]->w; var_values[VAR_MAIN_H ] = var_values[VAR_MH] = ctx->inputs[MAIN ]->h; var_values[VAR_OVERLAY_W] = var_values[VAR_OW] = ctx->inputs[OVERLAY]->w; @@ -155,6 +219,10 @@ static int config_input_overlay(AVFilterLink *inlink) goto fail; over->x = res; + over->overlay_is_packed_rgb = + ff_fill_rgba_map(over->overlay_rgba_map, inlink->format) >= 0; + over->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts); + av_log(ctx, AV_LOG_VERBOSE, "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n", ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h, @@ -193,54 +261,133 @@ static int config_output(AVFilterLink *outlink) return 0; } -static void blend_frame(AVFilterContext *ctx, +// divide by 255 and round to nearest +// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16 +#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16) + +// calculate the unpremultiplied alpha, applying the general equation: +// alpha = alpha_overlay / ( (alpha_main + alpha_overlay) - (alpha_main * alpha_overlay) ) +// (((x) << 16) - ((x) << 9) + (x)) is a faster version of: 255 * 255 * x +// ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)) is a faster version of: 255 * (x + y) +#define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / 
((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x))) + +/** + * Blend image in src to destination buffer dst at position (x, y). + * + * It is assumed that the src image at position (x, y) is contained in + * dst. + */ +static void blend_image(AVFilterContext *ctx, AVFilterBufferRef *dst, AVFilterBufferRef *src, int x, int y) { OverlayContext *over = ctx->priv; int i, j, k; - int width, height; - int overlay_end_y = y + src->video->h; - int end_y, start_y; + int width = src->video->w; + int height = src->video->h; - width = FFMIN(dst->video->w - x, src->video->w); - end_y = FFMIN(dst->video->h, overlay_end_y); - start_y = FFMAX(y, 0); - height = end_y - start_y; - - if (dst->format == AV_PIX_FMT_BGR24 || dst->format == AV_PIX_FMT_RGB24) { - uint8_t *dp = dst->data[0] + x * 3 + start_y * dst->linesize[0]; + if (over->main_is_packed_rgb) { + uint8_t *dp = dst->data[0] + x * over->main_pix_step[0] + + y * dst->linesize[0]; uint8_t *sp = src->data[0]; - int b = dst->format == AV_PIX_FMT_BGR24 ? 2 : 0; - int r = dst->format == AV_PIX_FMT_BGR24 ? 
0 : 2; - if (y < 0) - sp += -y * src->linesize[0]; + uint8_t alpha; ///< the amount of overlay to blend on to main + const int dr = over->main_rgba_map[R]; + const int dg = over->main_rgba_map[G]; + const int db = over->main_rgba_map[B]; + const int da = over->main_rgba_map[A]; + const int dstep = over->main_pix_step[0]; + const int sr = over->overlay_rgba_map[R]; + const int sg = over->overlay_rgba_map[G]; + const int sb = over->overlay_rgba_map[B]; + const int sa = over->overlay_rgba_map[A]; + const int sstep = over->overlay_pix_step[0]; + const int main_has_alpha = over->main_has_alpha; for (i = 0; i < height; i++) { uint8_t *d = dp, *s = sp; for (j = 0; j < width; j++) { - d[r] = (d[r] * (0xff - s[3]) + s[0] * s[3] + 128) >> 8; - d[1] = (d[1] * (0xff - s[3]) + s[1] * s[3] + 128) >> 8; - d[b] = (d[b] * (0xff - s[3]) + s[2] * s[3] + 128) >> 8; - d += 3; - s += 4; + alpha = s[sa]; + + // if the main channel has an alpha channel, alpha has to be calculated + // to create an un-premultiplied (straight) alpha value + if (main_has_alpha && alpha != 0 && alpha != 255) { + uint8_t alpha_d = d[da]; + alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); + } + + switch (alpha) { + case 0: + break; + case 255: + d[dr] = s[sr]; + d[dg] = s[sg]; + d[db] = s[sb]; + break; + default: + // main_value = main_value * (1 - alpha) + overlay_value * alpha + // since alpha is in the range 0-255, the result must divided by 255 + d[dr] = FAST_DIV255(d[dr] * (255 - alpha) + s[sr] * alpha); + d[dg] = FAST_DIV255(d[dg] * (255 - alpha) + s[sg] * alpha); + d[db] = FAST_DIV255(d[db] * (255 - alpha) + s[sb] * alpha); + } + if (main_has_alpha) { + switch (alpha) { + case 0: + break; + case 255: + d[da] = s[sa]; + break; + default: + // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha + d[da] += FAST_DIV255((255 - d[da]) * s[sa]); + } + } + d += dstep; + s += sstep; } dp += dst->linesize[0]; sp += src->linesize[0]; } } else { + const int main_has_alpha = over->main_has_alpha; + if 
(main_has_alpha) { + uint8_t *da = dst->data[3] + x * over->main_pix_step[3] + + y * dst->linesize[3]; + uint8_t *sa = src->data[3]; + uint8_t alpha; ///< the amount of overlay to blend on to main + for (i = 0; i < height; i++) { + uint8_t *d = da, *s = sa; + for (j = 0; j < width; j++) { + alpha = *s; + if (alpha != 0 && alpha != 255) { + uint8_t alpha_d = *d; + alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); + } + switch (alpha) { + case 0: + break; + case 255: + *d = *s; + break; + default: + // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha + *d += FAST_DIV255((255 - *d) * *s); + } + d += 1; + s += 1; + } + da += dst->linesize[3]; + sa += src->linesize[3]; + } + } for (i = 0; i < 3; i++) { int hsub = i ? over->hsub : 0; int vsub = i ? over->vsub : 0; uint8_t *dp = dst->data[i] + (x >> hsub) + - (start_y >> vsub) * dst->linesize[i]; + (y >> vsub) * dst->linesize[i]; uint8_t *sp = src->data[i]; uint8_t *ap = src->data[3]; int wp = FFALIGN(width, 1<<hsub) >> hsub; int hp = FFALIGN(height, 1<<vsub) >> vsub; - if (y < 0) { - sp += ((-y) >> vsub) * src->linesize[i]; - ap += -y * src->linesize[3]; - } for (j = 0; j < hp; j++) { uint8_t *d = dp, *s = sp, *a = ap; for (k = 0; k < wp; k++) { @@ -257,7 +404,26 @@ static void blend_frame(AVFilterContext *ctx, alpha = (alpha_v + alpha_h) >> 1; } else alpha = a[0]; - *d = (*d * (0xff - alpha) + *s++ * alpha + 128) >> 8; + // if the main channel has an alpha channel, alpha has to be calculated + // to create an un-premultiplied (straight) alpha value + if (main_has_alpha && alpha != 0 && alpha != 255) { + // average alpha for color components, improve quality + uint8_t alpha_d; + if (hsub && vsub && j+1 < hp && k+1 < wp) { + alpha_d = (d[0] + d[src->linesize[3]] + + d[1] + d[src->linesize[3]+1]) >> 2; + } else if (hsub || vsub) { + alpha_h = hsub && k+1 < wp ? + (d[0] + d[1]) >> 1 : d[0]; + alpha_v = vsub && j+1 < hp ? 
+ (d[0] + d[src->linesize[3]]) >> 1 : d[0]; + alpha_d = (alpha_v + alpha_h) >> 1; + } else + alpha_d = d[0]; + alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); + } + *d = FAST_DIV255(*d * (255 - alpha) + *s * alpha); + s++; d++; a += 1 << hsub; } @@ -269,113 +435,148 @@ static void blend_frame(AVFilterContext *ctx, } } -static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *frame) +static int try_filter_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) { - OverlayContext *s = inlink->dst->priv; + OverlayContext *over = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterBufferRef *next_overpic; + int ret; - av_assert0(!s->main); - s->main = frame; + /* Discard obsolete overlay frames: if there is a next overlay frame with pts + * before the main frame, we can drop the current overlay. */ + while (1) { + next_overpic = ff_bufqueue_peek(&over->queue_over, 0); + if (!next_overpic || av_compare_ts(next_overpic->pts, ctx->inputs[OVERLAY]->time_base, + mainpic->pts , ctx->inputs[MAIN]->time_base) > 0) + break; + ff_bufqueue_get(&over->queue_over); + avfilter_unref_buffer(over->overpicref); + over->overpicref = next_overpic; + } - return 0; + /* If there is no next frame and no EOF and the overlay frame is before + * the main frame, we can not know yet if it will be superseded. */ + if (!over->queue_over.available && !over->overlay_eof && + (!over->overpicref || av_compare_ts(over->overpicref->pts, ctx->inputs[OVERLAY]->time_base, + mainpic->pts , ctx->inputs[MAIN]->time_base) < 0)) + return AVERROR(EAGAIN); + + /* At this point, we know that the current overlay frame extends to the + * time of the main frame. 
*/ + av_dlog(ctx, "main_pts:%s main_pts_time:%s", + av_ts2str(mainpic->pts), av_ts2timestr(mainpic->pts, &outlink->time_base)); + if (over->overpicref) + av_dlog(ctx, " over_pts:%s over_pts_time:%s", + av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base)); + av_dlog(ctx, "\n"); + + if (over->overpicref) + blend_image(ctx, mainpic, over->overpicref, over->x, over->y); + ret = ff_filter_frame(ctx->outputs[0], mainpic); + av_assert1(ret != AVERROR(EAGAIN)); + over->frame_requested = 0; + return ret; } -static int filter_frame_overlay(AVFilterLink *inlink, AVFilterBufferRef *frame) +static int try_filter_next_frame(AVFilterContext *ctx) { - OverlayContext *s = inlink->dst->priv; + OverlayContext *over = ctx->priv; + AVFilterBufferRef *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0); + int ret; - av_assert0(!s->over_next); - s->over_next = frame; + if (!next_mainpic) + return AVERROR(EAGAIN); + if ((ret = try_filter_frame(ctx, next_mainpic)) == AVERROR(EAGAIN)) + return ret; + ff_bufqueue_get(&over->queue_main); + return ret; +} - return 0; +static int flush_frames(AVFilterContext *ctx) +{ + int ret; + + while (!(ret = try_filter_next_frame(ctx))); + return ret == AVERROR(EAGAIN) ? 
0 : ret; } -static int output_frame(AVFilterContext *ctx) +static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref) { - OverlayContext *s = ctx->priv; - AVFilterLink *outlink = ctx->outputs[0]; - int ret = ff_filter_frame(outlink, s->main); - s->main = NULL; + AVFilterContext *ctx = inlink->dst; + OverlayContext *over = ctx->priv; + int ret; - return ret; + if ((ret = flush_frames(ctx)) < 0) + return ret; + if ((ret = try_filter_frame(ctx, inpicref)) < 0) { + if (ret != AVERROR(EAGAIN)) + return ret; + ff_bufqueue_add(ctx, &over->queue_main, inpicref); + } + + if (!over->overpicref) + return 0; + flush_frames(ctx); + + return 0; } -static int handle_overlay_eof(AVFilterContext *ctx) +static int filter_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref) { - OverlayContext *s = ctx->priv; - if (s->over_prev) - blend_frame(ctx, s->main, s->over_prev, s->x, s->y); - return output_frame(ctx); + AVFilterContext *ctx = inlink->dst; + OverlayContext *over = ctx->priv; + int ret; + + if ((ret = flush_frames(ctx)) < 0) + return ret; + ff_bufqueue_add(ctx, &over->queue_over, inpicref); + ret = try_filter_next_frame(ctx); + return ret == AVERROR(EAGAIN) ? 0 : ret; } static int request_frame(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; - OverlayContext *s = ctx->priv; - AVRational tb_main = ctx->inputs[MAIN]->time_base; - AVRational tb_over = ctx->inputs[OVERLAY]->time_base; - int ret = 0; - - /* get a frame on the main input */ - if (!s->main) { - ret = ff_request_frame(ctx->inputs[MAIN]); + OverlayContext *over = ctx->priv; + int input, ret; + + if (!try_filter_next_frame(ctx)) + return 0; + over->frame_requested = 1; + while (over->frame_requested) { + /* TODO if we had a frame duration, we could guess more accurately */ + input = !over->overlay_eof && (over->queue_main.available || + over->queue_over.available < 2) ? 
+ OVERLAY : MAIN; + ret = ff_request_frame(ctx->inputs[input]); + /* EOF on main is reported immediately */ + if (ret == AVERROR_EOF && input == OVERLAY) { + over->overlay_eof = 1; + if ((ret = try_filter_next_frame(ctx)) != AVERROR(EAGAIN)) + return ret; + ret = 0; /* continue requesting frames on main */ + } if (ret < 0) return ret; } - - /* get a new frame on the overlay input, on EOF - * reuse previous */ - if (!s->over_next) { - ret = ff_request_frame(ctx->inputs[OVERLAY]); - if (ret == AVERROR_EOF) - return handle_overlay_eof(ctx); - else if (ret < 0) - return ret; - } - - while (s->main->pts != AV_NOPTS_VALUE && - s->over_next->pts != AV_NOPTS_VALUE && - av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) { - avfilter_unref_bufferp(&s->over_prev); - FFSWAP(AVFilterBufferRef*, s->over_prev, s->over_next); - - ret = ff_request_frame(ctx->inputs[OVERLAY]); - if (ret == AVERROR_EOF) - return handle_overlay_eof(ctx); - else if (ret < 0) - return ret; - } - - if (s->main->pts == AV_NOPTS_VALUE || - s->over_next->pts == AV_NOPTS_VALUE || - !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) { - blend_frame(ctx, s->main, s->over_next, s->x, s->y); - avfilter_unref_bufferp(&s->over_prev); - FFSWAP(AVFilterBufferRef*, s->over_prev, s->over_next); - } else if (s->over_prev) { - blend_frame(ctx, s->main, s->over_prev, s->x, s->y); - } - - return output_frame(ctx); + return 0; } static const AVFilterPad avfilter_vf_overlay_inputs[] = { { .name = "main", .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, .config_props = config_input_main, .filter_frame = filter_frame_main, - .min_perms = AV_PERM_READ, - .rej_perms = AV_PERM_REUSE2 | AV_PERM_PRESERVE, - .needs_fifo = 1, + .min_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE, }, { .name = "overlay", .type = AVMEDIA_TYPE_VIDEO, .config_props = config_input_overlay, - .filter_frame = filter_frame_overlay, - .min_perms = AV_PERM_READ, - .rej_perms = 
AV_PERM_REUSE2, - .needs_fifo = 1, + .filter_frame = filter_frame_over, + .min_perms = AV_PERM_READ | AV_PERM_PRESERVE, }, { NULL } }; @@ -384,6 +585,7 @@ static const AVFilterPad avfilter_vf_overlay_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, + .rej_perms = AV_PERM_WRITE, .config_props = config_output, .request_frame = request_frame, }, @@ -403,4 +605,5 @@ AVFilter avfilter_vf_overlay = { .inputs = avfilter_vf_overlay_inputs, .outputs = avfilter_vf_overlay_outputs, + .priv_class = &overlay_class, }; diff --git a/libavfilter/vf_pad.c b/libavfilter/vf_pad.c index f1a890e..5c146f2 100644 --- a/libavfilter/vf_pad.c +++ b/libavfilter/vf_pad.c @@ -2,20 +2,20 @@ * Copyright (c) 2008 vmrsss * Copyright (c) 2009 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -35,14 +35,12 @@ #include "libavutil/colorspace.h" #include "libavutil/avassert.h" #include "libavutil/imgutils.h" +#include "libavutil/opt.h" #include "libavutil/parseutils.h" #include "libavutil/mathematics.h" #include "drawutils.h" static const char *const var_names[] = { - "PI", - "PHI", - "E", "in_w", "iw", "in_h", "ih", "out_w", "ow", @@ -50,15 +48,14 @@ static const char *const var_names[] = { "x", "y", "a", + "sar", + "dar", "hsub", "vsub", NULL }; enum var_name { - VAR_PI, - VAR_PHI, - VAR_E, VAR_IN_W, VAR_IW, VAR_IN_H, VAR_IH, VAR_OUT_W, VAR_OW, @@ -66,6 +63,8 @@ enum var_name { VAR_X, VAR_Y, VAR_A, + VAR_SAR, + VAR_DAR, VAR_HSUB, VAR_VSUB, VARS_NB @@ -73,56 +72,55 @@ enum var_name { static int query_formats(AVFilterContext *ctx) { - static const enum AVPixelFormat pix_fmts[] = { - AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, - AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, - AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, - - AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, - AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, - AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, - AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, - AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P, - AV_PIX_FMT_YUVA420P, - - AV_PIX_FMT_NONE - }; - - ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0)); return 0; } typedef struct { + const AVClass *class; int w, h; ///< output dimensions, a value of 0 will result in the input size int x, y; ///< offsets of the input area with respect to the padded area int in_w, in_h; ///< width and height for the padded input video, which has to be aligned to the chroma values in order to avoid chroma issues - char w_expr[256]; ///< width expression string - char 
h_expr[256]; ///< height expression string - char x_expr[256]; ///< width expression string - char y_expr[256]; ///< height expression string - - uint8_t color[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area - uint8_t *line[4]; - int line_step[4]; - int hsub, vsub; ///< chroma subsampling values + char *w_expr; ///< width expression string + char *h_expr; ///< height expression string + char *x_expr; ///< width expression string + char *y_expr; ///< height expression string + char *color_str; + uint8_t rgba_color[4]; ///< color for the padding area + FFDrawContext draw; + FFDrawColor color; } PadContext; +#define OFFSET(x) offsetof(PadContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption pad_options[] = { + { "width", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "w", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "height", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "h", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "color", "set the color of the padded area border", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str = "black"}, .flags = FLAGS }, + {NULL} +}; + +AVFILTER_DEFINE_CLASS(pad); + static av_cold int init(AVFilterContext *ctx, const char *args) { PadContext *pad = ctx->priv; - char color_string[128] = "black"; + static const char *shorthand[] = { "width", "height", "x", "y", "color", NULL }; 
+ int ret; - av_strlcpy(pad->w_expr, "iw", sizeof(pad->w_expr)); - av_strlcpy(pad->h_expr, "ih", sizeof(pad->h_expr)); - av_strlcpy(pad->x_expr, "0" , sizeof(pad->w_expr)); - av_strlcpy(pad->y_expr, "0" , sizeof(pad->h_expr)); + pad->class = &pad_class; + av_opt_set_defaults(pad); - if (args) - sscanf(args, "%255[^:]:%255[^:]:%255[^:]:%255[^:]:%255s", - pad->w_expr, pad->h_expr, pad->x_expr, pad->y_expr, color_string); + if ((ret = av_opt_set_from_string(pad, args, shorthand, "=", ":")) < 0) + return ret; - if (av_parse_color(pad->color, color_string, -1, ctx) < 0) + if (av_parse_color(pad->rgba_color, pad->color_str, -1, ctx) < 0) return AVERROR(EINVAL); return 0; @@ -131,37 +129,30 @@ static av_cold int init(AVFilterContext *ctx, const char *args) static av_cold void uninit(AVFilterContext *ctx) { PadContext *pad = ctx->priv; - int i; - - for (i = 0; i < 4; i++) { - av_freep(&pad->line[i]); - pad->line_step[i] = 0; - } + av_opt_free(pad); } static int config_input(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; PadContext *pad = ctx->priv; - const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); - uint8_t rgba_color[4]; - int ret, is_packed_rgba; + int ret; double var_values[VARS_NB], res; char *expr; - pad->hsub = pix_desc->log2_chroma_w; - pad->vsub = pix_desc->log2_chroma_h; + ff_draw_init(&pad->draw, inlink->format, 0); + ff_draw_color(&pad->draw, &pad->color, pad->rgba_color); - var_values[VAR_PI] = M_PI; - var_values[VAR_PHI] = M_PHI; - var_values[VAR_E] = M_E; var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w; var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h; var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN; var_values[VAR_A] = (double) inlink->w / inlink->h; - var_values[VAR_HSUB] = 1<<pad->hsub; - var_values[VAR_VSUB] = 1<<pad->vsub; + var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? 
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; + var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; + var_values[VAR_HSUB] = 1 << pad->draw.hsub_max; + var_values[VAR_VSUB] = 1 << pad->draw.vsub_max; /* evaluate width and height */ av_expr_parse_and_eval(&res, (expr = pad->w_expr), @@ -208,22 +199,16 @@ static int config_input(AVFilterLink *inlink) if (!pad->h) pad->h = inlink->h; - pad->w &= ~((1 << pad->hsub) - 1); - pad->h &= ~((1 << pad->vsub) - 1); - pad->x &= ~((1 << pad->hsub) - 1); - pad->y &= ~((1 << pad->vsub) - 1); - - pad->in_w = inlink->w & ~((1 << pad->hsub) - 1); - pad->in_h = inlink->h & ~((1 << pad->vsub) - 1); - - memcpy(rgba_color, pad->color, sizeof(rgba_color)); - ff_fill_line_with_color(pad->line, pad->line_step, pad->w, pad->color, - inlink->format, rgba_color, &is_packed_rgba, NULL); + pad->w = ff_draw_round_to_sub(&pad->draw, 0, -1, pad->w); + pad->h = ff_draw_round_to_sub(&pad->draw, 1, -1, pad->h); + pad->x = ff_draw_round_to_sub(&pad->draw, 0, -1, pad->x); + pad->y = ff_draw_round_to_sub(&pad->draw, 1, -1, pad->y); + pad->in_w = ff_draw_round_to_sub(&pad->draw, 0, -1, inlink->w); + pad->in_h = ff_draw_round_to_sub(&pad->draw, 1, -1, inlink->h); - av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X[%s]\n", + av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X\n", inlink->w, inlink->h, pad->w, pad->h, pad->x, pad->y, - pad->color[0], pad->color[1], pad->color[2], pad->color[3], - is_packed_rgba ? "rgba" : "yuva"); + pad->rgba_color[0], pad->rgba_color[1], pad->rgba_color[2], pad->rgba_color[3]); if (pad->x < 0 || pad->y < 0 || pad->w <= 0 || pad->h <= 0 || @@ -256,9 +241,10 @@ static int config_output(AVFilterLink *outlink) static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h) { PadContext *pad = inlink->dst->priv; + int align = (perms&AV_PERM_ALIGN) ? 
AVFILTER_ALIGN : 1; AVFilterBufferRef *picref = ff_get_video_buffer(inlink->dst->outputs[0], perms, - w + (pad->w - pad->in_w), + w + (pad->w - pad->in_w) + 4*align, h + (pad->h - pad->in_h)); int plane; @@ -268,13 +254,9 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int picref->video->w = w; picref->video->h = h; - for (plane = 0; plane < 4 && picref->data[plane]; plane++) { - int hsub = (plane == 1 || plane == 2) ? pad->hsub : 0; - int vsub = (plane == 1 || plane == 2) ? pad->vsub : 0; - - picref->data[plane] += (pad->x >> hsub) * pad->line_step[plane] + - (pad->y >> vsub) * picref->linesize[plane]; - } + for (plane = 0; plane < 4 && picref->data[plane]; plane++) + picref->data[plane] += FFALIGN(pad->x >> pad->draw.hsub[plane], align) * pad->draw.pixelstep[plane] + + (pad->y >> pad->draw.vsub[plane]) * picref->linesize[plane]; return picref; } @@ -284,12 +266,12 @@ static int does_clip(PadContext *pad, AVFilterBufferRef *outpicref, int plane, i int64_t x_in_buf, y_in_buf; x_in_buf = outpicref->data[plane] - outpicref->buf->data[plane] - + (x >> hsub) * pad ->line_step[plane] - + (y >> vsub) * outpicref->linesize [plane]; + + (x >> hsub) * pad->draw.pixelstep[plane] + + (y >> vsub) * outpicref->linesize[plane]; - if(x_in_buf < 0 || x_in_buf % pad->line_step[plane]) + if(x_in_buf < 0 || x_in_buf % pad->draw.pixelstep[plane]) return 1; - x_in_buf /= pad->line_step[plane]; + x_in_buf /= pad->draw.pixelstep[plane]; av_assert0(outpicref->buf->linesize[plane]>0); //while reference can use negative linesize the main buffer should not @@ -313,17 +295,17 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) return AVERROR(ENOMEM); } - for (plane = 0; plane < 4 && out->data[plane]; plane++) { - int hsub = (plane == 1 || plane == 2) ? pad->hsub : 0; - int vsub = (plane == 1 || plane == 2) ? 
pad->vsub : 0; + for (plane = 0; plane < 4 && out->data[plane] && pad->draw.pixelstep[plane]; plane++) { + int hsub = pad->draw.hsub[plane]; + int vsub = pad->draw.vsub[plane]; av_assert0(out->buf->w > 0 && out->buf->h > 0); if (out->format != out->buf->format) //unsupported currently break; - out->data[plane] -= (pad->x >> hsub) * pad->line_step[plane] + - (pad->y >> vsub) * out->linesize [plane]; + out->data[plane] -= (pad->x >> hsub) * pad->draw.pixelstep[plane] + + (pad->y >> vsub) * out->linesize[plane]; if (does_clip(pad, out, plane, hsub, vsub, 0, 0) || does_clip(pad, out, plane, hsub, vsub, 0, pad->h - 1) || @@ -331,7 +313,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) does_clip(pad, out, plane, hsub, vsub, pad->w - 1, pad->h - 1)) break; } - needs_copy = plane < 4 && out->data[plane]; + needs_copy = plane < 4 && out->data[plane] || !(out->perms & AV_PERM_WRITE); if (needs_copy) { av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n"); avfilter_unref_buffer(out); @@ -351,31 +333,30 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) /* top bar */ if (pad->y) { - ff_draw_rectangle(out->data, out->linesize, - pad->line, pad->line_step, pad->hsub, pad->vsub, + ff_fill_rectangle(&pad->draw, &pad->color, + out->data, out->linesize, 0, 0, pad->w, pad->y); } /* bottom bar */ if (pad->h > pad->y + pad->in_h) { - ff_draw_rectangle(out->data, out->linesize, - pad->line, pad->line_step, pad->hsub, pad->vsub, + ff_fill_rectangle(&pad->draw, &pad->color, + out->data, out->linesize, 0, pad->y + pad->in_h, pad->w, pad->h - pad->y - pad->in_h); } /* left border */ - ff_draw_rectangle(out->data, out->linesize, pad->line, pad->line_step, - pad->hsub, pad->vsub, 0, pad->y, pad->x, in->video->h); + ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize, + 0, pad->y, pad->x, in->video->h); if (needs_copy) { - ff_copy_rectangle(out->data, out->linesize, in->data, in->linesize, - 
pad->line_step, pad->hsub, pad->vsub, - pad->x, pad->y, 0, in->video->w, in->video->h); + ff_copy_rectangle2(&pad->draw, + out->data, out->linesize, in->data, in->linesize, + pad->x, pad->y, 0, 0, in->video->w, in->video->h); } /* right border */ - ff_draw_rectangle(out->data, out->linesize, - pad->line, pad->line_step, pad->hsub, pad->vsub, + ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize, pad->x + pad->in_w, pad->y, pad->w - pad->x - pad->in_w, in->video->h); @@ -415,4 +396,5 @@ AVFilter avfilter_vf_pad = { .inputs = avfilter_vf_pad_inputs, .outputs = avfilter_vf_pad_outputs, + .priv_class = &pad_class, }; diff --git a/libavfilter/vf_pixdesctest.c b/libavfilter/vf_pixdesctest.c index a1e982c..6ac03d9 100644 --- a/libavfilter/vf_pixdesctest.c +++ b/libavfilter/vf_pixdesctest.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2009 Stefano Sabatini * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -81,7 +81,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) /* copy palette */ if (priv->pix_desc->flags & PIX_FMT_PAL || priv->pix_desc->flags & PIX_FMT_PSEUDOPAL) - memcpy(out->data[1], in->data[1], 256*4); + memcpy(out->data[1], in->data[1], AVPALETTE_SIZE); for (c = 0; c < priv->pix_desc->nb_components; c++) { int w1 = c == 1 || c == 2 ? w>>priv->pix_desc->log2_chroma_w : w; @@ -89,7 +89,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) for (i = 0; i < h1; i++) { av_read_image_line(priv->line, - in->data, + (void*)in->data, in->linesize, priv->pix_desc, 0, i, c, w1, 0); diff --git a/libavfilter/vf_pp.c b/libavfilter/vf_pp.c new file mode 100644 index 0000000..bde5ebf --- /dev/null +++ b/libavfilter/vf_pp.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2002 A'rpi + * Copyright (C) 2012 Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * libpostproc filter, ported from MPlayer. 
+ */ + +#include "libavutil/avassert.h" +#include "libavutil/opt.h" +#include "internal.h" + +#include "libpostproc/postprocess.h" + +typedef struct { + int mode_id; + pp_mode *modes[PP_QUALITY_MAX + 1]; + void *pp_ctx; +} PPFilterContext; + +static av_cold int pp_init(AVFilterContext *ctx, const char *args) +{ + int i; + PPFilterContext *pp = ctx->priv; + + if (!args || !*args) + args = "de"; + + for (i = 0; i <= PP_QUALITY_MAX; i++) { + pp->modes[i] = pp_get_mode_by_name_and_quality(args, i); + if (!pp->modes[i]) + return AVERROR_EXTERNAL; + } + pp->mode_id = PP_QUALITY_MAX; + return 0; +} + +static int pp_process_command(AVFilterContext *ctx, const char *cmd, const char *args, + char *res, int res_len, int flags) +{ + PPFilterContext *pp = ctx->priv; + + if (!strcmp(cmd, "quality")) { + pp->mode_id = av_clip(strtol(args, NULL, 10), 0, PP_QUALITY_MAX); + return 0; + } + return AVERROR(ENOSYS); +} + +static int pp_query_formats(AVFilterContext *ctx) +{ + static const enum PixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P, + AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P, + AV_PIX_FMT_YUV411P, + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, + AV_PIX_FMT_NONE + }; + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int pp_config_props(AVFilterLink *inlink) +{ + int flags = PP_CPU_CAPS_AUTO; + PPFilterContext *pp = inlink->dst->priv; + + switch (inlink->format) { + case AV_PIX_FMT_YUVJ420P: + case AV_PIX_FMT_YUV420P: flags |= PP_FORMAT_420; break; + case AV_PIX_FMT_YUVJ422P: + case AV_PIX_FMT_YUV422P: flags |= PP_FORMAT_422; break; + case AV_PIX_FMT_YUV411P: flags |= PP_FORMAT_411; break; + case AV_PIX_FMT_YUVJ444P: + case AV_PIX_FMT_YUV444P: flags |= PP_FORMAT_444; break; + default: av_assert0(0); + } + + pp->pp_ctx = pp_get_context(inlink->w, inlink->h, flags); + if (!pp->pp_ctx) + return AVERROR(ENOMEM); + return 0; +} + +static int pp_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inbuf) +{ + AVFilterContext *ctx = 
inlink->dst; + PPFilterContext *pp = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + const int aligned_w = FFALIGN(outlink->w, 8); + const int aligned_h = FFALIGN(outlink->h, 8); + AVFilterBufferRef *outbuf; + + outbuf = ff_get_video_buffer(outlink, AV_PERM_WRITE, aligned_w, aligned_h); + if (!outbuf) { + avfilter_unref_buffer(inbuf); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(outbuf, inbuf); + + pp_postprocess((const uint8_t **)inbuf->data, inbuf->linesize, + outbuf->data, outbuf->linesize, + aligned_w, outlink->h, + outbuf->video->qp_table, + outbuf->video->qp_table_linesize, + pp->modes[pp->mode_id], + pp->pp_ctx, + outbuf->video->pict_type); + + avfilter_unref_buffer(inbuf); + return ff_filter_frame(outlink, outbuf); +} + +static av_cold void pp_uninit(AVFilterContext *ctx) +{ + int i; + PPFilterContext *pp = ctx->priv; + + for (i = 0; i <= PP_QUALITY_MAX; i++) + pp_free_mode(pp->modes[i]); + if (pp->pp_ctx) + pp_free_context(pp->pp_ctx); +} + +static const AVFilterPad pp_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = pp_config_props, + .filter_frame = pp_filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad pp_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_pp = { + .name = "pp", + .description = NULL_IF_CONFIG_SMALL("Filter video using libpostproc."), + .priv_size = sizeof(PPFilterContext), + .init = pp_init, + .uninit = pp_uninit, + .query_formats = pp_query_formats, + .inputs = pp_inputs, + .outputs = pp_outputs, + .process_command = pp_process_command, +}; diff --git a/libavfilter/vf_removelogo.c b/libavfilter/vf_removelogo.c new file mode 100644 index 0000000..ddaf9ef --- /dev/null +++ b/libavfilter/vf_removelogo.c @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net> + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Advanced blur-based logo removing filter + * + * This filter loads an image mask file showing where a logo is and + * uses a blur transform to remove the logo. + * + * Based on the libmpcodecs remove-logo filter by Robert Edele. + */ + +/** + * This code implements a filter to remove annoying TV logos and other annoying + * images placed onto a video stream. It works by filling in the pixels that + * comprise the logo with neighboring pixels. The transform is very loosely + * based on a gaussian blur, but it is different enough to merit its own + * paragraph later on. It is a major improvement on the old delogo filter as it + * both uses a better blurring algorithm and uses a bitmap to use an arbitrary + * and generally much tighter fitting shape than a rectangle. + * + * The logo removal algorithm has two key points. The first is that it + * distinguishes between pixels in the logo and those not in the logo by using + * the passed-in bitmap. Pixels not in the logo are copied over directly without + * being modified and they also serve as source pixels for the logo + * fill-in. Pixels inside the logo have the mask applied. 
+ * + * At init-time the bitmap is reprocessed internally, and the distance to the + * nearest edge of the logo (Manhattan distance), along with a little extra to + * remove rough edges, is stored in each pixel. This is done using an in-place + * erosion algorithm, and incrementing each pixel that survives any given + * erosion. Once every pixel is eroded, the maximum value is recorded, and a + * set of masks from size 0 to this size are generaged. The masks are circular + * binary masks, where each pixel within a radius N (where N is the size of the + * mask) is a 1, and all other pixels are a 0. Although a gaussian mask would be + * more mathematically accurate, a binary mask works better in practice because + * we generally do not use the central pixels in the mask (because they are in + * the logo region), and thus a gaussian mask will cause too little blur and + * thus a very unstable image. + * + * The mask is applied in a special way. Namely, only pixels in the mask that + * line up to pixels outside the logo are used. The dynamic mask size means that + * the mask is just big enough so that the edges touch pixels outside the logo, + * so the blurring is kept to a minimum and at least the first boundary + * condition is met (that the image function itself is continuous), even if the + * second boundary condition (that the derivative of the image function is + * continuous) is not met. A masking algorithm that does preserve the second + * boundary coundition (perhaps something based on a highly-modified bi-cubic + * algorithm) should offer even better results on paper, but the noise in a + * typical TV signal should make anything based on derivatives hopelessly noisy. + */ + +#include "libavutil/imgutils.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" +#include "bbox.h" +#include "lavfutils.h" +#include "lswsutils.h" + +typedef struct { + /* Stores our collection of masks. 
The first is for an array of + the second for the y axis, and the third for the x axis. */ + int ***mask; + int max_mask_size; + int mask_w, mask_h; + + uint8_t *full_mask_data; + FFBoundingBox full_mask_bbox; + uint8_t *half_mask_data; + FFBoundingBox half_mask_bbox; +} RemovelogoContext; + +/** + * Choose a slightly larger mask size to improve performance. + * + * This function maps the absolute minimum mask size needed to the + * mask size we'll actually use. f(x) = x (the smallest that will + * work) will produce the sharpest results, but will be quite + * jittery. f(x) = 1.25x (what I'm using) is a good tradeoff in my + * opinion. This will calculate only at init-time, so you can put a + * long expression here without effecting performance. + */ +#define apply_mask_fudge_factor(x) (((x) >> 2) + x) + +/** + * Pre-process an image to give distance information. + * + * This function takes a bitmap image and converts it in place into a + * distance image. A distance image is zero for pixels outside of the + * logo and is the Manhattan distance (|dx| + |dy|) from the logo edge + * for pixels inside of the logo. This will overestimate the distance, + * but that is safe, and is far easier to implement than a proper + * pythagorean distance since I'm using a modified erosion algorithm + * to compute the distances. + * + * @param mask image which will be converted from a greyscale image + * into a distance image. + */ +static void convert_mask_to_strength_mask(uint8_t *data, int linesize, + int w, int h, int min_val, + int *max_mask_size) +{ + int x, y; + + /* How many times we've gone through the loop. Used in the + in-place erosion algorithm and to get us max_mask_size later on. 
*/ + int current_pass = 0; + + /* set all non-zero values to 1 */ + for (y = 0; y < h; y++) + for (x = 0; x < w; x++) + data[y*linesize + x] = data[y*linesize + x] > min_val; + + /* For each pass, if a pixel is itself the same value as the + current pass, and its four neighbors are too, then it is + incremented. If no pixels are incremented by the end of the + pass, then we go again. Edge pixels are counted as always + excluded (this should be true anyway for any sane mask, but if + it isn't this will ensure that we eventually exit). */ + while (1) { + /* If this doesn't get set by the end of this pass, then we're done. */ + int has_anything_changed = 0; + uint8_t *current_pixel0 = data, *current_pixel; + current_pass++; + + for (y = 1; y < h-1; y++) { + current_pixel = current_pixel0; + for (x = 1; x < w-1; x++) { + /* Apply the in-place erosion transform. It is based + on the following two premises: + 1 - Any pixel that fails 1 erosion will fail all + future erosions. + + 2 - Only pixels having survived all erosions up to + the present will be >= to current_pass. + It doesn't matter if it survived the current pass, + failed it, or hasn't been tested yet. By using >= + instead of ==, we allow the algorithm to work in + place. */ + if ( *current_pixel >= current_pass && + *(current_pixel + 1) >= current_pass && + *(current_pixel - 1) >= current_pass && + *(current_pixel + w) >= current_pass && + *(current_pixel - w) >= current_pass) { + /* Increment the value since it still has not been + * eroded, as evidenced by the if statement that + * just evaluated to true. */ + (*current_pixel)++; + has_anything_changed = 1; + } + current_pixel++; + } + current_pixel0 += linesize; + } + if (!has_anything_changed) + break; + } + + /* Apply the fudge factor, which will increase the size of the + * mask a little to reduce jitter at the cost of more blur. 
*/ + for (y = 1; y < h - 1; y++) + for (x = 1; x < w - 1; x++) + data[(y * linesize) + x] = apply_mask_fudge_factor(data[(y * linesize) + x]); + + /* As a side-effect, we now know the maximum mask size, which + * we'll use to generate our masks. */ + /* Apply the fudge factor to this number too, since we must ensure + * that enough masks are generated. */ + *max_mask_size = apply_mask_fudge_factor(current_pass + 1); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int load_mask(uint8_t **mask, int *w, int *h, + const char *filename, void *log_ctx) +{ + int ret; + enum AVPixelFormat pix_fmt; + uint8_t *src_data[4], *gray_data[4]; + int src_linesize[4], gray_linesize[4]; + + /* load image from file */ + if ((ret = ff_load_image(src_data, src_linesize, w, h, &pix_fmt, filename, log_ctx)) < 0) + return ret; + + /* convert the image to GRAY8 */ + if ((ret = ff_scale_image(gray_data, gray_linesize, *w, *h, AV_PIX_FMT_GRAY8, + src_data, src_linesize, *w, *h, pix_fmt, + log_ctx)) < 0) + goto end; + + /* copy mask to a newly allocated array */ + *mask = av_malloc(*w * *h); + if (!*mask) + ret = AVERROR(ENOMEM); + av_image_copy_plane(*mask, *w, gray_data[0], gray_linesize[0], *w, *h); + +end: + av_free(src_data[0]); + av_free(gray_data[0]); + return ret; +} + +/** + * Generate a scaled down image with half width, height, and intensity. + * + * This function not only scales down an image, but halves the value + * in each pixel too. The purpose of this is to produce a chroma + * filter image out of a luma filter image. The pixel values store the + * distance to the edge of the logo and halving the dimensions halves + * the distance. 
This function rounds up, because a downwards rounding + * error could cause the filter to fail, but an upwards rounding error + * will only cause a minor amount of excess blur in the chroma planes. + */ +static void generate_half_size_image(const uint8_t *src_data, int src_linesize, + uint8_t *dst_data, int dst_linesize, + int src_w, int src_h, + int *max_mask_size) +{ + int x, y; + + /* Copy over the image data, using the average of 4 pixels for to + * calculate each downsampled pixel. */ + for (y = 0; y < src_h/2; y++) { + for (x = 0; x < src_w/2; x++) { + /* Set the pixel if there exists a non-zero value in the + * source pixels, else clear it. */ + dst_data[(y * dst_linesize) + x] = + src_data[((y << 1) * src_linesize) + (x << 1)] || + src_data[((y << 1) * src_linesize) + (x << 1) + 1] || + src_data[(((y << 1) + 1) * src_linesize) + (x << 1)] || + src_data[(((y << 1) + 1) * src_linesize) + (x << 1) + 1]; + dst_data[(y * dst_linesize) + x] = FFMIN(1, dst_data[(y * dst_linesize) + x]); + } + } + + convert_mask_to_strength_mask(dst_data, dst_linesize, + src_w/2, src_h/2, 0, max_mask_size); +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + RemovelogoContext *removelogo = ctx->priv; + int ***mask; + int ret = 0; + int a, b, c, w, h; + int full_max_mask_size, half_max_mask_size; + + if (!args) { + av_log(ctx, AV_LOG_ERROR, "An image file must be specified as argument\n"); + return AVERROR(EINVAL); + } + + /* Load our mask image. */ + if ((ret = load_mask(&removelogo->full_mask_data, &w, &h, args, ctx)) < 0) + return ret; + removelogo->mask_w = w; + removelogo->mask_h = h; + + convert_mask_to_strength_mask(removelogo->full_mask_data, w, w, h, + 16, &full_max_mask_size); + + /* Create the scaled down mask image for the chroma planes. 
*/ + if (!(removelogo->half_mask_data = av_mallocz(w/2 * h/2))) + return AVERROR(ENOMEM); + generate_half_size_image(removelogo->full_mask_data, w, + removelogo->half_mask_data, w/2, + w, h, &half_max_mask_size); + + removelogo->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size); + + /* Create a circular mask for each size up to max_mask_size. When + the filter is applied, the mask size is determined on a pixel + by pixel basis, with pixels nearer the edge of the logo getting + smaller mask sizes. */ + mask = (int ***)av_malloc(sizeof(int **) * (removelogo->max_mask_size + 1)); + if (!mask) + return AVERROR(ENOMEM); + + for (a = 0; a <= removelogo->max_mask_size; a++) { + mask[a] = (int **)av_malloc(sizeof(int *) * ((a * 2) + 1)); + if (!mask[a]) + return AVERROR(ENOMEM); + for (b = -a; b <= a; b++) { + mask[a][b + a] = (int *)av_malloc(sizeof(int) * ((a * 2) + 1)); + if (!mask[a][b + a]) + return AVERROR(ENOMEM); + for (c = -a; c <= a; c++) { + if ((b * b) + (c * c) <= (a * a)) /* Circular 0/1 mask. */ + mask[a][b + a][c + a] = 1; + else + mask[a][b + a][c + a] = 0; + } + } + } + removelogo->mask = mask; + + /* Calculate our bounding rectangles, which determine in what + * region the logo resides for faster processing. 
*/ + ff_calculate_bounding_box(&removelogo->full_mask_bbox, removelogo->full_mask_data, w, w, h, 0); + ff_calculate_bounding_box(&removelogo->half_mask_bbox, removelogo->half_mask_data, w/2, w/2, h/2, 0); + +#define SHOW_LOGO_INFO(mask_type) \ + av_log(ctx, AV_LOG_VERBOSE, #mask_type " x1:%d x2:%d y1:%d y2:%d max_mask_size:%d\n", \ + removelogo->mask_type##_mask_bbox.x1, removelogo->mask_type##_mask_bbox.x2, \ + removelogo->mask_type##_mask_bbox.y1, removelogo->mask_type##_mask_bbox.y2, \ + mask_type##_max_mask_size); + SHOW_LOGO_INFO(full); + SHOW_LOGO_INFO(half); + + return 0; +} + +static int config_props_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + RemovelogoContext *removelogo = ctx->priv; + + if (inlink->w != removelogo->mask_w || inlink->h != removelogo->mask_h) { + av_log(ctx, AV_LOG_INFO, + "Mask image size %dx%d does not match with the input video size %dx%d\n", + removelogo->mask_w, removelogo->mask_h, inlink->w, inlink->h); + return AVERROR(EINVAL); + } + + return 0; +} + +/** + * Blur image. + * + * It takes a pixel that is inside the mask and blurs it. It does so + * by finding the average of all the pixels within the mask and + * outside of the mask. + * + * @param mask_data the mask plane to use for averaging + * @param image_data the image plane to blur + * @param w width of the image + * @param h height of the image + * @param x x-coordinate of the pixel to blur + * @param y y-coordinate of the pixel to blur + */ +static unsigned int blur_pixel(int ***mask, + const uint8_t *mask_data, int mask_linesize, + uint8_t *image_data, int image_linesize, + int w, int h, int x, int y) +{ + /* Mask size tells how large a circle to use. The radius is about + * (slightly larger than) mask size. */ + int mask_size; + int start_posx, start_posy, end_posx, end_posy; + int i, j; + unsigned int accumulator = 0, divisor = 0; + /* What pixel we are reading out of the circular blur mask. 
*/ + const uint8_t *image_read_position; + /* What pixel we are reading out of the filter image. */ + const uint8_t *mask_read_position; + + /* Prepare our bounding rectangle and clip it if need be. */ + mask_size = mask_data[y * mask_linesize + x]; + start_posx = FFMAX(0, x - mask_size); + start_posy = FFMAX(0, y - mask_size); + end_posx = FFMIN(w - 1, x + mask_size); + end_posy = FFMIN(h - 1, y + mask_size); + + image_read_position = image_data + image_linesize * start_posy + start_posx; + mask_read_position = mask_data + mask_linesize * start_posy + start_posx; + + for (j = start_posy; j <= end_posy; j++) { + for (i = start_posx; i <= end_posx; i++) { + /* Check if this pixel is in the mask or not. Only use the + * pixel if it is not. */ + if (!(*mask_read_position) && mask[mask_size][i - start_posx][j - start_posy]) { + accumulator += *image_read_position; + divisor++; + } + + image_read_position++; + mask_read_position++; + } + + image_read_position += (image_linesize - ((end_posx + 1) - start_posx)); + mask_read_position += (mask_linesize - ((end_posx + 1) - start_posx)); + } + + /* If divisor is 0, it means that not a single pixel is outside of + the logo, so we have no data. Else we need to normalise the + data using the divisor. */ + return divisor == 0 ? 255: + (accumulator + (divisor / 2)) / divisor; /* divide, taking into account average rounding error */ +} + +/** + * Blur image plane using a mask. + * + * @param source The image to have it's logo removed. + * @param destination Where the output image will be stored. + * @param source_stride How far apart (in memory) two consecutive lines are. + * @param destination Same as source_stride, but for the destination image. + * @param width Width of the image. This is the same for source and destination. + * @param height Height of the image. This is the same for source and destination. 
+ * @param is_image_direct If the image is direct, then source and destination are + * the same and we can save a lot of time by not copying pixels that + * haven't changed. + * @param filter The image that stores the distance to the edge of the logo for + * each pixel. + * @param logo_start_x smallest x-coordinate that contains at least 1 logo pixel. + * @param logo_start_y smallest y-coordinate that contains at least 1 logo pixel. + * @param logo_end_x largest x-coordinate that contains at least 1 logo pixel. + * @param logo_end_y largest y-coordinate that contains at least 1 logo pixel. + * + * This function processes an entire plane. Pixels outside of the logo are copied + * to the output without change, and pixels inside the logo have the de-blurring + * function applied. + */ +static void blur_image(int ***mask, + const uint8_t *src_data, int src_linesize, + uint8_t *dst_data, int dst_linesize, + const uint8_t *mask_data, int mask_linesize, + int w, int h, int direct, + FFBoundingBox *bbox) +{ + int x, y; + uint8_t *dst_line; + const uint8_t *src_line; + + if (!direct) + av_image_copy_plane(dst_data, dst_linesize, src_data, src_linesize, w, h); + + for (y = bbox->y1; y <= bbox->y2; y++) { + src_line = src_data + src_linesize * y; + dst_line = dst_data + dst_linesize * y; + + for (x = bbox->x1; x <= bbox->x2; x++) { + if (mask_data[y * mask_linesize + x]) { + /* Only process if we are in the mask. */ + dst_line[x] = blur_pixel(mask, + mask_data, mask_linesize, + dst_data, dst_linesize, + w, h, x, y); + } else { + /* Else just copy the data. 
*/ + if (!direct) + dst_line[x] = src_line[x]; + } + } + } +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) +{ + RemovelogoContext *removelogo = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outpicref; + int direct = 0; + + if (inpicref->perms & AV_PERM_WRITE) { + direct = 1; + outpicref = inpicref; + } else { + outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!outpicref) { + avfilter_unref_bufferp(&inpicref); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(outpicref, inpicref); + } + + blur_image(removelogo->mask, + inpicref ->data[0], inpicref ->linesize[0], + outpicref->data[0], outpicref->linesize[0], + removelogo->full_mask_data, inlink->w, + inlink->w, inlink->h, direct, &removelogo->full_mask_bbox); + blur_image(removelogo->mask, + inpicref ->data[1], inpicref ->linesize[1], + outpicref->data[1], outpicref->linesize[1], + removelogo->half_mask_data, inlink->w/2, + inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox); + blur_image(removelogo->mask, + inpicref ->data[2], inpicref ->linesize[2], + outpicref->data[2], outpicref->linesize[2], + removelogo->half_mask_data, inlink->w/2, + inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox); + + if (!direct) + avfilter_unref_bufferp(&inpicref); + + return ff_filter_frame(outlink, outpicref); +} + +static void uninit(AVFilterContext *ctx) +{ + RemovelogoContext *removelogo = ctx->priv; + int a, b; + + av_freep(&removelogo->full_mask_data); + av_freep(&removelogo->half_mask_data); + + if (removelogo->mask) { + /* Loop through each mask. */ + for (a = 0; a <= removelogo->max_mask_size; a++) { + /* Loop through each scanline in a mask. */ + for (b = -a; b <= a; b++) { + av_free(removelogo->mask[a][b + a]); /* Free a scanline. */ + } + av_free(removelogo->mask[a]); + } + /* Free the array of pointers pointing to the masks. 
*/ + av_freep(&removelogo->mask); + } +} + +static const AVFilterPad removelogo_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .config_props = config_props_input, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad removelogo_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_removelogo = { + .name = "removelogo", + .description = NULL_IF_CONFIG_SMALL("Remove a TV logo based on a mask image."), + .priv_size = sizeof(RemovelogoContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = removelogo_inputs, + .outputs = removelogo_outputs, +}; diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c index 7f189a2..f6e79ff 100644 --- a/libavfilter/vf_scale.c +++ b/libavfilter/vf_scale.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -35,41 +35,42 @@ #include "libavutil/internal.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" +#include "libavutil/parseutils.h" #include "libavutil/pixdesc.h" +#include "libavutil/imgutils.h" +#include "libavutil/avassert.h" #include "libswscale/swscale.h" static const char *const var_names[] = { - "PI", - "PHI", - "E", "in_w", "iw", "in_h", "ih", "out_w", "ow", "out_h", "oh", - "a", "dar", + "a", "sar", + "dar", "hsub", "vsub", NULL }; enum var_name { - VAR_PI, - VAR_PHI, - VAR_E, VAR_IN_W, VAR_IW, VAR_IN_H, VAR_IH, VAR_OUT_W, VAR_OW, VAR_OUT_H, VAR_OH, - VAR_A, VAR_DAR, + VAR_A, VAR_SAR, + VAR_DAR, VAR_HSUB, VAR_VSUB, VARS_NB }; typedef struct { + const AVClass *class; struct SwsContext *sws; ///< software scaler context + struct SwsContext *isws[2]; ///< software scaler context for interlaced material /** * New dimensions. 
Special values are: @@ -77,37 +78,92 @@ typedef struct { * -1 = keep original aspect */ int w, h; + char *flags_str; ///sws flags string + char *size_str; unsigned int flags; ///sws flags int hsub, vsub; ///< chroma subsampling int slice_y; ///< top of current output slice int input_is_pal; ///< set to 1 if the input format is paletted + int output_is_pal; ///< set to 1 if the output format is paletted + int interlaced; - char w_expr[256]; ///< width expression string - char h_expr[256]; ///< height expression string + char *w_expr; ///< width expression string + char *h_expr; ///< height expression string } ScaleContext; +#define OFFSET(x) offsetof(ScaleContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption scale_options[] = { + { "w", "set width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "width", "set width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "h", "set height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "height", "set height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "flags", "set libswscale flags", OFFSET(flags_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, INT_MAX, FLAGS }, + { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 1, FLAGS }, + { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS }, + { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(scale); + static av_cold int init(AVFilterContext *ctx, const char *args) { ScaleContext *scale = ctx->priv; - const char *p; + static const char *shorthand[] = { "w", "h", NULL }; + int ret; + const char *args0 = args; - av_strlcpy(scale->w_expr, "iw", sizeof(scale->w_expr)); - av_strlcpy(scale->h_expr, "ih", sizeof(scale->h_expr)); + scale->class = &scale_class; 
+ av_opt_set_defaults(scale); - scale->flags = SWS_BILINEAR; - if (args) { - sscanf(args, "%255[^:]:%255[^:]", scale->w_expr, scale->h_expr); - p = strstr(args,"flags="); - if (p) { - const AVClass *class = sws_get_class(); - const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0, - AV_OPT_SEARCH_FAKE_OBJ); - int ret = av_opt_eval_flags(&class, o, p + 6, &scale->flags); - - if (ret < 0) - return ret; + if (args && (scale->size_str = av_get_token(&args, ":"))) { + if (av_parse_video_size(&scale->w, &scale->h, scale->size_str) < 0) { + av_freep(&scale->size_str); + args = args0; + } else if (*args) + args++; + } + + if ((ret = av_opt_set_from_string(scale, args, shorthand, "=", ":")) < 0) + return ret; + + if (scale->size_str && (scale->w_expr || scale->h_expr)) { + av_log(ctx, AV_LOG_ERROR, + "Size and width/height expressions cannot be set at the same time.\n"); + return AVERROR(EINVAL); + } + + if (scale->size_str) { + char buf[32]; + if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) { + av_log(ctx, AV_LOG_ERROR, + "Invalid size '%s'\n", scale->size_str); + return ret; } + snprintf(buf, sizeof(buf)-1, "%d", scale->w); + av_opt_set(scale, "w", buf, 0); + snprintf(buf, sizeof(buf)-1, "%d", scale->h); + av_opt_set(scale, "h", buf, 0); + } + if (!scale->w_expr) + av_opt_set(scale, "w", "iw", 0); + if (!scale->h_expr) + av_opt_set(scale, "h", "ih", 0); + + av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n", + scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced); + + scale->flags = SWS_BILINEAR; + if (scale->flags_str) { + const AVClass *class = sws_get_class(); + const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0, + AV_OPT_SEARCH_FAKE_OBJ); + int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags); + if (ret < 0) + return ret; } return 0; @@ -117,7 +173,10 @@ static av_cold void uninit(AVFilterContext *ctx) { ScaleContext *scale = ctx->priv; 
sws_freeContext(scale->sws); + sws_freeContext(scale->isws[0]); + sws_freeContext(scale->isws[1]); scale->sws = NULL; + av_opt_free(scale); } static int query_formats(AVFilterContext *ctx) @@ -139,7 +198,7 @@ static int query_formats(AVFilterContext *ctx) if (ctx->outputs[0]) { formats = NULL; for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++) - if ( sws_isSupportedOutput(pix_fmt) + if ( (sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8) && (ret = ff_add_format(&formats, pix_fmt)) < 0) { ff_formats_unref(&formats); return ret; @@ -154,6 +213,7 @@ static int config_props(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; AVFilterLink *inlink = outlink->src->inputs[0]; + enum AVPixelFormat outfmt = outlink->format; ScaleContext *scale = ctx->priv; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); int64_t w, h; @@ -161,16 +221,14 @@ static int config_props(AVFilterLink *outlink) char *expr; int ret; - var_values[VAR_PI] = M_PI; - var_values[VAR_PHI] = M_PHI; - var_values[VAR_E] = M_E; var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w; var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h; var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN; - var_values[VAR_DAR] = var_values[VAR_A] = (double) inlink->w / inlink->h; + var_values[VAR_A] = (double) inlink->w / inlink->h; var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? 
(double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; + var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; var_values[VAR_HSUB] = 1 << desc->log2_chroma_w; var_values[VAR_VSUB] = 1 << desc->log2_chroma_h; @@ -220,13 +278,12 @@ static int config_props(AVFilterLink *outlink) outlink->h = h; /* TODO: make algorithm configurable */ - av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s flags:0x%0x\n", - inlink ->w, inlink ->h, av_get_pix_fmt_name(inlink->format), - outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format), - scale->flags); scale->input_is_pal = desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL; + if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8; + scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & PIX_FMT_PAL || + av_pix_fmt_desc_get(outfmt)->flags & PIX_FMT_PSEUDOPAL; if (scale->sws) sws_freeContext(scale->sws); @@ -235,34 +292,91 @@ static int config_props(AVFilterLink *outlink) scale->sws = NULL; else { scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format, - outlink->w, outlink->h, outlink->format, + outlink->w, outlink->h, outfmt, scale->flags, NULL, NULL, NULL); - if (!scale->sws) + if (scale->isws[0]) + sws_freeContext(scale->isws[0]); + scale->isws[0] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format, + outlink->w, outlink->h/2, outfmt, + scale->flags, NULL, NULL, NULL); + if (scale->isws[1]) + sws_freeContext(scale->isws[1]); + scale->isws[1] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format, + outlink->w, outlink->h/2, outfmt, + scale->flags, NULL, NULL, NULL); + if (!scale->sws || !scale->isws[0] || !scale->isws[1]) return AVERROR(EINVAL); } - - if (inlink->sample_aspect_ratio.num) - outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w, - outlink->w*inlink->h}, - inlink->sample_aspect_ratio); - else + if (inlink->sample_aspect_ratio.num){ + outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * 
inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio); + } else outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; + av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n", + inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format), + inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den, + outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format), + outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den, + scale->flags); return 0; fail: av_log(NULL, AV_LOG_ERROR, - "Error when evaluating the expression '%s'\n", expr); + "Error when evaluating the expression '%s'.\n" + "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n", + expr, scale->w_expr, scale->h_expr); return ret; } +static int scale_slice(AVFilterLink *link, AVFilterBufferRef *out_buf, AVFilterBufferRef *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field) +{ + ScaleContext *scale = link->dst->priv; + const uint8_t *in[4]; + uint8_t *out[4]; + int in_stride[4],out_stride[4]; + int i; + + for(i=0; i<4; i++){ + int vsub= ((i+1)&2) ? 
scale->vsub : 0; + in_stride[i] = cur_pic->linesize[i] * mul; + out_stride[i] = out_buf->linesize[i] * mul; + in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i]; + out[i] = out_buf->data[i] + field * out_buf->linesize[i]; + } + if(scale->input_is_pal) + in[1] = cur_pic->data[1]; + if(scale->output_is_pal) + out[1] = out_buf->data[1]; + + return sws_scale(sws, in, in_stride, y/mul, h, + out,out_stride); +} + static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) { ScaleContext *scale = link->dst->priv; AVFilterLink *outlink = link->dst->outputs[0]; AVFilterBufferRef *out; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); + char buf[32]; + + if( in->video->w != link->w + || in->video->h != link->h + || in->format != link->format) { + int ret; + snprintf(buf, sizeof(buf)-1, "%d", outlink->w); + av_opt_set(scale, "w", buf, 0); + snprintf(buf, sizeof(buf)-1, "%d", outlink->h); + av_opt_set(scale, "h", buf, 0); + + link->dst->inputs[0]->format = in->format; + link->dst->inputs[0]->w = in->video->w; + link->dst->inputs[0]->h = in->video->h; + + if ((ret = config_props(outlink)) < 0) + return ret; + } if (!scale->sws) return ff_filter_frame(outlink, in); @@ -270,7 +384,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) scale->hsub = desc->log2_chroma_w; scale->vsub = desc->log2_chroma_h; - out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + out = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); if (!out) { avfilter_unref_bufferp(&in); return AVERROR(ENOMEM); @@ -280,13 +394,20 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in) out->video->w = outlink->w; out->video->h = outlink->h; - av_reduce(&out->video->pixel_aspect.num, &out->video->pixel_aspect.den, - (int64_t)in->video->pixel_aspect.num * outlink->h * link->w, - (int64_t)in->video->pixel_aspect.den * outlink->w * link->h, + if(scale->output_is_pal) + 
avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format); + + av_reduce(&out->video->sample_aspect_ratio.num, &out->video->sample_aspect_ratio.den, + (int64_t)in->video->sample_aspect_ratio.num * outlink->h * link->w, + (int64_t)in->video->sample_aspect_ratio.den * outlink->w * link->h, INT_MAX); - sws_scale(scale->sws, in->data, in->linesize, 0, in->video->h, - out->data, out->linesize); + if(scale->interlaced>0 || (scale->interlaced<0 && in->video->interlaced)){ + scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0); + scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1); + }else{ + scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0); + } avfilter_unref_bufferp(&in); return ff_filter_frame(outlink, out); @@ -324,4 +445,5 @@ AVFilter avfilter_vf_scale = { .inputs = avfilter_vf_scale_inputs, .outputs = avfilter_vf_scale_outputs, + .priv_class = &scale_class, }; diff --git a/libavfilter/vf_select.c b/libavfilter/vf_select.c deleted file mode 100644 index 674151d..0000000 --- a/libavfilter/vf_select.c +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Copyright (c) 2011 Stefano Sabatini - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * filter for selecting which frame passes in the filterchain - */ - -#include "libavutil/eval.h" -#include "libavutil/fifo.h" -#include "libavutil/internal.h" -#include "libavutil/mathematics.h" -#include "avfilter.h" -#include "internal.h" -#include "video.h" - -static const char *const var_names[] = { - "E", ///< Euler number - "PHI", ///< golden ratio - "PI", ///< greek pi - - "TB", ///< timebase - - "pts", ///< original pts in the file of the frame - "start_pts", ///< first PTS in the stream, expressed in TB units - "prev_pts", ///< previous frame PTS - "prev_selected_pts", ///< previous selected frame PTS - - "t", ///< first PTS in seconds - "start_t", ///< first PTS in the stream, expressed in seconds - "prev_t", ///< previous frame time - "prev_selected_t", ///< previously selected time - - "pict_type", ///< the type of picture in the movie - "I", - "P", - "B", - "S", - "SI", - "SP", - "BI", - - "interlace_type", ///< the frame interlace type - "PROGRESSIVE", - "TOPFIRST", - "BOTTOMFIRST", - - "n", ///< frame number (starting from zero) - "selected_n", ///< selected frame number (starting from zero) - "prev_selected_n", ///< number of the last selected frame - - "key", ///< tell if the frame is a key frame - "pos", ///< original position in the file of the frame - - NULL -}; - -enum var_name { - VAR_E, - VAR_PHI, - VAR_PI, - - VAR_TB, - - VAR_PTS, - VAR_START_PTS, - VAR_PREV_PTS, - VAR_PREV_SELECTED_PTS, - - VAR_T, - VAR_START_T, - VAR_PREV_T, - VAR_PREV_SELECTED_T, - - VAR_PICT_TYPE, - VAR_PICT_TYPE_I, - VAR_PICT_TYPE_P, - VAR_PICT_TYPE_B, - VAR_PICT_TYPE_S, - VAR_PICT_TYPE_SI, - VAR_PICT_TYPE_SP, - VAR_PICT_TYPE_BI, - - VAR_INTERLACE_TYPE, - VAR_INTERLACE_TYPE_P, - VAR_INTERLACE_TYPE_T, - VAR_INTERLACE_TYPE_B, - - 
VAR_N, - VAR_SELECTED_N, - VAR_PREV_SELECTED_N, - - VAR_KEY, - VAR_POS, - - VAR_VARS_NB -}; - -#define FIFO_SIZE 8 - -typedef struct { - AVExpr *expr; - double var_values[VAR_VARS_NB]; - double select; - int cache_frames; - AVFifoBuffer *pending_frames; ///< FIFO buffer of video frames -} SelectContext; - -static av_cold int init(AVFilterContext *ctx, const char *args) -{ - SelectContext *select = ctx->priv; - int ret; - - if ((ret = av_expr_parse(&select->expr, args ? args : "1", - var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", args); - return ret; - } - - select->pending_frames = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*)); - if (!select->pending_frames) { - av_log(ctx, AV_LOG_ERROR, "Failed to allocate pending frames buffer.\n"); - return AVERROR(ENOMEM); - } - return 0; -} - -#define INTERLACE_TYPE_P 0 -#define INTERLACE_TYPE_T 1 -#define INTERLACE_TYPE_B 2 - -static int config_input(AVFilterLink *inlink) -{ - SelectContext *select = inlink->dst->priv; - - select->var_values[VAR_E] = M_E; - select->var_values[VAR_PHI] = M_PHI; - select->var_values[VAR_PI] = M_PI; - - select->var_values[VAR_N] = 0.0; - select->var_values[VAR_SELECTED_N] = 0.0; - - select->var_values[VAR_TB] = av_q2d(inlink->time_base); - - select->var_values[VAR_PREV_PTS] = NAN; - select->var_values[VAR_PREV_SELECTED_PTS] = NAN; - select->var_values[VAR_PREV_SELECTED_T] = NAN; - select->var_values[VAR_START_PTS] = NAN; - select->var_values[VAR_START_T] = NAN; - - select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I; - select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P; - select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B; - select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI; - select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP; - - select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P; - select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T; - 
select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;; - - return 0; -} - -#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d)) -#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) - -static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *picref) -{ - SelectContext *select = ctx->priv; - AVFilterLink *inlink = ctx->inputs[0]; - double res; - - if (isnan(select->var_values[VAR_START_PTS])) - select->var_values[VAR_START_PTS] = TS2D(picref->pts); - if (isnan(select->var_values[VAR_START_T])) - select->var_values[VAR_START_T] = TS2D(picref->pts) * av_q2d(inlink->time_base); - - select->var_values[VAR_PTS] = TS2D(picref->pts); - select->var_values[VAR_T ] = TS2D(picref->pts) * av_q2d(inlink->time_base); - select->var_values[VAR_POS] = picref->pos == -1 ? NAN : picref->pos; - select->var_values[VAR_PREV_PTS] = TS2D(picref ->pts); - - select->var_values[VAR_INTERLACE_TYPE] = - !picref->video->interlaced ? INTERLACE_TYPE_P : - picref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B; - select->var_values[VAR_PICT_TYPE] = picref->video->pict_type; - - res = av_expr_eval(select->expr, select->var_values, NULL); - av_log(inlink->dst, AV_LOG_DEBUG, - "n:%d pts:%d t:%f pos:%d interlace_type:%c key:%d pict_type:%c " - "-> select:%f\n", - (int)select->var_values[VAR_N], - (int)select->var_values[VAR_PTS], - select->var_values[VAR_T], - (int)select->var_values[VAR_POS], - select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_P ? 'P' : - select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_T ? 'T' : - select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 
'B' : '?', - (int)select->var_values[VAR_KEY], - av_get_picture_type_char(select->var_values[VAR_PICT_TYPE]), - res); - - select->var_values[VAR_N] += 1.0; - - if (res) { - select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N]; - select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS]; - select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T]; - select->var_values[VAR_SELECTED_N] += 1.0; - } - return res; -} - -static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) -{ - SelectContext *select = inlink->dst->priv; - - select->select = select_frame(inlink->dst, frame); - if (select->select) { - /* frame was requested through poll_frame */ - if (select->cache_frames) { - if (!av_fifo_space(select->pending_frames)) { - av_log(inlink->dst, AV_LOG_ERROR, - "Buffering limit reached, cannot cache more frames\n"); - avfilter_unref_bufferp(&frame); - } else - av_fifo_generic_write(select->pending_frames, &frame, - sizeof(frame), NULL); - return 0; - } - return ff_filter_frame(inlink->dst->outputs[0], frame); - } - - avfilter_unref_bufferp(&frame); - return 0; -} - -static int request_frame(AVFilterLink *outlink) -{ - AVFilterContext *ctx = outlink->src; - SelectContext *select = ctx->priv; - AVFilterLink *inlink = outlink->src->inputs[0]; - select->select = 0; - - if (av_fifo_size(select->pending_frames)) { - AVFilterBufferRef *picref; - - av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL); - return ff_filter_frame(outlink, picref); - } - - while (!select->select) { - int ret = ff_request_frame(inlink); - if (ret < 0) - return ret; - } - - return 0; -} - -static int poll_frame(AVFilterLink *outlink) -{ - SelectContext *select = outlink->src->priv; - AVFilterLink *inlink = outlink->src->inputs[0]; - int count, ret; - - if (!av_fifo_size(select->pending_frames)) { - if ((count = ff_poll_frame(inlink)) <= 0) - return count; - /* request frame from input, and apply select condition to it */ - 
select->cache_frames = 1; - while (count-- && av_fifo_space(select->pending_frames)) { - ret = ff_request_frame(inlink); - if (ret < 0) - break; - } - select->cache_frames = 0; - } - - return av_fifo_size(select->pending_frames)/sizeof(AVFilterBufferRef *); -} - -static av_cold void uninit(AVFilterContext *ctx) -{ - SelectContext *select = ctx->priv; - AVFilterBufferRef *picref; - - av_expr_free(select->expr); - select->expr = NULL; - - while (select->pending_frames && - av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL) == sizeof(picref)) - avfilter_unref_buffer(picref); - av_fifo_free(select->pending_frames); - select->pending_frames = NULL; -} - -static const AVFilterPad avfilter_vf_select_inputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .get_video_buffer = ff_null_get_video_buffer, - .config_props = config_input, - .filter_frame = filter_frame, - }, - { NULL } -}; - -static const AVFilterPad avfilter_vf_select_outputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .poll_frame = poll_frame, - .request_frame = request_frame, - }, - { NULL } -}; - -AVFilter avfilter_vf_select = { - .name = "select", - .description = NULL_IF_CONFIG_SMALL("Select frames to pass in output."), - .init = init, - .uninit = uninit, - - .priv_size = sizeof(SelectContext), - - .inputs = avfilter_vf_select_inputs, - .outputs = avfilter_vf_select_outputs, -}; diff --git a/libavfilter/vf_setfield.c b/libavfilter/vf_setfield.c new file mode 100644 index 0000000..43949fa --- /dev/null +++ b/libavfilter/vf_setfield.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * set field order + */ + +#include "libavutil/opt.h" +#include "avfilter.h" +#include "internal.h" +#include "video.h" + +enum SetFieldMode { + MODE_AUTO = -1, + MODE_BFF, + MODE_TFF, + MODE_PROG, +}; + +typedef struct { + const AVClass *class; + enum SetFieldMode mode; +} SetFieldContext; + +#define OFFSET(x) offsetof(SetFieldContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption setfield_options[] = { + {"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_AUTO}, -1, MODE_PROG, FLAGS, "mode"}, + {"auto", "keep the same input field", 0, AV_OPT_TYPE_CONST, {.i64=MODE_AUTO}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"bff", "mark as bottom-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"tff", "mark as top-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_TFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"prog", "mark as progressive", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PROG}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {NULL} +}; + +AVFILTER_DEFINE_CLASS(setfield); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + SetFieldContext *setfield = ctx->priv; + static const char *shorthand[] = { "mode", NULL }; + + setfield->class = &setfield_class; + av_opt_set_defaults(setfield); + + return av_opt_set_from_string(setfield, args, shorthand, "=", ":"); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + SetFieldContext *setfield = ctx->priv; + av_opt_free(setfield); +} 
+ +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) +{ + SetFieldContext *setfield = inlink->dst->priv; + + if (setfield->mode == MODE_PROG) { + picref->video->interlaced = 0; + } else if (setfield->mode != MODE_AUTO) { + picref->video->interlaced = 1; + picref->video->top_field_first = setfield->mode; + } + return ff_filter_frame(inlink->dst->outputs[0], picref); +} + +static const AVFilterPad setfield_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad setfield_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_setfield = { + .name = "setfield", + .description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."), + .init = init, + .uninit = uninit, + + .priv_size = sizeof(SetFieldContext), + .inputs = setfield_inputs, + .outputs = setfield_outputs, + .priv_class = &setfield_class, +}; diff --git a/libavfilter/vf_setpts.c b/libavfilter/vf_setpts.c deleted file mode 100644 index 79cadd4..0000000 --- a/libavfilter/vf_setpts.c +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright (c) 2010 Stefano Sabatini - * Copyright (c) 2008 Victor Paesa - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * video presentation timestamp (PTS) modification filter - */ - -/* #define DEBUG */ - -#include "libavutil/eval.h" -#include "libavutil/internal.h" -#include "libavutil/mathematics.h" -#include "libavutil/time.h" -#include "avfilter.h" -#include "internal.h" -#include "video.h" - -static const char *const var_names[] = { - "E", ///< Euler number - "INTERLACED", ///< tell if the current frame is interlaced - "N", ///< frame number (starting at zero) - "PHI", ///< golden ratio - "PI", ///< greek pi - "POS", ///< original position in the file of the frame - "PREV_INPTS", ///< previous input PTS - "PREV_OUTPTS", ///< previous output PTS - "PTS", ///< original pts in the file of the frame - "STARTPTS", ///< PTS at start of movie - "TB", ///< timebase - "RTCTIME", ///< wallclock (RTC) time in micro seconds - "RTCSTART", ///< wallclock (RTC) time at the start of the movie in micro seconds - NULL -}; - -enum var_name { - VAR_E, - VAR_INTERLACED, - VAR_N, - VAR_PHI, - VAR_PI, - VAR_POS, - VAR_PREV_INPTS, - VAR_PREV_OUTPTS, - VAR_PTS, - VAR_STARTPTS, - VAR_TB, - VAR_RTCTIME, - VAR_RTCSTART, - VAR_VARS_NB -}; - -typedef struct { - AVExpr *expr; - double var_values[VAR_VARS_NB]; -} SetPTSContext; - -static av_cold int init(AVFilterContext *ctx, const char *args) -{ - SetPTSContext *setpts = ctx->priv; - int ret; - - if ((ret = av_expr_parse(&setpts->expr, args ? 
args : "PTS", - var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", args); - return ret; - } - - setpts->var_values[VAR_E ] = M_E; - setpts->var_values[VAR_N ] = 0.0; - setpts->var_values[VAR_PHI ] = M_PHI; - setpts->var_values[VAR_PI ] = M_PI; - setpts->var_values[VAR_PREV_INPTS ] = NAN; - setpts->var_values[VAR_PREV_OUTPTS] = NAN; - setpts->var_values[VAR_STARTPTS ] = NAN; - return 0; -} - -static int config_input(AVFilterLink *inlink) -{ - SetPTSContext *setpts = inlink->dst->priv; - - setpts->var_values[VAR_TB] = av_q2d(inlink->time_base); - setpts->var_values[VAR_RTCSTART] = av_gettime(); - - av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f\n", setpts->var_values[VAR_TB]); - return 0; -} - -#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d)) -#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) - -static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) -{ - SetPTSContext *setpts = inlink->dst->priv; - int64_t in_pts = frame->pts; - double d; - - if (isnan(setpts->var_values[VAR_STARTPTS])) - setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts); - - setpts->var_values[VAR_INTERLACED] = frame->video->interlaced; - setpts->var_values[VAR_PTS ] = TS2D(frame->pts); - setpts->var_values[VAR_POS ] = frame->pos == -1 ? 
NAN : frame->pos; - setpts->var_values[VAR_RTCTIME ] = av_gettime(); - - d = av_expr_eval(setpts->expr, setpts->var_values, NULL); - frame->pts = D2TS(d); - -#ifdef DEBUG - av_log(inlink->dst, AV_LOG_DEBUG, - "n:%"PRId64" interlaced:%d pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n", - (int64_t)setpts->var_values[VAR_N], - (int)setpts->var_values[VAR_INTERLACED], - frame->pos, in_pts, in_pts * av_q2d(inlink->time_base), - frame->pts, frame->pts * av_q2d(inlink->time_base)); -#endif - - - setpts->var_values[VAR_N] += 1.0; - setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts); - setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts); - return ff_filter_frame(inlink->dst->outputs[0], frame); -} - -static av_cold void uninit(AVFilterContext *ctx) -{ - SetPTSContext *setpts = ctx->priv; - av_expr_free(setpts->expr); - setpts->expr = NULL; -} - -static const AVFilterPad avfilter_vf_setpts_inputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .get_video_buffer = ff_null_get_video_buffer, - .config_props = config_input, - .filter_frame = filter_frame, - }, - { NULL } -}; - -static const AVFilterPad avfilter_vf_setpts_outputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - }, - { NULL } -}; - -AVFilter avfilter_vf_setpts = { - .name = "setpts", - .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."), - .init = init, - .uninit = uninit, - - .priv_size = sizeof(SetPTSContext), - - .inputs = avfilter_vf_setpts_inputs, - .outputs = avfilter_vf_setpts_outputs, -}; diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c index c89b028..f91721d 100644 --- a/libavfilter/vf_showinfo.c +++ b/libavfilter/vf_showinfo.c @@ -1,19 +1,19 @@ /* * Copyright (c) 2011 Stefano Sabatini - * This file is part of Libav. + * This file is part of FFmpeg. 
* - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -26,6 +26,7 @@ #include "libavutil/imgutils.h" #include "libavutil/internal.h" #include "libavutil/pixdesc.h" +#include "libavutil/timestamp.h" #include "avfilter.h" #include "internal.h" #include "video.h" @@ -49,11 +50,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) uint32_t plane_checksum[4] = {0}, checksum = 0; int i, plane, vsub = desc->log2_chroma_h; - for (plane = 0; frame->data[plane] && plane < 4; plane++) { - size_t linesize = av_image_get_linesize(frame->format, frame->video->w, plane); + for (plane = 0; plane < 4 && frame->data[plane]; plane++) { + int64_t linesize = av_image_get_linesize(frame->format, frame->video->w, plane); uint8_t *data = frame->data[plane]; int h = plane == 1 || plane == 2 ? 
inlink->h >> vsub : inlink->h; + if (linesize < 0) + return linesize; + for (i = 0; i < h; i++) { plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize); checksum = av_adler32_update(checksum, data, linesize); @@ -62,19 +66,23 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) } av_log(ctx, AV_LOG_INFO, - "n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" " + "n:%d pts:%s pts_time:%s pos:%"PRId64" " "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c " - "checksum:%u plane_checksum:[%u %u %u %u]\n", + "checksum:%08X plane_checksum:[%08X", showinfo->frame, - frame->pts, frame->pts * av_q2d(inlink->time_base), frame->pos, + av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), frame->pos, desc->name, - frame->video->pixel_aspect.num, frame->video->pixel_aspect.den, + frame->video->sample_aspect_ratio.num, frame->video->sample_aspect_ratio.den, frame->video->w, frame->video->h, !frame->video->interlaced ? 'P' : /* Progressive */ frame->video->top_field_first ? 'T' : 'B', /* Top / Bottom */ frame->video->key_frame, av_get_picture_type_char(frame->video->pict_type), - checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]); + checksum, plane_checksum[0]); + + for (plane = 1; plane < 4 && frame->data[plane]; plane++) + av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]); + av_log(ctx, AV_LOG_INFO, "]\n"); showinfo->frame++; return ff_filter_frame(inlink->dst->outputs[0], frame); diff --git a/libavfilter/vf_smartblur.c b/libavfilter/vf_smartblur.c new file mode 100644 index 0000000..54ab209 --- /dev/null +++ b/libavfilter/vf_smartblur.c @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * Copyright (c) 2012 Jeremy Tran + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * Apply a smartblur filter to the input video + * Ported from MPlayer libmpcodecs/vf_smartblur.c by Michael Niedermayer. + */ + +#include "libavutil/pixdesc.h" +#include "libswscale/swscale.h" + +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +#define RADIUS_MIN 0.1 +#define RADIUS_MAX 5.0 + +#define STRENGTH_MIN -1.0 +#define STRENGTH_MAX 1.0 + +#define THRESHOLD_MIN -30 +#define THRESHOLD_MAX 30 + +typedef struct { + float radius; + float strength; + int threshold; + float quality; + struct SwsContext *filter_context; +} FilterParam; + +typedef struct { + FilterParam luma; + FilterParam chroma; + int hsub; + int vsub; + unsigned int sws_flags; +} SmartblurContext; + +#define CHECK_PARAM(param, name, min, max, format, ret) \ + if (param < min || param > max) { \ + av_log(ctx, AV_LOG_ERROR, \ + "Invalid " #name " value " #format ": " \ + "must be included between range " #format " and " #format "\n",\ + param, min, max); \ + ret = AVERROR(EINVAL); \ + } + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + SmartblurContext *sblur = ctx->priv; + int n = 0, ret = 0; + float lradius, lstrength, cradius, cstrength; + int lthreshold, cthreshold; + + if (args) + n = sscanf(args, "%f:%f:%d:%f:%f:%d", + 
&lradius, &lstrength, &lthreshold, + &cradius, &cstrength, &cthreshold); + + if (n != 3 && n != 6) { + av_log(ctx, AV_LOG_ERROR, + "Incorrect number of parameters or invalid syntax: " + "must be luma_radius:luma_strength:luma_threshold" + "[:chroma_radius:chroma_strength:chroma_threshold]\n"); + return AVERROR(EINVAL); + } + + sblur->luma.radius = lradius; + sblur->luma.strength = lstrength; + sblur->luma.threshold = lthreshold; + + if (n == 3) { + sblur->chroma.radius = sblur->luma.radius; + sblur->chroma.strength = sblur->luma.strength; + sblur->chroma.threshold = sblur->luma.threshold; + } else { + sblur->chroma.radius = cradius; + sblur->chroma.strength = cstrength; + sblur->chroma.threshold = cthreshold; + } + + sblur->luma.quality = sblur->chroma.quality = 3.0; + sblur->sws_flags = SWS_BICUBIC; + + CHECK_PARAM(lradius, luma radius, RADIUS_MIN, RADIUS_MAX, %0.1f, ret) + CHECK_PARAM(lstrength, luma strength, STRENGTH_MIN, STRENGTH_MAX, %0.1f, ret) + CHECK_PARAM(lthreshold, luma threshold, THRESHOLD_MIN, THRESHOLD_MAX, %d, ret) + + if (n != 3) { + CHECK_PARAM(sblur->chroma.radius, chroma radius, RADIUS_MIN, RADIUS_MAX, %0.1f, ret) + CHECK_PARAM(sblur->chroma.strength, chroma strength, STRENGTH_MIN, STRENGTH_MAX, %0.1f, ret) + CHECK_PARAM(sblur->chroma.threshold, chroma threshold, THRESHOLD_MIN,THRESHOLD_MAX, %d, ret) + } + + return ret; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + SmartblurContext *sblur = ctx->priv; + + sws_freeContext(sblur->luma.filter_context); + sws_freeContext(sblur->chroma.filter_context); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, + AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, + AV_PIX_FMT_GRAY8, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + + return 0; +} + +static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
+{ + SwsVector *vec; + SwsFilter sws_filter; + + vec = sws_getGaussianVec(f->radius, f->quality); + + if (!vec) + return AVERROR(EINVAL); + + sws_scaleVec(vec, f->strength); + vec->coeff[vec->length / 2] += 1.0 - f->strength; + sws_filter.lumH = sws_filter.lumV = vec; + sws_filter.chrH = sws_filter.chrV = NULL; + f->filter_context = sws_getCachedContext(NULL, + width, height, AV_PIX_FMT_GRAY8, + width, height, AV_PIX_FMT_GRAY8, + flags, &sws_filter, NULL, NULL); + + sws_freeVec(vec); + + if (!f->filter_context) + return AVERROR(EINVAL); + + return 0; +} + +static int config_props(AVFilterLink *inlink) +{ + SmartblurContext *sblur = inlink->dst->priv; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + + sblur->hsub = desc->log2_chroma_w; + sblur->vsub = desc->log2_chroma_h; + + alloc_sws_context(&sblur->luma, inlink->w, inlink->h, sblur->sws_flags); + alloc_sws_context(&sblur->chroma, + inlink->w >> sblur->hsub, inlink->h >> sblur->vsub, + sblur->sws_flags); + + return 0; +} + +static void blur(uint8_t *dst, const int dst_linesize, + const uint8_t *src, const int src_linesize, + const int w, const int h, const int threshold, + struct SwsContext *filter_context) +{ + int x, y; + int orig, filtered; + int diff; + /* Declare arrays of 4 to get aligned data */ + const uint8_t* const src_array[4] = {src}; + uint8_t *dst_array[4] = {dst}; + int src_linesize_array[4] = {src_linesize}; + int dst_linesize_array[4] = {dst_linesize}; + + sws_scale(filter_context, src_array, src_linesize_array, + 0, h, dst_array, dst_linesize_array); + + if (threshold > 0) { + for (y = 0; y < h; ++y) { + for (x = 0; x < w; ++x) { + orig = src[x + y * src_linesize]; + filtered = dst[x + y * dst_linesize]; + diff = orig - filtered; + + if (diff > 0) { + if (diff > 2 * threshold) + dst[x + y * dst_linesize] = orig; + else if (diff > threshold) + /* add 'diff' and subtract 'threshold' from 'filtered' */ + dst[x + y * dst_linesize] = orig - threshold; + } else { + if (-diff >
2 * threshold) + dst[x + y * dst_linesize] = orig; + else if (-diff > threshold) + /* add 'diff' and 'threshold' to 'filtered' */ + dst[x + y * dst_linesize] = orig + threshold; + } + } + } + } else if (threshold < 0) { + for (y = 0; y < h; ++y) { + for (x = 0; x < w; ++x) { + orig = src[x + y * src_linesize]; + filtered = dst[x + y * dst_linesize]; + diff = orig - filtered; + + if (diff > 0) { + if (diff <= -threshold) + dst[x + y * dst_linesize] = orig; + else if (diff <= -2 * threshold) + /* subtract 'diff' and 'threshold' from 'orig' */ + dst[x + y * dst_linesize] = filtered - threshold; + } else { + if (diff >= threshold) + dst[x + y * dst_linesize] = orig; + else if (diff >= 2 * threshold) + /* add 'threshold' and subtract 'diff' from 'orig' */ + dst[x + y * dst_linesize] = filtered + threshold; + } + } + } + } +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) +{ + SmartblurContext *sblur = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outpic; + int cw = inlink->w >> sblur->hsub; + int ch = inlink->h >> sblur->vsub; + + outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!outpic) { + avfilter_unref_bufferp(&inpic); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(outpic, inpic); + + blur(outpic->data[0], outpic->linesize[0], + inpic->data[0], inpic->linesize[0], + inlink->w, inlink->h, sblur->luma.threshold, + sblur->luma.filter_context); + + if (inpic->data[2]) { + blur(outpic->data[1], outpic->linesize[1], + inpic->data[1], inpic->linesize[1], + cw, ch, sblur->chroma.threshold, + sblur->chroma.filter_context); + blur(outpic->data[2], outpic->linesize[2], + inpic->data[2], inpic->linesize[2], + cw, ch, sblur->chroma.threshold, + sblur->chroma.filter_context); + } + + avfilter_unref_bufferp(&inpic); + return ff_filter_frame(outlink, outpic); +} + +static const AVFilterPad smartblur_inputs[] = { + { + .name = "default", + .type =
AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .config_props = config_props, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad smartblur_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_smartblur = { + .name = "smartblur", + .description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."), + + .priv_size = sizeof(SmartblurContext), + + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = smartblur_inputs, + .outputs = smartblur_outputs, +}; diff --git a/libavfilter/vf_super2xsai.c b/libavfilter/vf_super2xsai.c new file mode 100644 index 0000000..e2db3b4 --- /dev/null +++ b/libavfilter/vf_super2xsai.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2010 Niel van der Westhuizen <nielkie@gmail.com> + * Copyright (c) 2002 A'rpi + * Copyright (c) 1997-2001 ZSNES Team ( zsknight@zsnes.com / _demo_@zsnes.com ) + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * Super 2xSaI video filter + * Ported from MPlayer libmpcodecs/vf_2xsai.c. 
+ */ + +#include "libavutil/pixdesc.h" +#include "libavutil/intreadwrite.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +typedef struct { + /* masks used for two pixels interpolation */ + uint32_t hi_pixel_mask; + uint32_t lo_pixel_mask; + + /* masks used for four pixels interpolation */ + uint32_t q_hi_pixel_mask; + uint32_t q_lo_pixel_mask; + + int bpp; ///< bytes per pixel, pixel stride for each (packed) pixel + int is_be; +} Super2xSaIContext; + +#define GET_RESULT(A, B, C, D) ((A != C || A != D) - (B != C || B != D)) + +#define INTERPOLATE(A, B) (((A & hi_pixel_mask) >> 1) + ((B & hi_pixel_mask) >> 1) + (A & B & lo_pixel_mask)) + +#define Q_INTERPOLATE(A, B, C, D) ((A & q_hi_pixel_mask) >> 2) + ((B & q_hi_pixel_mask) >> 2) + ((C & q_hi_pixel_mask) >> 2) + ((D & q_hi_pixel_mask) >> 2) \ + + ((((A & q_lo_pixel_mask) + (B & q_lo_pixel_mask) + (C & q_lo_pixel_mask) + (D & q_lo_pixel_mask)) >> 2) & q_lo_pixel_mask) + +static void super2xsai(AVFilterContext *ctx, + uint8_t *src, int src_linesize, + uint8_t *dst, int dst_linesize, + int width, int height) +{ + Super2xSaIContext *sai = ctx->priv; + unsigned int x, y; + uint32_t color[4][4]; + unsigned char *src_line[4]; + const int bpp = sai->bpp; + const uint32_t hi_pixel_mask = sai->hi_pixel_mask; + const uint32_t lo_pixel_mask = sai->lo_pixel_mask; + const uint32_t q_hi_pixel_mask = sai->q_hi_pixel_mask; + const uint32_t q_lo_pixel_mask = sai->q_lo_pixel_mask; + + /* Point to the first 4 lines, first line is duplicated */ + src_line[0] = src; + src_line[1] = src; + src_line[2] = src + src_linesize*FFMIN(1, height-1); + src_line[3] = src + src_linesize*FFMIN(2, height-1); + +#define READ_COLOR4(dst, src_line, off) dst = *((const uint32_t *)src_line + off) +#define READ_COLOR3(dst, src_line, off) dst = AV_RL24 (src_line + 3*off) +#define READ_COLOR2(dst, src_line, off) dst = sai->is_be ? 
AV_RB16(src_line + 2 * off) : AV_RL16(src_line + 2 * off) + + for (y = 0; y < height; y++) { + uint8_t *dst_line[2]; + + dst_line[0] = dst + dst_linesize*2*y; + dst_line[1] = dst + dst_linesize*(2*y+1); + + switch (bpp) { + case 4: + READ_COLOR4(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR4(color[0][2], src_line[0], 1); READ_COLOR4(color[0][3], src_line[0], 2); + READ_COLOR4(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR4(color[1][2], src_line[1], 1); READ_COLOR4(color[1][3], src_line[1], 2); + READ_COLOR4(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR4(color[2][2], src_line[2], 1); READ_COLOR4(color[2][3], src_line[2], 2); + READ_COLOR4(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR4(color[3][2], src_line[3], 1); READ_COLOR4(color[3][3], src_line[3], 2); + break; + case 3: + READ_COLOR3(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR3(color[0][2], src_line[0], 1); READ_COLOR3(color[0][3], src_line[0], 2); + READ_COLOR3(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR3(color[1][2], src_line[1], 1); READ_COLOR3(color[1][3], src_line[1], 2); + READ_COLOR3(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR3(color[2][2], src_line[2], 1); READ_COLOR3(color[2][3], src_line[2], 2); + READ_COLOR3(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR3(color[3][2], src_line[3], 1); READ_COLOR3(color[3][3], src_line[3], 2); + break; + default: + READ_COLOR2(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR2(color[0][2], src_line[0], 1); READ_COLOR2(color[0][3], src_line[0], 2); + READ_COLOR2(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR2(color[1][2], src_line[1], 1); READ_COLOR2(color[1][3], src_line[1], 2); + READ_COLOR2(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR2(color[2][2], src_line[2], 1); READ_COLOR2(color[2][3], src_line[2], 2); + 
READ_COLOR2(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR2(color[3][2], src_line[3], 1); READ_COLOR2(color[3][3], src_line[3], 2); + } + + for (x = 0; x < width; x++) { + uint32_t product1a, product1b, product2a, product2b; + +//--------------------------------------- B0 B1 B2 B3 0 1 2 3 +// 4 5* 6 S2 -> 4 5* 6 7 +// 1 2 3 S1 8 9 10 11 +// A0 A1 A2 A3 12 13 14 15 +//-------------------------------------- + if (color[2][1] == color[1][2] && color[1][1] != color[2][2]) { + product2b = color[2][1]; + product1b = product2b; + } else if (color[1][1] == color[2][2] && color[2][1] != color[1][2]) { + product2b = color[1][1]; + product1b = product2b; + } else if (color[1][1] == color[2][2] && color[2][1] == color[1][2]) { + int r = 0; + + r += GET_RESULT(color[1][2], color[1][1], color[1][0], color[3][1]); + r += GET_RESULT(color[1][2], color[1][1], color[2][0], color[0][1]); + r += GET_RESULT(color[1][2], color[1][1], color[3][2], color[2][3]); + r += GET_RESULT(color[1][2], color[1][1], color[0][2], color[1][3]); + + if (r > 0) + product1b = color[1][2]; + else if (r < 0) + product1b = color[1][1]; + else + product1b = INTERPOLATE(color[1][1], color[1][2]); + + product2b = product1b; + } else { + if (color[1][2] == color[2][2] && color[2][2] == color[3][1] && color[2][1] != color[3][2] && color[2][2] != color[3][0]) + product2b = Q_INTERPOLATE(color[2][2], color[2][2], color[2][2], color[2][1]); + else if (color[1][1] == color[2][1] && color[2][1] == color[3][2] && color[3][1] != color[2][2] && color[2][1] != color[3][3]) + product2b = Q_INTERPOLATE(color[2][1], color[2][1], color[2][1], color[2][2]); + else + product2b = INTERPOLATE(color[2][1], color[2][2]); + + if (color[1][2] == color[2][2] && color[1][2] == color[0][1] && color[1][1] != color[0][2] && color[1][2] != color[0][0]) + product1b = Q_INTERPOLATE(color[1][2], color[1][2], color[1][2], color[1][1]); + else if (color[1][1] == color[2][1] && color[1][1] == color[0][2] && color[0][1] != 
color[1][2] && color[1][1] != color[0][3]) + product1b = Q_INTERPOLATE(color[1][2], color[1][1], color[1][1], color[1][1]); + else + product1b = INTERPOLATE(color[1][1], color[1][2]); + } + + if (color[1][1] == color[2][2] && color[2][1] != color[1][2] && color[1][0] == color[1][1] && color[1][1] != color[3][2]) + product2a = INTERPOLATE(color[2][1], color[1][1]); + else if (color[1][1] == color[2][0] && color[1][2] == color[1][1] && color[1][0] != color[2][1] && color[1][1] != color[3][0]) + product2a = INTERPOLATE(color[2][1], color[1][1]); + else + product2a = color[2][1]; + + if (color[2][1] == color[1][2] && color[1][1] != color[2][2] && color[2][0] == color[2][1] && color[2][1] != color[0][2]) + product1a = INTERPOLATE(color[2][1], color[1][1]); + else if (color[1][0] == color[2][1] && color[2][2] == color[2][1] && color[2][0] != color[1][1] && color[2][1] != color[0][0]) + product1a = INTERPOLATE(color[2][1], color[1][1]); + else + product1a = color[1][1]; + + /* Set the calculated pixels */ + switch (bpp) { + case 4: + AV_WN32A(dst_line[0] + x * 8, product1a); + AV_WN32A(dst_line[0] + x * 8 + 4, product1b); + AV_WN32A(dst_line[1] + x * 8, product2a); + AV_WN32A(dst_line[1] + x * 8 + 4, product2b); + break; + case 3: + AV_WL24(dst_line[0] + x * 6, product1a); + AV_WL24(dst_line[0] + x * 6 + 3, product1b); + AV_WL24(dst_line[1] + x * 6, product2a); + AV_WL24(dst_line[1] + x * 6 + 3, product2b); + break; + default: // bpp = 2 + if (sai->is_be) { + AV_WB32(dst_line[0] + x * 4, product1a | (product1b << 16)); + AV_WB32(dst_line[1] + x * 4, product2a | (product2b << 16)); + } else { + AV_WL32(dst_line[0] + x * 4, product1a | (product1b << 16)); + AV_WL32(dst_line[1] + x * 4, product2a | (product2b << 16)); + } + } + + /* Move color matrix forward */ + color[0][0] = color[0][1]; color[0][1] = color[0][2]; color[0][2] = color[0][3]; + color[1][0] = color[1][1]; color[1][1] = color[1][2]; color[1][2] = color[1][3]; + color[2][0] = color[2][1]; color[2][1] = 
color[2][2]; color[2][2] = color[2][3]; + color[3][0] = color[3][1]; color[3][1] = color[3][2]; color[3][2] = color[3][3]; + + if (x < width - 3) { + x += 3; + switch (bpp) { + case 4: + READ_COLOR4(color[0][3], src_line[0], x); + READ_COLOR4(color[1][3], src_line[1], x); + READ_COLOR4(color[2][3], src_line[2], x); + READ_COLOR4(color[3][3], src_line[3], x); + break; + case 3: + READ_COLOR3(color[0][3], src_line[0], x); + READ_COLOR3(color[1][3], src_line[1], x); + READ_COLOR3(color[2][3], src_line[2], x); + READ_COLOR3(color[3][3], src_line[3], x); + break; + default: /* case 2 */ + READ_COLOR2(color[0][3], src_line[0], x); + READ_COLOR2(color[1][3], src_line[1], x); + READ_COLOR2(color[2][3], src_line[2], x); + READ_COLOR2(color[3][3], src_line[3], x); + } + x -= 3; + } + } + + /* We're done with one line, so we shift the source lines up */ + src_line[0] = src_line[1]; + src_line[1] = src_line[2]; + src_line[2] = src_line[3]; + + /* Read next line */ + src_line[3] = src_line[2]; + if (y < height - 3) + src_line[3] += src_linesize; + } // y loop +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, + AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_RGB565BE, AV_PIX_FMT_BGR565BE, AV_PIX_FMT_RGB555BE, AV_PIX_FMT_BGR555BE, + AV_PIX_FMT_RGB565LE, AV_PIX_FMT_BGR565LE, AV_PIX_FMT_RGB555LE, AV_PIX_FMT_BGR555LE, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + Super2xSaIContext *sai = inlink->dst->priv; + + sai->hi_pixel_mask = 0xFEFEFEFE; + sai->lo_pixel_mask = 0x01010101; + sai->q_hi_pixel_mask = 0xFCFCFCFC; + sai->q_lo_pixel_mask = 0x03030303; + sai->bpp = 4; + + switch (inlink->format) { + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: + sai->bpp = 3; + break; + + case AV_PIX_FMT_RGB565BE: + case AV_PIX_FMT_BGR565BE: + sai->is_be = 1; + case 
AV_PIX_FMT_RGB565LE: + case AV_PIX_FMT_BGR565LE: + sai->hi_pixel_mask = 0xF7DEF7DE; + sai->lo_pixel_mask = 0x08210821; + sai->q_hi_pixel_mask = 0xE79CE79C; + sai->q_lo_pixel_mask = 0x18631863; + sai->bpp = 2; + break; + + case AV_PIX_FMT_BGR555BE: + case AV_PIX_FMT_RGB555BE: + sai->is_be = 1; + case AV_PIX_FMT_BGR555LE: + case AV_PIX_FMT_RGB555LE: + sai->hi_pixel_mask = 0x7BDE7BDE; + sai->lo_pixel_mask = 0x04210421; + sai->q_hi_pixel_mask = 0x739C739C; + sai->q_lo_pixel_mask = 0x0C630C63; + sai->bpp = 2; + break; + } + + return 0; +} + +static int config_output(AVFilterLink *outlink) +{ + AVFilterLink *inlink = outlink->src->inputs[0]; + + outlink->w = inlink->w*2; + outlink->h = inlink->h*2; + + av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n", + av_get_pix_fmt_name(inlink->format), + inlink->w, inlink->h, outlink->w, outlink->h); + + return 0; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) +{ + AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterBufferRef *outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!outpicref) { + avfilter_unref_bufferp(&inpicref); + return AVERROR(ENOMEM); + } + avfilter_copy_buffer_ref_props(outpicref, inpicref); + outpicref->video->w = outlink->w; + outpicref->video->h = outlink->h; + + super2xsai(inlink->dst, inpicref->data[0], inpicref->linesize[0], + outpicref->data[0], outpicref->linesize[0], + inlink->w, inlink->h); + + avfilter_unref_bufferp(&inpicref); + return ff_filter_frame(outlink, outpicref); +} + +static const AVFilterPad super2xsai_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad super2xsai_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter avfilter_vf_super2xsai = { + .name = 
"super2xsai", + .description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."), + .priv_size = sizeof(Super2xSaIContext), + .query_formats = query_formats, + .inputs = super2xsai_inputs, + .outputs = super2xsai_outputs, +}; diff --git a/libavfilter/vf_swapuv.c b/libavfilter/vf_swapuv.c new file mode 100644 index 0000000..82cc07c --- /dev/null +++ b/libavfilter/vf_swapuv.c @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * swap UV filter + */ + +#include "libavutil/pixdesc.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, + int w, int h) +{ + AVFilterBufferRef *picref = + ff_default_get_video_buffer(link, perms, w, h); + + FFSWAP(uint8_t*, picref->data[1], picref->data[2]); + FFSWAP(int, picref->linesize[1], picref->linesize[2]); + + return picref; +} + +static int filter_frame(AVFilterLink *link, AVFilterBufferRef *inpicref) +{ + FFSWAP(uint8_t*, inpicref->data[1], inpicref->data[2]); + FFSWAP(int, inpicref->linesize[1], inpicref->linesize[2]); + + return ff_filter_frame(link->dst->outputs[0], inpicref); +} + +static int is_planar_yuv(const AVPixFmtDescriptor *desc) +{ + int i; + + if (desc->flags & ~(PIX_FMT_BE | PIX_FMT_PLANAR | PIX_FMT_ALPHA) || + desc->nb_components < 3 || + (desc->comp[1].depth_minus1 != desc->comp[2].depth_minus1)) + return 0; + for (i = 0; i < desc->nb_components; i++) { + if (desc->comp[i].offset_plus1 != 1 || + desc->comp[i].shift != 0 || + desc->comp[i].plane != i) + return 0; + } + + return 1; +} + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterFormats *formats = NULL; + int fmt; + + for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) { + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); + if (is_planar_yuv(desc)) + ff_add_format(&formats, fmt); + } + + ff_set_common_formats(ctx, formats); + return 0; +} + +static const AVFilterPad swapuv_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = get_video_buffer, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad swapuv_outputs[] = { + { + .name = "default", + .type = 
AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter avfilter_vf_swapuv = { + .name = "swapuv", + .description = NULL_IF_CONFIG_SMALL("Swap U and V components."), + .priv_size = 0, + .query_formats = query_formats, + .inputs = swapuv_inputs, + .outputs = swapuv_outputs, +}; diff --git a/libavfilter/vf_thumbnail.c b/libavfilter/vf_thumbnail.c new file mode 100644 index 0000000..0d245d9 --- /dev/null +++ b/libavfilter/vf_thumbnail.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2011 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Potential thumbnail lookup filter to reduce the risk of an inappropriate + * selection (such as a black frame) we could get with an absolute seek. + * + * Simplified version of algorithm by Vadim Zaliva <lord@crocodile.org>. 
+ * @see http://notbrainsurgery.livejournal.com/29773.html + */ + +#include "avfilter.h" +#include "internal.h" + +#define HIST_SIZE (3*256) + +struct thumb_frame { + AVFilterBufferRef *buf; ///< cached frame + int histogram[HIST_SIZE]; ///< RGB color distribution histogram of the frame +}; + +typedef struct { + int n; ///< current frame + int n_frames; ///< number of frames for analysis + struct thumb_frame *frames; ///< the n_frames frames +} ThumbContext; + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + ThumbContext *thumb = ctx->priv; + + if (!args) { + thumb->n_frames = 100; + } else { + int n = sscanf(args, "%d", &thumb->n_frames); + if (n != 1 || thumb->n_frames < 2) { + thumb->n_frames = 0; + av_log(ctx, AV_LOG_ERROR, + "Invalid number of frames specified (minimum is 2).\n"); + return AVERROR(EINVAL); + } + } + thumb->frames = av_calloc(thumb->n_frames, sizeof(*thumb->frames)); + if (!thumb->frames) { + av_log(ctx, AV_LOG_ERROR, + "Allocation failure, try to lower the number of frames\n"); + return AVERROR(ENOMEM); + } + av_log(ctx, AV_LOG_VERBOSE, "batch size: %d frames\n", thumb->n_frames); + return 0; +} + +/** + * @brief Compute Sum-square deviation to estimate "closeness". 
+ * @param hist color distribution histogram + * @param median average color distribution histogram + * @return sum of squared errors + */ +static double frame_sum_square_err(const int *hist, const double *median) +{ + int i; + double err, sum_sq_err = 0; + + for (i = 0; i < HIST_SIZE; i++) { + err = median[i] - (double)hist[i]; + sum_sq_err += err*err; + } + return sum_sq_err; +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) +{ + int i, j, best_frame_idx = 0; + double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1; + AVFilterContext *ctx = inlink->dst; + ThumbContext *thumb = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterBufferRef *picref; + int *hist = thumb->frames[thumb->n].histogram; + const uint8_t *p = frame->data[0]; + + // keep a reference of each frame + thumb->frames[thumb->n].buf = frame; + + // update current frame RGB histogram + for (j = 0; j < inlink->h; j++) { + for (i = 0; i < inlink->w; i++) { + hist[0*256 + p[i*3 ]]++; + hist[1*256 + p[i*3 + 1]]++; + hist[2*256 + p[i*3 + 2]]++; + } + p += frame->linesize[0]; + } + + // no selection until the buffer of N frames is filled up + if (thumb->n < thumb->n_frames - 1) { + thumb->n++; + return 0; + } + + // average histogram of the N frames + for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) { + for (i = 0; i < thumb->n_frames; i++) + avg_hist[j] += (double)thumb->frames[i].histogram[j]; + avg_hist[j] /= thumb->n_frames; + } + + // find the frame closer to the average using the sum of squared errors + for (i = 0; i < thumb->n_frames; i++) { + sq_err = frame_sum_square_err(thumb->frames[i].histogram, avg_hist); + if (i == 0 || sq_err < min_sq_err) + best_frame_idx = i, min_sq_err = sq_err; + } + + // free and reset everything (except the best frame buffer) + for (i = 0; i < thumb->n_frames; i++) { + memset(thumb->frames[i].histogram, 0, sizeof(thumb->frames[i].histogram)); + if (i == best_frame_idx) + continue; + 
avfilter_unref_bufferp(&thumb->frames[i].buf); + } + thumb->n = 0; + + // raise the chosen one + picref = thumb->frames[best_frame_idx].buf; + av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected\n", + best_frame_idx, picref->pts * av_q2d(inlink->time_base)); + thumb->frames[best_frame_idx].buf = NULL; + return ff_filter_frame(outlink, picref); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + int i; + ThumbContext *thumb = ctx->priv; + for (i = 0; i < thumb->n_frames && thumb->frames[i].buf; i++) + avfilter_unref_bufferp(&thumb->frames[i].buf); + av_freep(&thumb->frames); +} + +static int request_frame(AVFilterLink *link) +{ + ThumbContext *thumb = link->src->priv; + + /* loop until a frame thumbnail is available (when a frame is queued, + * thumb->n is reset to zero) */ + do { + int ret = ff_request_frame(link->src->inputs[0]); + if (ret < 0) + return ret; + } while (thumb->n); + return 0; +} + +static int poll_frame(AVFilterLink *link) +{ + ThumbContext *thumb = link->src->priv; + AVFilterLink *inlink = link->src->inputs[0]; + int ret, available_frames = ff_poll_frame(inlink); + + /* If the input link is not able to provide any frame, we can't do anything + * at the moment and thus have zero thumbnail available. */ + if (!available_frames) + return 0; + + /* Since at least one frame is available and the next frame will allow us + * to compute a thumbnail, we can return 1 frame. */ + if (thumb->n == thumb->n_frames - 1) + return 1; + + /* we have some frame(s) available in the input link, but not yet enough to + * output a thumbnail, so we request more */ + ret = ff_request_frame(inlink); + return ret < 0 ? 
ret : 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_NONE + }; + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static const AVFilterPad thumbnail_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer = ff_null_get_video_buffer, + .min_perms = AV_PERM_PRESERVE, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad thumbnail_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .poll_frame = poll_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_thumbnail = { + .name = "thumbnail", + .description = NULL_IF_CONFIG_SMALL("Select the most representative frame in a given sequence of consecutive frames."), + .priv_size = sizeof(ThumbContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = thumbnail_inputs, + .outputs = thumbnail_outputs, +}; diff --git a/libavfilter/vf_tile.c b/libavfilter/vf_tile.c new file mode 100644 index 0000000..e4ced88 --- /dev/null +++ b/libavfilter/vf_tile.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * tile video filter + */ + +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "avfilter.h" +#include "drawutils.h" +#include "formats.h" +#include "video.h" +#include "internal.h" + +typedef struct { + const AVClass *class; + unsigned w, h; + unsigned margin; + unsigned padding; + unsigned current; + unsigned nb_frames; + FFDrawContext draw; + FFDrawColor blank; + AVFilterBufferRef *out_ref; +} TileContext; + +#define REASONABLE_SIZE 1024 + +#define OFFSET(x) offsetof(TileContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption tile_options[] = { + { "layout", "set grid size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, + {.str = "6x5"}, 0, 0, FLAGS }, + { "margin", "set outer border margin in pixels", OFFSET(margin), + AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS }, + { "padding", "set inner border thickness in pixels", OFFSET(padding), + AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS }, + { "nb_frames", "set maximum number of frame to render", OFFSET(nb_frames), + AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS }, + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(tile); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + TileContext *tile = ctx->priv; + static const char *shorthand[] = { "layout", "nb_frames", "margin", "padding", NULL }; + int ret; + + tile->class = &tile_class; + av_opt_set_defaults(tile); + + if ((ret = av_opt_set_from_string(tile, args, shorthand, "=", ":")) < 0) + return ret; + + if (tile->w > REASONABLE_SIZE || tile->h > REASONABLE_SIZE) { + av_log(ctx, AV_LOG_ERROR, "Tile size %ux%u is insane.\n", + tile->w, tile->h); + return AVERROR(EINVAL); + } + + if (tile->nb_frames == 0) { + tile->nb_frames = tile->w * tile->h; + } else if 
(tile->nb_frames > tile->w * tile->h) { + av_log(ctx, AV_LOG_ERROR, "nb_frames must be less than or equal to %dx%d=%d\n", + tile->w, tile->h, tile->w * tile->h); + return AVERROR(EINVAL); + } + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0)); + return 0; +} + +static int config_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + TileContext *tile = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + const unsigned total_margin_w = (tile->w - 1) * tile->padding + 2*tile->margin; + const unsigned total_margin_h = (tile->h - 1) * tile->padding + 2*tile->margin; + + if (inlink->w > (INT_MAX - total_margin_w) / tile->w) { + av_log(ctx, AV_LOG_ERROR, "Total width %ux%u is too much.\n", + tile->w, inlink->w); + return AVERROR(EINVAL); + } + if (inlink->h > (INT_MAX - total_margin_h) / tile->h) { + av_log(ctx, AV_LOG_ERROR, "Total height %ux%u is too much.\n", + tile->h, inlink->h); + return AVERROR(EINVAL); + } + outlink->w = tile->w * inlink->w + total_margin_w; + outlink->h = tile->h * inlink->h + total_margin_h; + outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; + outlink->frame_rate = av_mul_q(inlink->frame_rate, + (AVRational){ 1, tile->nb_frames }); + ff_draw_init(&tile->draw, inlink->format, 0); + /* TODO make the color an option, or find an unified way of choosing it */ + ff_draw_color(&tile->draw, &tile->blank, (uint8_t[]){ 0, 0, 0, -1 }); + + return 0; +} + +static void get_current_tile_pos(AVFilterContext *ctx, unsigned *x, unsigned *y) +{ + TileContext *tile = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + const unsigned tx = tile->current % tile->w; + const unsigned ty = tile->current / tile->w; + + *x = tile->margin + (inlink->w + tile->padding) * tx; + *y = tile->margin + (inlink->h + tile->padding) * ty; +} + +static void draw_blank_frame(AVFilterContext *ctx, AVFilterBufferRef *out_buf) +{ + TileContext *tile = ctx->priv; + 
AVFilterLink *inlink = ctx->inputs[0]; + unsigned x0, y0; + + get_current_tile_pos(ctx, &x0, &y0); + ff_fill_rectangle(&tile->draw, &tile->blank, + out_buf->data, out_buf->linesize, + x0, y0, inlink->w, inlink->h); + tile->current++; +} +static int end_last_frame(AVFilterContext *ctx) +{ + TileContext *tile = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + AVFilterBufferRef *out_buf = tile->out_ref; + int ret; + + while (tile->current < tile->nb_frames) + draw_blank_frame(ctx, out_buf); + ret = ff_filter_frame(outlink, out_buf); + tile->current = 0; + return ret; +} + +/* Note: direct rendering is not possible since there is no guarantee that + * buffers are fed to filter_frame in the order they were obtained from + * get_buffer (think B-frames). */ + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) +{ + AVFilterContext *ctx = inlink->dst; + TileContext *tile = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + unsigned x0, y0; + + if (!tile->current) { + tile->out_ref = ff_get_video_buffer(outlink, AV_PERM_WRITE, + outlink->w, outlink->h); + if (!tile->out_ref) + return AVERROR(ENOMEM); + avfilter_copy_buffer_ref_props(tile->out_ref, picref); + tile->out_ref->video->w = outlink->w; + tile->out_ref->video->h = outlink->h; + + /* fill surface once for margin/padding */ + if (tile->margin || tile->padding) + ff_fill_rectangle(&tile->draw, &tile->blank, + tile->out_ref->data, + tile->out_ref->linesize, + 0, 0, outlink->w, outlink->h); + } + + get_current_tile_pos(ctx, &x0, &y0); + ff_copy_rectangle2(&tile->draw, + tile->out_ref->data, tile->out_ref->linesize, + picref->data, picref->linesize, + x0, y0, 0, 0, inlink->w, inlink->h); + + avfilter_unref_bufferp(&picref); + if (++tile->current == tile->nb_frames) + return end_last_frame(ctx); + + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + TileContext *tile = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + int r; + + 
while (1) { + r = ff_request_frame(inlink); + if (r < 0) { + if (r == AVERROR_EOF && tile->current) + r = end_last_frame(ctx); + break; + } + if (!tile->current) /* done */ + break; + } + return r; +} + +static const AVFilterPad tile_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + .min_perms = AV_PERM_READ, + }, + { NULL } +}; + +static const AVFilterPad tile_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_tile = { + .name = "tile", + .description = NULL_IF_CONFIG_SMALL("Tile several successive frames together."), + .init = init, + .query_formats = query_formats, + .priv_size = sizeof(TileContext), + .inputs = tile_inputs, + .outputs = tile_outputs, + .priv_class = &tile_class, +}; diff --git a/libavfilter/vf_tinterlace.c b/libavfilter/vf_tinterlace.c new file mode 100644 index 0000000..909784e --- /dev/null +++ b/libavfilter/vf_tinterlace.c @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2010 Baptiste Coudurier + * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with FFmpeg if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +/** + * @file + * temporal field interlace filter, ported from MPlayer/libmpcodecs + */ + +#include "libavutil/opt.h" +#include "libavutil/imgutils.h" +#include "libavutil/avassert.h" +#include "avfilter.h" +#include "internal.h" + +enum TInterlaceMode { + MODE_MERGE = 0, + MODE_DROP_EVEN, + MODE_DROP_ODD, + MODE_PAD, + MODE_INTERLEAVE_TOP, + MODE_INTERLEAVE_BOTTOM, + MODE_INTERLACEX2, + MODE_NB, +}; + +typedef struct { + const AVClass *class; + enum TInterlaceMode mode; ///< interlace mode selected + int flags; ///< flags affecting interlacing algorithm + int frame; ///< number of the output frame + int vsub; ///< chroma vertical subsampling + AVFilterBufferRef *cur; + AVFilterBufferRef *next; + uint8_t *black_data[4]; ///< buffer used to fill padded lines + int black_linesize[4]; +} TInterlaceContext; + +#define OFFSET(x) offsetof(TInterlaceContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define TINTERLACE_FLAG_VLPF 01 + +static const AVOption tinterlace_options[] = { + {"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_MERGE}, 0, MODE_NB-1, FLAGS, "mode"}, + {"merge", "merge fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGE}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"drop_even", "drop even fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_EVEN}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"drop_odd", "drop odd fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_ODD}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"pad", "pad alternate lines with black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PAD}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"interleave_top", "interleave top and bottom fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_TOP}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"interleave_bottom", "interleave bottom and top fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"interlacex2", "interlace fields from two consecutive frames", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLACEX2}, INT_MIN, 
INT_MAX, FLAGS, "mode"}, + + {"flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX, 0, "flags" }, + {"low_pass_filter", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" }, + {"vlpf", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" }, + + {NULL} +}; + +AVFILTER_DEFINE_CLASS(tinterlace); + +#define FULL_SCALE_YUVJ_FORMATS \ + AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P + +static enum AVPixelFormat full_scale_yuvj_pix_fmts[] = { + FULL_SCALE_YUVJ_FORMATS, AV_PIX_FMT_NONE +}; + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + TInterlaceContext *tinterlace = ctx->priv; + static const char *shorthand[] = { "mode", NULL }; + + tinterlace->class = &tinterlace_class; + av_opt_set_defaults(tinterlace); + + return av_opt_set_from_string(tinterlace, args, shorthand, "=", ":"); +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + TInterlaceContext *tinterlace = ctx->priv; + + avfilter_unref_bufferp(&tinterlace->cur ); + avfilter_unref_bufferp(&tinterlace->next); + + av_opt_free(tinterlace); + av_freep(&tinterlace->black_data[0]); +} + +static int config_out_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = outlink->src->inputs[0]; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format); + TInterlaceContext *tinterlace = ctx->priv; + + tinterlace->vsub = desc->log2_chroma_h; + outlink->w = inlink->w; + outlink->h = 
tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ? + inlink->h*2 : inlink->h; + + if (tinterlace->mode == MODE_PAD) { + uint8_t black[4] = { 16, 128, 128, 16 }; + int i, ret; + if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts)) + black[0] = black[3] = 0; + ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize, + outlink->w, outlink->h, outlink->format, 1); + if (ret < 0) + return ret; + + /* fill black picture with black */ + for (i = 0; i < 4 && tinterlace->black_data[i]; i++) { + int h = i == 1 || i == 2 ? outlink->h >> desc->log2_chroma_h : outlink->h; + memset(tinterlace->black_data[i], black[i], + tinterlace->black_linesize[i] * h); + } + } + if ((tinterlace->flags & TINTERLACE_FLAG_VLPF) + && !(tinterlace->mode == MODE_INTERLEAVE_TOP + || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) { + av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n", + tinterlace->mode); + tinterlace->flags &= ~TINTERLACE_FLAG_VLPF; + } + av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n", + tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off", + inlink->h, outlink->h); + + return 0; +} + +#define FIELD_UPPER 0 +#define FIELD_LOWER 1 +#define FIELD_UPPER_AND_LOWER 2 + +/** + * Copy picture field from src to dst. + * + * @param src_field copy from upper, lower field or both + * @param interleave leave a padding line between each copied line + * @param dst_field copy to upper or lower field, + * only meaningful when interleave is selected + * @param flags context flags + */ +static inline +void copy_picture_field(uint8_t *dst[4], int dst_linesize[4], + const uint8_t *src[4], int src_linesize[4], + enum AVPixelFormat format, int w, int src_h, + int src_field, int interleave, int dst_field, + int flags) +{ + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format); + int plane, vsub = desc->log2_chroma_h; + int k = src_field == FIELD_UPPER_AND_LOWER ? 
1 : 2; + int h, i; + + for (plane = 0; plane < desc->nb_components; plane++) { + int lines = plane == 1 || plane == 2 ? src_h >> vsub : src_h; + int linesize = av_image_get_linesize(format, w, plane); + uint8_t *dstp = dst[plane]; + const uint8_t *srcp = src[plane]; + + if (linesize < 0) + return; + + lines /= k; + if (src_field == FIELD_LOWER) + srcp += src_linesize[plane]; + if (interleave && dst_field == FIELD_LOWER) + dstp += dst_linesize[plane]; + if (flags & TINTERLACE_FLAG_VLPF) { + // Low-pass filtering is required when creating an interlaced destination from + // a progressive source which contains high-frequency vertical detail. + // Filtering will reduce interlace 'twitter' and Moire patterning. + int srcp_linesize = src_linesize[plane] * k; + int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1); + for (h = lines; h > 0; h--) { + const uint8_t *srcp_above = srcp - src_linesize[plane]; + const uint8_t *srcp_below = srcp + src_linesize[plane]; + if (h == lines) srcp_above = srcp; // there is no line above + if (h == 1) srcp_below = srcp; // there is no line below + for (i = 0; i < linesize; i++) { + // this calculation is an integer representation of + // '0.5 * current + 0.25 * above + 0.25 + below' + // '1 +' is for rounding. */ + dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2; + } + dstp += dstp_linesize; + srcp += srcp_linesize; + } + } else { + av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 
2 : 1), + srcp, src_linesize[plane]*k, linesize, lines); + } + } +} + +static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) +{ + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + TInterlaceContext *tinterlace = ctx->priv; + AVFilterBufferRef *cur, *next, *out; + int field, tff, ret; + + avfilter_unref_buffer(tinterlace->cur); + tinterlace->cur = tinterlace->next; + tinterlace->next = picref; + + cur = tinterlace->cur; + next = tinterlace->next; + /* we need at least two frames */ + if (!tinterlace->cur) + return 0; + + switch (tinterlace->mode) { + case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into + * the lower field, generating a double-height video at half framerate */ + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!out) + return AVERROR(ENOMEM); + avfilter_copy_buffer_ref_props(out, cur); + out->video->h = outlink->h; + out->video->interlaced = 1; + out->video->top_field_first = 1; + + /* write odd frame lines into the upper field of the new frame */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)cur->data, cur->linesize, + inlink->format, inlink->w, inlink->h, + FIELD_UPPER_AND_LOWER, 1, FIELD_UPPER, tinterlace->flags); + /* write even frame lines into the lower field of the new frame */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)next->data, next->linesize, + inlink->format, inlink->w, inlink->h, + FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER, tinterlace->flags); + avfilter_unref_bufferp(&tinterlace->next); + break; + + case MODE_DROP_ODD: /* only output even frames, odd frames are dropped; height unchanged, half framerate */ + case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */ + out = avfilter_ref_buffer(tinterlace->mode == MODE_DROP_EVEN ? 
cur : next, AV_PERM_READ); + avfilter_unref_bufferp(&tinterlace->next); + break; + + case MODE_PAD: /* expand each frame to double height, but pad alternate + * lines with black; framerate unchanged */ + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + avfilter_copy_buffer_ref_props(out, cur); + out->video->h = outlink->h; + + field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER; + /* copy upper and lower fields */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)cur->data, cur->linesize, + inlink->format, inlink->w, inlink->h, + FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags); + /* pad with black the other field */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize, + inlink->format, inlink->w, inlink->h, + FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags); + break; + + /* interleave upper/lower lines from odd frames with lower/upper lines from even frames, + * halving the frame rate and preserving image height */ + case MODE_INTERLEAVE_TOP: /* top field first */ + case MODE_INTERLEAVE_BOTTOM: /* bottom field first */ + tff = tinterlace->mode == MODE_INTERLEAVE_TOP; + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!out) + return AVERROR(ENOMEM); + avfilter_copy_buffer_ref_props(out, cur); + out->video->interlaced = 1; + out->video->top_field_first = tff; + + /* copy upper/lower field from cur */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)cur->data, cur->linesize, + inlink->format, inlink->w, inlink->h, + tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER, + tinterlace->flags); + /* copy lower/upper field from next */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)next->data, next->linesize, + inlink->format, inlink->w, inlink->h, + tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? 
FIELD_LOWER : FIELD_UPPER, + tinterlace->flags); + avfilter_unref_bufferp(&tinterlace->next); + break; + case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */ + /* output current frame first */ + out = avfilter_ref_buffer(cur, ~AV_PERM_WRITE); + if (!out) + return AVERROR(ENOMEM); + out->video->interlaced = 1; + + if ((ret = ff_filter_frame(outlink, out)) < 0) + return ret; + + /* output mix of current and next frame */ + tff = next->video->top_field_first; + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + if (!out) + return AVERROR(ENOMEM); + avfilter_copy_buffer_ref_props(out, next); + out->video->interlaced = 1; + + /* write current frame second field lines into the second field of the new frame */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)cur->data, cur->linesize, + inlink->format, inlink->w, inlink->h, + tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER, + tinterlace->flags); + /* write next frame first field lines into the first field of the new frame */ + copy_picture_field(out->data, out->linesize, + (const uint8_t **)next->data, next->linesize, + inlink->format, inlink->w, inlink->h, + tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? 
FIELD_UPPER : FIELD_LOWER, + tinterlace->flags); + break; + default: + av_assert0(0); + } + + ret = ff_filter_frame(outlink, out); + tinterlace->frame++; + + return ret; +} + +static int request_frame(AVFilterLink *outlink) +{ + TInterlaceContext *tinterlace = outlink->src->priv; + AVFilterLink *inlink = outlink->src->inputs[0]; + + do { + int ret; + + if ((ret = ff_request_frame(inlink)) < 0) + return ret; + } while (!tinterlace->cur); + + return 0; +} + +static const AVFilterPad tinterlace_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = filter_frame, + }, + { NULL } +}; + +static const AVFilterPad tinterlace_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_out_props, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter avfilter_vf_tinterlace = { + .name = "tinterlace", + .description = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."), + .priv_size = sizeof(TInterlaceContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = tinterlace_inputs, + .outputs = tinterlace_outputs, + .priv_class = &tinterlace_class, +}; diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c index d7a1739..c381ecc 100644 --- a/libavfilter/vf_transpose.c +++ b/libavfilter/vf_transpose.c @@ -2,20 +2,20 @@ * Copyright (c) 2010 Stefano Sabatini * Copyright (c) 2008 Vitor Sessak * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -28,6 +28,7 @@ #include <stdio.h> #include "libavutil/intreadwrite.h" +#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/imgutils.h" #include "libavutil/internal.h" @@ -36,7 +37,14 @@ #include "internal.h" #include "video.h" +typedef enum { + TRANSPOSE_PT_TYPE_NONE, + TRANSPOSE_PT_TYPE_LANDSCAPE, + TRANSPOSE_PT_TYPE_PORTRAIT, +} PassthroughType; + typedef struct { + const AVClass *class; int hsub, vsub; int pixsteps[4]; @@ -45,27 +53,40 @@ typedef struct { /* 2 Rotate by 90 degrees counterclockwise. */ /* 3 Rotate by 90 degrees clockwise and vflip. 
*/ int dir; + PassthroughType passthrough; ///< landscape passthrough mode enabled } TransContext; +#define OFFSET(x) offsetof(TransContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption transpose_options[] = { + { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, {.i64=0}, 0, 7, FLAGS }, + + { "passthrough", "do not apply transposition if the input matches the specified geometry", + OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" }, + { "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_NONE}, INT_MIN, INT_MAX, FLAGS, "passthrough" }, + { "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT}, INT_MIN, INT_MAX, FLAGS, "passthrough" }, + { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" }, + + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(transpose); + static av_cold int init(AVFilterContext *ctx, const char *args) { TransContext *trans = ctx->priv; - trans->dir = 0; + const char *shorthand[] = { "dir", "passthrough", NULL }; - if (args) - sscanf(args, "%d", &trans->dir); + trans->class = &transpose_class; + av_opt_set_defaults(trans); - if (trans->dir < 0 || trans->dir > 3) { - av_log(ctx, AV_LOG_ERROR, "Invalid value %d not between 0 and 3.\n", - trans->dir); - return AVERROR(EINVAL); - } - return 0; + return av_opt_set_from_string(trans, args, shorthand, "=", ":"); } static int query_formats(AVFilterContext *ctx) { - enum AVPixelFormat pix_fmts[] = { + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, @@ -75,16 +96,13 @@ static int query_formats(AVFilterContext *ctx) AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE, AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE, AV_PIX_FMT_YUV420P16LE, 
AV_PIX_FMT_YUV420P16BE, - AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_NV12, AV_PIX_FMT_NV21, AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8, AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE, - AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P, - AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, - AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, - AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P, + AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; @@ -101,6 +119,23 @@ static int config_props_output(AVFilterLink *outlink) const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format); const AVPixFmtDescriptor *desc_in = av_pix_fmt_desc_get(inlink->format); + if (trans->dir&4) { + av_log(ctx, AV_LOG_WARNING, + "dir values greater than 3 are deprecated, use the passthrough option instead\n"); + trans->dir &= 3; + trans->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE; + } + + if ((inlink->w >= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) || + (inlink->w <= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) { + av_log(ctx, AV_LOG_VERBOSE, + "w:%d h:%d -> w:%d h:%d (passthrough mode)\n", + inlink->w, inlink->h, inlink->w, inlink->h); + return 0; + } else { + trans->passthrough = TRANSPOSE_PT_TYPE_NONE; + } + trans->hsub = desc_in->log2_chroma_w; trans->vsub = desc_in->log2_chroma_h; @@ -121,13 +156,25 @@ static int config_props_output(AVFilterLink *outlink) return 0; } +static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h) +{ + TransContext *trans = inlink->dst->priv; + + return trans->passthrough ? 
+ ff_null_get_video_buffer (inlink, perms, w, h) : + ff_default_get_video_buffer(inlink, perms, w, h); +} + static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) { - AVFilterLink *outlink = inlink->dst->outputs[0]; TransContext *trans = inlink->dst->priv; + AVFilterLink *outlink = inlink->dst->outputs[0]; AVFilterBufferRef *out; int plane; + if (trans->passthrough) + return ff_filter_frame(outlink, in); + out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); if (!out) { avfilter_unref_bufferp(&in); @@ -136,11 +183,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) out->pts = in->pts; - if (in->video->pixel_aspect.num == 0) { - out->video->pixel_aspect = in->video->pixel_aspect; + if (in->video->sample_aspect_ratio.num == 0) { + out->video->sample_aspect_ratio = in->video->sample_aspect_ratio; } else { - out->video->pixel_aspect.num = in->video->pixel_aspect.den; - out->video->pixel_aspect.den = in->video->pixel_aspect.num; + out->video->sample_aspect_ratio.num = in->video->sample_aspect_ratio.den; + out->video->sample_aspect_ratio.den = in->video->sample_aspect_ratio.num; } for (plane = 0; out->data[plane]; plane++) { @@ -202,6 +249,7 @@ static const AVFilterPad avfilter_vf_transpose_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, + .get_video_buffer= get_video_buffer, .filter_frame = filter_frame, .min_perms = AV_PERM_READ, }, @@ -228,4 +276,5 @@ AVFilter avfilter_vf_transpose = { .inputs = avfilter_vf_transpose_inputs, .outputs = avfilter_vf_transpose_outputs, + .priv_class = &transpose_class, }; diff --git a/libavfilter/vf_unsharp.c b/libavfilter/vf_unsharp.c index b446937..3a9b0b6 100644 --- a/libavfilter/vf_unsharp.c +++ b/libavfilter/vf_unsharp.c @@ -3,26 +3,26 @@ * Port copyright (c) 2010 Daniel G. Taylor <dan@programmer-art.org> * Relicensed to the LGPL with permission from Remi Guyomarch. * - * This file is part of Libav. + * This file is part of FFmpeg. 
* - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file - * blur / sharpen filter, ported to Libav from MPlayer + * blur / sharpen filter, ported to FFmpeg from MPlayer * libmpcodecs/unsharp.c. * * This code is based on: @@ -76,7 +76,7 @@ static void apply_unsharp( uint8_t *dst, int dst_stride, int32_t res; int x, y, z; - const uint8_t *src2; + const uint8_t *src2 = NULL; //silence a warning if (!fp->amount) { if (dst_stride == src_stride) @@ -159,7 +159,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) static int query_formats(AVFilterContext *ctx) { - enum AVPixelFormat pix_fmts[] = { + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE diff --git a/libavfilter/vf_vflip.c b/libavfilter/vf_vflip.c index 5e6e965..6077789 100644 --- a/libavfilter/vf_vflip.c +++ b/libavfilter/vf_vflip.c @@ -1,20 +1,20 @@ /* * Copyright (c) 2007 Bobby Bingham * - * This file is part of Libav. 
+ * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -61,7 +61,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int vsub = i == 1 || i == 2 ? flip->vsub : 0; if (picref->data[i]) { - picref->data[i] += ((h >> vsub)-1) * picref->linesize[i]; + picref->data[i] += (((h + (1<<vsub)-1) >> vsub)-1) * picref->linesize[i]; picref->linesize[i] = -picref->linesize[i]; } } @@ -78,7 +78,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame) int vsub = i == 1 || i == 2 ? 
flip->vsub : 0; if (frame->data[i]) { - frame->data[i] += ((link->h >> vsub)-1) * frame->linesize[i]; + frame->data[i] += (((link->h + (1<<vsub)-1)>> vsub)-1) * frame->linesize[i]; frame->linesize[i] = -frame->linesize[i]; } } diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c index ae49013..56bd61a 100644 --- a/libavfilter/vf_yadif.c +++ b/libavfilter/vf_yadif.c @@ -1,26 +1,26 @@ /* - * Copyright (C) 2006-2010 Michael Niedermayer <michaelni@gmx.at> + * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at> * 2010 James Darnley <james.darnley@gmail.com> * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ +#include "libavutil/avassert.h" #include "libavutil/cpu.h" #include "libavutil/common.h" +#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" @@ -124,6 +124,7 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic, int w = dstpic->video->w; int h = dstpic->video->h; int refs = yadif->cur->linesize[i]; + int absrefs = FFABS(refs); int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8; if (i == 1 || i == 2) { @@ -132,6 +133,12 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic, h >>= yadif->csp->log2_chroma_h; } + if(yadif->temp_line_size < absrefs) { + av_free(yadif->temp_line); + yadif->temp_line = av_mallocz(2*64 + 5*absrefs); + yadif->temp_line_size = absrefs; + } + for (y = 0; y < h; y++) { if ((y ^ parity) & 1) { uint8_t *prev = &yadif->prev->data[i][y * refs]; @@ -139,9 +146,25 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic, uint8_t *next = &yadif->next->data[i][y * refs]; uint8_t *dst = &dstpic->data[i][y * dstpic->linesize[i]]; int mode = y == 1 || y + 2 == h ? 2 : yadif->mode; + int prefs = y+1<h ? refs : -refs; + int mrefs = y ?-refs : refs; + + if(y<=1 || y+2>=h) { + uint8_t *tmp = yadif->temp_line + 64 + 2*absrefs; + if(mode<2) + memcpy(tmp+2*mrefs, cur+2*mrefs, w*df); + memcpy(tmp+mrefs, cur+mrefs, w*df); + memcpy(tmp , cur , w*df); + if(prefs != mrefs) { + memcpy(tmp+prefs, cur+prefs, w*df); + if(mode<2) + memcpy(tmp+2*prefs, cur+2*prefs, w*df); + } + cur = tmp; + } + yadif->filter_line(dst, prev, cur, next, w, - y + 1 < h ? refs : -refs, - y ? 
-refs : refs, + prefs, mrefs, parity ^ tff, mode); } else { memcpy(&dstpic->data[i][y * dstpic->linesize[i]], @@ -153,25 +176,6 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic, emms_c(); } -static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, - int w, int h) -{ - AVFilterBufferRef *picref; - int width = FFALIGN(w, 32); - int height = FFALIGN(h + 2, 32); - int i; - - picref = ff_default_get_video_buffer(link, perms, width, height); - - picref->video->w = w; - picref->video->h = h; - - for (i = 0; i < 3; i++) - picref->data[i] += picref->linesize[i]; - - return picref; -} - static int return_frame(AVFilterContext *ctx, int is_second) { YADIFContext *yadif = ctx->priv; @@ -197,7 +201,7 @@ static int return_frame(AVFilterContext *ctx, int is_second) if (!yadif->csp) yadif->csp = av_pix_fmt_desc_get(link->format); if (yadif->csp->comp[0].depth_minus1 / 8 == 1) - yadif->filter_line = filter_line_c_16bit; + yadif->filter_line = (void*)filter_line_c_16bit; filter(ctx, yadif->out, tff ^ !is_second, tff); @@ -222,6 +226,8 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) AVFilterContext *ctx = link->dst; YADIFContext *yadif = ctx->priv; + av_assert0(picref); + if (yadif->frame_pending) return_frame(ctx, 1); @@ -234,8 +240,8 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) if (!yadif->cur) return 0; - if (yadif->auto_enable && !yadif->cur->video->interlaced) { - yadif->out = avfilter_ref_buffer(yadif->cur, AV_PERM_READ); + if (yadif->deint && !yadif->cur->video->interlaced) { + yadif->out = avfilter_ref_buffer(yadif->cur, ~AV_PERM_WRITE); if (!yadif->out) return AVERROR(ENOMEM); @@ -246,7 +252,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) } if (!yadif->prev && - !(yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ))) + !(yadif->prev = avfilter_ref_buffer(yadif->cur, ~AV_PERM_WRITE))) return AVERROR(ENOMEM); yadif->out = 
ff_get_video_buffer(ctx->outputs[0], PERM_RWP, @@ -281,9 +287,8 @@ static int request_frame(AVFilterLink *link) ret = ff_request_frame(link->src->inputs[0]); - if (ret == AVERROR_EOF && yadif->next) { - AVFilterBufferRef *next = - avfilter_ref_buffer(yadif->next, AV_PERM_READ); + if (ret == AVERROR_EOF && yadif->cur) { + AVFilterBufferRef *next = avfilter_ref_buffer(yadif->next, ~AV_PERM_WRITE); if (!next) return AVERROR(ENOMEM); @@ -300,41 +305,41 @@ static int request_frame(AVFilterLink *link) return 0; } -static int poll_frame(AVFilterLink *link) -{ - YADIFContext *yadif = link->src->priv; - int ret, val; +#define OFFSET(x) offsetof(YADIFContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM - if (yadif->frame_pending) - return 1; +#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit } - val = ff_poll_frame(link->src->inputs[0]); - if (val <= 0) - return val; +static const AVOption yadif_options[] = { + { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"}, + CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"), + CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"), + CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"), + CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"), - //FIXME change API to not requre this red tape - if (val == 1 && !yadif->next) { - if ((ret = ff_request_frame(link->src->inputs[0])) < 0) - return ret; - val = ff_poll_frame(link->src->inputs[0]); - if (val <= 0) - return val; - } - assert(yadif->next || !val); + { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" }, 
+ CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"), + CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"), + CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"), - if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced) - return val; + { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" }, + CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"), + CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"), - return val * ((yadif->mode&1)+1); -} + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(yadif); static av_cold void uninit(AVFilterContext *ctx) { YADIFContext *yadif = ctx->priv; - if (yadif->prev) avfilter_unref_bufferp(&yadif->prev); - if (yadif->cur ) avfilter_unref_bufferp(&yadif->cur ); - if (yadif->next) avfilter_unref_bufferp(&yadif->next); + avfilter_unref_bufferp(&yadif->prev); + avfilter_unref_bufferp(&yadif->cur ); + avfilter_unref_bufferp(&yadif->next); + av_freep(&yadif->temp_line); yadif->temp_line_size = 0; + av_opt_free(yadif); } static int query_formats(AVFilterContext *ctx) @@ -359,6 +364,8 @@ static int query_formats(AVFilterContext *ctx) AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ), AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ), AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_YUVA422P, + AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE }; @@ -370,34 +377,46 @@ static int query_formats(AVFilterContext *ctx) static av_cold int init(AVFilterContext *ctx, const char *args) { YADIFContext *yadif = ctx->priv; + static const char *shorthand[] = { "mode", "parity", "deint", NULL }; + int ret; - yadif->mode = 0; - yadif->parity = -1; - yadif->auto_enable = 0; yadif->csp = NULL; - if (args) - sscanf(args, "%d:%d:%d", - &yadif->mode, &yadif->parity, &yadif->auto_enable); + yadif->class = &yadif_class; + av_opt_set_defaults(yadif); + + if ((ret = av_opt_set_from_string(yadif, 
args, shorthand, "=", ":")) < 0) + return ret; yadif->filter_line = filter_line_c; if (ARCH_X86) ff_yadif_init_x86(yadif); - av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d auto_enable:%d\n", - yadif->mode, yadif->parity, yadif->auto_enable); + av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d deint:%d\n", + yadif->mode, yadif->parity, yadif->deint); return 0; } static int config_props(AVFilterLink *link) { + AVFilterContext *ctx = link->src; + YADIFContext *yadif = ctx->priv; + link->time_base.num = link->src->inputs[0]->time_base.num; link->time_base.den = link->src->inputs[0]->time_base.den * 2; link->w = link->src->inputs[0]->w; link->h = link->src->inputs[0]->h; + if(yadif->mode&1) + link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1}); + + if (link->w < 3 || link->h < 3) { + av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n"); + return AVERROR(EINVAL); + } + return 0; } @@ -405,8 +424,8 @@ static const AVFilterPad avfilter_vf_yadif_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, - .get_video_buffer = get_video_buffer, .filter_frame = filter_frame, + .min_perms = AV_PERM_PRESERVE, }, { NULL } }; @@ -415,7 +434,6 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, - .poll_frame = poll_frame, .request_frame = request_frame, .config_props = config_props, }, @@ -424,7 +442,7 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = { AVFilter avfilter_vf_yadif = { .name = "yadif", - .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image"), + .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."), .priv_size = sizeof(YADIFContext), .init = init, @@ -432,6 +450,7 @@ AVFilter avfilter_vf_yadif = { .query_formats = query_formats, .inputs = avfilter_vf_yadif_inputs, - .outputs = avfilter_vf_yadif_outputs, + + .priv_class = &yadif_class, }; diff --git a/libavfilter/video.c b/libavfilter/video.c index cb68ca4..a493204 
100644 --- a/libavfilter/video.c +++ b/libavfilter/video.c @@ -1,24 +1,29 @@ /* - * This file is part of Libav. + * Copyright 2007 Bobby Bingham + * Copyright Stefano Sabatini <stefasab gmail com> + * Copyright Vitor Sessak <vitor1001 gmail com> * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <string.h> #include <stdio.h> +#include "libavutil/avassert.h" #include "libavutil/imgutils.h" #include "libavutil/mem.h" @@ -26,79 +31,69 @@ #include "internal.h" #include "video.h" -#ifdef DEBUG -static char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms) -{ - snprintf(buf, buf_size, "%s%s%s%s%s%s", - perms & AV_PERM_READ ? "r" : "", - perms & AV_PERM_WRITE ? "w" : "", - perms & AV_PERM_PRESERVE ? "p" : "", - perms & AV_PERM_REUSE ? "u" : "", - perms & AV_PERM_REUSE2 ? "U" : "", - perms & AV_PERM_NEG_LINESIZES ? 
"n" : ""); - return buf; -} -#endif - -static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end) -{ - av_unused char buf[16]; - av_dlog(ctx, - "ref[%p buf:%p refcount:%d perms:%s data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64, - ref, ref->buf, ref->buf->refcount, ff_get_ref_perms_string(buf, sizeof(buf), ref->perms), ref->data[0], - ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3], - ref->pts, ref->pos); - - if (ref->video) { - av_dlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c", - ref->video->pixel_aspect.num, ref->video->pixel_aspect.den, - ref->video->w, ref->video->h, - !ref->video->interlaced ? 'P' : /* Progressive */ - ref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */ - ref->video->key_frame, - av_get_picture_type_char(ref->video->pict_type)); - } - if (ref->audio) { - av_dlog(ctx, " cl:%"PRId64"d n:%d r:%d p:%d", - ref->audio->channel_layout, - ref->audio->nb_samples, - ref->audio->sample_rate, - ref->audio->planar); - } - - av_dlog(ctx, "]%s", end ? "\n" : ""); -} - AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h) { return ff_get_video_buffer(link->dst->outputs[0], perms, w, h); } -/* TODO: set the buffer's priv member to a context structure for the whole - * filter chain. This will allow for a buffer pool instead of the constant - * alloc & free cycle currently implemented. 
*/ AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h) { int linesize[4]; uint8_t *data[4]; + int i; AVFilterBufferRef *picref = NULL; + AVFilterPool *pool = link->pool; + int full_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE | + AV_PERM_REUSE | AV_PERM_REUSE2 | AV_PERM_ALIGN; + + av_assert1(!(perms & ~(full_perms | AV_PERM_NEG_LINESIZES))); + + if (pool) { + for (i = 0; i < POOL_SIZE; i++) { + picref = pool->pic[i]; + if (picref && picref->buf->format == link->format && picref->buf->w == w && picref->buf->h == h) { + AVFilterBuffer *pic = picref->buf; + pool->pic[i] = NULL; + pool->count--; + av_assert0(!picref->video->qp_table); + picref->video->w = w; + picref->video->h = h; + picref->perms = full_perms; + picref->format = link->format; + pic->refcount = 1; + memcpy(picref->data, pic->data, sizeof(picref->data)); + memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize)); + pool->refcount++; + return picref; + } + } + } else { + pool = link->pool = av_mallocz(sizeof(AVFilterPool)); + pool->refcount = 1; + } - // +2 is needed for swscaler, +16 to be SIMD-friendly - if (av_image_alloc(data, linesize, w, h, link->format, 16) < 0) + // align: +2 is needed for swscaler, +16 to be SIMD-friendly + if ((i = av_image_alloc(data, linesize, w, h, link->format, 32)) < 0) return NULL; picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize, - perms, w, h, link->format); + full_perms, w, h, link->format); if (!picref) { av_free(data[0]); return NULL; } + memset(data[0], 128, i); + + picref->buf->priv = pool; + picref->buf->free = NULL; + pool->refcount++; + return picref; } AVFilterBufferRef * -avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int perms, +avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms, int w, int h, enum AVPixelFormat format) { AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer)); @@ -147,8 +142,8 @@ 
AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, int w, int AVFilterBufferRef *ret = NULL; av_unused char buf[16]; - FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0); - av_dlog(NULL, " perms:%s w:%d h:%d\n", ff_get_ref_perms_string(buf, sizeof(buf), perms), w, h); + FF_TPRINTF_START(NULL, get_video_buffer); ff_tlog_link(NULL, link, 0); + ff_tlog(NULL, " perms:%s w:%d h:%d\n", ff_get_ref_perms_string(buf, sizeof(buf), perms), w, h); if (link->dstpad->get_video_buffer) ret = link->dstpad->get_video_buffer(link, perms, w, h); @@ -159,7 +154,7 @@ AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, int w, int if (ret) ret->type = AVMEDIA_TYPE_VIDEO; - FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " returning "); ff_dlog_ref(NULL, ret, 1); + FF_TPRINTF_START(NULL, get_video_buffer); ff_tlog_link(NULL, link, 0); ff_tlog(NULL, " returning "); ff_tlog_ref(NULL, ret, 1); return ret; } diff --git a/libavfilter/video.h b/libavfilter/video.h index be93810..a6af163 100644 --- a/libavfilter/video.h +++ b/libavfilter/video.h @@ -1,18 +1,20 @@ /* - * This file is part of Libav. + * Copyright (c) 2007 Bobby Bingham * - * Libav is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/vsink_nullsink.c b/libavfilter/vsink_nullsink.c index 71d2b3e..a37d346 100644 --- a/libavfilter/vsink_nullsink.c +++ b/libavfilter/vsink_nullsink.c @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/vsrc_cellauto.c b/libavfilter/vsrc_cellauto.c new file mode 100644 index 0000000..06d9f9a --- /dev/null +++ b/libavfilter/vsrc_cellauto.c @@ -0,0 +1,354 @@ +/* + * Copyright (c) Stefano Sabatini 2011 + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * cellular automaton video source, based on Stephen Wolfram "experimentus crucis" + */ + +/* #define DEBUG */ + +#include "libavutil/file.h" +#include "libavutil/lfg.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "libavutil/random_seed.h" +#include "avfilter.h" +#include "internal.h" +#include "formats.h" +#include "video.h" + +typedef struct { + const AVClass *class; + int w, h; + char *filename; + char *rule_str; + uint8_t *file_buf; + size_t file_bufsize; + uint8_t *buf; + int buf_prev_row_idx, buf_row_idx; + uint8_t rule; + uint64_t pts; + AVRational time_base; + char *rate; ///< video frame rate + double random_fill_ratio; + uint32_t random_seed; + int stitch, scroll, start_full; + int64_t generation; ///< the generation number, starting from 0 + AVLFG lfg; + char *pattern; +} CellAutoContext; + +#define OFFSET(x) offsetof(CellAutoContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption cellauto_options[] = { + { "filename", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "f", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "pattern", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "p", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, 
{.str = "25"}, 0, 0, FLAGS }, + { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS }, + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS }, + { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS }, + { "rule", "set rule", OFFSET(rule), AV_OPT_TYPE_INT, {.i64 = 110}, 0, 255, FLAGS }, + { "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS }, + { "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS }, + { "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS }, + { "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS }, + { "scroll", "scroll pattern downward", OFFSET(scroll), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS }, + { "start_full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS }, + { "full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS }, + { "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(cellauto); + +#ifdef DEBUG +static void show_cellauto_row(AVFilterContext *ctx) +{ + CellAutoContext *cellauto = ctx->priv; + int i; + uint8_t *row = cellauto->buf + cellauto->w * cellauto->buf_row_idx; + char *line = av_malloc(cellauto->w + 1); + if (!line) + return; + + for (i = 0; i < cellauto->w; i++) + line[i] = row[i] ? 
'@' : ' '; + line[i] = 0; + av_log(ctx, AV_LOG_DEBUG, "generation:%"PRId64" row:%s|\n", cellauto->generation, line); + av_free(line); +} +#endif + +static int init_pattern_from_string(AVFilterContext *ctx) +{ + CellAutoContext *cellauto = ctx->priv; + char *p; + int i, w = 0; + + w = strlen(cellauto->pattern); + av_log(ctx, AV_LOG_DEBUG, "w:%d\n", w); + + if (cellauto->w) { + if (w > cellauto->w) { + av_log(ctx, AV_LOG_ERROR, + "The specified width is %d which cannot contain the provided string width of %d\n", + cellauto->w, w); + return AVERROR(EINVAL); + } + } else { + /* width was not specified, set it to width of the provided row */ + cellauto->w = w; + cellauto->h = (double)cellauto->w * M_PHI; + } + + cellauto->buf = av_mallocz(sizeof(uint8_t) * cellauto->w * cellauto->h); + if (!cellauto->buf) + return AVERROR(ENOMEM); + + /* fill buf */ + p = cellauto->pattern; + for (i = (cellauto->w - w)/2;; i++) { + av_log(ctx, AV_LOG_DEBUG, "%d %c\n", i, *p == '\n' ? 'N' : *p); + if (*p == '\n' || !*p) + break; + else + cellauto->buf[i] = !!isgraph(*(p++)); + } + + return 0; +} + +static int init_pattern_from_file(AVFilterContext *ctx) +{ + CellAutoContext *cellauto = ctx->priv; + int ret; + + ret = av_file_map(cellauto->filename, + &cellauto->file_buf, &cellauto->file_bufsize, 0, ctx); + if (ret < 0) + return ret; + + /* create a string based on the read file */ + cellauto->pattern = av_malloc(cellauto->file_bufsize + 1); + if (!cellauto->pattern) + return AVERROR(ENOMEM); + memcpy(cellauto->pattern, cellauto->file_buf, cellauto->file_bufsize); + cellauto->pattern[cellauto->file_bufsize] = 0; + + return init_pattern_from_string(ctx); +} + +static int init(AVFilterContext *ctx, const char *args) +{ + CellAutoContext *cellauto = ctx->priv; + AVRational frame_rate; + int ret; + + cellauto->class = &cellauto_class; + av_opt_set_defaults(cellauto); + + if ((ret = av_set_options_string(cellauto, args, "=", ":")) < 0) + return ret; + + if ((ret = 
av_parse_video_rate(&frame_rate, cellauto->rate)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", cellauto->rate); + return AVERROR(EINVAL); + } + + if (!cellauto->w && !cellauto->filename && !cellauto->pattern) + av_opt_set(cellauto, "size", "320x518", 0); + + cellauto->time_base.num = frame_rate.den; + cellauto->time_base.den = frame_rate.num; + + if (cellauto->filename && cellauto->pattern) { + av_log(ctx, AV_LOG_ERROR, "Only one of the filename or pattern options can be used\n"); + return AVERROR(EINVAL); + } + + if (cellauto->filename) { + if ((ret = init_pattern_from_file(ctx)) < 0) + return ret; + } else if (cellauto->pattern) { + if ((ret = init_pattern_from_string(ctx)) < 0) + return ret; + } else { + /* fill the first row randomly */ + int i; + + cellauto->buf = av_mallocz(sizeof(uint8_t) * cellauto->w * cellauto->h); + if (!cellauto->buf) + return AVERROR(ENOMEM); + if (cellauto->random_seed == -1) + cellauto->random_seed = av_get_random_seed(); + + av_lfg_init(&cellauto->lfg, cellauto->random_seed); + + for (i = 0; i < cellauto->w; i++) { + double r = (double)av_lfg_get(&cellauto->lfg) / UINT32_MAX; + if (r <= cellauto->random_fill_ratio) + cellauto->buf[i] = 1; + } + } + + av_log(ctx, AV_LOG_VERBOSE, + "s:%dx%d r:%d/%d rule:%d stitch:%d scroll:%d full:%d seed:%u\n", + cellauto->w, cellauto->h, frame_rate.num, frame_rate.den, + cellauto->rule, cellauto->stitch, cellauto->scroll, cellauto->start_full, + cellauto->random_seed); + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + CellAutoContext *cellauto = ctx->priv; + + av_file_unmap(cellauto->file_buf, cellauto->file_bufsize); + av_freep(&cellauto->buf); + av_freep(&cellauto->pattern); +} + +static int config_props(AVFilterLink *outlink) +{ + CellAutoContext *cellauto = outlink->src->priv; + + outlink->w = cellauto->w; + outlink->h = cellauto->h; + outlink->time_base = cellauto->time_base; + + return 0; +} + +static void evolve(AVFilterContext *ctx) +{ + 
CellAutoContext *cellauto = ctx->priv; + int i, v, pos[3]; + uint8_t *row, *prev_row = cellauto->buf + cellauto->buf_row_idx * cellauto->w; + enum { NW, N, NE }; + + cellauto->buf_prev_row_idx = cellauto->buf_row_idx; + cellauto->buf_row_idx = cellauto->buf_row_idx == cellauto->h-1 ? 0 : cellauto->buf_row_idx+1; + row = cellauto->buf + cellauto->w * cellauto->buf_row_idx; + + for (i = 0; i < cellauto->w; i++) { + if (cellauto->stitch) { + pos[NW] = i-1 < 0 ? cellauto->w-1 : i-1; + pos[N] = i; + pos[NE] = i+1 == cellauto->w ? 0 : i+1; + v = prev_row[pos[NW]]<<2 | prev_row[pos[N]]<<1 | prev_row[pos[NE]]; + } else { + v = 0; + v|= i-1 >= 0 ? prev_row[i-1]<<2 : 0; + v|= prev_row[i ]<<1 ; + v|= i+1 < cellauto->w ? prev_row[i+1] : 0; + } + row[i] = !!(cellauto->rule & (1<<v)); + av_dlog(ctx, "i:%d context:%c%c%c -> cell:%d\n", i, + v&4?'@':' ', v&2?'@':' ', v&1?'@':' ', row[i]); + } + + cellauto->generation++; +} + +static void fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + CellAutoContext *cellauto = ctx->priv; + int i, j, k, row_idx = 0; + uint8_t *p0 = picref->data[0]; + + if (cellauto->scroll && cellauto->generation >= cellauto->h) + /* show on top the oldest row */ + row_idx = (cellauto->buf_row_idx + 1) % cellauto->h; + + /* fill the output picture with the whole buffer */ + for (i = 0; i < cellauto->h; i++) { + uint8_t byte = 0; + uint8_t *row = cellauto->buf + row_idx*cellauto->w; + uint8_t *p = p0; + for (k = 0, j = 0; j < cellauto->w; j++) { + byte |= row[j]<<(7-k++); + if (k==8 || j == cellauto->w-1) { + k = 0; + *p++ = byte; + byte = 0; + } + } + row_idx = (row_idx + 1) % cellauto->h; + p0 += picref->linesize[0]; + } +} + +static int request_frame(AVFilterLink *outlink) +{ + CellAutoContext *cellauto = outlink->src->priv; + AVFilterBufferRef *picref = + ff_get_video_buffer(outlink, AV_PERM_WRITE, cellauto->w, cellauto->h); + picref->video->sample_aspect_ratio = (AVRational) {1, 1}; + if (cellauto->generation == 0 && cellauto->start_full) { 
+ int i; + for (i = 0; i < cellauto->h-1; i++) + evolve(outlink->src); + } + fill_picture(outlink->src, picref); + evolve(outlink->src); + + picref->pts = cellauto->pts++; + picref->pos = -1; + +#ifdef DEBUG + show_cellauto_row(outlink->src); +#endif + ff_filter_frame(outlink, picref); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_NONE }; + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static const AVFilterPad cellauto_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = config_props, + }, + { NULL } +}; + +AVFilter avfilter_vsrc_cellauto = { + .name = "cellauto", + .description = NULL_IF_CONFIG_SMALL("Create pattern generated by an elementary cellular automaton."), + .priv_size = sizeof(CellAutoContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = NULL, + .outputs = cellauto_outputs, + .priv_class = &cellauto_class, +}; diff --git a/libavfilter/vsrc_color.c b/libavfilter/vsrc_color.c deleted file mode 100644 index c0a4e1c..0000000 --- a/libavfilter/vsrc_color.c +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright (c) 2010 Stefano Sabatini - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * color source - */ - -#include <stdio.h> -#include <string.h> - -#include "avfilter.h" -#include "formats.h" -#include "internal.h" -#include "video.h" -#include "libavutil/pixdesc.h" -#include "libavutil/colorspace.h" -#include "libavutil/imgutils.h" -#include "libavutil/internal.h" -#include "libavutil/mathematics.h" -#include "libavutil/mem.h" -#include "libavutil/parseutils.h" -#include "drawutils.h" - -typedef struct { - int w, h; - uint8_t color[4]; - AVRational time_base; - uint8_t *line[4]; - int line_step[4]; - int hsub, vsub; ///< chroma subsampling values - uint64_t pts; -} ColorContext; - -static av_cold int color_init(AVFilterContext *ctx, const char *args) -{ - ColorContext *color = ctx->priv; - char color_string[128] = "black"; - char frame_size [128] = "320x240"; - char frame_rate [128] = "25"; - AVRational frame_rate_q; - int ret; - - if (args) - sscanf(args, "%127[^:]:%127[^:]:%127s", color_string, frame_size, frame_rate); - - if (av_parse_video_size(&color->w, &color->h, frame_size) < 0) { - av_log(ctx, AV_LOG_ERROR, "Invalid frame size: %s\n", frame_size); - return AVERROR(EINVAL); - } - - if (av_parse_video_rate(&frame_rate_q, frame_rate) < 0 || - frame_rate_q.den <= 0 || frame_rate_q.num <= 0) { - av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", frame_rate); - return AVERROR(EINVAL); - } - color->time_base.num = frame_rate_q.den; - color->time_base.den = frame_rate_q.num; - - if ((ret = av_parse_color(color->color, color_string, -1, ctx)) < 0) - return ret; - - return 0; -} - -static av_cold void color_uninit(AVFilterContext *ctx) -{ - ColorContext *color = ctx->priv; - int i; - - for (i = 0; i < 4; i++) { - av_freep(&color->line[i]); - color->line_step[i] = 0; - } -} - -static int 
query_formats(AVFilterContext *ctx) -{ - static const enum AVPixelFormat pix_fmts[] = { - AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, - AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, - AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, - - AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, - AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, - AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, - AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, - AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P, - AV_PIX_FMT_YUVA420P, - - AV_PIX_FMT_NONE - }; - - ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); - return 0; -} - -static int color_config_props(AVFilterLink *inlink) -{ - AVFilterContext *ctx = inlink->src; - ColorContext *color = ctx->priv; - uint8_t rgba_color[4]; - int is_packed_rgba; - const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); - - color->hsub = pix_desc->log2_chroma_w; - color->vsub = pix_desc->log2_chroma_h; - - color->w &= ~((1 << color->hsub) - 1); - color->h &= ~((1 << color->vsub) - 1); - if (av_image_check_size(color->w, color->h, 0, ctx) < 0) - return AVERROR(EINVAL); - - memcpy(rgba_color, color->color, sizeof(rgba_color)); - ff_fill_line_with_color(color->line, color->line_step, color->w, color->color, - inlink->format, rgba_color, &is_packed_rgba, NULL); - - av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d r:%d/%d color:0x%02x%02x%02x%02x[%s]\n", - color->w, color->h, color->time_base.den, color->time_base.num, - color->color[0], color->color[1], color->color[2], color->color[3], - is_packed_rgba ? 
"rgba" : "yuva"); - inlink->w = color->w; - inlink->h = color->h; - inlink->time_base = color->time_base; - - return 0; -} - -static int color_request_frame(AVFilterLink *link) -{ - ColorContext *color = link->src->priv; - AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h); - - if (!picref) - return AVERROR(ENOMEM); - - picref->video->pixel_aspect = (AVRational) {1, 1}; - picref->pts = color->pts++; - picref->pos = -1; - - ff_draw_rectangle(picref->data, picref->linesize, - color->line, color->line_step, color->hsub, color->vsub, - 0, 0, color->w, color->h); - return ff_filter_frame(link, picref); -} - -static const AVFilterPad avfilter_vsrc_color_outputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .request_frame = color_request_frame, - .config_props = color_config_props - }, - { NULL } -}; - -AVFilter avfilter_vsrc_color = { - .name = "color", - .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input, syntax is: [color[:size[:rate]]]"), - - .priv_size = sizeof(ColorContext), - .init = color_init, - .uninit = color_uninit, - - .query_formats = query_formats, - - .inputs = NULL, - - .outputs = avfilter_vsrc_color_outputs, -}; diff --git a/libavfilter/vsrc_life.c b/libavfilter/vsrc_life.c new file mode 100644 index 0000000..d8b8355 --- /dev/null +++ b/libavfilter/vsrc_life.c @@ -0,0 +1,483 @@ +/* + * Copyright (c) Stefano Sabatini 2010 + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * life video source, based on John Conways' Life Game + */ + +/* #define DEBUG */ + +#include "libavutil/file.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/lfg.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "libavutil/random_seed.h" +#include "avfilter.h" +#include "internal.h" +#include "formats.h" +#include "video.h" + +typedef struct { + const AVClass *class; + int w, h; + char *filename; + char *rule_str; + uint8_t *file_buf; + size_t file_bufsize; + + /** + * The two grid state buffers. + * + * A 0xFF (ALIVE_CELL) value means the cell is alive (or new born), while + * the decreasing values from 0xFE to 0 means the cell is dead; the range + * of values is used for the slow death effect, or mold (0xFE means dead, + * 0xFD means very dead, 0xFC means very very dead... and 0x00 means + * definitely dead/mold). 
+ */ + uint8_t *buf[2]; + + uint8_t buf_idx; + uint16_t stay_rule; ///< encode the behavior for filled cells + uint16_t born_rule; ///< encode the behavior for empty cells + uint64_t pts; + AVRational time_base; + char *rate; ///< video frame rate + double random_fill_ratio; + uint32_t random_seed; + int stitch; + int mold; + char *life_color_str; + char *death_color_str; + char *mold_color_str; + uint8_t life_color[4]; + uint8_t death_color[4]; + uint8_t mold_color[4]; + AVLFG lfg; + void (*draw)(AVFilterContext*, AVFilterBufferRef*); +} LifeContext; + +#define ALIVE_CELL 0xFF +#define OFFSET(x) offsetof(LifeContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption life_options[] = { + { "filename", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "f", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS }, + { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS }, + { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS }, + { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS }, + { "rule", "set rule", OFFSET(rule_str), AV_OPT_TYPE_STRING, {.str = "B3/S23"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS }, + { "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS }, + { "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS }, + { "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, 
FLAGS }, + { "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS }, + { "mold", "set mold speed for dead cells", OFFSET(mold), AV_OPT_TYPE_INT, {.i64=0}, 0, 0xFF, FLAGS }, + { "life_color", "set life color", OFFSET( life_color_str), AV_OPT_TYPE_STRING, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "death_color", "set death color", OFFSET(death_color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "mold_color", "set mold color", OFFSET( mold_color_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(life); + +static int parse_rule(uint16_t *born_rule, uint16_t *stay_rule, + const char *rule_str, void *log_ctx) +{ + char *tail; + const char *p = rule_str; + *born_rule = 0; + *stay_rule = 0; + + if (strchr("bBsS", *p)) { + /* parse rule as a Born / Stay Alive code, see + * http://en.wikipedia.org/wiki/Conway%27s_Game_of_Life */ + do { + uint16_t *rule = (*p == 'b' || *p == 'B') ? born_rule : stay_rule; + p++; + while (*p >= '0' && *p <= '8') { + *rule += 1<<(*p - '0'); + p++; + } + if (*p != '/') + break; + p++; + } while (strchr("bBsS", *p)); + + if (*p) + goto error; + } else { + /* parse rule as a number, expressed in the form STAY|(BORN<<9), + * where STAY and BORN encode the corresponding 9-bits rule */ + long int rule = strtol(rule_str, &tail, 10); + if (*tail) + goto error; + *born_rule = ((1<<9)-1) & rule; + *stay_rule = rule >> 9; + } + + return 0; + +error: + av_log(log_ctx, AV_LOG_ERROR, "Invalid rule code '%s' provided\n", rule_str); + return AVERROR(EINVAL); +} + +#ifdef DEBUG +static void show_life_grid(AVFilterContext *ctx) +{ + LifeContext *life = ctx->priv; + int i, j; + + char *line = av_malloc(life->w + 1); + if (!line) + return; + for (i = 0; i < life->h; i++) { + for (j = 0; j < life->w; j++) + line[j] = life->buf[life->buf_idx][i*life->w + j] == ALIVE_CELL ? 
'@' : ' '; + line[j] = 0; + av_log(ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line); + } + av_free(line); +} +#endif + +static int init_pattern_from_file(AVFilterContext *ctx) +{ + LifeContext *life = ctx->priv; + char *p; + int ret, i, i0, j, h = 0, w, max_w = 0; + + if ((ret = av_file_map(life->filename, &life->file_buf, &life->file_bufsize, + 0, ctx)) < 0) + return ret; + av_freep(&life->filename); + + /* prescan file to get the number of lines and the maximum width */ + w = 0; + for (i = 0; i < life->file_bufsize; i++) { + if (life->file_buf[i] == '\n') { + h++; max_w = FFMAX(w, max_w); w = 0; + } else { + w++; + } + } + av_log(ctx, AV_LOG_DEBUG, "h:%d max_w:%d\n", h, max_w); + + if (life->w) { + if (max_w > life->w || h > life->h) { + av_log(ctx, AV_LOG_ERROR, + "The specified size is %dx%d which cannot contain the provided file size of %dx%d\n", + life->w, life->h, max_w, h); + return AVERROR(EINVAL); + } + } else { + /* size was not specified, set it to size of the grid */ + life->w = max_w; + life->h = h; + } + + if (!(life->buf[0] = av_mallocz(sizeof(char) * life->h * life->w)) || + !(life->buf[1] = av_mallocz(sizeof(char) * life->h * life->w))) { + av_free(life->buf[0]); + av_free(life->buf[1]); + return AVERROR(ENOMEM); + } + + /* fill buf[0] */ + p = life->file_buf; + for (i0 = 0, i = (life->h - h)/2; i0 < h; i0++, i++) { + for (j = (life->w - max_w)/2;; j++) { + av_log(ctx, AV_LOG_DEBUG, "%d:%d %c\n", i, j, *p == '\n' ? 'N' : *p); + if (*p == '\n') { + p++; break; + } else + life->buf[0][i*life->w + j] = isgraph(*(p++)) ? 
ALIVE_CELL : 0; + } + } + life->buf_idx = 0; + + return 0; +} + +static int init(AVFilterContext *ctx, const char *args) +{ + LifeContext *life = ctx->priv; + AVRational frame_rate; + int ret; + + life->class = &life_class; + av_opt_set_defaults(life); + + if ((ret = av_set_options_string(life, args, "=", ":")) < 0) + return ret; + + if ((ret = av_parse_video_rate(&frame_rate, life->rate)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", life->rate); + return AVERROR(EINVAL); + } + av_freep(&life->rate); + + if (!life->w && !life->filename) + av_opt_set(life, "size", "320x240", 0); + + if ((ret = parse_rule(&life->born_rule, &life->stay_rule, life->rule_str, ctx)) < 0) + return ret; + +#define PARSE_COLOR(name) do { \ + if ((ret = av_parse_color(life->name ## _color, life->name ## _color_str, -1, ctx))) { \ + av_log(ctx, AV_LOG_ERROR, "Invalid " #name " color '%s'\n", \ + life->name ## _color_str); \ + return ret; \ + } \ + av_freep(&life->name ## _color_str); \ +} while (0) + + PARSE_COLOR(life); + PARSE_COLOR(death); + PARSE_COLOR(mold); + + if (!life->mold && memcmp(life->mold_color, "\x00\x00\x00", 3)) + av_log(ctx, AV_LOG_WARNING, + "Mold color is set while mold isn't, ignoring the color.\n"); + + life->time_base.num = frame_rate.den; + life->time_base.den = frame_rate.num; + + if (!life->filename) { + /* fill the grid randomly */ + int i; + + if (!(life->buf[0] = av_mallocz(sizeof(char) * life->h * life->w)) || + !(life->buf[1] = av_mallocz(sizeof(char) * life->h * life->w))) { + av_free(life->buf[0]); + av_free(life->buf[1]); + return AVERROR(ENOMEM); + } + if (life->random_seed == -1) + life->random_seed = av_get_random_seed(); + + av_lfg_init(&life->lfg, life->random_seed); + + for (i = 0; i < life->w * life->h; i++) { + double r = (double)av_lfg_get(&life->lfg) / UINT32_MAX; + if (r <= life->random_fill_ratio) + life->buf[0][i] = ALIVE_CELL; + } + life->buf_idx = 0; + } else { + if ((ret = init_pattern_from_file(ctx)) < 0) + return ret; + } 
+ + av_log(ctx, AV_LOG_VERBOSE, + "s:%dx%d r:%d/%d rule:%s stay_rule:%d born_rule:%d stitch:%d seed:%u\n", + life->w, life->h, frame_rate.num, frame_rate.den, + life->rule_str, life->stay_rule, life->born_rule, life->stitch, + life->random_seed); + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + LifeContext *life = ctx->priv; + + av_file_unmap(life->file_buf, life->file_bufsize); + av_freep(&life->rule_str); + av_freep(&life->buf[0]); + av_freep(&life->buf[1]); +} + +static int config_props(AVFilterLink *outlink) +{ + LifeContext *life = outlink->src->priv; + + outlink->w = life->w; + outlink->h = life->h; + outlink->time_base = life->time_base; + + return 0; +} + +static void evolve(AVFilterContext *ctx) +{ + LifeContext *life = ctx->priv; + int i, j; + uint8_t *oldbuf = life->buf[ life->buf_idx]; + uint8_t *newbuf = life->buf[!life->buf_idx]; + + enum { NW, N, NE, W, E, SW, S, SE }; + + /* evolve the grid */ + for (i = 0; i < life->h; i++) { + for (j = 0; j < life->w; j++) { + int pos[8][2], n, alive, cell; + if (life->stitch) { + pos[NW][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NW][1] = (j-1) < 0 ? life->w-1 : j-1; + pos[N ][0] = (i-1) < 0 ? life->h-1 : i-1; pos[N ][1] = j ; + pos[NE][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NE][1] = (j+1) == life->w ? 0 : j+1; + pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? life->w-1 : j-1; + pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? 0 : j+1; + pos[SW][0] = (i+1) == life->h ? 0 : i+1; pos[SW][1] = (j-1) < 0 ? life->w-1 : j-1; + pos[S ][0] = (i+1) == life->h ? 0 : i+1; pos[S ][1] = j ; + pos[SE][0] = (i+1) == life->h ? 0 : i+1; pos[SE][1] = (j+1) == life->w ? 0 : j+1; + } else { + pos[NW][0] = (i-1) < 0 ? -1 : i-1; pos[NW][1] = (j-1) < 0 ? -1 : j-1; + pos[N ][0] = (i-1) < 0 ? -1 : i-1; pos[N ][1] = j ; + pos[NE][0] = (i-1) < 0 ? -1 : i-1; pos[NE][1] = (j+1) == life->w ? -1 : j+1; + pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? -1 : j-1; + pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? 
-1 : j+1; + pos[SW][0] = (i+1) == life->h ? -1 : i+1; pos[SW][1] = (j-1) < 0 ? -1 : j-1; + pos[S ][0] = (i+1) == life->h ? -1 : i+1; pos[S ][1] = j ; + pos[SE][0] = (i+1) == life->h ? -1 : i+1; pos[SE][1] = (j+1) == life->w ? -1 : j+1; + } + + /* compute the number of live neighbor cells */ + n = (pos[NW][0] == -1 || pos[NW][1] == -1 ? 0 : oldbuf[pos[NW][0]*life->w + pos[NW][1]] == ALIVE_CELL) + + (pos[N ][0] == -1 || pos[N ][1] == -1 ? 0 : oldbuf[pos[N ][0]*life->w + pos[N ][1]] == ALIVE_CELL) + + (pos[NE][0] == -1 || pos[NE][1] == -1 ? 0 : oldbuf[pos[NE][0]*life->w + pos[NE][1]] == ALIVE_CELL) + + (pos[W ][0] == -1 || pos[W ][1] == -1 ? 0 : oldbuf[pos[W ][0]*life->w + pos[W ][1]] == ALIVE_CELL) + + (pos[E ][0] == -1 || pos[E ][1] == -1 ? 0 : oldbuf[pos[E ][0]*life->w + pos[E ][1]] == ALIVE_CELL) + + (pos[SW][0] == -1 || pos[SW][1] == -1 ? 0 : oldbuf[pos[SW][0]*life->w + pos[SW][1]] == ALIVE_CELL) + + (pos[S ][0] == -1 || pos[S ][1] == -1 ? 0 : oldbuf[pos[S ][0]*life->w + pos[S ][1]] == ALIVE_CELL) + + (pos[SE][0] == -1 || pos[SE][1] == -1 ? 0 : oldbuf[pos[SE][0]*life->w + pos[SE][1]] == ALIVE_CELL); + cell = oldbuf[i*life->w + j]; + alive = 1<<n & (cell == ALIVE_CELL ? 
life->stay_rule : life->born_rule); + if (alive) *newbuf = ALIVE_CELL; // new cell is alive + else if (cell) *newbuf = cell - 1; // new cell is dead and in the process of mold + else *newbuf = 0; // new cell is definitely dead + av_dlog(ctx, "i:%d j:%d live_neighbors:%d cell:%d -> cell:%d\n", i, j, n, cell, *newbuf); + newbuf++; + } + } + + life->buf_idx = !life->buf_idx; +} + +static void fill_picture_monoblack(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + LifeContext *life = ctx->priv; + uint8_t *buf = life->buf[life->buf_idx]; + int i, j, k; + + /* fill the output picture with the old grid buffer */ + for (i = 0; i < life->h; i++) { + uint8_t byte = 0; + uint8_t *p = picref->data[0] + i * picref->linesize[0]; + for (k = 0, j = 0; j < life->w; j++) { + byte |= (buf[i*life->w+j] == ALIVE_CELL)<<(7-k++); + if (k==8 || j == life->w-1) { + k = 0; + *p++ = byte; + byte = 0; + } + } + } +} + +// divide by 255 and round to nearest +// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16 +#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16) + +static void fill_picture_rgb(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + LifeContext *life = ctx->priv; + uint8_t *buf = life->buf[life->buf_idx]; + int i, j; + + /* fill the output picture with the old grid buffer */ + for (i = 0; i < life->h; i++) { + uint8_t *p = picref->data[0] + i * picref->linesize[0]; + for (j = 0; j < life->w; j++) { + uint8_t v = buf[i*life->w + j]; + if (life->mold && v != ALIVE_CELL) { + const uint8_t *c1 = life-> mold_color; + const uint8_t *c2 = life->death_color; + int death_age = FFMIN((0xff - v) * life->mold, 0xff); + *p++ = FAST_DIV255((c2[0] << 8) + ((int)c1[0] - (int)c2[0]) * death_age); + *p++ = FAST_DIV255((c2[1] << 8) + ((int)c1[1] - (int)c2[1]) * death_age); + *p++ = FAST_DIV255((c2[2] << 8) + ((int)c1[2] - (int)c2[2]) * death_age); + } else { + const uint8_t *c = v == ALIVE_CELL ? 
life->life_color : life->death_color; + AV_WB24(p, c[0]<<16 | c[1]<<8 | c[2]); + p += 3; + } + } + } +} + +static int request_frame(AVFilterLink *outlink) +{ + LifeContext *life = outlink->src->priv; + AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, life->w, life->h); + picref->video->sample_aspect_ratio = (AVRational) {1, 1}; + picref->pts = life->pts++; + picref->pos = -1; + + life->draw(outlink->src, picref); + evolve(outlink->src); +#ifdef DEBUG + show_life_grid(outlink->src); +#endif + ff_filter_frame(outlink, picref); + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + LifeContext *life = ctx->priv; + enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_NONE, AV_PIX_FMT_NONE }; + if (life->mold || memcmp(life-> life_color, "\xff\xff\xff", 3) + || memcmp(life->death_color, "\x00\x00\x00", 3)) { + pix_fmts[0] = AV_PIX_FMT_RGB24; + life->draw = fill_picture_rgb; + } else { + pix_fmts[0] = AV_PIX_FMT_MONOBLACK; + life->draw = fill_picture_monoblack; + } + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static const AVFilterPad life_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = config_props, + }, + { NULL} +}; + +AVFilter avfilter_vsrc_life = { + .name = "life", + .description = NULL_IF_CONFIG_SMALL("Create life."), + .priv_size = sizeof(LifeContext), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .inputs = NULL, + .outputs = life_outputs, + .priv_class = &life_class, +}; diff --git a/libavfilter/vsrc_mandelbrot.c b/libavfilter/vsrc_mandelbrot.c new file mode 100644 index 0000000..1244edf --- /dev/null +++ b/libavfilter/vsrc_mandelbrot.c @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2011 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * The vsrc_color filter from Stefano Sabatini was used as template to create + * this + */ + +/** + * @file + * Mandelbrot fraktal renderer + */ + +#include "avfilter.h" +#include "formats.h" +#include "video.h" +#include "internal.h" +#include "libavutil/imgutils.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include <float.h> +#include <math.h> + +#define SQR(a) ((a)*(a)) + +enum Outer{ + ITERATION_COUNT, + NORMALIZED_ITERATION_COUNT, +}; + +enum Inner{ + BLACK, + PERIOD, + CONVTIME, + MINCOL, +}; + +typedef struct Point { + double p[2]; + uint32_t val; +} Point; + +typedef struct { + const AVClass *class; + int w, h; + AVRational time_base; + uint64_t pts; + char *rate; + int maxiter; + double start_x; + double start_y; + double start_scale; + double end_scale; + double end_pts; + double bailout; + enum Outer outer; + enum Inner inner; + int cache_allocated; + int cache_used; + Point *point_cache; + Point *next_cache; + double (*zyklus)[2]; + uint32_t dither; +} MBContext; + +#define OFFSET(x) offsetof(MBContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption mandelbrot_options[] = { + {"size", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, 
CHAR_MIN, CHAR_MAX, FLAGS }, + {"s", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS }, + {"rate", "set frame rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS }, + {"r", "set frame rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS }, + {"maxiter", "set max iterations number", OFFSET(maxiter), AV_OPT_TYPE_INT, {.i64=7189}, 1, INT_MAX, FLAGS }, + {"start_x", "set the initial x position", OFFSET(start_x), AV_OPT_TYPE_DOUBLE, {.dbl=-0.743643887037158704752191506114774}, -100, 100, FLAGS }, + {"start_y", "set the initial y position", OFFSET(start_y), AV_OPT_TYPE_DOUBLE, {.dbl=-0.131825904205311970493132056385139}, -100, 100, FLAGS }, + {"start_scale", "set the initial scale value", OFFSET(start_scale), AV_OPT_TYPE_DOUBLE, {.dbl=3.0}, 0, FLT_MAX, FLAGS }, + {"end_scale", "set the terminal scale value", OFFSET(end_scale), AV_OPT_TYPE_DOUBLE, {.dbl=0.3}, 0, FLT_MAX, FLAGS }, + {"end_pts", "set the terminal pts value", OFFSET(end_pts), AV_OPT_TYPE_DOUBLE, {.dbl=400}, 0, INT64_MAX, FLAGS }, + {"bailout", "set the bailout value", OFFSET(bailout), AV_OPT_TYPE_DOUBLE, {.dbl=10}, 0, FLT_MAX, FLAGS }, + + {"outer", "set outer coloring mode", OFFSET(outer), AV_OPT_TYPE_INT, {.i64=NORMALIZED_ITERATION_COUNT}, 0, INT_MAX, FLAGS, "outer" }, + {"iteration_count", "set iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" }, + {"normalized_iteration_count", "set normalized iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=NORMALIZED_ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" }, + + {"inner", "set inner coloring mode", OFFSET(inner), AV_OPT_TYPE_INT, {.i64=MINCOL}, 0, INT_MAX, FLAGS, "inner" }, + {"black", "set black mode", 0, AV_OPT_TYPE_CONST, {.i64=BLACK}, INT_MIN, INT_MAX, FLAGS, "inner"}, + {"period", "set period mode", 0, AV_OPT_TYPE_CONST, {.i64=PERIOD}, INT_MIN, INT_MAX, FLAGS, "inner"}, + {"convergence", "show 
time until convergence", 0, AV_OPT_TYPE_CONST, {.i64=CONVTIME}, INT_MIN, INT_MAX, FLAGS, "inner"}, + {"mincol", "color based on point closest to the origin of the iterations", 0, AV_OPT_TYPE_CONST, {.i64=MINCOL}, INT_MIN, INT_MAX, FLAGS, "inner"}, + + {NULL}, +}; + +AVFILTER_DEFINE_CLASS(mandelbrot); + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + MBContext *mb = ctx->priv; + AVRational rate_q; + int err; + + mb->class = &mandelbrot_class; + av_opt_set_defaults(mb); + + if ((err = (av_set_options_string(mb, args, "=", ":"))) < 0) + return err; + mb->bailout *= mb->bailout; + + mb->start_scale /=mb->h; + mb->end_scale /=mb->h; + + if (av_parse_video_rate(&rate_q, mb->rate) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", mb->rate); + return AVERROR(EINVAL); + } + mb->time_base.num = rate_q.den; + mb->time_base.den = rate_q.num; + + mb->cache_allocated = mb->w * mb->h * 3; + mb->cache_used = 0; + mb->point_cache= av_malloc(sizeof(*mb->point_cache)*mb->cache_allocated); + mb-> next_cache= av_malloc(sizeof(*mb-> next_cache)*mb->cache_allocated); + mb-> zyklus = av_malloc(sizeof(*mb->zyklus) * (mb->maxiter+16)); + + return 0; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + MBContext *mb = ctx->priv; + + av_freep(&mb->rate); + av_freep(&mb->point_cache); + av_freep(&mb-> next_cache); + av_freep(&mb->zyklus); +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_BGR32, + AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int config_props(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->src; + MBContext *mb = ctx->priv; + + if (av_image_check_size(mb->w, mb->h, 0, ctx) < 0) + return AVERROR(EINVAL); + + inlink->w = mb->w; + inlink->h = mb->h; + inlink->time_base = mb->time_base; + + return 0; +} + +static void fill_from_cache(AVFilterContext *ctx, uint32_t *color, int *in_cidx, int *out_cidx, 
double py, double scale){ + MBContext *mb = ctx->priv; + for(; *in_cidx < mb->cache_used; (*in_cidx)++){ + Point *p= &mb->point_cache[*in_cidx]; + int x; + if(p->p[1] > py) + break; + x= round((p->p[0] - mb->start_x) / scale + mb->w/2); + if(x<0 || x >= mb->w) + continue; + if(color) color[x] = p->val; + if(out_cidx && *out_cidx < mb->cache_allocated) + mb->next_cache[(*out_cidx)++]= *p; + } +} + +static int interpol(MBContext *mb, uint32_t *color, int x, int y, int linesize) +{ + uint32_t a,b,c,d, i; + uint32_t ipol=0xFF000000; + int dist; + + if(!x || !y || x+1==mb->w || y+1==mb->h) + return 0; + + dist= FFMAX(FFABS(x-(mb->w>>1))*mb->h, FFABS(y-(mb->h>>1))*mb->w); + + if(dist<(mb->w*mb->h>>3)) + return 0; + + a=color[(x+1) + (y+0)*linesize]; + b=color[(x-1) + (y+1)*linesize]; + c=color[(x+0) + (y+1)*linesize]; + d=color[(x+1) + (y+1)*linesize]; + + if(a&&c){ + b= color[(x-1) + (y+0)*linesize]; + d= color[(x+0) + (y-1)*linesize]; + }else if(b&&d){ + a= color[(x+1) + (y-1)*linesize]; + c= color[(x-1) + (y-1)*linesize]; + }else if(c){ + d= color[(x+0) + (y-1)*linesize]; + a= color[(x-1) + (y+0)*linesize]; + b= color[(x+1) + (y-1)*linesize]; + }else if(d){ + c= color[(x-1) + (y-1)*linesize]; + a= color[(x-1) + (y+0)*linesize]; + b= color[(x+1) + (y-1)*linesize]; + }else + return 0; + + for(i=0; i<3; i++){ + int s= 8*i; + uint8_t ac= a>>s; + uint8_t bc= b>>s; + uint8_t cc= c>>s; + uint8_t dc= d>>s; + int ipolab= (ac + bc); + int ipolcd= (cc + dc); + if(FFABS(ipolab - ipolcd) > 5) + return 0; + if(FFABS(ac-bc)+FFABS(cc-dc) > 20) + return 0; + ipol |= ((ipolab + ipolcd + 2)/4)<<s; + } + color[x + y*linesize]= ipol; + return 1; +} + +static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize, int64_t pts) +{ + MBContext *mb = ctx->priv; + int x,y,i, in_cidx=0, next_cidx=0, tmp_cidx; + double scale= mb->start_scale*pow(mb->end_scale/mb->start_scale, pts/mb->end_pts); + int use_zyklus=0; + fill_from_cache(ctx, NULL, &in_cidx, NULL, 
mb->start_y+scale*(-mb->h/2-0.5), scale); + tmp_cidx= in_cidx; + memset(color, 0, sizeof(*color)*mb->w); + for(y=0; y<mb->h; y++){ + int y1= y+1; + const double ci=mb->start_y+scale*(y-mb->h/2); + fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci, scale); + if(y1<mb->h){ + memset(color+linesize*y1, 0, sizeof(*color)*mb->w); + fill_from_cache(ctx, color+linesize*y1, &tmp_cidx, NULL, ci + 3*scale/2, scale); + } + + for(x=0; x<mb->w; x++){ + float av_uninit(epsilon); + const double cr=mb->start_x+scale*(x-mb->w/2); + double zr=cr; + double zi=ci; + uint32_t c=0; + double dv= mb->dither / (double)(1LL<<32); + mb->dither= mb->dither*1664525+1013904223; + + if(color[x + y*linesize] & 0xFF000000) + continue; + if(interpol(mb, color, x, y, linesize)){ + if(next_cidx < mb->cache_allocated){ + mb->next_cache[next_cidx ].p[0]= cr; + mb->next_cache[next_cidx ].p[1]= ci; + mb->next_cache[next_cidx++].val = color[x + y*linesize]; + } + continue; + } + + use_zyklus= (x==0 || mb->inner!=BLACK ||color[x-1 + y*linesize] == 0xFF000000); + if(use_zyklus) + epsilon= scale*1*sqrt(SQR(x-mb->w/2) + SQR(y-mb->h/2))/mb->w; + +#define Z_Z2_C(outr,outi,inr,ini)\ + outr= inr*inr - ini*ini + cr;\ + outi= 2*inr*ini + ci; + +#define Z_Z2_C_ZYKLUS(outr,outi,inr,ini, Z)\ + Z_Z2_C(outr,outi,inr,ini)\ + if(use_zyklus){\ + if(Z && fabs(mb->zyklus[i>>1][0]-outr)+fabs(mb->zyklus[i>>1][1]-outi) <= epsilon)\ + break;\ + }\ + mb->zyklus[i][0]= outr;\ + mb->zyklus[i][1]= outi;\ + + + + for(i=0; i<mb->maxiter-8; i++){ + double t; + Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0) + i++; + Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1) + i++; + Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0) + i++; + Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1) + i++; + Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0) + i++; + Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1) + i++; + Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0) + i++; + Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1) + if(zr*zr + zi*zi > mb->bailout){ + i-= FFMIN(7, i); + for(; i<mb->maxiter; i++){ + zr= mb->zyklus[i][0]; + zi= mb->zyklus[i][1]; + if(zr*zr + zi*zi > 
mb->bailout){ + switch(mb->outer){ + case ITERATION_COUNT: zr = i; break; + case NORMALIZED_ITERATION_COUNT: zr= i + log2(log(mb->bailout) / log(zr*zr + zi*zi)); break; + } + c= lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256; + break; + } + } + break; + } + } + if(!c){ + if(mb->inner==PERIOD){ + int j; + for(j=i-1; j; j--) + if(SQR(mb->zyklus[j][0]-zr) + SQR(mb->zyklus[j][1]-zi) < epsilon*epsilon*10) + break; + if(j){ + c= i-j; + c= ((c<<5)&0xE0) + ((c<<10)&0xE000) + ((c<<15)&0xE00000); + } + }else if(mb->inner==CONVTIME){ + c= floor(i*255.0/mb->maxiter+dv)*0x010101; + } else if(mb->inner==MINCOL){ + int j; + double closest=9999; + int closest_index=0; + for(j=i-1; j>=0; j--) + if(SQR(mb->zyklus[j][0]) + SQR(mb->zyklus[j][1]) < closest){ + closest= SQR(mb->zyklus[j][0]) + SQR(mb->zyklus[j][1]); + closest_index= j; + } + closest = sqrt(closest); + c= lrintf((mb->zyklus[closest_index][0]/closest+1)*127+dv) + lrintf((mb->zyklus[closest_index][1]/closest+1)*127+dv)*256; + } + } + c |= 0xFF000000; + color[x + y*linesize]= c; + if(next_cidx < mb->cache_allocated){ + mb->next_cache[next_cidx ].p[0]= cr; + mb->next_cache[next_cidx ].p[1]= ci; + mb->next_cache[next_cidx++].val = c; + } + } + fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci + scale/2, scale); + } + FFSWAP(void*, mb->next_cache, mb->point_cache); + mb->cache_used = next_cidx; + if(mb->cache_used == mb->cache_allocated) + av_log(ctx, AV_LOG_INFO, "Mandelbrot cache is too small!\n"); +} + +static int request_frame(AVFilterLink *link) +{ + MBContext *mb = link->src->priv; + AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, mb->w, mb->h); + picref->video->sample_aspect_ratio = (AVRational) {1, 1}; + picref->pts = mb->pts++; + picref->pos = -1; + + draw_mandelbrot(link->src, (uint32_t*)picref->data[0], picref->linesize[0]/4, picref->pts); + ff_filter_frame(link, picref); + + return 0; +} + +static const AVFilterPad mandelbrot_outputs[] = { + 
{ + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = config_props, + }, + { NULL }, +}; + +AVFilter avfilter_vsrc_mandelbrot = { + .name = "mandelbrot", + .description = NULL_IF_CONFIG_SMALL("Render a Mandelbrot fractal."), + + .priv_size = sizeof(MBContext), + .init = init, + .uninit = uninit, + + .query_formats = query_formats, + .inputs = NULL, + .outputs = mandelbrot_outputs, + .priv_class = &mandelbrot_class, +}; diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c deleted file mode 100644 index 0023d55..0000000 --- a/libavfilter/vsrc_movie.c +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Copyright (c) 2010 Stefano Sabatini - * Copyright (c) 2008 Victor Paesa - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * movie video source - * - * @todo use direct rendering (no allocation of a new frame) - * @todo support a PTS correction mechanism - * @todo support more than one output stream - */ - -/* #define DEBUG */ - -#include <float.h> -#include "libavutil/avstring.h" -#include "libavutil/opt.h" -#include "libavutil/imgutils.h" -#include "libavformat/avformat.h" -#include "avfilter.h" -#include "formats.h" -#include "internal.h" -#include "video.h" - -typedef struct { - const AVClass *class; - int64_t seek_point; ///< seekpoint in microseconds - double seek_point_d; - char *format_name; - char *file_name; - int stream_index; - - AVFormatContext *format_ctx; - AVCodecContext *codec_ctx; - int is_done; - AVFrame *frame; ///< video frame to store the decoded images in - - int w, h; - AVFilterBufferRef *picref; -} MovieContext; - -#define OFFSET(x) offsetof(MovieContext, x) - -static const AVOption movie_options[]= { -{"format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX }, -{"f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX }, -{"stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX }, -{"si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX }, -{"seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000 }, -{"sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000 }, -{NULL}, -}; - -static const char *movie_get_name(void *ctx) -{ - return "movie"; -} - -static const AVClass movie_class = { - "MovieContext", - movie_get_name, - movie_options 
-}; - -static int movie_init(AVFilterContext *ctx) -{ - MovieContext *movie = ctx->priv; - AVInputFormat *iformat = NULL; - AVCodec *codec; - int ret; - int64_t timestamp; - - av_register_all(); - - // Try to find the movie format (container) - iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL; - - movie->format_ctx = NULL; - if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) { - av_log(ctx, AV_LOG_ERROR, - "Failed to avformat_open_input '%s'\n", movie->file_name); - return ret; - } - if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0) - av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n"); - - // if seeking requested, we execute it - if (movie->seek_point > 0) { - timestamp = movie->seek_point; - // add the stream start time, should it exist - if (movie->format_ctx->start_time != AV_NOPTS_VALUE) { - if (timestamp > INT64_MAX - movie->format_ctx->start_time) { - av_log(ctx, AV_LOG_ERROR, - "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n", - movie->file_name, movie->format_ctx->start_time, movie->seek_point); - return AVERROR(EINVAL); - } - timestamp += movie->format_ctx->start_time; - } - if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) { - av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n", - movie->file_name, timestamp); - return ret; - } - } - - /* select the video stream */ - if ((ret = av_find_best_stream(movie->format_ctx, AVMEDIA_TYPE_VIDEO, - movie->stream_index, -1, NULL, 0)) < 0) { - av_log(ctx, AV_LOG_ERROR, "No video stream with index '%d' found\n", - movie->stream_index); - return ret; - } - movie->stream_index = ret; - movie->codec_ctx = movie->format_ctx->streams[movie->stream_index]->codec; - - /* - * So now we've got a pointer to the so-called codec context for our video - * stream, but we still have to find the actual codec and open it. 
- */ - codec = avcodec_find_decoder(movie->codec_ctx->codec_id); - if (!codec) { - av_log(ctx, AV_LOG_ERROR, "Failed to find any codec\n"); - return AVERROR(EINVAL); - } - - if ((ret = avcodec_open2(movie->codec_ctx, codec, NULL)) < 0) { - av_log(ctx, AV_LOG_ERROR, "Failed to open codec\n"); - return ret; - } - - if (!(movie->frame = avcodec_alloc_frame()) ) { - av_log(ctx, AV_LOG_ERROR, "Failed to alloc frame\n"); - return AVERROR(ENOMEM); - } - - movie->w = movie->codec_ctx->width; - movie->h = movie->codec_ctx->height; - - av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n", - movie->seek_point, movie->format_name, movie->file_name, - movie->stream_index); - - return 0; -} - -static av_cold int init(AVFilterContext *ctx, const char *args) -{ - MovieContext *movie = ctx->priv; - int ret; - movie->class = &movie_class; - av_opt_set_defaults(movie); - - if (args) - movie->file_name = av_get_token(&args, ":"); - if (!movie->file_name || !*movie->file_name) { - av_log(ctx, AV_LOG_ERROR, "No filename provided!\n"); - return AVERROR(EINVAL); - } - - if (*args++ == ':' && (ret = av_set_options_string(movie, args, "=", ":")) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args); - return ret; - } - - movie->seek_point = movie->seek_point_d * 1000000 + 0.5; - - return movie_init(ctx); -} - -static av_cold void uninit(AVFilterContext *ctx) -{ - MovieContext *movie = ctx->priv; - - av_free(movie->file_name); - av_free(movie->format_name); - if (movie->codec_ctx) - avcodec_close(movie->codec_ctx); - if (movie->format_ctx) - avformat_close_input(&movie->format_ctx); - avfilter_unref_buffer(movie->picref); - avcodec_free_frame(&movie->frame); -} - -static int query_formats(AVFilterContext *ctx) -{ - MovieContext *movie = ctx->priv; - enum AVPixelFormat pix_fmts[] = { movie->codec_ctx->pix_fmt, AV_PIX_FMT_NONE }; - - ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); - return 0; -} - -static int 
config_output_props(AVFilterLink *outlink) -{ - MovieContext *movie = outlink->src->priv; - - outlink->w = movie->w; - outlink->h = movie->h; - outlink->time_base = movie->format_ctx->streams[movie->stream_index]->time_base; - - return 0; -} - -static int movie_get_frame(AVFilterLink *outlink) -{ - MovieContext *movie = outlink->src->priv; - AVPacket pkt; - int ret, frame_decoded; - AVStream *st = movie->format_ctx->streams[movie->stream_index]; - - if (movie->is_done == 1) - return 0; - - while ((ret = av_read_frame(movie->format_ctx, &pkt)) >= 0) { - // Is this a packet from the video stream? - if (pkt.stream_index == movie->stream_index) { - movie->codec_ctx->reordered_opaque = pkt.pos; - avcodec_decode_video2(movie->codec_ctx, movie->frame, &frame_decoded, &pkt); - - if (frame_decoded) { - /* FIXME: avoid the memcpy */ - movie->picref = ff_get_video_buffer(outlink, AV_PERM_WRITE | AV_PERM_PRESERVE | - AV_PERM_REUSE2, outlink->w, outlink->h); - av_image_copy(movie->picref->data, movie->picref->linesize, - movie->frame->data, movie->frame->linesize, - movie->picref->format, outlink->w, outlink->h); - avfilter_copy_frame_props(movie->picref, movie->frame); - - /* FIXME: use a PTS correction mechanism as that in - * ffplay.c when some API will be available for that */ - /* use pkt_dts if pkt_pts is not available */ - movie->picref->pts = movie->frame->pkt_pts == AV_NOPTS_VALUE ? - movie->frame->pkt_dts : movie->frame->pkt_pts; - - movie->picref->pos = movie->frame->reordered_opaque; - if (!movie->frame->sample_aspect_ratio.num) - movie->picref->video->pixel_aspect = st->sample_aspect_ratio; - av_dlog(outlink->src, - "movie_get_frame(): file:'%s' pts:%"PRId64" time:%f pos:%"PRId64" aspect:%d/%d\n", - movie->file_name, movie->picref->pts, - (double)movie->picref->pts * av_q2d(st->time_base), - movie->picref->pos, - movie->picref->video->pixel_aspect.num, movie->picref->video->pixel_aspect.den); - // We got it. 
Free the packet since we are returning - av_free_packet(&pkt); - - return 0; - } - } - // Free the packet that was allocated by av_read_frame - av_free_packet(&pkt); - } - - // On multi-frame source we should stop the mixing process when - // the movie source does not have more frames - if (ret == AVERROR_EOF) - movie->is_done = 1; - return ret; -} - -static int request_frame(AVFilterLink *outlink) -{ - MovieContext *movie = outlink->src->priv; - int ret; - - if (movie->is_done) - return AVERROR_EOF; - if ((ret = movie_get_frame(outlink)) < 0) - return ret; - - ret = ff_filter_frame(outlink, movie->picref); - movie->picref = NULL; - - return ret; -} - -static const AVFilterPad avfilter_vsrc_movie_outputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .request_frame = request_frame, - .config_props = config_output_props, - }, - { NULL } -}; - -AVFilter avfilter_vsrc_movie = { - .name = "movie", - .description = NULL_IF_CONFIG_SMALL("Read from a movie source."), - .priv_size = sizeof(MovieContext), - .init = init, - .uninit = uninit, - .query_formats = query_formats, - - .inputs = NULL, - .outputs = avfilter_vsrc_movie_outputs, -}; diff --git a/libavfilter/vsrc_mptestsrc.c b/libavfilter/vsrc_mptestsrc.c new file mode 100644 index 0000000..d526ee2 --- /dev/null +++ b/libavfilter/vsrc_mptestsrc.c @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** + * @file + * MP test source, ported from MPlayer libmpcodecs/vf_test.c + */ + +#include "libavutil/avstring.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "libavutil/pixdesc.h" +#include "avfilter.h" +#include "internal.h" +#include "formats.h" +#include "video.h" + +#define WIDTH 512 +#define HEIGHT 512 + +enum test_type { + TEST_DC_LUMA, + TEST_DC_CHROMA, + TEST_FREQ_LUMA, + TEST_FREQ_CHROMA, + TEST_AMP_LUMA, + TEST_AMP_CHROMA, + TEST_CBP, + TEST_MV, + TEST_RING1, + TEST_RING2, + TEST_ALL, + TEST_NB +}; + +typedef struct MPTestContext { + const AVClass *class; + unsigned int frame_nb; + AVRational time_base; + int64_t pts, max_pts; + int hsub, vsub; + char *size, *rate, *duration; + enum test_type test; +} MPTestContext; + +#define OFFSET(x) offsetof(MPTestContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +static const AVOption mptestsrc_options[]= { + { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS }, + { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS }, + { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + + { "test", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" }, + { "t", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" }, + { "dc_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "dc_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "freq_luma", "", 0, 
AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "freq_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "amp_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "amp_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "cbp", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_CBP}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "mv", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_MV}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "ring1", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING1}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "ring2", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING2}, INT_MIN, INT_MAX, FLAGS, "test" }, + { "all", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_ALL}, INT_MIN, INT_MAX, FLAGS, "test" }, + + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(mptestsrc); + +static double c[64]; + +static void init_idct(void) +{ + int i, j; + + for (i = 0; i < 8; i++) { + double s = i == 0 ? 
sqrt(0.125) : 0.5; + + for (j = 0; j < 8; j++) + c[i*8+j] = s*cos((M_PI/8.0)*i*(j+0.5)); + } +} + +static void idct(uint8_t *dst, int dst_linesize, int src[64]) +{ + int i, j, k; + double tmp[64]; + + for (i = 0; i < 8; i++) { + for (j = 0; j < 8; j++) { + double sum = 0.0; + + for (k = 0; k < 8; k++) + sum += c[k*8+j] * src[8*i+k]; + + tmp[8*i+j] = sum; + } + } + + for (j = 0; j < 8; j++) { + for (i = 0; i < 8; i++) { + double sum = 0.0; + + for (k = 0; k < 8; k++) + sum += c[k*8+i]*tmp[8*k+j]; + + dst[dst_linesize*i + j] = av_clip((int)floor(sum+0.5), 0, 255); + } + } +} + +static void draw_dc(uint8_t *dst, int dst_linesize, int color, int w, int h) +{ + int x, y; + + for (y = 0; y < h; y++) + for (x = 0; x < w; x++) + dst[x + y*dst_linesize] = color; +} + +static void draw_basis(uint8_t *dst, int dst_linesize, int amp, int freq, int dc) +{ + int src[64]; + + memset(src, 0, 64*sizeof(int)); + src[0] = dc; + if (amp) + src[freq] = amp; + idct(dst, dst_linesize, src); +} + +static void draw_cbp(uint8_t *dst[3], int dst_linesize[3], int cbp, int amp, int dc) +{ + if (cbp&1) draw_basis(dst[0] , dst_linesize[0], amp, 1, dc); + if (cbp&2) draw_basis(dst[0]+8 , dst_linesize[0], amp, 1, dc); + if (cbp&4) draw_basis(dst[0]+ 8*dst_linesize[0], dst_linesize[0], amp, 1, dc); + if (cbp&8) draw_basis(dst[0]+8+8*dst_linesize[0], dst_linesize[0], amp, 1, dc); + if (cbp&16) draw_basis(dst[1] , dst_linesize[1], amp, 1, dc); + if (cbp&32) draw_basis(dst[2] , dst_linesize[2], amp, 1, dc); +} + +static void dc_test(uint8_t *dst, int dst_linesize, int w, int h, int off) +{ + const int step = FFMAX(256/(w*h/256), 1); + int x, y, color = off; + + for (y = 0; y < h; y += 16) { + for (x = 0; x < w; x += 16) { + draw_dc(dst + x + y*dst_linesize, dst_linesize, color, 8, 8); + color += step; + } + } +} + +static void freq_test(uint8_t *dst, int dst_linesize, int off) +{ + int x, y, freq = 0; + + for (y = 0; y < 8*16; y += 16) { + for (x = 0; x < 8*16; x += 16) { + draw_basis(dst + x + 
y*dst_linesize, dst_linesize, 4*(96+off), freq, 128*8); + freq++; + } + } +} + +static void amp_test(uint8_t *dst, int dst_linesize, int off) +{ + int x, y, amp = off; + + for (y = 0; y < 16*16; y += 16) { + for (x = 0; x < 16*16; x += 16) { + draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*amp, 1, 128*8); + amp++; + } + } +} + +static void cbp_test(uint8_t *dst[3], int dst_linesize[3], int off) +{ + int x, y, cbp = 0; + + for (y = 0; y < 16*8; y += 16) { + for (x = 0; x < 16*8; x += 16) { + uint8_t *dst1[3]; + dst1[0] = dst[0] + x*2 + y*2*dst_linesize[0]; + dst1[1] = dst[1] + x + y* dst_linesize[1]; + dst1[2] = dst[2] + x + y* dst_linesize[2]; + + draw_cbp(dst1, dst_linesize, cbp, (64+off)*4, 128*8); + cbp++; + } + } +} + +static void mv_test(uint8_t *dst, int dst_linesize, int off) +{ + int x, y; + + for (y = 0; y < 16*16; y++) { + if (y&16) + continue; + for (x = 0; x < 16*16; x++) + dst[x + y*dst_linesize] = x + off*8/(y/32+1); + } +} + +static void ring1_test(uint8_t *dst, int dst_linesize, int off) +{ + int x, y, color = 0; + + for (y = off; y < 16*16; y += 16) { + for (x = off; x < 16*16; x += 16) { + draw_dc(dst + x + y*dst_linesize, dst_linesize, ((x+y)&16) ? 
color : -color, 16, 16); + color++; + } + } +} + +static void ring2_test(uint8_t *dst, int dst_linesize, int off) +{ + int x, y; + + for (y = 0; y < 16*16; y++) { + for (x = 0; x < 16*16; x++) { + double d = sqrt((x-8*16)*(x-8*16) + (y-8*16)*(y-8*16)); + double r = d/20 - (int)(d/20); + if (r < off/30.0) { + dst[x + y*dst_linesize] = 255; + dst[x + y*dst_linesize+256] = 0; + } else { + dst[x + y*dst_linesize] = x; + dst[x + y*dst_linesize+256] = x; + } + } + } +} + +static av_cold int init(AVFilterContext *ctx, const char *args) +{ + MPTestContext *test = ctx->priv; + AVRational frame_rate_q; + int64_t duration = -1; + int ret; + + test->class = &mptestsrc_class; + av_opt_set_defaults(test); + + if ((ret = (av_set_options_string(test, args, "=", ":"))) < 0) + return ret; + + if ((ret = av_parse_video_rate(&frame_rate_q, test->rate)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->rate); + return ret; + } + + if ((test->duration) && (ret = av_parse_time(&duration, test->duration, 1)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration); + return ret; + } + + test->time_base.num = frame_rate_q.den; + test->time_base.den = frame_rate_q.num; + test->max_pts = duration >= 0 ? + av_rescale_q(duration, AV_TIME_BASE_Q, test->time_base) : -1; + test->frame_nb = 0; + test->pts = 0; + + av_log(ctx, AV_LOG_VERBOSE, "rate:%d/%d duration:%f\n", + frame_rate_q.num, frame_rate_q.den, + duration < 0 ? 
-1 : test->max_pts * av_q2d(test->time_base)); + init_idct(); + + return 0; +} + +static int config_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + MPTestContext *test = ctx->priv; + const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(outlink->format); + + test->hsub = pix_desc->log2_chroma_w; + test->vsub = pix_desc->log2_chroma_h; + + outlink->w = WIDTH; + outlink->h = HEIGHT; + outlink->time_base = test->time_base; + + return 0; +} + +static int query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE + }; + + ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); + return 0; +} + +static int request_frame(AVFilterLink *outlink) +{ + MPTestContext *test = outlink->src->priv; + AVFilterBufferRef *picref; + int w = WIDTH, h = HEIGHT, ch = h>>test->vsub; + unsigned int frame = test->frame_nb; + enum test_type tt = test->test; + + if (test->max_pts >= 0 && test->pts > test->max_pts) + return AVERROR_EOF; + picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, w, h); + picref->pts = test->pts++; + + // clean image + memset(picref->data[0], 0, picref->linesize[0] * h); + memset(picref->data[1], 128, picref->linesize[1] * ch); + memset(picref->data[2], 128, picref->linesize[2] * ch); + + if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */ + tt = (frame/30)%(TEST_NB-1); + + switch (tt) { + case TEST_DC_LUMA: dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30); break; + case TEST_DC_CHROMA: dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30); break; + case TEST_FREQ_LUMA: freq_test(picref->data[0], picref->linesize[0], frame%30); break; + case TEST_FREQ_CHROMA: freq_test(picref->data[1], picref->linesize[1], frame%30); break; + case TEST_AMP_LUMA: amp_test(picref->data[0], picref->linesize[0], frame%30); break; + case TEST_AMP_CHROMA: amp_test(picref->data[1], picref->linesize[1], frame%30); break; + case 
TEST_CBP: cbp_test(picref->data , picref->linesize , frame%30); break; + case TEST_MV: mv_test(picref->data[0], picref->linesize[0], frame%30); break; + case TEST_RING1: ring1_test(picref->data[0], picref->linesize[0], frame%30); break; + case TEST_RING2: ring2_test(picref->data[0], picref->linesize[0], frame%30); break; + } + + test->frame_nb++; + ff_filter_frame(outlink, picref); + + return 0; +} + +static const AVFilterPad mptestsrc_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = config_props, + }, + { NULL } +}; + +AVFilter avfilter_vsrc_mptestsrc = { + .name = "mptestsrc", + .description = NULL_IF_CONFIG_SMALL("Generate various test pattern."), + .priv_size = sizeof(MPTestContext), + .init = init, + + .query_formats = query_formats, + + .inputs = NULL, + .outputs = mptestsrc_outputs, + .priv_class = &mptestsrc_class, +}; diff --git a/libavfilter/vsrc_nullsrc.c b/libavfilter/vsrc_nullsrc.c deleted file mode 100644 index 79f6d4b..0000000 --- a/libavfilter/vsrc_nullsrc.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * null video source - */ - -#include <stdio.h> - -#include "libavutil/avstring.h" -#include "libavutil/eval.h" -#include "libavutil/internal.h" -#include "libavutil/mathematics.h" -#include "libavutil/parseutils.h" -#include "avfilter.h" -#include "formats.h" -#include "internal.h" - -static const char *const var_names[] = { - "E", - "PHI", - "PI", - "AVTB", /* default timebase 1/AV_TIME_BASE */ - NULL -}; - -enum var_name { - VAR_E, - VAR_PHI, - VAR_PI, - VAR_AVTB, - VAR_VARS_NB -}; - -typedef struct { - int w, h; - char tb_expr[256]; - double var_values[VAR_VARS_NB]; -} NullContext; - -static int init(AVFilterContext *ctx, const char *args) -{ - NullContext *priv = ctx->priv; - - priv->w = 352; - priv->h = 288; - av_strlcpy(priv->tb_expr, "AVTB", sizeof(priv->tb_expr)); - - if (args) - sscanf(args, "%d:%d:%255[^:]", &priv->w, &priv->h, priv->tb_expr); - - if (priv->w <= 0 || priv->h <= 0) { - av_log(ctx, AV_LOG_ERROR, "Non-positive size values are not acceptable.\n"); - return AVERROR(EINVAL); - } - - return 0; -} - -static int config_props(AVFilterLink *outlink) -{ - AVFilterContext *ctx = outlink->src; - NullContext *priv = ctx->priv; - AVRational tb; - int ret; - double res; - - priv->var_values[VAR_E] = M_E; - priv->var_values[VAR_PHI] = M_PHI; - priv->var_values[VAR_PI] = M_PI; - priv->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q); - - if ((ret = av_expr_parse_and_eval(&res, priv->tb_expr, var_names, priv->var_values, - NULL, NULL, NULL, NULL, NULL, 0, NULL)) < 0) { - av_log(ctx, AV_LOG_ERROR, "Invalid expression '%s' for timebase.\n", priv->tb_expr); - return ret; - } - tb = av_d2q(res, INT_MAX); - if (tb.num <= 0 || tb.den <= 0) { - av_log(ctx, AV_LOG_ERROR, - "Invalid non-positive value for the timebase %d/%d.\n", - 
tb.num, tb.den); - return AVERROR(EINVAL); - } - - outlink->w = priv->w; - outlink->h = priv->h; - outlink->time_base = tb; - - av_log(outlink->src, AV_LOG_VERBOSE, "w:%d h:%d tb:%d/%d\n", priv->w, priv->h, - tb.num, tb.den); - - return 0; -} - -static int request_frame(AVFilterLink *link) -{ - return -1; -} - -static const AVFilterPad avfilter_vsrc_nullsrc_outputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .config_props = config_props, - .request_frame = request_frame, - }, - { NULL } -}; - -AVFilter avfilter_vsrc_nullsrc = { - .name = "nullsrc", - .description = NULL_IF_CONFIG_SMALL("Null video source, never return images."), - - .init = init, - .priv_size = sizeof(NullContext), - - .inputs = NULL, - - .outputs = avfilter_vsrc_nullsrc_outputs, -}; diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c index 632bd27..f5e37f8 100644 --- a/libavfilter/vsrc_testsrc.c +++ b/libavfilter/vsrc_testsrc.c @@ -1,21 +1,22 @@ /* * Copyright (c) 2007 Nicolas George <nicolas.george@normalesup.org> * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2012 Paul B Mahol * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -28,93 +29,130 @@ * * rgbtestsrc is ported from MPlayer libmpcodecs/vf_rgbtest.c by * Michael Niedermayer. + * + * smptebars is by Paul B Mahol. */ #include <float.h> #include "libavutil/common.h" -#include "libavutil/mathematics.h" #include "libavutil/opt.h" +#include "libavutil/imgutils.h" #include "libavutil/intreadwrite.h" #include "libavutil/parseutils.h" #include "avfilter.h" +#include "drawutils.h" #include "formats.h" #include "internal.h" #include "video.h" typedef struct { const AVClass *class; - int h, w; + int w, h; unsigned int nb_frame; - AVRational time_base; - int64_t pts, max_pts; - char *size; ///< video frame size - char *rate; ///< video frame rate - char *duration; ///< total duration of the generated video + AVRational time_base, frame_rate; + int64_t pts; + char *frame_rate_str; ///< video frame rate + char *duration_str; ///< total duration of the generated video + int64_t duration; ///< duration expressed in microseconds AVRational sar; ///< sample aspect ratio + int nb_decimals; + int draw_once; ///< draw only the first frame, always put out the same picture + AVFilterBufferRef *picref; ///< cached reference containing the painted picture void (* fill_picture_fn)(AVFilterContext *ctx, AVFilterBufferRef *picref); + /* only used by color */ + char *color_str; + FFDrawContext draw; + FFDrawColor color; + uint8_t color_rgba[4]; + /* only used by rgbtest */ - int rgba_map[4]; + uint8_t rgba_map[4]; } TestSourceContext; #define OFFSET(x) offsetof(TestSourceContext, x) - -static const AVOption testsrc_options[] = { - { "size", "set video size", OFFSET(size), AV_OPT_TYPE_STRING, {.str = "320x240"}}, - { "s", "set video size", OFFSET(size), AV_OPT_TYPE_STRING, {.str = 
"320x240"}}, - { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, }, - { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, }, - { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_STRING, {.str = NULL}, }, - { "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX }, +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption options[] = { + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS }, + { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS }, + { "rate", "set video rate", OFFSET(frame_rate_str), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS }, + { "r", "set video rate", OFFSET(frame_rate_str), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, FLAGS }, + { "duration", "set video duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "d", "set video duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, + { "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl= 1}, 0, INT_MAX, FLAGS }, + + /* only used by color */ + { "color", "set color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "c", "set color", OFFSET(color_str), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, + + /* only used by testsrc */ + { "decimals", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS }, + { "n", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS }, { NULL }, }; -static av_cold int init_common(AVFilterContext *ctx, const char *args) +static av_cold int init(AVFilterContext *ctx, const char *args) { TestSourceContext *test = ctx->priv; - AVRational frame_rate_q; - int64_t duration = -1; int ret = 0; 
av_opt_set_defaults(test); - if ((ret = (av_set_options_string(test, args, "=", ":"))) < 0) { - av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args); + if ((ret = (av_set_options_string(test, args, "=", ":"))) < 0) return ret; - } - if ((ret = av_parse_video_size(&test->w, &test->h, test->size)) < 0) { - av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'\n", test->size); + if ((ret = av_parse_video_rate(&test->frame_rate, test->frame_rate_str)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->frame_rate_str); return ret; } - if ((ret = av_parse_video_rate(&frame_rate_q, test->rate)) < 0 || - frame_rate_q.den <= 0 || frame_rate_q.num <= 0) { - av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->rate); + test->duration = -1; + if (test->duration_str && + (ret = av_parse_time(&test->duration, test->duration_str, 1)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration_str); return ret; } - if ((test->duration) && (ret = av_parse_time(&duration, test->duration, 1)) < 0) { - av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration); - return ret; + if (test->nb_decimals && strcmp(ctx->filter->name, "testsrc")) { + av_log(ctx, AV_LOG_WARNING, + "Option 'decimals' is ignored with source '%s'\n", + ctx->filter->name); + } + + if (test->color_str) { + if (!strcmp(ctx->filter->name, "color")) { + ret = av_parse_color(test->color_rgba, test->color_str, -1, ctx); + if (ret < 0) + return ret; + } else { + av_log(ctx, AV_LOG_WARNING, + "Option 'color' is ignored with source '%s'\n", + ctx->filter->name); + } } - test->time_base.num = frame_rate_q.den; - test->time_base.den = frame_rate_q.num; - test->max_pts = duration >= 0 ? 
- av_rescale_q(duration, AV_TIME_BASE_Q, test->time_base) : -1; + test->time_base = av_inv_q(test->frame_rate); test->nb_frame = 0; test->pts = 0; - av_log(ctx, AV_LOG_DEBUG, "size:%dx%d rate:%d/%d duration:%f sar:%d/%d\n", - test->w, test->h, frame_rate_q.num, frame_rate_q.den, - duration < 0 ? -1 : test->max_pts * av_q2d(test->time_base), + av_log(ctx, AV_LOG_VERBOSE, "size:%dx%d rate:%d/%d duration:%f sar:%d/%d\n", + test->w, test->h, test->frame_rate.num, test->frame_rate.den, + test->duration < 0 ? -1 : (double)test->duration/1000000, test->sar.num, test->sar.den); return 0; } +static av_cold void uninit(AVFilterContext *ctx) +{ + TestSourceContext *test = ctx->priv; + + av_opt_free(test); + avfilter_unref_bufferp(&test->picref); +} + static int config_props(AVFilterLink *outlink) { TestSourceContext *test = outlink->src->priv; @@ -122,7 +160,8 @@ static int config_props(AVFilterLink *outlink) outlink->w = test->w; outlink->h = test->h; outlink->sample_aspect_ratio = test->sar; - outlink->time_base = test->time_base; + outlink->frame_rate = test->frame_rate; + outlink->time_base = test->time_base; return 0; } @@ -130,39 +169,163 @@ static int config_props(AVFilterLink *outlink) static int request_frame(AVFilterLink *outlink) { TestSourceContext *test = outlink->src->priv; - AVFilterBufferRef *picref; + AVFilterBufferRef *outpicref; - if (test->max_pts >= 0 && test->pts > test->max_pts) + if (test->duration >= 0 && + av_rescale_q(test->pts, test->time_base, AV_TIME_BASE_Q) >= test->duration) return AVERROR_EOF; - picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, test->w, test->h); - if (!picref) - return AVERROR(ENOMEM); - picref->pts = test->pts++; - picref->pos = -1; - picref->video->key_frame = 1; - picref->video->interlaced = 0; - picref->video->pict_type = AV_PICTURE_TYPE_I; - picref->video->pixel_aspect = test->sar; + if (test->draw_once) { + if (!test->picref) { + test->picref = + ff_get_video_buffer(outlink, 
AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE, + test->w, test->h); + if (!test->picref) + return AVERROR(ENOMEM); + test->fill_picture_fn(outlink->src, test->picref); + } + outpicref = avfilter_ref_buffer(test->picref, ~AV_PERM_WRITE); + } else + outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, test->w, test->h); + + if (!outpicref) + return AVERROR(ENOMEM); + outpicref->pts = test->pts; + outpicref->pos = -1; + outpicref->video->key_frame = 1; + outpicref->video->interlaced = 0; + outpicref->video->pict_type = AV_PICTURE_TYPE_I; + outpicref->video->sample_aspect_ratio = test->sar; + if (!test->draw_once) + test->fill_picture_fn(outlink->src, outpicref); + + test->pts++; test->nb_frame++; - test->fill_picture_fn(outlink->src, picref); - return ff_filter_frame(outlink, picref); + return ff_filter_frame(outlink, outpicref); } -#if CONFIG_TESTSRC_FILTER +#if CONFIG_COLOR_FILTER + +#define color_options options +AVFILTER_DEFINE_CLASS(color); + +static void color_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + TestSourceContext *test = ctx->priv; + ff_fill_rectangle(&test->draw, &test->color, + picref->data, picref->linesize, + 0, 0, test->w, test->h); +} -static const char *testsrc_get_name(void *ctx) +static av_cold int color_init(AVFilterContext *ctx, const char *args) { - return "testsrc"; + TestSourceContext *test = ctx->priv; + test->class = &color_class; + test->fill_picture_fn = color_fill_picture; + test->draw_once = 1; + av_opt_set(test, "color", "black", 0); + return init(ctx, args); } -static const AVClass testsrc_class = { - .class_name = "TestSourceContext", - .item_name = testsrc_get_name, - .option = testsrc_options, +static int color_query_formats(AVFilterContext *ctx) +{ + ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0)); + return 0; +} + +static int color_config_props(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->src; + TestSourceContext *test = ctx->priv; + int ret; + + ff_draw_init(&test->draw, 
inlink->format, 0); + ff_draw_color(&test->draw, &test->color, test->color_rgba); + + test->w = ff_draw_round_to_sub(&test->draw, 0, -1, test->w); + test->h = ff_draw_round_to_sub(&test->draw, 1, -1, test->h); + if (av_image_check_size(test->w, test->h, 0, ctx) < 0) + return AVERROR(EINVAL); + + if ((ret = config_props(inlink)) < 0) + return ret; + + av_log(ctx, AV_LOG_VERBOSE, "color:0x%02x%02x%02x%02x\n", + test->color_rgba[0], test->color_rgba[1], test->color_rgba[2], test->color_rgba[3]); + return 0; +} + +static const AVFilterPad color_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = color_config_props, + }, + { NULL } +}; + +AVFilter avfilter_vsrc_color = { + .name = "color", + .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input."), + + .priv_size = sizeof(TestSourceContext), + .init = color_init, + .uninit = uninit, + + .query_formats = color_query_formats, + .inputs = NULL, + .outputs = color_outputs, + .priv_class = &color_class, +}; + +#endif /* CONFIG_COLOR_FILTER */ + +#if CONFIG_NULLSRC_FILTER + +#define nullsrc_options options +AVFILTER_DEFINE_CLASS(nullsrc); + +static void nullsrc_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) { } + +static av_cold int nullsrc_init(AVFilterContext *ctx, const char *args) +{ + TestSourceContext *test = ctx->priv; + + test->class = &nullsrc_class; + test->fill_picture_fn = nullsrc_fill_picture; + return init(ctx, args); +} + +static const AVFilterPad nullsrc_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = config_props, + }, + { NULL }, +}; + +AVFilter avfilter_vsrc_nullsrc = { + .name = "nullsrc", + .description = NULL_IF_CONFIG_SMALL("Null video source, return unprocessed video frames."), + .init = nullsrc_init, + .uninit = uninit, + .priv_size = sizeof(TestSourceContext), + .inputs = NULL, + .outputs = nullsrc_outputs, + .priv_class = 
&nullsrc_class, }; +#endif /* CONFIG_NULLSRC_FILTER */ + +#if CONFIG_TESTSRC_FILTER + +#define testsrc_options options +AVFILTER_DEFINE_CLASS(testsrc); + /** * Fill a rectangle with value val. * @@ -285,7 +448,7 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) } /* draw sliding color line */ - p = data + picref->linesize[0] * height * 3/4; + p0 = p = data + picref->linesize[0] * height * 3/4; grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) % GRADIENT_SIZE; rgrad = 0; @@ -313,15 +476,20 @@ static void test_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) if (grad >= GRADIENT_SIZE) grad -= GRADIENT_SIZE; } + p = p0; for (y = height / 8; y > 0; y--) { - memcpy(p, p - picref->linesize[0], 3 * width); + memcpy(p+picref->linesize[0], p, 3 * width); p += picref->linesize[0]; } /* draw digits */ seg_size = width / 80; if (seg_size >= 1 && height >= 13 * seg_size) { - second = test->nb_frame * test->time_base.num / test->time_base.den; + double time = av_q2d(test->time_base) * test->nb_frame * + pow(10, test->nb_decimals); + if (time > INT_MAX) + return; + second = (int)time; x = width - (width - seg_size * 64) / 2; y = (height - seg_size * 13) / 2; p = data + (x*3 + y * picref->linesize[0]); @@ -341,7 +509,7 @@ static av_cold int test_init(AVFilterContext *ctx, const char *args) test->class = &testsrc_class; test->fill_picture_fn = test_fill_picture; - return init_common(ctx, args); + return init(ctx, args); } static int test_query_formats(AVFilterContext *ctx) @@ -364,32 +532,25 @@ static const AVFilterPad avfilter_vsrc_testsrc_outputs[] = { }; AVFilter avfilter_vsrc_testsrc = { - .name = "testsrc", - .description = NULL_IF_CONFIG_SMALL("Generate test pattern."), - .priv_size = sizeof(TestSourceContext), - .init = test_init, + .name = "testsrc", + .description = NULL_IF_CONFIG_SMALL("Generate test pattern."), + .priv_size = sizeof(TestSourceContext), + .init = test_init, + .uninit = uninit, - 
.query_formats = test_query_formats, + .query_formats = test_query_formats, .inputs = NULL, - .outputs = avfilter_vsrc_testsrc_outputs, + .priv_class = &testsrc_class, }; #endif /* CONFIG_TESTSRC_FILTER */ #if CONFIG_RGBTESTSRC_FILTER -static const char *rgbtestsrc_get_name(void *ctx) -{ - return "rgbtestsrc"; -} - -static const AVClass rgbtestsrc_class = { - .class_name = "RGBTestSourceContext", - .item_name = rgbtestsrc_get_name, - .option = testsrc_options, -}; +#define rgbtestsrc_options options +AVFILTER_DEFINE_CLASS(rgbtestsrc); #define R 0 #define G 1 @@ -398,7 +559,7 @@ static const AVClass rgbtestsrc_class = { static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize, int x, int y, int r, int g, int b, enum AVPixelFormat fmt, - int rgba_map[4]) + uint8_t rgba_map[4]) { int32_t v; uint8_t *p; @@ -420,7 +581,7 @@ static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize, case AV_PIX_FMT_BGRA: case AV_PIX_FMT_ARGB: case AV_PIX_FMT_ABGR: - v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)); + v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)) + (255 << (rgba_map[A]*8)); p = dst + 4*x + y*dst_linesize; AV_WL32(p, v); break; @@ -451,9 +612,10 @@ static av_cold int rgbtest_init(AVFilterContext *ctx, const char *args) { TestSourceContext *test = ctx->priv; + test->draw_once = 1; test->class = &rgbtestsrc_class; test->fill_picture_fn = rgbtest_fill_picture; - return init_common(ctx, args); + return init(ctx, args); } static int rgbtest_query_formats(AVFilterContext *ctx) @@ -474,15 +636,7 @@ static int rgbtest_config_props(AVFilterLink *outlink) { TestSourceContext *test = outlink->src->priv; - switch (outlink->format) { - case AV_PIX_FMT_ARGB: test->rgba_map[A] = 0; test->rgba_map[R] = 1; test->rgba_map[G] = 2; test->rgba_map[B] = 3; break; - case AV_PIX_FMT_ABGR: test->rgba_map[A] = 0; test->rgba_map[B] = 1; test->rgba_map[G] = 2; test->rgba_map[R] = 3; break; - case AV_PIX_FMT_RGBA: - case 
AV_PIX_FMT_RGB24: test->rgba_map[R] = 0; test->rgba_map[G] = 1; test->rgba_map[B] = 2; test->rgba_map[A] = 3; break; - case AV_PIX_FMT_BGRA: - case AV_PIX_FMT_BGR24: test->rgba_map[B] = 0; test->rgba_map[G] = 1; test->rgba_map[R] = 2; test->rgba_map[A] = 3; break; - } - + ff_fill_rgba_map(test->rgba_map, outlink->format); return config_props(outlink); } @@ -497,16 +651,145 @@ static const AVFilterPad avfilter_vsrc_rgbtestsrc_outputs[] = { }; AVFilter avfilter_vsrc_rgbtestsrc = { - .name = "rgbtestsrc", - .description = NULL_IF_CONFIG_SMALL("Generate RGB test pattern."), - .priv_size = sizeof(TestSourceContext), - .init = rgbtest_init, + .name = "rgbtestsrc", + .description = NULL_IF_CONFIG_SMALL("Generate RGB test pattern."), + .priv_size = sizeof(TestSourceContext), + .init = rgbtest_init, + .uninit = uninit, - .query_formats = rgbtest_query_formats, + .query_formats = rgbtest_query_formats, .inputs = NULL, .outputs = avfilter_vsrc_rgbtestsrc_outputs, + .priv_class = &rgbtestsrc_class, }; #endif /* CONFIG_RGBTESTSRC_FILTER */ + +#if CONFIG_SMPTEBARS_FILTER + +#define smptebars_options options +AVFILTER_DEFINE_CLASS(smptebars); + +static const uint8_t rainbow[7][4] = { + { 191, 191, 191, 255 }, /* gray */ + { 191, 191, 0, 255 }, /* yellow */ + { 0, 191, 191, 255 }, /* cyan */ + { 0, 191, 0, 255 }, /* green */ + { 191, 0, 191, 255 }, /* magenta */ + { 191, 0, 0, 255 }, /* red */ + { 0, 0, 191, 255 }, /* blue */ +}; + +static const uint8_t wobnair[7][4] = { + { 0, 0, 191, 255 }, /* blue */ + { 19, 19, 19, 255 }, /* 7.5% intensity black */ + { 191, 0, 191, 255 }, /* magenta */ + { 19, 19, 19, 255 }, /* 7.5% intensity black */ + { 0, 191, 191, 255 }, /* cyan */ + { 19, 19, 19, 255 }, /* 7.5% intensity black */ + { 191, 191, 191, 255 }, /* gray */ +}; + +static const uint8_t white[4] = { 255, 255, 255, 255 }; +static const uint8_t black[4] = { 19, 19, 19, 255 }; /* 7.5% intensity black */ + +/* pluge pulses */ +static const uint8_t neg4ire[4] = { 9, 9, 9, 255 }; /* 3.5% 
intensity black */ +static const uint8_t pos4ire[4] = { 29, 29, 29, 255 }; /* 11.5% intensity black */ + +/* fudged Q/-I */ +static const uint8_t i_pixel[4] = { 0, 68, 130, 255 }; +static const uint8_t q_pixel[4] = { 67, 0, 130, 255 }; + +static void smptebars_fill_picture(AVFilterContext *ctx, AVFilterBufferRef *picref) +{ + TestSourceContext *test = ctx->priv; + FFDrawColor color; + int r_w, r_h, w_h, p_w, p_h, i, x = 0; + + r_w = (test->w + 6) / 7; + r_h = test->h * 2 / 3; + w_h = test->h * 3 / 4 - r_h; + p_w = r_w * 5 / 4; + p_h = test->h - w_h - r_h; + +#define DRAW_COLOR(rgba, x, y, w, h) \ + ff_draw_color(&test->draw, &color, rgba); \ + ff_fill_rectangle(&test->draw, &color, \ + picref->data, picref->linesize, x, y, w, h) \ + + for (i = 0; i < 7; i++) { + DRAW_COLOR(rainbow[i], x, 0, FFMIN(r_w, test->w - x), r_h); + DRAW_COLOR(wobnair[i], x, r_h, FFMIN(r_w, test->w - x), w_h); + x += r_w; + } + x = 0; + DRAW_COLOR(i_pixel, x, r_h + w_h, p_w, p_h); + x += p_w; + DRAW_COLOR(white, x, r_h + w_h, p_w, p_h); + x += p_w; + DRAW_COLOR(q_pixel, x, r_h + w_h, p_w, p_h); + x += p_w; + DRAW_COLOR(black, x, r_h + w_h, 5 * r_w - x, p_h); + x += 5 * r_w - x; + DRAW_COLOR(neg4ire, x, r_h + w_h, r_w / 3, p_h); + x += r_w / 3; + DRAW_COLOR(black, x, r_h + w_h, r_w / 3, p_h); + x += r_w / 3; + DRAW_COLOR(pos4ire, x, r_h + w_h, r_w / 3, p_h); + x += r_w / 3; + DRAW_COLOR(black, x, r_h + w_h, test->w - x, p_h); +} + +static av_cold int smptebars_init(AVFilterContext *ctx, const char *args) +{ + TestSourceContext *test = ctx->priv; + + test->class = &smptebars_class; + test->fill_picture_fn = smptebars_fill_picture; + test->draw_once = 1; + return init(ctx, args); +} + +static int smptebars_query_formats(AVFilterContext *ctx) +{ + ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0)); + return 0; +} + +static int smptebars_config_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + TestSourceContext *test = ctx->priv; + + ff_draw_init(&test->draw, 
outlink->format, 0); + + return config_props(outlink); +} + +static const AVFilterPad smptebars_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = request_frame, + .config_props = smptebars_config_props, + }, + { NULL } +}; + +AVFilter avfilter_vsrc_smptebars = { + .name = "smptebars", + .description = NULL_IF_CONFIG_SMALL("Generate SMPTE color bars."), + .priv_size = sizeof(TestSourceContext), + .init = smptebars_init, + .uninit = uninit, + + .query_formats = smptebars_query_formats, + .inputs = NULL, + .outputs = smptebars_outputs, + .priv_class = &smptebars_class, +}; + +#endif /* CONFIG_SMPTEBARS_FILTER */ diff --git a/libavfilter/x86/af_volume.asm b/libavfilter/x86/af_volume.asm index 4e5ad22..f4cbcbc 100644 --- a/libavfilter/x86/af_volume.asm +++ b/libavfilter/x86/af_volume.asm @@ -2,20 +2,20 @@ ;* x86-optimized functions for volume filter ;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com> ;* -;* This file is part of Libav. +;* This file is part of FFmpeg. ;* -;* Libav is free software; you can redistribute it and/or +;* FFmpeg is free software; you can redistribute it and/or ;* modify it under the terms of the GNU Lesser General Public ;* License as published by the Free Software Foundation; either ;* version 2.1 of the License, or (at your option) any later version. ;* -;* Libav is distributed in the hope that it will be useful, +;* FFmpeg is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;* Lesser General Public License for more details. 
;* ;* You should have received a copy of the GNU Lesser General Public -;* License along with Libav; if not, write to the Free Software +;* License along with FFmpeg; if not, write to the Free Software ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** @@ -99,9 +99,11 @@ cglobal scale_samples_s32, 4,4,4, dst, src, len, volume INIT_XMM sse2 %define CVTDQ2PD cvtdq2pd SCALE_SAMPLES_S32 +%if HAVE_AVX_EXTERNAL %define CVTDQ2PD vcvtdq2pd INIT_YMM avx SCALE_SAMPLES_S32 +%endif %undef CVTDQ2PD ; NOTE: This is not bit-identical with the C version because it clips to diff --git a/libavfilter/x86/af_volume_init.c b/libavfilter/x86/af_volume_init.c index 02bedd2..beee8ca 100644 --- a/libavfilter/x86/af_volume_init.c +++ b/libavfilter/x86/af_volume_init.c @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/libavfilter/x86/vf_gradfun.c b/libavfilter/x86/vf_gradfun.c index b4ca86c..214e764 100644 --- a/libavfilter/x86/vf_gradfun.c +++ b/libavfilter/x86/vf_gradfun.c @@ -1,20 +1,20 @@ /* * Copyright (C) 2009 Loren Merritt <lorenm@u.washignton.edu> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -30,7 +30,7 @@ DECLARE_ALIGNED(16, static const uint16_t, pw_7f)[8] = {0x7F,0x7F,0x7F,0x7F,0x7F DECLARE_ALIGNED(16, static const uint16_t, pw_ff)[8] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; #if HAVE_MMXEXT_INLINE -static void gradfun_filter_line_mmxext(uint8_t *dst, uint8_t *src, uint16_t *dc, +static void gradfun_filter_line_mmxext(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers) { @@ -46,7 +46,9 @@ static void gradfun_filter_line_mmxext(uint8_t *dst, uint8_t *src, uint16_t *dc, "pxor %%mm7, %%mm7 \n" "pshufw $0, %%mm5, %%mm5 \n" "movq %6, %%mm6 \n" - "movq %5, %%mm4 \n" + "movq (%5), %%mm3 \n" + "movq 8(%5), %%mm4 \n" + "1: \n" "movd (%2,%0), %%mm0 \n" "movd (%3,%0), %%mm1 \n" @@ -61,26 +63,51 @@ static void gradfun_filter_line_mmxext(uint8_t *dst, uint8_t *src, uint16_t *dc, "psubw %%mm6, %%mm2 \n" "pminsw %%mm7, %%mm2 \n" // m = -max(0, 127-m) "pmullw %%mm2, %%mm2 \n" - "paddw %%mm4, %%mm0 \n" // pix += dither + "paddw %%mm3, %%mm0 \n" // pix += dither + "psllw $2, %%mm1 \n" // m = m*m*delta >> 14 "pmulhw %%mm2, %%mm1 \n" + "paddw %%mm1, %%mm0 \n" // pix += m + "psraw $7, %%mm0 \n" + "packuswb %%mm0, %%mm0 \n" + "movd %%mm0, (%1,%0) \n" // dst = clip(pix>>7) + "add $4, %0 \n" + "jnl 2f \n" + + "movd (%2,%0), %%mm0 \n" + "movd (%3,%0), %%mm1 \n" + "punpcklbw %%mm7, %%mm0 \n" + "punpcklwd %%mm1, %%mm1 \n" + "psllw $7, %%mm0 \n" + "pxor %%mm2, %%mm2 \n" + "psubw %%mm0, %%mm1 \n" // delta = dc - pix + "psubw %%mm1, %%mm2 \n" + "pmaxsw %%mm1, %%mm2 \n" + "pmulhuw %%mm5, %%mm2 \n" // m = abs(delta) * thresh >> 16 + "psubw %%mm6, %%mm2 \n" + "pminsw %%mm7, %%mm2 \n" // m = -max(0, 127-m) + "pmullw %%mm2, %%mm2 \n" + "paddw %%mm4, 
%%mm0 \n" // pix += dither "psllw $2, %%mm1 \n" // m = m*m*delta >> 14 + "pmulhw %%mm2, %%mm1 \n" "paddw %%mm1, %%mm0 \n" // pix += m "psraw $7, %%mm0 \n" "packuswb %%mm0, %%mm0 \n" "movd %%mm0, (%1,%0) \n" // dst = clip(pix>>7) "add $4, %0 \n" "jl 1b \n" + + "2: \n" "emms \n" :"+r"(x) :"r"(dst+width), "r"(src+width), "r"(dc+width/2), - "rm"(thresh), "m"(*dithers), "m"(*pw_7f) + "rm"(thresh), "r"(dithers), "m"(*pw_7f) :"memory" ); } #endif #if HAVE_SSSE3_INLINE -static void gradfun_filter_line_ssse3(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers) +static void gradfun_filter_line_ssse3(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers) { intptr_t x; if (width & 7) { @@ -109,9 +136,9 @@ static void gradfun_filter_line_ssse3(uint8_t *dst, uint8_t *src, uint16_t *dc, "psubw %%xmm6, %%xmm2 \n" "pminsw %%xmm7, %%xmm2 \n" // m = -max(0, 127-m) "pmullw %%xmm2, %%xmm2 \n" - "psllw $1, %%xmm2 \n" + "psllw $2, %%xmm1 \n" "paddw %%xmm4, %%xmm0 \n" // pix += dither - "pmulhrsw %%xmm2, %%xmm1 \n" // m = m*m*delta >> 14 + "pmulhw %%xmm2, %%xmm1 \n" // m = m*m*delta >> 14 "paddw %%xmm1, %%xmm0 \n" // pix += m "psraw $7, %%xmm0 \n" "packuswb %%xmm0, %%xmm0 \n" @@ -127,7 +154,7 @@ static void gradfun_filter_line_ssse3(uint8_t *dst, uint8_t *src, uint16_t *dc, #endif /* HAVE_SSSE3_INLINE */ #if HAVE_SSE2_INLINE -static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width) +static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width) { #define BLURV(load)\ intptr_t x = -2*width;\ diff --git a/libavfilter/x86/vf_hqdn3d.asm b/libavfilter/x86/vf_hqdn3d.asm index dee2c96..961127e 100644 --- a/libavfilter/x86/vf_hqdn3d.asm +++ b/libavfilter/x86/vf_hqdn3d.asm @@ -1,20 +1,20 @@ ;****************************************************************************** ;* 
Copyright (c) 2012 Loren Merritt ;* -;* This file is part of Libav. +;* This file is part of FFmpeg. ;* -;* Libav is free software; you can redistribute it and/or +;* FFmpeg is free software; you can redistribute it and/or ;* modify it under the terms of the GNU Lesser General Public ;* License as published by the Free Software Foundation; either ;* version 2.1 of the License, or (at your option) any later version. ;* -;* Libav is distributed in the hope that it will be useful, +;* FFmpeg is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;* Lesser General Public License for more details. ;* ;* You should have received a copy of the GNU Lesser General Public -;* License along with Libav; if not, write to the Free Software +;* License along with FFmpeg; if not, write to the Free Software ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** @@ -39,6 +39,7 @@ SECTION .text %endif %if %3 != 16 shl %1, 16-%3 + add %1, (1<<(15-%3))-1 %endif %endmacro @@ -86,7 +87,6 @@ ALIGN 16 mov [frameantq+xq*2], t0w movifnidn dstq, dstmp %if %1 != 16 - add t0d, (1<<(15-%1))-1 shr t0d, 16-%1 ; could eliminate this by storing from t0h, but only with some contraints on register allocation %endif %if %1 == 8 diff --git a/libavfilter/x86/vf_hqdn3d_init.c b/libavfilter/x86/vf_hqdn3d_init.c index 2893a54..4abb878 100644 --- a/libavfilter/x86/vf_hqdn3d_init.c +++ b/libavfilter/x86/vf_hqdn3d_init.c @@ -1,18 +1,20 @@ /* - * This file is part of Libav. + * Copyright (c) 2012 Loren Merritt * - * Libav is free software; you can redistribute it and/or modify + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ diff --git a/libavfilter/x86/vf_yadif.asm b/libavfilter/x86/vf_yadif.asm index bc4b3ce..a8f7987 100644 --- a/libavfilter/x86/vf_yadif.asm +++ b/libavfilter/x86/vf_yadif.asm @@ -4,20 +4,20 @@ ;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at> ;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com> ;* -;* This file is part of Libav. +;* This file is part of FFmpeg. ;* -;* Libav is free software; you can redistribute it and/or modify +;* FFmpeg is free software; you can redistribute it and/or modify ;* it under the terms of the GNU General Public License as published by ;* the Free Software Foundation; either version 2 of the License, or ;* (at your option) any later version. ;* -;* Libav is distributed in the hope that it will be useful, +;* FFmpeg is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ;* GNU General Public License for more details. 
;* ;* You should have received a copy of the GNU General Public License along -;* with Libav; if not, write to the Free Software Foundation, Inc., +;* with FFmpeg; if not, write to the Free Software Foundation, Inc., ;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ;****************************************************************************** diff --git a/libavfilter/x86/vf_yadif_init.c b/libavfilter/x86/vf_yadif_init.c index 2ffeca0..8d5e768 100644 --- a/libavfilter/x86/vf_yadif_init.c +++ b/libavfilter/x86/vf_yadif_init.c @@ -1,26 +1,25 @@ /* * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "libavutil/attributes.h" #include "libavutil/cpu.h" -#include "libavutil/internal.h" #include "libavutil/mem.h" #include "libavutil/x86/asm.h" #include "libavutil/x86/cpu.h" diff --git a/libavfilter/yadif.h b/libavfilter/yadif.h index e6f713b..b7e8852 100644 --- a/libavfilter/yadif.h +++ b/libavfilter/yadif.h @@ -1,18 +1,18 @@ /* - * This file is part of Libav. + * This file is part of FFmpeg. 
* - * Libav is free software; you can redistribute it and/or modify + * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with Libav; if not, write to the Free Software Foundation, Inc., + * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ @@ -22,29 +22,32 @@ #include "libavutil/pixdesc.h" #include "avfilter.h" +enum YADIFMode { + YADIF_MODE_SEND_FRAME = 0, ///< send 1 frame for each frame + YADIF_MODE_SEND_FIELD = 1, ///< send 1 frame for each field + YADIF_MODE_SEND_FRAME_NOSPATIAL = 2, ///< send 1 frame for each frame but skips spatial interlacing check + YADIF_MODE_SEND_FIELD_NOSPATIAL = 3, ///< send 1 frame for each field but skips spatial interlacing check +}; + +enum YADIFParity { + YADIF_PARITY_TFF = 0, ///< top field first + YADIF_PARITY_BFF = 1, ///< bottom field first + YADIF_PARITY_AUTO = -1, ///< auto detection +}; + +enum YADIFDeint { + YADIF_DEINT_ALL = 0, ///< deinterlace all frames + YADIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced +}; + typedef struct YADIFContext { - /** - * 0: send 1 frame for each frame - * 1: send 1 frame for each field - * 2: like 0 but skips spatial interlacing check - * 3: like 1 but skips spatial interlacing check - */ - int mode; - - /** - * 0: top field first - * 1: bottom field first - * -1: auto-detection - */ - int parity; + const AVClass *class; - 
int frame_pending; + enum YADIFMode mode; + enum YADIFParity parity; + enum YADIFDeint deint; - /** - * 0: deinterlace all frames - * 1: only deinterlace frames marked as interlaced - */ - int auto_enable; + int frame_pending; AVFilterBufferRef *cur; AVFilterBufferRef *next; @@ -56,6 +59,8 @@ typedef struct YADIFContext { const AVPixFmtDescriptor *csp; int eof; + uint8_t *temp_line; + int temp_line_size; } YADIFContext; void ff_yadif_init_x86(YADIFContext *yadif); |