summaryrefslogtreecommitdiffstats
path: root/libavfilter
diff options
context:
space:
mode:
Diffstat (limited to 'libavfilter')
-rw-r--r--libavfilter/Makefile173
-rw-r--r--libavfilter/aeval.c466
-rw-r--r--libavfilter/af_adelay.c282
-rw-r--r--libavfilter/af_aecho.c359
-rw-r--r--libavfilter/af_afade.c300
-rw-r--r--libavfilter/af_aformat.c29
-rw-r--r--libavfilter/af_amerge.c351
-rw-r--r--libavfilter/af_amix.c65
-rw-r--r--libavfilter/af_anull.c27
-rw-r--r--libavfilter/af_apad.c161
-rw-r--r--libavfilter/af_aphaser.c294
-rw-r--r--libavfilter/af_aresample.c351
-rw-r--r--libavfilter/af_asetnsamples.c196
-rw-r--r--libavfilter/af_asetrate.c119
-rw-r--r--libavfilter/af_ashowinfo.c55
-rw-r--r--libavfilter/af_astats.c274
-rw-r--r--libavfilter/af_astreamsync.c240
-rw-r--r--libavfilter/af_asyncts.c39
-rw-r--r--libavfilter/af_atempo.c1202
-rw-r--r--libavfilter/af_biquads.c620
-rw-r--r--libavfilter/af_bs2b.c16
-rw-r--r--libavfilter/af_channelmap.c38
-rw-r--r--libavfilter/af_channelsplit.c40
-rw-r--r--libavfilter/af_compand.c147
-rw-r--r--libavfilter/af_earwax.c172
-rw-r--r--libavfilter/af_flanger.c241
-rw-r--r--libavfilter/af_join.c61
-rw-r--r--libavfilter/af_ladspa.c705
-rw-r--r--libavfilter/af_pan.c431
-rw-r--r--libavfilter/af_replaygain.c613
-rw-r--r--libavfilter/af_resample.c27
-rw-r--r--libavfilter/af_silencedetect.c212
-rw-r--r--libavfilter/af_silenceremove.c479
-rw-r--r--libavfilter/af_volume.c208
-rw-r--r--libavfilter/af_volume.h36
-rw-r--r--libavfilter/af_volumedetect.c159
-rw-r--r--libavfilter/all_channel_layouts.inc68
-rw-r--r--libavfilter/allfilters.c143
-rw-r--r--libavfilter/asink_anullsink.c10
-rw-r--r--libavfilter/asrc_abuffer.h91
-rw-r--r--libavfilter/asrc_anullsrc.c117
-rw-r--r--libavfilter/asrc_flite.c283
-rw-r--r--libavfilter/asrc_sine.c223
-rw-r--r--libavfilter/audio.c58
-rw-r--r--libavfilter/audio.h50
-rw-r--r--libavfilter/avcodec.c137
-rw-r--r--libavfilter/avcodec.h69
-rw-r--r--libavfilter/avf_avectorscope.c274
-rw-r--r--libavfilter/avf_concat.c426
-rw-r--r--libavfilter/avf_showcqt.c807
-rw-r--r--libavfilter/avf_showspectrum.c532
-rw-r--r--libavfilter/avf_showwaves.c333
-rw-r--r--libavfilter/avfilter.c635
-rw-r--r--libavfilter/avfilter.h462
-rw-r--r--libavfilter/avfiltergraph.c620
-rw-r--r--libavfilter/avfiltergraph.h9
-rw-r--r--libavfilter/avfilterres.rc55
-rw-r--r--libavfilter/bbox.c75
-rw-r--r--libavfilter/bbox.h44
-rw-r--r--libavfilter/buffer.c106
-rw-r--r--libavfilter/bufferqueue.h121
-rw-r--r--libavfilter/buffersink.c426
-rw-r--r--libavfilter/buffersink.h102
-rw-r--r--libavfilter/buffersrc.c206
-rw-r--r--libavfilter/buffersrc.h83
-rw-r--r--libavfilter/deshake.h107
-rw-r--r--libavfilter/deshake_opencl.c200
-rw-r--r--libavfilter/deshake_opencl.h45
-rw-r--r--libavfilter/deshake_opencl_kernel.h225
-rw-r--r--libavfilter/drawutils.c488
-rw-r--r--libavfilter/drawutils.h120
-rw-r--r--libavfilter/dualinput.c83
-rw-r--r--libavfilter/dualinput.h46
-rw-r--r--libavfilter/f_ebur128.c933
-rw-r--r--libavfilter/f_interleave.c259
-rw-r--r--libavfilter/f_perms.c178
-rw-r--r--libavfilter/f_select.c507
-rw-r--r--libavfilter/f_sendcmd.c576
-rw-r--r--libavfilter/f_zmq.c275
-rw-r--r--libavfilter/fifo.c12
-rw-r--r--libavfilter/filtfmts.c77
-rw-r--r--libavfilter/formats.c340
-rw-r--r--libavfilter/formats.h65
-rw-r--r--libavfilter/framesync.c329
-rw-r--r--libavfilter/framesync.h296
-rw-r--r--libavfilter/generate_wave_table.c84
-rw-r--r--libavfilter/generate_wave_table.h33
-rw-r--r--libavfilter/gradfun.h16
-rw-r--r--libavfilter/graphdump.c165
-rw-r--r--libavfilter/graphparser.c133
-rw-r--r--libavfilter/interlace.h9
-rw-r--r--libavfilter/internal.h163
-rw-r--r--libavfilter/lavfutils.c104
-rw-r--r--libavfilter/lavfutils.h43
-rw-r--r--libavfilter/libmpcodecs/av_helpers.h27
-rw-r--r--libavfilter/libmpcodecs/cpudetect.h60
-rw-r--r--libavfilter/libmpcodecs/img_format.c244
-rw-r--r--libavfilter/libmpcodecs/img_format.h309
-rw-r--r--libavfilter/libmpcodecs/libvo/fastmemcpy.h99
-rw-r--r--libavfilter/libmpcodecs/libvo/video_out.h300
-rw-r--r--libavfilter/libmpcodecs/mp_image.c257
-rw-r--r--libavfilter/libmpcodecs/mp_image.h159
-rw-r--r--libavfilter/libmpcodecs/mp_msg.h166
-rw-r--r--libavfilter/libmpcodecs/mpc_info.h43
-rw-r--r--libavfilter/libmpcodecs/vf.h169
-rw-r--r--libavfilter/libmpcodecs/vf_eq.c240
-rw-r--r--libavfilter/libmpcodecs/vf_eq2.c519
-rw-r--r--libavfilter/libmpcodecs/vf_fspp.c2124
-rw-r--r--libavfilter/libmpcodecs/vf_ilpack.c458
-rw-r--r--libavfilter/libmpcodecs/vf_pp7.c491
-rw-r--r--libavfilter/libmpcodecs/vf_softpulldown.c163
-rw-r--r--libavfilter/libmpcodecs/vf_uspp.c394
-rw-r--r--libavfilter/libmpcodecs/vfcap.h56
-rw-r--r--libavfilter/log2_tab.c1
-rw-r--r--libavfilter/lswsutils.c50
-rw-r--r--libavfilter/lswsutils.h38
-rw-r--r--libavfilter/opencl_allkernels.c41
-rw-r--r--libavfilter/opencl_allkernels.h29
-rw-r--r--libavfilter/pthread.c13
-rw-r--r--libavfilter/setpts.c160
-rw-r--r--libavfilter/settb.c73
-rw-r--r--libavfilter/split.c88
-rw-r--r--libavfilter/src_movie.c609
-rw-r--r--libavfilter/thread.h8
-rw-r--r--libavfilter/transform.c201
-rw-r--r--libavfilter/transform.h127
-rw-r--r--libavfilter/trim.c123
-rw-r--r--libavfilter/unsharp.h81
-rw-r--r--libavfilter/unsharp_opencl.c389
-rw-r--r--libavfilter/unsharp_opencl.h34
-rw-r--r--libavfilter/unsharp_opencl_kernel.h286
-rw-r--r--libavfilter/version.h17
-rw-r--r--libavfilter/vf_alphamerge.c207
-rw-r--r--libavfilter/vf_aspect.c184
-rw-r--r--libavfilter/vf_bbox.c132
-rw-r--r--libavfilter/vf_blackdetect.c209
-rw-r--r--libavfilter/vf_blackframe.c72
-rw-r--r--libavfilter/vf_blend.c465
-rw-r--r--libavfilter/vf_boxblur.c99
-rw-r--r--libavfilter/vf_codecview.c244
-rw-r--r--libavfilter/vf_colorbalance.c213
-rw-r--r--libavfilter/vf_colorchannelmixer.c360
-rw-r--r--libavfilter/vf_colormatrix.c411
-rw-r--r--libavfilter/vf_copy.c22
-rw-r--r--libavfilter/vf_crop.c160
-rw-r--r--libavfilter/vf_cropdetect.c76
-rw-r--r--libavfilter/vf_curves.c570
-rw-r--r--libavfilter/vf_dctdnoiz.c776
-rw-r--r--libavfilter/vf_decimate.c403
-rw-r--r--libavfilter/vf_dejudder.c187
-rw-r--r--libavfilter/vf_delogo.c125
-rw-r--r--libavfilter/vf_deshake.c575
-rw-r--r--libavfilter/vf_drawbox.c333
-rw-r--r--libavfilter/vf_drawtext.c1372
-rw-r--r--libavfilter/vf_edgedetect.c394
-rw-r--r--libavfilter/vf_elbg.c212
-rw-r--r--libavfilter/vf_extractplanes.c335
-rw-r--r--libavfilter/vf_fade.c303
-rw-r--r--libavfilter/vf_field.c111
-rw-r--r--libavfilter/vf_fieldmatch.c984
-rw-r--r--libavfilter/vf_fieldorder.c97
-rw-r--r--libavfilter/vf_format.c70
-rw-r--r--libavfilter/vf_fps.c85
-rw-r--r--libavfilter/vf_framepack.c8
-rw-r--r--libavfilter/vf_framestep.c101
-rw-r--r--libavfilter/vf_frei0r.c175
-rw-r--r--libavfilter/vf_geq.c280
-rw-r--r--libavfilter/vf_gradfun.c54
-rw-r--r--libavfilter/vf_hflip.c141
-rw-r--r--libavfilter/vf_histeq.c281
-rw-r--r--libavfilter/vf_histogram.c376
-rw-r--r--libavfilter/vf_hqdn3d.c74
-rw-r--r--libavfilter/vf_hqdn3d.h12
-rw-r--r--libavfilter/vf_hqx.c562
-rw-r--r--libavfilter/vf_hue.c453
-rw-r--r--libavfilter/vf_idet.c405
-rw-r--r--libavfilter/vf_idet.h76
-rw-r--r--libavfilter/vf_il.c212
-rw-r--r--libavfilter/vf_interlace.c58
-rw-r--r--libavfilter/vf_kerndeint.c318
-rw-r--r--libavfilter/vf_lenscorrection.c229
-rw-r--r--libavfilter/vf_libopencv.c64
-rw-r--r--libavfilter/vf_lut.c237
-rw-r--r--libavfilter/vf_lut3d.c815
-rw-r--r--libavfilter/vf_mcdeint.c315
-rw-r--r--libavfilter/vf_mergeplanes.c313
-rw-r--r--libavfilter/vf_mp.c792
-rw-r--r--libavfilter/vf_mpdecimate.c249
-rw-r--r--libavfilter/vf_noise.c351
-rw-r--r--libavfilter/vf_noise.h64
-rw-r--r--libavfilter/vf_null.c23
-rw-r--r--libavfilter/vf_overlay.c674
-rw-r--r--libavfilter/vf_owdenoise.c342
-rw-r--r--libavfilter/vf_pad.c209
-rw-r--r--libavfilter/vf_perspective.c483
-rw-r--r--libavfilter/vf_phase.c329
-rw-r--r--libavfilter/vf_pixdesctest.c38
-rw-r--r--libavfilter/vf_pp.c193
-rw-r--r--libavfilter/vf_psnr.c386
-rw-r--r--libavfilter/vf_pullup.c780
-rw-r--r--libavfilter/vf_pullup.h71
-rw-r--r--libavfilter/vf_removelogo.c585
-rw-r--r--libavfilter/vf_rotate.c567
-rw-r--r--libavfilter/vf_sab.c339
-rw-r--r--libavfilter/vf_scale.c401
-rw-r--r--libavfilter/vf_select.c350
-rw-r--r--libavfilter/vf_separatefields.c146
-rw-r--r--libavfilter/vf_setfield.c94
-rw-r--r--libavfilter/vf_showinfo.c71
-rw-r--r--libavfilter/vf_shuffleplanes.c10
-rw-r--r--libavfilter/vf_signalstats.c481
-rw-r--r--libavfilter/vf_smartblur.c304
-rw-r--r--libavfilter/vf_spp.c469
-rw-r--r--libavfilter/vf_spp.h59
-rw-r--r--libavfilter/vf_stereo3d.c664
-rw-r--r--libavfilter/vf_subtitles.c445
-rw-r--r--libavfilter/vf_super2xsai.c352
-rw-r--r--libavfilter/vf_swapuv.c110
-rw-r--r--libavfilter/vf_telecine.c285
-rw-r--r--libavfilter/vf_thumbnail.c239
-rw-r--r--libavfilter/vf_tile.c245
-rw-r--r--libavfilter/vf_tinterlace.c400
-rw-r--r--libavfilter/vf_transpose.c219
-rw-r--r--libavfilter/vf_unsharp.c225
-rw-r--r--libavfilter/vf_vflip.c24
-rw-r--r--libavfilter/vf_vidstabdetect.c219
-rw-r--r--libavfilter/vf_vidstabtransform.c320
-rw-r--r--libavfilter/vf_vignette.c346
-rw-r--r--libavfilter/vf_w3fdif.c394
-rw-r--r--libavfilter/vf_xbr.c759
-rw-r--r--libavfilter/vf_yadif.c253
-rw-r--r--libavfilter/vf_zoompan.c309
-rw-r--r--libavfilter/video.c17
-rw-r--r--libavfilter/video.h10
-rw-r--r--libavfilter/vidstabutils.c85
-rw-r--r--libavfilter/vidstabutils.h47
-rw-r--r--libavfilter/vsink_nullsink.c8
-rw-r--r--libavfilter/vsrc_cellauto.c337
-rw-r--r--libavfilter/vsrc_color.c202
-rw-r--r--libavfilter/vsrc_life.c450
-rw-r--r--libavfilter/vsrc_mandelbrot.c430
-rw-r--r--libavfilter/vsrc_movie.c286
-rw-r--r--libavfilter/vsrc_mptestsrc.c361
-rw-r--r--libavfilter/vsrc_nullsrc.c136
-rw-r--r--libavfilter/vsrc_testsrc.c766
-rw-r--r--libavfilter/x86/Makefile8
-rw-r--r--libavfilter/x86/af_volume.asm10
-rw-r--r--libavfilter/x86/af_volume_init.c8
-rw-r--r--libavfilter/x86/vf_gradfun.asm8
-rw-r--r--libavfilter/x86/vf_gradfun_init.c61
-rw-r--r--libavfilter/x86/vf_hqdn3d.asm8
-rw-r--r--libavfilter/x86/vf_hqdn3d_init.c10
-rw-r--r--libavfilter/x86/vf_idet.asm170
-rw-r--r--libavfilter/x86/vf_idet_init.c87
-rw-r--r--libavfilter/x86/vf_interlace.asm8
-rw-r--r--libavfilter/x86/vf_interlace_init.c8
-rw-r--r--libavfilter/x86/vf_noise.c144
-rw-r--r--libavfilter/x86/vf_pullup.asm178
-rw-r--r--libavfilter/x86/vf_pullup_init.c41
-rw-r--r--libavfilter/x86/vf_spp.c233
-rw-r--r--libavfilter/x86/vf_yadif.asm53
-rw-r--r--libavfilter/x86/vf_yadif_init.c68
-rw-r--r--libavfilter/x86/yadif-10.asm255
-rw-r--r--libavfilter/x86/yadif-16.asm317
-rw-r--r--libavfilter/yadif.h50
265 files changed, 60500 insertions, 4683 deletions
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 7b94f22..2c56e38 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -1,6 +1,10 @@
+include $(SUBDIR)../config.mak
+
NAME = avfilter
-HEADERS = avfilter.h \
+HEADERS = asrc_abuffer.h \
+ avcodec.h \
+ avfilter.h \
avfiltergraph.h \
buffersink.h \
buffersrc.h \
@@ -16,83 +20,238 @@ OBJS = allfilters.o \
drawutils.o \
fifo.o \
formats.o \
+ graphdump.o \
graphparser.o \
+ opencl_allkernels.o \
+ transform.o \
video.o \
+
+OBJS-$(CONFIG_AVCODEC) += avcodec.o
+
+OBJS-$(CONFIG_ADELAY_FILTER) += af_adelay.o
+OBJS-$(CONFIG_AECHO_FILTER) += af_aecho.o
+OBJS-$(CONFIG_AEVAL_FILTER) += aeval.o
+OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
+OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o
+OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
+OBJS-$(CONFIG_APAD_FILTER) += af_apad.o
+OBJS-$(CONFIG_APERMS_FILTER) += f_perms.o
+OBJS-$(CONFIG_APHASER_FILTER) += af_aphaser.o generate_wave_table.o
+OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
+OBJS-$(CONFIG_ASELECT_FILTER) += f_select.o
+OBJS-$(CONFIG_ASENDCMD_FILTER) += f_sendcmd.o
+OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o
OBJS-$(CONFIG_ASETPTS_FILTER) += setpts.o
+OBJS-$(CONFIG_ASETRATE_FILTER) += af_asetrate.o
OBJS-$(CONFIG_ASETTB_FILTER) += settb.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
+OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
+OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o
OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o
+OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
+OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
+OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o
+OBJS-$(CONFIG_BASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_BIQUAD_FILTER) += af_biquads.o
OBJS-$(CONFIG_BS2B_FILTER) += af_bs2b.o
OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o
OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o
OBJS-$(CONFIG_COMPAND_FILTER) += af_compand.o
+OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o
+OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o
+OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o
+OBJS-$(CONFIG_FLANGER_FILTER) += af_flanger.o generate_wave_table.o
+OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_JOIN_FILTER) += af_join.o
+OBJS-$(CONFIG_LADSPA_FILTER) += af_ladspa.o
+OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o
+OBJS-$(CONFIG_PAN_FILTER) += af_pan.o
+OBJS-$(CONFIG_REPLAYGAIN_FILTER) += af_replaygain.o
OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o
+OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o
+OBJS-$(CONFIG_SILENCEREMOVE_FILTER) += af_silenceremove.o
+OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o
OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o
+OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o
+OBJS-$(CONFIG_AEVALSRC_FILTER) += aeval.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
+OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o
+OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o
OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
+OBJS-$(CONFIG_ASS_FILTER) += vf_subtitles.o
+OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_extractplanes.o
+OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o
+OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o
+OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
+OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o
+OBJS-$(CONFIG_CODECVIEW_FILTER) += vf_codecview.o
+OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o
+OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
+OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o
OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o
OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o
OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o
+OBJS-$(CONFIG_CURVES_FILTER) += vf_curves.o
+OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o
+OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o
+OBJS-$(CONFIG_DEJUDDER_FILTER) += vf_dejudder.o
OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o
+OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o
OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o
+OBJS-$(CONFIG_DRAWGRID_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o
+OBJS-$(CONFIG_ELBG_FILTER) += vf_elbg.o
+OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o
+OBJS-$(CONFIG_EXTRACTPLANES_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o
+OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o
+OBJS-$(CONFIG_FIELDMATCH_FILTER) += vf_fieldmatch.o
OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o
OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o
+OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o
OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FRAMEPACK_FILTER) += vf_framepack.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
+OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
+OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o framesync.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
+OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
+OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
+OBJS-$(CONFIG_HQX_FILTER) += vf_hqx.o
+OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
+OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
+OBJS-$(CONFIG_IL_FILTER) += vf_il.o
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
+OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
+OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
+OBJS-$(CONFIG_LENSCORRECTION_FILTER) += vf_lenscorrection.o
+OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
+OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
+OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
+OBJS-$(CONFIG_MP_FILTER) += vf_mp.o
+OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o
+OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
-OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o
+OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o
+OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesync.o
+OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
+OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
+OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
+OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
+OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
+OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o
+OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o
+OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
+OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
+OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o
+OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o
-OBJS-$(CONFIG_SELECT_FILTER) += vf_select.o
+OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
+OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o
+OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o
OBJS-$(CONFIG_SETPTS_FILTER) += setpts.o
OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETTB_FILTER) += settb.o
OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o
OBJS-$(CONFIG_SHUFFLEPLANES_FILTER) += vf_shuffleplanes.o
+OBJS-$(CONFIG_SIGNALSTATS_FILTER) += vf_signalstats.o
+OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
OBJS-$(CONFIG_SPLIT_FILTER) += split.o
+OBJS-$(CONFIG_SPP_FILTER) += vf_spp.o
+OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
+OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o
+OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o
+OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o
+OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o
+OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o
+OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o
+OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
OBJS-$(CONFIG_TRIM_FILTER) += trim.o
OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o
OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o
+OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o
+OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o
+OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o
+OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
+OBJS-$(CONFIG_XBR_FILTER) += vf_xbr.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o
+OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o
+OBJS-$(CONFIG_ZOOMPAN_FILTER) += vf_zoompan.o
-OBJS-$(CONFIG_COLOR_FILTER) += vsrc_color.o
+OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o
+OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o
-OBJS-$(CONFIG_MOVIE_FILTER) += vsrc_movie.o
-OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_nullsrc.o
+OBJS-$(CONFIG_HALDCLUTSRC_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_LIFE_FILTER) += vsrc_life.o
+OBJS-$(CONFIG_MANDELBROT_FILTER) += vsrc_mandelbrot.o
+OBJS-$(CONFIG_MPTESTSRC_FILTER) += vsrc_mptestsrc.o
+OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_SMPTEBARS_FILTER) += vsrc_testsrc.o
+OBJS-$(CONFIG_SMPTEHDBARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/mp_image.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/img_format.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq2.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_eq.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fspp.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ilpack.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pp7.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_softpulldown.o
+OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_uspp.o
+
+# multimedia filters
+OBJS-$(CONFIG_AVECTORSCOPE_FILTER) += avf_avectorscope.o
+OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o
+OBJS-$(CONFIG_SHOWCQT_FILTER) += avf_showcqt.o
+OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o
+OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o
+
+# multimedia sources
+OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o
+OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o
+
+# Windows resource file
+SLIBOBJS-$(HAVE_GNU_WINDRES) += avfilterres.o
+
+SKIPHEADERS-$(CONFIG_LIBVIDSTAB) += vidstabutils.h
+SKIPHEADERS-$(CONFIG_OPENCL) += opencl_internal.h deshake_opencl_kernel.h unsharp_opencl_kernel.h
+
OBJS-$(HAVE_THREADS) += pthread.o
+OBJS-$(CONFIG_SHARED) += log2_tab.o
TOOLS = graph2dot
-TESTPROGS = filtfmts
+TESTPROGS = drawutils filtfmts formats
+
+TOOLS-$(CONFIG_LIBZMQ) += zmqsend
+
+clean::
+ $(RM) $(CLEANSUFFIXES:%=libavfilter/libmpcodecs/%)
diff --git a/libavfilter/aeval.c b/libavfilter/aeval.c
new file mode 100644
index 0000000..45629a9
--- /dev/null
+++ b/libavfilter/aeval.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * eval audio source
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+static const char * const var_names[] = {
+ "ch", ///< the value of the current channel
+ "n", ///< number of frame
+ "nb_in_channels",
+ "nb_out_channels",
+ "t", ///< timestamp expressed in seconds
+ "s", ///< sample rate
+ NULL
+};
+
+enum var_name {
+ VAR_CH,
+ VAR_N,
+ VAR_NB_IN_CHANNELS,
+ VAR_NB_OUT_CHANNELS,
+ VAR_T,
+ VAR_S,
+ VAR_VARS_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ char *sample_rate_str;
+ int sample_rate;
+ int64_t chlayout;
+ char *chlayout_str;
+ int nb_channels; ///< number of output channels
+ int nb_in_channels; ///< number of input channels
+ int same_chlayout; ///< set output as input channel layout
+ int64_t pts;
+ AVExpr **expr;
+ char *exprs;
+ int nb_samples; ///< number of samples per requested frame
+ int64_t duration;
+ uint64_t n;
+ double var_values[VAR_VARS_NB];
+ double *channel_values;
+ int64_t out_channel_layout;
+} EvalContext;
+
+static double val(void *priv, double ch)
+{
+ EvalContext *eval = priv;
+ return eval->channel_values[FFMIN((int)ch, eval->nb_in_channels-1)];
+}
+
+static double (* const aeval_func1[])(void *, double) = { val, NULL };
+static const char * const aeval_func1_names[] = { "val", NULL };
+
+#define OFFSET(x) offsetof(EvalContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aevalsrc_options[]= {
+ { "exprs", "set the '|'-separated list of channels expressions", OFFSET(exprs), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
+ { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { "sample_rate", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "s", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+ { "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+ { "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aevalsrc);
+
+static int parse_channel_expressions(AVFilterContext *ctx,
+ int expected_nb_channels)
+{
+ EvalContext *eval = ctx->priv;
+ char *args1 = av_strdup(eval->exprs);
+ char *expr, *last_expr = NULL, *buf;
+ double (* const *func1)(void *, double) = NULL;
+ const char * const *func1_names = NULL;
+ int i, ret = 0;
+
+ if (!args1)
+ return AVERROR(ENOMEM);
+
+ if (!eval->exprs) {
+ av_log(ctx, AV_LOG_ERROR, "Channels expressions list is empty\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!strcmp(ctx->filter->name, "aeval")) {
+ func1 = aeval_func1;
+ func1_names = aeval_func1_names;
+ }
+
+#define ADD_EXPRESSION(expr_) do { \
+ if (!av_dynarray2_add((void **)&eval->expr, &eval->nb_channels, \
+ sizeof(*eval->expr), NULL)) { \
+ ret = AVERROR(ENOMEM); \
+ goto end; \
+ } \
+ eval->expr[eval->nb_channels-1] = NULL; \
+ ret = av_expr_parse(&eval->expr[eval->nb_channels - 1], expr_, \
+ var_names, func1_names, func1, \
+ NULL, NULL, 0, ctx); \
+ if (ret < 0) \
+ goto end; \
+ } while (0)
+
+ /* reset expressions */
+ for (i = 0; i < eval->nb_channels; i++) {
+ av_expr_free(eval->expr[i]);
+ eval->expr[i] = NULL;
+ }
+ av_freep(&eval->expr);
+ eval->nb_channels = 0;
+
+ buf = args1;
+ while (expr = av_strtok(buf, "|", &buf)) {
+ ADD_EXPRESSION(expr);
+ last_expr = expr;
+ }
+
+ if (expected_nb_channels > eval->nb_channels)
+ for (i = eval->nb_channels; i < expected_nb_channels; i++)
+ ADD_EXPRESSION(last_expr);
+
+ if (expected_nb_channels > 0 && eval->nb_channels != expected_nb_channels) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Mismatch between the specified number of channel expressions '%d' "
+ "and the number of expected output channels '%d' for the specified channel layout\n",
+ eval->nb_channels, expected_nb_channels);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+end:
+ av_free(args1);
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ EvalContext *eval = ctx->priv;
+ int ret = 0;
+
+ if (eval->chlayout_str) {
+ if (!strcmp(eval->chlayout_str, "same") && !strcmp(ctx->filter->name, "aeval")) {
+ eval->same_chlayout = 1;
+ } else {
+ ret = ff_parse_channel_layout(&eval->chlayout, NULL, eval->chlayout_str, ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = parse_channel_expressions(ctx, av_get_channel_layout_nb_channels(eval->chlayout));
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ /* guess channel layout from nb expressions/channels */
+ if ((ret = parse_channel_expressions(ctx, -1)) < 0)
+ return ret;
+
+ eval->chlayout = av_get_default_channel_layout(eval->nb_channels);
+ if (!eval->chlayout && eval->nb_channels <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n",
+ eval->nb_channels);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ if (eval->sample_rate_str)
+ if ((ret = ff_parse_sample_rate(&eval->sample_rate, eval->sample_rate_str, ctx)))
+ return ret;
+ eval->n = 0;
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ EvalContext *eval = ctx->priv;
+ int i;
+
+ for (i = 0; i < eval->nb_channels; i++) {
+ av_expr_free(eval->expr[i]);
+ eval->expr[i] = NULL;
+ }
+ av_freep(&eval->expr);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ EvalContext *eval = outlink->src->priv;
+ char buf[128];
+
+ outlink->time_base = (AVRational){1, eval->sample_rate};
+ outlink->sample_rate = eval->sample_rate;
+
+ eval->var_values[VAR_S] = eval->sample_rate;
+ eval->var_values[VAR_NB_IN_CHANNELS] = NAN;
+ eval->var_values[VAR_NB_OUT_CHANNELS] = outlink->channels;
+
+ av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout);
+
+ av_log(outlink->src, AV_LOG_VERBOSE,
+ "sample_rate:%d chlayout:%s duration:%"PRId64"\n",
+ eval->sample_rate, buf, eval->duration);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ EvalContext *eval = ctx->priv;
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE };
+ int64_t chlayouts[] = { eval->chlayout ? eval->chlayout : FF_COUNT2LAYOUT(eval->nb_channels) , -1 };
+ int sample_rates[] = { eval->sample_rate, -1 };
+
+ ff_set_common_formats (ctx, ff_make_format_list(sample_fmts));
+ ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
+ ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ EvalContext *eval = outlink->src->priv;
+ AVFrame *samplesref;
+ int i, j;
+ int64_t t = av_rescale(eval->n, AV_TIME_BASE, eval->sample_rate);
+
+ if (eval->duration >= 0 && t >= eval->duration)
+ return AVERROR_EOF;
+
+ samplesref = ff_get_audio_buffer(outlink, eval->nb_samples);
+ if (!samplesref)
+ return AVERROR(ENOMEM);
+
+ /* evaluate expression for each single sample and for each channel */
+ for (i = 0; i < eval->nb_samples; i++, eval->n++) {
+ eval->var_values[VAR_N] = eval->n;
+ eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate;
+
+ for (j = 0; j < eval->nb_channels; j++) {
+ *((double *) samplesref->extended_data[j] + i) =
+ av_expr_eval(eval->expr[j], eval->var_values, NULL);
+ }
+ }
+
+ samplesref->pts = eval->pts;
+ samplesref->sample_rate = eval->sample_rate;
+ eval->pts += eval->nb_samples;
+
+ return ff_filter_frame(outlink, samplesref);
+}
+
+#if CONFIG_AEVALSRC_FILTER
+static const AVFilterPad aevalsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_asrc_aevalsrc = {
+ .name = "aevalsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."),
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(EvalContext),
+ .inputs = NULL,
+ .outputs = aevalsrc_outputs,
+ .priv_class = &aevalsrc_class,
+};
+
+#endif /* CONFIG_AEVALSRC_FILTER */
+
+#define OFFSET(x) offsetof(EvalContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aeval_options[]= {
+ { "exprs", "set the '|'-separated list of channels expressions", OFFSET(exprs), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
+ { "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aeval);
+
+static int aeval_query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ EvalContext *eval = ctx->priv;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE
+ };
+
+ // inlink supports any channel layout
+ layouts = ff_all_channel_counts();
+ ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+
+ if (eval->same_chlayout) {
+ layouts = ff_all_channel_counts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+ } else {
+ // outlink supports only requested output channel layout
+ layouts = NULL;
+ ff_add_channel_layout(&layouts,
+ eval->out_channel_layout ? eval->out_channel_layout :
+ FF_COUNT2LAYOUT(eval->nb_channels));
+ ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+ }
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static int aeval_config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ EvalContext *eval = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret;
+
+ if (eval->same_chlayout) {
+ eval->chlayout = inlink->channel_layout;
+
+ if ((ret = parse_channel_expressions(ctx, inlink->channels)) < 0)
+ return ret;
+ }
+
+ eval->n = 0;
+ eval->nb_in_channels = eval->var_values[VAR_NB_IN_CHANNELS] = inlink->channels;
+ eval->var_values[VAR_NB_OUT_CHANNELS] = outlink->channels;
+ eval->var_values[VAR_S] = inlink->sample_rate;
+ eval->var_values[VAR_T] = NAN;
+
+ eval->channel_values = av_realloc_f(eval->channel_values,
+ inlink->channels, sizeof(*eval->channel_values));
+ if (!eval->channel_values)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ EvalContext *eval = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int nb_samples = in->nb_samples;
+ AVFrame *out;
+ double t0;
+ int i, j;
+
+ /* do volume scaling in-place if input buffer is writable */
+ out = ff_get_audio_buffer(outlink, nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, in);
+
+ t0 = TS2T(in->pts, inlink->time_base);
+
+ /* evaluate expression for each single sample and for each channel */
+ for (i = 0; i < nb_samples; i++, eval->n++) {
+ eval->var_values[VAR_N] = eval->n;
+ eval->var_values[VAR_T] = t0 + i * (double)1/inlink->sample_rate;
+
+ for (j = 0; j < inlink->channels; j++)
+ eval->channel_values[j] = *((double *) in->extended_data[j] + i);
+
+ for (j = 0; j < outlink->channels; j++) {
+ eval->var_values[VAR_CH] = j;
+ *((double *) out->extended_data[j] + i) =
+ av_expr_eval(eval->expr[j], eval->var_values, eval);
+ }
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+#if CONFIG_AEVAL_FILTER
+
+static const AVFilterPad aeval_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aeval_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = aeval_config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aeval = {
+ .name = "aeval",
+ .description = NULL_IF_CONFIG_SMALL("Filter audio signal according to a specified expression."),
+ .query_formats = aeval_query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(EvalContext),
+ .inputs = aeval_inputs,
+ .outputs = aeval_outputs,
+ .priv_class = &aeval_class,
+};
+
+#endif /* CONFIG_AEVAL_FILTER */
diff --git a/libavfilter/af_adelay.c b/libavfilter/af_adelay.c
new file mode 100644
index 0000000..ef60f43
--- /dev/null
+++ b/libavfilter/af_adelay.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct ChanDelay {
+ int delay;
+ unsigned delay_index;
+ unsigned index;
+ uint8_t *samples;
+} ChanDelay;
+
+typedef struct AudioDelayContext {
+ const AVClass *class;
+ char *delays;
+ ChanDelay *chandelay;
+ int nb_delays;
+ int block_align;
+ unsigned max_delay;
+ int64_t next_pts;
+
+ void (*delay_channel)(ChanDelay *d, int nb_samples,
+ const uint8_t *src, uint8_t *dst);
+} AudioDelayContext;
+
+#define OFFSET(x) offsetof(AudioDelayContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption adelay_options[] = {
+ { "delays", "set list of delays for each channel", OFFSET(delays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(adelay);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+#define DELAY(name, type, fill) \
+static void delay_channel_## name ##p(ChanDelay *d, int nb_samples, \
+ const uint8_t *ssrc, uint8_t *ddst) \
+{ \
+ const type *src = (type *)ssrc; \
+ type *dst = (type *)ddst; \
+ type *samples = (type *)d->samples; \
+ \
+ while (nb_samples) { \
+ if (d->delay_index < d->delay) { \
+ const int len = FFMIN(nb_samples, d->delay - d->delay_index); \
+ \
+ memcpy(&samples[d->delay_index], src, len * sizeof(type)); \
+ memset(dst, fill, len * sizeof(type)); \
+ d->delay_index += len; \
+ src += len; \
+ dst += len; \
+ nb_samples -= len; \
+ } else { \
+ *dst = samples[d->index]; \
+ samples[d->index] = *src; \
+ nb_samples--; \
+ d->index++; \
+ src++, dst++; \
+ d->index = d->index >= d->delay ? 0 : d->index; \
+ } \
+ } \
+}
+
+DELAY(u8, uint8_t, 0x80)
+DELAY(s16, int16_t, 0)
+DELAY(s32, int32_t, 0)
+DELAY(flt, float, 0)
+DELAY(dbl, double, 0)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioDelayContext *s = ctx->priv;
+ char *p, *arg, *saveptr = NULL;
+ int i;
+
+ s->chandelay = av_calloc(inlink->channels, sizeof(*s->chandelay));
+ if (!s->chandelay)
+ return AVERROR(ENOMEM);
+ s->nb_delays = inlink->channels;
+ s->block_align = av_get_bytes_per_sample(inlink->format);
+
+ p = s->delays;
+ for (i = 0; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+ float delay;
+
+ if (!(arg = av_strtok(p, "|", &saveptr)))
+ break;
+
+ p = NULL;
+ sscanf(arg, "%f", &delay);
+
+ d->delay = delay * inlink->sample_rate / 1000.0;
+ if (d->delay < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Delay must be non negative number.\n");
+ return AVERROR(EINVAL);
+ }
+ }
+
+ for (i = 0; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+
+ if (!d->delay)
+ continue;
+
+ d->samples = av_malloc_array(d->delay, s->block_align);
+ if (!d->samples)
+ return AVERROR(ENOMEM);
+
+ s->max_delay = FFMAX(s->max_delay, d->delay);
+ }
+
+ if (!s->max_delay) {
+ av_log(ctx, AV_LOG_ERROR, "At least one delay >0 must be specified.\n");
+ return AVERROR(EINVAL);
+ }
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_U8P : s->delay_channel = delay_channel_u8p ; break;
+ case AV_SAMPLE_FMT_S16P: s->delay_channel = delay_channel_s16p; break;
+ case AV_SAMPLE_FMT_S32P: s->delay_channel = delay_channel_s32p; break;
+ case AV_SAMPLE_FMT_FLTP: s->delay_channel = delay_channel_fltp; break;
+ case AV_SAMPLE_FMT_DBLP: s->delay_channel = delay_channel_dblp; break;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioDelayContext *s = ctx->priv;
+ AVFrame *out_frame;
+ int i;
+
+ if (ctx->is_disabled || !s->delays)
+ return ff_filter_frame(ctx->outputs[0], frame);
+
+ out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
+ if (!out_frame)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_frame, frame);
+
+ for (i = 0; i < s->nb_delays; i++) {
+ ChanDelay *d = &s->chandelay[i];
+ const uint8_t *src = frame->extended_data[i];
+ uint8_t *dst = out_frame->extended_data[i];
+
+ if (!d->delay)
+ memcpy(dst, src, frame->nb_samples * s->block_align);
+ else
+ s->delay_channel(d, frame->nb_samples, src, dst);
+ }
+
+ s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+ av_frame_free(&frame);
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioDelayContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->max_delay) {
+ int nb_samples = FFMIN(s->max_delay, 2048);
+ AVFrame *frame;
+
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
+ return AVERROR(ENOMEM);
+ s->max_delay -= nb_samples;
+
+ av_samples_set_silence(frame->extended_data, 0,
+ frame->nb_samples,
+ outlink->channels,
+ frame->format);
+
+ frame->pts = s->next_pts;
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ ret = filter_frame(ctx->inputs[0], frame);
+ }
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioDelayContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < s->nb_delays; i++)
+ av_freep(&s->chandelay[i].samples);
+ av_freep(&s->chandelay);
+}
+
+static const AVFilterPad adelay_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad adelay_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_adelay = {
+ .name = "adelay",
+ .description = NULL_IF_CONFIG_SMALL("Delay one or more audio channels."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioDelayContext),
+ .priv_class = &adelay_class,
+ .uninit = uninit,
+ .inputs = adelay_inputs,
+ .outputs = adelay_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/af_aecho.c b/libavfilter/af_aecho.c
new file mode 100644
index 0000000..c26fdd4
--- /dev/null
+++ b/libavfilter/af_aecho.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct AudioEchoContext {
+ const AVClass *class;
+ float in_gain, out_gain;
+ char *delays, *decays;
+ float *delay, *decay;
+ int nb_echoes;
+ int delay_index;
+ uint8_t **delayptrs;
+ int max_samples, fade_out;
+ int *samples;
+ int64_t next_pts;
+
+ void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
+ uint8_t * const *src, uint8_t **dst,
+ int nb_samples, int channels);
+} AudioEchoContext;
+
+#define OFFSET(x) offsetof(AudioEchoContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aecho_options[] = {
+ { "in_gain", "set signal input gain", OFFSET(in_gain), AV_OPT_TYPE_FLOAT, {.dbl=0.6}, 0, 1, A },
+ { "out_gain", "set signal output gain", OFFSET(out_gain), AV_OPT_TYPE_FLOAT, {.dbl=0.3}, 0, 1, A },
+ { "delays", "set list of signal delays", OFFSET(delays), AV_OPT_TYPE_STRING, {.str="1000"}, 0, 0, A },
+ { "decays", "set list of signal decays", OFFSET(decays), AV_OPT_TYPE_STRING, {.str="0.5"}, 0, 0, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aecho);
+
+static void count_items(char *item_str, int *nb_items)
+{
+ char *p;
+
+ *nb_items = 1;
+ for (p = item_str; *p; p++) {
+ if (*p == '|')
+ (*nb_items)++;
+ }
+
+}
+
+static void fill_items(char *item_str, int *nb_items, float *items)
+{
+ char *p, *saveptr = NULL;
+ int i, new_nb_items = 0;
+
+ p = item_str;
+ for (i = 0; i < *nb_items; i++) {
+ char *tstr = av_strtok(p, "|", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%f", &items[i]) == 1;
+ }
+
+ *nb_items = new_nb_items;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioEchoContext *s = ctx->priv;
+
+ av_freep(&s->delay);
+ av_freep(&s->decay);
+ av_freep(&s->samples);
+
+ if (s->delayptrs)
+ av_freep(&s->delayptrs[0]);
+ av_freep(&s->delayptrs);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioEchoContext *s = ctx->priv;
+ int nb_delays, nb_decays, i;
+
+ if (!s->delays || !s->decays) {
+ av_log(ctx, AV_LOG_ERROR, "Missing delays and/or decays.\n");
+ return AVERROR(EINVAL);
+ }
+
+ count_items(s->delays, &nb_delays);
+ count_items(s->decays, &nb_decays);
+
+ s->delay = av_realloc_f(s->delay, nb_delays, sizeof(*s->delay));
+ s->decay = av_realloc_f(s->decay, nb_decays, sizeof(*s->decay));
+ if (!s->delay || !s->decay)
+ return AVERROR(ENOMEM);
+
+ fill_items(s->delays, &nb_delays, s->delay);
+ fill_items(s->decays, &nb_decays, s->decay);
+
+ if (nb_delays != nb_decays) {
+ av_log(ctx, AV_LOG_ERROR, "Number of delays %d differs from number of decays %d.\n", nb_delays, nb_decays);
+ return AVERROR(EINVAL);
+ }
+
+ s->nb_echoes = nb_delays;
+ if (!s->nb_echoes) {
+ av_log(ctx, AV_LOG_ERROR, "At least one decay & delay must be set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->samples = av_realloc_f(s->samples, nb_delays, sizeof(*s->samples));
+ if (!s->samples)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_delays; i++) {
+ if (s->delay[i] <= 0 || s->delay[i] > 90000) {
+ av_log(ctx, AV_LOG_ERROR, "delay[%d]: %f is out of allowed range: (0, 90000]\n", i, s->delay[i]);
+ return AVERROR(EINVAL);
+ }
+ if (s->decay[i] <= 0 || s->decay[i] > 1) {
+ av_log(ctx, AV_LOG_ERROR, "decay[%d]: %f is out of allowed range: (0, 1]\n", i, s->decay[i]);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ s->next_pts = AV_NOPTS_VALUE;
+
+ av_log(ctx, AV_LOG_DEBUG, "nb_echoes:%d\n", s->nb_echoes);
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
+
+#define ECHO(name, type, min, max) \
+static void echo_samples_## name ##p(AudioEchoContext *ctx, \
+ uint8_t **delayptrs, \
+ uint8_t * const *src, uint8_t **dst, \
+ int nb_samples, int channels) \
+{ \
+ const double out_gain = ctx->out_gain; \
+ const double in_gain = ctx->in_gain; \
+ const int nb_echoes = ctx->nb_echoes; \
+ const int max_samples = ctx->max_samples; \
+ int i, j, chan, av_uninit(index); \
+ \
+ av_assert1(channels > 0); /* would corrupt delay_index */ \
+ \
+ for (chan = 0; chan < channels; chan++) { \
+ const type *s = (type *)src[chan]; \
+ type *d = (type *)dst[chan]; \
+ type *dbuf = (type *)delayptrs[chan]; \
+ \
+ index = ctx->delay_index; \
+ for (i = 0; i < nb_samples; i++, s++, d++) { \
+ double out, in; \
+ \
+ in = *s; \
+ out = in * in_gain; \
+ for (j = 0; j < nb_echoes; j++) { \
+ int ix = index + max_samples - ctx->samples[j]; \
+ ix = MOD(ix, max_samples); \
+ out += dbuf[ix] * ctx->decay[j]; \
+ } \
+ out *= out_gain; \
+ \
+ *d = av_clipd(out, min, max); \
+ dbuf[index] = in; \
+ \
+ index = MOD(index + 1, max_samples); \
+ } \
+ } \
+ ctx->delay_index = index; \
+}
+
+ECHO(dbl, double, -1.0, 1.0 )
+ECHO(flt, float, -1.0, 1.0 )
+ECHO(s16, int16_t, INT16_MIN, INT16_MAX)
+ECHO(s32, int32_t, INT32_MIN, INT32_MAX)
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioEchoContext *s = ctx->priv;
+ float volume = 1.0;
+ int i;
+
+ for (i = 0; i < s->nb_echoes; i++) {
+ s->samples[i] = s->delay[i] * outlink->sample_rate / 1000.0;
+ s->max_samples = FFMAX(s->max_samples, s->samples[i]);
+ volume += s->decay[i];
+ }
+
+ if (s->max_samples <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Nothing to echo - missing delay samples.\n");
+ return AVERROR(EINVAL);
+ }
+ s->fade_out = s->max_samples;
+
+ if (volume * s->in_gain * s->out_gain > 1.0)
+ av_log(ctx, AV_LOG_WARNING,
+ "out_gain %f can cause saturation of output\n", s->out_gain);
+
+ switch (outlink->format) {
+ case AV_SAMPLE_FMT_DBLP: s->echo_samples = echo_samples_dblp; break;
+ case AV_SAMPLE_FMT_FLTP: s->echo_samples = echo_samples_fltp; break;
+ case AV_SAMPLE_FMT_S16P: s->echo_samples = echo_samples_s16p; break;
+ case AV_SAMPLE_FMT_S32P: s->echo_samples = echo_samples_s32p; break;
+ }
+
+
+ if (s->delayptrs)
+ av_freep(&s->delayptrs[0]);
+ av_freep(&s->delayptrs);
+
+ return av_samples_alloc_array_and_samples(&s->delayptrs, NULL,
+ outlink->channels,
+ s->max_samples,
+ outlink->format, 0);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioEchoContext *s = ctx->priv;
+ AVFrame *out_frame;
+
+ if (av_frame_is_writable(frame)) {
+ out_frame = frame;
+ } else {
+ out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
+ if (!out_frame)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_frame, frame);
+ }
+
+ s->echo_samples(s, s->delayptrs, frame->extended_data, out_frame->extended_data,
+ frame->nb_samples, inlink->channels);
+
+ s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+
+ if (frame != out_frame)
+ av_frame_free(&frame);
+
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AudioEchoContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
+ int nb_samples = FFMIN(s->fade_out, 2048);
+ AVFrame *frame;
+
+ frame = ff_get_audio_buffer(outlink, nb_samples);
+ if (!frame)
+ return AVERROR(ENOMEM);
+ s->fade_out -= nb_samples;
+
+ av_samples_set_silence(frame->extended_data, 0,
+ frame->nb_samples,
+ outlink->channels,
+ frame->format);
+
+ s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
+ frame->nb_samples, outlink->channels);
+
+ frame->pts = s->next_pts;
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ return ff_filter_frame(outlink, frame);
+ }
+
+ return ret;
+}
+
+static const AVFilterPad aecho_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aecho_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .config_props = config_output,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aecho = {
+ .name = "aecho",
+ .description = NULL_IF_CONFIG_SMALL("Add echoing to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioEchoContext),
+ .priv_class = &aecho_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = aecho_inputs,
+ .outputs = aecho_outputs,
+};
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
new file mode 100644
index 0000000..806f6f6
--- /dev/null
+++ b/libavfilter/af_afade.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * fade audio filter
+ */
+
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int type;
+ int curve;
+ int nb_samples;
+ int64_t start_sample;
+ int64_t duration;
+ int64_t start_time;
+
+ void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
+ int nb_samples, int channels, int direction,
+ int64_t start, int range, int curve);
+} AudioFadeContext;
+
+enum CurveType { TRI, QSIN, ESIN, HSIN, LOG, PAR, QUA, CUB, SQU, CBR };
+
+#define OFFSET(x) offsetof(AudioFadeContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption afade_options[] = {
+ { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
+ { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, "type" },
+ { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "type" },
+ { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "type" },
+ { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
+ { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, FLAGS },
+ { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
+ { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX, FLAGS },
+ { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
+ { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, TRI, CBR, FLAGS, "curve" },
+ { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
+ { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
+ { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
+ { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" },
+ { "par", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" },
+ { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" },
+ { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
+ { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
+ { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(afade);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioFadeContext *s = ctx->priv;
+
+ if (INT64_MAX - s->nb_samples < s->start_sample)
+ return AVERROR(EINVAL);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static double fade_gain(int curve, int64_t index, int range)
+{
+ double gain;
+
+ gain = FFMAX(0.0, FFMIN(1.0, 1.0 * index / range));
+
+ switch (curve) {
+ case QSIN:
+ gain = sin(gain * M_PI / 2.0);
+ break;
+ case ESIN:
+ gain = 1.0 - cos(M_PI / 4.0 * (pow(2.0*gain - 1, 3) + 1));
+ break;
+ case HSIN:
+ gain = (1.0 - cos(gain * M_PI)) / 2.0;
+ break;
+ case LOG:
+ gain = pow(0.1, (1 - gain) * 5.0);
+ break;
+ case PAR:
+ gain = (1 - (1 - gain) * (1 - gain));
+ break;
+ case QUA:
+ gain *= gain;
+ break;
+ case CUB:
+ gain = gain * gain * gain;
+ break;
+ case SQU:
+ gain = sqrt(gain);
+ break;
+ case CBR:
+ gain = cbrt(gain);
+ break;
+ }
+
+ return gain;
+}
+
+#define FADE_PLANAR(name, type) \
+static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
+ int nb_samples, int channels, int dir, \
+ int64_t start, int range, int curve) \
+{ \
+ int i, c; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ double gain = fade_gain(curve, start + i * dir, range); \
+ for (c = 0; c < channels; c++) { \
+ type *d = (type *)dst[c]; \
+ const type *s = (type *)src[c]; \
+ \
+ d[i] = s[i] * gain; \
+ } \
+ } \
+}
+
+#define FADE(name, type) \
+static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
+ int nb_samples, int channels, int dir, \
+ int64_t start, int range, int curve) \
+{ \
+ type *d = (type *)dst[0]; \
+ const type *s = (type *)src[0]; \
+ int i, c, k = 0; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ double gain = fade_gain(curve, start + i * dir, range); \
+ for (c = 0; c < channels; c++, k++) \
+ d[k] = s[k] * gain; \
+ } \
+}
+
+FADE_PLANAR(dbl, double)
+FADE_PLANAR(flt, float)
+FADE_PLANAR(s16, int16_t)
+FADE_PLANAR(s32, int32_t)
+
+FADE(dbl, double)
+FADE(flt, float)
+FADE(s16, int16_t)
+FADE(s32, int32_t)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioFadeContext *s = ctx->priv;
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl; break;
+ case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
+ case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt; break;
+ case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
+ case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16; break;
+ case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
+ case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32; break;
+ case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
+ }
+
+ if (s->duration)
+ s->nb_samples = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
+ if (s->start_time)
+ s->start_sample = av_rescale(s->start_time, inlink->sample_rate, AV_TIME_BASE);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AudioFadeContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int nb_samples = buf->nb_samples;
+ AVFrame *out_buf;
+ int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});
+
+ if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
+ ( s->type && (cur_sample + s->nb_samples < s->start_sample)))
+ return ff_filter_frame(outlink, buf);
+
+ if (av_frame_is_writable(buf)) {
+ out_buf = buf;
+ } else {
+ out_buf = ff_get_audio_buffer(inlink, nb_samples);
+ if (!out_buf)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_buf, buf);
+ }
+
+ if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
+ ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
+ av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
+ av_frame_get_channels(out_buf), out_buf->format);
+ } else {
+ int64_t start;
+
+ if (!s->type)
+ start = cur_sample - s->start_sample;
+ else
+ start = s->start_sample + s->nb_samples - cur_sample;
+
+ s->fade_samples(out_buf->extended_data, buf->extended_data,
+ nb_samples, av_frame_get_channels(buf),
+ s->type ? -1 : 1, start,
+ s->nb_samples, s->curve);
+ }
+
+ if (buf != out_buf)
+ av_frame_free(&buf);
+
+ return ff_filter_frame(outlink, out_buf);
+}
+
+static const AVFilterPad avfilter_af_afade_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_af_afade_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_afade = {
+ .name = "afade",
+ .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioFadeContext),
+ .init = init,
+ .inputs = avfilter_af_afade_inputs,
+ .outputs = avfilter_af_afade_outputs,
+ .priv_class = &afade_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/af_aformat.c b/libavfilter/af_aformat.c
index f074673..5fd0308 100644
--- a/libavfilter/af_aformat.c
+++ b/libavfilter/af_aformat.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Mina Nagy Zaki
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -47,19 +47,15 @@ typedef struct AFormatContext {
#define OFFSET(x) offsetof(AFormatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A },
- { "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A },
- { "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A },
- { NULL },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption aformat_options[] = {
+ { "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { NULL }
};
-static const AVClass aformat_class = {
- .class_name = "aformat filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(aformat);
#define PARSE_FORMATS(str, type, list, add_to_list, get_fmt, none, desc) \
do { \
@@ -118,7 +114,7 @@ static int query_formats(AVFilterContext *ctx)
ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates :
ff_all_samplerates());
ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts :
- ff_all_channel_layouts());
+ ff_all_channel_counts());
return 0;
}
@@ -146,7 +142,6 @@ AVFilter ff_af_aformat = {
.query_formats = query_formats,
.priv_size = sizeof(AFormatContext),
.priv_class = &aformat_class,
-
.inputs = avfilter_af_aformat_inputs,
.outputs = avfilter_af_aformat_outputs,
};
diff --git a/libavfilter/af_amerge.c b/libavfilter/af_amerge.c
new file mode 100644
index 0000000..0a0a79f
--- /dev/null
+++ b/libavfilter/af_amerge.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio merging filter
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "bufferqueue.h"
+#include "internal.h"
+
+#define SWR_CH_MAX 32
+
+typedef struct {
+ const AVClass *class;
+ int nb_inputs;
+ int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
+ int bps;
+ struct amerge_input {
+ struct FFBufQueue queue;
+ int nb_ch; /**< number of channels for the input */
+ int nb_samples;
+ int pos;
+ } *in;
+} AMergeContext;
+
+#define OFFSET(x) offsetof(AMergeContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption amerge_options[] = {
+ { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
+ AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(amerge);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AMergeContext *am = ctx->priv;
+ int i;
+
+ for (i = 0; i < am->nb_inputs; i++) {
+ if (am->in)
+ ff_bufqueue_discard_all(&am->in[i].queue);
+ if (ctx->input_pads)
+ av_freep(&ctx->input_pads[i].name);
+ }
+ av_freep(&am->in);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AMergeContext *am = ctx->priv;
+ int64_t inlayout[SWR_CH_MAX], outlayout = 0;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ int i, overlap = 0, nb_ch = 0;
+
+ for (i = 0; i < am->nb_inputs; i++) {
+ if (!ctx->inputs[i]->in_channel_layouts ||
+ !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
+ av_log(ctx, AV_LOG_WARNING,
+ "No channel layout for input %d\n", i + 1);
+ return AVERROR(EAGAIN);
+ }
+ inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
+ if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
+ char buf[256];
+ av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
+ av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
+ }
+ am->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
+ if (outlayout & inlayout[i])
+ overlap++;
+ outlayout |= inlayout[i];
+ nb_ch += am->in[i].nb_ch;
+ }
+ if (nb_ch > SWR_CH_MAX) {
+ av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
+ return AVERROR(EINVAL);
+ }
+ if (overlap) {
+ av_log(ctx, AV_LOG_WARNING,
+ "Input channel layouts overlap: "
+ "output layout will be determined by the number of distinct input channels\n");
+ for (i = 0; i < nb_ch; i++)
+ am->route[i] = i;
+ outlayout = av_get_default_channel_layout(nb_ch);
+ if (!outlayout)
+ outlayout = ((int64_t)1 << nb_ch) - 1;
+ } else {
+ int *route[SWR_CH_MAX];
+ int c, out_ch_number = 0;
+
+ route[0] = am->route;
+ for (i = 1; i < am->nb_inputs; i++)
+ route[i] = route[i - 1] + am->in[i - 1].nb_ch;
+ for (c = 0; c < 64; c++)
+ for (i = 0; i < am->nb_inputs; i++)
+ if ((inlayout[i] >> c) & 1)
+ *(route[i]++) = out_ch_number++;
+ }
+ formats = ff_make_format_list(ff_packed_sample_fmts_array);
+ ff_set_common_formats(ctx, formats);
+ for (i = 0; i < am->nb_inputs; i++) {
+ layouts = NULL;
+ ff_add_channel_layout(&layouts, inlayout[i]);
+ ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
+ }
+ layouts = NULL;
+ ff_add_channel_layout(&layouts, outlayout);
+ ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
+ ff_set_common_samplerates(ctx, ff_all_samplerates());
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AMergeContext *am = ctx->priv;
+ AVBPrint bp;
+ int i;
+
+ for (i = 1; i < am->nb_inputs; i++) {
+ if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Inputs must have the same sample rate "
+ "%d for in%d vs %d\n",
+ ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ }
+ am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
+ outlink->sample_rate = ctx->inputs[0]->sample_rate;
+ outlink->time_base = ctx->inputs[0]->time_base;
+
+ av_bprint_init(&bp, 0, 1);
+ for (i = 0; i < am->nb_inputs; i++) {
+ av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
+ av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
+ }
+ av_bprintf(&bp, " -> out:");
+ av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
+ av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AMergeContext *am = ctx->priv;
+ int i, ret;
+
+ for (i = 0; i < am->nb_inputs; i++)
+ if (!am->in[i].nb_samples)
+ if ((ret = ff_request_frame(ctx->inputs[i])) < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Copy samples from several input streams to one output stream.
+ * @param nb_inputs number of inputs
+ * @param in inputs; used only for the nb_ch field;
+ * @param route routing values;
+ * input channel i goes to output channel route[i];
+ * i < in[0].nb_ch are the channels from the first output;
+ * i >= in[0].nb_ch are the channels from the second output
+ * @param ins pointer to the samples of each inputs, in packed format;
+ * will be left at the end of the copied samples
+ * @param outs pointer to the samples of the output, in packet format;
+ * must point to a buffer big enough;
+ * will be left at the end of the copied samples
+ * @param ns number of samples to copy
+ * @param bps bytes per sample
+ */
+static inline void copy_samples(int nb_inputs, struct amerge_input in[],
+ int *route, uint8_t *ins[],
+ uint8_t **outs, int ns, int bps)
+{
+ int *route_cur;
+ int i, c, nb_ch = 0;
+
+ for (i = 0; i < nb_inputs; i++)
+ nb_ch += in[i].nb_ch;
+ while (ns--) {
+ route_cur = route;
+ for (i = 0; i < nb_inputs; i++) {
+ for (c = 0; c < in[i].nb_ch; c++) {
+ memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
+ ins[i] += bps;
+ }
+ }
+ *outs += nb_ch * bps;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AMergeContext *am = ctx->priv;
+ AVFilterLink *const outlink = ctx->outputs[0];
+ int input_number;
+ int nb_samples, ns, i;
+ AVFrame *outbuf, *inbuf[SWR_CH_MAX];
+ uint8_t *ins[SWR_CH_MAX], *outs;
+
+ for (input_number = 0; input_number < am->nb_inputs; input_number++)
+ if (inlink == ctx->inputs[input_number])
+ break;
+ av_assert1(input_number < am->nb_inputs);
+ if (ff_bufqueue_is_full(&am->in[input_number].queue)) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+ ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples));
+ am->in[input_number].nb_samples += insamples->nb_samples;
+ av_frame_free(&insamples);
+ nb_samples = am->in[0].nb_samples;
+ for (i = 1; i < am->nb_inputs; i++)
+ nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
+ if (!nb_samples)
+ return 0;
+
+ outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
+ if (!outbuf)
+ return AVERROR(ENOMEM);
+ outs = outbuf->data[0];
+ for (i = 0; i < am->nb_inputs; i++) {
+ inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
+ ins[i] = inbuf[i]->data[0] +
+ am->in[i].pos * am->in[i].nb_ch * am->bps;
+ }
+ av_frame_copy_props(outbuf, inbuf[0]);
+ outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
+ inbuf[0]->pts +
+ av_rescale_q(am->in[0].pos,
+ av_make_q(1, ctx->inputs[0]->sample_rate),
+ ctx->outputs[0]->time_base);
+
+ outbuf->nb_samples = nb_samples;
+ outbuf->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(outbuf, outlink->channels);
+
+ while (nb_samples) {
+ ns = nb_samples;
+ for (i = 0; i < am->nb_inputs; i++)
+ ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos);
+ /* Unroll the most common sample formats: speed +~350% for the loop,
+ +~13% overall (including two common decoders) */
+ switch (am->bps) {
+ case 1:
+ copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 1);
+ break;
+ case 2:
+ copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 2);
+ break;
+ case 4:
+ copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 4);
+ break;
+ default:
+ copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, am->bps);
+ break;
+ }
+
+ nb_samples -= ns;
+ for (i = 0; i < am->nb_inputs; i++) {
+ am->in[i].nb_samples -= ns;
+ am->in[i].pos += ns;
+ if (am->in[i].pos == inbuf[i]->nb_samples) {
+ am->in[i].pos = 0;
+ av_frame_free(&inbuf[i]);
+ ff_bufqueue_get(&am->in[i].queue);
+ inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
+ ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
+ }
+ }
+ }
+ return ff_filter_frame(ctx->outputs[0], outbuf);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AMergeContext *am = ctx->priv;
+ int i;
+
+ am->in = av_calloc(am->nb_inputs, sizeof(*am->in));
+ if (!am->in)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < am->nb_inputs; i++) {
+ char *name = av_asprintf("in%d", i);
+ AVFilterPad pad = {
+ .name = name,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ };
+ if (!name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, i, &pad);
+ }
+ return 0;
+}
+
+static const AVFilterPad amerge_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_amerge = {
+ .name = "amerge",
+ .description = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
+ "a single multi-channel stream."),
+ .priv_size = sizeof(AMergeContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = amerge_outputs,
+ .priv_class = &amerge_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/af_amix.c b/libavfilter/af_amix.c
index bfba150..47cbb45 100644
--- a/libavfilter/af_amix.c
+++ b/libavfilter/af_amix.c
@@ -2,20 +2,20 @@
* Audio Mix Filter
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -110,7 +110,7 @@ static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
int samples = nb_samples;
while (samples > 0) {
FrameInfo *info = frame_list->list;
- av_assert0(info != NULL);
+ av_assert0(info);
if (info->nb_samples <= samples) {
samples -= info->nb_samples;
frame_list->list = info->next;
@@ -142,7 +142,7 @@ static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t p
frame_list->list = info;
frame_list->end = info;
} else {
- av_assert0(frame_list->end != NULL);
+ av_assert0(frame_list->end);
frame_list->end->next = info;
frame_list->end = info;
}
@@ -175,27 +175,22 @@ typedef struct MixContext {
#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption amix_options[] = {
{ "inputs", "Number of inputs.",
- OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A },
+ OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A|F },
{ "duration", "How to determine the end-of-stream.",
- OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A, "duration" },
- { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A, "duration" },
- { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A, "duration" },
- { "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A, "duration" },
+ OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" },
+ { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, INT_MIN, INT_MAX, A|F, "duration" },
+ { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A|F, "duration" },
+ { "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST }, INT_MIN, INT_MAX, A|F, "duration" },
{ "dropout_transition", "Transition time, in seconds, for volume "
"renormalization when an input stream ends.",
- OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A },
- { NULL },
-};
-
-static const AVClass amix_class = {
- .class_name = "amix filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(amix);
/**
* Update the scaling factors to apply to each input during mixing.
@@ -254,7 +249,7 @@ static int config_output(AVFilterLink *outlink)
memset(s->input_state, INPUT_ON, s->nb_inputs);
s->active_inputs = s->nb_inputs;
- s->input_scale = av_mallocz(s->nb_inputs * sizeof(*s->input_scale));
+ s->input_scale = av_mallocz_array(s->nb_inputs, sizeof(*s->input_scale));
if (!s->input_scale)
return AVERROR(ENOMEM);
s->scale_norm = s->active_inputs;
@@ -533,10 +528,17 @@ static av_cold void uninit(AVFilterContext *ctx)
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts;
+
+ layouts = ff_all_channel_layouts();
+
+ if (!layouts)
+ return AVERROR(ENOMEM);
+
ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
ff_add_format(&formats, AV_SAMPLE_FMT_FLTP);
ff_set_common_formats(ctx, formats);
- ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
+ ff_set_common_channel_layouts(ctx, layouts);
ff_set_common_samplerates(ctx, ff_all_samplerates());
return 0;
}
@@ -552,17 +554,14 @@ static const AVFilterPad avfilter_af_amix_outputs[] = {
};
AVFilter ff_af_amix = {
- .name = "amix",
- .description = NULL_IF_CONFIG_SMALL("Audio mixing."),
- .priv_size = sizeof(MixContext),
- .priv_class = &amix_class,
-
+ .name = "amix",
+ .description = NULL_IF_CONFIG_SMALL("Audio mixing."),
+ .priv_size = sizeof(MixContext),
+ .priv_class = &amix_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_af_amix_outputs,
-
- .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+ .inputs = NULL,
+ .outputs = avfilter_af_amix_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/libavfilter/af_anull.c b/libavfilter/af_anull.c
index 6d7caf3..fff456e 100644
--- a/libavfilter/af_anull.c
+++ b/libavfilter/af_anull.c
@@ -1,18 +1,19 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,9 +29,8 @@
static const AVFilterPad avfilter_af_anull_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
@@ -44,12 +44,9 @@ static const AVFilterPad avfilter_af_anull_outputs[] = {
};
AVFilter ff_af_anull = {
- .name = "anull",
- .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
-
- .priv_size = 0,
-
- .inputs = avfilter_af_anull_inputs,
-
- .outputs = avfilter_af_anull_outputs,
+ .name = "anull",
+ .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
+ .query_formats = ff_query_formats_all,
+ .inputs = avfilter_af_anull_inputs,
+ .outputs = avfilter_af_anull_outputs,
};
diff --git a/libavfilter/af_apad.c b/libavfilter/af_apad.c
new file mode 100644
index 0000000..eafc705
--- /dev/null
+++ b/libavfilter/af_apad.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2012 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio pad filter.
+ *
+ * Based on af_aresample.c
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int64_t next_pts;
+
+ int packet_size;
+ int64_t pad_len, pad_len_left;
+ int64_t whole_len, whole_len_left;
+} APadContext;
+
+#define OFFSET(x) offsetof(APadContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption apad_options[] = {
+ { "packet_size", "set silence packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, { .i64 = 4096 }, 0, INT_MAX, A },
+ { "pad_len", "set number of samples of silence to add", OFFSET(pad_len), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, A },
+ { "whole_len", "set minimum target number of samples in the audio stream", OFFSET(whole_len), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, A },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(apad);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ APadContext *apad = ctx->priv;
+
+ apad->next_pts = AV_NOPTS_VALUE;
+ if (apad->whole_len >= 0 && apad->pad_len >= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Both whole and pad length are set, this is not possible\n");
+ return AVERROR(EINVAL);
+ }
+ apad->pad_len_left = apad->pad_len;
+ apad->whole_len_left = apad->whole_len;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ APadContext *apad = ctx->priv;
+
+ if (apad->whole_len >= 0) {
+ apad->whole_len_left = FFMAX(apad->whole_len_left - frame->nb_samples, 0);
+ av_log(ctx, AV_LOG_DEBUG,
+ "n_out:%d whole_len_left:%"PRId64"\n", frame->nb_samples, apad->whole_len_left);
+ }
+
+ apad->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+ return ff_filter_frame(ctx->outputs[0], frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ APadContext *apad = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && !ctx->is_disabled) {
+ int n_out = apad->packet_size;
+ AVFrame *outsamplesref;
+
+ if (apad->whole_len >= 0 && apad->pad_len < 0) {
+ apad->pad_len = apad->pad_len_left = apad->whole_len_left;
+ }
+ if (apad->pad_len >=0 || apad->whole_len >= 0) {
+ n_out = FFMIN(n_out, apad->pad_len_left);
+ apad->pad_len_left -= n_out;
+ av_log(ctx, AV_LOG_DEBUG,
+ "padding n_out:%d pad_len_left:%"PRId64"\n", n_out, apad->pad_len_left);
+ }
+
+ if (!n_out)
+ return AVERROR_EOF;
+
+ outsamplesref = ff_get_audio_buffer(outlink, n_out);
+ if (!outsamplesref)
+ return AVERROR(ENOMEM);
+
+ av_assert0(outsamplesref->sample_rate == outlink->sample_rate);
+ av_assert0(outsamplesref->nb_samples == n_out);
+
+ av_samples_set_silence(outsamplesref->extended_data, 0,
+ n_out,
+ av_frame_get_channels(outsamplesref),
+ outsamplesref->format);
+
+ outsamplesref->pts = apad->next_pts;
+ if (apad->next_pts != AV_NOPTS_VALUE)
+ apad->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ return ff_filter_frame(outlink, outsamplesref);
+ }
+ return ret;
+}
+
+static const AVFilterPad apad_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad apad_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_apad = {
+ .name = "apad",
+ .description = NULL_IF_CONFIG_SMALL("Pad audio with silence."),
+ .init = init,
+ .priv_size = sizeof(APadContext),
+ .inputs = apad_inputs,
+ .outputs = apad_outputs,
+ .priv_class = &apad_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/af_aphaser.c b/libavfilter/af_aphaser.c
new file mode 100644
index 0000000..9d8f696
--- /dev/null
+++ b/libavfilter/af_aphaser.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * phaser audio filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "generate_wave_table.h"
+
+typedef struct AudioPhaserContext {
+ const AVClass *class;
+ double in_gain, out_gain;
+ double delay;
+ double decay;
+ double speed;
+
+ enum WaveType type;
+
+ int delay_buffer_length;
+ double *delay_buffer;
+
+ int modulation_buffer_length;
+ int32_t *modulation_buffer;
+
+ int delay_pos, modulation_pos;
+
+ void (*phaser)(struct AudioPhaserContext *p,
+ uint8_t * const *src, uint8_t **dst,
+ int nb_samples, int channels);
+} AudioPhaserContext;
+
+#define OFFSET(x) offsetof(AudioPhaserContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption aphaser_options[] = {
+ { "in_gain", "set input gain", OFFSET(in_gain), AV_OPT_TYPE_DOUBLE, {.dbl=.4}, 0, 1, FLAGS },
+ { "out_gain", "set output gain", OFFSET(out_gain), AV_OPT_TYPE_DOUBLE, {.dbl=.74}, 0, 1e9, FLAGS },
+ { "delay", "set delay in milliseconds", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=3.}, 0, 5, FLAGS },
+ { "decay", "set decay", OFFSET(decay), AV_OPT_TYPE_DOUBLE, {.dbl=.4}, 0, .99, FLAGS },
+ { "speed", "set modulation speed", OFFSET(speed), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, .1, 2, FLAGS },
+ { "type", "set modulation type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=WAVE_TRI}, 0, WAVE_NB-1, FLAGS, "type" },
+ { "triangular", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
+ { "t", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
+ { "sinusoidal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
+ { "s", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(aphaser);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AudioPhaserContext *p = ctx->priv;
+
+ if (p->in_gain > (1 - p->decay * p->decay))
+ av_log(ctx, AV_LOG_WARNING, "in_gain may cause clipping\n");
+ if (p->in_gain / (1 - p->decay) > 1 / p->out_gain)
+ av_log(ctx, AV_LOG_WARNING, "out_gain may cause clipping\n");
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
+
+#define PHASER_PLANAR(name, type) \
+static void phaser_## name ##p(AudioPhaserContext *p, \
+ uint8_t * const *src, uint8_t **dst, \
+ int nb_samples, int channels) \
+{ \
+ int i, c, delay_pos, modulation_pos; \
+ \
+ av_assert0(channels > 0); \
+ for (c = 0; c < channels; c++) { \
+ type *s = (type *)src[c]; \
+ type *d = (type *)dst[c]; \
+ double *buffer = p->delay_buffer + \
+ c * p->delay_buffer_length; \
+ \
+ delay_pos = p->delay_pos; \
+ modulation_pos = p->modulation_pos; \
+ \
+ for (i = 0; i < nb_samples; i++, s++, d++) { \
+ double v = *s * p->in_gain + buffer[ \
+ MOD(delay_pos + p->modulation_buffer[ \
+ modulation_pos], \
+ p->delay_buffer_length)] * p->decay; \
+ \
+ modulation_pos = MOD(modulation_pos + 1, \
+ p->modulation_buffer_length); \
+ delay_pos = MOD(delay_pos + 1, p->delay_buffer_length); \
+ buffer[delay_pos] = v; \
+ \
+ *d = v * p->out_gain; \
+ } \
+ } \
+ \
+ p->delay_pos = delay_pos; \
+ p->modulation_pos = modulation_pos; \
+}
+
+#define PHASER(name, type) \
+static void phaser_## name (AudioPhaserContext *p, \
+ uint8_t * const *src, uint8_t **dst, \
+ int nb_samples, int channels) \
+{ \
+ int i, c, delay_pos, modulation_pos; \
+ type *s = (type *)src[0]; \
+ type *d = (type *)dst[0]; \
+ double *buffer = p->delay_buffer; \
+ \
+ delay_pos = p->delay_pos; \
+ modulation_pos = p->modulation_pos; \
+ \
+ for (i = 0; i < nb_samples; i++) { \
+ int pos = MOD(delay_pos + p->modulation_buffer[modulation_pos], \
+ p->delay_buffer_length) * channels; \
+ int npos; \
+ \
+ delay_pos = MOD(delay_pos + 1, p->delay_buffer_length); \
+ npos = delay_pos * channels; \
+ for (c = 0; c < channels; c++, s++, d++) { \
+ double v = *s * p->in_gain + buffer[pos + c] * p->decay; \
+ \
+ buffer[npos + c] = v; \
+ \
+ *d = v * p->out_gain; \
+ } \
+ \
+ modulation_pos = MOD(modulation_pos + 1, \
+ p->modulation_buffer_length); \
+ } \
+ \
+ p->delay_pos = delay_pos; \
+ p->modulation_pos = modulation_pos; \
+}
+
+PHASER_PLANAR(dbl, double)
+PHASER_PLANAR(flt, float)
+PHASER_PLANAR(s16, int16_t)
+PHASER_PLANAR(s32, int32_t)
+
+PHASER(dbl, double)
+PHASER(flt, float)
+PHASER(s16, int16_t)
+PHASER(s32, int32_t)
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioPhaserContext *p = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ p->delay_buffer_length = p->delay * 0.001 * inlink->sample_rate + 0.5;
+ p->delay_buffer = av_calloc(p->delay_buffer_length, sizeof(*p->delay_buffer) * inlink->channels);
+ p->modulation_buffer_length = inlink->sample_rate / p->speed + 0.5;
+ p->modulation_buffer = av_malloc_array(p->modulation_buffer_length, sizeof(*p->modulation_buffer));
+
+ if (!p->modulation_buffer || !p->delay_buffer)
+ return AVERROR(ENOMEM);
+
+ ff_generate_wave_table(p->type, AV_SAMPLE_FMT_S32,
+ p->modulation_buffer, p->modulation_buffer_length,
+ 1., p->delay_buffer_length, M_PI / 2.0);
+
+ p->delay_pos = p->modulation_pos = 0;
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBL: p->phaser = phaser_dbl; break;
+ case AV_SAMPLE_FMT_DBLP: p->phaser = phaser_dblp; break;
+ case AV_SAMPLE_FMT_FLT: p->phaser = phaser_flt; break;
+ case AV_SAMPLE_FMT_FLTP: p->phaser = phaser_fltp; break;
+ case AV_SAMPLE_FMT_S16: p->phaser = phaser_s16; break;
+ case AV_SAMPLE_FMT_S16P: p->phaser = phaser_s16p; break;
+ case AV_SAMPLE_FMT_S32: p->phaser = phaser_s32; break;
+ case AV_SAMPLE_FMT_S32P: p->phaser = phaser_s32p; break;
+ default: av_assert0(0);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
+{
+ AudioPhaserContext *p = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outbuf;
+
+ if (av_frame_is_writable(inbuf)) {
+ outbuf = inbuf;
+ } else {
+ outbuf = ff_get_audio_buffer(inlink, inbuf->nb_samples);
+ if (!outbuf)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(outbuf, inbuf);
+ }
+
+ p->phaser(p, inbuf->extended_data, outbuf->extended_data,
+ outbuf->nb_samples, av_frame_get_channels(outbuf));
+
+ if (inbuf != outbuf)
+ av_frame_free(&inbuf);
+
+ return ff_filter_frame(outlink, outbuf);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioPhaserContext *p = ctx->priv;
+
+ av_freep(&p->delay_buffer);
+ av_freep(&p->modulation_buffer);
+}
+
+static const AVFilterPad aphaser_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aphaser_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aphaser = {
+ .name = "aphaser",
+ .description = NULL_IF_CONFIG_SMALL("Add a phasing effect to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioPhaserContext),
+ .init = init,
+ .uninit = uninit,
+ .inputs = aphaser_inputs,
+ .outputs = aphaser_outputs,
+ .priv_class = &aphaser_class,
+};
diff --git a/libavfilter/af_aresample.c b/libavfilter/af_aresample.c
new file mode 100644
index 0000000..57ac397
--- /dev/null
+++ b/libavfilter/af_aresample.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2011 Mina Nagy Zaki
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * resampling audio filter
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
+#include "libswresample/swresample.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int sample_rate_arg;
+ double ratio;
+ struct SwrContext *swr;
+ int64_t next_pts;
+ int req_fullfilled;
+ int more_data;
+} AResampleContext;
+
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
+{
+ AResampleContext *aresample = ctx->priv;
+ int ret = 0;
+
+ aresample->next_pts = AV_NOPTS_VALUE;
+ aresample->swr = swr_alloc();
+ if (!aresample->swr) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if (opts) {
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(aresample->swr, e->key, e->value, 0)) < 0)
+ goto end;
+ }
+ av_dict_free(opts);
+ }
+ if (aresample->sample_rate_arg > 0)
+ av_opt_set_int(aresample->swr, "osr", aresample->sample_rate_arg, 0);
+end:
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AResampleContext *aresample = ctx->priv;
+ swr_free(&aresample->swr);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AResampleContext *aresample = ctx->priv;
+ int out_rate = av_get_int(aresample->swr, "osr", NULL);
+ uint64_t out_layout = av_get_int(aresample->swr, "ocl", NULL);
+ enum AVSampleFormat out_format = av_get_int(aresample->swr, "osf", NULL);
+
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
+ AVFilterFormats *out_formats;
+ AVFilterFormats *in_samplerates = ff_all_samplerates();
+ AVFilterFormats *out_samplerates;
+ AVFilterChannelLayouts *in_layouts = ff_all_channel_counts();
+ AVFilterChannelLayouts *out_layouts;
+
+ ff_formats_ref (in_formats, &inlink->out_formats);
+ ff_formats_ref (in_samplerates, &inlink->out_samplerates);
+ ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts);
+
+ if(out_rate > 0) {
+ int ratelist[] = { out_rate, -1 };
+ out_samplerates = ff_make_format_list(ratelist);
+ } else {
+ out_samplerates = ff_all_samplerates();
+ }
+ if (!out_samplerates) {
+ av_log(ctx, AV_LOG_ERROR, "Cannot allocate output samplerates.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ ff_formats_ref(out_samplerates, &outlink->in_samplerates);
+
+ if(out_format != AV_SAMPLE_FMT_NONE) {
+ int formatlist[] = { out_format, -1 };
+ out_formats = ff_make_format_list(formatlist);
+ } else
+ out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
+ ff_formats_ref(out_formats, &outlink->in_formats);
+
+ if(out_layout) {
+ int64_t layout_list[] = { out_layout, -1 };
+ out_layouts = avfilter_make_format64_list(layout_list);
+ } else
+ out_layouts = ff_all_channel_counts();
+ ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
+
+ return 0;
+}
+
+
+static int config_output(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AResampleContext *aresample = ctx->priv;
+ int out_rate;
+ uint64_t out_layout;
+ enum AVSampleFormat out_format;
+ char inchl_buf[128], outchl_buf[128];
+
+ aresample->swr = swr_alloc_set_opts(aresample->swr,
+ outlink->channel_layout, outlink->format, outlink->sample_rate,
+ inlink->channel_layout, inlink->format, inlink->sample_rate,
+ 0, ctx);
+ if (!aresample->swr)
+ return AVERROR(ENOMEM);
+ if (!inlink->channel_layout)
+ av_opt_set_int(aresample->swr, "ich", inlink->channels, 0);
+ if (!outlink->channel_layout)
+ av_opt_set_int(aresample->swr, "och", outlink->channels, 0);
+
+ ret = swr_init(aresample->swr);
+ if (ret < 0)
+ return ret;
+
+ out_rate = av_get_int(aresample->swr, "osr", NULL);
+ out_layout = av_get_int(aresample->swr, "ocl", NULL);
+ out_format = av_get_int(aresample->swr, "osf", NULL);
+ outlink->time_base = (AVRational) {1, out_rate};
+
+ av_assert0(outlink->sample_rate == out_rate);
+ av_assert0(outlink->channel_layout == out_layout || !outlink->channel_layout);
+ av_assert0(outlink->format == out_format);
+
+ aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate;
+
+ av_get_channel_layout_string(inchl_buf, sizeof(inchl_buf), inlink ->channels, inlink ->channel_layout);
+ av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), outlink->channels, outlink->channel_layout);
+
+ av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n",
+ inlink ->channels, inchl_buf, av_get_sample_fmt_name(inlink->format), inlink->sample_rate,
+ outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
+{
+ AResampleContext *aresample = inlink->dst->priv;
+ const int n_in = insamplesref->nb_samples;
+ int64_t delay;
+ int n_out = n_in * aresample->ratio + 32;
+ AVFilterLink *const outlink = inlink->dst->outputs[0];
+ AVFrame *outsamplesref;
+ int ret;
+
+ delay = swr_get_delay(aresample->swr, outlink->sample_rate);
+ if (delay > 0)
+ n_out += FFMIN(delay, FFMAX(4096, n_out));
+
+ outsamplesref = ff_get_audio_buffer(outlink, n_out);
+
+ if(!outsamplesref)
+ return AVERROR(ENOMEM);
+
+ av_frame_copy_props(outsamplesref, insamplesref);
+ outsamplesref->format = outlink->format;
+ av_frame_set_channels(outsamplesref, outlink->channels);
+ outsamplesref->channel_layout = outlink->channel_layout;
+ outsamplesref->sample_rate = outlink->sample_rate;
+
+ if(insamplesref->pts != AV_NOPTS_VALUE) {
+ int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
+ int64_t outpts= swr_next_pts(aresample->swr, inpts);
+ aresample->next_pts =
+ outsamplesref->pts = ROUNDED_DIV(outpts, inlink->sample_rate);
+ } else {
+ outsamplesref->pts = AV_NOPTS_VALUE;
+ }
+ n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
+ (void *)insamplesref->extended_data, n_in);
+ if (n_out <= 0) {
+ av_frame_free(&outsamplesref);
+ av_frame_free(&insamplesref);
+ return 0;
+ }
+
+ aresample->more_data = outsamplesref->nb_samples == n_out; // Indicate that there is probably more data in our buffers
+
+ outsamplesref->nb_samples = n_out;
+
+ ret = ff_filter_frame(outlink, outsamplesref);
+ aresample->req_fullfilled= 1;
+ av_frame_free(&insamplesref);
+ return ret;
+}
+
+static int flush_frame(AVFilterLink *outlink, int final, AVFrame **outsamplesref_ret)
+{
+ AVFilterContext *ctx = outlink->src;
+ AResampleContext *aresample = ctx->priv;
+ AVFilterLink *const inlink = outlink->src->inputs[0];
+ AVFrame *outsamplesref;
+ int n_out = 4096;
+ int64_t pts;
+
+ outsamplesref = ff_get_audio_buffer(outlink, n_out);
+ *outsamplesref_ret = outsamplesref;
+ if (!outsamplesref)
+ return AVERROR(ENOMEM);
+
+ pts = swr_next_pts(aresample->swr, INT64_MIN);
+ pts = ROUNDED_DIV(pts, inlink->sample_rate);
+
+ n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, final ? NULL : (void*)outsamplesref->extended_data, 0);
+ if (n_out <= 0) {
+ av_frame_free(&outsamplesref);
+ return (n_out == 0) ? AVERROR_EOF : n_out;
+ }
+
+ outsamplesref->sample_rate = outlink->sample_rate;
+ outsamplesref->nb_samples = n_out;
+
+ outsamplesref->pts = pts;
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AResampleContext *aresample = ctx->priv;
+ int ret;
+
+ // First try to get data from the internal buffers
+ if (aresample->more_data) {
+ AVFrame *outsamplesref;
+
+ if (flush_frame(outlink, 0, &outsamplesref) >= 0) {
+ return ff_filter_frame(outlink, outsamplesref);
+ }
+ }
+ aresample->more_data = 0;
+
+ // Second request more data from the input
+ aresample->req_fullfilled = 0;
+ do{
+ ret = ff_request_frame(ctx->inputs[0]);
+ }while(!aresample->req_fullfilled && ret>=0);
+
+ // Third if we hit the end flush
+ if (ret == AVERROR_EOF) {
+ AVFrame *outsamplesref;
+
+ if ((ret = flush_frame(outlink, 1, &outsamplesref)) < 0)
+ return ret;
+
+ return ff_filter_frame(outlink, outsamplesref);
+ }
+ return ret;
+}
+
+static const AVClass *resample_child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : swr_get_class();
+}
+
+static void *resample_child_next(void *obj, void *prev)
+{
+ AResampleContext *s = obj;
+ return prev ? NULL : s->swr;
+}
+
+#define OFFSET(x) offsetof(AResampleContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption options[] = {
+ {"sample_rate", NULL, OFFSET(sample_rate_arg), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ {NULL}
+};
+
+static const AVClass aresample_class = {
+ .class_name = "aresample",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .child_class_next = resample_child_class_next,
+ .child_next = resample_child_next,
+};
+
+static const AVFilterPad aresample_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aresample_outputs[] = {
+ {
+ .name = "default",
+ .config_props = config_output,
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aresample = {
+ .name = "aresample",
+ .description = NULL_IF_CONFIG_SMALL("Resample audio data."),
+ .init_dict = init_dict,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(AResampleContext),
+ .priv_class = &aresample_class,
+ .inputs = aresample_inputs,
+ .outputs = aresample_outputs,
+};
diff --git a/libavfilter/af_asetnsamples.c b/libavfilter/af_asetnsamples.c
new file mode 100644
index 0000000..e830643
--- /dev/null
+++ b/libavfilter/af_asetnsamples.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2012 Andrey Utkin
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Filter that changes number of samples on single output operation
+ */
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+#include "formats.h"
+
+typedef struct {
+ const AVClass *class;
+ int nb_out_samples; ///< how many samples to output
+ AVAudioFifo *fifo; ///< samples are queued here
+ int64_t next_out_pts;
+ int pad;
+} ASNSContext;
+
+#define OFFSET(x) offsetof(ASNSContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption asetnsamples_options[] = {
+ { "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "n", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { "p", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(asetnsamples);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ASNSContext *asns = ctx->priv;
+
+ asns->next_out_pts = AV_NOPTS_VALUE;
+ av_log(ctx, AV_LOG_VERBOSE, "nb_out_samples:%d pad:%d\n", asns->nb_out_samples, asns->pad);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ASNSContext *asns = ctx->priv;
+ av_audio_fifo_free(asns->fifo);
+}
+
+static int config_props_output(AVFilterLink *outlink)
+{
+ ASNSContext *asns = outlink->src->priv;
+
+ asns->fifo = av_audio_fifo_alloc(outlink->format, outlink->channels, asns->nb_out_samples);
+ if (!asns->fifo)
+ return AVERROR(ENOMEM);
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
+ return 0;
+}
+
+static int push_samples(AVFilterLink *outlink)
+{
+ ASNSContext *asns = outlink->src->priv;
+ AVFrame *outsamples = NULL;
+ int ret, nb_out_samples, nb_pad_samples;
+
+ if (asns->pad) {
+ nb_out_samples = av_audio_fifo_size(asns->fifo) ? asns->nb_out_samples : 0;
+ nb_pad_samples = nb_out_samples - FFMIN(nb_out_samples, av_audio_fifo_size(asns->fifo));
+ } else {
+ nb_out_samples = FFMIN(asns->nb_out_samples, av_audio_fifo_size(asns->fifo));
+ nb_pad_samples = 0;
+ }
+
+ if (!nb_out_samples)
+ return 0;
+
+ outsamples = ff_get_audio_buffer(outlink, nb_out_samples);
+ if (!outsamples)
+ return AVERROR(ENOMEM);
+
+ av_audio_fifo_read(asns->fifo,
+ (void **)outsamples->extended_data, nb_out_samples);
+
+ if (nb_pad_samples)
+ av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples,
+ nb_pad_samples, outlink->channels,
+ outlink->format);
+ outsamples->nb_samples = nb_out_samples;
+ outsamples->channel_layout = outlink->channel_layout;
+ outsamples->sample_rate = outlink->sample_rate;
+ outsamples->pts = asns->next_out_pts;
+
+ if (asns->next_out_pts != AV_NOPTS_VALUE)
+ asns->next_out_pts += av_rescale_q(nb_out_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ ret = ff_filter_frame(outlink, outsamples);
+ if (ret < 0)
+ return ret;
+ return nb_out_samples;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ASNSContext *asns = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int ret;
+ int nb_samples = insamples->nb_samples;
+
+ if (av_audio_fifo_space(asns->fifo) < nb_samples) {
+ av_log(ctx, AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples);
+ ret = av_audio_fifo_realloc(asns->fifo, av_audio_fifo_size(asns->fifo) + nb_samples);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Stretching audio fifo failed, discarded %d samples\n", nb_samples);
+ return -1;
+ }
+ }
+ av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples);
+ if (asns->next_out_pts == AV_NOPTS_VALUE)
+ asns->next_out_pts = insamples->pts;
+ av_frame_free(&insamples);
+
+ while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples)
+ push_samples(outlink);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret;
+
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF) {
+ ret = push_samples(outlink);
+ return ret < 0 ? ret : ret > 0 ? 0 : AVERROR_EOF;
+ }
+
+ return ret;
+}
+
+static const AVFilterPad asetnsamples_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad asetnsamples_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ .config_props = config_props_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_asetnsamples = {
+ .name = "asetnsamples",
+ .description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."),
+ .priv_size = sizeof(ASNSContext),
+ .priv_class = &asetnsamples_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = asetnsamples_inputs,
+ .outputs = asetnsamples_outputs,
+};
diff --git a/libavfilter/af_asetrate.c b/libavfilter/af_asetrate.c
new file mode 100644
index 0000000..0d06915
--- /dev/null
+++ b/libavfilter/af_asetrate.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int sample_rate;
+ int rescale_pts;
+} ASetRateContext;
+
+#define CONTEXT ASetRateContext
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
+ { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type, \
+ { .deffield = def }, min, max, FLAGS, __VA_ARGS__ }
+
+#define OPT_INT(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)
+
+static const AVOption asetrate_options[] = {
+ OPT_INT("sample_rate", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
+ OPT_INT("r", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(asetrate);
+
+static av_cold int query_formats(AVFilterContext *ctx)
+{
+ ASetRateContext *sr = ctx->priv;
+ int sample_rates[] = { sr->sample_rate, -1 };
+
+ ff_formats_ref(ff_make_format_list(sample_rates),
+ &ctx->outputs[0]->in_samplerates);
+ return 0;
+}
+
+static av_cold int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ASetRateContext *sr = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVRational intb = ctx->inputs[0]->time_base;
+ int inrate = inlink->sample_rate;
+
+ if (intb.num == 1 && intb.den == inrate) {
+ outlink->time_base.num = 1;
+ outlink->time_base.den = outlink->sample_rate;
+ } else {
+ outlink->time_base = intb;
+ sr->rescale_pts = 1;
+ if (av_q2d(intb) > 1.0 / FFMAX(inrate, outlink->sample_rate))
+ av_log(ctx, AV_LOG_WARNING, "Time base is inaccurate\n");
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ASetRateContext *sr = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ frame->sample_rate = outlink->sample_rate;
+ if (sr->rescale_pts)
+ frame->pts = av_rescale(frame->pts, inlink->sample_rate,
+ outlink->sample_rate);
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad asetrate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad asetrate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_asetrate = {
+ .name = "asetrate",
+ .description = NULL_IF_CONFIG_SMALL("Change the sample rate without "
+ "altering the data."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(ASetRateContext),
+ .inputs = asetrate_inputs,
+ .outputs = asetrate_outputs,
+ .priv_class = &asetrate_class,
+};
diff --git a/libavfilter/af_ashowinfo.c b/libavfilter/af_ashowinfo.c
index 57120c1..ee95029 100644
--- a/libavfilter/af_ashowinfo.c
+++ b/libavfilter/af_ashowinfo.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,6 +34,7 @@
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#include "libavutil/replaygain.h"
+#include "libavutil/timestamp.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
@@ -45,24 +46,8 @@ typedef struct AShowInfoContext {
* Scratch space for individual plane checksums for planar audio
*/
uint32_t *plane_checksums;
-
- /**
- * Frame counter
- */
- uint64_t frame;
} AShowInfoContext;
-static int config_input(AVFilterLink *inlink)
-{
- AShowInfoContext *s = inlink->dst->priv;
- int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
- s->plane_checksums = av_malloc(channels * sizeof(*s->plane_checksums));
- if (!s->plane_checksums)
- return AVERROR(ENOMEM);
-
- return 0;
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
AShowInfoContext *s = ctx->priv;
@@ -168,12 +153,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
AShowInfoContext *s = ctx->priv;
char chlayout_str[128];
uint32_t checksum = 0;
- int channels = av_get_channel_layout_nb_channels(buf->channel_layout);
+ int channels = inlink->channels;
int planar = av_sample_fmt_is_planar(buf->format);
int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
int data_size = buf->nb_samples * block_align;
int planes = planar ? channels : 1;
int i;
+ void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums));
+
+ if (!tmp_ptr)
+ return AVERROR(ENOMEM);
+ s->plane_checksums = tmp_ptr;
for (i = 0; i < planes; i++) {
uint8_t *data = buf->extended_data[i];
@@ -187,11 +177,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
buf->channel_layout);
av_log(ctx, AV_LOG_INFO,
- "n:%"PRIu64" pts:%"PRId64" pts_time:%f "
- "fmt:%s chlayout:%s rate:%d nb_samples:%d "
+ "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
+ "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
"checksum:%08"PRIX32" ",
- s->frame, buf->pts, buf->pts * av_q2d(inlink->time_base),
- av_get_sample_fmt_name(buf->format), chlayout_str,
+ inlink->frame_count,
+ av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
+ av_frame_get_pkt_pos(buf),
+ av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
buf->sample_rate, buf->nb_samples,
checksum);
@@ -214,19 +206,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_log(ctx, AV_LOG_INFO, "\n");
}
- s->frame++;
return ff_filter_frame(inlink->dst->outputs[0], buf);
}
static const AVFilterPad inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
- { NULL },
+ { NULL }
};
static const AVFilterPad outputs[] = {
@@ -234,7 +223,7 @@ static const AVFilterPad outputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
- { NULL },
+ { NULL }
};
AVFilter ff_af_ashowinfo = {
diff --git a/libavfilter/af_astats.c b/libavfilter/af_astats.c
new file mode 100644
index 0000000..60ccd73
--- /dev/null
+++ b/libavfilter/af_astats.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2009 Rob Sykes <robs@users.sourceforge.net>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h>
+
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct ChannelStats {
+ double last;
+ double sigma_x, sigma_x2;
+ double avg_sigma_x2, min_sigma_x2, max_sigma_x2;
+ double min, max;
+ double min_run, max_run;
+ double min_runs, max_runs;
+ uint64_t min_count, max_count;
+ uint64_t nb_samples;
+} ChannelStats;
+
+typedef struct {
+ const AVClass *class;
+ ChannelStats *chstats;
+ int nb_channels;
+ uint64_t tc_samples;
+ double time_constant;
+ double mult;
+} AudioStatsContext;
+
+#define OFFSET(x) offsetof(AudioStatsContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption astats_options[] = {
+ { "length", "set the window length", OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, {.dbl=.05}, .01, 10, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(astats);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioStatsContext *s = outlink->src->priv;
+ int c;
+
+ s->chstats = av_calloc(sizeof(*s->chstats), outlink->channels);
+ if (!s->chstats)
+ return AVERROR(ENOMEM);
+ s->nb_channels = outlink->channels;
+ s->mult = exp((-1 / s->time_constant / outlink->sample_rate));
+ s->tc_samples = 5 * s->time_constant * outlink->sample_rate + .5;
+
+ for (c = 0; c < s->nb_channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+
+ p->min = p->min_sigma_x2 = DBL_MAX;
+ p->max = p->max_sigma_x2 = DBL_MIN;
+ }
+
+ return 0;
+}
+
+static inline void update_stat(AudioStatsContext *s, ChannelStats *p, double d)
+{
+ if (d < p->min) {
+ p->min = d;
+ p->min_run = 1;
+ p->min_runs = 0;
+ p->min_count = 1;
+ } else if (d == p->min) {
+ p->min_count++;
+ p->min_run = d == p->last ? p->min_run + 1 : 1;
+ } else if (p->last == p->min) {
+ p->min_runs += p->min_run * p->min_run;
+ }
+
+ if (d > p->max) {
+ p->max = d;
+ p->max_run = 1;
+ p->max_runs = 0;
+ p->max_count = 1;
+ } else if (d == p->max) {
+ p->max_count++;
+ p->max_run = d == p->last ? p->max_run + 1 : 1;
+ } else if (p->last == p->max) {
+ p->max_runs += p->max_run * p->max_run;
+ }
+
+ p->sigma_x += d;
+ p->sigma_x2 += d * d;
+ p->avg_sigma_x2 = p->avg_sigma_x2 * s->mult + (1.0 - s->mult) * d * d;
+ p->last = d;
+
+ if (p->nb_samples >= s->tc_samples) {
+ p->max_sigma_x2 = FFMAX(p->max_sigma_x2, p->avg_sigma_x2);
+ p->min_sigma_x2 = FFMIN(p->min_sigma_x2, p->avg_sigma_x2);
+ }
+ p->nb_samples++;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AudioStatsContext *s = inlink->dst->priv;
+ const int channels = s->nb_channels;
+ const double *src;
+ int i, c;
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBLP:
+ for (c = 0; c < channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+ src = (const double *)buf->extended_data[c];
+
+ for (i = 0; i < buf->nb_samples; i++, src++)
+ update_stat(s, p, *src);
+ }
+ break;
+ case AV_SAMPLE_FMT_DBL:
+ src = (const double *)buf->extended_data[0];
+
+ for (i = 0; i < buf->nb_samples; i++) {
+ for (c = 0; c < channels; c++, src++)
+ update_stat(s, &s->chstats[c], *src);
+ }
+ break;
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], buf);
+}
+
+#define LINEAR_TO_DB(x) (log10(x) * 20)
+
+static void print_stats(AVFilterContext *ctx)
+{
+ AudioStatsContext *s = ctx->priv;
+ uint64_t min_count = 0, max_count = 0, nb_samples = 0;
+ double min_runs = 0, max_runs = 0,
+ min = DBL_MAX, max = DBL_MIN,
+ max_sigma_x = 0,
+ sigma_x = 0,
+ sigma_x2 = 0,
+ min_sigma_x2 = DBL_MAX,
+ max_sigma_x2 = DBL_MIN;
+ int c;
+
+ for (c = 0; c < s->nb_channels; c++) {
+ ChannelStats *p = &s->chstats[c];
+
+ if (p->nb_samples < s->tc_samples)
+ p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;
+
+ min = FFMIN(min, p->min);
+ max = FFMAX(max, p->max);
+ min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
+ max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
+ sigma_x += p->sigma_x;
+ sigma_x2 += p->sigma_x2;
+ min_count += p->min_count;
+ max_count += p->max_count;
+ min_runs += p->min_runs;
+ max_runs += p->max_runs;
+ nb_samples += p->nb_samples;
+ if (fabs(p->sigma_x) > fabs(max_sigma_x))
+ max_sigma_x = p->sigma_x;
+
+ av_log(ctx, AV_LOG_INFO, "Channel: %d\n", c + 1);
+ av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", p->sigma_x / p->nb_samples);
+ av_log(ctx, AV_LOG_INFO, "Min level: %f\n", p->min);
+ av_log(ctx, AV_LOG_INFO, "Max level: %f\n", p->max);
+ av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-p->min, p->max)));
+ av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
+ av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
+ if (p->min_sigma_x2 != 1)
+ av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n",LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
+ av_log(ctx, AV_LOG_INFO, "Crest factor: %f\n", p->sigma_x2 ? FFMAX(-p->min, p->max) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
+ av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
+ av_log(ctx, AV_LOG_INFO, "Peak count: %"PRId64"\n", p->min_count + p->max_count);
+ }
+
+ av_log(ctx, AV_LOG_INFO, "Overall\n");
+ av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", max_sigma_x / (nb_samples / s->nb_channels));
+ av_log(ctx, AV_LOG_INFO, "Min level: %f\n", min);
+ av_log(ctx, AV_LOG_INFO, "Max level: %f\n", max);
+ av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-min, max)));
+ av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
+ av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(max_sigma_x2)));
+ if (min_sigma_x2 != 1)
+ av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n", LINEAR_TO_DB(sqrt(min_sigma_x2)));
+ av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
+ av_log(ctx, AV_LOG_INFO, "Peak count: %f\n", (min_count + max_count) / (double)s->nb_channels);
+ av_log(ctx, AV_LOG_INFO, "Number of samples: %"PRId64"\n", nb_samples / s->nb_channels);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioStatsContext *s = ctx->priv;
+
+ print_stats(ctx);
+ av_freep(&s->chstats);
+}
+
+static const AVFilterPad astats_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad astats_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_astats = {
+ .name = "astats",
+ .description = NULL_IF_CONFIG_SMALL("Show time domain statistics about audio frames."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioStatsContext),
+ .priv_class = &astats_class,
+ .uninit = uninit,
+ .inputs = astats_inputs,
+ .outputs = astats_outputs,
+};
diff --git a/libavfilter/af_astreamsync.c b/libavfilter/af_astreamsync.c
new file mode 100644
index 0000000..becfe34
--- /dev/null
+++ b/libavfilter/af_astreamsync.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Stream (de)synchronization filter
+ */
+
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+#define QUEUE_SIZE 16
+
+static const char * const var_names[] = {
+ "b1", "b2",
+ "s1", "s2",
+ "t1", "t2",
+ NULL
+};
+
+enum var_name {
+ VAR_B1, VAR_B2,
+ VAR_S1, VAR_S2,
+ VAR_T1, VAR_T2,
+ VAR_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ AVExpr *expr;
+ char *expr_str;
+ double var_values[VAR_NB];
+ struct buf_queue {
+ AVFrame *buf[QUEUE_SIZE];
+ unsigned tail, nb;
+ /* buf[tail] is the oldest,
+ buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
+ } queue[2];
+ int req[2];
+ int next_out;
+ int eof; /* bitmask, one bit for each stream */
+} AStreamSyncContext;
+
+#define OFFSET(x) offsetof(AStreamSyncContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption astreamsync_options[] = {
+ { "expr", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
+ { "e", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(astreamsync);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AStreamSyncContext *as = ctx->priv;
+ int r, i;
+
+ r = av_expr_parse(&as->expr, as->expr_str, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (r < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", as->expr_str);
+ return r;
+ }
+ for (i = 0; i < 42; i++)
+ av_expr_eval(as->expr, as->var_values, NULL); /* exercize prng */
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ int i;
+ AVFilterFormats *formats, *rates;
+ AVFilterChannelLayouts *layouts;
+
+ for (i = 0; i < 2; i++) {
+ formats = ctx->inputs[i]->in_formats;
+ ff_formats_ref(formats, &ctx->inputs[i]->out_formats);
+ ff_formats_ref(formats, &ctx->outputs[i]->in_formats);
+ rates = ff_all_samplerates();
+ ff_formats_ref(rates, &ctx->inputs[i]->out_samplerates);
+ ff_formats_ref(rates, &ctx->outputs[i]->in_samplerates);
+ layouts = ctx->inputs[i]->in_channel_layouts;
+ ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
+ ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts);
+ }
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ int id = outlink == ctx->outputs[1];
+
+ outlink->sample_rate = ctx->inputs[id]->sample_rate;
+ outlink->time_base = ctx->inputs[id]->time_base;
+ return 0;
+}
+
+static int send_out(AVFilterContext *ctx, int out_id)
+{
+ AStreamSyncContext *as = ctx->priv;
+ struct buf_queue *queue = &as->queue[out_id];
+ AVFrame *buf = queue->buf[queue->tail];
+ int ret;
+
+ queue->buf[queue->tail] = NULL;
+ as->var_values[VAR_B1 + out_id]++;
+ as->var_values[VAR_S1 + out_id] += buf->nb_samples;
+ if (buf->pts != AV_NOPTS_VALUE)
+ as->var_values[VAR_T1 + out_id] =
+ av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
+ as->var_values[VAR_T1 + out_id] += buf->nb_samples /
+ (double)ctx->inputs[out_id]->sample_rate;
+ ret = ff_filter_frame(ctx->outputs[out_id], buf);
+ queue->nb--;
+ queue->tail = (queue->tail + 1) % QUEUE_SIZE;
+ if (as->req[out_id])
+ as->req[out_id]--;
+ return ret;
+}
+
+static void send_next(AVFilterContext *ctx)
+{
+ AStreamSyncContext *as = ctx->priv;
+ int i;
+
+ while (1) {
+ if (!as->queue[as->next_out].nb)
+ break;
+ send_out(ctx, as->next_out);
+ if (!as->eof)
+ as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
+ }
+ for (i = 0; i < 2; i++)
+ if (as->queue[i].nb == QUEUE_SIZE)
+ send_out(ctx, i);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AStreamSyncContext *as = ctx->priv;
+ int id = outlink == ctx->outputs[1];
+
+ as->req[id]++;
+ while (as->req[id] && !(as->eof & (1 << id))) {
+ if (as->queue[as->next_out].nb) {
+ send_next(ctx);
+ } else {
+ as->eof |= 1 << as->next_out;
+ ff_request_frame(ctx->inputs[as->next_out]);
+ if (as->eof & (1 << as->next_out))
+ as->next_out = !as->next_out;
+ }
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AStreamSyncContext *as = ctx->priv;
+ int id = inlink == ctx->inputs[1];
+
+ as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] =
+ insamples;
+ as->eof &= ~(1 << id);
+ send_next(ctx);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AStreamSyncContext *as = ctx->priv;
+
+ av_expr_free(as->expr);
+ as->expr = NULL;
+}
+
+static const AVFilterPad astreamsync_inputs[] = {
+ {
+ .name = "in1",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },{
+ .name = "in2",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad astreamsync_outputs[] = {
+ {
+ .name = "out1",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },{
+ .name = "out2",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_astreamsync = {
+ .name = "astreamsync",
+ .description = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
+ "in a configurable order."),
+ .priv_size = sizeof(AStreamSyncContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = astreamsync_inputs,
+ .outputs = astreamsync_outputs,
+ .priv_class = &astreamsync_class,
+};
diff --git a/libavfilter/af_asyncts.c b/libavfilter/af_asyncts.c
index e662c84..5f8e1f6 100644
--- a/libavfilter/af_asyncts.c
+++ b/libavfilter/af_asyncts.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -51,21 +51,17 @@ typedef struct ASyncContext {
#define OFFSET(x) offsetof(ASyncContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption asyncts_options[] = {
+ { "compensate", "Stretch/squeeze the data to make it match the timestamps", OFFSET(resample), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, A|F },
{ "min_delta", "Minimum difference between timestamps and audio data "
- "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A },
- { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A },
- { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A },
- { NULL },
+ "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F },
+ { "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F },
+ { "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
+ { NULL }
};
-static const AVClass async_class = {
- .class_name = "asyncts filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(asyncts);
static av_cold int init(AVFilterContext *ctx)
{
@@ -298,9 +294,9 @@ fail:
static const AVFilterPad avfilter_af_asyncts_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame
},
{ NULL }
};
@@ -318,13 +314,10 @@ static const AVFilterPad avfilter_af_asyncts_outputs[] = {
AVFilter ff_af_asyncts = {
.name = "asyncts",
.description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps"),
-
.init = init,
.uninit = uninit,
-
.priv_size = sizeof(ASyncContext),
- .priv_class = &async_class,
-
+ .priv_class = &asyncts_class,
.inputs = avfilter_af_asyncts_inputs,
.outputs = avfilter_af_asyncts_outputs,
};
diff --git a/libavfilter/af_atempo.c b/libavfilter/af_atempo.c
new file mode 100644
index 0000000..fcd0cb0
--- /dev/null
+++ b/libavfilter/af_atempo.c
@@ -0,0 +1,1202 @@
+/*
+ * Copyright (c) 2012 Pavel Koshevoy <pkoshevoy at gmail dot com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * tempo scaling audio filter -- an implementation of WSOLA algorithm
+ *
+ * Based on MIT licensed yaeAudioTempoFilter.h and yaeAudioFragment.h
+ * from Apprentice Video player by Pavel Koshevoy.
+ * https://sourceforge.net/projects/apprenticevideo/
+ *
+ * An explanation of SOLA algorithm is available at
+ * http://www.surina.net/article/time-and-pitch-scaling.html
+ *
+ * WSOLA is very similar to SOLA, only one major difference exists between
+ * these algorithms. SOLA shifts audio fragments along the output stream,
+ * where as WSOLA shifts audio fragments along the input stream.
+ *
+ * The advantage of WSOLA algorithm is that the overlap region size is
+ * always the same, therefore the blending function is constant and
+ * can be precomputed.
+ */
+
+#include <float.h>
+#include "libavcodec/avfft.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+
+/**
+ * A fragment of audio waveform
+ */
+typedef struct {
+ // index of the first sample of this fragment in the overall waveform;
+ // 0: input sample position
+ // 1: output sample position
+ int64_t position[2];
+
+ // original packed multi-channel samples:
+ uint8_t *data;
+
+ // number of samples in this fragment:
+ int nsamples;
+
+ // rDFT transform of the down-mixed mono fragment, used for
+ // fast waveform alignment via correlation in frequency domain:
+ FFTSample *xdat;
+} AudioFragment;
+
+/**
+ * Filter state machine states
+ */
+typedef enum {
+ YAE_LOAD_FRAGMENT,
+ YAE_ADJUST_POSITION,
+ YAE_RELOAD_FRAGMENT,
+ YAE_OUTPUT_OVERLAP_ADD,
+ YAE_FLUSH_OUTPUT,
+} FilterState;
+
+/**
+ * Filter state machine
+ */
+typedef struct {
+ const AVClass *class;
+
+ // ring-buffer of input samples, necessary because some times
+ // input fragment position may be adjusted backwards:
+ uint8_t *buffer;
+
+ // ring-buffer maximum capacity, expressed in sample rate time base:
+ int ring;
+
+ // ring-buffer house keeping:
+ int size;
+ int head;
+ int tail;
+
+ // 0: input sample position corresponding to the ring buffer tail
+ // 1: output sample position
+ int64_t position[2];
+
+ // sample format:
+ enum AVSampleFormat format;
+
+ // number of channels:
+ int channels;
+
+ // row of bytes to skip from one sample to next, across multple channels;
+ // stride = (number-of-channels * bits-per-sample-per-channel) / 8
+ int stride;
+
+ // fragment window size, power-of-two integer:
+ int window;
+
+ // Hann window coefficients, for feathering
+ // (blending) the overlapping fragment region:
+ float *hann;
+
+ // tempo scaling factor:
+ double tempo;
+
+ // a snapshot of previous fragment input and output position values
+ // captured when the tempo scale factor was set most recently:
+ int64_t origin[2];
+
+ // current/previous fragment ring-buffer:
+ AudioFragment frag[2];
+
+ // current fragment index:
+ uint64_t nfrag;
+
+ // current state:
+ FilterState state;
+
+ // for fast correlation calculation in frequency domain:
+ RDFTContext *real_to_complex;
+ RDFTContext *complex_to_real;
+ FFTSample *correlation;
+
+ // for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
+ AVFrame *dst_buffer;
+ uint8_t *dst;
+ uint8_t *dst_end;
+ uint64_t nsamples_in;
+ uint64_t nsamples_out;
+} ATempoContext;
+
+#define OFFSET(x) offsetof(ATempoContext, x)
+
+static const AVOption atempo_options[] = {
+ { "tempo", "set tempo scale factor",
+ OFFSET(tempo), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0.5, 2.0,
+ AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(atempo);
+
+inline static AudioFragment *yae_curr_frag(ATempoContext *atempo)
+{
+ return &atempo->frag[atempo->nfrag % 2];
+}
+
+inline static AudioFragment *yae_prev_frag(ATempoContext *atempo)
+{
+ return &atempo->frag[(atempo->nfrag + 1) % 2];
+}
+
+/**
+ * Reset filter to initial state, do not deallocate existing local buffers.
+ */
+static void yae_clear(ATempoContext *atempo)
+{
+ atempo->size = 0;
+ atempo->head = 0;
+ atempo->tail = 0;
+
+ atempo->nfrag = 0;
+ atempo->state = YAE_LOAD_FRAGMENT;
+
+ atempo->position[0] = 0;
+ atempo->position[1] = 0;
+
+ atempo->origin[0] = 0;
+ atempo->origin[1] = 0;
+
+ atempo->frag[0].position[0] = 0;
+ atempo->frag[0].position[1] = 0;
+ atempo->frag[0].nsamples = 0;
+
+ atempo->frag[1].position[0] = 0;
+ atempo->frag[1].position[1] = 0;
+ atempo->frag[1].nsamples = 0;
+
+ // shift left position of 1st fragment by half a window
+ // so that no re-normalization would be required for
+ // the left half of the 1st fragment:
+ atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2);
+ atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2);
+
+ av_frame_free(&atempo->dst_buffer);
+ atempo->dst = NULL;
+ atempo->dst_end = NULL;
+
+ atempo->nsamples_in = 0;
+ atempo->nsamples_out = 0;
+}
+
+/**
+ * Reset filter to initial state and deallocate all buffers.
+ */
+static void yae_release_buffers(ATempoContext *atempo)
+{
+ yae_clear(atempo);
+
+ av_freep(&atempo->frag[0].data);
+ av_freep(&atempo->frag[1].data);
+ av_freep(&atempo->frag[0].xdat);
+ av_freep(&atempo->frag[1].xdat);
+
+ av_freep(&atempo->buffer);
+ av_freep(&atempo->hann);
+ av_freep(&atempo->correlation);
+
+ av_rdft_end(atempo->real_to_complex);
+ atempo->real_to_complex = NULL;
+
+ av_rdft_end(atempo->complex_to_real);
+ atempo->complex_to_real = NULL;
+}
+
+/* av_realloc is not aligned enough; fortunately, the data does not need to
+ * be preserved */
+#define RE_MALLOC_OR_FAIL(field, field_size) \
+ do { \
+ av_freep(&field); \
+ field = av_malloc(field_size); \
+ if (!field) { \
+ yae_release_buffers(atempo); \
+ return AVERROR(ENOMEM); \
+ } \
+ } while (0)
+
+/**
+ * Prepare filter for processing audio data of given format,
+ * sample rate and number of channels.
+ */
+static int yae_reset(ATempoContext *atempo,
+ enum AVSampleFormat format,
+ int sample_rate,
+ int channels)
+{
+ const int sample_size = av_get_bytes_per_sample(format);
+ uint32_t nlevels = 0;
+ uint32_t pot;
+ int i;
+
+ atempo->format = format;
+ atempo->channels = channels;
+ atempo->stride = sample_size * channels;
+
+ // pick a segment window size:
+ atempo->window = sample_rate / 24;
+
+ // adjust window size to be a power-of-two integer:
+ nlevels = av_log2(atempo->window);
+ pot = 1 << nlevels;
+ av_assert0(pot <= atempo->window);
+
+ if (pot < atempo->window) {
+ atempo->window = pot * 2;
+ nlevels++;
+ }
+
+ // initialize audio fragment buffers:
+ RE_MALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride);
+ RE_MALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride);
+ RE_MALLOC_OR_FAIL(atempo->frag[0].xdat, atempo->window * sizeof(FFTComplex));
+ RE_MALLOC_OR_FAIL(atempo->frag[1].xdat, atempo->window * sizeof(FFTComplex));
+
+ // initialize rDFT contexts:
+ av_rdft_end(atempo->real_to_complex);
+ atempo->real_to_complex = NULL;
+
+ av_rdft_end(atempo->complex_to_real);
+ atempo->complex_to_real = NULL;
+
+ atempo->real_to_complex = av_rdft_init(nlevels + 1, DFT_R2C);
+ if (!atempo->real_to_complex) {
+ yae_release_buffers(atempo);
+ return AVERROR(ENOMEM);
+ }
+
+ atempo->complex_to_real = av_rdft_init(nlevels + 1, IDFT_C2R);
+ if (!atempo->complex_to_real) {
+ yae_release_buffers(atempo);
+ return AVERROR(ENOMEM);
+ }
+
+ RE_MALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(FFTComplex));
+
+ atempo->ring = atempo->window * 3;
+ RE_MALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride);
+
+ // initialize the Hann window function:
+ RE_MALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float));
+
+ for (i = 0; i < atempo->window; i++) {
+ double t = (double)i / (double)(atempo->window - 1);
+ double h = 0.5 * (1.0 - cos(2.0 * M_PI * t));
+ atempo->hann[i] = (float)h;
+ }
+
+ yae_clear(atempo);
+ return 0;
+}
+
+static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo)
+{
+ const AudioFragment *prev;
+ ATempoContext *atempo = ctx->priv;
+ char *tail = NULL;
+ double tempo = av_strtod(arg_tempo, &tail);
+
+ if (tail && *tail) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid tempo value '%s'\n", arg_tempo);
+ return AVERROR(EINVAL);
+ }
+
+ if (tempo < 0.5 || tempo > 2.0) {
+ av_log(ctx, AV_LOG_ERROR, "Tempo value %f exceeds [0.5, 2.0] range\n",
+ tempo);
+ return AVERROR(EINVAL);
+ }
+
+ prev = yae_prev_frag(atempo);
+ atempo->origin[0] = prev->position[0] + atempo->window / 2;
+ atempo->origin[1] = prev->position[1] + atempo->window / 2;
+ atempo->tempo = tempo;
+ return 0;
+}
+
+/**
+ * A helper macro for initializing complex data buffer with scalar data
+ * of a given type.
+ */
+#define yae_init_xdat(scalar_type, scalar_max) \
+ do { \
+ const uint8_t *src_end = src + \
+ frag->nsamples * atempo->channels * sizeof(scalar_type); \
+ \
+ FFTSample *xdat = frag->xdat; \
+ scalar_type tmp; \
+ \
+ if (atempo->channels == 1) { \
+ for (; src < src_end; xdat++) { \
+ tmp = *(const scalar_type *)src; \
+ src += sizeof(scalar_type); \
+ \
+ *xdat = (FFTSample)tmp; \
+ } \
+ } else { \
+ FFTSample s, max, ti, si; \
+ int i; \
+ \
+ for (; src < src_end; xdat++) { \
+ tmp = *(const scalar_type *)src; \
+ src += sizeof(scalar_type); \
+ \
+ max = (FFTSample)tmp; \
+ s = FFMIN((FFTSample)scalar_max, \
+ (FFTSample)fabsf(max)); \
+ \
+ for (i = 1; i < atempo->channels; i++) { \
+ tmp = *(const scalar_type *)src; \
+ src += sizeof(scalar_type); \
+ \
+ ti = (FFTSample)tmp; \
+ si = FFMIN((FFTSample)scalar_max, \
+ (FFTSample)fabsf(ti)); \
+ \
+ if (s < si) { \
+ s = si; \
+ max = ti; \
+ } \
+ } \
+ \
+ *xdat = max; \
+ } \
+ } \
+ } while (0)
+
+/**
+ * Initialize complex data buffer of a given audio fragment
+ * with down-mixed mono data of appropriate scalar type.
+ */
+static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
+{
+ // shortcuts:
+ const uint8_t *src = frag->data;
+
+ // init complex data buffer used for FFT and Correlation:
+ memset(frag->xdat, 0, sizeof(FFTComplex) * atempo->window);
+
+ if (atempo->format == AV_SAMPLE_FMT_U8) {
+ yae_init_xdat(uint8_t, 127);
+ } else if (atempo->format == AV_SAMPLE_FMT_S16) {
+ yae_init_xdat(int16_t, 32767);
+ } else if (atempo->format == AV_SAMPLE_FMT_S32) {
+ yae_init_xdat(int, 2147483647);
+ } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
+ yae_init_xdat(float, 1);
+ } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
+ yae_init_xdat(double, 1);
+ }
+}
+
+/**
+ * Populate the internal data buffer on as-needed basis.
+ *
+ * @return
+ * 0 if requested data was already available or was successfully loaded,
+ * AVERROR(EAGAIN) if more input data is required.
+ */
+static int yae_load_data(ATempoContext *atempo,
+ const uint8_t **src_ref,
+ const uint8_t *src_end,
+ int64_t stop_here)
+{
+ // shortcut:
+ const uint8_t *src = *src_ref;
+ const int read_size = stop_here - atempo->position[0];
+
+ if (stop_here <= atempo->position[0]) {
+ return 0;
+ }
+
+ // samples are not expected to be skipped:
+ av_assert0(read_size <= atempo->ring);
+
+ while (atempo->position[0] < stop_here && src < src_end) {
+ int src_samples = (src_end - src) / atempo->stride;
+
+ // load data piece-wise, in order to avoid complicating the logic:
+ int nsamples = FFMIN(read_size, src_samples);
+ int na;
+ int nb;
+
+ nsamples = FFMIN(nsamples, atempo->ring);
+ na = FFMIN(nsamples, atempo->ring - atempo->tail);
+ nb = FFMIN(nsamples - na, atempo->ring);
+
+ if (na) {
+ uint8_t *a = atempo->buffer + atempo->tail * atempo->stride;
+ memcpy(a, src, na * atempo->stride);
+
+ src += na * atempo->stride;
+ atempo->position[0] += na;
+
+ atempo->size = FFMIN(atempo->size + na, atempo->ring);
+ atempo->tail = (atempo->tail + na) % atempo->ring;
+ atempo->head =
+ atempo->size < atempo->ring ?
+ atempo->tail - atempo->size :
+ atempo->tail;
+ }
+
+ if (nb) {
+ uint8_t *b = atempo->buffer;
+ memcpy(b, src, nb * atempo->stride);
+
+ src += nb * atempo->stride;
+ atempo->position[0] += nb;
+
+ atempo->size = FFMIN(atempo->size + nb, atempo->ring);
+ atempo->tail = (atempo->tail + nb) % atempo->ring;
+ atempo->head =
+ atempo->size < atempo->ring ?
+ atempo->tail - atempo->size :
+ atempo->tail;
+ }
+ }
+
+ // pass back the updated source buffer pointer:
+ *src_ref = src;
+
+ // sanity check:
+ av_assert0(atempo->position[0] <= stop_here);
+
+ return atempo->position[0] == stop_here ? 0 : AVERROR(EAGAIN);
+}
+
+/**
+ * Populate current audio fragment data buffer.
+ *
+ * @return
+ * 0 when the fragment is ready,
+ * AVERROR(EAGAIN) if more input data is required.
+ */
+static int yae_load_frag(ATempoContext *atempo,
+ const uint8_t **src_ref,
+ const uint8_t *src_end)
+{
+ // shortcuts:
+ AudioFragment *frag = yae_curr_frag(atempo);
+ uint8_t *dst;
+ int64_t missing, start, zeros;
+ uint32_t nsamples;
+ const uint8_t *a, *b;
+ int i0, i1, n0, n1, na, nb;
+
+ int64_t stop_here = frag->position[0] + atempo->window;
+ if (src_ref && yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
+ return AVERROR(EAGAIN);
+ }
+
+ // calculate the number of samples we don't have:
+ missing =
+ stop_here > atempo->position[0] ?
+ stop_here - atempo->position[0] : 0;
+
+ nsamples =
+ missing < (int64_t)atempo->window ?
+ (uint32_t)(atempo->window - missing) : 0;
+
+ // setup the output buffer:
+ frag->nsamples = nsamples;
+ dst = frag->data;
+
+ start = atempo->position[0] - atempo->size;
+ zeros = 0;
+
+ if (frag->position[0] < start) {
+ // what we don't have we substitute with zeros:
+ zeros = FFMIN(start - frag->position[0], (int64_t)nsamples);
+ av_assert0(zeros != nsamples);
+
+ memset(dst, 0, zeros * atempo->stride);
+ dst += zeros * atempo->stride;
+ }
+
+ if (zeros == nsamples) {
+ return 0;
+ }
+
+ // get the remaining data from the ring buffer:
+ na = (atempo->head < atempo->tail ?
+ atempo->tail - atempo->head :
+ atempo->ring - atempo->head);
+
+ nb = atempo->head < atempo->tail ? 0 : atempo->tail;
+
+ // sanity check:
+ av_assert0(nsamples <= zeros + na + nb);
+
+ a = atempo->buffer + atempo->head * atempo->stride;
+ b = atempo->buffer;
+
+ i0 = frag->position[0] + zeros - start;
+ i1 = i0 < na ? 0 : i0 - na;
+
+ n0 = i0 < na ? FFMIN(na - i0, (int)(nsamples - zeros)) : 0;
+ n1 = nsamples - zeros - n0;
+
+ if (n0) {
+ memcpy(dst, a + i0 * atempo->stride, n0 * atempo->stride);
+ dst += n0 * atempo->stride;
+ }
+
+ if (n1) {
+ memcpy(dst, b + i1 * atempo->stride, n1 * atempo->stride);
+ }
+
+ return 0;
+}
+
+/**
+ * Prepare for loading next audio fragment.
+ */
+static void yae_advance_to_next_frag(ATempoContext *atempo)
+{
+ const double fragment_step = atempo->tempo * (double)(atempo->window / 2);
+
+ const AudioFragment *prev;
+ AudioFragment *frag;
+
+ atempo->nfrag++;
+ prev = yae_prev_frag(atempo);
+ frag = yae_curr_frag(atempo);
+
+ frag->position[0] = prev->position[0] + (int64_t)fragment_step;
+ frag->position[1] = prev->position[1] + atempo->window / 2;
+ frag->nsamples = 0;
+}
+
+/**
+ * Calculate cross-correlation via rDFT.
+ *
+ * Multiply two vectors of complex numbers (result of real_to_complex rDFT)
+ * and transform back via complex_to_real rDFT.
+ */
+static void yae_xcorr_via_rdft(FFTSample *xcorr,
+ RDFTContext *complex_to_real,
+ const FFTComplex *xa,
+ const FFTComplex *xb,
+ const int window)
+{
+ FFTComplex *xc = (FFTComplex *)xcorr;
+ int i;
+
+ // NOTE: first element requires special care -- Given Y = rDFT(X),
+ // Im(Y[0]) and Im(Y[N/2]) are always zero, therefore av_rdft_calc
+ // stores Re(Y[N/2]) in place of Im(Y[0]).
+
+ xc->re = xa->re * xb->re;
+ xc->im = xa->im * xb->im;
+ xa++;
+ xb++;
+ xc++;
+
+ for (i = 1; i < window; i++, xa++, xb++, xc++) {
+ xc->re = (xa->re * xb->re + xa->im * xb->im);
+ xc->im = (xa->im * xb->re - xa->re * xb->im);
+ }
+
+ // apply inverse rDFT:
+ av_rdft_calc(complex_to_real, xcorr);
+}
+
+/**
+ * Calculate alignment offset for given fragment
+ * relative to the previous fragment.
+ *
+ * @return alignment offset of current fragment relative to previous.
+ */
+static int yae_align(AudioFragment *frag,
+ const AudioFragment *prev,
+ const int window,
+ const int delta_max,
+ const int drift,
+ FFTSample *correlation,
+ RDFTContext *complex_to_real)
+{
+ int best_offset = -drift;
+ FFTSample best_metric = -FLT_MAX;
+ FFTSample *xcorr;
+
+ int i0;
+ int i1;
+ int i;
+
+ yae_xcorr_via_rdft(correlation,
+ complex_to_real,
+ (const FFTComplex *)prev->xdat,
+ (const FFTComplex *)frag->xdat,
+ window);
+
+ // identify search window boundaries:
+ i0 = FFMAX(window / 2 - delta_max - drift, 0);
+ i0 = FFMIN(i0, window);
+
+ i1 = FFMIN(window / 2 + delta_max - drift, window - window / 16);
+ i1 = FFMAX(i1, 0);
+
+ // identify cross-correlation peaks within search window:
+ xcorr = correlation + i0;
+
+ for (i = i0; i < i1; i++, xcorr++) {
+ FFTSample metric = *xcorr;
+
+ // normalize:
+ FFTSample drifti = (FFTSample)(drift + i);
+ metric *= drifti * (FFTSample)(i - i0) * (FFTSample)(i1 - i);
+
+ if (metric > best_metric) {
+ best_metric = metric;
+ best_offset = i - window / 2;
+ }
+ }
+
+ return best_offset;
+}
+
+/**
+ * Adjust current fragment position for better alignment
+ * with previous fragment.
+ *
+ * @return alignment correction.
+ */
+static int yae_adjust_position(ATempoContext *atempo)
+{
+ const AudioFragment *prev = yae_prev_frag(atempo);
+ AudioFragment *frag = yae_curr_frag(atempo);
+
+ const double prev_output_position =
+ (double)(prev->position[1] - atempo->origin[1] + atempo->window / 2);
+
+ const double ideal_output_position =
+ (double)(prev->position[0] - atempo->origin[0] + atempo->window / 2) /
+ atempo->tempo;
+
+ const int drift = (int)(prev_output_position - ideal_output_position);
+
+ const int delta_max = atempo->window / 2;
+ const int correction = yae_align(frag,
+ prev,
+ atempo->window,
+ delta_max,
+ drift,
+ atempo->correlation,
+ atempo->complex_to_real);
+
+ if (correction) {
+ // adjust fragment position:
+ frag->position[0] -= correction;
+
+ // clear so that the fragment can be reloaded:
+ frag->nsamples = 0;
+ }
+
+ return correction;
+}
+
+/**
+ * A helper macro for blending the overlap region of previous
+ * and current audio fragment.
+ */
+#define yae_blend(scalar_type) \
+ do { \
+ const scalar_type *aaa = (const scalar_type *)a; \
+ const scalar_type *bbb = (const scalar_type *)b; \
+ \
+ scalar_type *out = (scalar_type *)dst; \
+ scalar_type *out_end = (scalar_type *)dst_end; \
+ int64_t i; \
+ \
+ for (i = 0; i < overlap && out < out_end; \
+ i++, atempo->position[1]++, wa++, wb++) { \
+ float w0 = *wa; \
+ float w1 = *wb; \
+ int j; \
+ \
+ for (j = 0; j < atempo->channels; \
+ j++, aaa++, bbb++, out++) { \
+ float t0 = (float)*aaa; \
+ float t1 = (float)*bbb; \
+ \
+ *out = \
+ frag->position[0] + i < 0 ? \
+ *aaa : \
+ (scalar_type)(t0 * w0 + t1 * w1); \
+ } \
+ } \
+ dst = (uint8_t *)out; \
+ } while (0)
+
+/**
+ * Blend the overlap region of previous and current audio fragment
+ * and output the results to the given destination buffer.
+ *
+ * @return
+ * 0 if the overlap region was completely stored in the dst buffer,
+ * AVERROR(EAGAIN) if more destination buffer space is required.
+ */
+static int yae_overlap_add(ATempoContext *atempo,
+ uint8_t **dst_ref,
+ uint8_t *dst_end)
+{
+ // shortcuts:
+ const AudioFragment *prev = yae_prev_frag(atempo);
+ const AudioFragment *frag = yae_curr_frag(atempo);
+
+ const int64_t start_here = FFMAX(atempo->position[1],
+ frag->position[1]);
+
+ const int64_t stop_here = FFMIN(prev->position[1] + prev->nsamples,
+ frag->position[1] + frag->nsamples);
+
+ const int64_t overlap = stop_here - start_here;
+
+ const int64_t ia = start_here - prev->position[1];
+ const int64_t ib = start_here - frag->position[1];
+
+ const float *wa = atempo->hann + ia;
+ const float *wb = atempo->hann + ib;
+
+ const uint8_t *a = prev->data + ia * atempo->stride;
+ const uint8_t *b = frag->data + ib * atempo->stride;
+
+ uint8_t *dst = *dst_ref;
+
+ av_assert0(start_here <= stop_here &&
+ frag->position[1] <= start_here &&
+ overlap <= frag->nsamples);
+
+ if (atempo->format == AV_SAMPLE_FMT_U8) {
+ yae_blend(uint8_t);
+ } else if (atempo->format == AV_SAMPLE_FMT_S16) {
+ yae_blend(int16_t);
+ } else if (atempo->format == AV_SAMPLE_FMT_S32) {
+ yae_blend(int);
+ } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
+ yae_blend(float);
+ } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
+ yae_blend(double);
+ }
+
+ // pass-back the updated destination buffer pointer:
+ *dst_ref = dst;
+
+ return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
+}
+
+/**
+ * Feed as much data to the filter as it is able to consume
+ * and receive as much processed data in the destination buffer
+ * as it is able to produce or store.
+ */
+static void
+yae_apply(ATempoContext *atempo,
+ const uint8_t **src_ref,
+ const uint8_t *src_end,
+ uint8_t **dst_ref,
+ uint8_t *dst_end)
+{
+ while (1) {
+ if (atempo->state == YAE_LOAD_FRAGMENT) {
+ // load additional data for the current fragment:
+ if (yae_load_frag(atempo, src_ref, src_end) != 0) {
+ break;
+ }
+
+ // down-mix to mono:
+ yae_downmix(atempo, yae_curr_frag(atempo));
+
+ // apply rDFT:
+ av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);
+
+ // must load the second fragment before alignment can start:
+ if (!atempo->nfrag) {
+ yae_advance_to_next_frag(atempo);
+ continue;
+ }
+
+ atempo->state = YAE_ADJUST_POSITION;
+ }
+
+ if (atempo->state == YAE_ADJUST_POSITION) {
+ // adjust position for better alignment:
+ if (yae_adjust_position(atempo)) {
+ // reload the fragment at the corrected position, so that the
+ // Hann window blending would not require normalization:
+ atempo->state = YAE_RELOAD_FRAGMENT;
+ } else {
+ atempo->state = YAE_OUTPUT_OVERLAP_ADD;
+ }
+ }
+
+ if (atempo->state == YAE_RELOAD_FRAGMENT) {
+ // load additional data if necessary due to position adjustment:
+ if (yae_load_frag(atempo, src_ref, src_end) != 0) {
+ break;
+ }
+
+ // down-mix to mono:
+ yae_downmix(atempo, yae_curr_frag(atempo));
+
+ // apply rDFT:
+ av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);
+
+ atempo->state = YAE_OUTPUT_OVERLAP_ADD;
+ }
+
+ if (atempo->state == YAE_OUTPUT_OVERLAP_ADD) {
+ // overlap-add and output the result:
+ if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
+ break;
+ }
+
+ // advance to the next fragment, repeat:
+ yae_advance_to_next_frag(atempo);
+ atempo->state = YAE_LOAD_FRAGMENT;
+ }
+ }
+}
+
+/**
+ * Flush any buffered data from the filter.
+ *
+ * @return
+ * 0 if all data was completely stored in the dst buffer,
+ * AVERROR(EAGAIN) if more destination buffer space is required.
+ */
+static int yae_flush(ATempoContext *atempo,
+ uint8_t **dst_ref,
+ uint8_t *dst_end)
+{
+ AudioFragment *frag = yae_curr_frag(atempo);
+ int64_t overlap_end;
+ int64_t start_here;
+ int64_t stop_here;
+ int64_t offset;
+
+ const uint8_t *src;
+ uint8_t *dst;
+
+ int src_size;
+ int dst_size;
+ int nbytes;
+
+ atempo->state = YAE_FLUSH_OUTPUT;
+
+ if (atempo->position[0] == frag->position[0] + frag->nsamples &&
+ atempo->position[1] == frag->position[1] + frag->nsamples) {
+ // the current fragment is already flushed:
+ return 0;
+ }
+
+ if (frag->position[0] + frag->nsamples < atempo->position[0]) {
+ // finish loading the current (possibly partial) fragment:
+ yae_load_frag(atempo, NULL, NULL);
+
+ if (atempo->nfrag) {
+ // down-mix to mono:
+ yae_downmix(atempo, frag);
+
+ // apply rDFT:
+ av_rdft_calc(atempo->real_to_complex, frag->xdat);
+
+ // align current fragment to previous fragment:
+ if (yae_adjust_position(atempo)) {
+ // reload the current fragment due to adjusted position:
+ yae_load_frag(atempo, NULL, NULL);
+ }
+ }
+ }
+
+ // flush the overlap region:
+ overlap_end = frag->position[1] + FFMIN(atempo->window / 2,
+ frag->nsamples);
+
+ while (atempo->position[1] < overlap_end) {
+ if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
+ return AVERROR(EAGAIN);
+ }
+ }
+
+ // check whether all of the input samples have been consumed:
+ if (frag->position[0] + frag->nsamples < atempo->position[0]) {
+ yae_advance_to_next_frag(atempo);
+ return AVERROR(EAGAIN);
+ }
+
+ // flush the remainder of the current fragment:
+ start_here = FFMAX(atempo->position[1], overlap_end);
+ stop_here = frag->position[1] + frag->nsamples;
+ offset = start_here - frag->position[1];
+ av_assert0(start_here <= stop_here && frag->position[1] <= start_here);
+
+ src = frag->data + offset * atempo->stride;
+ dst = (uint8_t *)*dst_ref;
+
+ src_size = (int)(stop_here - start_here) * atempo->stride;
+ dst_size = dst_end - dst;
+ nbytes = FFMIN(src_size, dst_size);
+
+ memcpy(dst, src, nbytes);
+ dst += nbytes;
+
+ atempo->position[1] += (nbytes / atempo->stride);
+
+ // pass-back the updated destination buffer pointer:
+ *dst_ref = (uint8_t *)dst;
+
+ return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ATempoContext *atempo = ctx->priv;
+ atempo->format = AV_SAMPLE_FMT_NONE;
+ atempo->state = YAE_LOAD_FRAGMENT;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ATempoContext *atempo = ctx->priv;
+ yae_release_buffers(atempo);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterFormats *formats = NULL;
+
+ // WSOLA necessitates an internal sliding window ring buffer
+ // for incoming audio stream.
+ //
+ // Planar sample formats are too cumbersome to store in a ring buffer,
+ // therefore planar sample formats are not supported.
+ //
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts) {
+ return AVERROR(ENOMEM);
+ }
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats) {
+ return AVERROR(ENOMEM);
+ }
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats) {
+ return AVERROR(ENOMEM);
+ }
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ATempoContext *atempo = ctx->priv;
+
+ enum AVSampleFormat format = inlink->format;
+ int sample_rate = (int)inlink->sample_rate;
+ int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+
+ ctx->outputs[0]->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
+ return yae_reset(atempo, format, sample_rate, channels);
+}
+
+static int push_samples(ATempoContext *atempo,
+ AVFilterLink *outlink,
+ int n_out)
+{
+ int ret;
+
+ atempo->dst_buffer->sample_rate = outlink->sample_rate;
+ atempo->dst_buffer->nb_samples = n_out;
+
+ // adjust the PTS:
+ atempo->dst_buffer->pts =
+ av_rescale_q(atempo->nsamples_out,
+ (AVRational){ 1, outlink->sample_rate },
+ outlink->time_base);
+
+ ret = ff_filter_frame(outlink, atempo->dst_buffer);
+ atempo->dst_buffer = NULL;
+ atempo->dst = NULL;
+ atempo->dst_end = NULL;
+ if (ret < 0)
+ return ret;
+
+ atempo->nsamples_out += n_out;
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ATempoContext *atempo = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ int ret = 0;
+ int n_in = src_buffer->nb_samples;
+ int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);
+
+ const uint8_t *src = src_buffer->data[0];
+ const uint8_t *src_end = src + n_in * atempo->stride;
+
+ while (src < src_end) {
+ if (!atempo->dst_buffer) {
+ atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
+ if (!atempo->dst_buffer)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(atempo->dst_buffer, src_buffer);
+
+ atempo->dst = atempo->dst_buffer->data[0];
+ atempo->dst_end = atempo->dst + n_out * atempo->stride;
+ }
+
+ yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end);
+
+ if (atempo->dst == atempo->dst_end) {
+ int n_samples = ((atempo->dst - atempo->dst_buffer->data[0]) /
+ atempo->stride);
+ ret = push_samples(atempo, outlink, n_samples);
+ if (ret < 0)
+ goto end;
+ }
+ }
+
+ atempo->nsamples_in += n_in;
+end:
+ av_frame_free(&src_buffer);
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ATempoContext *atempo = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF) {
+ // flush the filter:
+ int n_max = atempo->ring;
+ int n_out;
+ int err = AVERROR(EAGAIN);
+
+ while (err == AVERROR(EAGAIN)) {
+ if (!atempo->dst_buffer) {
+ atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
+ if (!atempo->dst_buffer)
+ return AVERROR(ENOMEM);
+
+ atempo->dst = atempo->dst_buffer->data[0];
+ atempo->dst_end = atempo->dst + n_max * atempo->stride;
+ }
+
+ err = yae_flush(atempo, &atempo->dst, atempo->dst_end);
+
+ n_out = ((atempo->dst - atempo->dst_buffer->data[0]) /
+ atempo->stride);
+
+ if (n_out) {
+ ret = push_samples(atempo, outlink, n_out);
+ }
+ }
+
+ av_frame_free(&atempo->dst_buffer);
+ atempo->dst = NULL;
+ atempo->dst_end = NULL;
+
+ return AVERROR_EOF;
+ }
+
+ return ret;
+}
+
+static int process_command(AVFilterContext *ctx,
+ const char *cmd,
+ const char *arg,
+ char *res,
+ int res_len,
+ int flags)
+{
+ return !strcmp(cmd, "tempo") ? yae_set_tempo(ctx, arg) : AVERROR(ENOSYS);
+}
+
+static const AVFilterPad atempo_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad atempo_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_atempo = {
+ .name = "atempo",
+ .description = NULL_IF_CONFIG_SMALL("Adjust audio tempo."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .priv_size = sizeof(ATempoContext),
+ .priv_class = &atempo_class,
+ .inputs = atempo_inputs,
+ .outputs = atempo_outputs,
+};
diff --git a/libavfilter/af_biquads.c b/libavfilter/af_biquads.c
new file mode 100644
index 0000000..02bf9db
--- /dev/null
+++ b/libavfilter/af_biquads.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ * Copyright (c) 2006-2008 Rob Sykes <robs@users.sourceforge.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * 2-pole filters designed by Robert Bristow-Johnson <rbj@audioimagination.com>
+ * see http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
+ *
+ * 1-pole filters based on code (c) 2000 Chris Bagwell <cbagwell@sprynet.com>
+ * Algorithms: Recursive single pole low/high pass filter
+ * Reference: The Scientist and Engineer's Guide to Digital Signal Processing
+ *
+ * low-pass: output[N] = input[N] * A + output[N-1] * B
+ * X = exp(-2.0 * pi * Fc)
+ * A = 1 - X
+ * B = X
+ * Fc = cutoff freq / sample rate
+ *
+ * Mimics an RC low-pass filter:
+ *
+ * ---/\/\/\/\----------->
+ * |
+ * --- C
+ * ---
+ * |
+ * |
+ * V
+ *
+ * high-pass: output[N] = A0 * input[N] + A1 * input[N-1] + B1 * output[N-1]
+ * X = exp(-2.0 * pi * Fc)
+ * A0 = (1 + X) / 2
+ * A1 = -(1 + X) / 2
+ * B1 = X
+ * Fc = cutoff freq / sample rate
+ *
+ * Mimics an RC high-pass filter:
+ *
+ * || C
+ * ----||--------->
+ * || |
+ * <
+ * > R
+ * <
+ * |
+ * V
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum FilterType {
+ biquad,
+ equalizer,
+ bass,
+ treble,
+ band,
+ bandpass,
+ bandreject,
+ allpass,
+ highpass,
+ lowpass,
+};
+
+enum WidthType {
+ NONE,
+ HERTZ,
+ OCTAVE,
+ QFACTOR,
+ SLOPE,
+};
+
+typedef struct ChanCache {
+ double i1, i2;
+ double o1, o2;
+} ChanCache;
+
+typedef struct {
+ const AVClass *class;
+
+ enum FilterType filter_type;
+ enum WidthType width_type;
+ int poles;
+ int csg;
+
+ double gain;
+ double frequency;
+ double width;
+
+ double a0, a1, a2;
+ double b0, b1, b2;
+
+ ChanCache *cache;
+
+ void (*filter)(const void *ibuf, void *obuf, int len,
+ double *i1, double *i2, double *o1, double *o2,
+ double b0, double b1, double b2, double a1, double a2);
+} BiquadsContext;
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ BiquadsContext *p = ctx->priv;
+
+ if (p->filter_type != biquad) {
+ if (p->frequency <= 0 || p->width <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid frequency %f and/or width %f <= 0\n",
+ p->frequency, p->width);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+#define BIQUAD_FILTER(name, type, min, max, need_clipping) \
+static void biquad_## name (const void *input, void *output, int len, \
+ double *in1, double *in2, \
+ double *out1, double *out2, \
+ double b0, double b1, double b2, \
+ double a1, double a2) \
+{ \
+ const type *ibuf = input; \
+ type *obuf = output; \
+ double i1 = *in1; \
+ double i2 = *in2; \
+ double o1 = *out1; \
+ double o2 = *out2; \
+ int i; \
+ a1 = -a1; \
+ a2 = -a2; \
+ \
+ for (i = 0; i+1 < len; i++) { \
+ o2 = i2 * b2 + i1 * b1 + ibuf[i] * b0 + o2 * a2 + o1 * a1; \
+ i2 = ibuf[i]; \
+ if (need_clipping && o2 < min) { \
+ av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
+ obuf[i] = min; \
+ } else if (need_clipping && o2 > max) { \
+ av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
+ obuf[i] = max; \
+ } else { \
+ obuf[i] = o2; \
+ } \
+ i++; \
+ o1 = i1 * b2 + i2 * b1 + ibuf[i] * b0 + o1 * a2 + o2 * a1; \
+ i1 = ibuf[i]; \
+ if (need_clipping && o1 < min) { \
+ av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
+ obuf[i] = min; \
+ } else if (need_clipping && o1 > max) { \
+ av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
+ obuf[i] = max; \
+ } else { \
+ obuf[i] = o1; \
+ } \
+ } \
+ if (i < len) { \
+ double o0 = ibuf[i] * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2; \
+ i2 = i1; \
+ i1 = ibuf[i]; \
+ o2 = o1; \
+ o1 = o0; \
+ if (need_clipping && o0 < min) { \
+ av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
+ obuf[i] = min; \
+ } else if (need_clipping && o0 > max) { \
+ av_log(NULL, AV_LOG_WARNING, "clipping\n"); \
+ obuf[i] = max; \
+ } else { \
+ obuf[i] = o0; \
+ } \
+ } \
+ *in1 = i1; \
+ *in2 = i2; \
+ *out1 = o1; \
+ *out2 = o2; \
+}
+
+BIQUAD_FILTER(s16, int16_t, INT16_MIN, INT16_MAX, 1)
+BIQUAD_FILTER(s32, int32_t, INT32_MIN, INT32_MAX, 1)
+BIQUAD_FILTER(flt, float, -1., 1., 0)
+BIQUAD_FILTER(dbl, double, -1., 1., 0)
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ BiquadsContext *p = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ double A = exp(p->gain / 40 * log(10.));
+ double w0 = 2 * M_PI * p->frequency / inlink->sample_rate;
+ double alpha;
+
+ if (w0 > M_PI) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid frequency %f. Frequency must be less than half the sample-rate %d.\n",
+ p->frequency, inlink->sample_rate);
+ return AVERROR(EINVAL);
+ }
+
+ switch (p->width_type) {
+ case NONE:
+ alpha = 0.0;
+ break;
+ case HERTZ:
+ alpha = sin(w0) / (2 * p->frequency / p->width);
+ break;
+ case OCTAVE:
+ alpha = sin(w0) * sinh(log(2.) / 2 * p->width * w0 / sin(w0));
+ break;
+ case QFACTOR:
+ alpha = sin(w0) / (2 * p->width);
+ break;
+ case SLOPE:
+ alpha = sin(w0) / 2 * sqrt((A + 1 / A) * (1 / p->width - 1) + 2);
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ switch (p->filter_type) {
+ case biquad:
+ break;
+ case equalizer:
+ p->a0 = 1 + alpha / A;
+ p->a1 = -2 * cos(w0);
+ p->a2 = 1 - alpha / A;
+ p->b0 = 1 + alpha * A;
+ p->b1 = -2 * cos(w0);
+ p->b2 = 1 - alpha * A;
+ break;
+ case bass:
+ p->a0 = (A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
+ p->a1 = -2 * ((A - 1) + (A + 1) * cos(w0));
+ p->a2 = (A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
+ p->b0 = A * ((A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
+ p->b1 = 2 * A * ((A - 1) - (A + 1) * cos(w0));
+ p->b2 = A * ((A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
+ break;
+ case treble:
+ p->a0 = (A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
+ p->a1 = 2 * ((A - 1) - (A + 1) * cos(w0));
+ p->a2 = (A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
+ p->b0 = A * ((A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
+ p->b1 =-2 * A * ((A - 1) + (A + 1) * cos(w0));
+ p->b2 = A * ((A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);
+ break;
+ case bandpass:
+ if (p->csg) {
+ p->a0 = 1 + alpha;
+ p->a1 = -2 * cos(w0);
+ p->a2 = 1 - alpha;
+ p->b0 = sin(w0) / 2;
+ p->b1 = 0;
+ p->b2 = -sin(w0) / 2;
+ } else {
+ p->a0 = 1 + alpha;
+ p->a1 = -2 * cos(w0);
+ p->a2 = 1 - alpha;
+ p->b0 = alpha;
+ p->b1 = 0;
+ p->b2 = -alpha;
+ }
+ break;
+ case bandreject:
+ p->a0 = 1 + alpha;
+ p->a1 = -2 * cos(w0);
+ p->a2 = 1 - alpha;
+ p->b0 = 1;
+ p->b1 = -2 * cos(w0);
+ p->b2 = 1;
+ break;
+ case lowpass:
+ if (p->poles == 1) {
+ p->a0 = 1;
+ p->a1 = -exp(-w0);
+ p->a2 = 0;
+ p->b0 = 1 + p->a1;
+ p->b1 = 0;
+ p->b2 = 0;
+ } else {
+ p->a0 = 1 + alpha;
+ p->a1 = -2 * cos(w0);
+ p->a2 = 1 - alpha;
+ p->b0 = (1 - cos(w0)) / 2;
+ p->b1 = 1 - cos(w0);
+ p->b2 = (1 - cos(w0)) / 2;
+ }
+ break;
+ case highpass:
+ if (p->poles == 1) {
+ p->a0 = 1;
+ p->a1 = -exp(-w0);
+ p->a2 = 0;
+ p->b0 = (1 - p->a1) / 2;
+ p->b1 = -p->b0;
+ p->b2 = 0;
+ } else {
+ p->a0 = 1 + alpha;
+ p->a1 = -2 * cos(w0);
+ p->a2 = 1 - alpha;
+ p->b0 = (1 + cos(w0)) / 2;
+ p->b1 = -(1 + cos(w0));
+ p->b2 = (1 + cos(w0)) / 2;
+ }
+ break;
+ case allpass:
+ p->a0 = 1 + alpha;
+ p->a1 = -2 * cos(w0);
+ p->a2 = 1 - alpha;
+ p->b0 = 1 - alpha;
+ p->b1 = -2 * cos(w0);
+ p->b2 = 1 + alpha;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ p->a1 /= p->a0;
+ p->a2 /= p->a0;
+ p->b0 /= p->a0;
+ p->b1 /= p->a0;
+ p->b2 /= p->a0;
+
+ p->cache = av_realloc_f(p->cache, sizeof(ChanCache), inlink->channels);
+ if (!p->cache)
+ return AVERROR(ENOMEM);
+ memset(p->cache, 0, sizeof(ChanCache) * inlink->channels);
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_S16P: p->filter = biquad_s16; break;
+ case AV_SAMPLE_FMT_S32P: p->filter = biquad_s32; break;
+ case AV_SAMPLE_FMT_FLTP: p->filter = biquad_flt; break;
+ case AV_SAMPLE_FMT_DBLP: p->filter = biquad_dbl; break;
+ default: av_assert0(0);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ BiquadsContext *p = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out_buf;
+ int nb_samples = buf->nb_samples;
+ int ch;
+
+ if (av_frame_is_writable(buf)) {
+ out_buf = buf;
+ } else {
+ out_buf = ff_get_audio_buffer(inlink, nb_samples);
+ if (!out_buf)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_buf, buf);
+ }
+
+ for (ch = 0; ch < av_frame_get_channels(buf); ch++)
+ p->filter(buf->extended_data[ch],
+ out_buf->extended_data[ch], nb_samples,
+ &p->cache[ch].i1, &p->cache[ch].i2,
+ &p->cache[ch].o1, &p->cache[ch].o2,
+ p->b0, p->b1, p->b2, p->a1, p->a2);
+
+ if (buf != out_buf)
+ av_frame_free(&buf);
+
+ return ff_filter_frame(outlink, out_buf);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ BiquadsContext *p = ctx->priv;
+
+ av_freep(&p->cache);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+#define OFFSET(x) offsetof(BiquadsContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define DEFINE_BIQUAD_FILTER(name_, description_) \
+AVFILTER_DEFINE_CLASS(name_); \
+static av_cold int name_##_init(AVFilterContext *ctx) \
+{ \
+ BiquadsContext *p = ctx->priv; \
+ p->class = &name_##_class; \
+ p->filter_type = name_; \
+ return init(ctx); \
+} \
+ \
+AVFilter ff_af_##name_ = { \
+ .name = #name_, \
+ .description = NULL_IF_CONFIG_SMALL(description_), \
+ .priv_size = sizeof(BiquadsContext), \
+ .init = name_##_init, \
+ .uninit = uninit, \
+ .query_formats = query_formats, \
+ .inputs = inputs, \
+ .outputs = outputs, \
+ .priv_class = &name_##_class, \
+}
+
+#if CONFIG_EQUALIZER_FILTER
+static const AVOption equalizer_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
+ {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
+ {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter.");
+#endif /* CONFIG_EQUALIZER_FILTER */
+#if CONFIG_BASS_FILTER
+static const AVOption bass_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
+#endif /* CONFIG_BASS_FILTER */
+#if CONFIG_TREBLE_FILTER
+static const AVOption treble_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
+ {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
+#endif /* CONFIG_TREBLE_FILTER */
+#if CONFIG_BANDPASS_FILTER
+static const AVOption bandpass_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {"csg", "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.");
+#endif /* CONFIG_BANDPASS_FILTER */
+#if CONFIG_BANDREJECT_FILTER
+static const AVOption bandreject_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter.");
+#endif /* CONFIG_BANDREJECT_FILTER */
+#if CONFIG_LOWPASS_FILTER
+static const AVOption lowpass_options[] = {
+ {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
+ {"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.");
+#endif /* CONFIG_LOWPASS_FILTER */
+#if CONFIG_HIGHPASS_FILTER
+static const AVOption highpass_options[] = {
+ {"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
+ {"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency.");
+#endif /* CONFIG_HIGHPASS_FILTER */
+#if CONFIG_ALLPASS_FILTER
+static const AVOption allpass_options[] = {
+ {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
+ {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HERTZ}, HERTZ, SLOPE, FLAGS, "width_type"},
+ {"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
+ {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
+ {"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
+ {"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
+ {"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
+ {"w", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter.");
+#endif /* CONFIG_ALLPASS_FILTER */
+#if CONFIG_BIQUAD_FILTER
+static const AVOption biquad_options[] = {
+ {"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
+ {NULL}
+};
+
+DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients.");
+#endif /* CONFIG_BIQUAD_FILTER */
diff --git a/libavfilter/af_bs2b.c b/libavfilter/af_bs2b.c
index 25e7867..3dd0bcd 100644
--- a/libavfilter/af_bs2b.c
+++ b/libavfilter/af_bs2b.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -168,16 +168,16 @@ static int config_output(AVFilterLink *outlink)
bs2b->filter = bs2b_cross_feed_u8;
break;
case AV_SAMPLE_FMT_S16:
- bs2b->filter = bs2b_cross_feed_s16;
+ bs2b->filter = (void*)bs2b_cross_feed_s16;
break;
case AV_SAMPLE_FMT_S32:
- bs2b->filter = bs2b_cross_feed_s32;
+ bs2b->filter = (void*)bs2b_cross_feed_s32;
break;
case AV_SAMPLE_FMT_FLT:
- bs2b->filter = bs2b_cross_feed_f;
+ bs2b->filter = (void*)bs2b_cross_feed_f;
break;
case AV_SAMPLE_FMT_DBL:
- bs2b->filter = bs2b_cross_feed_d;
+ bs2b->filter = (void*)bs2b_cross_feed_d;
break;
default:
return AVERROR_BUG;
diff --git a/libavfilter/af_channelmap.c b/libavfilter/af_channelmap.c
index 3035405..c3454c5 100644
--- a/libavfilter/af_channelmap.c
+++ b/libavfilter/af_channelmap.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2012 Google, Inc.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -68,20 +68,16 @@ typedef struct ChannelMapContext {
#define OFFSET(x) offsetof(ChannelMapContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption channelmap_options[] = {
{ "map", "A comma-separated list of input channel numbers in output order.",
- OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A },
+ OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layout", "Output channel layout.",
- OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A },
- { NULL },
+ OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
+ { NULL }
};
-static const AVClass channelmap_class = {
- .class_name = "channel map filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(channelmap);
static char* split(char *message, char delim) {
char *next = strchr(message, delim);
@@ -291,10 +287,16 @@ static av_cold int channelmap_init(AVFilterContext *ctx)
static int channelmap_query_formats(AVFilterContext *ctx)
{
ChannelMapContext *s = ctx->priv;
+ AVFilterChannelLayouts *layouts;
ff_set_common_formats(ctx, ff_planar_sample_fmts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
- ff_channel_layouts_ref(ff_all_channel_layouts(), &ctx->inputs[0]->out_channel_layouts);
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+
+ ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
ff_channel_layouts_ref(s->channel_layouts, &ctx->outputs[0]->in_channel_layouts);
return 0;
@@ -316,7 +318,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
if (nch_out > nch_in) {
if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
uint8_t **new_extended_data =
- av_mallocz(nch_out * sizeof(*buf->extended_data));
+ av_mallocz_array(nch_out, sizeof(*buf->extended_data));
if (!new_extended_data) {
av_frame_free(&buf);
return AVERROR(ENOMEM);
@@ -389,7 +391,8 @@ static const AVFilterPad avfilter_af_channelmap_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = channelmap_filter_frame,
- .config_props = channelmap_config_input
+ .config_props = channelmap_config_input,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -409,7 +412,6 @@ AVFilter ff_af_channelmap = {
.query_formats = channelmap_query_formats,
.priv_size = sizeof(ChannelMapContext),
.priv_class = &channelmap_class,
-
.inputs = avfilter_af_channelmap_inputs,
.outputs = avfilter_af_channelmap_outputs,
};
diff --git a/libavfilter/af_channelsplit.c b/libavfilter/af_channelsplit.c
index 5b410fd..b3756e2 100644
--- a/libavfilter/af_channelsplit.c
+++ b/libavfilter/af_channelsplit.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -42,17 +42,13 @@ typedef struct ChannelSplitContext {
#define OFFSET(x) offsetof(ChannelSplitContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A },
- { NULL },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption channelsplit_options[] = {
+ { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F },
+ { NULL }
};
-static const AVClass channelsplit_class = {
- .class_name = "channelsplit filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(channelsplit);
static av_cold int init(AVFilterContext *ctx)
{
@@ -121,6 +117,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->channel_layout =
av_channel_layout_extract_channel(buf->channel_layout, i);
+ av_frame_set_channels(buf_out, 1);
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
@@ -132,24 +129,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
static const AVFilterPad avfilter_af_channelsplit_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
AVFilter ff_af_channelsplit = {
.name = "channelsplit",
- .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams"),
+ .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams."),
.priv_size = sizeof(ChannelSplitContext),
.priv_class = &channelsplit_class,
-
.init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_af_channelsplit_inputs,
- .outputs = NULL,
-
- .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .inputs = avfilter_af_channelsplit_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/libavfilter/af_compand.c b/libavfilter/af_compand.c
index f21c861..4ca73c4 100644
--- a/libavfilter/af_compand.c
+++ b/libavfilter/af_compand.c
@@ -5,20 +5,20 @@
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2014 Andrew Kelley
*
- * This file is part of libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,39 +27,33 @@
* audio compand filter
*/
-#include <string.h>
-
+#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
-#include "libavutil/channel_layout.h"
-#include "libavutil/common.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/mem.h"
#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
-#include "formats.h"
#include "internal.h"
typedef struct ChanParam {
- float attack;
- float decay;
- float volume;
+ double attack;
+ double decay;
+ double volume;
} ChanParam;
typedef struct CompandSegment {
- float x, y;
- float a, b;
+ double x, y;
+ double a, b;
} CompandSegment;
typedef struct CompandContext {
const AVClass *class;
- int nb_channels;
int nb_segments;
char *attacks, *decays, *points;
CompandSegment *segments;
ChanParam *channels;
- float in_min_lin;
- float out_min_lin;
+ double in_min_lin;
+ double out_min_lin;
double curve_dB;
double gain_dB;
double initial_volume;
@@ -71,12 +65,10 @@ typedef struct CompandContext {
int64_t pts;
int (*compand)(AVFilterContext *ctx, AVFrame *frame);
- /* set by filter_frame() to signal an output frame to request_frame() */
- int got_output;
} CompandContext;
#define OFFSET(x) offsetof(CompandContext, x)
-#define A AV_OPT_FLAG_AUDIO_PARAM
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption compand_options[] = {
{ "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0.3" }, 0, 0, A },
@@ -89,12 +81,7 @@ static const AVOption compand_options[] = {
{ NULL }
};
-static const AVClass compand_class = {
- .class_name = "compand filter",
- .item_name = av_default_item_name,
- .option = compand_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(compand);
static av_cold int init(AVFilterContext *ctx)
{
@@ -117,7 +104,7 @@ static int query_formats(AVFilterContext *ctx)
AVFilterChannelLayouts *layouts;
AVFilterFormats *formats;
static const enum AVSampleFormat sample_fmts[] = {
- AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
};
@@ -145,14 +132,14 @@ static void count_items(char *item_str, int *nb_items)
*nb_items = 1;
for (p = item_str; *p; p++) {
- if (*p == '|')
+ if (*p == ' ' || *p == '|')
(*nb_items)++;
}
}
-static void update_volume(ChanParam *cp, float in)
+static void update_volume(ChanParam *cp, double in)
{
- float delta = in - cp->volume;
+ double delta = in - cp->volume;
if (delta > 0.0)
cp->volume += delta * cp->attack;
@@ -160,16 +147,16 @@ static void update_volume(ChanParam *cp, float in)
cp->volume += delta * cp->decay;
}
-static float get_volume(CompandContext *s, float in_lin)
+static double get_volume(CompandContext *s, double in_lin)
{
CompandSegment *cs;
- float in_log, out_log;
+ double in_log, out_log;
int i;
if (in_lin < s->in_min_lin)
return s->out_min_lin;
- in_log = logf(in_lin);
+ in_log = log(in_lin);
for (i = 1; i < s->nb_segments; i++)
if (in_log <= s->segments[i].x)
@@ -178,14 +165,14 @@ static float get_volume(CompandContext *s, float in_lin)
in_log -= cs->x;
out_log = cs->y + in_log * (cs->a * in_log + cs->b);
- return expf(out_log);
+ return exp(out_log);
}
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
CompandContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
- const int channels = s->nb_channels;
+ const int channels = inlink->channels;
const int nb_samples = frame->nb_samples;
AVFrame *out_frame;
int chan, i;
@@ -208,14 +195,14 @@ static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
}
for (chan = 0; chan < channels; chan++) {
- const float *src = (float *)frame->extended_data[chan];
- float *dst = (float *)out_frame->extended_data[chan];
+ const double *src = (double *)frame->extended_data[chan];
+ double *dst = (double *)out_frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
for (i = 0; i < nb_samples; i++) {
update_volume(cp, fabs(src[i]));
- dst[i] = av_clipf(src[i] * get_volume(s, cp->volume), -1.0f, 1.0f);
+ dst[i] = av_clipd(src[i] * get_volume(s, cp->volume), -1, 1);
}
}
@@ -231,9 +218,9 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
CompandContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
- const int channels = s->nb_channels;
+ const int channels = inlink->channels;
const int nb_samples = frame->nb_samples;
- int chan, i, dindex = 0, oindex, count = 0;
+ int chan, i, av_uninit(dindex), oindex, av_uninit(count);
AVFrame *out_frame = NULL;
int err;
@@ -241,17 +228,19 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
}
+ av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
+
for (chan = 0; chan < channels; chan++) {
AVFrame *delay_frame = s->delay_frame;
- const float *src = (float *)frame->extended_data[chan];
- float *dbuf = (float *)delay_frame->extended_data[chan];
+ const double *src = (double *)frame->extended_data[chan];
+ double *dbuf = (double *)delay_frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
- float *dst;
+ double *dst;
count = s->delay_count;
dindex = s->delay_index;
for (i = 0, oindex = 0; i < nb_samples; i++) {
- const float in = src[i];
+ const double in = src[i];
update_volume(cp, fabs(in));
if (count >= s->delay_samples) {
@@ -273,9 +262,9 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
inlink->time_base);
}
- dst = (float *)out_frame->extended_data[chan];
- dst[oindex++] = av_clipf(dbuf[dindex] *
- get_volume(s, cp->volume), -1.0f, 1.0f);
+ dst = (double *)out_frame->extended_data[chan];
+ dst[oindex++] = av_clipd(dbuf[dindex] *
+ get_volume(s, cp->volume), -1, 1);
} else {
count++;
}
@@ -292,8 +281,6 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
if (out_frame) {
err = ff_filter_frame(ctx->outputs[0], out_frame);
- if (err >= 0)
- s->got_output = 1;
return err;
}
@@ -304,7 +291,7 @@ static int compand_drain(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
CompandContext *s = ctx->priv;
- const int channels = s->nb_channels;
+ const int channels = outlink->channels;
AVFrame *frame = NULL;
int chan, i, dindex;
@@ -316,16 +303,17 @@ static int compand_drain(AVFilterLink *outlink)
s->pts += av_rescale_q(frame->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+ av_assert0(channels > 0);
for (chan = 0; chan < channels; chan++) {
AVFrame *delay_frame = s->delay_frame;
- float *dbuf = (float *)delay_frame->extended_data[chan];
- float *dst = (float *)frame->extended_data[chan];
+ double *dbuf = (double *)delay_frame->extended_data[chan];
+ double *dst = (double *)frame->extended_data[chan];
ChanParam *cp = &s->channels[chan];
dindex = s->delay_index;
for (i = 0; i < frame->nb_samples; i++) {
- dst[i] = av_clipf(dbuf[dindex] * get_volume(s, cp->volume),
- -1.0f, 1.0f);
+ dst[i] = av_clipd(dbuf[dindex] * get_volume(s, cp->volume),
+ -1, 1);
dindex = MOD(dindex + 1, s->delay_samples);
}
}
@@ -341,9 +329,8 @@ static int config_output(AVFilterLink *outlink)
CompandContext *s = ctx->priv;
const int sample_rate = outlink->sample_rate;
double radius = s->curve_dB * M_LN10 / 20.0;
- const char *p;
- const int channels =
- av_get_channel_layout_nb_channels(outlink->channel_layout);
+ char *p, *saveptr = NULL;
+ const int channels = outlink->channels;
int nb_attacks, nb_decays, nb_points;
int new_nb_items, num;
int i;
@@ -367,7 +354,6 @@ static int config_output(AVFilterLink *outlink)
uninit(ctx);
- s->nb_channels = channels;
s->channels = av_mallocz_array(channels, sizeof(*s->channels));
s->nb_segments = (nb_points + 4) * 2;
s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));
@@ -379,34 +365,25 @@ static int config_output(AVFilterLink *outlink)
p = s->attacks;
for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
- char *tstr = av_get_token(&p, "|");
- if (!tstr)
- return AVERROR(ENOMEM);
-
- new_nb_items += sscanf(tstr, "%f", &s->channels[i].attack) == 1;
- av_freep(&tstr);
+ char *tstr = av_strtok(p, " |", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
if (s->channels[i].attack < 0) {
uninit(ctx);
return AVERROR(EINVAL);
}
- if (*p)
- p++;
}
nb_attacks = new_nb_items;
p = s->decays;
for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
- char *tstr = av_get_token(&p, "|");
- if (!tstr)
- return AVERROR(ENOMEM);
- new_nb_items += sscanf(tstr, "%f", &s->channels[i].decay) == 1;
- av_freep(&tstr);
+ char *tstr = av_strtok(p, " |", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
if (s->channels[i].decay < 0) {
uninit(ctx);
return AVERROR(EINVAL);
}
- if (*p)
- p++;
}
nb_decays = new_nb_items;
@@ -421,13 +398,9 @@ static int config_output(AVFilterLink *outlink)
#define S(x) s->segments[2 * ((x) + 1)]
p = s->points;
for (i = 0, new_nb_items = 0; i < nb_points; i++) {
- char *tstr = av_get_token(&p, "|");
- if (!tstr)
- return AVERROR(ENOMEM);
-
- err = sscanf(tstr, "%f/%f", &S(i).x, &S(i).y);
- av_freep(&tstr);
- if (err != 2) {
+ char *tstr = av_strtok(p, " |", &saveptr);
+ p = NULL;
+ if (sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
av_log(ctx, AV_LOG_ERROR,
"Invalid and/or missing input/output value.\n");
uninit(ctx);
@@ -442,8 +415,6 @@ static int config_output(AVFilterLink *outlink)
S(i).y -= S(i).x;
av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
new_nb_items++;
- if (*p)
- p++;
}
num = new_nb_items;
@@ -464,7 +435,6 @@ static int config_output(AVFilterLink *outlink)
double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
int j;
- /* here we purposefully lose precision so that we can compare floats */
if (fabs(g1 - g2))
continue;
num--;
@@ -553,6 +523,7 @@ static int config_output(AVFilterLink *outlink)
if (err)
return err;
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
s->compand = compand_delay;
return 0;
}
@@ -571,11 +542,9 @@ static int request_frame(AVFilterLink *outlink)
CompandContext *s = ctx->priv;
int ret = 0;
- s->got_output = 0;
- while (ret >= 0 && !s->got_output)
- ret = ff_request_frame(ctx->inputs[0]);
+ ret = ff_request_frame(ctx->inputs[0]);
- if (ret == AVERROR_EOF && s->delay_count)
+ if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
ret = compand_drain(outlink);
return ret;
diff --git a/libavfilter/af_earwax.c b/libavfilter/af_earwax.c
new file mode 100644
index 0000000..c310997
--- /dev/null
+++ b/libavfilter/af_earwax.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2011 Mina Nagy Zaki
+ * Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
+ * This source code is freely redistributable and may be used for any purpose.
+ * This copyright notice must be maintained. Edward Beingessner And Sundry
+ * Contributors are not responsible for the consequences of using this
+ * software.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Stereo Widening Effect. Adds audio cues to move stereo image in
+ * front of the listener. Adapted from the libsox earwax effect.
+ */
+
+#include "libavutil/channel_layout.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+
+#define NUMTAPS 64
+
+static const int8_t filt[NUMTAPS] = {
+/* 30° 330° */
+ 4, -6, /* 32 tap stereo FIR filter. */
+ 4, -11, /* One side filters as if the */
+ -1, -5, /* signal was from 30 degrees */
+ 3, 3, /* from the ear, the other as */
+ -2, 5, /* if 330 degrees. */
+ -5, 0,
+ 9, 1,
+ 6, 3, /* Input */
+ -4, -1, /* Left Right */
+ -5, -3, /* __________ __________ */
+ -2, -5, /* | | | | */
+ -7, 1, /* .---| Hh,0(f) | | Hh,0(f) |---. */
+ 6, -7, /* / |__________| |__________| \ */
+ 30, -29, /* / \ / \ */
+ 12, -3, /* / X \ */
+ -11, 4, /* / / \ \ */
+ -3, 7, /* ____V_____ __________V V__________ _____V____ */
+ -20, 23, /* | | | | | | | | */
+ 2, 0, /* | Hh,30(f) | | Hh,330(f)| | Hh,330(f)| | Hh,30(f) | */
+ 1, -6, /* |__________| |__________| |__________| |__________| */
+ -14, -5, /* \ ___ / \ ___ / */
+ 15, -18, /* \ / \ / _____ \ / \ / */
+ 6, 7, /* `->| + |<--' / \ `-->| + |<-' */
+ 15, -10, /* \___/ _/ \_ \___/ */
+ -14, 22, /* \ / \ / \ / */
+ -7, -2, /* `--->| | | |<---' */
+ -4, 9, /* \_/ \_/ */
+ 6, -12, /* */
+ 6, -6, /* Headphones */
+ 0, -11,
+ 0, -5,
+ 4, 0};
+
+typedef struct {
+ int16_t taps[NUMTAPS * 2];
+} EarwaxContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const int sample_rates[] = { 44100, -1 };
+
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+
+ ff_add_format(&formats, AV_SAMPLE_FMT_S16);
+ ff_set_common_formats(ctx, formats);
+ ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
+ ff_set_common_channel_layouts(ctx, layout);
+ ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
+
+ return 0;
+}
+
+//FIXME: replace with DSPContext.scalarproduct_int16
+static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out)
+{
+ int32_t sample;
+ int16_t j;
+
+ while (in < endin) {
+ sample = 0;
+ for (j = 0; j < NUMTAPS; j++)
+ sample += in[j] * filt[j];
+ *out = av_clip_int16(sample >> 6);
+ out++;
+ in++;
+ }
+
+ return out;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int16_t *taps, *endin, *in, *out;
+ AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
+ int len;
+
+ if (!outsamples) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outsamples, insamples);
+
+ taps = ((EarwaxContext *)inlink->dst->priv)->taps;
+ out = (int16_t *)outsamples->data[0];
+ in = (int16_t *)insamples ->data[0];
+
+ len = FFMIN(NUMTAPS, 2*insamples->nb_samples);
+ // copy part of new input and process with saved input
+ memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
+ out = scalarproduct(taps, taps + len, out);
+
+ // process current input
+ if (2*insamples->nb_samples >= NUMTAPS ){
+ endin = in + insamples->nb_samples * 2 - NUMTAPS;
+ scalarproduct(in, endin, out);
+
+ // save part of input for next round
+ memcpy(taps, endin, NUMTAPS * sizeof(*taps));
+ } else
+ memmove(taps, taps + 2*insamples->nb_samples, NUMTAPS * sizeof(*taps));
+
+ av_frame_free(&insamples);
+ return ff_filter_frame(outlink, outsamples);
+}
+
+static const AVFilterPad earwax_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad earwax_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_earwax = {
+ .name = "earwax",
+ .description = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(EarwaxContext),
+ .inputs = earwax_inputs,
+ .outputs = earwax_outputs,
+};
diff --git a/libavfilter/af_flanger.c b/libavfilter/af_flanger.c
new file mode 100644
index 0000000..5ff3786
--- /dev/null
+++ b/libavfilter/af_flanger.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2006 Rob Sykes <robs@users.sourceforge.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/samplefmt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "internal.h"
+#include "generate_wave_table.h"
+
+#define INTERPOLATION_LINEAR 0
+#define INTERPOLATION_QUADRATIC 1
+
+typedef struct FlangerContext {
+ const AVClass *class;
+ double delay_min;
+ double delay_depth;
+ double feedback_gain;
+ double delay_gain;
+ double speed;
+ int wave_shape;
+ double channel_phase;
+ int interpolation;
+ double in_gain;
+ int max_samples;
+ uint8_t **delay_buffer;
+ int delay_buf_pos;
+ double *delay_last;
+ float *lfo;
+ int lfo_length;
+ int lfo_pos;
+} FlangerContext;
+
+#define OFFSET(x) offsetof(FlangerContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption flanger_options[] = {
+ { "delay", "base delay in milliseconds", OFFSET(delay_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 30, A },
+ { "depth", "added swept delay in milliseconds", OFFSET(delay_depth), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, 10, A },
+ { "regen", "percentage regeneration (delayed signal feedback)", OFFSET(feedback_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -95, 95, A },
+ { "width", "percentage of delayed signal mixed with original", OFFSET(delay_gain), AV_OPT_TYPE_DOUBLE, {.dbl=71}, 0, 100, A },
+ { "speed", "sweeps per second (Hz)", OFFSET(speed), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.1, 10, A },
+ { "shape", "swept wave shape", OFFSET(wave_shape), AV_OPT_TYPE_INT, {.i64=WAVE_SIN}, WAVE_SIN, WAVE_NB-1, A, "type" },
+ { "triangular", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, A, "type" },
+ { "t", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, A, "type" },
+ { "sinusoidal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, A, "type" },
+ { "s", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, A, "type" },
+ { "phase", "swept wave percentage phase-shift for multi-channel", OFFSET(channel_phase), AV_OPT_TYPE_DOUBLE, {.dbl=25}, 0, 100, A },
+ { "interp", "delay-line interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A, "itype" },
+ { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATION_LINEAR}, 0, 0, A, "itype" },
+ { "quadratic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATION_QUADRATIC}, 0, 0, A, "itype" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(flanger);
+
+static int init(AVFilterContext *ctx)
+{
+ FlangerContext *s = ctx->priv;
+
+ s->feedback_gain /= 100;
+ s->delay_gain /= 100;
+ s->channel_phase /= 100;
+ s->delay_min /= 1000;
+ s->delay_depth /= 1000;
+ s->in_gain = 1 / (1 + s->delay_gain);
+ s->delay_gain /= 1 + s->delay_gain;
+ s->delay_gain *= 1 - fabs(s->feedback_gain);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FlangerContext *s = ctx->priv;
+
+ s->max_samples = (s->delay_min + s->delay_depth) * inlink->sample_rate + 2.5;
+ s->lfo_length = inlink->sample_rate / s->speed;
+ s->delay_last = av_calloc(inlink->channels, sizeof(*s->delay_last));
+ s->lfo = av_calloc(s->lfo_length, sizeof(*s->lfo));
+ if (!s->lfo || !s->delay_last)
+ return AVERROR(ENOMEM);
+
+ ff_generate_wave_table(s->wave_shape, AV_SAMPLE_FMT_FLT, s->lfo, s->lfo_length,
+ floor(s->delay_min * inlink->sample_rate + 0.5),
+ s->max_samples - 2., 3 * M_PI_2);
+
+ return av_samples_alloc_array_and_samples(&s->delay_buffer, NULL,
+ inlink->channels, s->max_samples,
+ inlink->format, 0);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ FlangerContext *s = ctx->priv;
+ AVFrame *out_frame;
+ int chan, i;
+
+ if (av_frame_is_writable(frame)) {
+ out_frame = frame;
+ } else {
+ out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
+ if (!out_frame)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_frame, frame);
+ }
+
+ for (i = 0; i < frame->nb_samples; i++) {
+
+ s->delay_buf_pos = (s->delay_buf_pos + s->max_samples - 1) % s->max_samples;
+
+ for (chan = 0; chan < inlink->channels; chan++) {
+ double *src = (double *)frame->extended_data[chan];
+ double *dst = (double *)out_frame->extended_data[chan];
+ double delayed_0, delayed_1;
+ double delayed;
+ double in, out;
+ int channel_phase = chan * s->lfo_length * s->channel_phase + .5;
+ double delay = s->lfo[(s->lfo_pos + channel_phase) % s->lfo_length];
+ int int_delay = (int)delay;
+ double frac_delay = modf(delay, &delay);
+ double *delay_buffer = (double *)s->delay_buffer[chan];
+
+ in = src[i];
+ delay_buffer[s->delay_buf_pos] = in + s->delay_last[chan] *
+ s->feedback_gain;
+ delayed_0 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
+ delayed_1 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
+
+ if (s->interpolation == INTERPOLATION_LINEAR) {
+ delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay;
+ } else {
+ double a, b;
+ double delayed_2 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
+ delayed_2 -= delayed_0;
+ delayed_1 -= delayed_0;
+ a = delayed_2 * .5 - delayed_1;
+ b = delayed_1 * 2 - delayed_2 *.5;
+ delayed = delayed_0 + (a * frac_delay + b) * frac_delay;
+ }
+
+ s->delay_last[chan] = delayed;
+ out = in * s->in_gain + delayed * s->delay_gain;
+ dst[i] = out;
+ }
+ s->lfo_pos = (s->lfo_pos + 1) % s->lfo_length;
+ }
+
+ if (frame != out_frame)
+ av_frame_free(&frame);
+
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FlangerContext *s = ctx->priv;
+
+ av_freep(&s->lfo);
+ av_freep(&s->delay_last);
+
+ if (s->delay_buffer)
+ av_freep(&s->delay_buffer[0]);
+ av_freep(&s->delay_buffer);
+}
+
+static const AVFilterPad flanger_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad flanger_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_flanger = {
+ .name = "flanger",
+ .description = NULL_IF_CONFIG_SMALL("Apply a flanging effect to the audio."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(FlangerContext),
+ .priv_class = &flanger_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = flanger_inputs,
+ .outputs = flanger_outputs,
+};
diff --git a/libavfilter/af_join.c b/libavfilter/af_join.c
index e684cb9..a1717c6 100644
--- a/libavfilter/af_join.c
+++ b/libavfilter/af_join.c
@@ -1,19 +1,18 @@
/*
+ * This file is part of FFmpeg.
*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -66,22 +65,18 @@ typedef struct JoinContext {
#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption join_options[] = {
- { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A },
+ { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F },
{ "channel_layout", "Channel layout of the "
- "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A },
+ "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
{ "map", "A comma-separated list of channels maps in the format "
"'input_stream.input_channel-output_channel.",
- OFFSET(map), AV_OPT_TYPE_STRING, .flags = A },
- { NULL },
+ OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
+ { NULL }
};
-static const AVClass join_class = {
- .class_name = "join filter",
- .item_name = av_default_item_name,
- .option = join_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(join);
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
@@ -194,18 +189,15 @@ static av_cold int join_init(AVFilterContext *ctx)
if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
s->channel_layout_str);
- ret = AVERROR(EINVAL);
- goto fail;
+ return AVERROR(EINVAL);
}
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels);
s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels);
s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
- if (!s->channels || !s->buffers|| !s->input_frames) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
+ if (!s->channels || !s->buffers|| !s->input_frames)
+ return AVERROR(ENOMEM);
for (i = 0; i < s->nb_channels; i++) {
s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
@@ -213,7 +205,7 @@ static av_cold int join_init(AVFilterContext *ctx)
}
if ((ret = parse_maps(ctx)) < 0)
- goto fail;
+ return ret;
for (i = 0; i < s->inputs; i++) {
char name[32];
@@ -229,9 +221,7 @@ static av_cold int join_init(AVFilterContext *ctx)
ff_insert_inpad(ctx, i, &pad);
}
-fail:
- av_opt_free(s);
- return ret;
+ return 0;
}
static av_cold void join_uninit(AVFilterContext *ctx)
@@ -258,9 +248,12 @@ static int join_query_formats(AVFilterContext *ctx)
ff_add_channel_layout(&layouts, s->channel_layout);
ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
- for (i = 0; i < ctx->nb_inputs; i++)
- ff_channel_layouts_ref(ff_all_channel_layouts(),
- &ctx->inputs[i]->out_channel_layouts);
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
+ }
ff_set_common_formats (ctx, ff_planar_sample_fmts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
@@ -479,6 +472,7 @@ static int join_request_frame(AVFilterLink *outlink)
frame->nb_samples = nb_samples;
frame->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(frame, outlink->channels);
frame->sample_rate = outlink->sample_rate;
frame->format = outlink->format;
frame->pts = s->input_frames[0]->pts;
@@ -513,16 +507,13 @@ static const AVFilterPad avfilter_af_join_outputs[] = {
AVFilter ff_af_join = {
.name = "join",
.description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
- "multi-channel output"),
+ "multi-channel output."),
.priv_size = sizeof(JoinContext),
.priv_class = &join_class,
-
.init = join_init,
.uninit = join_uninit,
.query_formats = join_query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_af_join_outputs,
-
- .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+ .inputs = NULL,
+ .outputs = avfilter_af_join_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
diff --git a/libavfilter/af_ladspa.c b/libavfilter/af_ladspa.c
new file mode 100644
index 0000000..2057e6d
--- /dev/null
+++ b/libavfilter/af_ladspa.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ * Copyright (c) 2011 Mina Nagy Zaki
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * LADSPA wrapper
+ */
+
+#include <dlfcn.h>
+#include <ladspa.h>
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct LADSPAContext {
+ const AVClass *class;
+ char *dl_name;
+ char *plugin;
+ char *options;
+ void *dl_handle;
+
+ unsigned long nb_inputs;
+ unsigned long *ipmap; /* map input number to port number */
+
+ unsigned long nb_inputcontrols;
+ unsigned long *icmap; /* map input control number to port number */
+ LADSPA_Data *ictlv; /* input controls values */
+
+ unsigned long nb_outputs;
+ unsigned long *opmap; /* map output number to port number */
+
+ unsigned long nb_outputcontrols;
+ unsigned long *ocmap; /* map output control number to port number */
+ LADSPA_Data *octlv; /* output controls values */
+
+ const LADSPA_Descriptor *desc;
+ int *ctl_needs_value;
+ int nb_handles;
+ LADSPA_Handle *handles;
+
+ int sample_rate;
+ int nb_samples;
+ int64_t pts;
+ int64_t duration;
+} LADSPAContext;
+
+#define OFFSET(x) offsetof(LADSPAContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption ladspa_options[] = {
+ { "file", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "f", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "plugin", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "p", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "controls", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "c", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
+ { "s", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
+ { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
+ { "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
+ { "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(ladspa);
+
+static void print_ctl_info(AVFilterContext *ctx, int level,
+ LADSPAContext *s, int ctl, unsigned long *map,
+ LADSPA_Data *values, int print)
+{
+ const LADSPA_PortRangeHint *h = s->desc->PortRangeHints + map[ctl];
+
+ av_log(ctx, level, "c%i: %s [", ctl, s->desc->PortNames[map[ctl]]);
+
+ if (LADSPA_IS_HINT_TOGGLED(h->HintDescriptor)) {
+ av_log(ctx, level, "toggled (1 or 0)");
+
+ if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
+ av_log(ctx, level, " (default %i)", (int)values[ctl]);
+ } else {
+ if (LADSPA_IS_HINT_INTEGER(h->HintDescriptor)) {
+ av_log(ctx, level, "<int>");
+
+ if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
+ av_log(ctx, level, ", min: %i", (int)h->LowerBound);
+
+ if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
+ av_log(ctx, level, ", max: %i", (int)h->UpperBound);
+
+ if (print)
+ av_log(ctx, level, " (value %d)", (int)values[ctl]);
+ else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
+ av_log(ctx, level, " (default %d)", (int)values[ctl]);
+ } else {
+ av_log(ctx, level, "<float>");
+
+ if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
+ av_log(ctx, level, ", min: %f", h->LowerBound);
+
+ if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
+ av_log(ctx, level, ", max: %f", h->UpperBound);
+
+ if (print)
+ av_log(ctx, level, " (value %f)", values[ctl]);
+ else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
+ av_log(ctx, level, " (default %f)", values[ctl]);
+ }
+
+ if (LADSPA_IS_HINT_SAMPLE_RATE(h->HintDescriptor))
+ av_log(ctx, level, ", multiple of sample rate");
+
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ av_log(ctx, level, ", logarithmic scale");
+ }
+
+ av_log(ctx, level, "]\n");
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LADSPAContext *s = ctx->priv;
+ AVFrame *out;
+ int i, h;
+
+ if (!s->nb_outputs ||
+ (av_frame_is_writable(in) && s->nb_inputs == s->nb_outputs &&
+ !(s->desc->Properties & LADSPA_PROPERTY_INPLACE_BROKEN))) {
+ out = in;
+ } else {
+ out = ff_get_audio_buffer(ctx->outputs[0], in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ for (h = 0; h < s->nb_handles; h++) {
+ for (i = 0; i < s->nb_inputs; i++) {
+ s->desc->connect_port(s->handles[h], s->ipmap[i],
+ (LADSPA_Data*)in->extended_data[i]);
+ }
+
+ for (i = 0; i < s->nb_outputs; i++) {
+ s->desc->connect_port(s->handles[h], s->opmap[i],
+ (LADSPA_Data*)out->extended_data[i]);
+ }
+
+ s->desc->run(s->handles[h], in->nb_samples);
+ }
+
+ for (i = 0; i < s->nb_outputcontrols; i++)
+ print_ctl_info(ctx, AV_LOG_VERBOSE, s, i, s->ocmap, s->octlv, 1);
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LADSPAContext *s = ctx->priv;
+ AVFrame *out;
+ int64_t t;
+ int i;
+
+ if (ctx->nb_inputs)
+ return ff_request_frame(ctx->inputs[0]);
+
+ t = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
+ if (s->duration >= 0 && t >= s->duration)
+ return AVERROR_EOF;
+
+ out = ff_get_audio_buffer(outlink, s->nb_samples);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_outputs; i++)
+ s->desc->connect_port(s->handles[0], s->opmap[i],
+ (LADSPA_Data*)out->extended_data[i]);
+
+ s->desc->run(s->handles[0], s->nb_samples);
+
+ for (i = 0; i < s->nb_outputcontrols; i++)
+ print_ctl_info(ctx, AV_LOG_INFO, s, i, s->ocmap, s->octlv, 1);
+
+ out->sample_rate = s->sample_rate;
+ out->pts = s->pts;
+ s->pts += s->nb_samples;
+
+ return ff_filter_frame(outlink, out);
+}
+
+static void set_default_ctl_value(LADSPAContext *s, int ctl,
+ unsigned long *map, LADSPA_Data *values)
+{
+ const LADSPA_PortRangeHint *h = s->desc->PortRangeHints + map[ctl];
+ const LADSPA_Data lower = h->LowerBound;
+ const LADSPA_Data upper = h->UpperBound;
+
+ if (LADSPA_IS_HINT_DEFAULT_MINIMUM(h->HintDescriptor)) {
+ values[ctl] = lower;
+ } else if (LADSPA_IS_HINT_DEFAULT_MAXIMUM(h->HintDescriptor)) {
+ values[ctl] = upper;
+ } else if (LADSPA_IS_HINT_DEFAULT_0(h->HintDescriptor)) {
+ values[ctl] = 0.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_1(h->HintDescriptor)) {
+ values[ctl] = 1.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_100(h->HintDescriptor)) {
+ values[ctl] = 100.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_440(h->HintDescriptor)) {
+ values[ctl] = 440.0;
+ } else if (LADSPA_IS_HINT_DEFAULT_LOW(h->HintDescriptor)) {
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ values[ctl] = exp(log(lower) * 0.75 + log(upper) * 0.25);
+ else
+ values[ctl] = lower * 0.75 + upper * 0.25;
+ } else if (LADSPA_IS_HINT_DEFAULT_MIDDLE(h->HintDescriptor)) {
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ values[ctl] = exp(log(lower) * 0.5 + log(upper) * 0.5);
+ else
+ values[ctl] = lower * 0.5 + upper * 0.5;
+ } else if (LADSPA_IS_HINT_DEFAULT_HIGH(h->HintDescriptor)) {
+ if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
+ values[ctl] = exp(log(lower) * 0.25 + log(upper) * 0.75);
+ else
+ values[ctl] = lower * 0.25 + upper * 0.75;
+ }
+}
+
+static int connect_ports(AVFilterContext *ctx, AVFilterLink *link)
+{
+ LADSPAContext *s = ctx->priv;
+ int i, j;
+
+ s->nb_handles = s->nb_inputs == 1 && s->nb_outputs == 1 ? link->channels : 1;
+ s->handles = av_calloc(s->nb_handles, sizeof(*s->handles));
+ if (!s->handles)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_handles; i++) {
+ s->handles[i] = s->desc->instantiate(s->desc, link->sample_rate);
+ if (!s->handles[i]) {
+ av_log(ctx, AV_LOG_ERROR, "Could not instantiate plugin.\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ // Connect the input control ports
+ for (j = 0; j < s->nb_inputcontrols; j++)
+ s->desc->connect_port(s->handles[i], s->icmap[j], s->ictlv + j);
+
+ // Connect the output control ports
+ for (j = 0; j < s->nb_outputcontrols; j++)
+ s->desc->connect_port(s->handles[i], s->ocmap[j], &s->octlv[j]);
+
+ if (s->desc->activate)
+ s->desc->activate(s->handles[i]);
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "handles: %d\n", s->nb_handles);
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+
+ return connect_ports(ctx, inlink);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ int ret;
+
+ if (ctx->nb_inputs) {
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->format = inlink->format;
+ outlink->sample_rate = inlink->sample_rate;
+
+ ret = 0;
+ } else {
+ LADSPAContext *s = ctx->priv;
+
+ outlink->sample_rate = s->sample_rate;
+ outlink->time_base = (AVRational){1, s->sample_rate};
+
+ ret = connect_ports(ctx, outlink);
+ }
+
+ return ret;
+}
+
+static void count_ports(const LADSPA_Descriptor *desc,
+ unsigned long *nb_inputs, unsigned long *nb_outputs)
+{
+ LADSPA_PortDescriptor pd;
+ int i;
+
+ for (i = 0; i < desc->PortCount; i++) {
+ pd = desc->PortDescriptors[i];
+
+ if (LADSPA_IS_PORT_AUDIO(pd)) {
+ if (LADSPA_IS_PORT_INPUT(pd)) {
+ (*nb_inputs)++;
+ } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
+ (*nb_outputs)++;
+ }
+ }
+ }
+}
+
+static void *try_load(const char *dir, const char *soname)
+{
+ char *path = av_asprintf("%s/%s.so", dir, soname);
+ void *ret = NULL;
+
+ if (path) {
+ ret = dlopen(path, RTLD_LOCAL|RTLD_NOW);
+ av_free(path);
+ }
+
+ return ret;
+}
+
+static int set_control(AVFilterContext *ctx, unsigned long port, LADSPA_Data value)
+{
+ LADSPAContext *s = ctx->priv;
+ const char *label = s->desc->Label;
+ LADSPA_PortRangeHint *h = (LADSPA_PortRangeHint *)s->desc->PortRangeHints +
+ s->icmap[port];
+
+ if (port >= s->nb_inputcontrols) {
+ av_log(ctx, AV_LOG_ERROR, "Control c%ld is out of range [0 - %lu].\n",
+ port, s->nb_inputcontrols);
+ return AVERROR(EINVAL);
+ }
+
+ if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor) &&
+ value < h->LowerBound) {
+ av_log(ctx, AV_LOG_ERROR,
+ "%s: input control c%ld is below lower boundary of %0.4f.\n",
+ label, port, h->LowerBound);
+ return AVERROR(EINVAL);
+ }
+
+ if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor) &&
+ value > h->UpperBound) {
+ av_log(ctx, AV_LOG_ERROR,
+ "%s: input control c%ld is above upper boundary of %0.4f.\n",
+ label, port, h->UpperBound);
+ return AVERROR(EINVAL);
+ }
+
+ s->ictlv[port] = value;
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ LADSPAContext *s = ctx->priv;
+ LADSPA_Descriptor_Function descriptor_fn;
+ const LADSPA_Descriptor *desc;
+ LADSPA_PortDescriptor pd;
+ AVFilterPad pad = { NULL };
+ char *p, *arg, *saveptr = NULL;
+ unsigned long nb_ports;
+ int i;
+
+ if (!s->dl_name) {
+ av_log(ctx, AV_LOG_ERROR, "No plugin name provided\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->dl_name[0] == '/' || s->dl_name[0] == '.') {
+ // argument is a path
+ s->dl_handle = dlopen(s->dl_name, RTLD_LOCAL|RTLD_NOW);
+ } else {
+ // argument is a shared object name
+ char *paths = av_strdup(getenv("LADSPA_PATH"));
+ const char *separator = ":";
+
+ if (paths) {
+ p = paths;
+ while ((arg = av_strtok(p, separator, &saveptr)) && !s->dl_handle) {
+ s->dl_handle = try_load(arg, s->dl_name);
+ p = NULL;
+ }
+ }
+
+ av_free(paths);
+ if (!s->dl_handle && (paths = av_asprintf("%s/.ladspa/lib", getenv("HOME")))) {
+ s->dl_handle = try_load(paths, s->dl_name);
+ av_free(paths);
+ }
+
+ if (!s->dl_handle)
+ s->dl_handle = try_load("/usr/local/lib/ladspa", s->dl_name);
+
+ if (!s->dl_handle)
+ s->dl_handle = try_load("/usr/lib/ladspa", s->dl_name);
+ }
+ if (!s->dl_handle) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to load '%s'\n", s->dl_name);
+ return AVERROR(EINVAL);
+ }
+
+ descriptor_fn = dlsym(s->dl_handle, "ladspa_descriptor");
+ if (!descriptor_fn) {
+ av_log(ctx, AV_LOG_ERROR, "Could not find ladspa_descriptor: %s\n", dlerror());
+ return AVERROR(EINVAL);
+ }
+
+ // Find the requested plugin, or list plugins
+ if (!s->plugin) {
+ av_log(ctx, AV_LOG_INFO, "The '%s' library contains the following plugins:\n", s->dl_name);
+ av_log(ctx, AV_LOG_INFO, "I = Input Channels\n");
+ av_log(ctx, AV_LOG_INFO, "O = Output Channels\n");
+ av_log(ctx, AV_LOG_INFO, "I:O %-25s %s\n", "Plugin", "Description");
+ av_log(ctx, AV_LOG_INFO, "\n");
+ for (i = 0; desc = descriptor_fn(i); i++) {
+ unsigned long inputs = 0, outputs = 0;
+
+ count_ports(desc, &inputs, &outputs);
+ av_log(ctx, AV_LOG_INFO, "%lu:%lu %-25s %s\n", inputs, outputs, desc->Label,
+ (char *)av_x_if_null(desc->Name, "?"));
+ av_log(ctx, AV_LOG_VERBOSE, "Maker: %s\n",
+ (char *)av_x_if_null(desc->Maker, "?"));
+ av_log(ctx, AV_LOG_VERBOSE, "Copyright: %s\n",
+ (char *)av_x_if_null(desc->Copyright, "?"));
+ }
+ return AVERROR_EXIT;
+ } else {
+ for (i = 0;; i++) {
+ desc = descriptor_fn(i);
+ if (!desc) {
+ av_log(ctx, AV_LOG_ERROR, "Could not find plugin: %s\n", s->plugin);
+ return AVERROR(EINVAL);
+ }
+
+ if (desc->Label && !strcmp(desc->Label, s->plugin))
+ break;
+ }
+ }
+
+ s->desc = desc;
+ nb_ports = desc->PortCount;
+
+ s->ipmap = av_calloc(nb_ports, sizeof(*s->ipmap));
+ s->opmap = av_calloc(nb_ports, sizeof(*s->opmap));
+ s->icmap = av_calloc(nb_ports, sizeof(*s->icmap));
+ s->ocmap = av_calloc(nb_ports, sizeof(*s->ocmap));
+ s->ictlv = av_calloc(nb_ports, sizeof(*s->ictlv));
+ s->octlv = av_calloc(nb_ports, sizeof(*s->octlv));
+ s->ctl_needs_value = av_calloc(nb_ports, sizeof(*s->ctl_needs_value));
+ if (!s->ipmap || !s->opmap || !s->icmap ||
+ !s->ocmap || !s->ictlv || !s->octlv || !s->ctl_needs_value)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_ports; i++) {
+ pd = desc->PortDescriptors[i];
+
+ if (LADSPA_IS_PORT_AUDIO(pd)) {
+ if (LADSPA_IS_PORT_INPUT(pd)) {
+ s->ipmap[s->nb_inputs] = i;
+ s->nb_inputs++;
+ } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
+ s->opmap[s->nb_outputs] = i;
+ s->nb_outputs++;
+ }
+ } else if (LADSPA_IS_PORT_CONTROL(pd)) {
+ if (LADSPA_IS_PORT_INPUT(pd)) {
+ s->icmap[s->nb_inputcontrols] = i;
+
+ if (LADSPA_IS_HINT_HAS_DEFAULT(desc->PortRangeHints[i].HintDescriptor))
+ set_default_ctl_value(s, s->nb_inputcontrols, s->icmap, s->ictlv);
+ else
+ s->ctl_needs_value[s->nb_inputcontrols] = 1;
+
+ s->nb_inputcontrols++;
+ } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
+ s->ocmap[s->nb_outputcontrols] = i;
+ s->nb_outputcontrols++;
+ }
+ }
+ }
+
+ // List Control Ports if "help" is specified
+ if (s->options && !strcmp(s->options, "help")) {
+ if (!s->nb_inputcontrols) {
+ av_log(ctx, AV_LOG_INFO,
+ "The '%s' plugin does not have any input controls.\n",
+ desc->Label);
+ } else {
+ av_log(ctx, AV_LOG_INFO,
+ "The '%s' plugin has the following input controls:\n",
+ desc->Label);
+ for (i = 0; i < s->nb_inputcontrols; i++)
+ print_ctl_info(ctx, AV_LOG_INFO, s, i, s->icmap, s->ictlv, 0);
+ }
+ return AVERROR_EXIT;
+ }
+
+ // Parse control parameters
+ p = s->options;
+ while (s->options) {
+ LADSPA_Data val;
+ int ret;
+
+ if (!(arg = av_strtok(p, "|", &saveptr)))
+ break;
+ p = NULL;
+
+ if (sscanf(arg, "c%d=%f", &i, &val) != 2) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid syntax.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((ret = set_control(ctx, i, val)) < 0)
+ return ret;
+ s->ctl_needs_value[i] = 0;
+ }
+
+ // Check if any controls are not set
+ for (i = 0; i < s->nb_inputcontrols; i++) {
+ if (s->ctl_needs_value[i]) {
+ av_log(ctx, AV_LOG_ERROR, "Control c%d must be set.\n", i);
+ print_ctl_info(ctx, AV_LOG_ERROR, s, i, s->icmap, s->ictlv, 0);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ pad.type = AVMEDIA_TYPE_AUDIO;
+
+ if (s->nb_inputs) {
+ pad.name = av_asprintf("in0:%s%lu", desc->Label, s->nb_inputs);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+
+ pad.filter_frame = filter_frame;
+ pad.config_props = config_input;
+ if (ff_insert_inpad(ctx, ctx->nb_inputs, &pad) < 0) {
+ av_freep(&pad.name);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "ports: %lu\n", nb_ports);
+ av_log(ctx, AV_LOG_DEBUG, "inputs: %lu outputs: %lu\n",
+ s->nb_inputs, s->nb_outputs);
+ av_log(ctx, AV_LOG_DEBUG, "input controls: %lu output controls: %lu\n",
+ s->nb_inputcontrols, s->nb_outputcontrols);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ LADSPAContext *s = ctx->priv;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ if (s->nb_inputs) {
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+
+ ff_set_common_samplerates(ctx, formats);
+ } else {
+ int sample_rates[] = { s->sample_rate, -1 };
+
+ ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
+ }
+
+ if (s->nb_inputs == 1 && s->nb_outputs == 1) {
+ // We will instantiate multiple LADSPA_Handle, one over each channel
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+
+ ff_set_common_channel_layouts(ctx, layouts);
+ } else {
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ if (s->nb_inputs >= 1) {
+ AVFilterLink *inlink = ctx->inputs[0];
+ int64_t inlayout = FF_COUNT2LAYOUT(s->nb_inputs);
+
+ layouts = NULL;
+ ff_add_channel_layout(&layouts, inlayout);
+ ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+
+ if (!s->nb_outputs)
+ ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+ }
+
+ if (s->nb_outputs >= 1) {
+ int64_t outlayout = FF_COUNT2LAYOUT(s->nb_outputs);
+
+ layouts = NULL;
+ ff_add_channel_layout(&layouts, outlayout);
+ ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LADSPAContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < s->nb_handles; i++) {
+ if (s->desc->deactivate)
+ s->desc->deactivate(s->handles[i]);
+ if (s->desc->cleanup)
+ s->desc->cleanup(s->handles[i]);
+ }
+
+ if (s->dl_handle)
+ dlclose(s->dl_handle);
+
+ av_freep(&s->ipmap);
+ av_freep(&s->opmap);
+ av_freep(&s->icmap);
+ av_freep(&s->ocmap);
+ av_freep(&s->ictlv);
+ av_freep(&s->octlv);
+ av_freep(&s->handles);
+ av_freep(&s->ctl_needs_value);
+
+ if (ctx->nb_inputs)
+ av_freep(&ctx->input_pads[0].name);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ LADSPA_Data value;
+ unsigned long port;
+
+ if (sscanf(cmd, "c%ld", &port) + sscanf(args, "%f", &value) != 2)
+ return AVERROR(EINVAL);
+
+ return set_control(ctx, port, value);
+}
+
+static const AVFilterPad ladspa_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_ladspa = {
+ .name = "ladspa",
+ .description = NULL_IF_CONFIG_SMALL("Apply LADSPA effect."),
+ .priv_size = sizeof(LADSPAContext),
+ .priv_class = &ladspa_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .inputs = 0,
+ .outputs = ladspa_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/af_pan.c b/libavfilter/af_pan.c
new file mode 100644
index 0000000..4ba77a7
--- /dev/null
+++ b/libavfilter/af_pan.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2002 Anders Johansson <ajh@atri.curtin.edu.au>
+ * Copyright (c) 2011 Clément Bœsch <u pkh me>
+ * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio panning filter (channels mixing)
+ * Original code written by Anders Johansson for MPlayer,
+ * reimplemented for FFmpeg.
+ */
+
+#include <stdio.h>
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libswresample/swresample.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define MAX_CHANNELS 63
+
+typedef struct PanContext {
+ const AVClass *class;
+ char *args;
+ int64_t out_channel_layout;
+ double gain[MAX_CHANNELS][MAX_CHANNELS];
+ int64_t need_renorm;
+ int need_renumber;
+ int nb_output_channels;
+
+ int pure_gains;
+ /* channel mapping specific */
+ int channel_map[MAX_CHANNELS];
+ struct SwrContext *swr;
+} PanContext;
+
+static void skip_spaces(char **arg)
+{
+ int len = 0;
+
+ sscanf(*arg, " %n", &len);
+ *arg += len;
+}
+
+static int parse_channel_name(char **arg, int *rchannel, int *rnamed)
+{
+ char buf[8];
+ int len, i, channel_id = 0;
+ int64_t layout, layout0;
+
+ skip_spaces(arg);
+ /* try to parse a channel name, e.g. "FL" */
+ if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
+ layout0 = layout = av_get_channel_layout(buf);
+ /* channel_id <- first set bit in layout */
+ for (i = 32; i > 0; i >>= 1) {
+ if (layout >= (int64_t)1 << i) {
+ channel_id += i;
+ layout >>= i;
+ }
+ }
+ /* reject layouts that are not a single channel */
+ if (channel_id >= MAX_CHANNELS || layout0 != (int64_t)1 << channel_id)
+ return AVERROR(EINVAL);
+ *rchannel = channel_id;
+ *rnamed = 1;
+ *arg += len;
+ return 0;
+ }
+ /* try to parse a channel number, e.g. "c2" */
+ if (sscanf(*arg, "c%d%n", &channel_id, &len) &&
+ channel_id >= 0 && channel_id < MAX_CHANNELS) {
+ *rchannel = channel_id;
+ *rnamed = 0;
+ *arg += len;
+ return 0;
+ }
+ return AVERROR(EINVAL);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PanContext *const pan = ctx->priv;
+ char *arg, *arg0, *tokenizer, *args = av_strdup(pan->args);
+ int out_ch_id, in_ch_id, len, named, ret;
+ int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels
+ double gain;
+
+ if (!pan->args) {
+ av_log(ctx, AV_LOG_ERROR,
+ "pan filter needs a channel layout and a set "
+ "of channels definitions as parameter\n");
+ return AVERROR(EINVAL);
+ }
+ if (!args)
+ return AVERROR(ENOMEM);
+ arg = av_strtok(args, "|", &tokenizer);
+ ret = ff_parse_channel_layout(&pan->out_channel_layout,
+ &pan->nb_output_channels, arg, ctx);
+ if (ret < 0)
+ goto fail;
+
+ /* parse channel specifications */
+ while ((arg = arg0 = av_strtok(NULL, "|", &tokenizer))) {
+ /* channel name */
+ if (parse_channel_name(&arg, &out_ch_id, &named)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Expected out channel name, got \"%.8s\"\n", arg);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ if (named) {
+ if (!((pan->out_channel_layout >> out_ch_id) & 1)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Channel \"%.8s\" does not exist in the chosen layout\n", arg0);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ /* get the channel number in the output channel layout:
+ * out_channel_layout & ((1 << out_ch_id) - 1) are all the
+ * channels that come before out_ch_id,
+ * so their count is the index of out_ch_id */
+ out_ch_id = av_get_channel_layout_nb_channels(pan->out_channel_layout & (((int64_t)1 << out_ch_id) - 1));
+ }
+ if (out_ch_id < 0 || out_ch_id >= pan->nb_output_channels) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid out channel name \"%.8s\"\n", arg0);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ skip_spaces(&arg);
+ if (*arg == '=') {
+ arg++;
+ } else if (*arg == '<') {
+ pan->need_renorm |= (int64_t)1 << out_ch_id;
+ arg++;
+ } else {
+ av_log(ctx, AV_LOG_ERROR,
+ "Syntax error after channel name in \"%.8s\"\n", arg0);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ /* gains */
+ while (1) {
+ gain = 1;
+ if (sscanf(arg, "%lf%n *%n", &gain, &len, &len))
+ arg += len;
+ if (parse_channel_name(&arg, &in_ch_id, &named)){
+ av_log(ctx, AV_LOG_ERROR,
+ "Expected in channel name, got \"%.8s\"\n", arg);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ nb_in_channels[named]++;
+ if (nb_in_channels[!named]) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Can not mix named and numbered channels\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ pan->gain[out_ch_id][in_ch_id] = gain;
+ skip_spaces(&arg);
+ if (!*arg)
+ break;
+ if (*arg != '+') {
+ av_log(ctx, AV_LOG_ERROR, "Syntax error near \"%.8s\"\n", arg);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ arg++;
+ }
+ }
+ pan->need_renumber = !!nb_in_channels[1];
+
+ ret = 0;
+fail:
+ av_free(args);
+ return ret;
+}
+
+static int are_gains_pure(const PanContext *pan)
+{
+ int i, j;
+
+ for (i = 0; i < MAX_CHANNELS; i++) {
+ int nb_gain = 0;
+
+ for (j = 0; j < MAX_CHANNELS; j++) {
+ double gain = pan->gain[i][j];
+
+ /* channel mapping is effective only if 0% or 100% of a channel is
+ * selected... */
+ if (gain != 0. && gain != 1.)
+ return 0;
+ /* ...and if the output channel is only composed of one input */
+ if (gain && nb_gain++)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ PanContext *pan = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts;
+
+ pan->pure_gains = are_gains_pure(pan);
+ /* libswr supports any sample and packing formats */
+ ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ // inlink supports any channel layout
+ layouts = ff_all_channel_counts();
+ ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+
+ // outlink supports only requested output channel layout
+ layouts = NULL;
+ ff_add_channel_layout(&layouts,
+ pan->out_channel_layout ? pan->out_channel_layout :
+ FF_COUNT2LAYOUT(pan->nb_output_channels));
+ ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+ return 0;
+}
+
+static int config_props(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->dst;
+ PanContext *pan = ctx->priv;
+ char buf[1024], *cur;
+ int i, j, k, r;
+ double t;
+
+ if (pan->need_renumber) {
+ // input channels were given by their name: renumber them
+ for (i = j = 0; i < MAX_CHANNELS; i++) {
+ if ((link->channel_layout >> i) & 1) {
+ for (k = 0; k < pan->nb_output_channels; k++)
+ pan->gain[k][j] = pan->gain[k][i];
+ j++;
+ }
+ }
+ }
+
+ // sanity check; can't be done in query_formats since the inlink
+ // channel layout is unknown at that time
+ if (link->channels > MAX_CHANNELS ||
+ pan->nb_output_channels > MAX_CHANNELS) {
+ av_log(ctx, AV_LOG_ERROR,
+ "af_pan support a maximum of %d channels. "
+ "Feel free to ask for a higher limit.\n", MAX_CHANNELS);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ // init libswresample context
+ pan->swr = swr_alloc_set_opts(pan->swr,
+ pan->out_channel_layout, link->format, link->sample_rate,
+ link->channel_layout, link->format, link->sample_rate,
+ 0, ctx);
+ if (!pan->swr)
+ return AVERROR(ENOMEM);
+ if (!link->channel_layout) {
+ if (av_opt_set_int(pan->swr, "ich", link->channels, 0) < 0)
+ return AVERROR(EINVAL);
+ }
+ if (!pan->out_channel_layout) {
+ if (av_opt_set_int(pan->swr, "och", pan->nb_output_channels, 0) < 0)
+ return AVERROR(EINVAL);
+ }
+
+ // gains are pure, init the channel mapping
+ if (pan->pure_gains) {
+
+ // get channel map from the pure gains
+ for (i = 0; i < pan->nb_output_channels; i++) {
+ int ch_id = -1;
+ for (j = 0; j < link->channels; j++) {
+ if (pan->gain[i][j]) {
+ ch_id = j;
+ break;
+ }
+ }
+ pan->channel_map[i] = ch_id;
+ }
+
+ av_opt_set_int(pan->swr, "icl", pan->out_channel_layout, 0);
+ av_opt_set_int(pan->swr, "uch", pan->nb_output_channels, 0);
+ swr_set_channel_mapping(pan->swr, pan->channel_map);
+ } else {
+ // renormalize
+ for (i = 0; i < pan->nb_output_channels; i++) {
+ if (!((pan->need_renorm >> i) & 1))
+ continue;
+ t = 0;
+ for (j = 0; j < link->channels; j++)
+ t += pan->gain[i][j];
+ if (t > -1E-5 && t < 1E-5) {
+ // t is almost 0 but not exactly, this is probably a mistake
+ if (t)
+ av_log(ctx, AV_LOG_WARNING,
+ "Degenerate coefficients while renormalizing\n");
+ continue;
+ }
+ for (j = 0; j < link->channels; j++)
+ pan->gain[i][j] /= t;
+ }
+ av_opt_set_int(pan->swr, "icl", link->channel_layout, 0);
+ av_opt_set_int(pan->swr, "ocl", pan->out_channel_layout, 0);
+ swr_set_matrix(pan->swr, pan->gain[0], pan->gain[1] - pan->gain[0]);
+ }
+
+ r = swr_init(pan->swr);
+ if (r < 0)
+ return r;
+
+ // summary
+ for (i = 0; i < pan->nb_output_channels; i++) {
+ cur = buf;
+ for (j = 0; j < link->channels; j++) {
+ r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d",
+ j ? " + " : "", pan->gain[i][j], j);
+ cur += FFMIN(buf + sizeof(buf) - cur, r);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "o%d = %s\n", i, buf);
+ }
+ // add channel mapping summary if possible
+ if (pan->pure_gains) {
+ av_log(ctx, AV_LOG_INFO, "Pure channel mapping detected:");
+ for (i = 0; i < pan->nb_output_channels; i++)
+ if (pan->channel_map[i] < 0)
+ av_log(ctx, AV_LOG_INFO, " M");
+ else
+ av_log(ctx, AV_LOG_INFO, " %d", pan->channel_map[i]);
+ av_log(ctx, AV_LOG_INFO, "\n");
+ return 0;
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ int ret;
+ int n = insamples->nb_samples;
+ AVFilterLink *const outlink = inlink->dst->outputs[0];
+ AVFrame *outsamples = ff_get_audio_buffer(outlink, n);
+ PanContext *pan = inlink->dst->priv;
+
+ if (!outsamples)
+ return AVERROR(ENOMEM);
+ swr_convert(pan->swr, outsamples->extended_data, n,
+ (void *)insamples->extended_data, n);
+ av_frame_copy_props(outsamples, insamples);
+ outsamples->channel_layout = outlink->channel_layout;
+ av_frame_set_channels(outsamples, outlink->channels);
+
+ ret = ff_filter_frame(outlink, outsamples);
+ av_frame_free(&insamples);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PanContext *pan = ctx->priv;
+ swr_free(&pan->swr);
+}
+
+#define OFFSET(x) offsetof(PanContext, x)
+
+static const AVOption pan_options[] = {
+ { "args", NULL, OFFSET(args), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pan);
+
+static const AVFilterPad pan_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad pan_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_pan = {
+ .name = "pan",
+ .description = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."),
+ .priv_size = sizeof(PanContext),
+ .priv_class = &pan_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = pan_inputs,
+ .outputs = pan_outputs,
+};
diff --git a/libavfilter/af_replaygain.c b/libavfilter/af_replaygain.c
new file mode 100644
index 0000000..c419857
--- /dev/null
+++ b/libavfilter/af_replaygain.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 1998 - 2009 Conifer Software
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * ReplayGain scanner
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define HISTOGRAM_SLOTS 12000
+#define BUTTER_ORDER 2
+#define YULE_ORDER 10
+
+typedef struct ReplayGainFreqInfo {
+ int sample_rate;
+ double BYule[YULE_ORDER + 1];
+ double AYule[YULE_ORDER + 1];
+ double BButter[BUTTER_ORDER + 1];
+ double AButter[BUTTER_ORDER + 1];
+} ReplayGainFreqInfo;
+
+static const ReplayGainFreqInfo freqinfos[] =
+{
+ {
+ 192000,
+ { 0.01184742123123, -0.04631092400086, 0.06584226961238,
+ -0.02165588522478, -0.05656260778952, 0.08607493592760,
+ -0.03375544339786, -0.04216579932754, 0.06416711490648,
+ -0.03444708260844, 0.00697275872241 },
+ { 1.00000000000000, -5.24727318348167, 10.60821585192244,
+ -8.74127665810413, -1.33906071371683, 8.07972882096606,
+ -5.46179918950847, 0.54318070652536, 0.87450969224280,
+ -0.34656083539754, 0.03034796843589 },
+ { 0.99653501465135, -1.99307002930271, 0.99653501465135 },
+ { 1.00000000000000, -1.99305802314321, 0.99308203546221 },
+ },
+ {
+ 176400,
+ { 0.00268568524529, -0.00852379426080, 0.00852704191347,
+ 0.00146116310295, -0.00950855828762, 0.00625449515499,
+ 0.00116183868722, -0.00362461417136, 0.00203961000134,
+ -0.00050664587933, 0.00004327455427 },
+ { 1.00000000000000, -5.57512782763045, 12.44291056065794,
+ -12.87462799681221, 3.08554846961576, 6.62493459880692,
+ -7.07662766313248, 2.51175542736441, 0.06731510802735,
+ -0.24567753819213, 0.03961404162376 },
+ { 0.99622916581118, -1.99245833162236, 0.99622916581118 },
+ { 1.00000000000000, -1.99244411238133, 0.99247255086339 },
+ },
+ {
+ 144000,
+ { 0.00639682359450, -0.02556437970955, 0.04230854400938,
+ -0.03722462201267, 0.01718514827295, 0.00610592243009,
+ -0.03065965747365, 0.04345745003539, -0.03298592681309,
+ 0.01320937236809, -0.00220304127757 },
+ { 1.00000000000000, -6.14814623523425, 15.80002457141566,
+ -20.78487587686937, 11.98848552310315, 3.36462015062606,
+ -10.22419868359470, 6.65599702146473, -1.67141861110485,
+ -0.05417956536718, 0.07374767867406 },
+ { 0.99538268958706, -1.99076537917413, 0.99538268958706 },
+ { 1.00000000000000, -1.99074405950505, 0.99078669884321 },
+ },
+ {
+ 128000,
+ { 0.00553120584305, -0.02112620545016, 0.03549076243117,
+ -0.03362498312306, 0.01425867248183, 0.01344686928787,
+ -0.03392770787836, 0.03464136459530, -0.02039116051549,
+ 0.00667420794705, -0.00093763762995 },
+ { 1.00000000000000, -6.14581710839925, 16.04785903675838,
+ -22.19089131407749, 15.24756471580286, -0.52001440400238,
+ -8.00488641699940, 6.60916094768855, -2.37856022810923,
+ 0.33106947986101, 0.00459820832036 },
+ { 0.99480702681278, -1.98961405362557, 0.99480702681278 },
+ { 1.00000000000000, -1.98958708647324, 0.98964102077790 },
+ },
+ {
+ 112000,
+ { 0.00528778718259, -0.01893240907245, 0.03185982561867,
+ -0.02926260297838, 0.00715743034072, 0.01985743355827,
+ -0.03222614850941, 0.02565681978192, -0.01210662313473,
+ 0.00325436284541, -0.00044173593001 },
+ { 1.00000000000000, -6.24932108456288, 17.42344320538476,
+ -27.86819709054896, 26.79087344681326,-13.43711081485123,
+ -0.66023612948173, 6.03658091814935, -4.24926577030310,
+ 1.40829268709186, -0.19480852628112 },
+ { 0.99406737810867, -1.98813475621734, 0.99406737810867 },
+ { 1.00000000000000, -1.98809955990514, 0.98816995252954 },
+ },
+ {
+ 96000,
+ { 0.00588138296683, -0.01613559730421, 0.02184798954216,
+ -0.01742490405317, 0.00464635643780, 0.01117772513205,
+ -0.02123865824368, 0.01959354413350, -0.01079720643523,
+ 0.00352183686289, -0.00063124341421 },
+ { 1.00000000000000, -5.97808823642008, 16.21362507964068,
+ -25.72923730652599, 25.40470663139513,-14.66166287771134,
+ 2.81597484359752, 2.51447125969733, -2.23575306985286,
+ 0.75788151036791, -0.10078025199029 },
+ { 0.99308203517541, -1.98616407035082, 0.99308203517541 },
+ { 1.00000000000000, -1.98611621154089, 0.98621192916075 },
+ },
+ {
+ 88200,
+ { 0.02667482047416, -0.11377479336097, 0.23063167910965,
+ -0.30726477945593, 0.33188520686529, -0.33862680249063,
+ 0.31807161531340, -0.23730796929880, 0.12273894790371,
+ -0.03840017967282, 0.00549673387936 },
+ { 1.00000000000000, -6.31836451657302, 18.31351310801799,
+ -31.88210014815921, 36.53792146976740,-28.23393036467559,
+ 14.24725258227189, -4.04670980012854, 0.18865757280515,
+ 0.25420333563908, -0.06012333531065 },
+ { 0.99247255046129, -1.98494510092259, 0.99247255046129 },
+ { 1.00000000000000, -1.98488843762335, 0.98500176422183 },
+ },
+ {
+ 64000,
+ { 0.02613056568174, -0.08128786488109, 0.14937282347325,
+ -0.21695711675126, 0.25010286673402, -0.23162283619278,
+ 0.17424041833052, -0.10299599216680, 0.04258696481981,
+ -0.00977952936493, 0.00105325558889 },
+ { 1.00000000000000, -5.73625477092119, 16.15249794355035,
+ -29.68654912464508, 39.55706155674083,-39.82524556246253,
+ 30.50605345013009,-17.43051772821245, 7.05154573908017,
+ -1.80783839720514, 0.22127840210813 },
+ { 0.98964101933472, -1.97928203866944, 0.98964101933472 },
+ { 1.00000000000000, -1.97917472731009, 0.97938935002880 },
+ },
+ {
+ 56000,
+ { 0.03144914734085, -0.06151729206963, 0.08066788708145,
+ -0.09737939921516, 0.08943210803999, -0.06989984672010,
+ 0.04926972841044, -0.03161257848451, 0.01456837493506,
+ -0.00316015108496, 0.00132807215875 },
+ { 1.00000000000000, -4.87377313090032, 12.03922160140209,
+ -20.10151118381395, 25.10388534415171,-24.29065560815903,
+ 18.27158469090663,-10.45249552560593, 4.30319491872003,
+ -1.13716992070185, 0.14510733527035 },
+ { 0.98816995007392, -1.97633990014784, 0.98816995007392 },
+ { 1.00000000000000, -1.97619994516973, 0.97647985512594 },
+ },
+ {
+ 48000,
+ { 0.03857599435200, -0.02160367184185, -0.00123395316851,
+ -0.00009291677959, -0.01655260341619, 0.02161526843274,
+ -0.02074045215285, 0.00594298065125, 0.00306428023191,
+ 0.00012025322027, 0.00288463683916 },
+ { 1.00000000000000, -3.84664617118067, 7.81501653005538,
+ -11.34170355132042, 13.05504219327545,-12.28759895145294,
+ 9.48293806319790, -5.87257861775999, 2.75465861874613,
+ -0.86984376593551, 0.13919314567432 },
+ { 0.98621192462708, -1.97242384925416, 0.98621192462708 },
+ { 1.00000000000000, -1.97223372919527, 0.97261396931306 },
+ },
+ {
+ 44100,
+ { 0.05418656406430, -0.02911007808948, -0.00848709379851,
+ -0.00851165645469, -0.00834990904936, 0.02245293253339,
+ -0.02596338512915, 0.01624864962975, -0.00240879051584,
+ 0.00674613682247, -0.00187763777362 },
+ { 1.00000000000000, -3.47845948550071, 6.36317777566148,
+ -8.54751527471874, 9.47693607801280, -8.81498681370155,
+ 6.85401540936998, -4.39470996079559, 2.19611684890774,
+ -0.75104302451432, 0.13149317958808 },
+ { 0.98500175787242, -1.97000351574484, 0.98500175787242 },
+ { 1.00000000000000, -1.96977855582618, 0.97022847566350 },
+ },
+ {
+ 37800,
+ { 0.08717879977844, -0.01000374016172, -0.06265852122368,
+ -0.01119328800950, -0.00114279372960, 0.02081333954769,
+ -0.01603261863207, 0.01936763028546, 0.00760044736442,
+ -0.00303979112271, -0.00075088605788 },
+ { 1.00000000000000, -2.62816311472146, 3.53734535817992,
+ -3.81003448678921, 3.91291636730132, -3.53518605896288,
+ 2.71356866157873, -1.86723311846592, 1.12075382367659,
+ -0.48574086886890, 0.11330544663849 },
+ { 0.98252400815195, -1.96504801630391, 0.98252400815195 },
+ { 1.00000000000000, -1.96474258269041, 0.96535344991740 },
+ },
+ {
+ 32000,
+ { 0.15457299681924, -0.09331049056315, -0.06247880153653,
+ 0.02163541888798, -0.05588393329856, 0.04781476674921,
+ 0.00222312597743, 0.03174092540049, -0.01390589421898,
+ 0.00651420667831, -0.00881362733839 },
+ { 1.00000000000000, -2.37898834973084, 2.84868151156327,
+ -2.64577170229825, 2.23697657451713, -1.67148153367602,
+ 1.00595954808547, -0.45953458054983, 0.16378164858596,
+ -0.05032077717131, 0.02347897407020 },
+ { 0.97938932735214, -1.95877865470428, 0.97938932735214 },
+ { 1.00000000000000, -1.95835380975398, 0.95920349965459 },
+ },
+ {
+ 24000,
+ { 0.30296907319327, -0.22613988682123, -0.08587323730772,
+ 0.03282930172664, -0.00915702933434, -0.02364141202522,
+ -0.00584456039913, 0.06276101321749, -0.00000828086748,
+ 0.00205861885564, -0.02950134983287 },
+ { 1.00000000000000, -1.61273165137247, 1.07977492259970,
+ -0.25656257754070, -0.16276719120440, -0.22638893773906,
+ 0.39120800788284, -0.22138138954925, 0.04500235387352,
+ 0.02005851806501, 0.00302439095741 },
+ { 0.97531843204928, -1.95063686409857, 0.97531843204928 },
+ { 1.00000000000000, -1.95002759149878, 0.95124613669835 },
+ },
+ {
+ 22050,
+ { 0.33642304856132, -0.25572241425570, -0.11828570177555,
+ 0.11921148675203, -0.07834489609479, -0.00469977914380,
+ -0.00589500224440, 0.05724228140351, 0.00832043980773,
+ -0.01635381384540, -0.01760176568150 },
+ { 1.00000000000000, -1.49858979367799, 0.87350271418188,
+ 0.12205022308084, -0.80774944671438, 0.47854794562326,
+ -0.12453458140019, -0.04067510197014, 0.08333755284107,
+ -0.04237348025746, 0.02977207319925 },
+ { 0.97316523498161, -1.94633046996323, 0.97316523498161 },
+ { 1.00000000000000, -1.94561023566527, 0.94705070426118 },
+ },
+ {
+ 18900,
+ { 0.38524531015142, -0.27682212062067, -0.09980181488805,
+ 0.09951486755646, -0.08934020156622, -0.00322369330199,
+ -0.00110329090689, 0.03784509844682, 0.01683906213303,
+ -0.01147039862572, -0.01941767987192 },
+ { 1.00000000000000, -1.29708918404534, 0.90399339674203,
+ -0.29613799017877, -0.42326645916207, 0.37934887402200,
+ -0.37919795944938, 0.23410283284785, -0.03892971758879,
+ 0.00403009552351, 0.03640166626278 },
+ { 0.96535326815829, -1.93070653631658, 0.96535326815829 },
+ { 1.00000000000000, -1.92950577983524, 0.93190729279793 },
+ },
+ {
+ 16000,
+ { 0.44915256608450, -0.14351757464547, -0.22784394429749,
+ -0.01419140100551, 0.04078262797139, -0.12398163381748,
+ 0.04097565135648, 0.10478503600251, -0.01863887810927,
+ -0.03193428438915, 0.00541907748707 },
+ { 1.00000000000000, -0.62820619233671, 0.29661783706366,
+ -0.37256372942400, 0.00213767857124, -0.42029820170918,
+ 0.22199650564824, 0.00613424350682, 0.06747620744683,
+ 0.05784820375801, 0.03222754072173 },
+ { 0.96454515552826, -1.92909031105652, 0.96454515552826 },
+ { 1.00000000000000, -1.92783286977036, 0.93034775234268 },
+ },
+ {
+ 12000,
+ { 0.56619470757641, -0.75464456939302, 0.16242137742230,
+ 0.16744243493672, -0.18901604199609, 0.30931782841830,
+ -0.27562961986224, 0.00647310677246, 0.08647503780351,
+ -0.03788984554840, -0.00588215443421 },
+ { 1.00000000000000, -1.04800335126349, 0.29156311971249,
+ -0.26806001042947, 0.00819999645858, 0.45054734505008,
+ -0.33032403314006, 0.06739368333110, -0.04784254229033,
+ 0.01639907836189, 0.01807364323573 },
+ { 0.96009142950541, -1.92018285901082, 0.96009142950541 },
+ { 1.00000000000000, -1.91858953033784, 0.92177618768381 },
+ },
+ {
+ 11025,
+ { 0.58100494960553, -0.53174909058578, -0.14289799034253,
+ 0.17520704835522, 0.02377945217615, 0.15558449135573,
+ -0.25344790059353, 0.01628462406333, 0.06920467763959,
+ -0.03721611395801, -0.00749618797172 },
+ { 1.00000000000000, -0.51035327095184, -0.31863563325245,
+ -0.20256413484477, 0.14728154134330, 0.38952639978999,
+ -0.23313271880868, -0.05246019024463, -0.02505961724053,
+ 0.02442357316099, 0.01818801111503 },
+ { 0.95856916599601, -1.91713833199203, 0.95856916599601 },
+ { 1.00000000000000, -1.91542108074780, 0.91885558323625 },
+ },
+ {
+ 8000,
+ { 0.53648789255105, -0.42163034350696, -0.00275953611929,
+ 0.04267842219415, -0.10214864179676, 0.14590772289388,
+ -0.02459864859345, -0.11202315195388, -0.04060034127000,
+ 0.04788665548180, -0.02217936801134 },
+ { 1.00000000000000, -0.25049871956020, -0.43193942311114,
+ -0.03424681017675, -0.04678328784242, 0.26408300200955,
+ 0.15113130533216, -0.17556493366449, -0.18823009262115,
+ 0.05477720428674, 0.04704409688120 },
+ { 0.94597685600279, -1.89195371200558, 0.94597685600279 },
+ { 1.00000000000000, -1.88903307939452, 0.89487434461664 },
+ },
+};
+
+typedef struct ReplayGainContext {
+ uint32_t histogram[HISTOGRAM_SLOTS];
+ float peak;
+ int yule_hist_i, butter_hist_i;
+ const double *yule_coeff_a;
+ const double *yule_coeff_b;
+ const double *butter_coeff_a;
+ const double *butter_coeff_b;
+ float yule_hist_a[256];
+ float yule_hist_b[256];
+ float butter_hist_a[256];
+ float butter_hist_b[256];
+} ReplayGainContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ int i;
+
+ ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
+ ff_set_common_formats(ctx, formats);
+ ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
+ ff_set_common_channel_layouts(ctx, layout);
+
+ formats = NULL;
+ for (i = 0; i < FF_ARRAY_ELEMS(freqinfos); i++)
+ ff_add_format(&formats, freqinfos[i].sample_rate);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ReplayGainContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(freqinfos); i++) {
+ if (freqinfos[i].sample_rate == inlink->sample_rate)
+ break;
+ }
+ av_assert0(i < FF_ARRAY_ELEMS(freqinfos));
+
+ s->yule_coeff_a = freqinfos[i].AYule;
+ s->yule_coeff_b = freqinfos[i].BYule;
+ s->butter_coeff_a = freqinfos[i].AButter;
+ s->butter_coeff_b = freqinfos[i].BButter;
+
+ s->yule_hist_i = 20;
+ s->butter_hist_i = 4;
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = inlink->sample_rate / 20;
+
+ return 0;
+}
+
+/*
+ * Update largest absolute sample value.
+ */
+static void calc_stereo_peak(const float *samples, int nb_samples,
+ float *peak_p)
+{
+ float peak = 0.0;
+
+ while (nb_samples--) {
+ if (samples[0] > peak)
+ peak = samples[0];
+ else if (-samples[0] > peak)
+ peak = -samples[0];
+
+ if (samples[1] > peak)
+ peak = samples[1];
+ else if (-samples[1] > peak)
+ peak = -samples[1];
+
+ samples += 2;
+ }
+
+ *peak_p = FFMAX(peak, *peak_p);
+}
+
+/*
+ * Calculate stereo RMS level. Minimum value is about -100 dB for
+ * digital silence. The 90 dB offset is to compensate for the
+ * normalized float range and 3 dB is for stereo samples.
+ */
+static double calc_stereo_rms(const float *samples, int nb_samples)
+{
+ int count = nb_samples;
+ double sum = 1e-16;
+
+ while (count--) {
+ sum += samples[0] * samples[0] + samples[1] * samples[1];
+ samples += 2;
+ }
+
+ return 10 * log10 (sum / nb_samples) + 90.0 - 3.0;
+}
+
+/*
+ * Optimized implementation of 2nd-order IIR stereo filter.
+ */
+static void butter_filter_stereo_samples(ReplayGainContext *s,
+ float *samples, int nb_samples)
+{
+ const double *coeff_a = s->butter_coeff_a;
+ const double *coeff_b = s->butter_coeff_b;
+ float *hist_a = s->butter_hist_a;
+ float *hist_b = s->butter_hist_b;
+ double left, right;
+ int i, j;
+
+ i = s->butter_hist_i;
+
+ // If filter history is very small magnitude, clear it completely
+ // to prevent denormals from rattling around in there forever
+ // (slowing us down).
+
+ for (j = -4; j < 0; ++j)
+ if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
+ break;
+
+ if (!j) {
+ memset(s->butter_hist_a, 0, sizeof(s->butter_hist_a));
+ memset(s->butter_hist_b, 0, sizeof(s->butter_hist_b));
+ }
+
+ while (nb_samples--) {
+ left = (hist_b[i ] = samples[0]) * coeff_b[0];
+ right = (hist_b[i + 1] = samples[1]) * coeff_b[0];
+ left += hist_b[i - 2] * coeff_b[1] - hist_a[i - 2] * coeff_a[1];
+ right += hist_b[i - 1] * coeff_b[1] - hist_a[i - 1] * coeff_a[1];
+ left += hist_b[i - 4] * coeff_b[2] - hist_a[i - 4] * coeff_a[2];
+ right += hist_b[i - 3] * coeff_b[2] - hist_a[i - 3] * coeff_a[2];
+ samples[0] = hist_a[i ] = (float) left;
+ samples[1] = hist_a[i + 1] = (float) right;
+ samples += 2;
+
+ if ((i += 2) == 256) {
+ memcpy(hist_a, hist_a + 252, sizeof(*hist_a) * 4);
+ memcpy(hist_b, hist_b + 252, sizeof(*hist_b) * 4);
+ i = 4;
+ }
+ }
+
+ s->butter_hist_i = i;
+}
+
+/*
+ * Optimized implementation of 10th-order IIR stereo filter.
+ */
+static void yule_filter_stereo_samples(ReplayGainContext *s, const float *src,
+ float *dst, int nb_samples)
+{
+ const double *coeff_a = s->yule_coeff_a;
+ const double *coeff_b = s->yule_coeff_b;
+ float *hist_a = s->yule_hist_a;
+ float *hist_b = s->yule_hist_b;
+ double left, right;
+ int i, j;
+
+ i = s->yule_hist_i;
+
+ // If filter history is very small magnitude, clear it completely to
+ // prevent denormals from rattling around in there forever
+ // (slowing us down).
+
+ for (j = -20; j < 0; ++j)
+ if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
+ break;
+
+ if (!j) {
+ memset(s->yule_hist_a, 0, sizeof(s->yule_hist_a));
+ memset(s->yule_hist_b, 0, sizeof(s->yule_hist_b));
+ }
+
+ while (nb_samples--) {
+ left = (hist_b[i] = src[0]) * coeff_b[0];
+ right = (hist_b[i + 1] = src[1]) * coeff_b[0];
+ left += hist_b[i - 2] * coeff_b[ 1] - hist_a[i - 2] * coeff_a[1 ];
+ right += hist_b[i - 1] * coeff_b[ 1] - hist_a[i - 1] * coeff_a[1 ];
+ left += hist_b[i - 4] * coeff_b[ 2] - hist_a[i - 4] * coeff_a[2 ];
+ right += hist_b[i - 3] * coeff_b[ 2] - hist_a[i - 3] * coeff_a[2 ];
+ left += hist_b[i - 6] * coeff_b[ 3] - hist_a[i - 6] * coeff_a[3 ];
+ right += hist_b[i - 5] * coeff_b[ 3] - hist_a[i - 5] * coeff_a[3 ];
+ left += hist_b[i - 8] * coeff_b[ 4] - hist_a[i - 8] * coeff_a[4 ];
+ right += hist_b[i - 7] * coeff_b[ 4] - hist_a[i - 7] * coeff_a[4 ];
+ left += hist_b[i - 10] * coeff_b[ 5] - hist_a[i - 10] * coeff_a[5 ];
+ right += hist_b[i - 9] * coeff_b[ 5] - hist_a[i - 9] * coeff_a[5 ];
+ left += hist_b[i - 12] * coeff_b[ 6] - hist_a[i - 12] * coeff_a[6 ];
+ right += hist_b[i - 11] * coeff_b[ 6] - hist_a[i - 11] * coeff_a[6 ];
+ left += hist_b[i - 14] * coeff_b[ 7] - hist_a[i - 14] * coeff_a[7 ];
+ right += hist_b[i - 13] * coeff_b[ 7] - hist_a[i - 13] * coeff_a[7 ];
+ left += hist_b[i - 16] * coeff_b[ 8] - hist_a[i - 16] * coeff_a[8 ];
+ right += hist_b[i - 15] * coeff_b[ 8] - hist_a[i - 15] * coeff_a[8 ];
+ left += hist_b[i - 18] * coeff_b[ 9] - hist_a[i - 18] * coeff_a[9 ];
+ right += hist_b[i - 17] * coeff_b[ 9] - hist_a[i - 17] * coeff_a[9 ];
+ left += hist_b[i - 20] * coeff_b[10] - hist_a[i - 20] * coeff_a[10];
+ right += hist_b[i - 19] * coeff_b[10] - hist_a[i - 19] * coeff_a[10];
+ dst[0] = hist_a[i ] = (float)left;
+ dst[1] = hist_a[i + 1] = (float)right;
+ src += 2;
+ dst += 2;
+
+ if ((i += 2) == 256) {
+ memcpy(hist_a, hist_a + 236, sizeof(*hist_a) * 20);
+ memcpy(hist_b, hist_b + 236, sizeof(*hist_b) * 20);
+ i = 20;
+ }
+ }
+
+ s->yule_hist_i = i;
+}
+
+/*
+ * Calculate the ReplayGain value from the specified loudness histogram;
+ * clip to -24 / +64 dB.
+ */
+static float calc_replaygain(uint32_t *histogram)
+{
+ uint32_t loud_count = 0, total_windows = 0;
+ float gain;
+ int i;
+
+ for (i = 0; i < HISTOGRAM_SLOTS; i++)
+ total_windows += histogram [i];
+
+ while (i--)
+ if ((loud_count += histogram [i]) * 20 >= total_windows)
+ break;
+
+ gain = (float)(64.54 - i / 100.0);
+
+ return av_clipf(gain, -24.0, 64.0);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ReplayGainContext *s = ctx->priv;
+ uint32_t level;
+ AVFrame *out;
+
+ out = ff_get_audio_buffer(inlink, in->nb_samples);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ calc_stereo_peak((float *)in->data[0],
+ in->nb_samples, &s->peak);
+ yule_filter_stereo_samples(s, (const float *)in->data[0],
+ (float *)out->data[0],
+ out->nb_samples);
+ butter_filter_stereo_samples(s, (float *)out->data[0],
+ out->nb_samples);
+ level = (uint32_t)floor(100 * calc_stereo_rms((float *)out->data[0],
+ out->nb_samples));
+ level = av_clip(level, 0, HISTOGRAM_SLOTS - 1);
+
+ s->histogram[level]++;
+
+ av_frame_free(&out);
+ return ff_filter_frame(outlink, in);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ReplayGainContext *s = ctx->priv;
+ float gain = calc_replaygain(s->histogram);
+
+ av_log(ctx, AV_LOG_INFO, "track_gain = %+.2f dB\n", gain);
+ av_log(ctx, AV_LOG_INFO, "track_peak = %.6f\n", s->peak);
+}
+
+static const AVFilterPad replaygain_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad replaygain_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_replaygain = {
+ .name = "replaygain",
+ .description = NULL_IF_CONFIG_SMALL("ReplayGain scanner."),
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .priv_size = sizeof(ReplayGainContext),
+ .inputs = replaygain_inputs,
+ .outputs = replaygain_outputs,
+};
diff --git a/libavfilter/af_resample.c b/libavfilter/af_resample.c
index fbe6105..d65d4bc 100644
--- a/libavfilter/af_resample.c
+++ b/libavfilter/af_resample.c
@@ -1,19 +1,18 @@
/*
+ * This file is part of FFmpeg.
*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -312,9 +311,9 @@ static const AVClass resample_class = {
static const AVFilterPad avfilter_af_resample_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -334,11 +333,9 @@ AVFilter ff_af_resample = {
.description = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
.priv_size = sizeof(ResampleContext),
.priv_class = &resample_class,
-
- .init_dict = init,
- .uninit = uninit,
- .query_formats = query_formats,
-
- .inputs = avfilter_af_resample_inputs,
- .outputs = avfilter_af_resample_outputs,
+ .init_dict = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_af_resample_inputs,
+ .outputs = avfilter_af_resample_outputs,
};
diff --git a/libavfilter/af_silencedetect.c b/libavfilter/af_silencedetect.c
new file mode 100644
index 0000000..687d2e7
--- /dev/null
+++ b/libavfilter/af_silencedetect.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2012 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio silence detector
+ */
+
+#include <float.h> /* DBL_MAX */
+
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "audio.h"
+#include "formats.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct SilenceDetectContext {
+ const AVClass *class;
+ double noise; ///< noise amplitude ratio
+ double duration; ///< minimum duration of silence until notification
+ int64_t nb_null_samples; ///< current number of continuous zero samples
+ int64_t start; ///< if silence is detected, this value contains the time of the first zero sample
+ int last_sample_rate; ///< last sample rate to check for sample rate changes
+
+ void (*silencedetect)(struct SilenceDetectContext *s, AVFrame *insamples,
+ int nb_samples, int64_t nb_samples_notify,
+ AVRational time_base);
+} SilenceDetectContext;
+
+#define OFFSET(x) offsetof(SilenceDetectContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption silencedetect_options[] = {
+ { "n", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
+ { "noise", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
+ { "d", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
+ { "duration", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(silencedetect);
+
+static char *get_metadata_val(AVFrame *insamples, const char *key)
+{
+ AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0);
+ return e && e->value ? e->value : NULL;
+}
+
+static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples,
+ int is_silence, int64_t nb_samples_notify,
+ AVRational time_base)
+{
+ if (is_silence) {
+ if (!s->start) {
+ s->nb_null_samples++;
+ if (s->nb_null_samples >= nb_samples_notify) {
+ s->start = insamples->pts - (int64_t)(s->duration / av_q2d(time_base) + .5);
+ av_dict_set(&insamples->metadata, "lavfi.silence_start",
+ av_ts2timestr(s->start, &time_base), 0);
+ av_log(s, AV_LOG_INFO, "silence_start: %s\n",
+ get_metadata_val(insamples, "lavfi.silence_start"));
+ }
+ }
+ } else {
+ if (s->start) {
+ av_dict_set(&insamples->metadata, "lavfi.silence_end",
+ av_ts2timestr(insamples->pts, &time_base), 0);
+ av_dict_set(&insamples->metadata, "lavfi.silence_duration",
+ av_ts2timestr(insamples->pts - s->start, &time_base), 0);
+ av_log(s, AV_LOG_INFO,
+ "silence_end: %s | silence_duration: %s\n",
+ get_metadata_val(insamples, "lavfi.silence_end"),
+ get_metadata_val(insamples, "lavfi.silence_duration"));
+ }
+ s->nb_null_samples = s->start = 0;
+ }
+}
+
+#define SILENCE_DETECT(name, type) \
+static void silencedetect_##name(SilenceDetectContext *s, AVFrame *insamples, \
+ int nb_samples, int64_t nb_samples_notify, \
+ AVRational time_base) \
+{ \
+ const type *p = (const type *)insamples->data[0]; \
+ const type noise = s->noise; \
+ int i; \
+ \
+ for (i = 0; i < nb_samples; i++, p++) \
+ update(s, insamples, *p < noise && *p > -noise, \
+ nb_samples_notify, time_base); \
+}
+
+SILENCE_DETECT(dbl, double)
+SILENCE_DETECT(flt, float)
+SILENCE_DETECT(s32, int32_t)
+SILENCE_DETECT(s16, int16_t)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SilenceDetectContext *s = ctx->priv;
+
+ switch (inlink->format) {
+ case AV_SAMPLE_FMT_DBL: s->silencedetect = silencedetect_dbl; break;
+ case AV_SAMPLE_FMT_FLT: s->silencedetect = silencedetect_flt; break;
+ case AV_SAMPLE_FMT_S32:
+ s->noise *= INT32_MAX;
+ s->silencedetect = silencedetect_s32;
+ break;
+ case AV_SAMPLE_FMT_S16:
+ s->noise *= INT16_MAX;
+ s->silencedetect = silencedetect_s16;
+ break;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ SilenceDetectContext *s = inlink->dst->priv;
+ const int nb_channels = inlink->channels;
+ const int srate = inlink->sample_rate;
+ const int nb_samples = insamples->nb_samples * nb_channels;
+ const int64_t nb_samples_notify = srate * s->duration * nb_channels;
+
+ // scale number of null samples to the new sample rate
+ if (s->last_sample_rate && s->last_sample_rate != srate)
+ s->nb_null_samples = srate * s->nb_null_samples / s->last_sample_rate;
+ s->last_sample_rate = srate;
+
+ // TODO: document metadata
+ s->silencedetect(s, insamples, nb_samples, nb_samples_notify,
+ inlink->time_base);
+
+ return ff_filter_frame(inlink->dst->outputs[0], insamples);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static const AVFilterPad silencedetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad silencedetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_silencedetect = {
+ .name = "silencedetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect silence."),
+ .priv_size = sizeof(SilenceDetectContext),
+ .query_formats = query_formats,
+ .inputs = silencedetect_inputs,
+ .outputs = silencedetect_outputs,
+ .priv_class = &silencedetect_class,
+};
diff --git a/libavfilter/af_silenceremove.c b/libavfilter/af_silenceremove.c
new file mode 100644
index 0000000..02b64ce
--- /dev/null
+++ b/libavfilter/af_silenceremove.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2001 Heikki Leinonen
+ * Copyright (c) 2001 Chris Bagwell
+ * Copyright (c) 2003 Donnie Smith
+ * Copyright (c) 2014 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h> /* DBL_MAX */
+
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "audio.h"
+#include "formats.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum SilenceMode {
+ SILENCE_TRIM,
+ SILENCE_TRIM_FLUSH,
+ SILENCE_COPY,
+ SILENCE_COPY_FLUSH,
+ SILENCE_STOP
+};
+
+typedef struct SilenceRemoveContext {
+ const AVClass *class;
+
+ enum SilenceMode mode;
+
+ int start_periods;
+ int64_t start_duration;
+ double start_threshold;
+
+ int stop_periods;
+ int64_t stop_duration;
+ double stop_threshold;
+
+ double *start_holdoff;
+ size_t start_holdoff_offset;
+ size_t start_holdoff_end;
+ int start_found_periods;
+
+ double *stop_holdoff;
+ size_t stop_holdoff_offset;
+ size_t stop_holdoff_end;
+ int stop_found_periods;
+
+ double *window;
+ double *window_current;
+ double *window_end;
+ int window_size;
+ double rms_sum;
+
+ int leave_silence;
+ int restart;
+ int64_t next_pts;
+} SilenceRemoveContext;
+
+#define OFFSET(x) offsetof(SilenceRemoveContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption silenceremove_options[] = {
+ { "start_periods", NULL, OFFSET(start_periods), AV_OPT_TYPE_INT, {.i64=0}, 0, 9000, FLAGS },
+ { "start_duration", NULL, OFFSET(start_duration), AV_OPT_TYPE_DURATION, {.i64=0}, 0, 9000, FLAGS },
+ { "start_threshold", NULL, OFFSET(start_threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, DBL_MAX, FLAGS },
+ { "stop_periods", NULL, OFFSET(stop_periods), AV_OPT_TYPE_INT, {.i64=0}, -9000, 9000, FLAGS },
+ { "stop_duration", NULL, OFFSET(stop_duration), AV_OPT_TYPE_DURATION, {.i64=0}, 0, 9000, FLAGS },
+ { "stop_threshold", NULL, OFFSET(stop_threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, DBL_MAX, FLAGS },
+ { "leave_silence", NULL, OFFSET(leave_silence), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(silenceremove);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SilenceRemoveContext *s = ctx->priv;
+
+ if (s->stop_periods < 0) {
+ s->stop_periods = -s->stop_periods;
+ s->restart = 1;
+ }
+
+ return 0;
+}
+
+static void clear_rms(SilenceRemoveContext *s)
+{
+ memset(s->window, 0, s->window_size * sizeof(*s->window));
+
+ s->window_current = s->window;
+ s->window_end = s->window + s->window_size;
+ s->rms_sum = 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SilenceRemoveContext *s = ctx->priv;
+
+ s->window_size = (inlink->sample_rate / 50) * inlink->channels;
+ s->window = av_malloc_array(s->window_size, sizeof(*s->window));
+ if (!s->window)
+ return AVERROR(ENOMEM);
+
+ clear_rms(s);
+
+ s->start_duration = av_rescale(s->start_duration, inlink->sample_rate,
+ AV_TIME_BASE);
+ s->stop_duration = av_rescale(s->stop_duration, inlink->sample_rate,
+ AV_TIME_BASE);
+
+ s->start_holdoff = av_malloc_array(FFMAX(s->start_duration, 1),
+ sizeof(*s->start_holdoff) *
+ inlink->channels);
+ if (!s->start_holdoff)
+ return AVERROR(ENOMEM);
+
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ s->start_found_periods = 0;
+
+ s->stop_holdoff = av_malloc_array(FFMAX(s->stop_duration, 1),
+ sizeof(*s->stop_holdoff) *
+ inlink->channels);
+ if (!s->stop_holdoff)
+ return AVERROR(ENOMEM);
+
+ s->stop_holdoff_offset = 0;
+ s->stop_holdoff_end = 0;
+ s->stop_found_periods = 0;
+
+ if (s->start_periods)
+ s->mode = SILENCE_TRIM;
+ else
+ s->mode = SILENCE_COPY;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
+ return 0;
+}
+
+static double compute_rms(SilenceRemoveContext *s, double sample)
+{
+ double new_sum;
+
+ new_sum = s->rms_sum;
+ new_sum -= *s->window_current;
+ new_sum += sample * sample;
+
+ return sqrt(new_sum / s->window_size);
+}
+
+static void update_rms(SilenceRemoveContext *s, double sample)
+{
+ s->rms_sum -= *s->window_current;
+ *s->window_current = sample * sample;
+ s->rms_sum += *s->window_current;
+
+ s->window_current++;
+ if (s->window_current >= s->window_end)
+ s->window_current = s->window;
+}
+
+static void flush(AVFrame *out, AVFilterLink *outlink,
+ int *nb_samples_written, int *ret)
+{
+ if (*nb_samples_written) {
+ out->nb_samples = *nb_samples_written / outlink->channels;
+ *ret = ff_filter_frame(outlink, out);
+ *nb_samples_written = 0;
+ } else {
+ av_frame_free(&out);
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ SilenceRemoveContext *s = ctx->priv;
+ int i, j, threshold, ret = 0;
+ int nbs, nb_samples_read, nb_samples_written;
+ double *obuf, *ibuf = (double *)in->data[0];
+ AVFrame *out;
+
+ nb_samples_read = nb_samples_written = 0;
+
+ switch (s->mode) {
+ case SILENCE_TRIM:
+silence_trim:
+ nbs = in->nb_samples - nb_samples_read / inlink->channels;
+ if (!nbs)
+ break;
+
+ for (i = 0; i < nbs; i++) {
+ threshold = 0;
+ for (j = 0; j < inlink->channels; j++) {
+ threshold |= compute_rms(s, ibuf[j]) > s->start_threshold;
+ }
+
+ if (threshold) {
+ for (j = 0; j < inlink->channels; j++) {
+ update_rms(s, *ibuf);
+ s->start_holdoff[s->start_holdoff_end++] = *ibuf++;
+ nb_samples_read++;
+ }
+
+ if (s->start_holdoff_end >= s->start_duration * inlink->channels) {
+ if (++s->start_found_periods >= s->start_periods) {
+ s->mode = SILENCE_TRIM_FLUSH;
+ goto silence_trim_flush;
+ }
+
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ }
+ } else {
+ s->start_holdoff_end = 0;
+
+ for (j = 0; j < inlink->channels; j++)
+ update_rms(s, ibuf[j]);
+
+ ibuf += inlink->channels;
+ nb_samples_read += inlink->channels;
+ }
+ }
+ break;
+
+ case SILENCE_TRIM_FLUSH:
+silence_trim_flush:
+ nbs = s->start_holdoff_end - s->start_holdoff_offset;
+ nbs -= nbs % inlink->channels;
+ if (!nbs)
+ break;
+
+ out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ memcpy(out->data[0], &s->start_holdoff[s->start_holdoff_offset],
+ nbs * sizeof(double));
+ s->start_holdoff_offset += nbs;
+
+ ret = ff_filter_frame(outlink, out);
+
+ if (s->start_holdoff_offset == s->start_holdoff_end) {
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ s->mode = SILENCE_COPY;
+ goto silence_copy;
+ }
+ break;
+
+ case SILENCE_COPY:
+silence_copy:
+ nbs = in->nb_samples - nb_samples_read / inlink->channels;
+ if (!nbs)
+ break;
+
+ out = ff_get_audio_buffer(inlink, nbs);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ obuf = (double *)out->data[0];
+
+ if (s->stop_periods) {
+ for (i = 0; i < nbs; i++) {
+ threshold = 1;
+ for (j = 0; j < inlink->channels; j++)
+ threshold &= compute_rms(s, ibuf[j]) > s->stop_threshold;
+
+ if (threshold && s->stop_holdoff_end && !s->leave_silence) {
+ s->mode = SILENCE_COPY_FLUSH;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_copy_flush;
+ } else if (threshold) {
+ for (j = 0; j < inlink->channels; j++) {
+ update_rms(s, *ibuf);
+ *obuf++ = *ibuf++;
+ nb_samples_read++;
+ nb_samples_written++;
+ }
+ } else if (!threshold) {
+ for (j = 0; j < inlink->channels; j++) {
+ update_rms(s, *ibuf);
+ if (s->leave_silence) {
+ *obuf++ = *ibuf;
+ nb_samples_written++;
+ }
+
+ s->stop_holdoff[s->stop_holdoff_end++] = *ibuf++;
+ nb_samples_read++;
+ }
+
+ if (s->stop_holdoff_end >= s->stop_duration * inlink->channels) {
+ if (++s->stop_found_periods >= s->stop_periods) {
+ s->stop_holdoff_offset = 0;
+ s->stop_holdoff_end = 0;
+
+ if (!s->restart) {
+ s->mode = SILENCE_STOP;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_stop;
+ } else {
+ s->stop_found_periods = 0;
+ s->start_found_periods = 0;
+ s->start_holdoff_offset = 0;
+ s->start_holdoff_end = 0;
+ clear_rms(s);
+ s->mode = SILENCE_TRIM;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_trim;
+ }
+ }
+ s->mode = SILENCE_COPY_FLUSH;
+ flush(out, outlink, &nb_samples_written, &ret);
+ goto silence_copy_flush;
+ }
+ }
+ }
+ flush(out, outlink, &nb_samples_written, &ret);
+ } else {
+ memcpy(obuf, ibuf, sizeof(double) * nbs * inlink->channels);
+ ret = ff_filter_frame(outlink, out);
+ }
+ break;
+
+ case SILENCE_COPY_FLUSH:
+silence_copy_flush:
+ nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
+ nbs -= nbs % inlink->channels;
+ if (!nbs)
+ break;
+
+ out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ memcpy(out->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
+ nbs * sizeof(double));
+ s->stop_holdoff_offset += nbs;
+
+ ret = ff_filter_frame(outlink, out);
+
+ if (s->stop_holdoff_offset == s->stop_holdoff_end) {
+ s->stop_holdoff_offset = 0;
+ s->stop_holdoff_end = 0;
+ s->mode = SILENCE_COPY;
+ goto silence_copy;
+ }
+ break;
+ case SILENCE_STOP:
+silence_stop:
+ break;
+ }
+
+ av_frame_free(&in);
+
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SilenceRemoveContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && (s->mode == SILENCE_COPY_FLUSH ||
+ s->mode == SILENCE_COPY)) {
+ int nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
+ if (nbs) {
+ AVFrame *frame;
+
+ frame = ff_get_audio_buffer(outlink, nbs / outlink->channels);
+ if (!frame)
+ return AVERROR(ENOMEM);
+
+ memcpy(frame->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
+ nbs * sizeof(double));
+ ret = ff_filter_frame(ctx->inputs[0], frame);
+ }
+ s->mode = SILENCE_STOP;
+ }
+ return ret;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SilenceRemoveContext *s = ctx->priv;
+
+ av_freep(&s->start_holdoff);
+ av_freep(&s->stop_holdoff);
+ av_freep(&s->window);
+}
+
+static const AVFilterPad silenceremove_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad silenceremove_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_silenceremove = {
+ .name = "silenceremove",
+ .description = NULL_IF_CONFIG_SMALL("Remove silence."),
+ .priv_size = sizeof(SilenceRemoveContext),
+ .priv_class = &silenceremove_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = silenceremove_inputs,
+ .outputs = silenceremove_outputs,
+};
diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c
index 11d85a1..9900d22 100644
--- a/libavfilter/af_volume.c
+++ b/libavfilter/af_volume.c
@@ -2,20 +2,20 @@
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,21 +38,41 @@
#include "internal.h"
#include "af_volume.h"
-static const char *precision_str[] = {
+static const char * const precision_str[] = {
"fixed", "float", "double"
};
+static const char *const var_names[] = {
+ "n", ///< frame number (starting at zero)
+ "nb_channels", ///< number of channels
+ "nb_consumed_samples", ///< number of samples consumed by the filter
+ "nb_samples", ///< number of samples in the current frame
+ "pos", ///< position in the file of the frame
+ "pts", ///< frame presentation timestamp
+ "sample_rate", ///< sample rate
+ "startpts", ///< PTS at start of stream
+ "startt", ///< time at start of stream
+ "t", ///< time in the file of the frame
+ "tb", ///< timebase
+ "volume", ///< last set value
+ NULL
+};
+
#define OFFSET(x) offsetof(VolumeContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
-
-static const AVOption options[] = {
- { "volume", "Volume adjustment.",
- OFFSET(volume), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 }, 0, 0x7fffff, A },
- { "precision", "Mathematical precision.",
- OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A, "precision" },
- { "fixed", "8-bit fixed-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A, "precision" },
- { "float", "32-bit floating-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A, "precision" },
- { "double", "64-bit floating-point.", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A, "precision" },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption volume_options[] = {
+ { "volume", "set volume adjustment expression",
+ OFFSET(volume_expr), AV_OPT_TYPE_STRING, { .str = "1.0" }, .flags = A|F },
+ { "precision", "select mathematical precision",
+ OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" },
+ { "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" },
+ { "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" },
+ { "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_ONCE}, 0, EVAL_MODE_NB-1, .flags = A|F, "eval" },
+ { "once", "eval volume expression once", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_ONCE}, .flags = A|F, .unit = "eval" },
+ { "frame", "eval volume expression per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = A|F, .unit = "eval" },
{ "replaygain", "Apply replaygain side data when present",
OFFSET(replaygain), AV_OPT_TYPE_INT, { .i64 = REPLAYGAIN_DROP }, REPLAYGAIN_DROP, REPLAYGAIN_ALBUM, A, "replaygain" },
{ "drop", "replaygain side data is dropped", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_DROP }, 0, 0, A, "replaygain" },
@@ -66,39 +86,48 @@ static const AVOption options[] = {
{ NULL },
};
-static const AVClass volume_class = {
- .class_name = "volume filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(volume);
-static av_cold int init(AVFilterContext *ctx)
+static int set_expr(AVExpr **pexpr, const char *expr, void *log_ctx)
{
- VolumeContext *vol = ctx->priv;
-
- if (vol->precision == PRECISION_FIXED) {
- vol->volume_i = (int)(vol->volume * 256 + 0.5);
- vol->volume = vol->volume_i / 256.0;
- av_log(ctx, AV_LOG_VERBOSE, "volume:(%d/256)(%f)(%1.2fdB) precision:fixed\n",
- vol->volume_i, vol->volume, 20.0*log(vol->volume)/M_LN10);
- } else {
- av_log(ctx, AV_LOG_VERBOSE, "volume:(%f)(%1.2fdB) precision:%s\n",
- vol->volume, 20.0*log(vol->volume)/M_LN10,
- precision_str[vol->precision]);
+ int ret;
+ AVExpr *old = NULL;
+
+ if (*pexpr)
+ old = *pexpr;
+ ret = av_expr_parse(pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the volume expression '%s'\n", expr);
+ *pexpr = old;
+ return ret;
}
+ av_expr_free(old);
return 0;
}
+static av_cold int init(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+ return set_expr(&vol->volume_pexpr, vol->volume_expr, ctx);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+ av_expr_free(vol->volume_pexpr);
+ av_opt_free(vol);
+}
+
static int query_formats(AVFilterContext *ctx)
{
VolumeContext *vol = ctx->priv;
AVFilterFormats *formats = NULL;
AVFilterChannelLayouts *layouts;
static const enum AVSampleFormat sample_fmts[][7] = {
- /* PRECISION_FIXED */
- {
+ [PRECISION_FIXED] = {
AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16,
@@ -107,21 +136,19 @@ static int query_formats(AVFilterContext *ctx)
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE
},
- /* PRECISION_FLOAT */
- {
+ [PRECISION_FLOAT] = {
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE
},
- /* PRECISION_DOUBLE */
- {
+ [PRECISION_DOUBLE] = {
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NONE
}
};
- layouts = ff_all_channel_layouts();
+ layouts = ff_all_channel_counts();
if (!layouts)
return AVERROR(ENOMEM);
ff_set_common_channel_layouts(ctx, layouts);
@@ -185,8 +212,6 @@ static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src,
smp_dst[i] = av_clipl_int32((((int64_t)smp_src[i] * volume + 128) >> 8));
}
-
-
static av_cold void volume_init(VolumeContext *vol)
{
vol->samples_align = 1;
@@ -221,6 +246,38 @@ static av_cold void volume_init(VolumeContext *vol)
ff_volume_init_x86(vol);
}
+static int set_volume(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+
+ vol->volume = av_expr_eval(vol->volume_pexpr, vol->var_values, NULL);
+ if (isnan(vol->volume)) {
+ if (vol->eval_mode == EVAL_MODE_ONCE) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid value NaN for volume\n");
+ return AVERROR(EINVAL);
+ } else {
+ av_log(ctx, AV_LOG_WARNING, "Invalid value NaN for volume, setting to 0\n");
+ vol->volume = 0;
+ }
+ }
+ vol->var_values[VAR_VOLUME] = vol->volume;
+
+ av_log(ctx, AV_LOG_VERBOSE, "n:%f t:%f pts:%f precision:%s ",
+ vol->var_values[VAR_N], vol->var_values[VAR_T], vol->var_values[VAR_PTS],
+ precision_str[vol->precision]);
+
+ if (vol->precision == PRECISION_FIXED) {
+ vol->volume_i = (int)(vol->volume * 256 + 0.5);
+ vol->volume = vol->volume_i / 256.0;
+ av_log(ctx, AV_LOG_VERBOSE, "volume_i:%d/255 ", vol->volume_i);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "volume:%f volume_dB:%f\n",
+ vol->volume, 20.0*log(vol->volume)/M_LN10);
+
+ volume_init(vol);
+ return 0;
+}
+
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
@@ -228,20 +285,59 @@ static int config_output(AVFilterLink *outlink)
AVFilterLink *inlink = ctx->inputs[0];
vol->sample_fmt = inlink->format;
- vol->channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ vol->channels = inlink->channels;
vol->planes = av_sample_fmt_is_planar(inlink->format) ? vol->channels : 1;
- volume_init(vol);
+ vol->var_values[VAR_N] =
+ vol->var_values[VAR_NB_CONSUMED_SAMPLES] =
+ vol->var_values[VAR_NB_SAMPLES] =
+ vol->var_values[VAR_POS] =
+ vol->var_values[VAR_PTS] =
+ vol->var_values[VAR_STARTPTS] =
+ vol->var_values[VAR_STARTT] =
+ vol->var_values[VAR_T] =
+ vol->var_values[VAR_VOLUME] = NAN;
+
+ vol->var_values[VAR_NB_CHANNELS] = inlink->channels;
+ vol->var_values[VAR_TB] = av_q2d(inlink->time_base);
+ vol->var_values[VAR_SAMPLE_RATE] = inlink->sample_rate;
+
+ av_log(inlink->src, AV_LOG_VERBOSE, "tb:%f sample_rate:%f nb_channels:%f\n",
+ vol->var_values[VAR_TB],
+ vol->var_values[VAR_SAMPLE_RATE],
+ vol->var_values[VAR_NB_CHANNELS]);
+
+ return set_volume(ctx);
+}
- return 0;
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ VolumeContext *vol = ctx->priv;
+ int ret = AVERROR(ENOSYS);
+
+ if (!strcmp(cmd, "volume")) {
+ if ((ret = set_expr(&vol->volume_pexpr, args, ctx)) < 0)
+ return ret;
+ if (vol->eval_mode == EVAL_MODE_ONCE)
+ set_volume(ctx);
+ }
+
+ return ret;
}
+#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
+ AVFilterContext *ctx = inlink->dst;
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
+ int64_t pos;
AVFrameSideData *sd = av_frame_get_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
int ret;
@@ -283,8 +379,23 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_frame_remove_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
}
- if (vol->volume == 1.0 || vol->volume_i == 256)
- return ff_filter_frame(outlink, buf);
+ if (isnan(vol->var_values[VAR_STARTPTS])) {
+ vol->var_values[VAR_STARTPTS] = TS2D(buf->pts);
+ vol->var_values[VAR_STARTT ] = TS2T(buf->pts, inlink->time_base);
+ }
+ vol->var_values[VAR_PTS] = TS2D(buf->pts);
+ vol->var_values[VAR_T ] = TS2T(buf->pts, inlink->time_base);
+ vol->var_values[VAR_N ] = inlink->frame_count;
+
+ pos = av_frame_get_pkt_pos(buf);
+ vol->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+ if (vol->eval_mode == EVAL_MODE_FRAME)
+ set_volume(ctx);
+
+ if (vol->volume == 1.0 || vol->volume_i == 256) {
+ out_buf = buf;
+ goto end;
+ }
/* do volume scaling in-place if input buffer is writable */
if (av_frame_is_writable(buf)) {
@@ -335,6 +446,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
if (buf != out_buf)
av_frame_free(&buf);
+end:
+ vol->var_values[VAR_NB_CONSUMED_SAMPLES] += out_buf->nb_samples;
return ff_filter_frame(outlink, out_buf);
}
@@ -363,6 +476,9 @@ AVFilter ff_af_volume = {
.priv_size = sizeof(VolumeContext),
.priv_class = &volume_class,
.init = init,
+ .uninit = uninit,
.inputs = avfilter_af_volume_inputs,
.outputs = avfilter_af_volume_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+ .process_command = process_command,
};
diff --git a/libavfilter/af_volume.h b/libavfilter/af_volume.h
index 6bd89ac..e78e042 100644
--- a/libavfilter/af_volume.h
+++ b/libavfilter/af_volume.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,6 +25,7 @@
#define AVFILTER_AF_VOLUME_H
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
@@ -35,6 +36,28 @@ enum PrecisionType {
PRECISION_DOUBLE,
};
+enum EvalMode {
+ EVAL_MODE_ONCE,
+ EVAL_MODE_FRAME,
+ EVAL_MODE_NB
+};
+
+enum VolumeVarName {
+ VAR_N,
+ VAR_NB_CHANNELS,
+ VAR_NB_CONSUMED_SAMPLES,
+ VAR_NB_SAMPLES,
+ VAR_POS,
+ VAR_PTS,
+ VAR_SAMPLE_RATE,
+ VAR_STARTPTS,
+ VAR_STARTT,
+ VAR_T,
+ VAR_TB,
+ VAR_VOLUME,
+ VAR_VARS_NB
+};
+
enum ReplayGainType {
REPLAYGAIN_DROP,
REPLAYGAIN_IGNORE,
@@ -46,6 +69,11 @@ typedef struct VolumeContext {
const AVClass *class;
AVFloatDSPContext fdsp;
enum PrecisionType precision;
+ enum EvalMode eval_mode;
+ const char *volume_expr;
+ AVExpr *volume_pexpr;
+ double var_values[VAR_VARS_NB];
+
enum ReplayGainType replaygain;
double replaygain_preamp;
int replaygain_noclip;
diff --git a/libavfilter/af_volumedetect.c b/libavfilter/af_volumedetect.c
new file mode 100644
index 0000000..5de115e
--- /dev/null
+++ b/libavfilter/af_volumedetect.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/avassert.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ /**
+ * Number of samples at each PCM value.
+ * histogram[0x8000 + i] is the number of samples at value i.
+ * The extra element is there for symmetry.
+ */
+ uint64_t histogram[0x10001];
+} VolDetectContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE
+ };
+ AVFilterFormats *formats;
+
+ if (!(formats = ff_make_format_list(sample_fmts)))
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ VolDetectContext *vd = ctx->priv;
+ int64_t layout = samples->channel_layout;
+ int nb_samples = samples->nb_samples;
+ int nb_channels = av_get_channel_layout_nb_channels(layout);
+ int nb_planes = nb_channels;
+ int plane, i;
+ int16_t *pcm;
+
+ if (!av_sample_fmt_is_planar(samples->format)) {
+ nb_samples *= nb_channels;
+ nb_planes = 1;
+ }
+ for (plane = 0; plane < nb_planes; plane++) {
+ pcm = (int16_t *)samples->extended_data[plane];
+ for (i = 0; i < nb_samples; i++)
+ vd->histogram[pcm[i] + 0x8000]++;
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], samples);
+}
+
+#define MAX_DB 91
+
+static inline double logdb(uint64_t v)
+{
+ double d = v / (double)(0x8000 * 0x8000);
+ if (!v)
+ return MAX_DB;
+ return log(d) * -4.3429448190325182765112891891660508229; /* -10/log(10) */
+}
+
+static void print_stats(AVFilterContext *ctx)
+{
+ VolDetectContext *vd = ctx->priv;
+ int i, max_volume, shift;
+ uint64_t nb_samples = 0, power = 0, nb_samples_shift = 0, sum = 0;
+ uint64_t histdb[MAX_DB + 1] = { 0 };
+
+ for (i = 0; i < 0x10000; i++)
+ nb_samples += vd->histogram[i];
+ av_log(ctx, AV_LOG_INFO, "n_samples: %"PRId64"\n", nb_samples);
+ if (!nb_samples)
+ return;
+
+ /* If nb_samples > 1<<34, there is a risk of overflow in the
+ multiplication or the sum: shift all histogram values to avoid that.
+ The total number of samples must be recomputed to avoid rounding
+ errors. */
+ shift = av_log2(nb_samples >> 33);
+ for (i = 0; i < 0x10000; i++) {
+ nb_samples_shift += vd->histogram[i] >> shift;
+ power += (i - 0x8000) * (i - 0x8000) * (vd->histogram[i] >> shift);
+ }
+ if (!nb_samples_shift)
+ return;
+ power = (power + nb_samples_shift / 2) / nb_samples_shift;
+ av_assert0(power <= 0x8000 * 0x8000);
+ av_log(ctx, AV_LOG_INFO, "mean_volume: %.1f dB\n", -logdb(power));
+
+ max_volume = 0x8000;
+ while (max_volume > 0 && !vd->histogram[0x8000 + max_volume] &&
+ !vd->histogram[0x8000 - max_volume])
+ max_volume--;
+ av_log(ctx, AV_LOG_INFO, "max_volume: %.1f dB\n", -logdb(max_volume * max_volume));
+
+ for (i = 0; i < 0x10000; i++)
+ histdb[(int)logdb((i - 0x8000) * (i - 0x8000))] += vd->histogram[i];
+ for (i = 0; i <= MAX_DB && !histdb[i]; i++);
+ for (; i <= MAX_DB && sum < nb_samples / 1000; i++) {
+ av_log(ctx, AV_LOG_INFO, "histogram_%ddb: %"PRId64"\n", i, histdb[i]);
+ sum += histdb[i];
+ }
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ print_stats(ctx);
+}
+
+static const AVFilterPad volumedetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad volumedetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_volumedetect = {
+ .name = "volumedetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect audio volume."),
+ .priv_size = sizeof(VolDetectContext),
+ .query_formats = query_formats,
+ .uninit = uninit,
+ .inputs = volumedetect_inputs,
+ .outputs = volumedetect_outputs,
+};
diff --git a/libavfilter/all_channel_layouts.inc b/libavfilter/all_channel_layouts.inc
new file mode 100644
index 0000000..878e1f5
--- /dev/null
+++ b/libavfilter/all_channel_layouts.inc
@@ -0,0 +1,68 @@
+AV_CH_FRONT_CENTER,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER,
+AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_CENTER|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_LOW_FREQUENCY|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
+AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT,
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 67a298d..2352d44 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -2,25 +2,26 @@
* filter registration
* Copyright (c) 2008 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avfilter.h"
#include "config.h"
+#include "opencl_allkernels.h"
#define REGISTER_FILTER(X, x, y) \
@@ -44,79 +45,210 @@ void avfilter_register_all(void)
return;
initialized = 1;
+ REGISTER_FILTER(ADELAY, adelay, af);
+ REGISTER_FILTER(AECHO, aecho, af);
+ REGISTER_FILTER(AEVAL, aeval, af);
+ REGISTER_FILTER(AFADE, afade, af);
REGISTER_FILTER(AFORMAT, aformat, af);
+ REGISTER_FILTER(AINTERLEAVE, ainterleave, af);
+ REGISTER_FILTER(ALLPASS, allpass, af);
+ REGISTER_FILTER(AMERGE, amerge, af);
REGISTER_FILTER(AMIX, amix, af);
REGISTER_FILTER(ANULL, anull, af);
+ REGISTER_FILTER(APAD, apad, af);
+ REGISTER_FILTER(APERMS, aperms, af);
+ REGISTER_FILTER(APHASER, aphaser, af);
+ REGISTER_FILTER(ARESAMPLE, aresample, af);
+ REGISTER_FILTER(ASELECT, aselect, af);
+ REGISTER_FILTER(ASENDCMD, asendcmd, af);
+ REGISTER_FILTER(ASETNSAMPLES, asetnsamples, af);
REGISTER_FILTER(ASETPTS, asetpts, af);
+ REGISTER_FILTER(ASETRATE, asetrate, af);
REGISTER_FILTER(ASETTB, asettb, af);
REGISTER_FILTER(ASHOWINFO, ashowinfo, af);
REGISTER_FILTER(ASPLIT, asplit, af);
+ REGISTER_FILTER(ASTATS, astats, af);
+ REGISTER_FILTER(ASTREAMSYNC, astreamsync, af);
REGISTER_FILTER(ASYNCTS, asyncts, af);
+ REGISTER_FILTER(ATEMPO, atempo, af);
REGISTER_FILTER(ATRIM, atrim, af);
+ REGISTER_FILTER(AZMQ, azmq, af);
+ REGISTER_FILTER(BANDPASS, bandpass, af);
+ REGISTER_FILTER(BANDREJECT, bandreject, af);
+ REGISTER_FILTER(BASS, bass, af);
+ REGISTER_FILTER(BIQUAD, biquad, af);
REGISTER_FILTER(BS2B, bs2b, af);
REGISTER_FILTER(CHANNELMAP, channelmap, af);
REGISTER_FILTER(CHANNELSPLIT, channelsplit, af);
REGISTER_FILTER(COMPAND, compand, af);
+ REGISTER_FILTER(EARWAX, earwax, af);
+ REGISTER_FILTER(EBUR128, ebur128, af);
+ REGISTER_FILTER(EQUALIZER, equalizer, af);
+ REGISTER_FILTER(FLANGER, flanger, af);
+ REGISTER_FILTER(HIGHPASS, highpass, af);
REGISTER_FILTER(JOIN, join, af);
+ REGISTER_FILTER(LADSPA, ladspa, af);
+ REGISTER_FILTER(LOWPASS, lowpass, af);
+ REGISTER_FILTER(PAN, pan, af);
+ REGISTER_FILTER(REPLAYGAIN, replaygain, af);
REGISTER_FILTER(RESAMPLE, resample, af);
+ REGISTER_FILTER(SILENCEDETECT, silencedetect, af);
+ REGISTER_FILTER(SILENCEREMOVE, silenceremove, af);
+ REGISTER_FILTER(TREBLE, treble, af);
REGISTER_FILTER(VOLUME, volume, af);
+ REGISTER_FILTER(VOLUMEDETECT, volumedetect, af);
+ REGISTER_FILTER(AEVALSRC, aevalsrc, asrc);
REGISTER_FILTER(ANULLSRC, anullsrc, asrc);
+ REGISTER_FILTER(FLITE, flite, asrc);
+ REGISTER_FILTER(SINE, sine, asrc);
REGISTER_FILTER(ANULLSINK, anullsink, asink);
+ REGISTER_FILTER(ALPHAEXTRACT, alphaextract, vf);
+ REGISTER_FILTER(ALPHAMERGE, alphamerge, vf);
+ REGISTER_FILTER(ASS, ass, vf);
+ REGISTER_FILTER(BBOX, bbox, vf);
+ REGISTER_FILTER(BLACKDETECT, blackdetect, vf);
REGISTER_FILTER(BLACKFRAME, blackframe, vf);
+ REGISTER_FILTER(BLEND, blend, vf);
REGISTER_FILTER(BOXBLUR, boxblur, vf);
+ REGISTER_FILTER(CODECVIEW, codecview, vf);
+ REGISTER_FILTER(COLORBALANCE, colorbalance, vf);
+ REGISTER_FILTER(COLORCHANNELMIXER, colorchannelmixer, vf);
+ REGISTER_FILTER(COLORMATRIX, colormatrix, vf);
REGISTER_FILTER(COPY, copy, vf);
REGISTER_FILTER(CROP, crop, vf);
REGISTER_FILTER(CROPDETECT, cropdetect, vf);
+ REGISTER_FILTER(CURVES, curves, vf);
+ REGISTER_FILTER(DCTDNOIZ, dctdnoiz, vf);
+ REGISTER_FILTER(DECIMATE, decimate, vf);
+ REGISTER_FILTER(DEJUDDER, dejudder, vf);
REGISTER_FILTER(DELOGO, delogo, vf);
+ REGISTER_FILTER(DESHAKE, deshake, vf);
REGISTER_FILTER(DRAWBOX, drawbox, vf);
+ REGISTER_FILTER(DRAWGRID, drawgrid, vf);
REGISTER_FILTER(DRAWTEXT, drawtext, vf);
+ REGISTER_FILTER(EDGEDETECT, edgedetect, vf);
+ REGISTER_FILTER(ELBG, elbg, vf);
+ REGISTER_FILTER(EXTRACTPLANES, extractplanes, vf);
REGISTER_FILTER(FADE, fade, vf);
+ REGISTER_FILTER(FIELD, field, vf);
+ REGISTER_FILTER(FIELDMATCH, fieldmatch, vf);
REGISTER_FILTER(FIELDORDER, fieldorder, vf);
REGISTER_FILTER(FORMAT, format, vf);
REGISTER_FILTER(FPS, fps, vf);
REGISTER_FILTER(FRAMEPACK, framepack, vf);
+ REGISTER_FILTER(FRAMESTEP, framestep, vf);
REGISTER_FILTER(FREI0R, frei0r, vf);
+ REGISTER_FILTER(GEQ, geq, vf);
REGISTER_FILTER(GRADFUN, gradfun, vf);
+ REGISTER_FILTER(HALDCLUT, haldclut, vf);
REGISTER_FILTER(HFLIP, hflip, vf);
+ REGISTER_FILTER(HISTEQ, histeq, vf);
+ REGISTER_FILTER(HISTOGRAM, histogram, vf);
REGISTER_FILTER(HQDN3D, hqdn3d, vf);
+ REGISTER_FILTER(HQX, hqx, vf);
+ REGISTER_FILTER(HUE, hue, vf);
+ REGISTER_FILTER(IDET, idet, vf);
+ REGISTER_FILTER(IL, il, vf);
REGISTER_FILTER(INTERLACE, interlace, vf);
+ REGISTER_FILTER(INTERLEAVE, interleave, vf);
+ REGISTER_FILTER(KERNDEINT, kerndeint, vf);
+ REGISTER_FILTER(LENSCORRECTION, lenscorrection, vf);
+ REGISTER_FILTER(LUT3D, lut3d, vf);
REGISTER_FILTER(LUT, lut, vf);
REGISTER_FILTER(LUTRGB, lutrgb, vf);
REGISTER_FILTER(LUTYUV, lutyuv, vf);
+ REGISTER_FILTER(MCDEINT, mcdeint, vf);
+ REGISTER_FILTER(MERGEPLANES, mergeplanes, vf);
+ REGISTER_FILTER(MP, mp, vf);
+ REGISTER_FILTER(MPDECIMATE, mpdecimate, vf);
REGISTER_FILTER(NEGATE, negate, vf);
REGISTER_FILTER(NOFORMAT, noformat, vf);
+ REGISTER_FILTER(NOISE, noise, vf);
REGISTER_FILTER(NULL, null, vf);
REGISTER_FILTER(OCV, ocv, vf);
REGISTER_FILTER(OVERLAY, overlay, vf);
+ REGISTER_FILTER(OWDENOISE, owdenoise, vf);
REGISTER_FILTER(PAD, pad, vf);
+ REGISTER_FILTER(PERMS, perms, vf);
+ REGISTER_FILTER(PERSPECTIVE, perspective, vf);
+ REGISTER_FILTER(PHASE, phase, vf);
REGISTER_FILTER(PIXDESCTEST, pixdesctest, vf);
+ REGISTER_FILTER(PP, pp, vf);
+ REGISTER_FILTER(PSNR, psnr, vf);
+ REGISTER_FILTER(PULLUP, pullup, vf);
+ REGISTER_FILTER(REMOVELOGO, removelogo, vf);
+ REGISTER_FILTER(ROTATE, rotate, vf);
+ REGISTER_FILTER(SAB, sab, vf);
REGISTER_FILTER(SCALE, scale, vf);
REGISTER_FILTER(SELECT, select, vf);
+ REGISTER_FILTER(SENDCMD, sendcmd, vf);
+ REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf);
REGISTER_FILTER(SETDAR, setdar, vf);
+ REGISTER_FILTER(SETFIELD, setfield, vf);
REGISTER_FILTER(SETPTS, setpts, vf);
REGISTER_FILTER(SETSAR, setsar, vf);
REGISTER_FILTER(SETTB, settb, vf);
REGISTER_FILTER(SHOWINFO, showinfo, vf);
REGISTER_FILTER(SHUFFLEPLANES, shuffleplanes, vf);
+ REGISTER_FILTER(SIGNALSTATS, signalstats, vf);
+ REGISTER_FILTER(SMARTBLUR, smartblur, vf);
REGISTER_FILTER(SPLIT, split, vf);
+ REGISTER_FILTER(SPP, spp, vf);
+ REGISTER_FILTER(STEREO3D, stereo3d, vf);
+ REGISTER_FILTER(SUBTITLES, subtitles, vf);
+ REGISTER_FILTER(SUPER2XSAI, super2xsai, vf);
+ REGISTER_FILTER(SWAPUV, swapuv, vf);
+ REGISTER_FILTER(TELECINE, telecine, vf);
+ REGISTER_FILTER(THUMBNAIL, thumbnail, vf);
+ REGISTER_FILTER(TILE, tile, vf);
+ REGISTER_FILTER(TINTERLACE, tinterlace, vf);
REGISTER_FILTER(TRANSPOSE, transpose, vf);
REGISTER_FILTER(TRIM, trim, vf);
REGISTER_FILTER(UNSHARP, unsharp, vf);
REGISTER_FILTER(VFLIP, vflip, vf);
+ REGISTER_FILTER(VIDSTABDETECT, vidstabdetect, vf);
+ REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
+ REGISTER_FILTER(VIGNETTE, vignette, vf);
+ REGISTER_FILTER(W3FDIF, w3fdif, vf);
+ REGISTER_FILTER(XBR, xbr, vf);
REGISTER_FILTER(YADIF, yadif, vf);
+ REGISTER_FILTER(ZMQ, zmq, vf);
+ REGISTER_FILTER(ZOOMPAN, zoompan, vf);
+ REGISTER_FILTER(CELLAUTO, cellauto, vsrc);
REGISTER_FILTER(COLOR, color, vsrc);
REGISTER_FILTER(FREI0R, frei0r_src, vsrc);
- REGISTER_FILTER(MOVIE, movie, vsrc);
+ REGISTER_FILTER(HALDCLUTSRC, haldclutsrc, vsrc);
+ REGISTER_FILTER(LIFE, life, vsrc);
+ REGISTER_FILTER(MANDELBROT, mandelbrot, vsrc);
+ REGISTER_FILTER(MPTESTSRC, mptestsrc, vsrc);
REGISTER_FILTER(NULLSRC, nullsrc, vsrc);
REGISTER_FILTER(RGBTESTSRC, rgbtestsrc, vsrc);
+ REGISTER_FILTER(SMPTEBARS, smptebars, vsrc);
+ REGISTER_FILTER(SMPTEHDBARS, smptehdbars, vsrc);
REGISTER_FILTER(TESTSRC, testsrc, vsrc);
REGISTER_FILTER(NULLSINK, nullsink, vsink);
+ /* multimedia filters */
+ REGISTER_FILTER(AVECTORSCOPE, avectorscope, avf);
+ REGISTER_FILTER(CONCAT, concat, avf);
+ REGISTER_FILTER(SHOWCQT, showcqt, avf);
+ REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf);
+ REGISTER_FILTER(SHOWWAVES, showwaves, avf);
+
+ /* multimedia sources */
+ REGISTER_FILTER(AMOVIE, amovie, avsrc);
+ REGISTER_FILTER(MOVIE, movie, avsrc);
+
+#if FF_API_AVFILTERBUFFER
+ REGISTER_FILTER_UNCONDITIONAL(vsink_ffbuffersink);
+ REGISTER_FILTER_UNCONDITIONAL(asink_ffabuffersink);
+#endif
+
/* those filters are part of public or internal API => registered
* unconditionally */
REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer);
@@ -125,4 +257,5 @@ void avfilter_register_all(void)
REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
REGISTER_FILTER_UNCONDITIONAL(af_afifo);
REGISTER_FILTER_UNCONDITIONAL(vf_fifo);
+ ff_opencl_register_filter_kernel_code_all();
}
diff --git a/libavfilter/asink_anullsink.c b/libavfilter/asink_anullsink.c
index 44f547d..9b53d3f 100644
--- a/libavfilter/asink_anullsink.c
+++ b/libavfilter/asink_anullsink.c
@@ -1,18 +1,20 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/asrc_abuffer.h b/libavfilter/asrc_abuffer.h
new file mode 100644
index 0000000..aa34461
--- /dev/null
+++ b/libavfilter/asrc_abuffer.h
@@ -0,0 +1,91 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_ASRC_ABUFFER_H
+#define AVFILTER_ASRC_ABUFFER_H
+
+#include "avfilter.h"
+
+/**
+ * @file
+ * memory buffer source for audio
+ *
+ * @deprecated use buffersrc.h instead.
+ */
+
+/**
+ * Queue an audio buffer to the audio buffer source.
+ *
+ * @param abuffersrc audio source buffer context
+ * @param data pointers to the samples planes
+ * @param linesize linesizes of each audio buffer plane
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt sample format of the audio data
+ * @param ch_layout channel layout of the audio data
+ * @param planar flag to indicate if audio data is planar or packed
+ * @param pts presentation timestamp of the audio buffer
+ * @param flags unused
+ *
+ * @deprecated use av_buffersrc_add_ref() instead.
+ */
+attribute_deprecated
+int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,
+ uint8_t *data[8], int linesize[8],
+ int nb_samples, int sample_rate,
+ int sample_fmt, int64_t ch_layout, int planar,
+ int64_t pts, int av_unused flags);
+
+/**
+ * Queue an audio buffer to the audio buffer source.
+ *
+ * This is similar to av_asrc_buffer_add_samples(), but the samples
+ * are stored in a buffer with known size.
+ *
+ * @param abuffersrc audio source buffer context
+ * @param buf pointer to the samples data, packed is assumed
+ * @param size the size in bytes of the buffer, it must contain an
+ * integer number of samples
+ * @param sample_fmt sample format of the audio data
+ * @param ch_layout channel layout of the audio data
+ * @param pts presentation timestamp of the audio buffer
+ * @param flags unused
+ *
+ * @deprecated use av_buffersrc_add_ref() instead.
+ */
+attribute_deprecated
+int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,
+ uint8_t *buf, int buf_size,
+ int sample_rate,
+ int sample_fmt, int64_t ch_layout, int planar,
+ int64_t pts, int av_unused flags);
+
+/**
+ * Queue an audio buffer to the audio buffer source.
+ *
+ * @param abuffersrc audio source buffer context
+ * @param samplesref buffer ref to queue
+ * @param flags unused
+ *
+ * @deprecated use av_buffersrc_add_ref() instead.
+ */
+attribute_deprecated
+int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,
+ AVFilterBufferRef *samplesref,
+ int av_unused flags);
+
+#endif /* AVFILTER_ASRC_ABUFFER_H */
diff --git a/libavfilter/asrc_anullsrc.c b/libavfilter/asrc_anullsrc.c
index b1a449c..28d4500 100644
--- a/libavfilter/asrc_anullsrc.c
+++ b/libavfilter/asrc_anullsrc.c
@@ -1,18 +1,21 @@
/*
- * This file is part of Libav.
+ * Copyright 2010 S.N. Hemanth Meenakshisundaram <smeenaks ucsd edu>
+ * Copyright 2010 Stefano Sabatini <stefano.sabatini-lala poste it>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,28 +29,118 @@
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "audio.h"
#include "avfilter.h"
#include "internal.h"
-static int request_frame(AVFilterLink *link)
+typedef struct {
+ const AVClass *class;
+ char *channel_layout_str;
+ uint64_t channel_layout;
+ char *sample_rate_str;
+ int sample_rate;
+ int nb_samples; ///< number of samples per requested frame
+ int64_t pts;
+} ANullContext;
+
+#define OFFSET(x) offsetof(ANullContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption anullsrc_options[]= {
+ { "channel_layout", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
+ { "cl", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
+ { "sample_rate", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
+ { "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
+ { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(anullsrc);
+
+static av_cold int init(AVFilterContext *ctx)
{
- return AVERROR_EOF;
+ ANullContext *null = ctx->priv;
+ int ret;
+
+ if ((ret = ff_parse_sample_rate(&null->sample_rate,
+ null->sample_rate_str, ctx)) < 0)
+ return ret;
+
+ if ((ret = ff_parse_channel_layout(&null->channel_layout, NULL,
+ null->channel_layout_str, ctx)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ANullContext *null = ctx->priv;
+ int64_t chlayouts[] = { null->channel_layout, -1 };
+ int sample_rates[] = { null->sample_rate, -1 };
+
+ ff_set_common_formats (ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
+ ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
+ ff_set_common_samplerates (ctx, ff_make_format_list(sample_rates));
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ ANullContext *null = outlink->src->priv;
+ char buf[128];
+
+ av_get_channel_layout_string(buf, sizeof(buf), 0, null->channel_layout);
+ av_log(outlink->src, AV_LOG_VERBOSE,
+ "sample_rate:%d channel_layout:'%s' nb_samples:%d\n",
+ null->sample_rate, buf, null->nb_samples);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ int ret;
+ ANullContext *null = outlink->src->priv;
+ AVFrame *samplesref;
+
+ samplesref = ff_get_audio_buffer(outlink, null->nb_samples);
+ if (!samplesref)
+ return AVERROR(ENOMEM);
+
+ samplesref->pts = null->pts;
+ samplesref->channel_layout = null->channel_layout;
+ samplesref->sample_rate = outlink->sample_rate;
+
+ ret = ff_filter_frame(outlink, av_frame_clone(samplesref));
+ av_frame_free(&samplesref);
+ if (ret < 0)
+ return ret;
+
+ null->pts += null->nb_samples;
+ return ret;
}
static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
.request_frame = request_frame,
},
{ NULL }
};
AVFilter ff_asrc_anullsrc = {
- .name = "anullsrc",
- .description = NULL_IF_CONFIG_SMALL("Null audio source, never return audio frames."),
-
- .inputs = NULL,
-
- .outputs = avfilter_asrc_anullsrc_outputs,
+ .name = "anullsrc",
+ .description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
+ .init = init,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ANullContext),
+ .inputs = NULL,
+ .outputs = avfilter_asrc_anullsrc_outputs,
+ .priv_class = &anullsrc_class,
};
diff --git a/libavfilter/asrc_flite.c b/libavfilter/asrc_flite.c
new file mode 100644
index 0000000..098a1dd
--- /dev/null
+++ b/libavfilter/asrc_flite.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * flite voice synth source
+ */
+
+#include <flite/flite.h>
+#include "libavutil/channel_layout.h"
+#include "libavutil/file.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ char *voice_str;
+ char *textfile;
+ char *text;
+ cst_wave *wave;
+ int16_t *wave_samples;
+ int wave_nb_samples;
+ int list_voices;
+ cst_voice *voice;
+ struct voice_entry *voice_entry;
+ int64_t pts;
+ int frame_nb_samples; ///< number of samples per frame
+} FliteContext;
+
+#define OFFSET(x) offsetof(FliteContext, x)
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption flite_options[] = {
+ { "list_voices", "list voices and exit", OFFSET(list_voices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "nb_samples", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
+ { "n", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
+ { "text", "set text to speak", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "textfile", "set filename of the text to speak", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "v", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "voice", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(flite);
+
+static volatile int flite_inited = 0;
+
+/* declare functions for all the supported voices */
+#define DECLARE_REGISTER_VOICE_FN(name) \
+ cst_voice *register_cmu_us_## name(const char *); \
+ void unregister_cmu_us_## name(cst_voice *);
+DECLARE_REGISTER_VOICE_FN(awb);
+DECLARE_REGISTER_VOICE_FN(kal);
+DECLARE_REGISTER_VOICE_FN(kal16);
+DECLARE_REGISTER_VOICE_FN(rms);
+DECLARE_REGISTER_VOICE_FN(slt);
+
+struct voice_entry {
+ const char *name;
+ cst_voice * (*register_fn)(const char *);
+ void (*unregister_fn)(cst_voice *);
+ cst_voice *voice;
+ unsigned usage_count;
+} voice_entry;
+
+#define MAKE_VOICE_STRUCTURE(voice_name) { \
+ .name = #voice_name, \
+ .register_fn = register_cmu_us_ ## voice_name, \
+ .unregister_fn = unregister_cmu_us_ ## voice_name, \
+}
+static struct voice_entry voice_entries[] = {
+ MAKE_VOICE_STRUCTURE(awb),
+ MAKE_VOICE_STRUCTURE(kal),
+ MAKE_VOICE_STRUCTURE(kal16),
+ MAKE_VOICE_STRUCTURE(rms),
+ MAKE_VOICE_STRUCTURE(slt),
+};
+
+static void list_voices(void *log_ctx, const char *sep)
+{
+ int i, n = FF_ARRAY_ELEMS(voice_entries);
+ for (i = 0; i < n; i++)
+ av_log(log_ctx, AV_LOG_INFO, "%s%s",
+ voice_entries[i].name, i < (n-1) ? sep : "\n");
+}
+
+static int select_voice(struct voice_entry **entry_ret, const char *voice_name, void *log_ctx)
+{
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(voice_entries); i++) {
+ struct voice_entry *entry = &voice_entries[i];
+ if (!strcmp(entry->name, voice_name)) {
+ if (!entry->voice)
+ entry->voice = entry->register_fn(NULL);
+ if (!entry->voice) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Could not register voice '%s'\n", voice_name);
+ return AVERROR_UNKNOWN;
+ }
+ entry->usage_count++;
+ *entry_ret = entry;
+ return 0;
+ }
+ }
+
+ av_log(log_ctx, AV_LOG_ERROR, "Could not find voice '%s'\n", voice_name);
+ av_log(log_ctx, AV_LOG_INFO, "Choose between the voices: ");
+ list_voices(log_ctx, ", ");
+
+ return AVERROR(EINVAL);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ FliteContext *flite = ctx->priv;
+ int ret = 0;
+
+ if (flite->list_voices) {
+ list_voices(ctx, "\n");
+ return AVERROR_EXIT;
+ }
+
+ if (!flite_inited) {
+ if (flite_init() < 0) {
+ av_log(ctx, AV_LOG_ERROR, "flite initialization failed\n");
+ return AVERROR_UNKNOWN;
+ }
+ flite_inited++;
+ }
+
+ if ((ret = select_voice(&flite->voice_entry, flite->voice_str, ctx)) < 0)
+ return ret;
+ flite->voice = flite->voice_entry->voice;
+
+ if (flite->textfile && flite->text) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Both text and textfile options set: only one must be specified\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (flite->textfile) {
+ uint8_t *textbuf;
+ size_t textbuf_size;
+
+ if ((ret = av_file_map(flite->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The text file '%s' could not be read: %s\n",
+ flite->textfile, av_err2str(ret));
+ return ret;
+ }
+
+ if (!(flite->text = av_malloc(textbuf_size+1)))
+ return AVERROR(ENOMEM);
+ memcpy(flite->text, textbuf, textbuf_size);
+ flite->text[textbuf_size] = 0;
+ av_file_unmap(textbuf, textbuf_size);
+ }
+
+ if (!flite->text) {
+ av_log(ctx, AV_LOG_ERROR,
+ "No speech text specified, specify the 'text' or 'textfile' option\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* synth all the file data in block */
+ flite->wave = flite_text_to_wave(flite->text, flite->voice);
+ flite->wave_samples = flite->wave->samples;
+ flite->wave_nb_samples = flite->wave->num_samples;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ FliteContext *flite = ctx->priv;
+
+ if (!--flite->voice_entry->usage_count)
+ flite->voice_entry->unregister_fn(flite->voice);
+ flite->voice = NULL;
+ flite->voice_entry = NULL;
+ delete_wave(flite->wave);
+ flite->wave = NULL;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ FliteContext *flite = ctx->priv;
+
+ AVFilterChannelLayouts *chlayouts = NULL;
+ int64_t chlayout = av_get_default_channel_layout(flite->wave->num_channels);
+ AVFilterFormats *sample_formats = NULL;
+ AVFilterFormats *sample_rates = NULL;
+
+ ff_add_channel_layout(&chlayouts, chlayout);
+ ff_set_common_channel_layouts(ctx, chlayouts);
+ ff_add_format(&sample_formats, AV_SAMPLE_FMT_S16);
+ ff_set_common_formats(ctx, sample_formats);
+ ff_add_format(&sample_rates, flite->wave->sample_rate);
+ ff_set_common_samplerates (ctx, sample_rates);
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FliteContext *flite = ctx->priv;
+
+ outlink->sample_rate = flite->wave->sample_rate;
+ outlink->time_base = (AVRational){1, flite->wave->sample_rate};
+
+ av_log(ctx, AV_LOG_VERBOSE, "voice:%s fmt:%s sample_rate:%d\n",
+ flite->voice_str,
+ av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFrame *samplesref;
+ FliteContext *flite = outlink->src->priv;
+ int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples);
+
+ if (!nb_samples)
+ return AVERROR_EOF;
+
+ samplesref = ff_get_audio_buffer(outlink, nb_samples);
+ if (!samplesref)
+ return AVERROR(ENOMEM);
+
+ memcpy(samplesref->data[0], flite->wave_samples,
+ nb_samples * flite->wave->num_channels * 2);
+ samplesref->pts = flite->pts;
+ av_frame_set_pkt_pos(samplesref, -1);
+ av_frame_set_sample_rate(samplesref, flite->wave->sample_rate);
+ flite->pts += nb_samples;
+ flite->wave_samples += nb_samples * flite->wave->num_channels;
+ flite->wave_nb_samples -= nb_samples;
+
+ return ff_filter_frame(outlink, samplesref);
+}
+
+static const AVFilterPad flite_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_props,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_asrc_flite = {
+ .name = "flite",
+ .description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(FliteContext),
+ .inputs = NULL,
+ .outputs = flite_outputs,
+ .priv_class = &flite_class,
+};
diff --git a/libavfilter/asrc_sine.c b/libavfilter/asrc_sine.c
new file mode 100644
index 0000000..68e1398
--- /dev/null
+++ b/libavfilter/asrc_sine.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ double frequency;
+ double beep_factor;
+ int samples_per_frame;
+ int sample_rate;
+ int64_t duration;
+ int16_t *sin;
+ int64_t pts;
+ uint32_t phi; ///< current phase of the sine (2pi = 1<<32)
+ uint32_t dphi; ///< phase increment between two samples
+ unsigned beep_period;
+ unsigned beep_index;
+ unsigned beep_length;
+ uint32_t phi_beep; ///< current phase of the beep
+ uint32_t dphi_beep; ///< phase increment of the beep
+} SineContext;
+
+#define CONTEXT SineContext
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
+ { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type, \
+ { .deffield = def }, min, max, FLAGS, __VA_ARGS__ }
+
+#define OPT_INT(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)
+
+#define OPT_DBL(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, DOUBLE, dbl, __VA_ARGS__)
+
+#define OPT_DUR(name, field, def, min, max, descr, ...) \
+ OPT_GENERIC(name, field, def, min, max, descr, DURATION, str, __VA_ARGS__)
+
+static const AVOption sine_options[] = {
+ OPT_DBL("frequency", frequency, 440, 0, DBL_MAX, "set the sine frequency"),
+ OPT_DBL("f", frequency, 440, 0, DBL_MAX, "set the sine frequency"),
+ OPT_DBL("beep_factor", beep_factor, 0, 0, DBL_MAX, "set the beep fequency factor"),
+ OPT_DBL("b", beep_factor, 0, 0, DBL_MAX, "set the beep fequency factor"),
+ OPT_INT("sample_rate", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
+ OPT_INT("r", sample_rate, 44100, 1, INT_MAX, "set the sample rate"),
+ OPT_DUR("duration", duration, 0, 0, INT64_MAX, "set the audio duration"),
+ OPT_DUR("d", duration, 0, 0, INT64_MAX, "set the audio duration"),
+ OPT_INT("samples_per_frame", samples_per_frame, 1024, 0, INT_MAX, "set the number of samples per frame"),
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(sine);
+
+#define LOG_PERIOD 15
+#define AMPLITUDE 4095
+#define AMPLITUDE_SHIFT 3
+
+static void make_sin_table(int16_t *sin)
+{
+ unsigned half_pi = 1 << (LOG_PERIOD - 2);
+ unsigned ampls = AMPLITUDE << AMPLITUDE_SHIFT;
+ uint64_t unit2 = (uint64_t)(ampls * ampls) << 32;
+ unsigned step, i, c, s, k, new_k, n2;
+
+ /* Principle: if u = exp(i*a1) and v = exp(i*a2), then
+ exp(i*(a1+a2)/2) = (u+v) / length(u+v) */
+ sin[0] = 0;
+ sin[half_pi] = ampls;
+ for (step = half_pi; step > 1; step /= 2) {
+ /* k = (1 << 16) * amplitude / length(u+v)
+ In exact values, k is constant at a given step */
+ k = 0x10000;
+ for (i = 0; i < half_pi / 2; i += step) {
+ s = sin[i] + sin[i + step];
+ c = sin[half_pi - i] + sin[half_pi - i - step];
+ n2 = s * s + c * c;
+ /* Newton's method to solve n² * k² = unit² */
+ while (1) {
+ new_k = (k + unit2 / ((uint64_t)k * n2) + 1) >> 1;
+ if (k == new_k)
+ break;
+ k = new_k;
+ }
+ sin[i + step / 2] = (k * s + 0x7FFF) >> 16;
+ sin[half_pi - i - step / 2] = (k * c + 0x8000) >> 16;
+ }
+ }
+ /* Unshift amplitude */
+ for (i = 0; i <= half_pi; i++)
+ sin[i] = (sin[i] + (1 << (AMPLITUDE_SHIFT - 1))) >> AMPLITUDE_SHIFT;
+ /* Use symmetries to fill the other three quarters */
+ for (i = 0; i < half_pi; i++)
+ sin[half_pi * 2 - i] = sin[i];
+ for (i = 0; i < 2 * half_pi; i++)
+ sin[i + 2 * half_pi] = -sin[i];
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SineContext *sine = ctx->priv;
+
+ if (!(sine->sin = av_malloc(sizeof(*sine->sin) << LOG_PERIOD)))
+ return AVERROR(ENOMEM);
+ sine->dphi = ldexp(sine->frequency, 32) / sine->sample_rate + 0.5;
+ make_sin_table(sine->sin);
+
+ if (sine->beep_factor) {
+ sine->beep_period = sine->sample_rate;
+ sine->beep_length = sine->beep_period / 25;
+ sine->dphi_beep = ldexp(sine->beep_factor * sine->frequency, 32) /
+ sine->sample_rate + 0.5;
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SineContext *sine = ctx->priv;
+
+ av_freep(&sine->sin);
+}
+
+static av_cold int query_formats(AVFilterContext *ctx)
+{
+ SineContext *sine = ctx->priv;
+ static const int64_t chlayouts[] = { AV_CH_LAYOUT_MONO, -1 };
+ int sample_rates[] = { sine->sample_rate, -1 };
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE };
+
+ ff_set_common_formats (ctx, ff_make_format_list(sample_fmts));
+ ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
+ ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
+ return 0;
+}
+
+static av_cold int config_props(AVFilterLink *outlink)
+{
+ SineContext *sine = outlink->src->priv;
+ sine->duration = av_rescale(sine->duration, sine->sample_rate, AV_TIME_BASE);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ SineContext *sine = outlink->src->priv;
+ AVFrame *frame;
+ int i, nb_samples = sine->samples_per_frame;
+ int16_t *samples;
+
+ if (sine->duration) {
+ nb_samples = FFMIN(nb_samples, sine->duration - sine->pts);
+ av_assert1(nb_samples >= 0);
+ if (!nb_samples)
+ return AVERROR_EOF;
+ }
+ if (!(frame = ff_get_audio_buffer(outlink, nb_samples)))
+ return AVERROR(ENOMEM);
+ samples = (int16_t *)frame->data[0];
+
+ for (i = 0; i < nb_samples; i++) {
+ samples[i] = sine->sin[sine->phi >> (32 - LOG_PERIOD)];
+ sine->phi += sine->dphi;
+ if (sine->beep_index < sine->beep_length) {
+ samples[i] += sine->sin[sine->phi_beep >> (32 - LOG_PERIOD)] << 1;
+ sine->phi_beep += sine->dphi_beep;
+ }
+ if (++sine->beep_index == sine->beep_period)
+ sine->beep_index = 0;
+ }
+
+ frame->pts = sine->pts;
+ sine->pts += nb_samples;
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad sine_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_asrc_sine = {
+ .name = "sine",
+ .description = NULL_IF_CONFIG_SMALL("Generate sine wave audio signal."),
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SineContext),
+ .inputs = NULL,
+ .outputs = sine_outputs,
+ .priv_class = &sine_class,
+};
diff --git a/libavfilter/audio.c b/libavfilter/audio.c
index b332e9e..1e1d8e0 100644
--- a/libavfilter/audio.c
+++ b/libavfilter/audio.c
@@ -1,28 +1,38 @@
/*
- * This file is part of Libav.
+ * Copyright (c) Stefano Sabatini | stefasab at gmail.com
+ * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavcodec/avcodec.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
+int avfilter_ref_get_channels(AVFilterBufferRef *ref)
+{
+ return ref->audio ? ref->audio->channels : 0;
+}
+
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
return ff_get_audio_buffer(link->dst->outputs[0], nb_samples);
@@ -31,14 +41,17 @@ AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFrame *frame = av_frame_alloc();
- int channels = av_get_channel_layout_nb_channels(link->channel_layout);
+ int channels = link->channels;
int ret;
+ av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout));
+
if (!frame)
return NULL;
frame->nb_samples = nb_samples;
frame->format = link->format;
+ av_frame_set_channels(frame, link->channels);
frame->channel_layout = link->channel_layout;
frame->sample_rate = link->sample_rate;
ret = av_frame_get_buffer(frame, 0);
@@ -68,11 +81,12 @@ AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
}
#if FF_API_AVFILTERBUFFER
-AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
- int linesize,int perms,
- int nb_samples,
- enum AVSampleFormat sample_fmt,
- uint64_t channel_layout)
+AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
+ int linesize,int perms,
+ int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ int channels,
+ uint64_t channel_layout)
{
int planes;
AVFilterBuffer *samples = av_mallocz(sizeof(*samples));
@@ -81,6 +95,10 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
if (!samples || !samplesref)
goto fail;
+ av_assert0(channels);
+ av_assert0(channel_layout == 0 ||
+ channels == av_get_channel_layout_nb_channels(channel_layout));
+
samplesref->buf = samples;
samplesref->buf->free = ff_avfilter_default_free_buffer;
if (!(samplesref->audio = av_mallocz(sizeof(*samplesref->audio))))
@@ -88,9 +106,9 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
samplesref->audio->nb_samples = nb_samples;
samplesref->audio->channel_layout = channel_layout;
- samplesref->audio->planar = av_sample_fmt_is_planar(sample_fmt);
+ samplesref->audio->channels = channels;
- planes = samplesref->audio->planar ? av_get_channel_layout_nb_channels(channel_layout) : 1;
+ planes = av_sample_fmt_is_planar(sample_fmt) ? channels : 1;
/* make sure the buffer gets read permission or it's useless for output */
samplesref->perms = perms | AV_PERM_READ;
@@ -106,9 +124,9 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
samples->linesize[0] = samplesref->linesize[0] = linesize;
if (planes > FF_ARRAY_ELEMS(samples->data)) {
- samples-> extended_data = av_mallocz(sizeof(*samples->extended_data) *
+ samples-> extended_data = av_mallocz_array(sizeof(*samples->extended_data),
planes);
- samplesref->extended_data = av_mallocz(sizeof(*samplesref->extended_data) *
+ samplesref->extended_data = av_mallocz_array(sizeof(*samplesref->extended_data),
planes);
if (!samples->extended_data || !samplesref->extended_data)
@@ -137,4 +155,16 @@ fail:
av_freep(&samples);
return NULL;
}
+
+AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
+ int linesize,int perms,
+ int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ uint64_t channel_layout)
+{
+ int channels = av_get_channel_layout_nb_channels(channel_layout);
+ return avfilter_get_audio_buffer_ref_from_arrays_channels(data, linesize, perms,
+ nb_samples, sample_fmt,
+ channels, channel_layout);
+}
#endif
diff --git a/libavfilter/audio.h b/libavfilter/audio.h
index 4684b6c..3335c96 100644
--- a/libavfilter/audio.h
+++ b/libavfilter/audio.h
@@ -1,18 +1,21 @@
/*
- * This file is part of Libav.
+ * Copyright (c) Stefano Sabatini | stefasab at gmail.com
+ * Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -20,6 +23,25 @@
#define AVFILTER_AUDIO_H
#include "avfilter.h"
+#include "internal.h"
+
+static const enum AVSampleFormat ff_packed_sample_fmts_array[] = {
+ AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_DBL,
+ AV_SAMPLE_FMT_NONE
+};
+
+static const enum AVSampleFormat ff_planar_sample_fmts_array[] = {
+ AV_SAMPLE_FMT_U8P,
+ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+};
/** default handler for get_audio_buffer() for audio inputs */
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples);
@@ -38,4 +60,24 @@ AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples);
*/
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples);
+/**
+ * Send a buffer of audio samples to the next filter.
+ *
+ * @param link the output link over which the audio samples are being sent
+ * @param samplesref a reference to the buffer of audio samples being sent. The
+ * receiving filter will free this reference when it no longer
+ * needs it or pass it on to the next filter.
+ *
+ * @return >= 0 on success, a negative AVERROR on error. The receiving filter
+ * is responsible for unreferencing samplesref in case of error.
+ */
+int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
+
+/**
+ * Send a buffer of audio samples to the next link, without checking
+ * min_samples.
+ */
+int ff_filter_samples_framed(AVFilterLink *link,
+ AVFilterBufferRef *samplesref);
+
#endif /* AVFILTER_AUDIO_H */
diff --git a/libavfilter/avcodec.c b/libavfilter/avcodec.c
new file mode 100644
index 0000000..e0d9015
--- /dev/null
+++ b/libavfilter/avcodec.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2011 Stefano Sabatini | stefasab at gmail.com
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libavcodec/libavfilter gluing utilities
+ */
+
+#include "avcodec.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+
+#if FF_API_AVFILTERBUFFER
+AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame,
+ int perms)
+{
+ AVFilterBufferRef *picref =
+ avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, perms,
+ frame->width, frame->height,
+ frame->format);
+ if (!picref)
+ return NULL;
+ if (avfilter_copy_frame_props(picref, frame) < 0) {
+ picref->buf->data[0] = NULL;
+ avfilter_unref_bufferp(&picref);
+ }
+ return picref;
+}
+
+AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
+ int perms)
+{
+ AVFilterBufferRef *samplesref;
+ int channels = av_frame_get_channels(frame);
+ int64_t layout = av_frame_get_channel_layout(frame);
+
+ if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) {
+ av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
+ return NULL;
+ }
+
+ samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels(
+ (uint8_t **)frame->extended_data, frame->linesize[0], perms,
+ frame->nb_samples, frame->format, channels, layout);
+ if (!samplesref)
+ return NULL;
+ if (avfilter_copy_frame_props(samplesref, frame) < 0) {
+ samplesref->buf->data[0] = NULL;
+ avfilter_unref_bufferp(&samplesref);
+ }
+ return samplesref;
+}
+
+AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
+ const AVFrame *frame,
+ int perms)
+{
+ switch (type) {
+ case AVMEDIA_TYPE_VIDEO:
+ return avfilter_get_video_buffer_ref_from_frame(frame, perms);
+ case AVMEDIA_TYPE_AUDIO:
+ return avfilter_get_audio_buffer_ref_from_frame(frame, perms);
+ default:
+ return NULL;
+ }
+}
+
+int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src)
+{
+ int planes, nb_channels;
+
+ if (!dst)
+ return AVERROR(EINVAL);
+ /* abort in case the src is NULL and dst is not, avoid inconsistent state in dst */
+ av_assert0(src);
+
+ memcpy(dst->data, src->data, sizeof(dst->data));
+ memcpy(dst->linesize, src->linesize, sizeof(dst->linesize));
+
+ dst->pts = src->pts;
+ dst->format = src->format;
+ av_frame_set_pkt_pos(dst, src->pos);
+
+ switch (src->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ av_assert0(src->video);
+ dst->width = src->video->w;
+ dst->height = src->video->h;
+ dst->sample_aspect_ratio = src->video->sample_aspect_ratio;
+ dst->interlaced_frame = src->video->interlaced;
+ dst->top_field_first = src->video->top_field_first;
+ dst->key_frame = src->video->key_frame;
+ dst->pict_type = src->video->pict_type;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_assert0(src->audio);
+ nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout);
+ planes = av_sample_fmt_is_planar(src->format) ? nb_channels : 1;
+
+ if (planes > FF_ARRAY_ELEMS(dst->data)) {
+ dst->extended_data = av_mallocz_array(planes, sizeof(*dst->extended_data));
+ if (!dst->extended_data)
+ return AVERROR(ENOMEM);
+ memcpy(dst->extended_data, src->extended_data,
+ planes * sizeof(*dst->extended_data));
+ } else
+ dst->extended_data = dst->data;
+ dst->nb_samples = src->audio->nb_samples;
+ av_frame_set_sample_rate (dst, src->audio->sample_rate);
+ av_frame_set_channel_layout(dst, src->audio->channel_layout);
+ av_frame_set_channels (dst, src->audio->channels);
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+#endif
diff --git a/libavfilter/avcodec.h b/libavfilter/avcodec.h
new file mode 100644
index 0000000..d3d0e20
--- /dev/null
+++ b/libavfilter/avcodec.h
@@ -0,0 +1,69 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AVCODEC_H
+#define AVFILTER_AVCODEC_H
+
+/**
+ * @file
+ * libavcodec/libavfilter gluing utilities
+ *
+ * This should be included in an application ONLY if the installed
+ * libavfilter has been compiled with libavcodec support, otherwise
+ * symbols defined below will not be available.
+ */
+
+#include "avfilter.h"
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * Create and return a picref reference from the data and properties
+ * contained in frame.
+ *
+ * @param perms permissions to assign to the new buffer reference
+ * @deprecated avfilter APIs work natively with AVFrame instead.
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);
+
+
+/**
+ * Create and return a picref reference from the data and properties
+ * contained in frame.
+ *
+ * @param perms permissions to assign to the new buffer reference
+ * @deprecated avfilter APIs work natively with AVFrame instead.
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
+ int perms);
+
+/**
+ * Create and return a buffer reference from the data and properties
+ * contained in frame.
+ *
+ * @param perms permissions to assign to the new buffer reference
+ * @deprecated avfilter APIs work natively with AVFrame instead.
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
+ const AVFrame *frame,
+ int perms);
+#endif
+
+#endif /* AVFILTER_AVCODEC_H */
diff --git a/libavfilter/avf_avectorscope.c b/libavfilter/avf_avectorscope.c
new file mode 100644
index 0000000..f9ebc0f
--- /dev/null
+++ b/libavfilter/avf_avectorscope.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio to video multimedia vectorscope filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+enum VectorScopeMode {
+ LISSAJOUS,
+ LISSAJOUS_XY,
+ MODE_NB,
+};
+
+typedef struct AudioVectorScopeContext {
+ const AVClass *class;
+ AVFrame *outpicref;
+ int w, h;
+ int hw, hh;
+ enum VectorScopeMode mode;
+ int contrast[3];
+ int fade[3];
+ double zoom;
+ AVRational frame_rate;
+} AudioVectorScopeContext;
+
+#define OFFSET(x) offsetof(AudioVectorScopeContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption avectorscope_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
+ { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, FLAGS, "mode" },
+ { "lissajous", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS}, 0, 0, FLAGS, "mode" },
+ { "lissajous_xy", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS_XY}, 0, 0, FLAGS, "mode" },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
+ { "rc", "set red contrast", OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=40}, 0, 255, FLAGS },
+ { "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=160}, 0, 255, FLAGS },
+ { "bc", "set blue contrast", OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=80}, 0, 255, FLAGS },
+ { "rf", "set red fade", OFFSET(fade[0]), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, FLAGS },
+ { "gf", "set green fade", OFFSET(fade[1]), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
+ { "bf", "set blue fade", OFFSET(fade[2]), AV_OPT_TYPE_INT, {.i64=5}, 0, 255, FLAGS },
+ { "zoom", "set zoom factor", OFFSET(zoom), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 10, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(avectorscope);
+
+static void draw_dot(AudioVectorScopeContext *p, unsigned x, unsigned y)
+{
+ const int linesize = p->outpicref->linesize[0];
+ uint8_t *dst;
+
+ if (p->zoom > 1) {
+ if (y >= p->h || x >= p->w)
+ return;
+ } else {
+ y = FFMIN(y, p->h - 1);
+ x = FFMIN(x, p->w - 1);
+ }
+
+ dst = &p->outpicref->data[0][y * linesize + x * 4];
+ dst[0] = FFMIN(dst[0] + p->contrast[0], 255);
+ dst[1] = FFMIN(dst[1] + p->contrast[1], 255);
+ dst[2] = FFMIN(dst[2] + p->contrast[2], 255);
+}
+
+static void fade(AudioVectorScopeContext *p)
+{
+ const int linesize = p->outpicref->linesize[0];
+ int i, j;
+
+ if (p->fade[0] || p->fade[1] || p->fade[2]) {
+ uint8_t *d = p->outpicref->data[0];
+ for (i = 0; i < p->h; i++) {
+ for (j = 0; j < p->w*4; j+=4) {
+ d[j+0] = FFMAX(d[j+0] - p->fade[0], 0);
+ d[j+1] = FFMAX(d[j+1] - p->fade[1], 0);
+ d[j+2] = FFMAX(d[j+2] - p->fade[2], 0);
+ }
+ d += linesize;
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layout = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_formats);
+
+ ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
+ ff_channel_layouts_ref(layout, &inlink->out_channel_layouts);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_samplerates);
+
+ formats = ff_make_format_list(pix_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &outlink->in_formats);
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AudioVectorScopeContext *p = ctx->priv;
+ int nb_samples;
+
+ nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(p->frame_rate)) + 0.5);
+ inlink->partial_buf_size =
+ inlink->min_samples =
+ inlink->max_samples = nb_samples;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AudioVectorScopeContext *p = outlink->src->priv;
+
+ outlink->w = p->w;
+ outlink->h = p->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ outlink->frame_rate = p->frame_rate;
+
+ p->hw = p->w / 2;
+ p->hh = p->h / 2;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AudioVectorScopeContext *p = ctx->priv;
+ const int hw = p->hw;
+ const int hh = p->hh;
+ unsigned x, y;
+ const double zoom = p->zoom;
+ int i;
+
+ if (!p->outpicref || p->outpicref->width != outlink->w ||
+ p->outpicref->height != outlink->h) {
+ av_frame_free(&p->outpicref);
+ p->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!p->outpicref) {
+ av_frame_free(&insamples);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < outlink->h; i++)
+ memset(p->outpicref->data[0] + i * p->outpicref->linesize[0], 0, outlink->w * 4);
+ }
+ p->outpicref->pts = insamples->pts;
+
+ fade(p);
+
+ switch (insamples->format) {
+ case AV_SAMPLE_FMT_S16:
+ for (i = 0; i < insamples->nb_samples; i++) {
+ int16_t *src = (int16_t *)insamples->data[0] + i * 2;
+
+ if (p->mode == LISSAJOUS) {
+ x = ((src[1] - src[0]) * zoom / (float)(UINT16_MAX) + 1) * hw;
+ y = (1.0 - (src[0] + src[1]) * zoom / (float)UINT16_MAX) * hh;
+ } else {
+ x = (src[1] * zoom / (float)INT16_MAX + 1) * hw;
+ y = (src[0] * zoom / (float)INT16_MAX + 1) * hh;
+ }
+
+ draw_dot(p, x, y);
+ }
+ break;
+ case AV_SAMPLE_FMT_FLT:
+ for (i = 0; i < insamples->nb_samples; i++) {
+ float *src = (float *)insamples->data[0] + i * 2;
+
+ if (p->mode == LISSAJOUS) {
+ x = ((src[1] - src[0]) * zoom / 2 + 1) * hw;
+ y = (1.0 - (src[0] + src[1]) * zoom / 2) * hh;
+ } else {
+ x = (src[1] * zoom + 1) * hw;
+ y = (src[0] * zoom + 1) * hh;
+ }
+
+ draw_dot(p, x, y);
+ }
+ break;
+ }
+
+ av_frame_free(&insamples);
+
+ return ff_filter_frame(outlink, av_frame_clone(p->outpicref));
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AudioVectorScopeContext *p = ctx->priv;
+
+ av_frame_free(&p->outpicref);
+}
+
+static const AVFilterPad audiovectorscope_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad audiovectorscope_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_avectorscope = {
+ .name = "avectorscope",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to vectorscope video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(AudioVectorScopeContext),
+ .inputs = audiovectorscope_inputs,
+ .outputs = audiovectorscope_outputs,
+ .priv_class = &avectorscope_class,
+};
diff --git a/libavfilter/avf_concat.c b/libavfilter/avf_concat.c
new file mode 100644
index 0000000..266bb36
--- /dev/null
+++ b/libavfilter/avf_concat.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * concat audio-video filter
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#define FF_BUFQUEUE_SIZE 256
+#include "bufferqueue.h"
+#include "internal.h"
+#include "video.h"
+#include "audio.h"
+
+#define TYPE_ALL 2
+
+typedef struct {
+ const AVClass *class;
+ unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */
+ unsigned nb_segments;
+ unsigned cur_idx; /**< index of the first input of current segment */
+ int64_t delta_ts; /**< timestamp to add to produce output timestamps */
+ unsigned nb_in_active; /**< number of active inputs in current segment */
+ unsigned unsafe;
+ struct concat_in {
+ int64_t pts;
+ int64_t nb_frames;
+ unsigned eof;
+ struct FFBufQueue queue;
+ } *in;
+} ConcatContext;
+
+#define OFFSET(x) offsetof(ConcatContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+#define V AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption concat_options[] = {
+ { "n", "specify the number of segments", OFFSET(nb_segments),
+ AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, V|A|F},
+ { "v", "specify the number of video streams",
+ OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]),
+ AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F },
+ { "a", "specify the number of audio streams",
+ OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]),
+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F},
+ { "unsafe", "enable unsafe mode",
+ OFFSET(unsafe),
+ AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V|A|F},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(concat);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned type, nb_str, idx0 = 0, idx, str, seg;
+ AVFilterFormats *formats, *rates = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+
+ for (type = 0; type < TYPE_ALL; type++) {
+ nb_str = cat->nb_streams[type];
+ for (str = 0; str < nb_str; str++) {
+ idx = idx0;
+
+ /* Set the output formats */
+ formats = ff_all_formats(type);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &ctx->outputs[idx]->in_formats);
+ if (type == AVMEDIA_TYPE_AUDIO) {
+ rates = ff_all_samplerates();
+ if (!rates)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates);
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts);
+ }
+
+ /* Set the same formats for each corresponding input */
+ for (seg = 0; seg < cat->nb_segments; seg++) {
+ ff_formats_ref(formats, &ctx->inputs[idx]->out_formats);
+ if (type == AVMEDIA_TYPE_AUDIO) {
+ ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates);
+ ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts);
+ }
+ idx += ctx->nb_outputs;
+ }
+
+ idx0++;
+ }
+ }
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ConcatContext *cat = ctx->priv;
+ unsigned out_no = FF_OUTLINK_IDX(outlink);
+ unsigned in_no = out_no, seg;
+ AVFilterLink *inlink = ctx->inputs[in_no];
+
+ /* enhancement: find a common one */
+ outlink->time_base = AV_TIME_BASE_Q;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->format = inlink->format;
+ for (seg = 1; seg < cat->nb_segments; seg++) {
+ inlink = ctx->inputs[in_no += ctx->nb_outputs];
+ if (!outlink->sample_aspect_ratio.num)
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ /* possible enhancement: unsafe mode, do not check */
+ if (outlink->w != inlink->w ||
+ outlink->h != inlink->h ||
+ outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num &&
+ inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "output link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[in_no].name, inlink->w, inlink->h,
+ inlink->sample_aspect_ratio.num,
+ inlink->sample_aspect_ratio.den,
+ ctx->input_pads[out_no].name, outlink->w, outlink->h,
+ outlink->sample_aspect_ratio.num,
+ outlink->sample_aspect_ratio.den);
+ if (!cat->unsafe)
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int push_frame(AVFilterContext *ctx, unsigned in_no, AVFrame *buf)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned out_no = in_no % ctx->nb_outputs;
+ AVFilterLink * inlink = ctx-> inputs[ in_no];
+ AVFilterLink *outlink = ctx->outputs[out_no];
+ struct concat_in *in = &cat->in[in_no];
+
+ buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
+ in->pts = buf->pts;
+ in->nb_frames++;
+ /* add duration to input PTS */
+ if (inlink->sample_rate)
+ /* use number of audio samples */
+ in->pts += av_rescale_q(buf->nb_samples,
+ av_make_q(1, inlink->sample_rate),
+ outlink->time_base);
+ else if (in->nb_frames >= 2)
+ /* use mean duration */
+ in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1);
+
+ buf->pts += cat->delta_ts;
+ return ff_filter_frame(outlink, buf);
+}
+
+static int process_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ConcatContext *cat = ctx->priv;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+
+ if (in_no < cat->cur_idx) {
+ av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
+ ctx->input_pads[in_no].name);
+ av_frame_free(&buf);
+ } else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
+ ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
+ } else {
+ return push_frame(ctx, in_no, buf);
+ }
+ return 0;
+}
+
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
+{
+ AVFilterContext *ctx = inlink->dst;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+ AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
+
+ return ff_get_video_buffer(outlink, w, h);
+}
+
+static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+ AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
+
+ return ff_get_audio_buffer(outlink, nb_samples);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ return process_frame(inlink, buf);
+}
+
+static void close_input(AVFilterContext *ctx, unsigned in_no)
+{
+ ConcatContext *cat = ctx->priv;
+
+ cat->in[in_no].eof = 1;
+ cat->nb_in_active--;
+ av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n",
+ ctx->input_pads[in_no].name, cat->nb_in_active);
+}
+
+static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned i = cat->cur_idx;
+ unsigned imax = i + ctx->nb_outputs;
+ int64_t pts;
+
+ pts = cat->in[i++].pts;
+ for (; i < imax; i++)
+ pts = FFMAX(pts, cat->in[i].pts);
+ cat->delta_ts += pts;
+ *seg_delta = pts;
+}
+
+static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
+ int64_t seg_delta)
+{
+ ConcatContext *cat = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[out_no];
+ int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
+ int64_t nb_samples, sent = 0;
+ int frame_nb_samples, ret;
+ AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
+ AVFrame *buf;
+ int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
+
+ if (!rate_tb.den)
+ return AVERROR_BUG;
+ nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
+ outlink->time_base, rate_tb);
+ frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
+ while (nb_samples) {
+ frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
+ buf = ff_get_audio_buffer(outlink, frame_nb_samples);
+ if (!buf)
+ return AVERROR(ENOMEM);
+ av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
+ nb_channels, outlink->format);
+ buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
+ ret = ff_filter_frame(outlink, buf);
+ if (ret < 0)
+ return ret;
+ sent += frame_nb_samples;
+ nb_samples -= frame_nb_samples;
+ }
+ return 0;
+}
+
+static int flush_segment(AVFilterContext *ctx)
+{
+ int ret;
+ ConcatContext *cat = ctx->priv;
+ unsigned str, str_max;
+ int64_t seg_delta;
+
+ find_next_delta_ts(ctx, &seg_delta);
+ cat->cur_idx += ctx->nb_outputs;
+ cat->nb_in_active = ctx->nb_outputs;
+ av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
+ cat->delta_ts);
+
+ if (cat->cur_idx < ctx->nb_inputs) {
+ /* pad audio streams with silence */
+ str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
+ str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
+ for (; str < str_max; str++) {
+ ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
+ seg_delta);
+ if (ret < 0)
+ return ret;
+ }
+ /* flush queued buffers */
+ /* possible enhancement: flush in PTS order */
+ str_max = cat->cur_idx + ctx->nb_outputs;
+ for (str = cat->cur_idx; str < str_max; str++) {
+ while (cat->in[str].queue.available) {
+ ret = push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue));
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ConcatContext *cat = ctx->priv;
+ unsigned out_no = FF_OUTLINK_IDX(outlink);
+ unsigned in_no = out_no + cat->cur_idx;
+ unsigned str, str_max;
+ int ret;
+
+ while (1) {
+ if (in_no >= ctx->nb_inputs)
+ return AVERROR_EOF;
+ if (!cat->in[in_no].eof) {
+ ret = ff_request_frame(ctx->inputs[in_no]);
+ if (ret != AVERROR_EOF)
+ return ret;
+ close_input(ctx, in_no);
+ }
+ /* cycle on all inputs to finish the segment */
+ /* possible enhancement: request in PTS order */
+ str_max = cat->cur_idx + ctx->nb_outputs - 1;
+ for (str = cat->cur_idx; cat->nb_in_active;
+ str = str == str_max ? cat->cur_idx : str + 1) {
+ if (cat->in[str].eof)
+ continue;
+ ret = ff_request_frame(ctx->inputs[str]);
+ if (ret == AVERROR_EOF)
+ close_input(ctx, str);
+ else if (ret < 0)
+ return ret;
+ }
+ ret = flush_segment(ctx);
+ if (ret < 0)
+ return ret;
+ in_no += ctx->nb_outputs;
+ }
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned seg, type, str;
+
+ /* create input pads */
+ for (seg = 0; seg < cat->nb_segments; seg++) {
+ for (type = 0; type < TYPE_ALL; type++) {
+ for (str = 0; str < cat->nb_streams[type]; str++) {
+ AVFilterPad pad = {
+ .type = type,
+ .get_video_buffer = get_video_buffer,
+ .get_audio_buffer = get_audio_buffer,
+ .filter_frame = filter_frame,
+ };
+ pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str);
+ ff_insert_inpad(ctx, ctx->nb_inputs, &pad);
+ }
+ }
+ }
+ /* create output pads */
+ for (type = 0; type < TYPE_ALL; type++) {
+ for (str = 0; str < cat->nb_streams[type]; str++) {
+ AVFilterPad pad = {
+ .type = type,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ };
+ pad.name = av_asprintf("out:%c%d", "va"[type], str);
+ ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
+ }
+ }
+
+ cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in));
+ if (!cat->in)
+ return AVERROR(ENOMEM);
+ cat->nb_in_active = ctx->nb_outputs;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ConcatContext *cat = ctx->priv;
+ unsigned i;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ av_freep(&ctx->input_pads[i].name);
+ ff_bufqueue_discard_all(&cat->in[i].queue);
+ }
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+ av_free(cat->in);
+}
+
+AVFilter ff_avf_concat = {
+ .name = "concat",
+ .description = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ConcatContext),
+ .inputs = NULL,
+ .outputs = NULL,
+ .priv_class = &concat_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
diff --git a/libavfilter/avf_showcqt.c b/libavfilter/avf_showcqt.c
new file mode 100644
index 0000000..e650f74
--- /dev/null
+++ b/libavfilter/avf_showcqt.c
@@ -0,0 +1,807 @@
+/*
+ * Copyright (c) 2014 Muhammad Faiz <mfcc64@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavcodec/avfft.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/xga_font_data.h"
+#include "libavutil/qsort.h"
+#include "libavutil/time.h"
+#include "libavutil/eval.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#include <math.h>
+#include <stdlib.h>
+
+#if CONFIG_LIBFREETYPE
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#endif
+
+/* this filter is designed to do 16 bins/semitones constant Q transform with Brown-Puckette algorithm
+ * start from E0 to D#10 (10 octaves)
+ * so there are 16 bins/semitones * 12 semitones/octaves * 10 octaves = 1920 bins
+ * match with full HD resolution */
+
+#define VIDEO_WIDTH 1920
+#define VIDEO_HEIGHT 1080
+#define FONT_HEIGHT 32
+#define SPECTOGRAM_HEIGHT ((VIDEO_HEIGHT-FONT_HEIGHT)/2)
+#define SPECTOGRAM_START (VIDEO_HEIGHT-SPECTOGRAM_HEIGHT)
+#define BASE_FREQ 20.051392800492
+#define COEFF_CLAMP 1.0e-4
+#define TLENGTH_MIN 0.001
+#define TLENGTH_DEFAULT "384/f*tc/(384/f+tc)"
+#define VOLUME_MIN 1e-10
+#define VOLUME_MAX 100.0
+#define FONTCOLOR_DEFAULT "st(0, (midi(f)-59.5)/12);" \
+ "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
+ "r(1-ld(1)) + b(ld(1))"
+
+typedef struct {
+ FFTSample value;
+ int index;
+} SparseCoeff;
+
+typedef struct {
+ const AVClass *class;
+ AVFrame *outpicref;
+ FFTContext *fft_context;
+ FFTComplex *fft_data;
+ FFTComplex *fft_result_left;
+ FFTComplex *fft_result_right;
+ uint8_t *spectogram;
+ SparseCoeff *coeff_sort;
+ SparseCoeff *coeffs[VIDEO_WIDTH];
+ uint8_t *font_alpha;
+ char *fontfile; /* using freetype */
+ int coeffs_len[VIDEO_WIDTH];
+ uint8_t fontcolor_value[VIDEO_WIDTH*3]; /* result of fontcolor option */
+ int64_t frame_count;
+ int spectogram_count;
+ int spectogram_index;
+ int fft_bits;
+ int req_fullfilled;
+ int remaining_fill;
+ char *tlength;
+ char *volume;
+ char *fontcolor;
+ double timeclamp; /* lower timeclamp, time-accurate, higher timeclamp, freq-accurate (at low freq)*/
+ float coeffclamp; /* lower coeffclamp, more precise, higher coeffclamp, faster */
+ int fullhd; /* if true, output video is at full HD resolution, otherwise it will be halved */
+ float gamma; /* lower gamma, more contrast, higher gamma, more range */
+ int fps; /* the required fps is so strict, so it's enough to be int, but 24000/1001 etc cannot be encoded */
+ int count; /* fps * count = transform rate */
+} ShowCQTContext;
+
+#define OFFSET(x) offsetof(ShowCQTContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showcqt_options[] = {
+ { "volume", "set volume", OFFSET(volume), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "tlength", "set transform length", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.1, 1.0, FLAGS },
+ { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, 0.1, 10, FLAGS },
+ { "gamma", "set gamma", OFFSET(gamma), AV_OPT_TYPE_FLOAT, { .dbl = 3 }, 1, 7, FLAGS },
+ { "fullhd", "set full HD resolution", OFFSET(fullhd), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS },
+ { "fps", "set video fps", OFFSET(fps), AV_OPT_TYPE_INT, { .i64 = 25 }, 10, 100, FLAGS },
+ { "count", "set number of transform per frame", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
+ { "fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showcqt);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int k;
+
+ ShowCQTContext *s = ctx->priv;
+ av_fft_end(s->fft_context);
+ s->fft_context = NULL;
+ for (k = 0; k < VIDEO_WIDTH; k++)
+ av_freep(&s->coeffs[k]);
+ av_freep(&s->fft_data);
+ av_freep(&s->fft_result_left);
+ av_freep(&s->fft_result_right);
+ av_freep(&s->coeff_sort);
+ av_freep(&s->spectogram);
+ av_freep(&s->font_alpha);
+ av_frame_free(&s->outpicref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
+ static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
+ static const int samplerates[] = { 44100, 48000, -1 };
+
+ /* set input audio formats */
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_formats);
+
+ layouts = avfilter_make_format64_list(channel_layouts);
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+
+ formats = ff_make_format_list(samplerates);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_samplerates);
+
+ /* set output video format */
+ formats = ff_make_format_list(pix_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &outlink->in_formats);
+
+ return 0;
+}
+
+#if CONFIG_LIBFREETYPE
+static void load_freetype_font(AVFilterContext *ctx)
+{
+ static const char str[] = "EF G A BC D ";
+ ShowCQTContext *s = ctx->priv;
+ FT_Library lib = NULL;
+ FT_Face face = NULL;
+ int video_scale = s->fullhd ? 2 : 1;
+ int video_width = (VIDEO_WIDTH/2) * video_scale;
+ int font_height = (FONT_HEIGHT/2) * video_scale;
+ int font_width = 8 * video_scale;
+ int font_repeat = font_width * 12;
+ int linear_hori_advance = font_width * 65536;
+ int non_monospace_warning = 0;
+ int x;
+
+ s->font_alpha = NULL;
+
+ if (!s->fontfile)
+ return;
+
+ if (FT_Init_FreeType(&lib))
+ goto fail;
+
+ if (FT_New_Face(lib, s->fontfile, 0, &face))
+ goto fail;
+
+ if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
+ goto fail;
+
+ if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
+ goto fail;
+
+ if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
+ goto fail;
+
+ s->font_alpha = av_malloc(font_height * video_width);
+ if (!s->font_alpha)
+ goto fail;
+
+ memset(s->font_alpha, 0, font_height * video_width);
+
+ for (x = 0; x < 12; x++) {
+ int sx, sy, rx, bx, by, dx, dy;
+
+ if (str[x] == ' ')
+ continue;
+
+ if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
+ goto fail;
+
+ if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
+ av_log(ctx, AV_LOG_WARNING, "Font is not monospace\n");
+ non_monospace_warning = 1;
+ }
+
+ sy = font_height - 4*video_scale - face->glyph->bitmap_top;
+ for (rx = 0; rx < 10; rx++) {
+ sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
+ for (by = 0; by < face->glyph->bitmap.rows; by++) {
+ dy = by + sy;
+ if (dy < 0)
+ continue;
+ if (dy >= font_height)
+ break;
+
+ for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
+ dx = bx + sx;
+ if (dx < 0)
+ continue;
+ if (dx >= video_width)
+ break;
+ s->font_alpha[dy*video_width+dx] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
+ }
+ }
+ }
+ }
+
+ FT_Done_Face(face);
+ FT_Done_FreeType(lib);
+ return;
+
+ fail:
+ av_log(ctx, AV_LOG_WARNING, "Error while loading freetype font, using default font instead\n");
+ FT_Done_Face(face);
+ FT_Done_FreeType(lib);
+ av_freep(&s->font_alpha);
+ return;
+}
+#endif
+
+static double a_weighting(void *p, double f)
+{
+ double ret = 12200.0*12200.0 * (f*f*f*f);
+ ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
+ sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
+ return ret;
+}
+
+static double b_weighting(void *p, double f)
+{
+ double ret = 12200.0*12200.0 * (f*f*f);
+ ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
+ return ret;
+}
+
+static double c_weighting(void *p, double f)
+{
+ double ret = 12200.0*12200.0 * (f*f);
+ ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
+ return ret;
+}
+
+static double midi(void *p, double f)
+{
+ return log2(f/440.0) * 12.0 + 69.0;
+}
+
+static double r_func(void *p, double x)
+{
+ x = av_clipd(x, 0.0, 1.0);
+ return (int)(x*255.0+0.5) << 16;
+}
+
+static double g_func(void *p, double x)
+{
+ x = av_clipd(x, 0.0, 1.0);
+ return (int)(x*255.0+0.5) << 8;
+}
+
+static double b_func(void *p, double x)
+{
+ x = av_clipd(x, 0.0, 1.0);
+ return (int)(x*255.0+0.5);
+}
+
+static inline int qsort_sparsecoeff(const SparseCoeff *a, const SparseCoeff *b)
+{
+ if (fabsf(a->value) >= fabsf(b->value))
+ return 1;
+ else
+ return -1;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowCQTContext *s = ctx->priv;
+ AVExpr *tlength_expr = NULL, *volume_expr = NULL, *fontcolor_expr = NULL;
+ uint8_t *fontcolor_value = s->fontcolor_value;
+ static const char * const expr_vars[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
+ static const char * const expr_func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
+ static const char * const expr_fontcolor_func_names[] = { "midi", "r", "g", "b", NULL };
+ static double (* const expr_funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting, NULL };
+ static double (* const expr_fontcolor_funcs[])(void *, double) = { midi, r_func, g_func, b_func, NULL };
+ int fft_len, k, x, y, ret;
+ int num_coeffs = 0;
+ int rate = inlink->sample_rate;
+ double max_len = rate * (double) s->timeclamp;
+ int64_t start_time, end_time;
+ int video_scale = s->fullhd ? 2 : 1;
+ int video_width = (VIDEO_WIDTH/2) * video_scale;
+ int video_height = (VIDEO_HEIGHT/2) * video_scale;
+ int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;
+
+ s->fft_bits = ceil(log2(max_len));
+ fft_len = 1 << s->fft_bits;
+
+ if (rate % (s->fps * s->count)) {
+ av_log(ctx, AV_LOG_ERROR, "Rate (%u) is not divisible by fps*count (%u*%u)\n", rate, s->fps, s->count);
+ return AVERROR(EINVAL);
+ }
+
+ s->fft_data = av_malloc_array(fft_len, sizeof(*s->fft_data));
+ s->coeff_sort = av_malloc_array(fft_len, sizeof(*s->coeff_sort));
+ s->fft_result_left = av_malloc_array(fft_len, sizeof(*s->fft_result_left));
+ s->fft_result_right = av_malloc_array(fft_len, sizeof(*s->fft_result_right));
+ s->fft_context = av_fft_init(s->fft_bits, 0);
+
+ if (!s->fft_data || !s->coeff_sort || !s->fft_result_left || !s->fft_result_right || !s->fft_context)
+ return AVERROR(ENOMEM);
+
+#if CONFIG_LIBFREETYPE
+ load_freetype_font(ctx);
+#else
+ if (s->fontfile)
+ av_log(ctx, AV_LOG_WARNING, "Freetype is not available, ignoring fontfile option\n");
+ s->font_alpha = NULL;
+#endif
+
+ av_log(ctx, AV_LOG_INFO, "Calculating spectral kernel, please wait\n");
+ start_time = av_gettime_relative();
+ ret = av_expr_parse(&tlength_expr, s->tlength, expr_vars, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ goto eval_error;
+
+ ret = av_expr_parse(&volume_expr, s->volume, expr_vars, expr_func_names,
+ expr_funcs, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ goto eval_error;
+
+ ret = av_expr_parse(&fontcolor_expr, s->fontcolor, expr_vars, expr_fontcolor_func_names,
+ expr_fontcolor_funcs, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ goto eval_error;
+
+ for (k = 0; k < VIDEO_WIDTH; k++) {
+ int hlen = fft_len >> 1;
+ float total = 0;
+ float partial = 0;
+ double freq = BASE_FREQ * exp2(k * (1.0/192.0));
+ double tlen, tlength, volume;
+ double expr_vars_val[] = { s->timeclamp, s->timeclamp, freq, freq, freq, 0 };
+ /* a window function from Albert H. Nuttall,
+ * "Some Windows with Very Good Sidelobe Behavior"
+ * -93.32 dB peak sidelobe and 18 dB/octave asymptotic decay
+ * coefficient normalized to a0 = 1 */
+ double a0 = 0.355768;
+ double a1 = 0.487396/a0;
+ double a2 = 0.144232/a0;
+ double a3 = 0.012604/a0;
+ double sv_step, cv_step, sv, cv;
+ double sw_step, cw_step, sw, cw, w;
+
+ tlength = av_expr_eval(tlength_expr, expr_vars_val, NULL);
+ if (isnan(tlength)) {
+ av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is nan, setting it to %g\n", freq, s->timeclamp);
+ tlength = s->timeclamp;
+ } else if (tlength < TLENGTH_MIN) {
+ av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, TLENGTH_MIN);
+ tlength = TLENGTH_MIN;
+ } else if (tlength > s->timeclamp) {
+ av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, s->timeclamp);
+ tlength = s->timeclamp;
+ }
+
+ volume = FFABS(av_expr_eval(volume_expr, expr_vars_val, NULL));
+ if (isnan(volume)) {
+ av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is nan, setting it to 0\n", freq);
+ volume = VOLUME_MIN;
+ } else if (volume < VOLUME_MIN) {
+ volume = VOLUME_MIN;
+ } else if (volume > VOLUME_MAX) {
+ av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is %g, setting it to %g\n", freq, volume, VOLUME_MAX);
+ volume = VOLUME_MAX;
+ }
+
+ if (s->fullhd || !(k & 1)) {
+ int fontcolor = av_expr_eval(fontcolor_expr, expr_vars_val, NULL);
+ fontcolor_value[0] = (fontcolor >> 16) & 0xFF;
+ fontcolor_value[1] = (fontcolor >> 8) & 0xFF;
+ fontcolor_value[2] = fontcolor & 0xFF;
+ fontcolor_value += 3;
+ }
+
+ tlen = tlength * rate;
+ s->fft_data[0].re = 0;
+ s->fft_data[0].im = 0;
+ s->fft_data[hlen].re = (1.0 + a1 + a2 + a3) * (1.0/tlen) * volume * (1.0/fft_len);
+ s->fft_data[hlen].im = 0;
+ sv_step = sv = sin(2.0*M_PI*freq*(1.0/rate));
+ cv_step = cv = cos(2.0*M_PI*freq*(1.0/rate));
+ /* also optimizing window func */
+ sw_step = sw = sin(2.0*M_PI*(1.0/tlen));
+ cw_step = cw = cos(2.0*M_PI*(1.0/tlen));
+ for (x = 1; x < 0.5 * tlen; x++) {
+ double cv_tmp, cw_tmp;
+ double cw2, cw3, sw2;
+
+ cw2 = cw * cw - sw * sw;
+ sw2 = cw * sw + sw * cw;
+ cw3 = cw * cw2 - sw * sw2;
+ w = (1.0 + a1 * cw + a2 * cw2 + a3 * cw3) * (1.0/tlen) * volume * (1.0/fft_len);
+ s->fft_data[hlen + x].re = w * cv;
+ s->fft_data[hlen + x].im = w * sv;
+ s->fft_data[hlen - x].re = s->fft_data[hlen + x].re;
+ s->fft_data[hlen - x].im = -s->fft_data[hlen + x].im;
+
+ cv_tmp = cv * cv_step - sv * sv_step;
+ sv = sv * cv_step + cv * sv_step;
+ cv = cv_tmp;
+ cw_tmp = cw * cw_step - sw * sw_step;
+ sw = sw * cw_step + cw * sw_step;
+ cw = cw_tmp;
+ }
+ for (; x < hlen; x++) {
+ s->fft_data[hlen + x].re = 0;
+ s->fft_data[hlen + x].im = 0;
+ s->fft_data[hlen - x].re = 0;
+ s->fft_data[hlen - x].im = 0;
+ }
+ av_fft_permute(s->fft_context, s->fft_data);
+ av_fft_calc(s->fft_context, s->fft_data);
+
+ for (x = 0; x < fft_len; x++) {
+ s->coeff_sort[x].index = x;
+ s->coeff_sort[x].value = s->fft_data[x].re;
+ }
+
+ AV_QSORT(s->coeff_sort, fft_len, SparseCoeff, qsort_sparsecoeff);
+ for (x = 0; x < fft_len; x++)
+ total += fabsf(s->coeff_sort[x].value);
+
+ for (x = 0; x < fft_len; x++) {
+ partial += fabsf(s->coeff_sort[x].value);
+ if (partial > total * s->coeffclamp * COEFF_CLAMP) {
+ s->coeffs_len[k] = fft_len - x;
+ num_coeffs += s->coeffs_len[k];
+ s->coeffs[k] = av_malloc_array(s->coeffs_len[k], sizeof(*s->coeffs[k]));
+ if (!s->coeffs[k]) {
+ ret = AVERROR(ENOMEM);
+ goto eval_error;
+ }
+ for (y = 0; y < s->coeffs_len[k]; y++)
+ s->coeffs[k][y] = s->coeff_sort[x+y];
+ break;
+ }
+ }
+ }
+ av_expr_free(fontcolor_expr);
+ av_expr_free(volume_expr);
+ av_expr_free(tlength_expr);
+ end_time = av_gettime_relative();
+ av_log(ctx, AV_LOG_INFO, "Elapsed time %.6f s (fft_len=%u, num_coeffs=%u)\n", 1e-6 * (end_time-start_time), fft_len, num_coeffs);
+
+ outlink->w = video_width;
+ outlink->h = video_height;
+
+ s->req_fullfilled = 0;
+ s->spectogram_index = 0;
+ s->frame_count = 0;
+ s->spectogram_count = 0;
+ s->remaining_fill = fft_len >> 1;
+ memset(s->fft_data, 0, fft_len * sizeof(*s->fft_data));
+
+ s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!s->outpicref)
+ return AVERROR(ENOMEM);
+
+ s->spectogram = av_calloc(spectogram_height, s->outpicref->linesize[0]);
+ if (!s->spectogram)
+ return AVERROR(ENOMEM);
+
+ outlink->sample_aspect_ratio = av_make_q(1, 1);
+ outlink->time_base = av_make_q(1, s->fps);
+ outlink->frame_rate = av_make_q(s->fps, 1);
+ return 0;
+
+eval_error:
+ av_expr_free(fontcolor_expr);
+ av_expr_free(volume_expr);
+ av_expr_free(tlength_expr);
+ return ret;
+}
+
+static int plot_cqt(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowCQTContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int fft_len = 1 << s->fft_bits;
+ FFTSample result[VIDEO_WIDTH][4];
+ int x, y, ret = 0;
+ int linesize = s->outpicref->linesize[0];
+ int video_scale = s->fullhd ? 2 : 1;
+ int video_width = (VIDEO_WIDTH/2) * video_scale;
+ int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;
+ int spectogram_start = (SPECTOGRAM_START/2) * video_scale;
+ int font_height = (FONT_HEIGHT/2) * video_scale;
+
+ /* real part contains left samples, imaginary part contains right samples */
+ memcpy(s->fft_result_left, s->fft_data, fft_len * sizeof(*s->fft_data));
+ av_fft_permute(s->fft_context, s->fft_result_left);
+ av_fft_calc(s->fft_context, s->fft_result_left);
+
+ /* separate left and right, (and multiply by 2.0) */
+ s->fft_result_right[0].re = 2.0f * s->fft_result_left[0].im;
+ s->fft_result_right[0].im = 0;
+ s->fft_result_left[0].re = 2.0f * s->fft_result_left[0].re;
+ s->fft_result_left[0].im = 0;
+ for (x = 1; x <= fft_len >> 1; x++) {
+ FFTSample tmpy = s->fft_result_left[fft_len-x].im - s->fft_result_left[x].im;
+
+ s->fft_result_right[x].re = s->fft_result_left[x].im + s->fft_result_left[fft_len-x].im;
+ s->fft_result_right[x].im = s->fft_result_left[x].re - s->fft_result_left[fft_len-x].re;
+ s->fft_result_right[fft_len-x].re = s->fft_result_right[x].re;
+ s->fft_result_right[fft_len-x].im = -s->fft_result_right[x].im;
+
+ s->fft_result_left[x].re = s->fft_result_left[x].re + s->fft_result_left[fft_len-x].re;
+ s->fft_result_left[x].im = tmpy;
+ s->fft_result_left[fft_len-x].re = s->fft_result_left[x].re;
+ s->fft_result_left[fft_len-x].im = -s->fft_result_left[x].im;
+ }
+
+ /* calculating cqt */
+ for (x = 0; x < VIDEO_WIDTH; x++) {
+ int u;
+ float g = 1.0f / s->gamma;
+ FFTComplex l = {0,0};
+ FFTComplex r = {0,0};
+
+ for (u = 0; u < s->coeffs_len[x]; u++) {
+ FFTSample value = s->coeffs[x][u].value;
+ int index = s->coeffs[x][u].index;
+ l.re += value * s->fft_result_left[index].re;
+ l.im += value * s->fft_result_left[index].im;
+ r.re += value * s->fft_result_right[index].re;
+ r.im += value * s->fft_result_right[index].im;
+ }
+ /* result is power, not amplitude */
+ result[x][0] = l.re * l.re + l.im * l.im;
+ result[x][2] = r.re * r.re + r.im * r.im;
+ result[x][1] = 0.5f * (result[x][0] + result[x][2]);
+ result[x][3] = result[x][1];
+ result[x][0] = 255.0f * powf(FFMIN(1.0f,result[x][0]), g);
+ result[x][1] = 255.0f * powf(FFMIN(1.0f,result[x][1]), g);
+ result[x][2] = 255.0f * powf(FFMIN(1.0f,result[x][2]), g);
+ }
+
+ if (!s->fullhd) {
+ for (x = 0; x < video_width; x++) {
+ result[x][0] = 0.5f * (result[2*x][0] + result[2*x+1][0]);
+ result[x][1] = 0.5f * (result[2*x][1] + result[2*x+1][1]);
+ result[x][2] = 0.5f * (result[2*x][2] + result[2*x+1][2]);
+ result[x][3] = 0.5f * (result[2*x][3] + result[2*x+1][3]);
+ }
+ }
+
+ for (x = 0; x < video_width; x++) {
+ s->spectogram[s->spectogram_index*linesize + 3*x] = result[x][0] + 0.5f;
+ s->spectogram[s->spectogram_index*linesize + 3*x + 1] = result[x][1] + 0.5f;
+ s->spectogram[s->spectogram_index*linesize + 3*x + 2] = result[x][2] + 0.5f;
+ }
+
+ /* drawing */
+ if (!s->spectogram_count) {
+ uint8_t *data = (uint8_t*) s->outpicref->data[0];
+ float rcp_result[VIDEO_WIDTH];
+ int total_length = linesize * spectogram_height;
+ int back_length = linesize * s->spectogram_index;
+
+ for (x = 0; x < video_width; x++)
+ rcp_result[x] = 1.0f / (result[x][3]+0.0001f);
+
+ /* drawing bar */
+ for (y = 0; y < spectogram_height; y++) {
+ float height = (spectogram_height - y) * (1.0f/spectogram_height);
+ uint8_t *lineptr = data + y * linesize;
+ for (x = 0; x < video_width; x++) {
+ float mul;
+ if (result[x][3] <= height) {
+ *lineptr++ = 0;
+ *lineptr++ = 0;
+ *lineptr++ = 0;
+ } else {
+ mul = (result[x][3] - height) * rcp_result[x];
+ *lineptr++ = mul * result[x][0] + 0.5f;
+ *lineptr++ = mul * result[x][1] + 0.5f;
+ *lineptr++ = mul * result[x][2] + 0.5f;
+ }
+ }
+ }
+
+ /* drawing font */
+ if (s->font_alpha) {
+ for (y = 0; y < font_height; y++) {
+ uint8_t *lineptr = data + (spectogram_height + y) * linesize;
+ uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize;
+ uint8_t *fontcolor_value = s->fontcolor_value;
+ for (x = 0; x < video_width; x++) {
+ uint8_t alpha = s->font_alpha[y*video_width+x];
+ lineptr[3*x] = (spectogram_src[3*x] * (255-alpha) + fontcolor_value[0] * alpha + 255) >> 8;
+ lineptr[3*x+1] = (spectogram_src[3*x+1] * (255-alpha) + fontcolor_value[1] * alpha + 255) >> 8;
+ lineptr[3*x+2] = (spectogram_src[3*x+2] * (255-alpha) + fontcolor_value[2] * alpha + 255) >> 8;
+ fontcolor_value += 3;
+ }
+ }
+ } else {
+ for (y = 0; y < font_height; y++) {
+ uint8_t *lineptr = data + (spectogram_height + y) * linesize;
+ memcpy(lineptr, s->spectogram + s->spectogram_index * linesize, video_width*3);
+ }
+ for (x = 0; x < video_width; x += video_width/10) {
+ int u;
+ static const char str[] = "EF G A BC D ";
+ uint8_t *startptr = data + spectogram_height * linesize + x * 3;
+ for (u = 0; str[u]; u++) {
+ int v;
+ for (v = 0; v < 16; v++) {
+ uint8_t *p = startptr + v * linesize * video_scale + 8 * 3 * u * video_scale;
+ int ux = x + 8 * u * video_scale;
+ int mask;
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (mask & avpriv_vga16_font[str[u] * 16 + v]) {
+ p[0] = s->fontcolor_value[3*ux];
+ p[1] = s->fontcolor_value[3*ux+1];
+ p[2] = s->fontcolor_value[3*ux+2];
+ if (video_scale == 2) {
+ p[linesize] = p[0];
+ p[linesize+1] = p[1];
+ p[linesize+2] = p[2];
+ p[3] = p[linesize+3] = s->fontcolor_value[3*ux+3];
+ p[4] = p[linesize+4] = s->fontcolor_value[3*ux+4];
+ p[5] = p[linesize+5] = s->fontcolor_value[3*ux+5];
+ }
+ }
+ p += 3 * video_scale;
+ ux += video_scale;
+ }
+ }
+ }
+ }
+ }
+
+ /* drawing spectogram/sonogram */
+ data += spectogram_start * linesize;
+ memcpy(data, s->spectogram + s->spectogram_index*linesize, total_length - back_length);
+
+ data += total_length - back_length;
+ if (back_length)
+ memcpy(data, s->spectogram, back_length);
+
+ s->outpicref->pts = s->frame_count;
+ ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
+ s->req_fullfilled = 1;
+ s->frame_count++;
+ }
+ s->spectogram_count = (s->spectogram_count + 1) % s->count;
+ s->spectogram_index = (s->spectogram_index + spectogram_height - 1) % spectogram_height;
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowCQTContext *s = ctx->priv;
+ int step = inlink->sample_rate / (s->fps * s->count);
+ int fft_len = 1 << s->fft_bits;
+ int remaining;
+ float *audio_data;
+
+ if (!insamples) {
+ while (s->remaining_fill < (fft_len >> 1)) {
+ int ret, x;
+ memset(&s->fft_data[fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
+ ret = plot_cqt(inlink);
+ if (ret < 0)
+ return ret;
+ for (x = 0; x < (fft_len-step); x++)
+ s->fft_data[x] = s->fft_data[x+step];
+ s->remaining_fill += step;
+ }
+ return AVERROR(EOF);
+ }
+
+ remaining = insamples->nb_samples;
+ audio_data = (float*) insamples->data[0];
+
+ while (remaining) {
+ if (remaining >= s->remaining_fill) {
+ int i = insamples->nb_samples - remaining;
+ int j = fft_len - s->remaining_fill;
+ int m, ret;
+ for (m = 0; m < s->remaining_fill; m++) {
+ s->fft_data[j+m].re = audio_data[2*(i+m)];
+ s->fft_data[j+m].im = audio_data[2*(i+m)+1];
+ }
+ ret = plot_cqt(inlink);
+ if (ret < 0) {
+ av_frame_free(&insamples);
+ return ret;
+ }
+ remaining -= s->remaining_fill;
+ for (m = 0; m < fft_len-step; m++)
+ s->fft_data[m] = s->fft_data[m+step];
+ s->remaining_fill = step;
+ } else {
+ int i = insamples->nb_samples - remaining;
+ int j = fft_len - s->remaining_fill;
+ int m;
+ for (m = 0; m < remaining; m++) {
+ s->fft_data[m+j].re = audio_data[2*(i+m)];
+ s->fft_data[m+j].im = audio_data[2*(i+m)+1];
+ }
+ s->remaining_fill -= remaining;
+ remaining = 0;
+ }
+ }
+ av_frame_free(&insamples);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ ShowCQTContext *s = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret;
+
+ s->req_fullfilled = 0;
+ do {
+ ret = ff_request_frame(inlink);
+ } while (!s->req_fullfilled && ret >= 0);
+
+ if (ret == AVERROR_EOF && s->outpicref)
+ filter_frame(inlink, NULL);
+ return ret;
+}
+
+static const AVFilterPad showcqt_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showcqt_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showcqt = {
+ .name = "showcqt",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant Q Transform) spectrum video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowCQTContext),
+ .inputs = showcqt_inputs,
+ .outputs = showcqt_outputs,
+ .priv_class = &showcqt_class,
+};
diff --git a/libavfilter/avf_showspectrum.c b/libavfilter/avf_showspectrum.c
new file mode 100644
index 0000000..24116da
--- /dev/null
+++ b/libavfilter/avf_showspectrum.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2012-2013 Clément Bœsch
+ * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
+ * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
+ */
+
+#include <math.h>
+
+#include "libavcodec/avfft.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum DisplayMode { COMBINED, SEPARATE, NB_MODES };
+enum DisplayScale { LINEAR, SQRT, CBRT, LOG, NB_SCALES };
+enum ColorMode { CHANNEL, INTENSITY, NB_CLMODES };
+enum WindowFunc { WFUNC_NONE, WFUNC_HANN, WFUNC_HAMMING, WFUNC_BLACKMAN, NB_WFUNC };
+enum SlideMode { REPLACE, SCROLL, FULLFRAME, NB_SLIDES };
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ AVFrame *outpicref;
+ int req_fullfilled;
+ int nb_display_channels;
+ int channel_height;
+ int sliding; ///< 1 if sliding mode, 0 otherwise
+ enum DisplayMode mode; ///< channel display mode
+ enum ColorMode color_mode; ///< display color scheme
+ enum DisplayScale scale;
+ float saturation; ///< color saturation multiplier
+ int xpos; ///< x position (current column)
+ RDFTContext *rdft; ///< Real Discrete Fourier Transform context
+ int rdft_bits; ///< number of bits (RDFT window size = 1<<rdft_bits)
+ FFTSample **rdft_data; ///< bins holder for each (displayed) channels
+ float *window_func_lut; ///< Window function LUT
+ enum WindowFunc win_func;
+ float *combine_buffer; ///< color combining buffer (3 * h items)
+} ShowSpectrumContext;
+
+#define OFFSET(x) offsetof(ShowSpectrumContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showspectrum_options[] = {
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
+ { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES, FLAGS, "slide" },
+ { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
+ { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
+ { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
+ { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
+ { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
+ { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
+ { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
+ { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
+ { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
+ { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
+ { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
+ { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
+ { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
+ { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
+ { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
+ { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANN}, 0, NB_WFUNC-1, FLAGS, "win_func" },
+ { "hann", "Hann window", 0, AV_OPT_TYPE_CONST, {.i64 = WFUNC_HANN}, 0, 0, FLAGS, "win_func" },
+ { "hamming", "Hamming window", 0, AV_OPT_TYPE_CONST, {.i64 = WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
+ { "blackman", "Blackman window", 0, AV_OPT_TYPE_CONST, {.i64 = WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showspectrum);
+
+static const struct {
+ float a, y, u, v;
+} intensity_color_table[] = {
+ { 0, 0, 0, 0 },
+ { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
+ { 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
+ { 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
+ { 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
+ { 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
+ { 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
+ { 1, 1, 0, 0 }
+};
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ShowSpectrumContext *s = ctx->priv;
+ int i;
+
+ av_freep(&s->combine_buffer);
+ av_rdft_end(s->rdft);
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->rdft_data[i]);
+ av_freep(&s->rdft_data);
+ av_freep(&s->window_func_lut);
+ av_frame_free(&s->outpicref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
+
+ /* set input audio formats */
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_formats);
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_samplerates);
+
+ /* set output video format */
+ formats = ff_make_format_list(pix_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &outlink->in_formats);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowSpectrumContext *s = ctx->priv;
+ int i, rdft_bits, win_size, h;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+
+ h = (s->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels;
+ s->channel_height = h;
+
+ /* RDFT window size (precision) according to the requested output frame height */
+ for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++);
+ win_size = 1 << rdft_bits;
+
+ /* (re-)configuration if the video output changed (or first init) */
+ if (rdft_bits != s->rdft_bits) {
+ size_t rdft_size, rdft_listsize;
+ AVFrame *outpicref;
+
+ av_rdft_end(s->rdft);
+ s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
+ if (!s->rdft) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to create RDFT context. "
+ "The window size might be too high.\n");
+ return AVERROR(EINVAL);
+ }
+ s->rdft_bits = rdft_bits;
+
+ /* RDFT buffers: x2 for each (display) channel buffer.
+ * Note: we use free and malloc instead of a realloc-like function to
+ * make sure the buffer is aligned in memory for the FFT functions. */
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->rdft_data[i]);
+ av_freep(&s->rdft_data);
+ s->nb_display_channels = inlink->channels;
+
+ if (av_size_mult(sizeof(*s->rdft_data),
+ s->nb_display_channels, &rdft_listsize) < 0)
+ return AVERROR(EINVAL);
+ if (av_size_mult(sizeof(**s->rdft_data),
+ win_size, &rdft_size) < 0)
+ return AVERROR(EINVAL);
+ s->rdft_data = av_malloc(rdft_listsize);
+ if (!s->rdft_data)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->rdft_data[i] = av_malloc(rdft_size);
+ if (!s->rdft_data[i])
+ return AVERROR(ENOMEM);
+ }
+
+ /* pre-calc windowing function */
+ s->window_func_lut =
+ av_realloc_f(s->window_func_lut, win_size,
+ sizeof(*s->window_func_lut));
+ if (!s->window_func_lut)
+ return AVERROR(ENOMEM);
+ switch (s->win_func) {
+ case WFUNC_NONE:
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = 1.;
+ break;
+ case WFUNC_HANN:
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
+ break;
+ case WFUNC_HAMMING:
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = .54f - .46f * cos(2*M_PI*i / (win_size-1));
+ break;
+ case WFUNC_BLACKMAN: {
+ for (i = 0; i < win_size; i++)
+ s->window_func_lut[i] = .42f - .5f*cos(2*M_PI*i / (win_size-1)) + .08f*cos(4*M_PI*i / (win_size-1));
+ break;
+ }
+ default:
+ av_assert0(0);
+ }
+
+ /* prepare the initial picref buffer (black frame) */
+ av_frame_free(&s->outpicref);
+ s->outpicref = outpicref =
+ ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+ for (i = 0; i < outlink->h; i++) {
+ memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w);
+ memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
+ memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
+ }
+ }
+
+ if (s->xpos >= outlink->w)
+ s->xpos = 0;
+
+ outlink->frame_rate = av_make_q(inlink->sample_rate, win_size);
+ if (s->sliding == FULLFRAME)
+ outlink->frame_rate.den *= outlink->w;
+
+ inlink->min_samples = inlink->max_samples = inlink->partial_buf_size =
+ win_size;
+
+ s->combine_buffer =
+ av_realloc_f(s->combine_buffer, outlink->h * 3,
+ sizeof(*s->combine_buffer));
+
+ av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n",
+ s->w, s->h, win_size);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ ShowSpectrumContext *s = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ unsigned i;
+ int ret;
+
+ s->req_fullfilled = 0;
+ do {
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF && s->sliding == FULLFRAME && s->xpos > 0 &&
+ s->outpicref) {
+ for (i = 0; i < outlink->h; i++) {
+ memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos, 0, outlink->w - s->xpos);
+ memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
+ memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
+ }
+ ret = ff_filter_frame(outlink, s->outpicref);
+ s->outpicref = NULL;
+ s->req_fullfilled = 1;
+ }
+ } while (!s->req_fullfilled && ret >= 0);
+
+ return ret;
+}
+
+static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
+{
+ int ret;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowSpectrumContext *s = ctx->priv;
+ AVFrame *outpicref = s->outpicref;
+
+ /* nb_freq contains the power of two superior or equal to the output image
+ * height (or half the RDFT window size) */
+ const int nb_freq = 1 << (s->rdft_bits - 1);
+ const int win_size = nb_freq << 1;
+ const double w = 1. / (sqrt(nb_freq) * 32768.);
+ int h = s->channel_height;
+
+ int ch, plane, n, y;
+
+ av_assert0(insamples->nb_samples == win_size);
+
+ /* fill RDFT input with the number of samples available */
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
+ const int16_t *p = (int16_t *)insamples->extended_data[ch];
+
+ for (n = 0; n < win_size; n++)
+ s->rdft_data[ch][n] = p[n] * s->window_func_lut[n];
+ }
+
+ /* TODO reindent */
+
+ /* run RDFT on each samples set */
+ for (ch = 0; ch < s->nb_display_channels; ch++)
+ av_rdft_calc(s->rdft, s->rdft_data[ch]);
+
+ /* fill a new spectrum column */
+#define RE(y, ch) s->rdft_data[ch][2 * (y) + 0]
+#define IM(y, ch) s->rdft_data[ch][2 * (y) + 1]
+#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
+
+ /* initialize buffer for combining to black */
+ for (y = 0; y < outlink->h; y++) {
+ s->combine_buffer[3 * y ] = 0;
+ s->combine_buffer[3 * y + 1] = 127.5;
+ s->combine_buffer[3 * y + 2] = 127.5;
+ }
+
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
+ float yf, uf, vf;
+
+ /* decide color range */
+ switch (s->mode) {
+ case COMBINED:
+ // reduce range by channel count
+ yf = 256.0f / s->nb_display_channels;
+ switch (s->color_mode) {
+ case INTENSITY:
+ uf = yf;
+ vf = yf;
+ break;
+ case CHANNEL:
+ /* adjust saturation for mixed UV coloring */
+ /* this factor is correct for infinite channels, an approximation otherwise */
+ uf = yf * M_PI;
+ vf = yf * M_PI;
+ break;
+ default:
+ av_assert0(0);
+ }
+ break;
+ case SEPARATE:
+ // full range
+ yf = 256.0f;
+ uf = 256.0f;
+ vf = 256.0f;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ if (s->color_mode == CHANNEL) {
+ if (s->nb_display_channels > 1) {
+ uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels);
+ vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels);
+ } else {
+ uf = 0.0f;
+ vf = 0.0f;
+ }
+ }
+ uf *= s->saturation;
+ vf *= s->saturation;
+
+ /* draw the channel */
+ for (y = 0; y < h; y++) {
+ int row = (s->mode == COMBINED) ? y : ch * h + y;
+ float *out = &s->combine_buffer[3 * row];
+
+ /* get magnitude */
+ float a = w * MAGNITUDE(y, ch);
+
+ /* apply scale */
+ switch (s->scale) {
+ case LINEAR:
+ break;
+ case SQRT:
+ a = sqrt(a);
+ break;
+ case CBRT:
+ a = cbrt(a);
+ break;
+ case LOG:
+ a = 1 - log(FFMAX(FFMIN(1, a), 1e-6)) / log(1e-6); // zero = -120dBFS
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ if (s->color_mode == INTENSITY) {
+ float y, u, v;
+ int i;
+
+ for (i = 1; i < sizeof(intensity_color_table) / sizeof(*intensity_color_table) - 1; i++)
+ if (intensity_color_table[i].a >= a)
+ break;
+ // i now is the first item >= the color
+ // now we know to interpolate between item i - 1 and i
+ if (a <= intensity_color_table[i - 1].a) {
+ y = intensity_color_table[i - 1].y;
+ u = intensity_color_table[i - 1].u;
+ v = intensity_color_table[i - 1].v;
+ } else if (a >= intensity_color_table[i].a) {
+ y = intensity_color_table[i].y;
+ u = intensity_color_table[i].u;
+ v = intensity_color_table[i].v;
+ } else {
+ float start = intensity_color_table[i - 1].a;
+ float end = intensity_color_table[i].a;
+ float lerpfrac = (a - start) / (end - start);
+ y = intensity_color_table[i - 1].y * (1.0f - lerpfrac)
+ + intensity_color_table[i].y * lerpfrac;
+ u = intensity_color_table[i - 1].u * (1.0f - lerpfrac)
+ + intensity_color_table[i].u * lerpfrac;
+ v = intensity_color_table[i - 1].v * (1.0f - lerpfrac)
+ + intensity_color_table[i].v * lerpfrac;
+ }
+
+ out[0] += y * yf;
+ out[1] += u * uf;
+ out[2] += v * vf;
+ } else {
+ out[0] += a * yf;
+ out[1] += a * uf;
+ out[2] += a * vf;
+ }
+ }
+ }
+
+ /* copy to output */
+ if (s->sliding == SCROLL) {
+ for (plane = 0; plane < 3; plane++) {
+ for (y = 0; y < outlink->h; y++) {
+ uint8_t *p = outpicref->data[plane] +
+ y * outpicref->linesize[plane];
+ memmove(p, p + 1, outlink->w - 1);
+ }
+ }
+ s->xpos = outlink->w - 1;
+ }
+ for (plane = 0; plane < 3; plane++) {
+ uint8_t *p = outpicref->data[plane] +
+ (outlink->h - 1) * outpicref->linesize[plane] +
+ s->xpos;
+ for (y = 0; y < outlink->h; y++) {
+ *p = rint(FFMAX(0, FFMIN(s->combine_buffer[3 * y + plane], 255)));
+ p -= outpicref->linesize[plane];
+ }
+ }
+
+ if (s->sliding != FULLFRAME || s->xpos == 0)
+ outpicref->pts = insamples->pts;
+
+ s->xpos++;
+ if (s->xpos >= outlink->w)
+ s->xpos = 0;
+ if (s->sliding != FULLFRAME || s->xpos == 0) {
+ s->req_fullfilled = 1;
+ ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
+ if (ret < 0)
+ return ret;
+ }
+
+ return win_size;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ShowSpectrumContext *s = ctx->priv;
+ unsigned win_size = 1 << s->rdft_bits;
+ int ret = 0;
+
+ av_assert0(insamples->nb_samples <= win_size);
+ if (insamples->nb_samples == win_size)
+ ret = plot_spectrum_column(inlink, insamples);
+
+ av_frame_free(&insamples);
+ return ret;
+}
+
+static const AVFilterPad showspectrum_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showspectrum_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showspectrum = {
+ .name = "showspectrum",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowSpectrumContext),
+ .inputs = showspectrum_inputs,
+ .outputs = showspectrum_outputs,
+ .priv_class = &showspectrum_class,
+};
diff --git a/libavfilter/avf_showwaves.c b/libavfilter/avf_showwaves.c
new file mode 100644
index 0000000..fa34a52
--- /dev/null
+++ b/libavfilter/avf_showwaves.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio to video multimedia filter
+ */
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "audio.h"
+#include "video.h"
+#include "internal.h"
+
+enum ShowWavesMode {
+ MODE_POINT,
+ MODE_LINE,
+ MODE_P2P,
+ MODE_CENTERED_LINE,
+ MODE_NB,
+};
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ AVRational rate;
+ int buf_idx;
+ int16_t *buf_idy; /* y coordinate of previous sample for each channel */
+ AVFrame *outpicref;
+ int req_fullfilled;
+ int n;
+ int sample_count_mod;
+ enum ShowWavesMode mode;
+ int split_channels;
+ void (*draw_sample)(uint8_t *buf, int height, int linesize,
+ int16_t sample, int16_t *prev_y, int intensity);
+} ShowWavesContext;
+
+#define OFFSET(x) offsetof(ShowWavesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption showwaves_options[] = {
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
+ { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
+ { "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
+ { "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
+ { "p2p", "draw a line between samples", 0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P}, .flags=FLAGS, .unit="mode"},
+ { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
+ { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(showwaves);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ShowWavesContext *showwaves = ctx->priv;
+
+ av_frame_free(&showwaves->outpicref);
+ av_freep(&showwaves->buf_idy);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+
+ /* set input audio formats */
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_formats);
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_samplerates);
+
+ /* set output video format */
+ formats = ff_make_format_list(pix_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &outlink->in_formats);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowWavesContext *showwaves = ctx->priv;
+ int nb_channels = inlink->channels;
+
+ if (!showwaves->n)
+ showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);
+
+ showwaves->buf_idx = 0;
+ if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
+ av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
+ return AVERROR(ENOMEM);
+ }
+ outlink->w = showwaves->w;
+ outlink->h = showwaves->h;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
+ (AVRational){showwaves->w,1});
+
+ av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
+ showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
+ return 0;
+}
+
+inline static int push_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ShowWavesContext *showwaves = outlink->src->priv;
+ int nb_channels = inlink->channels;
+ int ret, i;
+
+ if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0)
+ showwaves->req_fullfilled = 1;
+ showwaves->outpicref = NULL;
+ showwaves->buf_idx = 0;
+ for (i = 0; i < nb_channels; i++)
+ showwaves->buf_idy[i] = 0;
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ ShowWavesContext *showwaves = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret;
+
+ showwaves->req_fullfilled = 0;
+ do {
+ ret = ff_request_frame(inlink);
+ } while (!showwaves->req_fullfilled && ret >= 0);
+
+ if (ret == AVERROR_EOF && showwaves->outpicref)
+ push_frame(outlink);
+ return ret;
+}
+
+#define MAX_INT16 ((1<<15) -1)
+
+static void draw_sample_point(uint8_t *buf, int height, int linesize,
+ int16_t sample, int16_t *prev_y, int intensity)
+{
+ const int h = height/2 - av_rescale(sample, height/2, MAX_INT16);
+ if (h >= 0 && h < height)
+ buf[h * linesize] += intensity;
+}
+
+static void draw_sample_line(uint8_t *buf, int height, int linesize,
+ int16_t sample, int16_t *prev_y, int intensity)
+{
+ int k;
+ const int h = height/2 - av_rescale(sample, height/2, MAX_INT16);
+ int start = height/2;
+ int end = av_clip(h, 0, height-1);
+ if (start > end)
+ FFSWAP(int16_t, start, end);
+ for (k = start; k < end; k++)
+ buf[k * linesize] += intensity;
+}
+
+static void draw_sample_p2p(uint8_t *buf, int height, int linesize,
+ int16_t sample, int16_t *prev_y, int intensity)
+{
+ int k;
+ const int h = height/2 - av_rescale(sample, height/2, MAX_INT16);
+ if (h >= 0 && h < height) {
+ buf[h * linesize] += intensity;
+ if (*prev_y && h != *prev_y) {
+ int start = *prev_y;
+ int end = av_clip(h, 0, height-1);
+ if (start > end)
+ FFSWAP(int16_t, start, end);
+ for (k = start + 1; k < end; k++)
+ buf[k * linesize] += intensity;
+ }
+ }
+ *prev_y = h;
+}
+
+static void draw_sample_cline(uint8_t *buf, int height, int linesize,
+ int16_t sample, int16_t *prev_y, int intensity)
+{
+ int k;
+ const int h = av_rescale(abs(sample), height, UINT16_MAX);
+ const int start = (height - h) / 2;
+ const int end = start + h;
+ for (k = start; k < end; k++)
+ buf[k * linesize] += intensity;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ShowWavesContext *showwaves = ctx->priv;
+ const int nb_samples = insamples->nb_samples;
+ AVFrame *outpicref = showwaves->outpicref;
+ int linesize = outpicref ? outpicref->linesize[0] : 0;
+ int16_t *p = (int16_t *)insamples->data[0];
+ int nb_channels = inlink->channels;
+ int i, j, ret = 0;
+ const int n = showwaves->n;
+ const int x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
+ const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
+
+ /* draw data in the buffer */
+ for (i = 0; i < nb_samples; i++) {
+ if (!showwaves->outpicref) {
+ showwaves->outpicref = outpicref =
+ ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+ outpicref->width = outlink->w;
+ outpicref->height = outlink->h;
+ outpicref->pts = insamples->pts +
+ av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
+ (AVRational){ 1, inlink->sample_rate },
+ outlink->time_base);
+ linesize = outpicref->linesize[0];
+ for (j = 0; j < outlink->h; j++)
+ memset(outpicref->data[0] + j * linesize, 0, outlink->w);
+ }
+ for (j = 0; j < nb_channels; j++) {
+ uint8_t *buf = outpicref->data[0] + showwaves->buf_idx;
+ if (showwaves->split_channels)
+ buf += j*ch_height*linesize;
+ showwaves->draw_sample(buf, ch_height, linesize, *p++,
+ &showwaves->buf_idy[j], x);
+ }
+
+ showwaves->sample_count_mod++;
+ if (showwaves->sample_count_mod == n) {
+ showwaves->sample_count_mod = 0;
+ showwaves->buf_idx++;
+ }
+ if (showwaves->buf_idx == showwaves->w)
+ if ((ret = push_frame(outlink)) < 0)
+ break;
+ outpicref = showwaves->outpicref;
+ }
+
+ av_frame_free(&insamples);
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ShowWavesContext *showwaves = ctx->priv;
+
+ switch (showwaves->mode) {
+ case MODE_POINT: showwaves->draw_sample = draw_sample_point; break;
+ case MODE_LINE: showwaves->draw_sample = draw_sample_line; break;
+ case MODE_P2P: showwaves->draw_sample = draw_sample_p2p; break;
+ case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break;
+ default:
+ return AVERROR_BUG;
+ }
+ return 0;
+}
+
+static const AVFilterPad showwaves_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad showwaves_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_avf_showwaves = {
+ .name = "showwaves",
+ .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(ShowWavesContext),
+ .inputs = showwaves_inputs,
+ .outputs = showwaves_outputs,
+ .priv_class = &showwaves_class,
+};
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 4098973..963f5e6 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -2,26 +2,29 @@
* filter layer
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/atomic.h"
+#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
@@ -33,34 +36,82 @@
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
-#include "video.h"
+
+static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);
+
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
+{
+ av_unused char buf[16];
+ ff_tlog(ctx,
+ "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
+ ref, ref->buf, ref->data[0],
+ ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
+ ref->pts, av_frame_get_pkt_pos(ref));
+
+ if (ref->width) {
+ ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
+ ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
+ ref->width, ref->height,
+ !ref->interlaced_frame ? 'P' : /* Progressive */
+ ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
+ ref->key_frame,
+ av_get_picture_type_char(ref->pict_type));
+ }
+ if (ref->nb_samples) {
+ ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
+ ref->channel_layout,
+ ref->nb_samples,
+ ref->sample_rate);
+ }
+
+ ff_tlog(ctx, "]%s", end ? "\n" : "");
+}
unsigned avfilter_version(void)
{
+ av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
return LIBAVFILTER_VERSION_INT;
}
const char *avfilter_configuration(void)
{
- return LIBAV_CONFIGURATION;
+ return FFMPEG_CONFIGURATION;
}
const char *avfilter_license(void)
{
#define LICENSE_PREFIX "libavfilter license: "
- return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+ return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
-void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
+void ff_command_queue_pop(AVFilterContext *filter)
+{
+ AVFilterCommand *c= filter->command_queue;
+ av_freep(&c->arg);
+ av_freep(&c->command);
+ filter->command_queue= c->next;
+ av_free(c);
+}
+
+int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad)
{
+ AVFilterLink **newlinks;
+ AVFilterPad *newpads;
unsigned i;
idx = FFMIN(idx, *count);
- *pads = av_realloc(*pads, sizeof(AVFilterPad) * (*count + 1));
- *links = av_realloc(*links, sizeof(AVFilterLink*) * (*count + 1));
+ newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
+ newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
+ if (newpads)
+ *pads = newpads;
+ if (newlinks)
+ *links = newlinks;
+ if (!newpads || !newlinks)
+ return AVERROR(ENOMEM);
+
memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
@@ -68,8 +119,10 @@ void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
(*count)++;
for (i = idx + 1; i < *count; i++)
- if (*links[i])
- (*(unsigned *)((uint8_t *) *links[i] + padidx_off))++;
+ if ((*links)[i])
+ (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
+
+ return 0;
}
int avfilter_link(AVFilterContext *src, unsigned srcpad,
@@ -83,8 +136,9 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
av_log(src, AV_LOG_ERROR,
- "Media type mismatch between the '%s' filter output pad %d and the '%s' filter input pad %d\n",
- src->name, srcpad, dst->name, dstpad);
+ "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
+ src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
+ dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
return AVERROR(EINVAL);
}
@@ -99,12 +153,32 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
link->srcpad = &src->output_pads[srcpad];
link->dstpad = &dst->input_pads[dstpad];
link->type = src->output_pads[srcpad].type;
- assert(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
+ av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
link->format = -1;
return 0;
}
+void avfilter_link_free(AVFilterLink **link)
+{
+ if (!*link)
+ return;
+
+ av_frame_free(&(*link)->partial_buf);
+
+ av_freep(link);
+}
+
+int avfilter_link_get_channels(AVFilterLink *link)
+{
+ return link->channels;
+}
+
+void avfilter_link_set_closed(AVFilterLink *link, int closed)
+{
+ link->closed = closed;
+}
+
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
{
@@ -150,6 +224,7 @@ int avfilter_config_links(AVFilterContext *filter)
for (i = 0; i < filter->nb_inputs; i ++) {
AVFilterLink *link = filter->inputs[i];
+ AVFilterLink *inlink;
if (!link) continue;
if (!link->src || !link->dst) {
@@ -158,6 +233,9 @@ int avfilter_config_links(AVFilterContext *filter)
return AVERROR(EINVAL);
}
+ inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
+ link->current_pts = AV_NOPTS_VALUE;
+
switch (link->init_state) {
case AVLINK_INIT:
continue;
@@ -185,26 +263,39 @@ int avfilter_config_links(AVFilterContext *filter)
return ret;
}
- if (link->time_base.num == 0 && link->time_base.den == 0)
- link->time_base = link->src->nb_inputs ?
- link->src->inputs[0]->time_base : AV_TIME_BASE_Q;
+ switch (link->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ if (!link->time_base.num && !link->time_base.den)
+ link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
- if (link->type == AVMEDIA_TYPE_VIDEO) {
if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
- link->sample_aspect_ratio = link->src->nb_inputs ?
- link->src->inputs[0]->sample_aspect_ratio : (AVRational){1,1};
+ link->sample_aspect_ratio = inlink ?
+ inlink->sample_aspect_ratio : (AVRational){1,1};
+
+ if (inlink && !link->frame_rate.num && !link->frame_rate.den)
+ link->frame_rate = inlink->frame_rate;
- if (link->src->nb_inputs) {
+ if (inlink) {
if (!link->w)
- link->w = link->src->inputs[0]->w;
+ link->w = inlink->w;
if (!link->h)
- link->h = link->src->inputs[0]->h;
+ link->h = inlink->h;
} else if (!link->w || !link->h) {
av_log(link->src, AV_LOG_ERROR,
"Video source filters must set their output link's "
"width and height\n");
return AVERROR(EINVAL);
}
+ break;
+
+ case AVMEDIA_TYPE_AUDIO:
+ if (inlink) {
+ if (!link->time_base.num && !link->time_base.den)
+ link->time_base = inlink->time_base;
+ }
+
+ if (!link->time_base.num && !link->time_base.den)
+ link->time_base = (AVRational) {1, link->sample_rate};
}
if ((config_link = link->dstpad->config_props))
@@ -222,11 +313,11 @@ int avfilter_config_links(AVFilterContext *filter)
return 0;
}
-void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
+void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
{
if (link->type == AVMEDIA_TYPE_VIDEO) {
- av_dlog(ctx,
- "link[%p s:%dx%d fmt:%-16s %-16s->%-16s]%s",
+ ff_tlog(ctx,
+ "link[%p s:%dx%d fmt:%s %s->%s]%s",
link, link->w, link->h,
av_get_pix_fmt_name(link->format),
link->src ? link->src->filter->name : "",
@@ -236,9 +327,9 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
char buf[128];
av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
- av_dlog(ctx,
- "link[%p r:%d cl:%s fmt:%-16s %-16s->%-16s]%s",
- link, link->sample_rate, buf,
+ ff_tlog(ctx,
+ "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
+ link, (int)link->sample_rate, buf,
av_get_sample_fmt_name(link->format),
link->src ? link->src->filter->name : "",
link->dst ? link->dst->filter->name : "",
@@ -248,13 +339,33 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
int ff_request_frame(AVFilterLink *link)
{
- FF_DPRINTF_START(NULL, request_frame); ff_dlog_link(NULL, link, 1);
-
- if (link->srcpad->request_frame)
- return link->srcpad->request_frame(link);
- else if (link->src->inputs[0])
- return ff_request_frame(link->src->inputs[0]);
- else return -1;
+ int ret = -1;
+ FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
+
+ if (link->closed)
+ return AVERROR_EOF;
+ av_assert0(!link->frame_requested);
+ link->frame_requested = 1;
+ while (link->frame_requested) {
+ if (link->srcpad->request_frame)
+ ret = link->srcpad->request_frame(link);
+ else if (link->src->inputs[0])
+ ret = ff_request_frame(link->src->inputs[0]);
+ if (ret == AVERROR_EOF && link->partial_buf) {
+ AVFrame *pbuf = link->partial_buf;
+ link->partial_buf = NULL;
+ ret = ff_filter_frame_framed(link, pbuf);
+ }
+ if (ret < 0) {
+ link->frame_requested = 0;
+ if (ret == AVERROR_EOF)
+ link->closed = 1;
+ } else {
+ av_assert0(!link->frame_requested ||
+ link->flags & FF_LINK_FLAG_REQUEST_LOOP);
+ }
+ }
+ return ret;
}
int ff_poll_frame(AVFilterLink *link)
@@ -275,7 +386,97 @@ int ff_poll_frame(AVFilterLink *link)
return min;
}
+static const char *const var_names[] = {
+ "t",
+ "n",
+ "pos",
+ "w",
+ "h",
+ NULL
+};
+
+enum {
+ VAR_T,
+ VAR_N,
+ VAR_POS,
+ VAR_W,
+ VAR_H,
+ VAR_VARS_NB
+};
+
+static int set_enable_expr(AVFilterContext *ctx, const char *expr)
+{
+ int ret;
+ char *expr_dup;
+ AVExpr *old = ctx->enable;
+
+ if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
+ av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
+ "with filter '%s'\n", ctx->filter->name);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ expr_dup = av_strdup(expr);
+ if (!expr_dup)
+ return AVERROR(ENOMEM);
+
+ if (!ctx->var_values) {
+ ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
+ if (!ctx->var_values) {
+ av_free(expr_dup);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx->priv);
+ if (ret < 0) {
+ av_log(ctx->priv, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for enable\n",
+ expr_dup);
+ av_free(expr_dup);
+ return ret;
+ }
+
+ av_expr_free(old);
+ av_free(ctx->enable_str);
+ ctx->enable_str = expr_dup;
+ return 0;
+}
+
+void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
+{
+ if (pts == AV_NOPTS_VALUE)
+ return;
+ link->current_pts = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
+ /* TODO use duration */
+ if (link->graph && link->age_index >= 0)
+ ff_avfilter_graph_update_heap(link->graph, link);
+}
+
+int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
+{
+ if(!strcmp(cmd, "ping")){
+ char local_res[256] = {0};
+
+ if (!res) {
+ res = local_res;
+ res_len = sizeof(local_res);
+ }
+ av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
+ if (res == local_res)
+ av_log(filter, AV_LOG_INFO, "%s", res);
+ return 0;
+ }else if(!strcmp(cmd, "enable")) {
+ return set_enable_expr(filter, arg);
+ }else if(filter->filter->process_command) {
+ return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
+ }
+ return AVERROR(ENOSYS);
+}
+
static AVFilter *first_filter;
+static AVFilter **last_filter = &first_filter;
#if !FF_API_NOCONST_GET_NAME
const
@@ -289,18 +490,31 @@ AVFilter *avfilter_get_by_name(const char *name)
while ((f = avfilter_next(f)))
if (!strcmp(f->name, name))
- return f;
+ return (AVFilter *)f;
return NULL;
}
int avfilter_register(AVFilter *filter)
{
- AVFilter **f = &first_filter;
- while (*f)
- f = &(*f)->next;
- *f = filter;
+ AVFilter **f = last_filter;
+ int i;
+
+ /* the filter must select generic or internal exclusively */
+ av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
+
+ for(i=0; filter->inputs && filter->inputs[i].name; i++) {
+ const AVFilterPad *input = &filter->inputs[i];
+ av_assert0( !input->filter_frame
+ || (!input->start_frame && !input->end_frame));
+ }
+
filter->next = NULL;
+
+ while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
+ f = &(*f)->next;
+ last_filter = &filter->next;
+
return 0;
}
@@ -332,10 +546,10 @@ int avfilter_pad_count(const AVFilterPad *pads)
return count;
}
-static const char *filter_name(void *p)
+static const char *default_filter_name(void *filter_ctx)
{
- AVFilterContext *filter = p;
- return filter->filter->name;
+ AVFilterContext *ctx = filter_ctx;
+ return ctx->name ? ctx->name : ctx->filter->name;
}
static void *filter_child_next(void *obj, void *prev)
@@ -350,10 +564,16 @@ static const AVClass *filter_child_class_next(const AVClass *prev)
{
const AVFilter *f = NULL;
+ /* find the filter that corresponds to prev */
while (prev && (f = avfilter_next(f)))
if (f->priv_class == prev)
break;
+ /* could not find filter corresponding to prev */
+ if (prev && !f)
+ return NULL;
+
+ /* find next filter with specific options */
while ((f = avfilter_next(f)))
if (f->priv_class)
return f->priv_class;
@@ -362,18 +582,20 @@ static const AVClass *filter_child_class_next(const AVClass *prev)
}
#define OFFSET(x) offsetof(AVFilterContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
static const AVOption avfilter_options[] = {
{ "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
{ .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
{ "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
+ { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
{ NULL },
};
static const AVClass avfilter_class = {
.class_name = "AVFilter",
- .item_name = filter_name,
+ .item_name = default_filter_name,
.version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
.child_next = filter_child_next,
.child_class_next = filter_child_class_next,
.option = avfilter_options,
@@ -425,22 +647,22 @@ AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
ret->nb_inputs = avfilter_pad_count(filter->inputs);
if (ret->nb_inputs ) {
- ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
+ ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
if (!ret->input_pads)
goto err;
memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
- ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs);
+ ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
if (!ret->inputs)
goto err;
}
ret->nb_outputs = avfilter_pad_count(filter->outputs);
if (ret->nb_outputs) {
- ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
+ ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
if (!ret->output_pads)
goto err;
memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
- ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs);
+ ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
if (!ret->outputs)
goto err;
}
@@ -490,13 +712,16 @@ static void free_link(AVFilterLink *link)
ff_formats_unref(&link->out_samplerates);
ff_channel_layouts_unref(&link->in_channel_layouts);
ff_channel_layouts_unref(&link->out_channel_layouts);
- av_freep(&link);
+ avfilter_link_free(&link);
}
void avfilter_free(AVFilterContext *filter)
{
int i;
+ if (!filter)
+ return;
+
if (filter->graph)
ff_filter_graph_remove_filter(filter->graph, filter);
@@ -519,41 +744,93 @@ void avfilter_free(AVFilterContext *filter)
av_freep(&filter->inputs);
av_freep(&filter->outputs);
av_freep(&filter->priv);
+ while(filter->command_queue){
+ ff_command_queue_pop(filter);
+ }
+ av_opt_free(filter);
+ av_expr_free(filter->enable);
+ filter->enable = NULL;
+ av_freep(&filter->var_values);
av_freep(&filter->internal);
av_free(filter);
}
-/* process a list of value1:value2:..., each value corresponding
- * to subsequent AVOption, in the order they are declared */
-static int process_unnamed_options(AVFilterContext *ctx, AVDictionary **options,
- const char *args)
+static int process_options(AVFilterContext *ctx, AVDictionary **options,
+ const char *args)
{
const AVOption *o = NULL;
- const char *p = args;
- char *val;
+ int ret, count = 0;
+ char *av_uninit(parsed_key), *av_uninit(value);
+ const char *key;
+ int offset= -1;
+
+ if (!args)
+ return 0;
+
+ while (*args) {
+ const char *shorthand = NULL;
- while (*p) {
o = av_opt_next(ctx->priv, o);
- if (!o) {
- av_log(ctx, AV_LOG_ERROR, "More options provided than "
- "this filter supports.\n");
- return AVERROR(EINVAL);
+ if (o) {
+ if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
+ continue;
+ offset = o->offset;
+ shorthand = o->name;
}
- if (o->type == AV_OPT_TYPE_CONST)
- continue;
- val = av_get_token(&p, ":");
- if (!val)
- return AVERROR(ENOMEM);
+ ret = av_opt_get_key_value(&args, "=", ":",
+ shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
+ &parsed_key, &value);
+ if (ret < 0) {
+ if (ret == AVERROR(EINVAL))
+ av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
+ else
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
+ av_err2str(ret));
+ return ret;
+ }
+ if (*args)
+ args++;
+ if (parsed_key) {
+ key = parsed_key;
+ while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
+ } else {
+ key = shorthand;
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
- av_dict_set(options, o->name, val, 0);
+ if (av_opt_find(ctx, key, NULL, 0, 0)) {
+ ret = av_opt_set(ctx, key, value, 0);
+ if (ret < 0) {
+ av_free(value);
+ av_free(parsed_key);
+ return ret;
+ }
+ } else {
+ av_dict_set(options, key, value, 0);
+ if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
+ if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
+ if (ret == AVERROR_OPTION_NOT_FOUND)
+ av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
+ av_free(value);
+ av_free(parsed_key);
+ return ret;
+ }
+ }
+ }
- av_freep(&val);
- if (*p)
- p++;
+ av_free(value);
+ av_free(parsed_key);
+ count++;
}
- return 0;
+ if (ctx->enable_str) {
+ ret = set_enable_expr(ctx, ctx->enable_str);
+ if (ret < 0)
+ return ret;
+ }
+ return count;
}
#if FF_API_AVFILTER_INIT_FILTER
@@ -590,7 +867,9 @@ int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
}
}
- if (ctx->filter->init)
+ if (ctx->filter->init_opaque)
+ ret = ctx->filter->init_opaque(ctx, NULL);
+ else if (ctx->filter->init)
ret = ctx->filter->init(ctx);
else if (ctx->filter->init_dict)
ret = ctx->filter->init_dict(ctx, options);
@@ -612,51 +891,20 @@ int avfilter_init_str(AVFilterContext *filter, const char *args)
}
#if FF_API_OLD_FILTER_OPTS
- if (!strcmp(filter->filter->name, "scale") &&
- strchr(args, ':') && strchr(args, ':') < strchr(args, '=')) {
- /* old w:h:flags=<flags> syntax */
- char *copy = av_strdup(args);
- char *p;
-
- av_log(filter, AV_LOG_WARNING, "The <w>:<h>:flags=<flags> option "
- "syntax is deprecated. Use either <w>:<h>:<flags> or "
- "w=<w>:h=<h>:flags=<flags>.\n");
-
- if (!copy) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- p = strrchr(copy, ':');
- if (p) {
- *p++ = 0;
- ret = av_dict_parse_string(&options, p, "=", ":", 0);
- }
- if (ret >= 0)
- ret = process_unnamed_options(filter, &options, copy);
- av_freep(&copy);
-
- if (ret < 0)
- goto fail;
- } else
-#endif
-
- if (strchr(args, '=')) {
- /* assume a list of key1=value1:key2=value2:... */
- ret = av_dict_parse_string(&options, args, "=", ":", 0);
- if (ret < 0)
- goto fail;
-#if FF_API_OLD_FILTER_OPTS
- } else if (!strcmp(filter->filter->name, "format") ||
+ if ( !strcmp(filter->filter->name, "format") ||
!strcmp(filter->filter->name, "noformat") ||
!strcmp(filter->filter->name, "frei0r") ||
!strcmp(filter->filter->name, "frei0r_src") ||
- !strcmp(filter->filter->name, "ocv")) {
+ !strcmp(filter->filter->name, "ocv") ||
+ !strcmp(filter->filter->name, "pan") ||
+ !strcmp(filter->filter->name, "pp") ||
+ !strcmp(filter->filter->name, "aevalsrc")) {
/* a hack for compatibility with the old syntax
* replace colons with |s */
char *copy = av_strdup(args);
char *p = copy;
int nb_leading = 0; // number of leading colons to skip
+ int deprecated = 0;
if (!copy) {
ret = AVERROR(ENOMEM);
@@ -678,22 +926,58 @@ int avfilter_init_str(AVFilterContext *filter, const char *args)
p++;
}
- if (strchr(p, ':')) {
- av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
- "'|' to separate the list items.\n");
- }
-
+ deprecated = strchr(p, ':') != NULL;
+
+ if (!strcmp(filter->filter->name, "aevalsrc")) {
+ deprecated = 0;
+ while ((p = strchr(p, ':')) && p[1] != ':') {
+ const char *epos = strchr(p + 1, '=');
+ const char *spos = strchr(p + 1, ':');
+ const int next_token_is_opt = epos && (!spos || epos < spos);
+ if (next_token_is_opt) {
+ p++;
+ break;
+ }
+ /* next token does not contain a '=', assume a channel expression */
+ deprecated = 1;
+ *p++ = '|';
+ }
+ if (p && *p == ':') { // double sep '::' found
+ deprecated = 1;
+ memmove(p, p + 1, strlen(p));
+ }
+ } else
while ((p = strchr(p, ':')))
*p++ = '|';
- ret = process_unnamed_options(filter, &options, copy);
+ if (deprecated)
+ av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
+ "'|' to separate the list items.\n");
+
+ av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
+ ret = process_options(filter, &options, copy);
av_freep(&copy);
if (ret < 0)
goto fail;
#endif
} else {
- ret = process_unnamed_options(filter, &options, args);
+#if CONFIG_MP_FILTER
+ if (!strcmp(filter->filter->name, "mp")) {
+ char *escaped;
+
+ if (!strncmp(args, "filter=", 7))
+ args += 7;
+ ret = av_escape(&escaped, args, ":=", AV_ESCAPE_MODE_BACKSLASH, 0);
+ if (ret < 0) {
+ av_log(filter, AV_LOG_ERROR, "Unable to escape MPlayer filters arg '%s'\n", args);
+ goto fail;
+ }
+ ret = process_options(filter, &options, escaped);
+ av_free(escaped);
+ } else
+#endif
+ ret = process_options(filter, &options, args);
if (ret < 0)
goto fail;
}
@@ -730,15 +1014,20 @@ static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
return ff_filter_frame(link->dst->outputs[0], frame);
}
-int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
+static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
int (*filter_frame)(AVFilterLink *, AVFrame *);
+ AVFilterContext *dstctx = link->dst;
AVFilterPad *dst = link->dstpad;
AVFrame *out = NULL;
int ret;
+ AVFilterCommand *cmd= link->dst->command_queue;
+ int64_t pts;
- FF_DPRINTF_START(NULL, filter_frame);
- ff_dlog_link(NULL, link, 1);
+ if (link->closed) {
+ av_frame_free(&frame);
+ return AVERROR_EOF;
+ }
if (!(filter_frame = dst->filter_frame))
filter_frame = default_filter_frame;
@@ -747,6 +1036,7 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
if (dst->needs_writable && !av_frame_is_writable(frame)) {
av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
+ /* Maybe use ff_copy_buffer_ref instead? */
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
out = ff_get_video_buffer(link, link->w, link->h);
@@ -769,7 +1059,7 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
- av_image_copy(out->data, out->linesize, frame->data, frame->linesize,
+ av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
frame->format, frame->width, frame->height);
break;
case AVMEDIA_TYPE_AUDIO:
@@ -787,7 +1077,34 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
} else
out = frame;
- return filter_frame(link, out);
+ while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)){
+ av_log(link->dst, AV_LOG_DEBUG,
+ "Processing command time:%f command:%s arg:%s\n",
+ cmd->time, cmd->command, cmd->arg);
+ avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
+ ff_command_queue_pop(link->dst);
+ cmd= link->dst->command_queue;
+ }
+
+ pts = out->pts;
+ if (dstctx->enable_str) {
+ int64_t pos = av_frame_get_pkt_pos(out);
+ dstctx->var_values[VAR_N] = link->frame_count;
+ dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
+ dstctx->var_values[VAR_W] = link->w;
+ dstctx->var_values[VAR_H] = link->h;
+ dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
+
+ dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
+ if (dstctx->is_disabled &&
+ (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
+ filter_frame = default_filter_frame;
+ }
+ ret = filter_frame(link, out);
+ link->frame_count++;
+ link->frame_requested = 0;
+ ff_update_link_current_pts(link, pts);
+ return ret;
fail:
av_frame_free(&out);
@@ -795,6 +1112,78 @@ fail:
return ret;
}
+static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame)
+{
+ int insamples = frame->nb_samples, inpos = 0, nb_samples;
+ AVFrame *pbuf = link->partial_buf;
+ int nb_channels = av_frame_get_channels(frame);
+ int ret = 0;
+
+ link->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ /* Handle framing (min_samples, max_samples) */
+ while (insamples) {
+ if (!pbuf) {
+ AVRational samples_tb = { 1, link->sample_rate };
+ pbuf = ff_get_audio_buffer(link, link->partial_buf_size);
+ if (!pbuf) {
+ av_log(link->dst, AV_LOG_WARNING,
+ "Samples dropped due to memory allocation failure.\n");
+ return 0;
+ }
+ av_frame_copy_props(pbuf, frame);
+ pbuf->pts = frame->pts;
+ if (pbuf->pts != AV_NOPTS_VALUE)
+ pbuf->pts += av_rescale_q(inpos, samples_tb, link->time_base);
+ pbuf->nb_samples = 0;
+ }
+ nb_samples = FFMIN(insamples,
+ link->partial_buf_size - pbuf->nb_samples);
+ av_samples_copy(pbuf->extended_data, frame->extended_data,
+ pbuf->nb_samples, inpos,
+ nb_samples, nb_channels, link->format);
+ inpos += nb_samples;
+ insamples -= nb_samples;
+ pbuf->nb_samples += nb_samples;
+ if (pbuf->nb_samples >= link->min_samples) {
+ ret = ff_filter_frame_framed(link, pbuf);
+ pbuf = NULL;
+ }
+ }
+ av_frame_free(&frame);
+ link->partial_buf = pbuf;
+ return ret;
+}
+
+int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
+{
+ FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
+
+ /* Consistency checks */
+ if (link->type == AVMEDIA_TYPE_VIDEO) {
+ if (strcmp(link->dst->filter->name, "scale")) {
+ av_assert1(frame->format == link->format);
+ av_assert1(frame->width == link->w);
+ av_assert1(frame->height == link->h);
+ }
+ } else {
+ av_assert1(frame->format == link->format);
+ av_assert1(av_frame_get_channels(frame) == link->channels);
+ av_assert1(frame->channel_layout == link->channel_layout);
+ av_assert1(frame->sample_rate == link->sample_rate);
+ }
+
+ /* Go directly to actual filtering if possible */
+ if (link->type == AVMEDIA_TYPE_AUDIO &&
+ link->min_samples &&
+ (link->partial_buf ||
+ frame->nb_samples < link->min_samples ||
+ frame->nb_samples > link->max_samples)) {
+ return ff_filter_frame_needs_framing(link, frame);
+ } else {
+ return ff_filter_frame_framed(link, frame);
+ }
+}
+
const AVClass *avfilter_get_class(void)
{
return &avfilter_class;
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index 1b42086..b5220b9 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -2,20 +2,20 @@
* filter layer
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,16 +33,16 @@
* @{
*/
+#include <stddef.h>
+
#include "libavutil/attributes.h"
#include "libavutil/avutil.h"
+#include "libavutil/dict.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
-#include "libavcodec/avcodec.h"
-
-#include <stddef.h>
#include "libavfilter/version.h"
@@ -61,7 +61,6 @@ const char *avfilter_configuration(void);
*/
const char *avfilter_license(void);
-
typedef struct AVFilterContext AVFilterContext;
typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
@@ -114,6 +113,9 @@ typedef struct AVFilterBuffer {
#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time
#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time
#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes
+#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned
+
+#define AVFILTER_ALIGN 16 //not part of ABI
/**
* Audio specific properties in a reference to an AVFilterBuffer. Since
@@ -122,9 +124,9 @@ typedef struct AVFilterBuffer {
*/
typedef struct AVFilterBufferRefAudioProps {
uint64_t channel_layout; ///< channel layout of audio buffer
- int nb_samples; ///< number of audio samples
+ int nb_samples; ///< number of audio samples per channel
int sample_rate; ///< audio buffer sample rate
- int planar; ///< audio buffer - planar or packed
+ int channels; ///< number of channels (do not access directly)
} AVFilterBufferRefAudioProps;
/**
@@ -135,11 +137,14 @@ typedef struct AVFilterBufferRefAudioProps {
typedef struct AVFilterBufferRefVideoProps {
int w; ///< image width
int h; ///< image height
- AVRational pixel_aspect; ///< pixel aspect ratio
+ AVRational sample_aspect_ratio; ///< sample aspect ratio
int interlaced; ///< is frame interlaced
int top_field_first; ///< field order
enum AVPictureType pict_type; ///< picture type of the frame
int key_frame; ///< 1 -> keyframe, 0-> not
+ int qp_table_linesize; ///< qp_table stride
+ int qp_table_size; ///< qp_table size
+ int8_t *qp_table; ///< array of Quantization Parameters
} AVFilterBufferRefVideoProps;
/**
@@ -186,13 +191,15 @@ typedef struct AVFilterBufferRef {
int perms; ///< permissions, see the AV_PERM_* flags
enum AVMediaType type; ///< media type of buffer data
+
+ AVDictionary *metadata; ///< dictionary containing metadata key=value tags
} AVFilterBufferRef;
/**
* Copy properties of src to dst, without copying the actual data
*/
attribute_deprecated
-void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src);
+void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, const AVFilterBufferRef *src);
/**
* Add a new reference to a buffer.
@@ -229,11 +236,19 @@ attribute_deprecated
void avfilter_unref_bufferp(AVFilterBufferRef **ref);
#endif
+/**
+ * Get the number of channels of a buffer reference.
+ */
+attribute_deprecated
+int avfilter_ref_get_channels(AVFilterBufferRef *ref);
+
#if FF_API_AVFILTERPAD_PUBLIC
/**
* A filter pad used for either input or output.
*
- * @warning this struct will be removed from public API.
+ * See doc/filter_design.txt for details on how to implement the methods.
+ *
+ * @warning this struct might be removed from public API.
* users should call avfilter_pad_get_name() and avfilter_pad_get_type()
* to access the name and type fields; there should be no need to access
* any other fields from outside of libavfilter.
@@ -252,22 +267,29 @@ struct AVFilterPad {
enum AVMediaType type;
/**
+ * Input pads:
* Minimum required permissions on incoming buffers. Any buffer with
* insufficient permissions will be automatically copied by the filter
* system to a new buffer which provides the needed access permissions.
*
- * Input pads only.
+ * Output pads:
+ * Guaranteed permissions on outgoing buffers. Any buffer pushed on the
+ * link must have at least these permissions; this fact is checked by
+ * asserts. It can be used to optimize buffer allocation.
*/
attribute_deprecated int min_perms;
/**
+ * Input pads:
* Permissions which are not accepted on incoming buffers. Any buffer
* which has any of these permissions set will be automatically copied
* by the filter system to a new buffer which does not have those
* permissions. This can be used to easily disallow buffers with
* AV_PERM_REUSE.
*
- * Input pads only.
+ * Output pads:
+ * Permissions which are automatically removed on outgoing buffers. It
+ * can be used to optimize buffer allocation.
*/
attribute_deprecated int rej_perms;
@@ -278,7 +300,7 @@ struct AVFilterPad {
/**
* Callback function to get a video buffer. If NULL, the filter system will
- * use avfilter_default_get_video_buffer().
+ * use ff_default_get_video_buffer().
*
* Input video pads only.
*/
@@ -286,7 +308,7 @@ struct AVFilterPad {
/**
* Callback function to get an audio buffer. If NULL, the filter system will
- * use avfilter_default_get_audio_buffer().
+ * use ff_default_get_audio_buffer().
*
* Input audio pads only.
*/
@@ -309,7 +331,7 @@ struct AVFilterPad {
* Input pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
- * must ensure that samplesref is properly unreferenced on error if it
+ * must ensure that frame is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
@@ -329,6 +351,8 @@ struct AVFilterPad {
* Frame request callback. A call to this should result in at least one
* frame being output over the given link. This should return zero on
* success, and another value on error.
+ * See ff_request_frame() for the error codes with a specific
+ * meaning.
*
* Output pads only.
*/
@@ -337,15 +361,18 @@ struct AVFilterPad {
/**
* Link configuration callback.
*
- * For output pads, this should set the link properties such as
- * width/height. This should NOT set the format property - that is
- * negotiated between filters by the filter system using the
+ * For output pads, this should set the following link properties:
+ * video: width, height, sample_aspect_ratio, time_base
+ * audio: sample_rate.
+ *
+ * This should NOT set properties such as format, channel_layout, etc which
+ * are negotiated between filters by the filter system using the
* query_formats() callback before this function is called.
*
* For input pads, this should check the properties of the link, and update
* the filter's internal state as necessary.
*
- * For both input and output filters, this should return zero on success,
+ * For both input and output pads, this should return zero on success,
* and another value on error.
*/
int (*config_props)(AVFilterLink *link);
@@ -413,6 +440,28 @@ enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);
* and processing them concurrently.
*/
#define AVFILTER_FLAG_SLICE_THREADS (1 << 2)
+/**
+ * Some filters support a generic "enable" expression option that can be used
+ * to enable or disable a filter in the timeline. Filters supporting this
+ * option have this flag set. When the enable expression is false, the default
+ * no-op filter_frame() function is called in place of the filter_frame()
+ * callback defined on each input pad, thus the frame is passed unchanged to
+ * the next filters.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16)
+/**
+ * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will
+ * have its filter_frame() callback(s) called as usual even when the enable
+ * expression is false. The filter will disable filtering within the
+ * filter_frame() callback(s) itself, for example executing code depending on
+ * the AVFilterContext->is_disabled value.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)
+/**
+ * Handy mask to test whether the filter supports or no the timeline feature
+ * (internally or generically).
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)
/**
* Filter definition. This defines the pads a filter contains, and all the
@@ -550,6 +599,27 @@ typedef struct AVFilter {
* code.
*/
struct AVFilter *next;
+
+ /**
+ * Make the filter instance process a command.
+ *
+ * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported.
+ * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be
+ * time consuming then a filter should treat it like an unsupported command
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+ int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+ /**
+ * Filter initialization function, alternative to the init()
+ * callback. Args contains the user-supplied parameters, opaque is
+ * used for providing binary data.
+ */
+ int (*init_opaque)(AVFilterContext *ctx, void *opaque);
} AVFilter;
/**
@@ -561,7 +631,7 @@ typedef struct AVFilterInternal AVFilterInternal;
/** An instance of a filter */
struct AVFilterContext {
- const AVClass *av_class; ///< needed for av_log()
+ const AVClass *av_class; ///< needed for av_log() and filters common options
const AVFilter *filter; ///< the AVFilter of which this is an instance
@@ -598,7 +668,7 @@ struct AVFilterContext {
* allowed threading types. I.e. a threading type needs to be set in both
* to be allowed.
*
- * After the filter is initialzed, libavfilter sets this field to the
+ * After the filter is initialized, libavfilter sets this field to the
* threading type that is actually used (0 for no multithreading).
*/
int thread_type;
@@ -607,6 +677,13 @@ struct AVFilterContext {
* An opaque struct for libavfilter internal use.
*/
AVFilterInternal *internal;
+
+ struct AVFilterCommand *command_queue;
+
+ char *enable_str; ///< enable expression string
+ void *enable; ///< parsed expression (AVExpr*)
+ double *var_values; ///< variable values for the enable expression
+ int is_disabled; ///< the enabled state from the last expression evaluation
};
/**
@@ -629,7 +706,7 @@ struct AVFilterLink {
int w; ///< agreed upon image width
int h; ///< agreed upon image height
AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
- /* These two parameters apply only to audio */
+ /* These parameters apply only to audio */
uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h)
int sample_rate; ///< samples per second
@@ -652,9 +729,11 @@ struct AVFilterLink {
*****************************************************************
*/
/**
- * Lists of formats supported by the input and output filters respectively.
- * These lists are used for negotiating the format to actually be used,
- * which will be loaded into the format member, above, when chosen.
+ * Lists of formats and channel layouts supported by the input and output
+ * filters respectively. These lists are used for negotiating the format
+ * to actually be used, which will be loaded into the format and
+ * channel_layout members, above, when chosen.
+ *
*/
AVFilterFormats *in_formats;
AVFilterFormats *out_formats;
@@ -683,6 +762,104 @@ struct AVFilterLink {
AVLINK_STARTINIT, ///< started, but incomplete
AVLINK_INIT ///< complete
} init_state;
+
+ struct AVFilterPool *pool;
+
+ /**
+ * Graph the filter belongs to.
+ */
+ struct AVFilterGraph *graph;
+
+ /**
+ * Current timestamp of the link, as defined by the most recent
+ * frame(s), in AV_TIME_BASE units.
+ */
+ int64_t current_pts;
+
+ /**
+ * Index in the age array.
+ */
+ int age_index;
+
+ /**
+ * Frame rate of the stream on the link, or 1/0 if unknown;
+ * if left to 0/0, will be automatically be copied from the first input
+ * of the source filter if it exists.
+ *
+ * Sources should set it to the best estimation of the real frame rate.
+ * Filters should update it if necessary depending on their function.
+ * Sinks can use it to set a default output frame rate.
+ * It is similar to the r_frame_rate field in AVStream.
+ */
+ AVRational frame_rate;
+
+ /**
+ * Buffer partially filled with samples to achieve a fixed/minimum size.
+ */
+ AVFrame *partial_buf;
+
+ /**
+ * Size of the partial buffer to allocate.
+ * Must be between min_samples and max_samples.
+ */
+ int partial_buf_size;
+
+ /**
+ * Minimum number of samples to filter at once. If filter_frame() is
+ * called with fewer samples, it will accumulate them in partial_buf.
+ * This field and the related ones must not be changed after filtering
+ * has started.
+ * If 0, all related fields are ignored.
+ */
+ int min_samples;
+
+ /**
+ * Maximum number of samples to filter at once. If filter_frame() is
+ * called with more samples, it will split them.
+ */
+ int max_samples;
+
+ /**
+ * The buffer reference currently being received across the link by the
+ * destination filter. This is used internally by the filter system to
+ * allow automatic copying of buffers which do not have sufficient
+ * permissions for the destination. This should not be accessed directly
+ * by the filters.
+ */
+ AVFilterBufferRef *cur_buf_copy;
+
+ /**
+ * True if the link is closed.
+ * If set, all attempts of start_frame, filter_frame or request_frame
+ * will fail with AVERROR_EOF, and if necessary the reference will be
+ * destroyed.
+ * If request_frame returns AVERROR_EOF, this flag is set on the
+ * corresponding link.
+ * It can be set also be set by either the source or the destination
+ * filter.
+ */
+ int closed;
+
+ /**
+ * Number of channels.
+ */
+ int channels;
+
+ /**
+ * True if a frame is being requested on the link.
+ * Used internally by the framework.
+ */
+ unsigned frame_requested;
+
+ /**
+ * Link processing flags.
+ */
+ unsigned flags;
+
+ /**
+ * Number of past frames sent through the link.
+ */
+ int64_t frame_count;
};
/**
@@ -698,6 +875,21 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
AVFilterContext *dst, unsigned dstpad);
/**
+ * Free the link in *link, and set its pointer to NULL.
+ */
+void avfilter_link_free(AVFilterLink **link);
+
+/**
+ * Get the number of channels of a link.
+ */
+int avfilter_link_get_channels(AVFilterLink *link);
+
+/**
+ * Set the closed field of a link.
+ */
+void avfilter_link_set_closed(AVFilterLink *link, int closed);
+
+/**
* Negotiate the media format, dimensions, etc of all inputs to a filter.
*
* @param filter the filter to negotiate the properties for its inputs
@@ -719,13 +911,16 @@ int avfilter_config_links(AVFilterContext *filter);
*/
attribute_deprecated
AVFilterBufferRef *
-avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int perms,
+avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
int w, int h, enum AVPixelFormat format);
/**
* Create an audio buffer reference wrapped around an already
* allocated samples buffer.
*
+ * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version
+ * that can handle unknown channel layouts.
+ *
* @param data pointers to the samples plane buffers
* @param linesize linesize for the samples plane buffers
* @param perms the required access permissions
@@ -740,8 +935,40 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
int nb_samples,
enum AVSampleFormat sample_fmt,
uint64_t channel_layout);
+/**
+ * Create an audio buffer reference wrapped around an already
+ * allocated samples buffer.
+ *
+ * @param data pointers to the samples plane buffers
+ * @param linesize linesize for the samples plane buffers
+ * @param perms the required access permissions
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the format of each sample in the buffer to allocate
+ * @param channels the number of channels of the buffer
+ * @param channel_layout the channel layout of the buffer,
+ * must be either 0 or consistent with channels
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
+ int linesize,
+ int perms,
+ int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ int channels,
+ uint64_t channel_layout);
+
#endif
+
+#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically
+#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw)
+
+/**
+ * Make the filter instance process a command.
+ * It is recommended to use avfilter_graph_send_command().
+ */
+int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
/** Initialize the filter system. Register all builtin filters. */
void avfilter_register_all(void);
@@ -758,7 +985,7 @@ void avfilter_uninit(void);
* is not registered.
*
* @param filter the filter to register
- * @return 0 if the registration was succesfull, a negative value
+ * @return 0 if the registration was successful, a negative value
* otherwise
*/
int avfilter_register(AVFilter *filter);
@@ -941,7 +1168,7 @@ typedef struct AVFilterGraph {
const AVClass *av_class;
#if FF_API_FOO_COUNT
attribute_deprecated
- unsigned filter_count;
+ unsigned filter_count_unused;
#endif
AVFilterContext **filters;
#if !FF_API_FOO_COUNT
@@ -1000,6 +1227,20 @@ typedef struct AVFilterGraph {
* platform and build options.
*/
avfilter_execute_func *execute;
+
+ char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions
+
+ /**
+ * Private fields
+ *
+ * The following fields are for internal use only.
+ * Their type, offset, number and semantic can change without notice.
+ */
+
+ AVFilterLink **sink_links;
+ int sink_links_count;
+
+ unsigned disable_auto_convert;
} AVFilterGraph;
/**
@@ -1019,19 +1260,21 @@ AVFilterGraph *avfilter_graph_alloc(void);
*
* @return the context of the newly created filter instance (note that it is
* also retrievable directly through AVFilterGraph.filters or with
- * avfilter_graph_get_filter()) on success or NULL or failure.
+ * avfilter_graph_get_filter()) on success or NULL on failure.
*/
AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
const AVFilter *filter,
const char *name);
/**
- * Get a filter instance with name name from graph.
+ * Get a filter instance identified by instance name from graph.
*
+ * @param graph filter graph to search through.
+ * @param name filter instance name (should be unique in the graph).
* @return the pointer to the found filter instance or NULL if it
* cannot be found.
*/
-AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);
+AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name);
#if FF_API_AVFILTER_OPEN
/**
@@ -1065,11 +1308,26 @@ int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *fil
AVFilterGraph *graph_ctx);
/**
+ * Enable or disable automatic format conversion inside the graph.
+ *
+ * Note that format conversion can still happen inside explicitly inserted
+ * scale and aresample filters.
+ *
+ * @param flags any of the AVFILTER_AUTO_CONVERT_* constants
+ */
+void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);
+
+enum {
+ AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */
+ AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */
+};
+
+/**
* Check validity and configure all the links and formats in the graph.
*
* @param graphctx the filter graph
* @param log_ctx context used for logging
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
@@ -1115,9 +1373,19 @@ AVFilterInOut *avfilter_inout_alloc(void);
*/
void avfilter_inout_free(AVFilterInOut **inout);
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
/**
* Add a graph described by a string to a graph.
*
+ * @note The caller must provide the lists of inputs and outputs,
+ * which therefore must be known before calling the function.
+ *
+ * @note The inputs parameter describes inputs of the already existing
+ * part of the graph; i.e. from the point of view of the newly created
+ * part, they are outputs. Similarly the outputs parameter describes
+ * outputs of the already existing filters, which are provided as
+ * inputs to the parsed filters.
+ *
* @param graph the filter graph where to link the parsed graph context
* @param filters string to be parsed
* @param inputs linked list to the inputs of the graph
@@ -1127,6 +1395,43 @@ void avfilter_inout_free(AVFilterInOut **inout);
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut *inputs, AVFilterInOut *outputs,
void *log_ctx);
+#else
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ * @deprecated Use avfilter_graph_parse_ptr() instead.
+ */
+attribute_deprecated
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+#endif
+
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
/**
* Add a graph described by a string to a graph.
@@ -1141,21 +1446,13 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
* caller using avfilter_inout_free().
* @return zero on success, a negative AVERROR code on error
*
- * @note the difference between avfilter_graph_parse2() and
- * avfilter_graph_parse() is that in avfilter_graph_parse(), the caller provides
- * the lists of inputs and outputs, which therefore must be known before calling
- * the function. On the other hand, avfilter_graph_parse2() \em returns the
- * inputs and outputs that are left unlinked after parsing the graph and the
- * caller then deals with them. Another difference is that in
- * avfilter_graph_parse(), the inputs parameter describes inputs of the
- * <em>already existing</em> part of the graph; i.e. from the point of view of
- * the newly created part, they are outputs. Similarly the outputs parameter
- * describes outputs of the already existing filters, which are provided as
- * inputs to the parsed filters.
- * avfilter_graph_parse2() takes the opposite approach -- it makes no reference
- * whatsoever to already existing parts of the graph and the inputs parameter
- * will on return contain inputs of the newly parsed part of the graph.
- * Analogously the outputs parameter will contain outputs of the newly created
+ * @note This function returns the inputs and outputs that are left
+ * unlinked after parsing the graph and the caller then deals with
+ * them.
+ * @note This function makes no reference whatsoever to already
+ * existing parts of the graph and the inputs parameter will on return
+ * contain inputs of the newly parsed part of the graph. Analogously
+ * the outputs parameter will contain outputs of the newly created
* filters.
*/
int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
@@ -1163,6 +1460,71 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **outputs);
/**
+ * Send a command to one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_size where the filter(s) can return a response.
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+/**
+ * Queue a command for one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to sent, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param ts time at which the command should be sent to the filter
+ *
+ * @note As this executes commands after this function returns, no return code
+ * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.
+ */
+int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
+
+
+/**
+ * Dump a graph into a human-readable string representation.
+ *
+ * @param graph the graph to dump
+ * @param options formatting options; currently ignored
+ * @return a string, or NULL in case of memory allocation failure;
+ * the string must be freed using av_free
+ */
+char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
+
+/**
+ * Request a frame on the oldest sink link.
+ *
+ * If the request returns AVERROR_EOF, try the next.
+ *
+ * Note that this function is not meant to be the sole scheduling mechanism
+ * of a filtergraph, only a convenience function to help drain a filtergraph
+ * in a balanced way under normal circumstances.
+ *
+ * Also note that AVERROR_EOF does not mean that frames did not arrive on
+ * some of the sinks during the process.
+ * When there are multiple sink links, in case the requested link
+ * returns an EOF, this may cause a filter to flush pending frames
+ * which are sent to another sink link, although unrequested.
+ *
+ * @return the return value of ff_request_frame(),
+ * or AVERROR_EOF if all links returned AVERROR_EOF
+ */
+int avfilter_graph_request_oldest(AVFilterGraph *graph);
+
+/**
* @}
*/
diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c
index 0fc385c..a859ecb 100644
--- a/libavfilter/avfiltergraph.c
+++ b/libavfilter/avfiltergraph.c
@@ -3,20 +3,20 @@
* Copyright (c) 2008 Vitor Sessak
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,11 +26,11 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
-#include "libavutil/common.h"
#include "libavutil/internal.h"
-#include "libavutil/log.h"
#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
@@ -38,13 +38,17 @@
#include "thread.h"
#define OFFSET(x) offsetof(AVFilterGraph, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption filtergraph_options[] = {
{ "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
{ .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
{ "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
{ "threads", "Maximum number of threads", OFFSET(nb_threads),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ {"scale_sws_opts" , "default scale filter options" , OFFSET(scale_sws_opts) ,
+ AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ {"aresample_swr_opts" , "default aresample filter options" , OFFSET(aresample_swr_opts) ,
+ AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ NULL },
};
@@ -53,6 +57,7 @@ static const AVClass filtergraph_class = {
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.option = filtergraph_options,
+ .category = AV_CLASS_CATEGORY_FILTER,
};
#if !HAVE_THREADS
@@ -109,7 +114,10 @@ void avfilter_graph_free(AVFilterGraph **graph)
ff_graph_thread_free(*graph);
+ av_freep(&(*graph)->sink_links);
+
av_freep(&(*graph)->scale_sws_opts);
+ av_freep(&(*graph)->aresample_swr_opts);
av_freep(&(*graph)->resample_lavr_opts);
av_freep(&(*graph)->filters);
av_freep(&(*graph)->internal);
@@ -129,7 +137,7 @@ int avfilter_graph_add_filter(AVFilterGraph *graph, AVFilterContext *filter)
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
- graph->filter_count = graph->nb_filters;
+ graph->filter_count_unused = graph->nb_filters;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
@@ -162,6 +170,11 @@ fail:
return ret;
}
+void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
+{
+ graph->disable_auto_convert = flags;
+}
+
AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
const AVFilter *filter,
const char *name)
@@ -195,7 +208,7 @@ AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
- graph->filter_count = graph->nb_filters;
+ graph->filter_count_unused = graph->nb_filters;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
@@ -210,7 +223,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
* A graph is considered valid if all its input and output pads are
* connected.
*
- * @return 0 in case of success, a negative value otherwise
+ * @return >= 0 in case of success, a negative value otherwise
*/
static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
{
@@ -218,22 +231,25 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
int i, j;
for (i = 0; i < graph->nb_filters; i++) {
+ const AVFilterPad *pad;
filt = graph->filters[i];
for (j = 0; j < filt->nb_inputs; j++) {
if (!filt->inputs[j] || !filt->inputs[j]->src) {
+ pad = &filt->input_pads[j];
av_log(log_ctx, AV_LOG_ERROR,
- "Input pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any source\n",
- filt->input_pads[j].name, filt->name, filt->filter->name);
+ "Input pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any source\n",
+ pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
return AVERROR(EINVAL);
}
}
for (j = 0; j < filt->nb_outputs; j++) {
if (!filt->outputs[j] || !filt->outputs[j]->dst) {
+ pad = &filt->output_pads[j];
av_log(log_ctx, AV_LOG_ERROR,
- "Output pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any destination\n",
- filt->output_pads[j].name, filt->name, filt->filter->name);
+ "Output pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any destination\n",
+ pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
return AVERROR(EINVAL);
}
}
@@ -245,7 +261,7 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
/**
* Configure all the links of graphctx.
*
- * @return 0 in case of success, a negative value otherwise
+ * @return >= 0 in case of success, a negative value otherwise
*/
static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
{
@@ -264,7 +280,7 @@ static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
return 0;
}
-AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name)
+AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name)
{
int i;
@@ -275,17 +291,169 @@ AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name)
return NULL;
}
+static void sanitize_channel_layouts(void *log, AVFilterChannelLayouts *l)
+{
+ if (!l)
+ return;
+ if (l->nb_channel_layouts) {
+ if (l->all_layouts || l->all_counts)
+ av_log(log, AV_LOG_WARNING, "All layouts set on non-empty list\n");
+ l->all_layouts = l->all_counts = 0;
+ } else {
+ if (l->all_counts && !l->all_layouts)
+ av_log(log, AV_LOG_WARNING, "All counts without all layouts\n");
+ l->all_layouts = 1;
+ }
+}
+
+static int filter_query_formats(AVFilterContext *ctx)
+{
+ int ret, i;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *chlayouts;
+ AVFilterFormats *samplerates;
+ enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
+ ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
+ AVMEDIA_TYPE_VIDEO;
+
+ if ((ret = ctx->filter->query_formats(ctx)) < 0) {
+ if (ret != AVERROR(EAGAIN))
+ av_log(ctx, AV_LOG_ERROR, "Query format failed for '%s': %s\n",
+ ctx->name, av_err2str(ret));
+ return ret;
+ }
+
+ for (i = 0; i < ctx->nb_inputs; i++)
+ sanitize_channel_layouts(ctx, ctx->inputs[i]->out_channel_layouts);
+ for (i = 0; i < ctx->nb_outputs; i++)
+ sanitize_channel_layouts(ctx, ctx->outputs[i]->in_channel_layouts);
+
+ formats = ff_all_formats(type);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+ if (type == AVMEDIA_TYPE_AUDIO) {
+ samplerates = ff_all_samplerates();
+ if (!samplerates)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, samplerates);
+ chlayouts = ff_all_channel_layouts();
+ if (!chlayouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, chlayouts);
+ }
+ return 0;
+}
+
+static int formats_declared(AVFilterContext *f)
+{
+ int i;
+
+ for (i = 0; i < f->nb_inputs; i++) {
+ if (!f->inputs[i]->out_formats)
+ return 0;
+ if (f->inputs[i]->type == AVMEDIA_TYPE_AUDIO &&
+ !(f->inputs[i]->out_samplerates &&
+ f->inputs[i]->out_channel_layouts))
+ return 0;
+ }
+ for (i = 0; i < f->nb_outputs; i++) {
+ if (!f->outputs[i]->in_formats)
+ return 0;
+ if (f->outputs[i]->type == AVMEDIA_TYPE_AUDIO &&
+ !(f->outputs[i]->in_samplerates &&
+ f->outputs[i]->in_channel_layouts))
+ return 0;
+ }
+ return 1;
+}
+
+static AVFilterFormats *clone_filter_formats(AVFilterFormats *arg)
+{
+ AVFilterFormats *a = av_memdup(arg, sizeof(*arg));
+ if (a) {
+ a->refcount = 0;
+ a->refs = NULL;
+ a->formats = av_memdup(a->formats, sizeof(*a->formats) * a->nb_formats);
+ if (!a->formats && arg->formats)
+ av_freep(&a);
+ }
+ return a;
+}
+
+static int can_merge_formats(AVFilterFormats *a_arg,
+ AVFilterFormats *b_arg,
+ enum AVMediaType type,
+ int is_sample_rate)
+{
+ AVFilterFormats *a, *b, *ret;
+ if (a_arg == b_arg)
+ return 1;
+ a = clone_filter_formats(a_arg);
+ b = clone_filter_formats(b_arg);
+
+ if (!a || !b) {
+ if (a)
+ av_freep(&a->formats);
+ if (b)
+ av_freep(&b->formats);
+
+ av_freep(&a);
+ av_freep(&b);
+
+ return 0;
+ }
+
+ if (is_sample_rate) {
+ ret = ff_merge_samplerates(a, b);
+ } else {
+ ret = ff_merge_formats(a, b, type);
+ }
+ if (ret) {
+ av_freep(&ret->formats);
+ av_freep(&ret->refs);
+ av_freep(&ret);
+ return 1;
+ } else {
+ av_freep(&a->formats);
+ av_freep(&b->formats);
+ av_freep(&a);
+ av_freep(&b);
+ return 0;
+ }
+}
+
+/**
+ * Perform one round of query_formats() and merging formats lists on the
+ * filter graph.
+ * @return >=0 if all links formats lists could be queried and merged;
+ * AVERROR(EAGAIN) some progress was made in the queries or merging
+ * and a later call may succeed;
+ * AVERROR(EIO) (may be changed) plus a log message if no progress
+ * was made and the negotiation is stuck;
+ * a negative error code if some other error happened
+ */
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
int i, j, ret;
int scaler_count = 0, resampler_count = 0;
+ int count_queried = 0; /* successful calls to query_formats() */
+ int count_merged = 0; /* successful merge of formats lists */
+ int count_already_merged = 0; /* lists already merged */
+ int count_delayed = 0; /* lists that need to be merged later */
- /* ask all the sub-filters for their supported media formats */
for (i = 0; i < graph->nb_filters; i++) {
- if (graph->filters[i]->filter->query_formats)
- graph->filters[i]->filter->query_formats(graph->filters[i]);
+ AVFilterContext *f = graph->filters[i];
+ if (formats_declared(f))
+ continue;
+ if (f->filter->query_formats)
+ ret = filter_query_formats(f);
else
- ff_default_query_formats(graph->filters[i]);
+ ret = ff_default_query_formats(f);
+ if (ret < 0 && ret != AVERROR(EAGAIN))
+ return ret;
+ /* note: EAGAIN could indicate a partial success, not counted yet */
+ count_queried += ret >= 0;
}
/* go through and merge as many format lists as possible */
@@ -299,21 +467,49 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
if (!link)
continue;
- if (link->in_formats != link->out_formats &&
- !ff_merge_formats(link->in_formats,
- link->out_formats))
- convert_needed = 1;
- if (link->type == AVMEDIA_TYPE_AUDIO) {
- if (link->in_channel_layouts != link->out_channel_layouts &&
- !ff_merge_channel_layouts(link->in_channel_layouts,
- link->out_channel_layouts))
- convert_needed = 1;
- if (link->in_samplerates != link->out_samplerates &&
- !ff_merge_samplerates(link->in_samplerates,
- link->out_samplerates))
+ if (link->in_formats != link->out_formats
+ && link->in_formats && link->out_formats)
+ if (!can_merge_formats(link->in_formats, link->out_formats,
+ link->type, 0))
convert_needed = 1;
+ if (link->type == AVMEDIA_TYPE_AUDIO) {
+ if (link->in_samplerates != link->out_samplerates
+ && link->in_samplerates && link->out_samplerates)
+ if (!can_merge_formats(link->in_samplerates,
+ link->out_samplerates,
+ 0, 1))
+ convert_needed = 1;
}
+#define MERGE_DISPATCH(field, statement) \
+ if (!(link->in_ ## field && link->out_ ## field)) { \
+ count_delayed++; \
+ } else if (link->in_ ## field == link->out_ ## field) { \
+ count_already_merged++; \
+ } else if (!convert_needed) { \
+ count_merged++; \
+ statement \
+ }
+
+ if (link->type == AVMEDIA_TYPE_AUDIO) {
+ MERGE_DISPATCH(channel_layouts,
+ if (!ff_merge_channel_layouts(link->in_channel_layouts,
+ link->out_channel_layouts))
+ convert_needed = 1;
+ )
+ MERGE_DISPATCH(samplerates,
+ if (!ff_merge_samplerates(link->in_samplerates,
+ link->out_samplerates))
+ convert_needed = 1;
+ )
+ }
+ MERGE_DISPATCH(formats,
+ if (!ff_merge_formats(link->in_formats, link->out_formats,
+ link->type))
+ convert_needed = 1;
+ )
+#undef MERGE_DISPATCH
+
if (convert_needed) {
AVFilterContext *convert;
AVFilter *filter;
@@ -339,8 +535,8 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
return ret;
break;
case AVMEDIA_TYPE_AUDIO:
- if (!(filter = avfilter_get_by_name("resample"))) {
- av_log(log_ctx, AV_LOG_ERROR, "'resample' filter "
+ if (!(filter = avfilter_get_by_name("aresample"))) {
+ av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter "
"not present, cannot convert audio formats.\n");
return AVERROR(EINVAL);
}
@@ -348,11 +544,11 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
resampler_count++);
scale_args[0] = '\0';
- if (graph->resample_lavr_opts)
+ if (graph->aresample_swr_opts)
snprintf(scale_args, sizeof(scale_args), "%s",
- graph->resample_lavr_opts);
+ graph->aresample_swr_opts);
if ((ret = avfilter_graph_create_filter(&convert, filter,
- inst_name, scale_args,
+ inst_name, graph->aresample_swr_opts,
NULL, graph)) < 0)
return ret;
break;
@@ -363,24 +559,40 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
return ret;
- convert->filter->query_formats(convert);
+ if ((ret = filter_query_formats(convert)) < 0)
+ return ret;
+
inlink = convert->inputs[0];
outlink = convert->outputs[0];
- if (!ff_merge_formats( inlink->in_formats, inlink->out_formats) ||
- !ff_merge_formats(outlink->in_formats, outlink->out_formats))
- ret |= AVERROR(ENOSYS);
+ av_assert0( inlink-> in_formats->refcount > 0);
+ av_assert0( inlink->out_formats->refcount > 0);
+ av_assert0(outlink-> in_formats->refcount > 0);
+ av_assert0(outlink->out_formats->refcount > 0);
+ if (outlink->type == AVMEDIA_TYPE_AUDIO) {
+ av_assert0( inlink-> in_samplerates->refcount > 0);
+ av_assert0( inlink->out_samplerates->refcount > 0);
+ av_assert0(outlink-> in_samplerates->refcount > 0);
+ av_assert0(outlink->out_samplerates->refcount > 0);
+ av_assert0( inlink-> in_channel_layouts->refcount > 0);
+ av_assert0( inlink->out_channel_layouts->refcount > 0);
+ av_assert0(outlink-> in_channel_layouts->refcount > 0);
+ av_assert0(outlink->out_channel_layouts->refcount > 0);
+ }
+ if (!ff_merge_formats( inlink->in_formats, inlink->out_formats, inlink->type) ||
+ !ff_merge_formats(outlink->in_formats, outlink->out_formats, outlink->type))
+ ret = AVERROR(ENOSYS);
if (inlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(inlink->in_samplerates,
inlink->out_samplerates) ||
!ff_merge_channel_layouts(inlink->in_channel_layouts,
inlink->out_channel_layouts)))
- ret |= AVERROR(ENOSYS);
+ ret = AVERROR(ENOSYS);
if (outlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(outlink->in_samplerates,
outlink->out_samplerates) ||
!ff_merge_channel_layouts(outlink->in_channel_layouts,
outlink->out_channel_layouts)))
- ret |= AVERROR(ENOSYS);
+ ret = AVERROR(ENOSYS);
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
@@ -392,14 +604,54 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
}
}
+ av_log(graph, AV_LOG_DEBUG, "query_formats: "
+ "%d queried, %d merged, %d already done, %d delayed\n",
+ count_queried, count_merged, count_already_merged, count_delayed);
+ if (count_delayed) {
+ AVBPrint bp;
+
+ /* if count_queried > 0, one filter at least did set its formats,
+ that will give additional information to its neighbour;
+ if count_merged > 0, one pair of formats lists at least was merged,
+ that will give additional information to all connected filters;
+ in both cases, progress was made and a new round must be done */
+ if (count_queried || count_merged)
+ return AVERROR(EAGAIN);
+ av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ for (i = 0; i < graph->nb_filters; i++)
+ if (!formats_declared(graph->filters[i]))
+ av_bprintf(&bp, "%s%s", bp.len ? ", " : "",
+ graph->filters[i]->name);
+ av_log(graph, AV_LOG_ERROR,
+ "The following filters could not choose their formats: %s\n"
+ "Consider inserting the (a)format filter near their input or "
+ "output.\n", bp.str);
+ return AVERROR(EIO);
+ }
return 0;
}
-static int pick_format(AVFilterLink *link)
+static int pick_format(AVFilterLink *link, AVFilterLink *ref)
{
if (!link || !link->in_formats)
return 0;
+ if (link->type == AVMEDIA_TYPE_VIDEO) {
+ if(ref && ref->type == AVMEDIA_TYPE_VIDEO){
+ int has_alpha= av_pix_fmt_desc_get(ref->format)->nb_components % 2 == 0;
+ enum AVPixelFormat best= AV_PIX_FMT_NONE;
+ int i;
+ for (i=0; i<link->in_formats->nb_formats; i++) {
+ enum AVPixelFormat p = link->in_formats->formats[i];
+ best= av_find_best_pix_fmt_of_2(best, p, ref->format, has_alpha, NULL);
+ }
+ av_log(link->src,AV_LOG_DEBUG, "picking %s out of %d ref:%s alpha:%d\n",
+ av_get_pix_fmt_name(best), link->in_formats->nb_formats,
+ av_get_pix_fmt_name(ref->format), has_alpha);
+ link->in_formats->formats[0] = best;
+ }
+ }
+
link->in_formats->nb_formats = 1;
link->format = link->in_formats->formats[0];
@@ -413,14 +665,22 @@ static int pick_format(AVFilterLink *link)
link->in_samplerates->nb_formats = 1;
link->sample_rate = link->in_samplerates->formats[0];
- if (!link->in_channel_layouts->nb_channel_layouts) {
+ if (link->in_channel_layouts->all_layouts) {
av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
- "the link between filters %s and %s.\n", link->src->name,
+ " the link between filters %s and %s.\n", link->src->name,
link->dst->name);
+ if (!link->in_channel_layouts->all_counts)
+ av_log(link->src, AV_LOG_ERROR, "Unknown channel layouts not "
+ "supported, try specifying a channel layout using "
+ "'aformat=channel_layouts=something'.\n");
return AVERROR(EINVAL);
}
link->in_channel_layouts->nb_channel_layouts = 1;
link->channel_layout = link->in_channel_layouts->channel_layouts[0];
+ if ((link->channels = FF_LAYOUT2COUNT(link->channel_layout)))
+ link->channel_layout = 0;
+ else
+ link->channels = av_get_channel_layout_nb_channels(link->channel_layout);
}
ff_formats_unref(&link->in_formats);
@@ -454,6 +714,7 @@ do { \
\
if (!out_link->in_ ## list->nb) { \
add_format(&out_link->in_ ##list, fmt); \
+ ret = 1; \
break; \
} \
\
@@ -476,8 +737,43 @@ static int reduce_formats_on_filter(AVFilterContext *filter)
nb_formats, ff_add_format);
REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats,
nb_formats, ff_add_format);
- REDUCE_FORMATS(uint64_t, AVFilterChannelLayouts, channel_layouts,
- channel_layouts, nb_channel_layouts, ff_add_channel_layout);
+
+ /* reduce channel layouts */
+ for (i = 0; i < filter->nb_inputs; i++) {
+ AVFilterLink *inlink = filter->inputs[i];
+ uint64_t fmt;
+
+ if (!inlink->out_channel_layouts ||
+ inlink->out_channel_layouts->nb_channel_layouts != 1)
+ continue;
+ fmt = inlink->out_channel_layouts->channel_layouts[0];
+
+ for (j = 0; j < filter->nb_outputs; j++) {
+ AVFilterLink *outlink = filter->outputs[j];
+ AVFilterChannelLayouts *fmts;
+
+ fmts = outlink->in_channel_layouts;
+ if (inlink->type != outlink->type || fmts->nb_channel_layouts == 1)
+ continue;
+
+ if (fmts->all_layouts &&
+ (!FF_LAYOUT2COUNT(fmt) || fmts->all_counts)) {
+ /* Turn the infinite list into a singleton */
+ fmts->all_layouts = fmts->all_counts = 0;
+ ff_add_channel_layout(&outlink->in_channel_layouts, fmt);
+ break;
+ }
+
+ for (k = 0; k < outlink->in_channel_layouts->nb_channel_layouts; k++) {
+ if (fmts->channel_layouts[k] == fmt) {
+ fmts->channel_layouts[0] = fmt;
+ fmts->nb_channel_layouts = 1;
+ ret = 1;
+ break;
+ }
+ }
+ }
+ }
return ret;
}
@@ -605,7 +901,23 @@ static void swap_channel_layouts_on_filter(AVFilterContext *filter)
int out_channels = av_get_channel_layout_nb_channels(out_chlayout);
int count_diff = out_channels - in_channels;
int matched_channels, extra_channels;
- int score = 0;
+ int score = 100000;
+
+ if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
+ /* Compute score in case the input or output layout encodes
+ a channel count; in this case the score is not altered by
+ the computation afterwards, as in_chlayout and
+ out_chlayout have both been set to 0 */
+ if (FF_LAYOUT2COUNT(in_chlayout))
+ in_channels = FF_LAYOUT2COUNT(in_chlayout);
+ if (FF_LAYOUT2COUNT(out_chlayout))
+ out_channels = FF_LAYOUT2COUNT(out_chlayout);
+ score -= 10000 + FFABS(out_channels - in_channels) +
+ (in_channels > out_channels ? 10000 : 0);
+ in_chlayout = out_chlayout = 0;
+ /* Let the remaining computation run, even if the score
+ value is not altered */
+ }
/* channel substitution */
for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
@@ -728,15 +1040,50 @@ static void swap_sample_fmts(AVFilterGraph *graph)
static int pick_formats(AVFilterGraph *graph)
{
int i, j, ret;
+ int change;
+
+ do{
+ change = 0;
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ if (filter->nb_inputs){
+ for (j = 0; j < filter->nb_inputs; j++){
+ if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->nb_formats == 1) {
+ if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
+ return ret;
+ change = 1;
+ }
+ }
+ }
+ if (filter->nb_outputs){
+ for (j = 0; j < filter->nb_outputs; j++){
+ if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->nb_formats == 1) {
+ if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
+ return ret;
+ change = 1;
+ }
+ }
+ }
+ if (filter->nb_inputs && filter->nb_outputs && filter->inputs[0]->format>=0) {
+ for (j = 0; j < filter->nb_outputs; j++) {
+ if(filter->outputs[j]->format<0) {
+ if ((ret = pick_format(filter->outputs[j], filter->inputs[0])) < 0)
+ return ret;
+ change = 1;
+ }
+ }
+ }
+ }
+ }while(change);
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
for (j = 0; j < filter->nb_inputs; j++)
- if ((ret = pick_format(filter->inputs[j])) < 0)
+ if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
return ret;
for (j = 0; j < filter->nb_outputs; j++)
- if ((ret = pick_format(filter->outputs[j])) < 0)
+ if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
return ret;
}
return 0;
@@ -750,7 +1097,9 @@ static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
int ret;
/* find supported formats from sub-filters, and merge along links */
- if ((ret = query_formats(graph, log_ctx)) < 0)
+ while ((ret = query_formats(graph, log_ctx)) == AVERROR(EAGAIN))
+ av_log(graph, AV_LOG_DEBUG, "query_formats not finished\n");
+ if (ret < 0)
return ret;
/* Once everything is merged, it's possible that we'll still have
@@ -770,6 +1119,48 @@ static int graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
return 0;
}
+static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph,
+ AVClass *log_ctx)
+{
+ unsigned i, j;
+ int sink_links_count = 0, n = 0;
+ AVFilterContext *f;
+ AVFilterLink **sinks;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ f = graph->filters[i];
+ for (j = 0; j < f->nb_inputs; j++) {
+ f->inputs[j]->graph = graph;
+ f->inputs[j]->age_index = -1;
+ }
+ for (j = 0; j < f->nb_outputs; j++) {
+ f->outputs[j]->graph = graph;
+ f->outputs[j]->age_index= -1;
+ }
+ if (!f->nb_outputs) {
+ if (f->nb_inputs > INT_MAX - sink_links_count)
+ return AVERROR(EINVAL);
+ sink_links_count += f->nb_inputs;
+ }
+ }
+ sinks = av_calloc(sink_links_count, sizeof(*sinks));
+ if (!sinks)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < graph->nb_filters; i++) {
+ f = graph->filters[i];
+ if (!f->nb_outputs) {
+ for (j = 0; j < f->nb_inputs; j++) {
+ sinks[n] = f->inputs[j];
+ f->inputs[j]->age_index = n++;
+ }
+ }
+ }
+ av_assert0(n == sink_links_count);
+ graph->sink_links = sinks;
+ graph->sink_links_count = sink_links_count;
+ return 0;
+}
+
static int graph_insert_fifos(AVFilterGraph *graph, AVClass *log_ctx)
{
AVFilterContext *f;
@@ -820,6 +1211,131 @@ int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
return ret;
if ((ret = graph_config_links(graphctx, log_ctx)))
return ret;
+ if ((ret = ff_avfilter_graph_config_pointers(graphctx, log_ctx)))
+ return ret;
return 0;
}
+
+int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
+{
+ int i, r = AVERROR(ENOSYS);
+
+ if (!graph)
+ return r;
+
+ if ((flags & AVFILTER_CMD_FLAG_ONE) && !(flags & AVFILTER_CMD_FLAG_FAST)) {
+ r = avfilter_graph_send_command(graph, target, cmd, arg, res, res_len, flags | AVFILTER_CMD_FLAG_FAST);
+ if (r != AVERROR(ENOSYS))
+ return r;
+ }
+
+ if (res_len && res)
+ res[0] = 0;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ if (!strcmp(target, "all") || (filter->name && !strcmp(target, filter->name)) || !strcmp(target, filter->filter->name)) {
+ r = avfilter_process_command(filter, cmd, arg, res, res_len, flags);
+ if (r != AVERROR(ENOSYS)) {
+ if ((flags & AVFILTER_CMD_FLAG_ONE) || r < 0)
+ return r;
+ }
+ }
+ }
+
+ return r;
+}
+
+int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *command, const char *arg, int flags, double ts)
+{
+ int i;
+
+ if(!graph)
+ return 0;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ if(filter && (!strcmp(target, "all") || !strcmp(target, filter->name) || !strcmp(target, filter->filter->name))){
+ AVFilterCommand **queue = &filter->command_queue, *next;
+ while (*queue && (*queue)->time <= ts)
+ queue = &(*queue)->next;
+ next = *queue;
+ *queue = av_mallocz(sizeof(AVFilterCommand));
+ (*queue)->command = av_strdup(command);
+ (*queue)->arg = av_strdup(arg);
+ (*queue)->time = ts;
+ (*queue)->flags = flags;
+ (*queue)->next = next;
+ if(flags & AVFILTER_CMD_FLAG_ONE)
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static void heap_bubble_up(AVFilterGraph *graph,
+ AVFilterLink *link, int index)
+{
+ AVFilterLink **links = graph->sink_links;
+
+ while (index) {
+ int parent = (index - 1) >> 1;
+ if (links[parent]->current_pts >= link->current_pts)
+ break;
+ links[index] = links[parent];
+ links[index]->age_index = index;
+ index = parent;
+ }
+ links[index] = link;
+ link->age_index = index;
+}
+
+static void heap_bubble_down(AVFilterGraph *graph,
+ AVFilterLink *link, int index)
+{
+ AVFilterLink **links = graph->sink_links;
+
+ while (1) {
+ int child = 2 * index + 1;
+ if (child >= graph->sink_links_count)
+ break;
+ if (child + 1 < graph->sink_links_count &&
+ links[child + 1]->current_pts < links[child]->current_pts)
+ child++;
+ if (link->current_pts < links[child]->current_pts)
+ break;
+ links[index] = links[child];
+ links[index]->age_index = index;
+ index = child;
+ }
+ links[index] = link;
+ link->age_index = index;
+}
+
+void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link)
+{
+ heap_bubble_up (graph, link, link->age_index);
+ heap_bubble_down(graph, link, link->age_index);
+}
+
+
+int avfilter_graph_request_oldest(AVFilterGraph *graph)
+{
+ while (graph->sink_links_count) {
+ AVFilterLink *oldest = graph->sink_links[0];
+ int r = ff_request_frame(oldest);
+ if (r != AVERROR_EOF)
+ return r;
+ av_log(oldest->dst, AV_LOG_DEBUG, "EOF on sink link %s:%s.\n",
+ oldest->dst ? oldest->dst->name : "unknown",
+ oldest->dstpad ? oldest->dstpad->name : "unknown");
+ /* EOF: remove the link from the heap */
+ if (oldest->age_index < --graph->sink_links_count)
+ heap_bubble_down(graph, graph->sink_links[graph->sink_links_count],
+ oldest->age_index);
+ oldest->age_index = -1;
+ }
+ return AVERROR_EOF;
+}
diff --git a/libavfilter/avfiltergraph.h b/libavfilter/avfiltergraph.h
index 47174ef..b31d581 100644
--- a/libavfilter/avfiltergraph.h
+++ b/libavfilter/avfiltergraph.h
@@ -2,20 +2,20 @@
* Filter graphs
* copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,5 +25,4 @@
#include "avfilter.h"
#include "libavutil/log.h"
-
#endif /* AVFILTER_AVFILTERGRAPH_H */
diff --git a/libavfilter/avfilterres.rc b/libavfilter/avfilterres.rc
new file mode 100644
index 0000000..8be6247
--- /dev/null
+++ b/libavfilter/avfilterres.rc
@@ -0,0 +1,55 @@
+/*
+ * Windows resource file for libavfilter
+ *
+ * Copyright (C) 2012 James Almer
+ * Copyright (C) 2013 Tiancheng "Timothy" Gu
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <windows.h>
+#include "libavfilter/version.h"
+#include "libavutil/ffversion.h"
+#include "config.h"
+
+1 VERSIONINFO
+FILEVERSION LIBAVFILTER_VERSION_MAJOR, LIBAVFILTER_VERSION_MINOR, LIBAVFILTER_VERSION_MICRO, 0
+PRODUCTVERSION LIBAVFILTER_VERSION_MAJOR, LIBAVFILTER_VERSION_MINOR, LIBAVFILTER_VERSION_MICRO, 0
+FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
+FILEOS VOS_NT_WINDOWS32
+FILETYPE VFT_DLL
+{
+ BLOCK "StringFileInfo"
+ {
+ BLOCK "040904B0"
+ {
+ VALUE "CompanyName", "FFmpeg Project"
+ VALUE "FileDescription", "FFmpeg audio/video filtering library"
+ VALUE "FileVersion", AV_STRINGIFY(LIBAVFILTER_VERSION)
+ VALUE "InternalName", "libavfilter"
+ VALUE "LegalCopyright", "Copyright (C) 2000-" AV_STRINGIFY(CONFIG_THIS_YEAR) " FFmpeg Project"
+ VALUE "OriginalFilename", "avfilter" BUILDSUF "-" AV_STRINGIFY(LIBAVFILTER_VERSION_MAJOR) SLIBSUF
+ VALUE "ProductName", "FFmpeg"
+ VALUE "ProductVersion", FFMPEG_VERSION
+ }
+ }
+
+ BLOCK "VarFileInfo"
+ {
+ VALUE "Translation", 0x0409, 0x04B0
+ }
+}
diff --git a/libavfilter/bbox.c b/libavfilter/bbox.c
new file mode 100644
index 0000000..be9b2e6
--- /dev/null
+++ b/libavfilter/bbox.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "bbox.h"
+
+int ff_calculate_bounding_box(FFBoundingBox *bbox,
+ const uint8_t *data, int linesize, int w, int h,
+ int min_val)
+{
+ int x, y;
+ int start_x;
+ int start_y;
+ int end_x;
+ int end_y;
+ const uint8_t *line;
+
+ /* left bound */
+ for (start_x = 0; start_x < w; start_x++)
+ for (y = 0; y < h; y++)
+ if ((data[y * linesize + start_x] > min_val))
+ goto outl;
+outl:
+ if (start_x == w) /* no points found */
+ return 0;
+
+ /* right bound */
+ for (end_x = w - 1; end_x >= start_x; end_x--)
+ for (y = 0; y < h; y++)
+ if ((data[y * linesize + end_x] > min_val))
+ goto outr;
+outr:
+
+ /* top bound */
+ line = data;
+ for (start_y = 0; start_y < h; start_y++) {
+ for (x = 0; x < w; x++)
+ if (line[x] > min_val)
+ goto outt;
+ line += linesize;
+ }
+outt:
+
+ /* bottom bound */
+ line = data + (h-1)*linesize;
+ for (end_y = h - 1; end_y >= start_y; end_y--) {
+ for (x = 0; x < w; x++)
+ if (line[x] > min_val)
+ goto outb;
+ line -= linesize;
+ }
+outb:
+
+ bbox->x1 = start_x;
+ bbox->y1 = start_y;
+ bbox->x2 = end_x;
+ bbox->y2 = end_y;
+ return 1;
+}
diff --git a/libavfilter/bbox.h b/libavfilter/bbox.h
new file mode 100644
index 0000000..eb73154
--- /dev/null
+++ b/libavfilter/bbox.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BBOX_H
+#define AVFILTER_BBOX_H
+
+#include <stdint.h>
+
+typedef struct {
+ int x1, x2, y1, y2;
+} FFBoundingBox;
+
+/**
+ * Calculate the smallest rectangle that will encompass the
+ * region with values > min_val.
+ *
+ * @param bbox bounding box structure which is updated with the found values.
+ * If no pixels could be found with value > min_val, the
+ * structure is not modified.
+ * @return 1 in case at least one pixel with value > min_val was found,
+ * 0 otherwise
+ */
+int ff_calculate_bounding_box(FFBoundingBox *bbox,
+ const uint8_t *data, int linesize,
+ int w, int h, int min_val);
+
+#endif /* AVFILTER_BBOX_H */
diff --git a/libavfilter/buffer.c b/libavfilter/buffer.c
index fd0b18f..0327952 100644
--- a/libavfilter/buffer.c
+++ b/libavfilter/buffer.c
@@ -1,32 +1,39 @@
/*
- * This file is part of Libav.
+ * Copyright Stefano Sabatini <stefasab gmail com>
+ * Copyright Anton Khirnov <anton khirnov net>
+ * Copyright Michael Niedermayer <michaelni gmx at>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
+#include "libavutil/avassert.h"
#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavcodec/avcodec.h"
#include "avfilter.h"
#include "internal.h"
+#include "audio.h"
+#include "avcodec.h"
#include "version.h"
#if FF_API_AVFILTERBUFFER
-/* TODO: buffer pool. see comment for avfilter_default_get_video_buffer() */
void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
{
if (ptr->extended_data != ptr->data)
@@ -35,19 +42,32 @@ void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
av_free(ptr);
}
+static void copy_video_props(AVFilterBufferRefVideoProps *dst, AVFilterBufferRefVideoProps *src) {
+ *dst = *src;
+ if (src->qp_table) {
+ int qsize = src->qp_table_size;
+ dst->qp_table = av_malloc(qsize);
+ memcpy(dst->qp_table, src->qp_table, qsize);
+ }
+}
+
AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
{
AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef));
if (!ret)
return NULL;
*ret = *ref;
+
+ ret->metadata = NULL;
+ av_dict_copy(&ret->metadata, ref->metadata, 0);
+
if (ref->type == AVMEDIA_TYPE_VIDEO) {
ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps));
if (!ret->video) {
av_free(ret);
return NULL;
}
- *ret->video = *ref->video;
+ copy_video_props(ret->video, ref->video);
ret->extended_data = ret->data;
} else if (ref->type == AVMEDIA_TYPE_AUDIO) {
ret->audio = av_malloc(sizeof(AVFilterBufferRefAudioProps));
@@ -57,9 +77,9 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
}
*ret->audio = *ref->audio;
- if (ref->extended_data != ref->data) {
+ if (ref->extended_data && ref->extended_data != ref->data) {
int nb_channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
- if (!(ret->extended_data = av_malloc(sizeof(*ret->extended_data) *
+ if (!(ret->extended_data = av_malloc_array(sizeof(*ret->extended_data),
nb_channels))) {
av_freep(&ret->audio);
av_freep(&ret);
@@ -79,12 +99,16 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref)
{
if (!ref)
return;
+ av_assert0(ref->buf->refcount > 0);
if (!(--ref->buf->refcount))
ref->buf->free(ref->buf);
if (ref->extended_data != ref->data)
av_freep(&ref->extended_data);
- av_free(ref->video);
- av_free(ref->audio);
+ if (ref->video)
+ av_freep(&ref->video->qp_table);
+ av_freep(&ref->video);
+ av_freep(&ref->audio);
+ av_dict_free(&ref->metadata);
av_free(ref);
}
@@ -99,13 +123,17 @@ FF_ENABLE_DEPRECATION_WARNINGS
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
dst->pts = src->pts;
+ dst->pos = av_frame_get_pkt_pos(src);
dst->format = src->format;
+ av_dict_free(&dst->metadata);
+ av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);
+
switch (dst->type) {
case AVMEDIA_TYPE_VIDEO:
dst->video->w = src->width;
dst->video->h = src->height;
- dst->video->pixel_aspect = src->sample_aspect_ratio;
+ dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
dst->video->interlaced = src->interlaced_frame;
dst->video->top_field_first = src->top_field_first;
dst->video->key_frame = src->key_frame;
@@ -122,60 +150,24 @@ int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
return 0;
}
-int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src)
-{
- int planes, nb_channels;
-
- memcpy(dst->data, src->data, sizeof(dst->data));
- memcpy(dst->linesize, src->linesize, sizeof(dst->linesize));
-
- dst->pts = src->pts;
- dst->format = src->format;
-
- switch (src->type) {
- case AVMEDIA_TYPE_VIDEO:
- dst->width = src->video->w;
- dst->height = src->video->h;
- dst->sample_aspect_ratio = src->video->pixel_aspect;
- dst->interlaced_frame = src->video->interlaced;
- dst->top_field_first = src->video->top_field_first;
- dst->key_frame = src->video->key_frame;
- dst->pict_type = src->video->pict_type;
- break;
- case AVMEDIA_TYPE_AUDIO:
- nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout);
- planes = av_sample_fmt_is_planar(src->format) ? nb_channels : 1;
-
- if (planes > FF_ARRAY_ELEMS(dst->data)) {
- dst->extended_data = av_mallocz(planes * sizeof(*dst->extended_data));
- if (!dst->extended_data)
- return AVERROR(ENOMEM);
- memcpy(dst->extended_data, src->extended_data,
- planes * sizeof(*dst->extended_data));
- } else
- dst->extended_data = dst->data;
-
- dst->sample_rate = src->audio->sample_rate;
- dst->channel_layout = src->audio->channel_layout;
- dst->nb_samples = src->audio->nb_samples;
- break;
- default:
- return AVERROR(EINVAL);
- }
-
- return 0;
-}
-
-void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
+void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, const AVFilterBufferRef *src)
{
// copy common properties
dst->pts = src->pts;
dst->pos = src->pos;
switch (src->type) {
- case AVMEDIA_TYPE_VIDEO: *dst->video = *src->video; break;
+ case AVMEDIA_TYPE_VIDEO: {
+ if (dst->video->qp_table)
+ av_freep(&dst->video->qp_table);
+ copy_video_props(dst->video, src->video);
+ break;
+ }
case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break;
default: break;
}
+
+ av_dict_free(&dst->metadata);
+ av_dict_copy(&dst->metadata, src->metadata, 0);
}
#endif /* FF_API_AVFILTERBUFFER */
diff --git a/libavfilter/bufferqueue.h b/libavfilter/bufferqueue.h
new file mode 100644
index 0000000..f5e5df2
--- /dev/null
+++ b/libavfilter/bufferqueue.h
@@ -0,0 +1,121 @@
+/*
+ * Generic buffer queue
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BUFFERQUEUE_H
+#define AVFILTER_BUFFERQUEUE_H
+
+/**
+ * FFBufQueue: simple AVFrame queue API
+ *
+ * Note: this API is not thread-safe. Concurrent access to the same queue
+ * must be protected by a mutex or any synchronization mechanism.
+ */
+
+/**
+ * Maximum size of the queue.
+ *
+ * This value can be overridden by definying it before including this
+ * header.
+ * Powers of 2 are recommended.
+ */
+#ifndef FF_BUFQUEUE_SIZE
+#define FF_BUFQUEUE_SIZE 64
+#endif
+
+#include "avfilter.h"
+#include "libavutil/avassert.h"
+
+/**
+ * Structure holding the queue
+ */
+struct FFBufQueue {
+ AVFrame *queue[FF_BUFQUEUE_SIZE];
+ unsigned short head;
+ unsigned short available; /**< number of available buffers */
+};
+
+#define BUCKET(i) queue->queue[(queue->head + (i)) % FF_BUFQUEUE_SIZE]
+
+/**
+ * Test if a buffer queue is full.
+ */
+static inline int ff_bufqueue_is_full(struct FFBufQueue *queue)
+{
+ return queue->available == FF_BUFQUEUE_SIZE;
+}
+
+/**
+ * Add a buffer to the queue.
+ *
+ * If the queue is already full, then the current last buffer is dropped
+ * (and unrefed) with a warning before adding the new buffer.
+ */
+static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue,
+ AVFrame *buf)
+{
+ if (ff_bufqueue_is_full(queue)) {
+ av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n");
+ av_frame_free(&BUCKET(--queue->available));
+ }
+ BUCKET(queue->available++) = buf;
+}
+
+/**
+ * Get a buffer from the queue without altering it.
+ *
+ * Buffer with index 0 is the first buffer in the queue.
+ * Return NULL if the queue has not enough buffers.
+ */
+static inline AVFrame *ff_bufqueue_peek(struct FFBufQueue *queue,
+ unsigned index)
+{
+ return index < queue->available ? BUCKET(index) : NULL;
+}
+
+/**
+ * Get the first buffer from the queue and remove it.
+ *
+ * Do not use on an empty queue.
+ */
+static inline AVFrame *ff_bufqueue_get(struct FFBufQueue *queue)
+{
+ AVFrame *ret = queue->queue[queue->head];
+ av_assert0(queue->available);
+ queue->available--;
+ queue->queue[queue->head] = NULL;
+ queue->head = (queue->head + 1) % FF_BUFQUEUE_SIZE;
+ return ret;
+}
+
+/**
+ * Unref and remove all buffers from the queue.
+ */
+static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue)
+{
+ while (queue->available) {
+ AVFrame *buf = ff_bufqueue_get(queue);
+ av_frame_free(&buf);
+ }
+}
+
+#undef BUCKET
+
+#endif /* AVFILTER_BUFFERQUEUE_H */
diff --git a/libavfilter/buffersink.c b/libavfilter/buffersink.c
index 2894a5b..525f97b 100644
--- a/libavfilter/buffersink.c
+++ b/libavfilter/buffersink.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,6 +29,7 @@
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
@@ -36,44 +37,119 @@
#include "internal.h"
typedef struct BufferSinkContext {
- AVFrame *cur_frame; ///< last frame delivered on the sink
+ const AVClass *class;
+ AVFifoBuffer *fifo; ///< FIFO buffer of video frame references
+ unsigned warning_limit;
+
+ /* only used for video */
+ enum AVPixelFormat *pixel_fmts; ///< list of accepted pixel formats, must be terminated with -1
+ int pixel_fmts_size;
+
+ /* only used for audio */
+ enum AVSampleFormat *sample_fmts; ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
+ int sample_fmts_size;
+ int64_t *channel_layouts; ///< list of accepted channel layouts, terminated by -1
+ int channel_layouts_size;
+ int *channel_counts; ///< list of accepted channel counts, terminated by -1
+ int channel_counts_size;
+ int all_channel_counts;
+ int *sample_rates; ///< list of accepted sample rates, terminated by -1
+ int sample_rates_size;
+
+ /* only used for compat API */
AVAudioFifo *audio_fifo; ///< FIFO for audio samples
int64_t next_pts; ///< interpolating audio pts
} BufferSinkContext;
+#define NB_ITEMS(list) (list ## _size / sizeof(*list))
+
static av_cold void uninit(AVFilterContext *ctx)
{
BufferSinkContext *sink = ctx->priv;
+ AVFrame *frame;
if (sink->audio_fifo)
av_audio_fifo_free(sink->audio_fifo);
+
+ if (sink->fifo) {
+ while (av_fifo_size(sink->fifo) >= sizeof(AVFilterBufferRef *)) {
+ av_fifo_generic_read(sink->fifo, &frame, sizeof(frame), NULL);
+ av_frame_free(&frame);
+ }
+ av_fifo_freep(&sink->fifo);
+ }
}
-static int filter_frame(AVFilterLink *link, AVFrame *frame)
+static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref)
{
- BufferSinkContext *s = link->dst->priv;
-
- av_assert0(!s->cur_frame);
- s->cur_frame = frame;
+ BufferSinkContext *buf = ctx->priv;
+
+ if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
+ /* realloc fifo size */
+ if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Cannot buffer more frames. Consume some available frames "
+ "before adding new ones.\n");
+ return AVERROR(ENOMEM);
+ }
+ }
+ /* cache frame */
+ av_fifo_generic_write(buf->fifo, &ref, sizeof(AVFilterBufferRef *), NULL);
return 0;
}
-int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx,
- AVFrame *frame)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
- BufferSinkContext *s = ctx->priv;
- AVFilterLink *link = ctx->inputs[0];
+ AVFilterContext *ctx = link->dst;
+ BufferSinkContext *buf = link->dst->priv;
int ret;
- if ((ret = ff_request_frame(link)) < 0)
+ if ((ret = add_buffer_ref(ctx, frame)) < 0)
return ret;
+ if (buf->warning_limit &&
+ av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) {
+ av_log(ctx, AV_LOG_WARNING,
+ "%d buffers queued in %s, something may be wrong.\n",
+ buf->warning_limit,
+ (char *)av_x_if_null(ctx->name, ctx->filter->name));
+ buf->warning_limit *= 10;
+ }
+ return 0;
+}
- if (!s->cur_frame)
+int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
+{
+ return av_buffersink_get_frame_flags(ctx, frame, 0);
+}
+
+int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret;
+ AVFrame *cur_frame;
+
+ /* no picref available, fetch it from the filterchain */
+ if (!av_fifo_size(buf->fifo)) {
+ if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
+ return AVERROR(EAGAIN);
+ if ((ret = ff_request_frame(inlink)) < 0)
+ return ret;
+ }
+
+ if (!av_fifo_size(buf->fifo))
return AVERROR(EINVAL);
- av_frame_move_ref(frame, s->cur_frame);
- av_frame_free(&s->cur_frame);
+ if (flags & AV_BUFFERSINK_FLAG_PEEK) {
+ cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
+ if ((ret = av_frame_ref(frame, cur_frame)) < 0)
+ return ret;
+ } else {
+ av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL);
+ av_frame_move_ref(frame, cur_frame);
+ av_frame_free(&cur_frame);
+ }
return 0;
}
@@ -90,8 +166,9 @@ static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);
tmp->pts = s->next_pts;
- s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
- link->time_base);
+ if (s->next_pts != AV_NOPTS_VALUE)
+ s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
+ link->time_base);
av_frame_move_ref(frame, tmp);
av_frame_free(&tmp);
@@ -104,10 +181,11 @@ int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
+ AVFrame *cur_frame;
int ret = 0;
if (!s->audio_fifo) {
- int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
+ int nb_channels = link->channels;
if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples)))
return AVERROR(ENOMEM);
}
@@ -116,27 +194,76 @@ int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
return read_from_fifo(ctx, frame, nb_samples);
- ret = ff_request_frame(link);
- if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo))
+ if (!(cur_frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ ret = av_buffersink_get_frame_flags(ctx, cur_frame, 0);
+ if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo)) {
+ av_frame_free(&cur_frame);
return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
- else if (ret < 0)
+ } else if (ret < 0) {
+ av_frame_free(&cur_frame);
return ret;
+ }
- if (s->cur_frame->pts != AV_NOPTS_VALUE) {
- s->next_pts = s->cur_frame->pts -
+ if (cur_frame->pts != AV_NOPTS_VALUE) {
+ s->next_pts = cur_frame->pts -
av_rescale_q(av_audio_fifo_size(s->audio_fifo),
(AVRational){ 1, link->sample_rate },
link->time_base);
}
- ret = av_audio_fifo_write(s->audio_fifo, (void**)s->cur_frame->extended_data,
- s->cur_frame->nb_samples);
- av_frame_free(&s->cur_frame);
+ ret = av_audio_fifo_write(s->audio_fifo, (void**)cur_frame->extended_data,
+ cur_frame->nb_samples);
+ av_frame_free(&cur_frame);
}
return ret;
}
+AVBufferSinkParams *av_buffersink_params_alloc(void)
+{
+ static const int pixel_fmts[] = { AV_PIX_FMT_NONE };
+ AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
+ if (!params)
+ return NULL;
+
+ params->pixel_fmts = pixel_fmts;
+ return params;
+}
+
+AVABufferSinkParams *av_abuffersink_params_alloc(void)
+{
+ AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams));
+
+ if (!params)
+ return NULL;
+ return params;
+}
+
+#define FIFO_INIT_SIZE 8
+
+static av_cold int common_init(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+
+ buf->fifo = av_fifo_alloc_array(FIFO_INIT_SIZE, sizeof(AVFilterBufferRef *));
+ if (!buf->fifo) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
+ return AVERROR(ENOMEM);
+ }
+ buf->warning_limit = 100;
+ buf->next_pts = AV_NOPTS_VALUE;
+ return 0;
+}
+
+void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ inlink->min_samples = inlink->max_samples =
+ inlink->partial_buf_size = frame_size;
+}
+
#if FF_API_AVFILTERBUFFER
FF_DISABLE_DEPRECATION_WARNINGS
static void compat_free_buffer(AVFilterBuffer *buf)
@@ -147,7 +274,7 @@ static void compat_free_buffer(AVFilterBuffer *buf)
}
static int compat_read(AVFilterContext *ctx,
- AVFilterBufferRef **pbuf, int nb_samples)
+ AVFilterBufferRef **pbuf, int nb_samples, int flags)
{
AVFilterBufferRef *buf;
AVFrame *frame;
@@ -161,13 +288,14 @@ static int compat_read(AVFilterContext *ctx,
return AVERROR(ENOMEM);
if (!nb_samples)
- ret = av_buffersink_get_frame(ctx, frame);
+ ret = av_buffersink_get_frame_flags(ctx, frame, flags);
else
ret = av_buffersink_get_samples(ctx, frame, nb_samples);
if (ret < 0)
goto fail;
+ AV_NOWARN_DEPRECATED(
if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
AV_PERM_READ,
@@ -186,6 +314,7 @@ static int compat_read(AVFilterContext *ctx,
}
avfilter_copy_frame_props(buf, frame);
+ )
buf->buf->priv = frame;
buf->buf->free = compat_free_buffer;
@@ -200,23 +329,245 @@ fail:
int attribute_align_arg av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
- return compat_read(ctx, buf, 0);
+ return compat_read(ctx, buf, 0, 0);
}
int attribute_align_arg av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples)
{
- return compat_read(ctx, buf, nb_samples);
+ return compat_read(ctx, buf, nb_samples, 0);
+}
+
+int attribute_align_arg av_buffersink_get_buffer_ref(AVFilterContext *ctx,
+ AVFilterBufferRef **bufref, int flags)
+{
+ *bufref = NULL;
+
+ av_assert0( !strcmp(ctx->filter->name, "buffersink")
+ || !strcmp(ctx->filter->name, "abuffersink")
+ || !strcmp(ctx->filter->name, "ffbuffersink")
+ || !strcmp(ctx->filter->name, "ffabuffersink"));
+
+ return compat_read(ctx, bufref, 0, flags);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
+AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
+{
+ av_assert0( !strcmp(ctx->filter->name, "buffersink")
+ || !strcmp(ctx->filter->name, "ffbuffersink"));
+
+ return ctx->inputs[0]->frame_rate;
+}
+
+int attribute_align_arg av_buffersink_poll_frame(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ av_assert0( !strcmp(ctx->filter->name, "buffersink")
+ || !strcmp(ctx->filter->name, "abuffersink")
+ || !strcmp(ctx->filter->name, "ffbuffersink")
+ || !strcmp(ctx->filter->name, "ffabuffersink"));
+
+ return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink);
+}
+
+static av_cold int vsink_init(AVFilterContext *ctx, void *opaque)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVBufferSinkParams *params = opaque;
+ int ret;
+
+ if (params) {
+ if ((ret = av_opt_set_int_list(buf, "pix_fmts", params->pixel_fmts, AV_PIX_FMT_NONE, 0)) < 0)
+ return ret;
+ }
+
+ return common_init(ctx);
+}
+
+#define CHECK_LIST_SIZE(field) \
+ if (buf->field ## _size % sizeof(*buf->field)) { \
+ av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
+ "should be multiple of %d\n", \
+ buf->field ## _size, (int)sizeof(*buf->field)); \
+ return AVERROR(EINVAL); \
+ }
+static int vsink_query_formats(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ unsigned i;
+ int ret;
+
+ CHECK_LIST_SIZE(pixel_fmts)
+ if (buf->pixel_fmts_size) {
+ for (i = 0; i < NB_ITEMS(buf->pixel_fmts); i++)
+ if ((ret = ff_add_format(&formats, buf->pixel_fmts[i])) < 0) {
+ ff_formats_unref(&formats);
+ return ret;
+ }
+ ff_set_common_formats(ctx, formats);
+ } else {
+ ff_default_query_formats(ctx);
+ }
+
+ return 0;
+}
+
+static av_cold int asink_init(AVFilterContext *ctx, void *opaque)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVABufferSinkParams *params = opaque;
+ int ret;
+
+ if (params) {
+ if ((ret = av_opt_set_int_list(buf, "sample_fmts", params->sample_fmts, AV_SAMPLE_FMT_NONE, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "sample_rates", params->sample_rates, -1, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "channel_layouts", params->channel_layouts, -1, 0)) < 0 ||
+ (ret = av_opt_set_int_list(buf, "channel_counts", params->channel_counts, -1, 0)) < 0 ||
+ (ret = av_opt_set_int(buf, "all_channel_counts", params->all_channel_counts, 0)) < 0)
+ return ret;
+ }
+ return common_init(ctx);
+}
+
+static int asink_query_formats(AVFilterContext *ctx)
+{
+ BufferSinkContext *buf = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ AVFilterChannelLayouts *layouts = NULL;
+ unsigned i;
+ int ret;
+
+ CHECK_LIST_SIZE(sample_fmts)
+ CHECK_LIST_SIZE(sample_rates)
+ CHECK_LIST_SIZE(channel_layouts)
+ CHECK_LIST_SIZE(channel_counts)
+
+ if (buf->sample_fmts_size) {
+ for (i = 0; i < NB_ITEMS(buf->sample_fmts); i++)
+ if ((ret = ff_add_format(&formats, buf->sample_fmts[i])) < 0) {
+ ff_formats_unref(&formats);
+ return ret;
+ }
+ ff_set_common_formats(ctx, formats);
+ }
+
+ if (buf->channel_layouts_size || buf->channel_counts_size ||
+ buf->all_channel_counts) {
+ for (i = 0; i < NB_ITEMS(buf->channel_layouts); i++)
+ if ((ret = ff_add_channel_layout(&layouts, buf->channel_layouts[i])) < 0) {
+ ff_channel_layouts_unref(&layouts);
+ return ret;
+ }
+ for (i = 0; i < NB_ITEMS(buf->channel_counts); i++)
+ if ((ret = ff_add_channel_layout(&layouts, FF_COUNT2LAYOUT(buf->channel_counts[i]))) < 0) {
+ ff_channel_layouts_unref(&layouts);
+ return ret;
+ }
+ if (buf->all_channel_counts) {
+ if (layouts)
+ av_log(ctx, AV_LOG_WARNING,
+ "Conflicting all_channel_counts and list in options\n");
+ else if (!(layouts = ff_all_channel_counts()))
+ return AVERROR(ENOMEM);
+ }
+ ff_set_common_channel_layouts(ctx, layouts);
+ }
+
+ if (buf->sample_rates_size) {
+ formats = NULL;
+ for (i = 0; i < NB_ITEMS(buf->sample_rates); i++)
+ if ((ret = ff_add_format(&formats, buf->sample_rates[i])) < 0) {
+ ff_formats_unref(&formats);
+ return ret;
+ }
+ ff_set_common_samplerates(ctx, formats);
+ }
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(BufferSinkContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption buffersink_options[] = {
+ { "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { NULL },
+};
+#undef FLAGS
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption abuffersink_options[] = {
+ { "sample_fmts", "set the supported sample formats", OFFSET(sample_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "sample_rates", "set the supported sample rates", OFFSET(sample_rates), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "channel_layouts", "set the supported channel layouts", OFFSET(channel_layouts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "channel_counts", "set the supported channel counts", OFFSET(channel_counts), AV_OPT_TYPE_BINARY, .flags = FLAGS },
+ { "all_channel_counts", "accept all channel counts", OFFSET(all_channel_counts), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
+ { NULL },
+};
+#undef FLAGS
+
+AVFILTER_DEFINE_CLASS(buffersink);
+AVFILTER_DEFINE_CLASS(abuffersink);
+
+#if FF_API_AVFILTERBUFFER
+
+#define ffbuffersink_options buffersink_options
+#define ffabuffersink_options abuffersink_options
+AVFILTER_DEFINE_CLASS(ffbuffersink);
+AVFILTER_DEFINE_CLASS(ffabuffersink);
+
+static const AVFilterPad ffbuffersink_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL },
+};
+
+AVFilter ff_vsink_ffbuffersink = {
+ .name = "ffbuffersink",
+ .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
+ .priv_size = sizeof(BufferSinkContext),
+ .priv_class = &ffbuffersink_class,
+ .init_opaque = vsink_init,
+ .uninit = uninit,
+
+ .query_formats = vsink_query_formats,
+ .inputs = ffbuffersink_inputs,
+ .outputs = NULL,
+};
+
+static const AVFilterPad ffabuffersink_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL },
+};
+
+AVFilter ff_asink_ffabuffersink = {
+ .name = "ffabuffersink",
+ .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
+ .init_opaque = asink_init,
+ .uninit = uninit,
+ .priv_size = sizeof(BufferSinkContext),
+ .priv_class = &ffabuffersink_class,
+ .query_formats = asink_query_formats,
+ .inputs = ffabuffersink_inputs,
+ .outputs = NULL,
+};
+#endif /* FF_API_AVFILTERBUFFER */
+
static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
- .needs_fifo = 1
},
{ NULL }
};
@@ -225,8 +576,11 @@ AVFilter ff_vsink_buffer = {
.name = "buffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
.priv_size = sizeof(BufferSinkContext),
+ .priv_class = &buffersink_class,
+ .init_opaque = vsink_init,
.uninit = uninit,
+ .query_formats = vsink_query_formats,
.inputs = avfilter_vsink_buffer_inputs,
.outputs = NULL,
};
@@ -236,7 +590,6 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
- .needs_fifo = 1
},
{ NULL }
};
@@ -244,9 +597,12 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
AVFilter ff_asink_abuffer = {
.name = "abuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
+ .priv_class = &abuffersink_class,
.priv_size = sizeof(BufferSinkContext),
+ .init_opaque = asink_init,
.uninit = uninit,
+ .query_formats = asink_query_formats,
.inputs = avfilter_asink_abuffer_inputs,
.outputs = NULL,
};
diff --git a/libavfilter/buffersink.h b/libavfilter/buffersink.h
index 83a8bd9..24cd2fe 100644
--- a/libavfilter/buffersink.h
+++ b/libavfilter/buffersink.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -22,7 +22,7 @@
/**
* @file
* @ingroup lavfi_buffersink
- * memory buffer sink API
+ * memory buffer sink API for audio and video
*/
#include "avfilter.h"
@@ -35,6 +35,26 @@
#if FF_API_AVFILTERBUFFER
/**
+ * Get an audio/video buffer data from buffer_sink and put it in bufref.
+ *
+ * This function works with both audio and video buffer sinks.
+ *
+ * @param buffer_sink pointer to a buffersink or abuffersink context
+ * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure
+ */
+attribute_deprecated
+int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
+ AVFilterBufferRef **bufref, int flags);
+
+/**
+ * Get the number of immediately available frames.
+ */
+attribute_deprecated
+int av_buffersink_poll_frame(AVFilterContext *ctx);
+
+/**
* Get a buffer with filtered data from sink and put it in buf.
*
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
@@ -73,6 +93,78 @@ int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
/**
* Get a frame with filtered data from sink and put it in frame.
*
+ * @param ctx pointer to a buffersink or abuffersink filter context.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ * The data must be freed using av_frame_unref() / av_frame_free()
+ * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
+ *
+ * @return >= 0 in for success, a negative AVERROR code for failure.
+ */
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
+
+/**
+ * Tell av_buffersink_get_buffer_ref() to read video/samples buffer
+ * reference, but not remove it from the buffer. This is useful if you
+ * need only to read a video/samples buffer, without to fetch it.
+ */
+#define AV_BUFFERSINK_FLAG_PEEK 1
+
+/**
+ * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
+ * If a frame is already buffered, it is read (and removed from the buffer),
+ * but if no frame is present, return AVERROR(EAGAIN).
+ */
+#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
+
+/**
+ * Struct to use for initializing a buffersink context.
+ */
+typedef struct {
+ const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
+} AVBufferSinkParams;
+
+/**
+ * Create an AVBufferSinkParams structure.
+ *
+ * Must be freed with av_free().
+ */
+AVBufferSinkParams *av_buffersink_params_alloc(void);
+
+/**
+ * Struct to use for initializing an abuffersink context.
+ */
+typedef struct {
+ const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
+ const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
+ const int *channel_counts; ///< list of allowed channel counts, terminated by -1
+ int all_channel_counts; ///< if not 0, accept any channel count or layout
+ int *sample_rates; ///< list of allowed sample rates, terminated by -1
+} AVABufferSinkParams;
+
+/**
+ * Create an AVABufferSinkParams structure.
+ *
+ * Must be freed with av_free().
+ */
+AVABufferSinkParams *av_abuffersink_params_alloc(void);
+
+/**
+ * Set the frame size for an audio buffer sink.
+ *
+ * All calls to av_buffersink_get_buffer_ref will return a buffer with
+ * exactly the specified number of samples, or AVERROR(EAGAIN) if there is
+ * not enough. The last buffer at EOF will be padded with 0.
+ */
+void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
+
+/**
+ * Get the frame rate of the input.
+ */
+AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);
+
+/**
+ * Get a frame with filtered data from sink and put it in frame.
+ *
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c
index 00e28f8..27d3db0 100644
--- a/libavfilter/buffersrc.c
+++ b/libavfilter/buffersrc.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2008 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -39,22 +39,26 @@
#include "formats.h"
#include "internal.h"
#include "video.h"
+#include "avcodec.h"
typedef struct BufferSourceContext {
const AVClass *class;
AVFifoBuffer *fifo;
AVRational time_base; ///< time_base to set in the output link
+ AVRational frame_rate; ///< frame_rate to set in the output link
+ unsigned nb_failed_requests;
+ unsigned warning_limit;
/* video only */
- int h, w;
+ int w, h;
enum AVPixelFormat pix_fmt;
- char *pix_fmt_str;
AVRational pixel_aspect;
+ char *sws_param;
/* audio only */
int sample_rate;
enum AVSampleFormat sample_fmt;
- char *sample_fmt_str;
+ int channels;
uint64_t channel_layout;
char *channel_layout_str;
@@ -63,39 +67,63 @@ typedef struct BufferSourceContext {
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
if (c->w != width || c->h != height || c->pix_fmt != format) {\
- av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
- return AVERROR(EINVAL);\
+ av_log(s, AV_LOG_INFO, "Changing frame properties on the fly is not supported by all filters.\n");\
}
-#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
+#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, ch_count, format)\
if (c->sample_fmt != format || c->sample_rate != srate ||\
- c->channel_layout != ch_layout) {\
+ c->channel_layout != ch_layout || c->channels != ch_count) {\
av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
return AVERROR(EINVAL);\
}
int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
{
- AVFrame *copy;
+ return av_buffersrc_add_frame_flags(ctx, (AVFrame *)frame,
+ AV_BUFFERSRC_FLAG_KEEP_REF);
+}
+
+int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
+{
+ return av_buffersrc_add_frame_flags(ctx, frame, 0);
+}
+
+static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
+ AVFrame *frame, int flags);
+
+int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
+{
+ AVFrame *copy = NULL;
int ret = 0;
+ if (frame && frame->channel_layout &&
+ av_get_channel_layout_nb_channels(frame->channel_layout) != av_frame_get_channels(frame)) {
+ av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame)
+ return av_buffersrc_add_frame_internal(ctx, frame, flags);
+
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
ret = av_frame_ref(copy, frame);
if (ret >= 0)
- ret = av_buffersrc_add_frame(ctx, copy);
+ ret = av_buffersrc_add_frame_internal(ctx, copy, flags);
av_frame_free(&copy);
return ret;
}
-int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx,
- AVFrame *frame)
+static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
+ AVFrame *frame, int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *copy;
int refcounted, ret;
+ s->nb_failed_requests = 0;
+
if (!frame) {
s->eof = 1;
return 0;
@@ -104,19 +132,26 @@ int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx,
refcounted = !!frame->buf[0];
+ if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
+
switch (ctx->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
frame->format);
break;
case AVMEDIA_TYPE_AUDIO:
+ /* For layouts unknown on input but known on link after negotiation. */
+ if (!frame->channel_layout)
+ frame->channel_layout = s->channel_layout;
CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
- frame->format);
+ av_frame_get_channels(frame), frame->format);
break;
default:
return AVERROR(EINVAL);
}
+ }
+
if (!av_fifo_space(s->fifo) &&
(ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
sizeof(copy))) < 0)
@@ -142,6 +177,10 @@ int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx,
return ret;
}
+ if ((flags & AV_BUFFERSRC_FLAG_PUSH))
+ if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0)
+ return ret;
+
return 0;
}
@@ -150,16 +189,21 @@ FF_DISABLE_DEPRECATION_WARNINGS
static void compat_free_buffer(void *opaque, uint8_t *data)
{
AVFilterBufferRef *buf = opaque;
+ AV_NOWARN_DEPRECATED(
avfilter_unref_buffer(buf);
+ )
}
static void compat_unref_buffer(void *opaque, uint8_t *data)
{
AVBufferRef *buf = opaque;
+ AV_NOWARN_DEPRECATED(
av_buffer_unref(&buf);
+ )
}
-int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
+int av_buffersrc_add_ref(AVFilterContext *ctx, AVFilterBufferRef *buf,
+ int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *frame = NULL;
@@ -176,14 +220,17 @@ int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
if (!frame)
return AVERROR(ENOMEM);
- dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf, 0);
+ dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf,
+ (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY);
if (!dummy_buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
+ AV_NOWARN_DEPRECATED(
if ((ret = avfilter_copy_buf_props(frame, buf)) < 0)
goto fail;
+ )
#define WRAP_PLANE(ref_out, data, data_size) \
do { \
@@ -193,7 +240,7 @@ do { \
goto fail; \
} \
ref_out = av_buffer_create(data, data_size, compat_unref_buffer, \
- dummy_ref, 0); \
+ dummy_ref, (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); \
if (!ref_out) { \
av_frame_unref(frame); \
ret = AVERROR(ENOMEM); \
@@ -224,7 +271,7 @@ do { \
if (planes > FF_ARRAY_ELEMS(frame->buf)) {
frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
- frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
+ frame->extended_buf = av_mallocz_array(sizeof(*frame->extended_buf),
frame->nb_extended_buf);
if (!frame->extended_buf) {
ret = AVERROR(ENOMEM);
@@ -241,7 +288,7 @@ do { \
frame->linesize[0]);
}
- ret = av_buffersrc_add_frame(ctx, frame);
+ ret = av_buffersrc_add_frame_flags(ctx, frame, flags);
fail:
av_buffer_unref(&dummy_buf);
@@ -250,41 +297,47 @@ fail:
return ret;
}
FF_ENABLE_DEPRECATION_WARNINGS
+
+int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
+{
+ return av_buffersrc_add_ref(ctx, buf, 0);
+}
#endif
static av_cold int init_video(AVFilterContext *ctx)
{
BufferSourceContext *c = ctx->priv;
- if (!c->pix_fmt_str || !c->w || !c->h || av_q2d(c->time_base) <= 0) {
+ if (c->pix_fmt == AV_PIX_FMT_NONE || !c->w || !c->h || av_q2d(c->time_base) <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
return AVERROR(EINVAL);
}
- if ((c->pix_fmt = av_get_pix_fmt(c->pix_fmt_str)) == AV_PIX_FMT_NONE) {
- char *tail;
- c->pix_fmt = strtol(c->pix_fmt_str, &tail, 10);
- if (*tail || c->pix_fmt < 0 || !av_pix_fmt_desc_get(c->pix_fmt)) {
- av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", c->pix_fmt_str);
- return AVERROR(EINVAL);
- }
- }
-
if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
return AVERROR(ENOMEM);
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt));
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
+ c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
+ c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
+ c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
+ c->warning_limit = 100;
return 0;
}
+unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
+{
+ return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
+}
+
#define OFFSET(x) offsetof(BufferSourceContext, x)
-#define A AV_OPT_FLAG_AUDIO_PARAM
-#define V AV_OPT_FLAG_VIDEO_PARAM
+#define A AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption video_options[] = {
+static const AVOption buffer_options[] = {
{ "width", NULL, OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
+ { "video_size", NULL, OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, .flags = V },
{ "height", NULL, OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
- { "pix_fmt", NULL, OFFSET(pix_fmt_str), AV_OPT_TYPE_STRING, .flags = V },
+ { "pix_fmt", NULL, OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, .min = AV_PIX_FMT_NONE, .max = INT_MAX, .flags = V },
#if FF_API_OLD_FILTER_OPTS
/* those 4 are for compatibility with the old option passing system where each filter
* did its own parsing */
@@ -294,48 +347,59 @@ static const AVOption video_options[] = {
{ "sar_den", "deprecated, do not use", OFFSET(pixel_aspect.den), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V },
#endif
{ "sar", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, DBL_MAX, V },
+ { "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 1 }, 0, DBL_MAX, V },
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+ { "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+ { "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = V },
{ NULL },
};
-static const AVClass buffer_class = {
- .class_name = "buffer source",
- .item_name = av_default_item_name,
- .option = video_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(buffer);
-static const AVOption audio_options[] = {
+static const AVOption abuffer_options[] = {
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, A },
{ "sample_rate", NULL, OFFSET(sample_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
- { "sample_fmt", NULL, OFFSET(sample_fmt_str), AV_OPT_TYPE_STRING, .flags = A },
+ { "sample_fmt", NULL, OFFSET(sample_fmt), AV_OPT_TYPE_SAMPLE_FMT, { .i64 = AV_SAMPLE_FMT_NONE }, .min = AV_SAMPLE_FMT_NONE, .max = INT_MAX, .flags = A },
{ "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A },
+ { "channels", NULL, OFFSET(channels), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A },
{ NULL },
};
-static const AVClass abuffer_class = {
- .class_name = "abuffer source",
- .item_name = av_default_item_name,
- .option = audio_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(abuffer);
static av_cold int init_audio(AVFilterContext *ctx)
{
BufferSourceContext *s = ctx->priv;
int ret = 0;
- s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
- av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
- s->sample_fmt_str);
+ av_log(ctx, AV_LOG_ERROR, "Sample format was not set or was invalid\n");
return AVERROR(EINVAL);
}
- s->channel_layout = av_get_channel_layout(s->channel_layout_str);
- if (!s->channel_layout) {
- av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
- s->channel_layout_str);
+ if (s->channel_layout_str) {
+ int n;
+
+ s->channel_layout = av_get_channel_layout(s->channel_layout_str);
+ if (!s->channel_layout) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
+ s->channel_layout_str);
+ return AVERROR(EINVAL);
+ }
+ n = av_get_channel_layout_nb_channels(s->channel_layout);
+ if (s->channels) {
+ if (n != s->channels) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Mismatching channel count %d and layout '%s' "
+ "(%d channels)\n",
+ s->channels, s->channel_layout_str, n);
+ return AVERROR(EINVAL);
+ }
+ }
+ s->channels = n;
+ } else if (!s->channels) {
+ av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor "
+ "channel layout specified\n");
return AVERROR(EINVAL);
}
@@ -345,9 +409,11 @@ static av_cold int init_audio(AVFilterContext *ctx)
if (!s->time_base.num)
s->time_base = (AVRational){1, s->sample_rate};
- av_log(ctx, AV_LOG_VERBOSE, "tb:%d/%d samplefmt:%s samplerate: %d "
- "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
+ av_log(ctx, AV_LOG_VERBOSE,
+ "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
+ s->time_base.num, s->time_base.den, av_get_sample_fmt_name(s->sample_fmt),
s->sample_rate, s->channel_layout_str);
+ s->warning_limit = 100;
return ret;
}
@@ -360,8 +426,7 @@ static av_cold void uninit(AVFilterContext *ctx)
av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
av_frame_free(&frame);
}
- av_fifo_free(s->fifo);
- s->fifo = NULL;
+ av_fifo_freep(&s->fifo);
}
static int query_formats(AVFilterContext *ctx)
@@ -383,7 +448,9 @@ static int query_formats(AVFilterContext *ctx)
ff_add_format(&samplerates, c->sample_rate);
ff_set_common_samplerates(ctx, samplerates);
- ff_add_channel_layout(&channel_layouts, c->channel_layout);
+ ff_add_channel_layout(&channel_layouts,
+ c->channel_layout ? c->channel_layout :
+ FF_COUNT2LAYOUT(c->channels));
ff_set_common_channel_layouts(ctx, channel_layouts);
break;
default:
@@ -404,14 +471,15 @@ static int config_props(AVFilterLink *link)
link->sample_aspect_ratio = c->pixel_aspect;
break;
case AVMEDIA_TYPE_AUDIO:
- link->channel_layout = c->channel_layout;
- link->sample_rate = c->sample_rate;
+ if (!c->channel_layout)
+ c->channel_layout = link->channel_layout;
break;
default:
return AVERROR(EINVAL);
}
link->time_base = c->time_base;
+ link->frame_rate = c->frame_rate;
return 0;
}
@@ -419,18 +487,16 @@ static int request_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
AVFrame *frame;
- int ret = 0;
if (!av_fifo_size(c->fifo)) {
if (c->eof)
return AVERROR_EOF;
+ c->nb_failed_requests++;
return AVERROR(EAGAIN);
}
av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);
- ff_filter_frame(link, frame);
-
- return ret;
+ return ff_filter_frame(link, frame);
}
static int poll_frame(AVFilterLink *link)
@@ -457,7 +523,6 @@ AVFilter ff_vsrc_buffer = {
.name = "buffer",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
- .priv_class = &buffer_class,
.query_formats = query_formats,
.init = init_video,
@@ -465,6 +530,7 @@ AVFilter ff_vsrc_buffer = {
.inputs = NULL,
.outputs = avfilter_vsrc_buffer_outputs,
+ .priv_class = &buffer_class,
};
static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
@@ -482,7 +548,6 @@ AVFilter ff_asrc_abuffer = {
.name = "abuffer",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
- .priv_class = &abuffer_class,
.query_formats = query_formats,
.init = init_audio,
@@ -490,4 +555,5 @@ AVFilter ff_asrc_abuffer = {
.inputs = NULL,
.outputs = avfilter_asrc_abuffer_outputs,
+ .priv_class = &abuffer_class,
};
diff --git a/libavfilter/buffersrc.h b/libavfilter/buffersrc.h
index 0ca4d96..ea34c04 100644
--- a/libavfilter/buffersrc.h
+++ b/libavfilter/buffersrc.h
@@ -1,19 +1,19 @@
/*
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,6 +26,7 @@
* Memory buffer source API.
*/
+#include "libavcodec/avcodec.h"
#include "avfilter.h"
/**
@@ -34,6 +35,55 @@
* @{
*/
+enum {
+
+ /**
+ * Do not check for format changes.
+ */
+ AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
+
+#if FF_API_AVFILTERBUFFER
+ /**
+ * Ignored
+ */
+ AV_BUFFERSRC_FLAG_NO_COPY = 2,
+#endif
+
+ /**
+ * Immediately push the frame to the output.
+ */
+ AV_BUFFERSRC_FLAG_PUSH = 4,
+
+ /**
+ * Keep a reference to the frame.
+ * If the frame if reference-counted, create a new reference; otherwise
+ * copy the frame data.
+ */
+ AV_BUFFERSRC_FLAG_KEEP_REF = 8,
+
+};
+
+/**
+ * Add buffer data in picref to buffer_src.
+ *
+ * @param buffer_src pointer to a buffer source context
+ * @param picref a buffer reference, or NULL to mark EOF
+ * @param flags a combination of AV_BUFFERSRC_FLAG_*
+ * @return >= 0 in case of success, a negative AVERROR code
+ * in case of failure
+ */
+int av_buffersrc_add_ref(AVFilterContext *buffer_src,
+ AVFilterBufferRef *picref, int flags);
+
+/**
+ * Get the number of failed requests.
+ *
+ * A failed request is when the request_frame method is called while no
+ * frame is present in the buffer.
+ * The number is reset when a frame is added.
+ */
+unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
+
#if FF_API_AVFILTERBUFFER
/**
* Add a buffer to a filtergraph.
@@ -58,6 +108,9 @@ int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf);
* copied.
*
* @return 0 on success, a negative AVERROR on error
+ *
+ * This function is equivalent to av_buffersrc_add_frame_flags() with the
+ * AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
@@ -75,10 +128,32 @@ int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
* @note the difference between this function and av_buffersrc_write_frame() is
* that av_buffersrc_write_frame() creates a new reference to the input frame,
* while this function takes ownership of the reference passed to it.
+ *
+ * This function is equivalent to av_buffersrc_add_frame_flags() without the
+ * AV_BUFFERSRC_FLAG_KEEP_REF flag.
*/
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
/**
+ * Add a frame to the buffer source.
+ *
+ * By default, if the frame is reference-counted, this function will take
+ * ownership of the reference(s) and reset the frame. This can be controlled
+ * using the flags.
+ *
+ * If this function returns an error, the input frame is not touched.
+ *
+ * @param buffer_src pointer to a buffer source context
+ * @param frame a frame, or NULL to mark EOF
+ * @param flags a combination of AV_BUFFERSRC_FLAG_*
+ * @return >= 0 in case of success, a negative AVERROR code
+ * in case of failure
+ */
+int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
+ AVFrame *frame, int flags);
+
+
+/**
* @}
*/
diff --git a/libavfilter/deshake.h b/libavfilter/deshake.h
new file mode 100644
index 0000000..becd6c2
--- /dev/null
+++ b/libavfilter/deshake.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DESHAKE_H
+#define AVFILTER_DESHAKE_H
+
+#include "config.h"
+#include "avfilter.h"
+#include "transform.h"
+#include "libavutil/pixelutils.h"
+#if CONFIG_OPENCL
+#include "libavutil/opencl.h"
+#endif
+
+
+enum SearchMethod {
+ EXHAUSTIVE, ///< Search all possible positions
+ SMART_EXHAUSTIVE, ///< Search most possible positions (faster)
+ SEARCH_COUNT
+};
+
+typedef struct {
+ int x; ///< Horizontal shift
+ int y; ///< Vertical shift
+} IntMotionVector;
+
+typedef struct {
+ double x; ///< Horizontal shift
+ double y; ///< Vertical shift
+} MotionVector;
+
+typedef struct {
+ MotionVector vec; ///< Motion vector
+ double angle; ///< Angle of rotation
+ double zoom; ///< Zoom percentage
+} Transform;
+
+#if CONFIG_OPENCL
+
+typedef struct {
+ cl_command_queue command_queue;
+ cl_program program;
+ cl_kernel kernel_luma;
+ cl_kernel kernel_chroma;
+ int in_plane_size[8];
+ int out_plane_size[8];
+ int plane_num;
+ cl_mem cl_inbuf;
+ size_t cl_inbuf_size;
+ cl_mem cl_outbuf;
+ size_t cl_outbuf_size;
+} DeshakeOpenclContext;
+
+#endif
+
+#define MAX_R 64
+
+typedef struct {
+ const AVClass *class;
+ int counts[2*MAX_R+1][2*MAX_R+1]; /// < Scratch buffer for motion search
+ double *angles; ///< Scratch buffer for block angles
+ unsigned angles_size;
+ AVFrame *ref; ///< Previous frame
+ int rx; ///< Maximum horizontal shift
+ int ry; ///< Maximum vertical shift
+ int edge; ///< Edge fill method
+ int blocksize; ///< Size of blocks to compare
+ int contrast; ///< Contrast threshold
+ int search; ///< Motion search method
+ av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function
+ Transform last; ///< Transform from last frame
+ int refcount; ///< Number of reference frames (defines averaging window)
+ FILE *fp;
+ Transform avg;
+ int cw; ///< Crop motion search to this box
+ int ch;
+ int cx;
+ int cy;
+ char *filename; ///< Motion search detailed log filename
+ int opencl;
+#if CONFIG_OPENCL
+ DeshakeOpenclContext opencl_ctx;
+#endif
+ int (* transform)(AVFilterContext *ctx, int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv, enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out);
+} DeshakeContext;
+
+#endif /* AVFILTER_DESHAKE_H */
diff --git a/libavfilter/deshake_opencl.c b/libavfilter/deshake_opencl.c
new file mode 100644
index 0000000..2821248
--- /dev/null
+++ b/libavfilter/deshake_opencl.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * transform input video
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/dict.h"
+#include "libavutil/pixdesc.h"
+#include "deshake_opencl.h"
+#include "libavutil/opencl_internal.h"
+
+#define PLANE_NUM 3
+#define ROUND_TO_16(a) (((((a) - 1)/16)+1)*16)
+
+int ff_opencl_transform(AVFilterContext *ctx,
+ int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out)
+{
+ int ret = 0;
+ cl_int status;
+ DeshakeContext *deshake = ctx->priv;
+ float4 packed_matrix_lu = {matrix_y[0], matrix_y[1], matrix_y[2], matrix_y[5]};
+ float4 packed_matrix_ch = {matrix_uv[0], matrix_uv[1], matrix_uv[2], matrix_uv[5]};
+ size_t global_worksize_lu[2] = {(size_t)ROUND_TO_16(width), (size_t)ROUND_TO_16(height)};
+ size_t global_worksize_ch[2] = {(size_t)ROUND_TO_16(cw), (size_t)(2*ROUND_TO_16(ch))};
+ size_t local_worksize[2] = {16, 16};
+ FFOpenclParam param_lu = {0};
+ FFOpenclParam param_ch = {0};
+ param_lu.ctx = param_ch.ctx = ctx;
+ param_lu.kernel = deshake->opencl_ctx.kernel_luma;
+ param_ch.kernel = deshake->opencl_ctx.kernel_chroma;
+
+ if ((unsigned int)interpolate > INTERPOLATE_BIQUADRATIC) {
+ av_log(ctx, AV_LOG_ERROR, "Selected interpolate method is invalid\n");
+ return AVERROR(EINVAL);
+ }
+ ret = avpriv_opencl_set_parameter(&param_lu,
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(packed_matrix_lu),
+ FF_OPENCL_PARAM_INFO(interpolate),
+ FF_OPENCL_PARAM_INFO(fill),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(height),
+ FF_OPENCL_PARAM_INFO(width),
+ NULL);
+ if (ret < 0)
+ return ret;
+ ret = avpriv_opencl_set_parameter(&param_ch,
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(deshake->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(packed_matrix_ch),
+ FF_OPENCL_PARAM_INFO(interpolate),
+ FF_OPENCL_PARAM_INFO(fill),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(in->linesize[1]),
+ FF_OPENCL_PARAM_INFO(out->linesize[1]),
+ FF_OPENCL_PARAM_INFO(height),
+ FF_OPENCL_PARAM_INFO(width),
+ FF_OPENCL_PARAM_INFO(ch),
+ FF_OPENCL_PARAM_INFO(cw),
+ NULL);
+ if (ret < 0)
+ return ret;
+ status = clEnqueueNDRangeKernel(deshake->opencl_ctx.command_queue,
+ deshake->opencl_ctx.kernel_luma, 2, NULL,
+ global_worksize_lu, local_worksize, 0, NULL, NULL);
+ status |= clEnqueueNDRangeKernel(deshake->opencl_ctx.command_queue,
+ deshake->opencl_ctx.kernel_chroma, 2, NULL,
+ global_worksize_ch, local_worksize, 0, NULL, NULL);
+ if (status != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
+ return AVERROR_EXTERNAL;
+ }
+ ret = av_opencl_buffer_read_image(out->data, deshake->opencl_ctx.out_plane_size,
+ deshake->opencl_ctx.plane_num, deshake->opencl_ctx.cl_outbuf,
+ deshake->opencl_ctx.cl_outbuf_size);
+ if (ret < 0)
+ return ret;
+ return ret;
+}
+
+int ff_opencl_deshake_init(AVFilterContext *ctx)
+{
+ int ret = 0;
+ DeshakeContext *deshake = ctx->priv;
+ ret = av_opencl_init(NULL);
+ if (ret < 0)
+ return ret;
+ deshake->opencl_ctx.plane_num = PLANE_NUM;
+ deshake->opencl_ctx.command_queue = av_opencl_get_command_queue();
+ if (!deshake->opencl_ctx.command_queue) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to get OpenCL command queue in filter 'deshake'\n");
+ return AVERROR(EINVAL);
+ }
+ deshake->opencl_ctx.program = av_opencl_compile("avfilter_transform", NULL);
+ if (!deshake->opencl_ctx.program) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to compile program 'avfilter_transform'\n");
+ return AVERROR(EINVAL);
+ }
+ if (!deshake->opencl_ctx.kernel_luma) {
+ deshake->opencl_ctx.kernel_luma = clCreateKernel(deshake->opencl_ctx.program,
+ "avfilter_transform_luma", &ret);
+ if (ret != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'avfilter_transform_luma'\n");
+ return AVERROR(EINVAL);
+ }
+ }
+ if (!deshake->opencl_ctx.kernel_chroma) {
+ deshake->opencl_ctx.kernel_chroma = clCreateKernel(deshake->opencl_ctx.program,
+ "avfilter_transform_chroma", &ret);
+ if (ret != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'avfilter_transform_chroma'\n");
+ return AVERROR(EINVAL);
+ }
+ }
+ return ret;
+}
+
+void ff_opencl_deshake_uninit(AVFilterContext *ctx)
+{
+ DeshakeContext *deshake = ctx->priv;
+ av_opencl_buffer_release(&deshake->opencl_ctx.cl_inbuf);
+ av_opencl_buffer_release(&deshake->opencl_ctx.cl_outbuf);
+ clReleaseKernel(deshake->opencl_ctx.kernel_luma);
+ clReleaseKernel(deshake->opencl_ctx.kernel_chroma);
+ clReleaseProgram(deshake->opencl_ctx.program);
+ deshake->opencl_ctx.command_queue = NULL;
+ av_opencl_uninit();
+}
+
+int ff_opencl_deshake_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ int ret = 0;
+ AVFilterLink *link = ctx->inputs[0];
+ DeshakeContext *deshake = ctx->priv;
+ const int hshift = av_pix_fmt_desc_get(link->format)->log2_chroma_h;
+ int chroma_height = FF_CEIL_RSHIFT(link->h, hshift);
+
+ if ((!deshake->opencl_ctx.cl_inbuf) || (!deshake->opencl_ctx.cl_outbuf)) {
+ deshake->opencl_ctx.in_plane_size[0] = (in->linesize[0] * in->height);
+ deshake->opencl_ctx.in_plane_size[1] = (in->linesize[1] * chroma_height);
+ deshake->opencl_ctx.in_plane_size[2] = (in->linesize[2] * chroma_height);
+ deshake->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
+ deshake->opencl_ctx.out_plane_size[1] = (out->linesize[1] * chroma_height);
+ deshake->opencl_ctx.out_plane_size[2] = (out->linesize[2] * chroma_height);
+ deshake->opencl_ctx.cl_inbuf_size = deshake->opencl_ctx.in_plane_size[0] +
+ deshake->opencl_ctx.in_plane_size[1] +
+ deshake->opencl_ctx.in_plane_size[2];
+ deshake->opencl_ctx.cl_outbuf_size = deshake->opencl_ctx.out_plane_size[0] +
+ deshake->opencl_ctx.out_plane_size[1] +
+ deshake->opencl_ctx.out_plane_size[2];
+ if (!deshake->opencl_ctx.cl_inbuf) {
+ ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_inbuf,
+ deshake->opencl_ctx.cl_inbuf_size,
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ if (!deshake->opencl_ctx.cl_outbuf) {
+ ret = av_opencl_buffer_create(&deshake->opencl_ctx.cl_outbuf,
+ deshake->opencl_ctx.cl_outbuf_size,
+ CL_MEM_READ_WRITE, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ ret = av_opencl_buffer_write_image(deshake->opencl_ctx.cl_inbuf,
+ deshake->opencl_ctx.cl_inbuf_size,
+ 0, in->data,deshake->opencl_ctx.in_plane_size,
+ deshake->opencl_ctx.plane_num);
+ if(ret < 0)
+ return ret;
+ return ret;
+}
diff --git a/libavfilter/deshake_opencl.h b/libavfilter/deshake_opencl.h
new file mode 100644
index 0000000..5b0a241
--- /dev/null
+++ b/libavfilter/deshake_opencl.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DESHAKE_OPENCL_H
+#define AVFILTER_DESHAKE_OPENCL_H
+
+#include "deshake.h"
+
+typedef struct {
+ float x;
+ float y;
+ float z;
+ float w;
+} float4;
+
+int ff_opencl_deshake_init(AVFilterContext *ctx);
+
+void ff_opencl_deshake_uninit(AVFilterContext *ctx);
+
+int ff_opencl_deshake_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+
+int ff_opencl_transform(AVFilterContext *ctx,
+ int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out);
+
+#endif /* AVFILTER_DESHAKE_OPENCL_H */
diff --git a/libavfilter/deshake_opencl_kernel.h b/libavfilter/deshake_opencl_kernel.h
new file mode 100644
index 0000000..dd45d6f
--- /dev/null
+++ b/libavfilter/deshake_opencl_kernel.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DESHAKE_OPENCL_KERNEL_H
+#define AVFILTER_DESHAKE_OPENCL_KERNEL_H
+
+#include "libavutil/opencl.h"
+
+const char *ff_kernel_deshake_opencl = AV_OPENCL_KERNEL(
+inline unsigned char pixel(global const unsigned char *src, int x, int y,
+ int w, int h,int stride, unsigned char def)
+{
+ return (x < 0 || y < 0 || x >= w || y >= h) ? def : src[x + y * stride];
+}
+
+unsigned char interpolate_nearest(float x, float y, global const unsigned char *src,
+ int width, int height, int stride, unsigned char def)
+{
+ return pixel(src, (int)(x + 0.5f), (int)(y + 0.5f), width, height, stride, def);
+}
+
+unsigned char interpolate_bilinear(float x, float y, global const unsigned char *src,
+ int width, int height, int stride, unsigned char def)
+{
+ int x_c, x_f, y_c, y_f;
+ int v1, v2, v3, v4;
+ x_f = (int)x;
+ y_f = (int)y;
+ x_c = x_f + 1;
+ y_c = y_f + 1;
+
+ if (x_f < -1 || x_f > width || y_f < -1 || y_f > height) {
+ return def;
+ } else {
+ v4 = pixel(src, x_f, y_f, width, height, stride, def);
+ v2 = pixel(src, x_c, y_f, width, height, stride, def);
+ v3 = pixel(src, x_f, y_c, width, height, stride, def);
+ v1 = pixel(src, x_c, y_c, width, height, stride, def);
+ return (v1*(x - x_f)*(y - y_f) + v2*((x - x_f)*(y_c - y)) +
+ v3*(x_c - x)*(y - y_f) + v4*((x_c - x)*(y_c - y)));
+ }
+}
+
+unsigned char interpolate_biquadratic(float x, float y, global const unsigned char *src,
+ int width, int height, int stride, unsigned char def)
+{
+ int x_c, x_f, y_c, y_f;
+ unsigned char v1, v2, v3, v4;
+ float f1, f2, f3, f4;
+ x_f = (int)x;
+ y_f = (int)y;
+ x_c = x_f + 1;
+ y_c = y_f + 1;
+
+ if (x_f < - 1 || x_f > width || y_f < -1 || y_f > height)
+ return def;
+ else {
+ v4 = pixel(src, x_f, y_f, width, height, stride, def);
+ v2 = pixel(src, x_c, y_f, width, height, stride, def);
+ v3 = pixel(src, x_f, y_c, width, height, stride, def);
+ v1 = pixel(src, x_c, y_c, width, height, stride, def);
+
+ f1 = 1 - sqrt((x_c - x) * (y_c - y));
+ f2 = 1 - sqrt((x_c - x) * (y - y_f));
+ f3 = 1 - sqrt((x - x_f) * (y_c - y));
+ f4 = 1 - sqrt((x - x_f) * (y - y_f));
+ return (v1 * f1 + v2 * f2 + v3 * f3 + v4 * f4) / (f1 + f2 + f3 + f4);
+ }
+}
+
+inline const float clipf(float a, float amin, float amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+inline int mirror(int v, int m)
+{
+ while ((unsigned)v > (unsigned)m) {
+ v = -v;
+ if (v < 0)
+ v += 2 * m;
+ }
+ return v;
+}
+
+kernel void avfilter_transform_luma(global unsigned char *src,
+ global unsigned char *dst,
+ float4 matrix,
+ int interpolate,
+ int fill,
+ int src_stride_lu,
+ int dst_stride_lu,
+ int height,
+ int width)
+{
+ int x = get_global_id(0);
+ int y = get_global_id(1);
+ int idx_dst = y * dst_stride_lu + x;
+ unsigned char def = 0;
+ float x_s = x * matrix.x + y * matrix.y + matrix.z;
+ float y_s = x * (-matrix.y) + y * matrix.x + matrix.w;
+
+ if (x < width && y < height) {
+ switch (fill) {
+ case 0: //FILL_BLANK
+ def = 0;
+ break;
+ case 1: //FILL_ORIGINAL
+ def = src[y*src_stride_lu + x];
+ break;
+ case 2: //FILL_CLAMP
+ y_s = clipf(y_s, 0, height - 1);
+ x_s = clipf(x_s, 0, width - 1);
+ def = src[(int)y_s * src_stride_lu + (int)x_s];
+ break;
+ case 3: //FILL_MIRROR
+ y_s = mirror(y_s, height - 1);
+ x_s = mirror(x_s, width - 1);
+ def = src[(int)y_s * src_stride_lu + (int)x_s];
+ break;
+ }
+ switch (interpolate) {
+ case 0: //INTERPOLATE_NEAREST
+ dst[idx_dst] = interpolate_nearest(x_s, y_s, src, width, height, src_stride_lu, def);
+ break;
+ case 1: //INTERPOLATE_BILINEAR
+ dst[idx_dst] = interpolate_bilinear(x_s, y_s, src, width, height, src_stride_lu, def);
+ break;
+ case 2: //INTERPOLATE_BIQUADRATIC
+ dst[idx_dst] = interpolate_biquadratic(x_s, y_s, src, width, height, src_stride_lu, def);
+ break;
+ default:
+ return;
+ }
+ }
+}
+
+kernel void avfilter_transform_chroma(global unsigned char *src,
+ global unsigned char *dst,
+ float4 matrix,
+ int interpolate,
+ int fill,
+ int src_stride_lu,
+ int dst_stride_lu,
+ int src_stride_ch,
+ int dst_stride_ch,
+ int height,
+ int width,
+ int ch,
+ int cw)
+{
+
+ int x = get_global_id(0);
+ int y = get_global_id(1);
+ int pad_ch = get_global_size(1)>>1;
+ global unsigned char *dst_u = dst + height * dst_stride_lu;
+ global unsigned char *src_u = src + height * src_stride_lu;
+ global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
+ global unsigned char *src_v = src_u + ch * src_stride_ch;
+ src = y < pad_ch ? src_u : src_v;
+ dst = y < pad_ch ? dst_u : dst_v;
+ y = select(y - pad_ch, y, y < pad_ch);
+ float x_s = x * matrix.x + y * matrix.y + matrix.z;
+ float y_s = x * (-matrix.y) + y * matrix.x + matrix.w;
+ int idx_dst = y * dst_stride_ch + x;
+ unsigned char def;
+
+ if (x < cw && y < ch) {
+ switch (fill) {
+ case 0: //FILL_BLANK
+ def = 0;
+ break;
+ case 1: //FILL_ORIGINAL
+ def = src[y*src_stride_ch + x];
+ break;
+ case 2: //FILL_CLAMP
+ y_s = clipf(y_s, 0, ch - 1);
+ x_s = clipf(x_s, 0, cw - 1);
+ def = src[(int)y_s * src_stride_ch + (int)x_s];
+ break;
+ case 3: //FILL_MIRROR
+ y_s = mirror(y_s, ch - 1);
+ x_s = mirror(x_s, cw - 1);
+ def = src[(int)y_s * src_stride_ch + (int)x_s];
+ break;
+ }
+ switch (interpolate) {
+ case 0: //INTERPOLATE_NEAREST
+ dst[idx_dst] = interpolate_nearest(x_s, y_s, src, cw, ch, src_stride_ch, def);
+ break;
+ case 1: //INTERPOLATE_BILINEAR
+ dst[idx_dst] = interpolate_bilinear(x_s, y_s, src, cw, ch, src_stride_ch, def);
+ break;
+ case 2: //INTERPOLATE_BIQUADRATIC
+ dst[idx_dst] = interpolate_biquadratic(x_s, y_s, src, cw, ch, src_stride_ch, def);
+ break;
+ default:
+ return;
+ }
+ }
+}
+);
+
+#endif /* AVFILTER_DESHAKE_OPENCL_KERNEL_H */
diff --git a/libavfilter/drawutils.c b/libavfilter/drawutils.c
index e837760..4437c2c 100644
--- a/libavfilter/drawutils.c
+++ b/libavfilter/drawutils.c
@@ -1,18 +1,21 @@
/*
- * This file is part of Libav.
+ * Copyright 2011 Stefano Sabatini <stefano.sabatini-lala poste it>
+ * Copyright 2012 Nicolas George <nicolas.george normalesup org>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -23,29 +26,49 @@
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "drawutils.h"
+#include "formats.h"
enum { RED = 0, GREEN, BLUE, ALPHA };
-int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4],
- enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
- int *is_packed_rgba, uint8_t rgba_map_ptr[4])
+int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
{
- uint8_t rgba_map[4] = {0};
- int i;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
- int hsub = pix_desc->log2_chroma_w;
-
- *is_packed_rgba = 1;
switch (pix_fmt) {
+ case AV_PIX_FMT_0RGB:
case AV_PIX_FMT_ARGB: rgba_map[ALPHA] = 0; rgba_map[RED ] = 1; rgba_map[GREEN] = 2; rgba_map[BLUE ] = 3; break;
+ case AV_PIX_FMT_0BGR:
case AV_PIX_FMT_ABGR: rgba_map[ALPHA] = 0; rgba_map[BLUE ] = 1; rgba_map[GREEN] = 2; rgba_map[RED ] = 3; break;
+ case AV_PIX_FMT_RGB48LE:
+ case AV_PIX_FMT_RGB48BE:
+ case AV_PIX_FMT_RGBA64BE:
+ case AV_PIX_FMT_RGBA64LE:
+ case AV_PIX_FMT_RGB0:
case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_RGB24: rgba_map[RED ] = 0; rgba_map[GREEN] = 1; rgba_map[BLUE ] = 2; rgba_map[ALPHA] = 3; break;
+ case AV_PIX_FMT_BGR48LE:
+ case AV_PIX_FMT_BGR48BE:
+ case AV_PIX_FMT_BGRA64BE:
+ case AV_PIX_FMT_BGRA64LE:
case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_BGR0:
case AV_PIX_FMT_BGR24: rgba_map[BLUE ] = 0; rgba_map[GREEN] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
- default:
- *is_packed_rgba = 0;
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRP: rgba_map[GREEN] = 0; rgba_map[BLUE ] = 1; rgba_map[RED ] = 2; rgba_map[ALPHA] = 3; break;
+ default: /* unsupported */
+ return AVERROR(EINVAL);
}
+ return 0;
+}
+
+int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4],
+ enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
+ int *is_packed_rgba, uint8_t rgba_map_ptr[4])
+{
+ uint8_t rgba_map[4] = {0};
+ int i;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
+ int hsub = pix_desc->log2_chroma_w;
+
+ *is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0;
if (*is_packed_rgba) {
pixel_step[0] = (av_get_bits_per_pixel(pix_desc))>>3;
@@ -70,7 +93,7 @@ int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t
int hsub1 = (plane == 1 || plane == 2) ? hsub : 0;
pixel_step[plane] = 1;
- line_size = (w >> hsub1) * pixel_step[plane];
+ line_size = FF_CEIL_RSHIFT(w, hsub1) * pixel_step[plane];
line[plane] = av_malloc(line_size);
memset(line[plane], dst_color[plane], line_size);
}
@@ -89,11 +112,13 @@ void ff_draw_rectangle(uint8_t *dst[4], int dst_linesize[4],
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
+ int width = FF_CEIL_RSHIFT(w, hsub1);
+ int height = FF_CEIL_RSHIFT(h, vsub1);
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
- for (i = 0; i < (h >> vsub1); i++) {
+ for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
- src[plane], (w >> hsub1) * pixelstep[plane]);
+ src[plane], width * pixelstep[plane]);
p += dst_linesize[plane];
}
}
@@ -109,12 +134,435 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
for (plane = 0; plane < 4 && dst[plane]; plane++) {
int hsub1 = plane == 1 || plane == 2 ? hsub : 0;
int vsub1 = plane == 1 || plane == 2 ? vsub : 0;
+ int width = FF_CEIL_RSHIFT(w, hsub1);
+ int height = FF_CEIL_RSHIFT(h, vsub1);
p = dst[plane] + (y >> vsub1) * dst_linesize[plane];
- for (i = 0; i < (h >> vsub1); i++) {
+ for (i = 0; i < height; i++) {
memcpy(p + (x >> hsub1) * pixelstep[plane],
- src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), (w >> hsub1) * pixelstep[plane]);
+ src[plane] + src_linesize[plane]*(i+(y2>>vsub1)), width * pixelstep[plane]);
+ p += dst_linesize[plane];
+ }
+ }
+}
+
+int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
+ const AVComponentDescriptor *c;
+ unsigned i, nb_planes = 0;
+ int pixelstep[MAX_PLANES] = { 0 };
+
+ if (!desc->name)
+ return AVERROR(EINVAL);
+ if (desc->flags & ~(AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_PSEUDOPAL | AV_PIX_FMT_FLAG_ALPHA))
+ return AVERROR(ENOSYS);
+ for (i = 0; i < desc->nb_components; i++) {
+ c = &desc->comp[i];
+ /* for now, only 8-bits formats */
+ if (c->depth_minus1 != 8 - 1)
+ return AVERROR(ENOSYS);
+ if (c->plane >= MAX_PLANES)
+ return AVERROR(ENOSYS);
+ /* strange interleaving */
+ if (pixelstep[c->plane] != 0 &&
+ pixelstep[c->plane] != c->step_minus1 + 1)
+ return AVERROR(ENOSYS);
+ pixelstep[c->plane] = c->step_minus1 + 1;
+ if (pixelstep[c->plane] >= 8)
+ return AVERROR(ENOSYS);
+ nb_planes = FFMAX(nb_planes, c->plane + 1);
+ }
+ if ((desc->log2_chroma_w || desc->log2_chroma_h) && nb_planes < 3)
+ return AVERROR(ENOSYS); /* exclude NV12 and NV21 */
+ memset(draw, 0, sizeof(*draw));
+ draw->desc = desc;
+ draw->format = format;
+ draw->nb_planes = nb_planes;
+ memcpy(draw->pixelstep, pixelstep, sizeof(draw->pixelstep));
+ draw->hsub[1] = draw->hsub[2] = draw->hsub_max = desc->log2_chroma_w;
+ draw->vsub[1] = draw->vsub[2] = draw->vsub_max = desc->log2_chroma_h;
+ for (i = 0; i < ((desc->nb_components - 1) | 1); i++)
+ draw->comp_mask[desc->comp[i].plane] |=
+ 1 << (desc->comp[i].offset_plus1 - 1);
+ return 0;
+}
+
+void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4])
+{
+ unsigned i;
+ uint8_t rgba_map[4];
+
+ if (rgba != color->rgba)
+ memcpy(color->rgba, rgba, sizeof(color->rgba));
+ if ((draw->desc->flags & AV_PIX_FMT_FLAG_RGB) &&
+ ff_fill_rgba_map(rgba_map, draw->format) >= 0) {
+ if (draw->nb_planes == 1) {
+ for (i = 0; i < 4; i++)
+ color->comp[0].u8[rgba_map[i]] = rgba[i];
+ } else {
+ for (i = 0; i < 4; i++)
+ color->comp[rgba_map[i]].u8[0] = rgba[i];
+ }
+ } else if (draw->nb_planes == 3 || draw->nb_planes == 4) {
+ /* assume YUV */
+ color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
+ color->comp[1].u8[0] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0);
+ color->comp[2].u8[0] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0);
+ color->comp[3].u8[0] = rgba[3];
+ } else if (draw->format == AV_PIX_FMT_GRAY8 || draw->format == AV_PIX_FMT_GRAY8A) {
+ color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
+ color->comp[1].u8[0] = rgba[3];
+ } else {
+ av_log(NULL, AV_LOG_WARNING,
+ "Color conversion not implemented for %s\n", draw->desc->name);
+ memset(color, 128, sizeof(*color));
+ }
+}
+
+static uint8_t *pointer_at(FFDrawContext *draw, uint8_t *data[], int linesize[],
+ int plane, int x, int y)
+{
+ return data[plane] +
+ (y >> draw->vsub[plane]) * linesize[plane] +
+ (x >> draw->hsub[plane]) * draw->pixelstep[plane];
+}
+
+void ff_copy_rectangle2(FFDrawContext *draw,
+ uint8_t *dst[], int dst_linesize[],
+ uint8_t *src[], int src_linesize[],
+ int dst_x, int dst_y, int src_x, int src_y,
+ int w, int h)
+{
+ int plane, y, wp, hp;
+ uint8_t *p, *q;
+
+ for (plane = 0; plane < draw->nb_planes; plane++) {
+ p = pointer_at(draw, src, src_linesize, plane, src_x, src_y);
+ q = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
+ wp = FF_CEIL_RSHIFT(w, draw->hsub[plane]) * draw->pixelstep[plane];
+ hp = FF_CEIL_RSHIFT(h, draw->vsub[plane]);
+ for (y = 0; y < hp; y++) {
+ memcpy(q, p, wp);
+ p += src_linesize[plane];
+ q += dst_linesize[plane];
+ }
+ }
+}
+
+void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_x, int dst_y, int w, int h)
+{
+ int plane, x, y, wp, hp;
+ uint8_t *p0, *p;
+
+ for (plane = 0; plane < draw->nb_planes; plane++) {
+ p0 = pointer_at(draw, dst, dst_linesize, plane, dst_x, dst_y);
+ wp = FF_CEIL_RSHIFT(w, draw->hsub[plane]);
+ hp = FF_CEIL_RSHIFT(h, draw->vsub[plane]);
+ if (!hp)
+ return;
+ p = p0;
+ /* copy first line from color */
+ for (x = 0; x < wp; x++) {
+ memcpy(p, color->comp[plane].u8, draw->pixelstep[plane]);
+ p += draw->pixelstep[plane];
+ }
+ wp *= draw->pixelstep[plane];
+ /* copy next lines from first line */
+ p = p0 + dst_linesize[plane];
+ for (y = 1; y < hp; y++) {
+ memcpy(p, p0, wp);
p += dst_linesize[plane];
}
}
}
+
+/**
+ * Clip interval [x; x+w[ within [0; wmax[.
+ * The resulting w may be negative if the final interval is empty.
+ * dx, if not null, return the difference between in and out value of x.
+ */
+static void clip_interval(int wmax, int *x, int *w, int *dx)
+{
+ if (dx)
+ *dx = 0;
+ if (*x < 0) {
+ if (dx)
+ *dx = -*x;
+ *w += *x;
+ *x = 0;
+ }
+ if (*x + *w > wmax)
+ *w = wmax - *x;
+}
+
+/**
+ * Decompose w pixels starting at x
+ * into start + (w starting at x) + end
+ * with x and w aligned on multiples of 1<<sub.
+ */
+static void subsampling_bounds(int sub, int *x, int *w, int *start, int *end)
+{
+ int mask = (1 << sub) - 1;
+
+ *start = (-*x) & mask;
+ *x += *start;
+ *start = FFMIN(*start, *w);
+ *w -= *start;
+ *end = *w & mask;
+ *w >>= sub;
+}
+
+static int component_used(FFDrawContext *draw, int plane, int comp)
+{
+ return (draw->comp_mask[plane] >> comp) & 1;
+}
+
+/* If alpha is in the [ 0 ; 0x1010101 ] range,
+ then alpha * value is in the [ 0 ; 0xFFFFFFFF ] range,
+ and >> 24 gives a correct rounding. */
+static void blend_line(uint8_t *dst, unsigned src, unsigned alpha,
+ int dx, int w, unsigned hsub, int left, int right)
+{
+ unsigned asrc = alpha * src;
+ unsigned tau = 0x1010101 - alpha;
+ int x;
+
+ if (left) {
+ unsigned suba = (left * alpha) >> hsub;
+ *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24;
+ dst += dx;
+ }
+ for (x = 0; x < w; x++) {
+ *dst = (*dst * tau + asrc) >> 24;
+ dst += dx;
+ }
+ if (right) {
+ unsigned suba = (right * alpha) >> hsub;
+ *dst = (*dst * (0x1010101 - suba) + src * suba) >> 24;
+ }
+}
+
+void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_w, int dst_h,
+ int x0, int y0, int w, int h)
+{
+ unsigned alpha, nb_planes, nb_comp, plane, comp;
+ int w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y;
+ uint8_t *p0, *p;
+
+ /* TODO optimize if alpha = 0xFF */
+ clip_interval(dst_w, &x0, &w, NULL);
+ clip_interval(dst_h, &y0, &h, NULL);
+ if (w <= 0 || h <= 0 || !color->rgba[3])
+ return;
+ /* 0x10203 * alpha + 2 is in the [ 2 ; 0x1010101 - 2 ] range */
+ alpha = 0x10203 * color->rgba[3] + 0x2;
+ nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */
+ for (plane = 0; plane < nb_planes; plane++) {
+ nb_comp = draw->pixelstep[plane];
+ p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0);
+ w_sub = w;
+ h_sub = h;
+ x_sub = x0;
+ y_sub = y0;
+ subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right);
+ subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom);
+ for (comp = 0; comp < nb_comp; comp++) {
+ if (!component_used(draw, plane, comp))
+ continue;
+ p = p0 + comp;
+ if (top) {
+ blend_line(p, color->comp[plane].u8[comp], alpha >> 1,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ p += dst_linesize[plane];
+ }
+ for (y = 0; y < h_sub; y++) {
+ blend_line(p, color->comp[plane].u8[comp], alpha,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ p += dst_linesize[plane];
+ }
+ if (bottom)
+ blend_line(p, color->comp[plane].u8[comp], alpha >> 1,
+ draw->pixelstep[plane], w_sub,
+ draw->hsub[plane], left, right);
+ }
+ }
+}
+
+static void blend_pixel(uint8_t *dst, unsigned src, unsigned alpha,
+ uint8_t *mask, int mask_linesize, int l2depth,
+ unsigned w, unsigned h, unsigned shift, unsigned xm0)
+{
+ unsigned xm, x, y, t = 0;
+ unsigned xmshf = 3 - l2depth;
+ unsigned xmmod = 7 >> l2depth;
+ unsigned mbits = (1 << (1 << l2depth)) - 1;
+ unsigned mmult = 255 / mbits;
+
+ for (y = 0; y < h; y++) {
+ xm = xm0;
+ for (x = 0; x < w; x++) {
+ t += ((mask[xm >> xmshf] >> ((~xm & xmmod) << l2depth)) & mbits)
+ * mmult;
+ xm++;
+ }
+ mask += mask_linesize;
+ }
+ alpha = (t >> shift) * alpha;
+ *dst = ((0x1010101 - alpha) * *dst + alpha * src) >> 24;
+}
+
+static void blend_line_hv(uint8_t *dst, int dst_delta,
+ unsigned src, unsigned alpha,
+ uint8_t *mask, int mask_linesize, int l2depth, int w,
+ unsigned hsub, unsigned vsub,
+ int xm, int left, int right, int hband)
+{
+ int x;
+
+ if (left) {
+ blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
+ left, hband, hsub + vsub, xm);
+ dst += dst_delta;
+ xm += left;
+ }
+ for (x = 0; x < w; x++) {
+ blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
+ 1 << hsub, hband, hsub + vsub, xm);
+ dst += dst_delta;
+ xm += 1 << hsub;
+ }
+ if (right)
+ blend_pixel(dst, src, alpha, mask, mask_linesize, l2depth,
+ right, hband, hsub + vsub, xm);
+}
+
+void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h,
+ uint8_t *mask, int mask_linesize, int mask_w, int mask_h,
+ int l2depth, unsigned endianness, int x0, int y0)
+{
+ unsigned alpha, nb_planes, nb_comp, plane, comp;
+ int xm0, ym0, w_sub, h_sub, x_sub, y_sub, left, right, top, bottom, y;
+ uint8_t *p0, *p, *m;
+
+ clip_interval(dst_w, &x0, &mask_w, &xm0);
+ clip_interval(dst_h, &y0, &mask_h, &ym0);
+ mask += ym0 * mask_linesize;
+ if (mask_w <= 0 || mask_h <= 0 || !color->rgba[3])
+ return;
+ /* alpha is in the [ 0 ; 0x10203 ] range,
+ alpha * mask is in the [ 0 ; 0x1010101 - 4 ] range */
+ alpha = (0x10307 * color->rgba[3] + 0x3) >> 8;
+ nb_planes = (draw->nb_planes - 1) | 1; /* eliminate alpha */
+ for (plane = 0; plane < nb_planes; plane++) {
+ nb_comp = draw->pixelstep[plane];
+ p0 = pointer_at(draw, dst, dst_linesize, plane, x0, y0);
+ w_sub = mask_w;
+ h_sub = mask_h;
+ x_sub = x0;
+ y_sub = y0;
+ subsampling_bounds(draw->hsub[plane], &x_sub, &w_sub, &left, &right);
+ subsampling_bounds(draw->vsub[plane], &y_sub, &h_sub, &top, &bottom);
+ for (comp = 0; comp < nb_comp; comp++) {
+ if (!component_used(draw, plane, comp))
+ continue;
+ p = p0 + comp;
+ m = mask;
+ if (top) {
+ blend_line_hv(p, draw->pixelstep[plane],
+ color->comp[plane].u8[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, top);
+ p += dst_linesize[plane];
+ m += top * mask_linesize;
+ }
+ for (y = 0; y < h_sub; y++) {
+ blend_line_hv(p, draw->pixelstep[plane],
+ color->comp[plane].u8[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, 1 << draw->vsub[plane]);
+ p += dst_linesize[plane];
+ m += mask_linesize << draw->vsub[plane];
+ }
+ if (bottom)
+ blend_line_hv(p, draw->pixelstep[plane],
+ color->comp[plane].u8[comp], alpha,
+ m, mask_linesize, l2depth, w_sub,
+ draw->hsub[plane], draw->vsub[plane],
+ xm0, left, right, bottom);
+ }
+ }
+}
+
+int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir,
+ int value)
+{
+ unsigned shift = sub_dir ? draw->vsub_max : draw->hsub_max;
+
+ if (!shift)
+ return value;
+ if (round_dir >= 0)
+ value += round_dir ? (1 << shift) - 1 : 1 << (shift - 1);
+ return (value >> shift) << shift;
+}
+
+AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags)
+{
+ enum AVPixelFormat i;
+ FFDrawContext draw;
+ AVFilterFormats *fmts = NULL;
+
+ for (i = 0; av_pix_fmt_desc_get(i); i++)
+ if (ff_draw_init(&draw, i, flags) >= 0)
+ ff_add_format(&fmts, i);
+ return fmts;
+}
+
+#ifdef TEST
+
+#undef printf
+
+int main(void)
+{
+ enum AVPixelFormat f;
+ const AVPixFmtDescriptor *desc;
+ FFDrawContext draw;
+ FFDrawColor color;
+ int r, i;
+
+ for (f = 0; av_pix_fmt_desc_get(f); f++) {
+ desc = av_pix_fmt_desc_get(f);
+ if (!desc->name)
+ continue;
+ printf("Testing %s...%*s", desc->name,
+ (int)(16 - strlen(desc->name)), "");
+ r = ff_draw_init(&draw, f, 0);
+ if (r < 0) {
+ char buf[128];
+ av_strerror(r, buf, sizeof(buf));
+ printf("no: %s\n", buf);
+ continue;
+ }
+ ff_draw_color(&draw, &color, (uint8_t[]) { 1, 0, 0, 1 });
+ for (i = 0; i < sizeof(color); i++)
+ if (((uint8_t *)&color)[i] != 128)
+ break;
+ if (i == sizeof(color)) {
+ printf("fallback color\n");
+ continue;
+ }
+ printf("ok\n");
+ }
+ return 0;
+}
+
+#endif
diff --git a/libavfilter/drawutils.h b/libavfilter/drawutils.h
index 73f482e..5ffffe7 100644
--- a/libavfilter/drawutils.h
+++ b/libavfilter/drawutils.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,8 +25,11 @@
*/
#include <stdint.h>
+#include "avfilter.h"
#include "libavutil/pixfmt.h"
+int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt);
+
int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w,
uint8_t dst_color[4],
enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
@@ -40,4 +43,113 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
uint8_t *src[4], int src_linesize[4], int pixelstep[4],
int hsub, int vsub, int x, int y, int y2, int w, int h);
+#define MAX_PLANES 4
+
+typedef struct FFDrawContext {
+ const struct AVPixFmtDescriptor *desc;
+ enum AVPixelFormat format;
+ unsigned nb_planes;
+ int pixelstep[MAX_PLANES]; /*< offset between pixels */
+ uint8_t comp_mask[MAX_PLANES]; /*< bitmask of used non-alpha components */
+ uint8_t hsub[MAX_PLANES]; /*< horizontal subsampling */
+ uint8_t vsub[MAX_PLANES]; /*< vertical subsampling */
+ uint8_t hsub_max;
+ uint8_t vsub_max;
+} FFDrawContext;
+
+typedef struct FFDrawColor {
+ uint8_t rgba[4];
+ union {
+ uint32_t u32;
+ uint16_t u16;
+ uint8_t u8[4];
+ } comp[MAX_PLANES];
+} FFDrawColor;
+
+/**
+ * Init a draw context.
+ *
+ * Only a limited number of pixel formats are supported, if format is not
+ * supported the function will return an error.
+ * No flags currently defined.
+ * @return 0 for success, < 0 for error
+ */
+int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags);
+
+/**
+ * Prepare a color.
+ */
+void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4]);
+
+/**
+ * Copy a rectangle from an image to another.
+ *
+ * The coordinates must be as even as the subsampling requires.
+ */
+void ff_copy_rectangle2(FFDrawContext *draw,
+ uint8_t *dst[], int dst_linesize[],
+ uint8_t *src[], int src_linesize[],
+ int dst_x, int dst_y, int src_x, int src_y,
+ int w, int h);
+
+/**
+ * Fill a rectangle with an uniform color.
+ *
+ * The coordinates must be as even as the subsampling requires.
+ * The color needs to be inited with ff_draw_color.
+ */
+void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_x, int dst_y, int w, int h);
+
+/**
+ * Blend a rectangle with an uniform color.
+ */
+void ff_blend_rectangle(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[],
+ int dst_w, int dst_h,
+ int x0, int y0, int w, int h);
+
+/**
+ * Blend an alpha mask with an uniform color.
+ *
+ * @param draw draw context
+ * @param color color for the overlay;
+ * @param dst destination image
+ * @param dst_linesize line stride of the destination
+ * @param dst_w width of the destination image
+ * @param dst_h height of the destination image
+ * @param mask mask
+ * @param mask_linesize line stride of the mask
+ * @param mask_w width of the mask
+ * @param mask_h height of the mask
+ * @param l2depth log2 of depth of the mask (0 for 1bpp, 3 for 8bpp)
+ * @param endianness bit order of the mask (0: MSB to the left)
+ * @param x0 horizontal position of the overlay
+ * @param y0 vertical position of the overlay
+ */
+void ff_blend_mask(FFDrawContext *draw, FFDrawColor *color,
+ uint8_t *dst[], int dst_linesize[], int dst_w, int dst_h,
+ uint8_t *mask, int mask_linesize, int mask_w, int mask_h,
+ int l2depth, unsigned endianness, int x0, int y0);
+
+/**
+ * Round a dimension according to subsampling.
+ *
+ * @param draw draw context
+ * @param sub_dir 0 for horizontal, 1 for vertical
+ * @param round_dir 0 nearest, -1 round down, +1 round up
+ * @param value value to round
+ * @return the rounded value
+ */
+int ff_draw_round_to_sub(FFDrawContext *draw, int sub_dir, int round_dir,
+ int value);
+
+/**
+ * Return the list of pixel formats supported by the draw functions.
+ *
+ * The flags are the same as ff_draw_init, i.e., none currently.
+ */
+AVFilterFormats *ff_draw_supported_pixel_formats(unsigned flags);
+
#endif /* AVFILTER_DRAWUTILS_H */
diff --git a/libavfilter/dualinput.c b/libavfilter/dualinput.c
new file mode 100644
index 0000000..45f6810
--- /dev/null
+++ b/libavfilter/dualinput.c
@@ -0,0 +1,83 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "dualinput.h"
+#include "libavutil/timestamp.h"
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ FFDualInputContext *s = fs->opaque;
+ AVFrame *mainpic = NULL, *secondpic = NULL;
+ int ret = 0;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &mainpic, 1)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &secondpic, 0)) < 0) {
+ av_frame_free(&mainpic);
+ return ret;
+ }
+ av_assert0(mainpic);
+ mainpic->pts = av_rescale_q(mainpic->pts, s->fs.time_base, ctx->outputs[0]->time_base);
+ if (secondpic && !ctx->is_disabled)
+ mainpic = s->process(ctx, mainpic, secondpic);
+ ret = ff_filter_frame(ctx->outputs[0], mainpic);
+ av_assert1(ret != AVERROR(EAGAIN));
+ return ret;
+}
+
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
+{
+ FFFrameSyncIn *in = s->fs.in;
+
+ ff_framesync_init(&s->fs, ctx, 2);
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+ in[0].time_base = ctx->inputs[0]->time_base;
+ in[1].time_base = ctx->inputs[1]->time_base;
+ in[0].sync = 2;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_NULL;
+ in[1].after = EXT_INFINITY;
+
+ if (s->shortest)
+ in[0].after = in[1].after = EXT_STOP;
+ if (!s->repeatlast) {
+ in[1].after = EXT_NULL;
+ in[1].sync = 0;
+ }
+
+ return ff_framesync_configure(&s->fs);
+}
+
+int ff_dualinput_filter_frame(FFDualInputContext *s,
+ AVFilterLink *inlink, AVFrame *in)
+{
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
+}
+
+int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
+{
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+void ff_dualinput_uninit(FFDualInputContext *s)
+{
+ ff_framesync_uninit(&s->fs);
+}
diff --git a/libavfilter/dualinput.h b/libavfilter/dualinput.h
new file mode 100644
index 0000000..0ec0ea7
--- /dev/null
+++ b/libavfilter/dualinput.h
@@ -0,0 +1,46 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Double input streams helper for filters
+ */
+
+#ifndef AVFILTER_DUALINPUT_H
+#define AVFILTER_DUALINPUT_H
+
+#include <stdint.h>
+#include "bufferqueue.h"
+#include "framesync.h"
+#include "internal.h"
+
+typedef struct {
+ FFFrameSync fs;
+ FFFrameSyncIn second_input; /* must be immediately after fs */
+
+ AVFrame *(*process)(AVFilterContext *ctx, AVFrame *main, const AVFrame *second);
+ int shortest; ///< terminate stream when the second input terminates
+ int repeatlast; ///< repeat last second frame
+} FFDualInputContext;
+
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s);
+int ff_dualinput_filter_frame(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
+int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink);
+void ff_dualinput_uninit(FFDualInputContext *s);
+
+#endif /* AVFILTER_DUALINPUT_H */
diff --git a/libavfilter/f_ebur128.c b/libavfilter/f_ebur128.c
new file mode 100644
index 0000000..c18ae79
--- /dev/null
+++ b/libavfilter/f_ebur128.c
@@ -0,0 +1,933 @@
+/*
+ * Copyright (c) 2012 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * EBU R.128 implementation
+ * @see http://tech.ebu.ch/loudness
+ * @see https://www.youtube.com/watch?v=iuEtQqC-Sqo "EBU R128 Introduction - Florian Camerer"
+ * @todo implement start/stop/reset through filter command injection
+ * @todo support other frequencies to avoid resampling
+ */
+
+#include <math.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/dict.h"
+#include "libavutil/xga_font_data.h"
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "libswresample/swresample.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define MAX_CHANNELS 63
+
+/* pre-filter coefficients */
+#define PRE_B0 1.53512485958697
+#define PRE_B1 -2.69169618940638
+#define PRE_B2 1.19839281085285
+#define PRE_A1 -1.69065929318241
+#define PRE_A2 0.73248077421585
+
+/* RLB-filter coefficients */
+#define RLB_B0 1.0
+#define RLB_B1 -2.0
+#define RLB_B2 1.0
+#define RLB_A1 -1.99004745483398
+#define RLB_A2 0.99007225036621
+
+#define ABS_THRES -70 ///< silence gate: we discard anything below this absolute (LUFS) threshold
+#define ABS_UP_THRES 10 ///< upper loud limit to consider (ABS_THRES being the minimum)
+#define HIST_GRAIN 100 ///< defines histogram precision
+#define HIST_SIZE ((ABS_UP_THRES - ABS_THRES) * HIST_GRAIN + 1)
+
+/**
+ * A histogram is an array of HIST_SIZE hist_entry storing all the energies
+ * recorded (with an accuracy of 1/HIST_GRAIN) of the loudnesses from ABS_THRES
+ * (at 0) to ABS_UP_THRES (at HIST_SIZE-1).
+ * This fixed-size system avoids the need of a list of energies growing
+ * infinitely over the time and is thus more scalable.
+ */
+struct hist_entry {
+ int count; ///< how many times the corresponding value occurred
+ double energy; ///< E = 10^((L + 0.691) / 10)
+ double loudness; ///< L = -0.691 + 10 * log10(E)
+};
+
+struct integrator {
+ double *cache[MAX_CHANNELS]; ///< window of filtered samples (N ms)
+ int cache_pos; ///< focus on the last added bin in the cache array
+ double sum[MAX_CHANNELS]; ///< sum of the last N ms filtered samples (cache content)
+ int filled; ///< 1 if the cache is completely filled, 0 otherwise
+ double rel_threshold; ///< relative threshold
+ double sum_kept_powers; ///< sum of the powers (weighted sums) above absolute threshold
+ int nb_kept_powers; ///< number of sum above absolute threshold
+ struct hist_entry *histogram; ///< histogram of the powers, used to compute LRA and I
+};
+
+struct rect { int x, y, w, h; };
+
+typedef struct {
+ const AVClass *class; ///< AVClass context for log and options purpose
+
+ /* peak metering */
+ int peak_mode; ///< enabled peak modes
+ double *true_peaks; ///< true peaks per channel
+ double *sample_peaks; ///< sample peaks per channel
+ double *true_peaks_per_frame; ///< true peaks in a frame per channel
+#if CONFIG_SWRESAMPLE
+ SwrContext *swr_ctx; ///< over-sampling context for true peak metering
+ double *swr_buf; ///< resampled audio data for true peak metering
+ int swr_linesize;
+#endif
+
+ /* video */
+ int do_video; ///< 1 if video output enabled, 0 otherwise
+ int w, h; ///< size of the video output
+ struct rect text; ///< rectangle for the LU legend on the left
+ struct rect graph; ///< rectangle for the main graph in the center
+ struct rect gauge; ///< rectangle for the gauge on the right
+ AVFrame *outpicref; ///< output picture reference, updated regularly
+ int meter; ///< select a EBU mode between +9 and +18
+ int scale_range; ///< the range of LU values according to the meter
+ int y_zero_lu; ///< the y value (pixel position) for 0 LU
+ int *y_line_ref; ///< y reference values for drawing the LU lines in the graph and the gauge
+
+ /* audio */
+ int nb_channels; ///< number of channels in the input
+ double *ch_weighting; ///< channel weighting mapping
+ int sample_count; ///< sample count used for refresh frequency, reset at refresh
+
+ /* Filter caches.
+ * The mult by 3 in the following is for X[i], X[i-1] and X[i-2] */
+ double x[MAX_CHANNELS * 3]; ///< 3 input samples cache for each channel
+ double y[MAX_CHANNELS * 3]; ///< 3 pre-filter samples cache for each channel
+ double z[MAX_CHANNELS * 3]; ///< 3 RLB-filter samples cache for each channel
+
+#define I400_BINS (48000 * 4 / 10)
+#define I3000_BINS (48000 * 3)
+ struct integrator i400; ///< 400ms integrator, used for Momentary loudness (M), and Integrated loudness (I)
+ struct integrator i3000; ///< 3s integrator, used for Short term loudness (S), and Loudness Range (LRA)
+
+ /* I and LRA specific */
+ double integrated_loudness; ///< integrated loudness in LUFS (I)
+ double loudness_range; ///< loudness range in LU (LRA)
+ double lra_low, lra_high; ///< low and high LRA values
+
+ /* misc */
+ int loglevel; ///< log level for frame logging
+ int metadata; ///< whether or not to inject loudness results in frames
+} EBUR128Context;
+
+enum {
+ PEAK_MODE_NONE = 0,
+ PEAK_MODE_SAMPLES_PEAKS = 1<<1,
+ PEAK_MODE_TRUE_PEAKS = 1<<2,
+};
+
+#define OFFSET(x) offsetof(EBUR128Context, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+#define V AV_OPT_FLAG_VIDEO_PARAM
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption ebur128_options[] = {
+ { "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, V|F },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x480"}, 0, 0, V|F },
+ { "meter", "set scale meter (+9 to +18)", OFFSET(meter), AV_OPT_TYPE_INT, {.i64 = 9}, 9, 18, V|F },
+ { "framelog", "force frame logging level", OFFSET(loglevel), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, A|V|F, "level" },
+ { "info", "information logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_INFO}, INT_MIN, INT_MAX, A|V|F, "level" },
+ { "verbose", "verbose logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_VERBOSE}, INT_MIN, INT_MAX, A|V|F, "level" },
+ { "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, A|V|F },
+ { "peak", "set peak mode", OFFSET(peak_mode), AV_OPT_TYPE_FLAGS, {.i64 = PEAK_MODE_NONE}, 0, INT_MAX, A|F, "mode" },
+ { "none", "disable any peak mode", 0, AV_OPT_TYPE_CONST, {.i64 = PEAK_MODE_NONE}, INT_MIN, INT_MAX, A|F, "mode" },
+ { "sample", "enable peak-sample mode", 0, AV_OPT_TYPE_CONST, {.i64 = PEAK_MODE_SAMPLES_PEAKS}, INT_MIN, INT_MAX, A|F, "mode" },
+ { "true", "enable true-peak mode", 0, AV_OPT_TYPE_CONST, {.i64 = PEAK_MODE_TRUE_PEAKS}, INT_MIN, INT_MAX, A|F, "mode" },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(ebur128);
+
+static const uint8_t graph_colors[] = {
+ 0xdd, 0x66, 0x66, // value above 0LU non reached
+ 0x66, 0x66, 0xdd, // value below 0LU non reached
+ 0x96, 0x33, 0x33, // value above 0LU reached
+ 0x33, 0x33, 0x96, // value below 0LU reached
+ 0xdd, 0x96, 0x96, // value above 0LU line non reached
+ 0x96, 0x96, 0xdd, // value below 0LU line non reached
+ 0xdd, 0x33, 0x33, // value above 0LU line reached
+ 0x33, 0x33, 0xdd, // value below 0LU line reached
+};
+
+static const uint8_t *get_graph_color(const EBUR128Context *ebur128, int v, int y)
+{
+ const int below0 = y > ebur128->y_zero_lu;
+ const int reached = y >= v;
+ const int line = ebur128->y_line_ref[y] || y == ebur128->y_zero_lu;
+ const int colorid = 4*line + 2*reached + below0;
+ return graph_colors + 3*colorid;
+}
+
+static inline int lu_to_y(const EBUR128Context *ebur128, double v)
+{
+ v += 2 * ebur128->meter; // make it in range [0;...]
+ v = av_clipf(v, 0, ebur128->scale_range); // make sure it's in the graph scale
+ v = ebur128->scale_range - v; // invert value (y=0 is on top)
+ return v * ebur128->graph.h / ebur128->scale_range; // rescale from scale range to px height
+}
+
+#define FONT8 0
+#define FONT16 1
+
+static const uint8_t font_colors[] = {
+ 0xdd, 0xdd, 0x00,
+ 0x00, 0x96, 0x96,
+};
+
+static void drawtext(AVFrame *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...)
+{
+ int i;
+ char buf[128] = {0};
+ const uint8_t *font;
+ int font_height;
+ va_list vl;
+
+ if (ftid == FONT16) font = avpriv_vga16_font, font_height = 16;
+ else if (ftid == FONT8) font = avpriv_cga_font, font_height = 8;
+ else return;
+
+ va_start(vl, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, vl);
+ va_end(vl);
+
+ for (i = 0; buf[i]; i++) {
+ int char_y, mask;
+ uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*3;
+
+ for (char_y = 0; char_y < font_height; char_y++) {
+ for (mask = 0x80; mask; mask >>= 1) {
+ if (font[buf[i] * font_height + char_y] & mask)
+ memcpy(p, color, 3);
+ else
+ memcpy(p, "\x00\x00\x00", 3);
+ p += 3;
+ }
+ p += pic->linesize[0] - 8*3;
+ }
+ }
+}
+
+static void drawline(AVFrame *pic, int x, int y, int len, int step)
+{
+ int i;
+ uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3;
+
+ for (i = 0; i < len; i++) {
+ memcpy(p, "\x00\xff\x00", 3);
+ p += step;
+ }
+}
+
+static int config_video_output(AVFilterLink *outlink)
+{
+ int i, x, y;
+ uint8_t *p;
+ AVFilterContext *ctx = outlink->src;
+ EBUR128Context *ebur128 = ctx->priv;
+ AVFrame *outpicref;
+
+ /* check if there is enough space to represent everything decently */
+ if (ebur128->w < 640 || ebur128->h < 480) {
+ av_log(ctx, AV_LOG_ERROR, "Video size %dx%d is too small, "
+ "minimum size is 640x480\n", ebur128->w, ebur128->h);
+ return AVERROR(EINVAL);
+ }
+ outlink->w = ebur128->w;
+ outlink->h = ebur128->h;
+
+#define PAD 8
+
+ /* configure text area position and size */
+ ebur128->text.x = PAD;
+ ebur128->text.y = 40;
+ ebur128->text.w = 3 * 8; // 3 characters
+ ebur128->text.h = ebur128->h - PAD - ebur128->text.y;
+
+ /* configure gauge position and size */
+ ebur128->gauge.w = 20;
+ ebur128->gauge.h = ebur128->text.h;
+ ebur128->gauge.x = ebur128->w - PAD - ebur128->gauge.w;
+ ebur128->gauge.y = ebur128->text.y;
+
+ /* configure graph position and size */
+ ebur128->graph.x = ebur128->text.x + ebur128->text.w + PAD;
+ ebur128->graph.y = ebur128->gauge.y;
+ ebur128->graph.w = ebur128->gauge.x - ebur128->graph.x - PAD;
+ ebur128->graph.h = ebur128->gauge.h;
+
+ /* graph and gauge share the LU-to-pixel code */
+ av_assert0(ebur128->graph.h == ebur128->gauge.h);
+
+ /* prepare the initial picref buffer */
+ av_frame_free(&ebur128->outpicref);
+ ebur128->outpicref = outpicref =
+ ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref)
+ return AVERROR(ENOMEM);
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ /* init y references values (to draw LU lines) */
+ ebur128->y_line_ref = av_calloc(ebur128->graph.h + 1, sizeof(*ebur128->y_line_ref));
+ if (!ebur128->y_line_ref)
+ return AVERROR(ENOMEM);
+
+ /* black background */
+ memset(outpicref->data[0], 0, ebur128->h * outpicref->linesize[0]);
+
+ /* draw LU legends */
+ drawtext(outpicref, PAD, PAD+16, FONT8, font_colors+3, " LU");
+ for (i = ebur128->meter; i >= -ebur128->meter * 2; i--) {
+ y = lu_to_y(ebur128, i);
+ x = PAD + (i < 10 && i > -10) * 8;
+ ebur128->y_line_ref[y] = i;
+ y -= 4; // -4 to center vertically
+ drawtext(outpicref, x, y + ebur128->graph.y, FONT8, font_colors+3,
+ "%c%d", i < 0 ? '-' : i > 0 ? '+' : ' ', FFABS(i));
+ }
+
+ /* draw graph */
+ ebur128->y_zero_lu = lu_to_y(ebur128, 0);
+ p = outpicref->data[0] + ebur128->graph.y * outpicref->linesize[0]
+ + ebur128->graph.x * 3;
+ for (y = 0; y < ebur128->graph.h; y++) {
+ const uint8_t *c = get_graph_color(ebur128, INT_MAX, y);
+
+ for (x = 0; x < ebur128->graph.w; x++)
+ memcpy(p + x*3, c, 3);
+ p += outpicref->linesize[0];
+ }
+
+ /* draw fancy rectangles around the graph and the gauge */
+#define DRAW_RECT(r) do { \
+ drawline(outpicref, r.x, r.y - 1, r.w, 3); \
+ drawline(outpicref, r.x, r.y + r.h, r.w, 3); \
+ drawline(outpicref, r.x - 1, r.y, r.h, outpicref->linesize[0]); \
+ drawline(outpicref, r.x + r.w, r.y, r.h, outpicref->linesize[0]); \
+} while (0)
+ DRAW_RECT(ebur128->graph);
+ DRAW_RECT(ebur128->gauge);
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
+ return 0;
+}
+
+static int config_audio_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ EBUR128Context *ebur128 = ctx->priv;
+
+ /* Force 100ms framing in case of metadata injection: the frames must have
+ * a granularity of the window overlap to be accurately exploited.
+ * As for the true peaks mode, it just simplifies the resampling buffer
+ * allocation and the lookup in it (since sample buffers differ in size, it
+ * can be more complex to integrate in the one-sample loop of
+ * filter_frame()). */
+ if (ebur128->metadata || (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS))
+ inlink->min_samples =
+ inlink->max_samples =
+ inlink->partial_buf_size = inlink->sample_rate / 10;
+ return 0;
+}
+
+static int config_audio_output(AVFilterLink *outlink)
+{
+ int i;
+ AVFilterContext *ctx = outlink->src;
+ EBUR128Context *ebur128 = ctx->priv;
+ const int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
+
+#define BACK_MASK (AV_CH_BACK_LEFT |AV_CH_BACK_CENTER |AV_CH_BACK_RIGHT| \
+ AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_BACK_RIGHT| \
+ AV_CH_SIDE_LEFT |AV_CH_SIDE_RIGHT| \
+ AV_CH_SURROUND_DIRECT_LEFT |AV_CH_SURROUND_DIRECT_RIGHT)
+
+ ebur128->nb_channels = nb_channels;
+ ebur128->ch_weighting = av_calloc(nb_channels, sizeof(*ebur128->ch_weighting));
+ if (!ebur128->ch_weighting)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_channels; i++) {
+ /* channel weighting */
+ const uint16_t chl = av_channel_layout_extract_channel(outlink->channel_layout, i);
+ if (chl & (AV_CH_LOW_FREQUENCY|AV_CH_LOW_FREQUENCY_2)) {
+ ebur128->ch_weighting[i] = 0;
+ } else if (chl & BACK_MASK) {
+ ebur128->ch_weighting[i] = 1.41;
+ } else {
+ ebur128->ch_weighting[i] = 1.0;
+ }
+
+ if (!ebur128->ch_weighting[i])
+ continue;
+
+ /* bins buffer for the two integration window (400ms and 3s) */
+ ebur128->i400.cache[i] = av_calloc(I400_BINS, sizeof(*ebur128->i400.cache[0]));
+ ebur128->i3000.cache[i] = av_calloc(I3000_BINS, sizeof(*ebur128->i3000.cache[0]));
+ if (!ebur128->i400.cache[i] || !ebur128->i3000.cache[i])
+ return AVERROR(ENOMEM);
+ }
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
+#if CONFIG_SWRESAMPLE
+ if (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS) {
+ int ret;
+
+ ebur128->swr_buf = av_malloc_array(nb_channels, 19200 * sizeof(double));
+ ebur128->true_peaks = av_calloc(nb_channels, sizeof(*ebur128->true_peaks));
+ ebur128->true_peaks_per_frame = av_calloc(nb_channels, sizeof(*ebur128->true_peaks_per_frame));
+ ebur128->swr_ctx = swr_alloc();
+ if (!ebur128->swr_buf || !ebur128->true_peaks ||
+ !ebur128->true_peaks_per_frame || !ebur128->swr_ctx)
+ return AVERROR(ENOMEM);
+
+ av_opt_set_int(ebur128->swr_ctx, "in_channel_layout", outlink->channel_layout, 0);
+ av_opt_set_int(ebur128->swr_ctx, "in_sample_rate", outlink->sample_rate, 0);
+ av_opt_set_sample_fmt(ebur128->swr_ctx, "in_sample_fmt", outlink->format, 0);
+
+ av_opt_set_int(ebur128->swr_ctx, "out_channel_layout", outlink->channel_layout, 0);
+ av_opt_set_int(ebur128->swr_ctx, "out_sample_rate", 192000, 0);
+ av_opt_set_sample_fmt(ebur128->swr_ctx, "out_sample_fmt", outlink->format, 0);
+
+ ret = swr_init(ebur128->swr_ctx);
+ if (ret < 0)
+ return ret;
+ }
+#endif
+
+ if (ebur128->peak_mode & PEAK_MODE_SAMPLES_PEAKS) {
+ ebur128->sample_peaks = av_calloc(nb_channels, sizeof(*ebur128->sample_peaks));
+ if (!ebur128->sample_peaks)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+#define ENERGY(loudness) (pow(10, ((loudness) + 0.691) / 10.))
+#define LOUDNESS(energy) (-0.691 + 10 * log10(energy))
+#define DBFS(energy) (20 * log10(energy))
+
+static struct hist_entry *get_histogram(void)
+{
+ int i;
+ struct hist_entry *h = av_calloc(HIST_SIZE, sizeof(*h));
+
+ if (!h)
+ return NULL;
+ for (i = 0; i < HIST_SIZE; i++) {
+ h[i].loudness = i / (double)HIST_GRAIN + ABS_THRES;
+ h[i].energy = ENERGY(h[i].loudness);
+ }
+ return h;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ EBUR128Context *ebur128 = ctx->priv;
+ AVFilterPad pad;
+
+ if (ebur128->loglevel != AV_LOG_INFO &&
+ ebur128->loglevel != AV_LOG_VERBOSE) {
+ if (ebur128->do_video || ebur128->metadata)
+ ebur128->loglevel = AV_LOG_VERBOSE;
+ else
+ ebur128->loglevel = AV_LOG_INFO;
+ }
+
+ if (!CONFIG_SWRESAMPLE && (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "True-peak mode requires libswresample to be performed\n");
+ return AVERROR(EINVAL);
+ }
+
+ // if meter is +9 scale, scale range is from -18 LU to +9 LU (or 3*9)
+ // if meter is +18 scale, scale range is from -36 LU to +18 LU (or 3*18)
+ ebur128->scale_range = 3 * ebur128->meter;
+
+ ebur128->i400.histogram = get_histogram();
+ ebur128->i3000.histogram = get_histogram();
+ if (!ebur128->i400.histogram || !ebur128->i3000.histogram)
+ return AVERROR(ENOMEM);
+
+ ebur128->integrated_loudness = ABS_THRES;
+ ebur128->loudness_range = 0;
+
+ /* insert output pads */
+ if (ebur128->do_video) {
+ pad = (AVFilterPad){
+ .name = av_strdup("out0"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_video_output,
+ };
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_outpad(ctx, 0, &pad);
+ }
+ pad = (AVFilterPad){
+ .name = av_asprintf("out%d", ebur128->do_video),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_audio_output,
+ };
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_outpad(ctx, ebur128->do_video, &pad);
+
+ /* summary */
+ av_log(ctx, AV_LOG_VERBOSE, "EBU +%d scale\n", ebur128->meter);
+
+ return 0;
+}
+
+#define HIST_POS(power) (int)(((power) - ABS_THRES) * HIST_GRAIN)
+
+/* loudness and power should be set such as loudness = -0.691 +
+ * 10*log10(power), we just avoid doing that calculus two times */
+static int gate_update(struct integrator *integ, double power,
+ double loudness, int gate_thres)
+{
+ int ipower;
+ double relative_threshold;
+ int gate_hist_pos;
+
+ /* update powers histograms by incrementing current power count */
+ ipower = av_clip(HIST_POS(loudness), 0, HIST_SIZE - 1);
+ integ->histogram[ipower].count++;
+
+ /* compute relative threshold and get its position in the histogram */
+ integ->sum_kept_powers += power;
+ integ->nb_kept_powers++;
+ relative_threshold = integ->sum_kept_powers / integ->nb_kept_powers;
+ if (!relative_threshold)
+ relative_threshold = 1e-12;
+ integ->rel_threshold = LOUDNESS(relative_threshold) + gate_thres;
+ gate_hist_pos = av_clip(HIST_POS(integ->rel_threshold), 0, HIST_SIZE - 1);
+
+ return gate_hist_pos;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
+{
+ int i, ch, idx_insample;
+ AVFilterContext *ctx = inlink->dst;
+ EBUR128Context *ebur128 = ctx->priv;
+ const int nb_channels = ebur128->nb_channels;
+ const int nb_samples = insamples->nb_samples;
+ const double *samples = (double *)insamples->data[0];
+ AVFrame *pic = ebur128->outpicref;
+
+#if CONFIG_SWRESAMPLE
+ if (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS) {
+ const double *swr_samples = ebur128->swr_buf;
+ int ret = swr_convert(ebur128->swr_ctx, (uint8_t**)&ebur128->swr_buf, 19200,
+ (const uint8_t **)insamples->data, nb_samples);
+ if (ret < 0)
+ return ret;
+ for (ch = 0; ch < nb_channels; ch++)
+ ebur128->true_peaks_per_frame[ch] = 0.0;
+ for (idx_insample = 0; idx_insample < ret; idx_insample++) {
+ for (ch = 0; ch < nb_channels; ch++) {
+ ebur128->true_peaks[ch] = FFMAX(ebur128->true_peaks[ch], FFABS(*swr_samples));
+ ebur128->true_peaks_per_frame[ch] = FFMAX(ebur128->true_peaks_per_frame[ch],
+ FFABS(*swr_samples));
+ swr_samples++;
+ }
+ }
+ }
+#endif
+
+ for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) {
+ const int bin_id_400 = ebur128->i400.cache_pos;
+ const int bin_id_3000 = ebur128->i3000.cache_pos;
+
+#define MOVE_TO_NEXT_CACHED_ENTRY(time) do { \
+ ebur128->i##time.cache_pos++; \
+ if (ebur128->i##time.cache_pos == I##time##_BINS) { \
+ ebur128->i##time.filled = 1; \
+ ebur128->i##time.cache_pos = 0; \
+ } \
+} while (0)
+
+ MOVE_TO_NEXT_CACHED_ENTRY(400);
+ MOVE_TO_NEXT_CACHED_ENTRY(3000);
+
+ for (ch = 0; ch < nb_channels; ch++) {
+ double bin;
+
+ if (ebur128->peak_mode & PEAK_MODE_SAMPLES_PEAKS)
+ ebur128->sample_peaks[ch] = FFMAX(ebur128->sample_peaks[ch], FFABS(*samples));
+
+ ebur128->x[ch * 3] = *samples++; // set X[i]
+
+ if (!ebur128->ch_weighting[ch])
+ continue;
+
+ /* Y[i] = X[i]*b0 + X[i-1]*b1 + X[i-2]*b2 - Y[i-1]*a1 - Y[i-2]*a2 */
+#define FILTER(Y, X, name) do { \
+ double *dst = ebur128->Y + ch*3; \
+ double *src = ebur128->X + ch*3; \
+ dst[2] = dst[1]; \
+ dst[1] = dst[0]; \
+ dst[0] = src[0]*name##_B0 + src[1]*name##_B1 + src[2]*name##_B2 \
+ - dst[1]*name##_A1 - dst[2]*name##_A2; \
+} while (0)
+
+ // TODO: merge both filters in one?
+ FILTER(y, x, PRE); // apply pre-filter
+ ebur128->x[ch * 3 + 2] = ebur128->x[ch * 3 + 1];
+ ebur128->x[ch * 3 + 1] = ebur128->x[ch * 3 ];
+ FILTER(z, y, RLB); // apply RLB-filter
+
+ bin = ebur128->z[ch * 3] * ebur128->z[ch * 3];
+
+ /* add the new value, and limit the sum to the cache size (400ms or 3s)
+ * by removing the oldest one */
+ ebur128->i400.sum [ch] = ebur128->i400.sum [ch] + bin - ebur128->i400.cache [ch][bin_id_400];
+ ebur128->i3000.sum[ch] = ebur128->i3000.sum[ch] + bin - ebur128->i3000.cache[ch][bin_id_3000];
+
+ /* override old cache entry with the new value */
+ ebur128->i400.cache [ch][bin_id_400 ] = bin;
+ ebur128->i3000.cache[ch][bin_id_3000] = bin;
+ }
+
+ /* For integrated loudness, gating blocks are 400ms long with 75%
+ * overlap (see BS.1770-2 p5), so a re-computation is needed each 100ms
+ * (4800 samples at 48kHz). */
+ if (++ebur128->sample_count == 4800) {
+ double loudness_400, loudness_3000;
+ double power_400 = 1e-12, power_3000 = 1e-12;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const int64_t pts = insamples->pts +
+ av_rescale_q(idx_insample, (AVRational){ 1, inlink->sample_rate },
+ outlink->time_base);
+
+ ebur128->sample_count = 0;
+
+#define COMPUTE_LOUDNESS(m, time) do { \
+ if (ebur128->i##time.filled) { \
+ /* weighting sum of the last <time> ms */ \
+ for (ch = 0; ch < nb_channels; ch++) \
+ power_##time += ebur128->ch_weighting[ch] * ebur128->i##time.sum[ch]; \
+ power_##time /= I##time##_BINS; \
+ } \
+ loudness_##time = LOUDNESS(power_##time); \
+} while (0)
+
+ COMPUTE_LOUDNESS(M, 400);
+ COMPUTE_LOUDNESS(S, 3000);
+
+ /* Integrated loudness */
+#define I_GATE_THRES -10 // initially defined to -8 LU in the first EBU standard
+
+ if (loudness_400 >= ABS_THRES) {
+ double integrated_sum = 0;
+ int nb_integrated = 0;
+ int gate_hist_pos = gate_update(&ebur128->i400, power_400,
+ loudness_400, I_GATE_THRES);
+
+ /* compute integrated loudness by summing the histogram values
+ * above the relative threshold */
+ for (i = gate_hist_pos; i < HIST_SIZE; i++) {
+ const int nb_v = ebur128->i400.histogram[i].count;
+ nb_integrated += nb_v;
+ integrated_sum += nb_v * ebur128->i400.histogram[i].energy;
+ }
+ if (nb_integrated)
+ ebur128->integrated_loudness = LOUDNESS(integrated_sum / nb_integrated);
+ }
+
+ /* LRA */
+#define LRA_GATE_THRES -20
+#define LRA_LOWER_PRC 10
+#define LRA_HIGHER_PRC 95
+
+ /* XXX: example code in EBU 3342 is ">=" but formula in BS.1770
+ * specs is ">" */
+ if (loudness_3000 >= ABS_THRES) {
+ int nb_powers = 0;
+ int gate_hist_pos = gate_update(&ebur128->i3000, power_3000,
+ loudness_3000, LRA_GATE_THRES);
+
+ for (i = gate_hist_pos; i < HIST_SIZE; i++)
+ nb_powers += ebur128->i3000.histogram[i].count;
+ if (nb_powers) {
+ int n, nb_pow;
+
+ /* get lower loudness to consider */
+ n = 0;
+ nb_pow = LRA_LOWER_PRC * nb_powers / 100. + 0.5;
+ for (i = gate_hist_pos; i < HIST_SIZE; i++) {
+ n += ebur128->i3000.histogram[i].count;
+ if (n >= nb_pow) {
+ ebur128->lra_low = ebur128->i3000.histogram[i].loudness;
+ break;
+ }
+ }
+
+ /* get higher loudness to consider */
+ n = nb_powers;
+ nb_pow = LRA_HIGHER_PRC * nb_powers / 100. + 0.5;
+ for (i = HIST_SIZE - 1; i >= 0; i--) {
+ n -= ebur128->i3000.histogram[i].count;
+ if (n < nb_pow) {
+ ebur128->lra_high = ebur128->i3000.histogram[i].loudness;
+ break;
+ }
+ }
+
+ // XXX: show low & high on the graph?
+ ebur128->loudness_range = ebur128->lra_high - ebur128->lra_low;
+ }
+ }
+
+#define LOG_FMT "M:%6.1f S:%6.1f I:%6.1f LUFS LRA:%6.1f LU"
+
+ /* push one video frame */
+ if (ebur128->do_video) {
+ int x, y, ret;
+ uint8_t *p;
+
+ const int y_loudness_lu_graph = lu_to_y(ebur128, loudness_3000 + 23);
+ const int y_loudness_lu_gauge = lu_to_y(ebur128, loudness_400 + 23);
+
+ /* draw the graph using the short-term loudness */
+ p = pic->data[0] + ebur128->graph.y*pic->linesize[0] + ebur128->graph.x*3;
+ for (y = 0; y < ebur128->graph.h; y++) {
+ const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_graph, y);
+
+ memmove(p, p + 3, (ebur128->graph.w - 1) * 3);
+ memcpy(p + (ebur128->graph.w - 1) * 3, c, 3);
+ p += pic->linesize[0];
+ }
+
+ /* draw the gauge using the momentary loudness */
+ p = pic->data[0] + ebur128->gauge.y*pic->linesize[0] + ebur128->gauge.x*3;
+ for (y = 0; y < ebur128->gauge.h; y++) {
+ const uint8_t *c = get_graph_color(ebur128, y_loudness_lu_gauge, y);
+
+ for (x = 0; x < ebur128->gauge.w; x++)
+ memcpy(p + x*3, c, 3);
+ p += pic->linesize[0];
+ }
+
+ /* draw textual info */
+ drawtext(pic, PAD, PAD - PAD/2, FONT16, font_colors,
+ LOG_FMT " ", // padding to erase trailing characters
+ loudness_400, loudness_3000,
+ ebur128->integrated_loudness, ebur128->loudness_range);
+
+ /* set pts and push frame */
+ pic->pts = pts;
+ ret = ff_filter_frame(outlink, av_frame_clone(pic));
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ebur128->metadata) { /* happens only once per filter_frame call */
+ char metabuf[128];
+#define META_PREFIX "lavfi.r128."
+
+#define SET_META(name, var) do { \
+ snprintf(metabuf, sizeof(metabuf), "%.3f", var); \
+ av_dict_set(&insamples->metadata, name, metabuf, 0); \
+} while (0)
+
+#define SET_META_PEAK(name, ptype) do { \
+ if (ebur128->peak_mode & PEAK_MODE_ ## ptype ## _PEAKS) { \
+ char key[64]; \
+ for (ch = 0; ch < nb_channels; ch++) { \
+ snprintf(key, sizeof(key), \
+ META_PREFIX AV_STRINGIFY(name) "_peaks_ch%d", ch); \
+ SET_META(key, ebur128->name##_peaks[ch]); \
+ } \
+ } \
+} while (0)
+
+ SET_META(META_PREFIX "M", loudness_400);
+ SET_META(META_PREFIX "S", loudness_3000);
+ SET_META(META_PREFIX "I", ebur128->integrated_loudness);
+ SET_META(META_PREFIX "LRA", ebur128->loudness_range);
+ SET_META(META_PREFIX "LRA.low", ebur128->lra_low);
+ SET_META(META_PREFIX "LRA.high", ebur128->lra_high);
+
+ SET_META_PEAK(sample, SAMPLES);
+ SET_META_PEAK(true, TRUE);
+ }
+
+ av_log(ctx, ebur128->loglevel, "t: %-10s " LOG_FMT,
+ av_ts2timestr(pts, &outlink->time_base),
+ loudness_400, loudness_3000,
+ ebur128->integrated_loudness, ebur128->loudness_range);
+
+#define PRINT_PEAKS(str, sp, ptype) do { \
+ if (ebur128->peak_mode & PEAK_MODE_ ## ptype ## _PEAKS) { \
+ av_log(ctx, ebur128->loglevel, " " str ":"); \
+ for (ch = 0; ch < nb_channels; ch++) \
+ av_log(ctx, ebur128->loglevel, " %5.1f", DBFS(sp[ch])); \
+ av_log(ctx, ebur128->loglevel, " dBFS"); \
+ } \
+} while (0)
+
+ PRINT_PEAKS("SPK", ebur128->sample_peaks, SAMPLES);
+ PRINT_PEAKS("FTPK", ebur128->true_peaks_per_frame, TRUE);
+ PRINT_PEAKS("TPK", ebur128->true_peaks, TRUE);
+ av_log(ctx, ebur128->loglevel, "\n");
+ }
+ }
+
+ return ff_filter_frame(ctx->outputs[ebur128->do_video], insamples);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ EBUR128Context *ebur128 = ctx->priv;
+ AVFilterFormats *formats;
+ AVFilterChannelLayouts *layouts;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE };
+ static const int input_srate[] = {48000, -1}; // ITU-R BS.1770 provides coeff only for 48kHz
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
+
+ /* set optional output video format */
+ if (ebur128->do_video) {
+ formats = ff_make_format_list(pix_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &outlink->in_formats);
+ outlink = ctx->outputs[1];
+ }
+
+ /* set input and output audio formats
+ * Note: ff_set_common_* functions are not used because they affect all the
+ * links, and thus break the video format negotiation */
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_formats);
+ ff_formats_ref(formats, &outlink->in_formats);
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
+ ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
+
+ formats = ff_make_format_list(input_srate);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_formats_ref(formats, &inlink->out_samplerates);
+ ff_formats_ref(formats, &outlink->in_samplerates);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ EBUR128Context *ebur128 = ctx->priv;
+
+ av_log(ctx, AV_LOG_INFO, "Summary:\n\n"
+ " Integrated loudness:\n"
+ " I: %5.1f LUFS\n"
+ " Threshold: %5.1f LUFS\n\n"
+ " Loudness range:\n"
+ " LRA: %5.1f LU\n"
+ " Threshold: %5.1f LUFS\n"
+ " LRA low: %5.1f LUFS\n"
+ " LRA high: %5.1f LUFS",
+ ebur128->integrated_loudness, ebur128->i400.rel_threshold,
+ ebur128->loudness_range, ebur128->i3000.rel_threshold,
+ ebur128->lra_low, ebur128->lra_high);
+
+#define PRINT_PEAK_SUMMARY(str, sp, ptype) do { \
+ int ch; \
+ double maxpeak; \
+ maxpeak = 0.0; \
+ if (ebur128->peak_mode & PEAK_MODE_ ## ptype ## _PEAKS) { \
+ for (ch = 0; ch < ebur128->nb_channels; ch++) \
+ maxpeak = FFMAX(maxpeak, sp[ch]); \
+ av_log(ctx, AV_LOG_INFO, "\n\n " str " peak:\n" \
+ " Peak: %5.1f dBFS", \
+ DBFS(maxpeak)); \
+ } \
+} while (0)
+
+ PRINT_PEAK_SUMMARY("Sample", ebur128->sample_peaks, SAMPLES);
+ PRINT_PEAK_SUMMARY("True", ebur128->true_peaks, TRUE);
+ av_log(ctx, AV_LOG_INFO, "\n");
+
+ av_freep(&ebur128->y_line_ref);
+ av_freep(&ebur128->ch_weighting);
+ av_freep(&ebur128->true_peaks);
+ av_freep(&ebur128->sample_peaks);
+ av_freep(&ebur128->true_peaks_per_frame);
+ av_freep(&ebur128->i400.histogram);
+ av_freep(&ebur128->i3000.histogram);
+ for (i = 0; i < ebur128->nb_channels; i++) {
+ av_freep(&ebur128->i400.cache[i]);
+ av_freep(&ebur128->i3000.cache[i]);
+ }
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+ av_frame_free(&ebur128->outpicref);
+#if CONFIG_SWRESAMPLE
+ av_freep(&ebur128->swr_buf);
+ swr_free(&ebur128->swr_ctx);
+#endif
+}
+
+static const AVFilterPad ebur128_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ .config_props = config_audio_input,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_ebur128 = {
+ .name = "ebur128",
+ .description = NULL_IF_CONFIG_SMALL("EBU R128 scanner."),
+ .priv_size = sizeof(EBUR128Context),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = ebur128_inputs,
+ .outputs = NULL,
+ .priv_class = &ebur128_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
diff --git a/libavfilter/f_interleave.c b/libavfilter/f_interleave.c
new file mode 100644
index 0000000..95401cf
--- /dev/null
+++ b/libavfilter/f_interleave.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio and video interleaver
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "formats.h"
+#include "internal.h"
+#include "audio.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int nb_inputs;
+ struct FFBufQueue *queues;
+} InterleaveContext;
+
+#define OFFSET(x) offsetof(InterleaveContext, x)
+
+#define DEFINE_OPTIONS(filt_name, flags_) \
+static const AVOption filt_name##_options[] = { \
+ { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
+ { "n", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
+ { NULL } \
+}
+
+inline static int push_frame(AVFilterContext *ctx)
+{
+ InterleaveContext *s = ctx->priv;
+ AVFrame *frame;
+ int i, queue_idx = -1;
+ int64_t pts_min = INT64_MAX;
+
+ /* look for oldest frame */
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ struct FFBufQueue *q = &s->queues[i];
+
+ if (!q->available && !ctx->inputs[i]->closed)
+ return 0;
+ if (q->available) {
+ frame = ff_bufqueue_peek(q, 0);
+ if (frame->pts < pts_min) {
+ pts_min = frame->pts;
+ queue_idx = i;
+ }
+ }
+ }
+
+ /* all inputs are closed */
+ if (queue_idx < 0)
+ return AVERROR_EOF;
+
+ frame = ff_bufqueue_get(&s->queues[queue_idx]);
+ av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
+ queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
+ return ff_filter_frame(ctx->outputs[0], frame);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ InterleaveContext *s = ctx->priv;
+ unsigned in_no = FF_INLINK_IDX(inlink);
+
+ if (frame->pts == AV_NOPTS_VALUE) {
+ av_log(ctx, AV_LOG_WARNING,
+ "NOPTS value for input frame cannot be accepted, frame discarded\n");
+ av_frame_free(&frame);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* queue frame */
+ frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
+ av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
+ frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, s->queues[in_no].available);
+ ff_bufqueue_add(ctx, &s->queues[in_no], frame);
+
+ return push_frame(ctx);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ InterleaveContext *s = ctx->priv;
+ const AVFilterPad *outpad = &ctx->filter->outputs[0];
+ int i;
+
+ s->queues = av_calloc(s->nb_inputs, sizeof(s->queues[0]));
+ if (!s->queues)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterPad inpad = { 0 };
+
+ inpad.name = av_asprintf("input%d", i);
+ if (!inpad.name)
+ return AVERROR(ENOMEM);
+ inpad.type = outpad->type;
+ inpad.filter_frame = filter_frame;
+
+ switch (outpad->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ inpad.get_video_buffer = ff_null_get_video_buffer; break;
+ case AVMEDIA_TYPE_AUDIO:
+ inpad.get_audio_buffer = ff_null_get_audio_buffer; break;
+ default:
+ av_assert0(0);
+ }
+ ff_insert_inpad(ctx, i, &inpad);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ InterleaveContext *s = ctx->priv;
+ int i;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ ff_bufqueue_discard_all(&s->queues[i]);
+ av_freep(&s->queues[i]);
+ av_freep(&ctx->input_pads[i].name);
+ }
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink0 = ctx->inputs[0];
+ int i;
+
+ if (outlink->type == AVMEDIA_TYPE_VIDEO) {
+ outlink->time_base = AV_TIME_BASE_Q;
+ outlink->w = inlink0->w;
+ outlink->h = inlink0->h;
+ outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
+ outlink->format = inlink0->format;
+ outlink->frame_rate = (AVRational) {1, 0};
+ for (i = 1; i < ctx->nb_inputs; i++) {
+ AVFilterLink *inlink = ctx->inputs[i];
+
+ if (outlink->w != inlink->w ||
+ outlink->h != inlink->h ||
+ outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "output link parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[i].name, inlink->w, inlink->h,
+ inlink->sample_aspect_ratio.num,
+ inlink->sample_aspect_ratio.den,
+ outlink->w, outlink->h,
+ outlink->sample_aspect_ratio.num,
+ outlink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ InterleaveContext *s = ctx->priv;
+ int i, ret;
+
+ for (i = 0; i < ctx->nb_inputs; i++) {
+ if (!s->queues[i].available && !ctx->inputs[i]->closed) {
+ ret = ff_request_frame(ctx->inputs[i]);
+ if (ret != AVERROR_EOF)
+ return ret;
+ }
+ }
+
+ return push_frame(ctx);
+}
+
+#if CONFIG_INTERLEAVE_FILTER
+
+DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(interleave);
+
+static const AVFilterPad interleave_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_interleave = {
+ .name = "interleave",
+ .description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
+ .priv_size = sizeof(InterleaveContext),
+ .init = init,
+ .uninit = uninit,
+ .outputs = interleave_outputs,
+ .priv_class = &interleave_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
+
+#endif
+
+#if CONFIG_AINTERLEAVE_FILTER
+
+DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(ainterleave);
+
+static const AVFilterPad ainterleave_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_ainterleave = {
+ .name = "ainterleave",
+ .description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
+ .priv_size = sizeof(InterleaveContext),
+ .init = init,
+ .uninit = uninit,
+ .outputs = ainterleave_outputs,
+ .priv_class = &ainterleave_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
+
+#endif
diff --git a/libavfilter/f_perms.c b/libavfilter/f_perms.c
new file mode 100644
index 0000000..188ce91
--- /dev/null
+++ b/libavfilter/f_perms.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/lfg.h"
+#include "libavutil/opt.h"
+#include "libavutil/random_seed.h"
+#include "audio.h"
+#include "video.h"
+
+enum mode {
+ MODE_NONE,
+ MODE_RO,
+ MODE_RW,
+ MODE_TOGGLE,
+ MODE_RANDOM,
+ NB_MODES
+};
+
+typedef struct {
+ const AVClass *class;
+ AVLFG lfg;
+ int64_t random_seed;
+ enum mode mode;
+} PermsContext;
+
+#define OFFSET(x) offsetof(PermsContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption options[] = {
+ { "mode", "select permissions mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_NONE}, MODE_NONE, NB_MODES-1, FLAGS, "mode" },
+ { "none", "do nothing", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_NONE}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "ro", "set all output frames read-only", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RO}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "rw", "set all output frames writable", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RW}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "toggle", "switch permissions", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_TOGGLE}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "random", "set permissions randomly", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_RANDOM}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "seed", "set the seed for the random mode", OFFSET(random_seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { NULL }
+};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PermsContext *perms = ctx->priv;
+
+ if (perms->mode == MODE_RANDOM) {
+ uint32_t seed;
+
+ if (perms->random_seed == -1)
+ perms->random_seed = av_get_random_seed();
+ seed = perms->random_seed;
+ av_log(ctx, AV_LOG_INFO, "random seed: 0x%08x\n", seed);
+ av_lfg_init(&perms->lfg, seed);
+ }
+
+ return 0;
+}
+
+enum perm { RO, RW };
+static const char * const perm_str[2] = { "RO", "RW" };
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ int ret;
+ AVFilterContext *ctx = inlink->dst;
+ PermsContext *perms = ctx->priv;
+ AVFrame *out = frame;
+ enum perm in_perm = av_frame_is_writable(frame) ? RW : RO;
+ enum perm out_perm;
+
+ switch (perms->mode) {
+ case MODE_TOGGLE: out_perm = in_perm == RO ? RW : RO; break;
+ case MODE_RANDOM: out_perm = av_lfg_get(&perms->lfg) & 1 ? RW : RO; break;
+ case MODE_RO: out_perm = RO; break;
+ case MODE_RW: out_perm = RW; break;
+ default: out_perm = in_perm; break;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "%s -> %s%s\n",
+ perm_str[in_perm], perm_str[out_perm],
+ in_perm == out_perm ? " (no-op)" : "");
+
+ if (in_perm == RO && out_perm == RW) {
+ if ((ret = av_frame_make_writable(frame)) < 0)
+ return ret;
+ } else if (in_perm == RW && out_perm == RO) {
+ out = av_frame_clone(frame);
+ if (!out)
+ return AVERROR(ENOMEM);
+ }
+
+ ret = ff_filter_frame(ctx->outputs[0], out);
+
+ if (in_perm == RW && out_perm == RO)
+ av_frame_free(&frame);
+ return ret;
+}
+
+#if CONFIG_APERMS_FILTER
+
+#define aperms_options options
+AVFILTER_DEFINE_CLASS(aperms);
+
+static const AVFilterPad aperms_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad aperms_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aperms = {
+ .name = "aperms",
+ .description = NULL_IF_CONFIG_SMALL("Set permissions for the output audio frame."),
+ .init = init,
+ .priv_size = sizeof(PermsContext),
+ .inputs = aperms_inputs,
+ .outputs = aperms_outputs,
+ .priv_class = &aperms_class,
+};
+#endif /* CONFIG_APERMS_FILTER */
+
+#if CONFIG_PERMS_FILTER
+
+#define perms_options options
+AVFILTER_DEFINE_CLASS(perms);
+
+static const AVFilterPad perms_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad perms_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_perms = {
+ .name = "perms",
+ .description = NULL_IF_CONFIG_SMALL("Set permissions for the output video frame."),
+ .init = init,
+ .priv_size = sizeof(PermsContext),
+ .inputs = perms_inputs,
+ .outputs = perms_outputs,
+ .priv_class = &perms_class,
+};
+#endif /* CONFIG_PERMS_FILTER */
diff --git a/libavfilter/f_select.c b/libavfilter/f_select.c
new file mode 100644
index 0000000..546a940
--- /dev/null
+++ b/libavfilter/f_select.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * filter for selecting which frame passes in the filterchain
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/fifo.h"
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixelutils.h"
+#include "avfilter.h"
+#include "audio.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+static const char *const var_names[] = {
+ "TB", ///< timebase
+
+ "pts", ///< original pts in the file of the frame
+ "start_pts", ///< first PTS in the stream, expressed in TB units
+ "prev_pts", ///< previous frame PTS
+ "prev_selected_pts", ///< previous selected frame PTS
+
+ "t", ///< timestamp expressed in seconds
+ "start_t", ///< first PTS in the stream, expressed in seconds
+ "prev_t", ///< previous frame time
+ "prev_selected_t", ///< previously selected time
+
+ "pict_type", ///< the type of picture in the movie
+ "I",
+ "P",
+ "B",
+ "S",
+ "SI",
+ "SP",
+ "BI",
+ "PICT_TYPE_I",
+ "PICT_TYPE_P",
+ "PICT_TYPE_B",
+ "PICT_TYPE_S",
+ "PICT_TYPE_SI",
+ "PICT_TYPE_SP",
+ "PICT_TYPE_BI",
+
+ "interlace_type", ///< the frame interlace type
+ "PROGRESSIVE",
+ "TOPFIRST",
+ "BOTTOMFIRST",
+
+ "consumed_samples_n",///< number of samples consumed by the filter (only audio)
+ "samples_n", ///< number of samples in the current frame (only audio)
+ "sample_rate", ///< sample rate (only audio)
+
+ "n", ///< frame number (starting from zero)
+ "selected_n", ///< selected frame number (starting from zero)
+ "prev_selected_n", ///< number of the last selected frame
+
+ "key", ///< tell if the frame is a key frame
+ "pos", ///< original position in the file of the frame
+
+ "scene",
+
+ NULL
+};
+
+enum var_name {
+ VAR_TB,
+
+ VAR_PTS,
+ VAR_START_PTS,
+ VAR_PREV_PTS,
+ VAR_PREV_SELECTED_PTS,
+
+ VAR_T,
+ VAR_START_T,
+ VAR_PREV_T,
+ VAR_PREV_SELECTED_T,
+
+ VAR_PICT_TYPE,
+ VAR_I,
+ VAR_P,
+ VAR_B,
+ VAR_S,
+ VAR_SI,
+ VAR_SP,
+ VAR_BI,
+ VAR_PICT_TYPE_I,
+ VAR_PICT_TYPE_P,
+ VAR_PICT_TYPE_B,
+ VAR_PICT_TYPE_S,
+ VAR_PICT_TYPE_SI,
+ VAR_PICT_TYPE_SP,
+ VAR_PICT_TYPE_BI,
+
+ VAR_INTERLACE_TYPE,
+ VAR_INTERLACE_TYPE_P,
+ VAR_INTERLACE_TYPE_T,
+ VAR_INTERLACE_TYPE_B,
+
+ VAR_CONSUMED_SAMPLES_N,
+ VAR_SAMPLES_N,
+ VAR_SAMPLE_RATE,
+
+ VAR_N,
+ VAR_SELECTED_N,
+ VAR_PREV_SELECTED_N,
+
+ VAR_KEY,
+ VAR_POS,
+
+ VAR_SCENE,
+
+ VAR_VARS_NB
+};
+
+typedef struct SelectContext {
+ const AVClass *class;
+ char *expr_str;
+ AVExpr *expr;
+ double var_values[VAR_VARS_NB];
+ int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
+ av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
+ double prev_mafd; ///< previous MAFD (scene detect only)
+ AVFrame *prev_picref; ///< previous frame (scene detect only)
+ double select;
+ int select_out; ///< mark the selected output pad index
+ int nb_outputs;
+} SelectContext;
+
+#define OFFSET(x) offsetof(SelectContext, x)
+#define DEFINE_OPTIONS(filt_name, FLAGS) \
+static const AVOption filt_name##_options[] = { \
+ { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
+ { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
+ { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
+ { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
+ { NULL } \
+}
+
+static int request_frame(AVFilterLink *outlink);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+ int i, ret;
+
+ if ((ret = av_expr_parse(&select->expr, select->expr_str,
+ var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
+ select->expr_str);
+ return ret;
+ }
+ select->do_scene_detect = !!strstr(select->expr_str, "scene");
+
+ for (i = 0; i < select->nb_outputs; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.name = av_asprintf("output%d", i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.type = ctx->filter->inputs[0].type;
+ pad.request_frame = request_frame;
+ ff_insert_outpad(ctx, i, &pad);
+ }
+
+ return 0;
+}
+
+#define INTERLACE_TYPE_P 0
+#define INTERLACE_TYPE_T 1
+#define INTERLACE_TYPE_B 2
+
+static int config_input(AVFilterLink *inlink)
+{
+ SelectContext *select = inlink->dst->priv;
+
+ select->var_values[VAR_N] = 0.0;
+ select->var_values[VAR_SELECTED_N] = 0.0;
+
+ select->var_values[VAR_TB] = av_q2d(inlink->time_base);
+
+ select->var_values[VAR_PREV_PTS] = NAN;
+ select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
+ select->var_values[VAR_PREV_SELECTED_T] = NAN;
+ select->var_values[VAR_PREV_T] = NAN;
+ select->var_values[VAR_START_PTS] = NAN;
+ select->var_values[VAR_START_T] = NAN;
+
+ select->var_values[VAR_I] = AV_PICTURE_TYPE_I;
+ select->var_values[VAR_P] = AV_PICTURE_TYPE_P;
+ select->var_values[VAR_B] = AV_PICTURE_TYPE_B;
+ select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
+ select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
+ select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
+ select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
+ select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
+ select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
+ select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
+ select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
+ select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;
+
+ select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
+ select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
+ select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;
+
+ select->var_values[VAR_PICT_TYPE] = NAN;
+ select->var_values[VAR_INTERLACE_TYPE] = NAN;
+ select->var_values[VAR_SCENE] = NAN;
+ select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
+ select->var_values[VAR_SAMPLES_N] = NAN;
+
+ select->var_values[VAR_SAMPLE_RATE] =
+ inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
+
+ if (select->do_scene_detect) {
+ select->sad = av_pixelutils_get_sad_fn(3, 3, 2, select); // 8x8 both sources aligned
+ if (!select->sad)
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
+{
+ double ret = 0;
+ SelectContext *select = ctx->priv;
+ AVFrame *prev_picref = select->prev_picref;
+
+ if (prev_picref &&
+ frame->height == prev_picref->height &&
+ frame->width == prev_picref->width) {
+ int x, y, nb_sad = 0;
+ int64_t sad = 0;
+ double mafd, diff;
+ uint8_t *p1 = frame->data[0];
+ uint8_t *p2 = prev_picref->data[0];
+ const int p1_linesize = frame->linesize[0];
+ const int p2_linesize = prev_picref->linesize[0];
+
+ for (y = 0; y < frame->height - 7; y += 8) {
+ for (x = 0; x < frame->width*3 - 7; x += 8) {
+ sad += select->sad(p1 + x, p1_linesize, p2 + x, p2_linesize);
+ nb_sad += 8 * 8;
+ }
+ p1 += 8 * p1_linesize;
+ p2 += 8 * p2_linesize;
+ }
+ emms_c();
+ mafd = nb_sad ? (double)sad / nb_sad : 0;
+ diff = fabs(mafd - select->prev_mafd);
+ ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
+ select->prev_mafd = mafd;
+ av_frame_free(&prev_picref);
+ }
+ select->prev_picref = av_frame_clone(frame);
+ return ret;
+}
+
+#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+
+static void select_frame(AVFilterContext *ctx, AVFrame *frame)
+{
+ SelectContext *select = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ double res;
+
+ if (isnan(select->var_values[VAR_START_PTS]))
+ select->var_values[VAR_START_PTS] = TS2D(frame->pts);
+ if (isnan(select->var_values[VAR_START_T]))
+ select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
+
+ select->var_values[VAR_N ] = inlink->frame_count;
+ select->var_values[VAR_PTS] = TS2D(frame->pts);
+ select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
+ select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
+ select->var_values[VAR_KEY] = frame->key_frame;
+
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_AUDIO:
+ select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
+ break;
+
+ case AVMEDIA_TYPE_VIDEO:
+ select->var_values[VAR_INTERLACE_TYPE] =
+ !frame->interlaced_frame ? INTERLACE_TYPE_P :
+ frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
+ select->var_values[VAR_PICT_TYPE] = frame->pict_type;
+ if (select->do_scene_detect) {
+ char buf[32];
+ select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
+ // TODO: document metadata
+ snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
+ av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
+ }
+ break;
+ }
+
+ select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
+ av_log(inlink->dst, AV_LOG_DEBUG,
+ "n:%f pts:%f t:%f key:%d",
+ select->var_values[VAR_N],
+ select->var_values[VAR_PTS],
+ select->var_values[VAR_T],
+ frame->key_frame);
+
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
+ (!frame->interlaced_frame) ? 'P' :
+ frame->top_field_first ? 'T' : 'B',
+ av_get_picture_type_char(frame->pict_type),
+ select->var_values[VAR_SCENE]);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
+ frame->nb_samples,
+ select->var_values[VAR_CONSUMED_SAMPLES_N]);
+ break;
+ }
+
+ if (res == 0) {
+ select->select_out = -1; /* drop */
+ } else if (isnan(res) || res < 0) {
+ select->select_out = 0; /* first output */
+ } else {
+ select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
+ }
+
+ av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
+
+ if (res) {
+ select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
+ select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
+ select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
+ select->var_values[VAR_SELECTED_N] += 1.0;
+ if (inlink->type == AVMEDIA_TYPE_AUDIO)
+ select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
+ }
+
+ select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
+ select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SelectContext *select = ctx->priv;
+
+ select_frame(ctx, frame);
+ if (select->select)
+ return ff_filter_frame(ctx->outputs[select->select_out], frame);
+
+ av_frame_free(&frame);
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SelectContext *select = ctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int out_no = FF_OUTLINK_IDX(outlink);
+
+ do {
+ int ret = ff_request_frame(inlink);
+ if (ret < 0)
+ return ret;
+ } while (select->select_out != out_no);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+ int i;
+
+ av_expr_free(select->expr);
+ select->expr = NULL;
+
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+
+ if (select->do_scene_detect) {
+ av_frame_free(&select->prev_picref);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+
+ if (!select->do_scene_detect) {
+ return ff_default_query_formats(ctx);
+ } else {
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ }
+ return 0;
+}
+
+#if CONFIG_ASELECT_FILTER
+
+DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(aselect);
+
+static av_cold int aselect_init(AVFilterContext *ctx)
+{
+ SelectContext *select = ctx->priv;
+ int ret;
+
+ if ((ret = init(ctx)) < 0)
+ return ret;
+
+ if (select->do_scene_detect) {
+ av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static const AVFilterPad avfilter_af_aselect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_aselect = {
+ .name = "aselect",
+ .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
+ .init = aselect_init,
+ .uninit = uninit,
+ .priv_size = sizeof(SelectContext),
+ .inputs = avfilter_af_aselect_inputs,
+ .priv_class = &aselect_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif /* CONFIG_ASELECT_FILTER */
+
+#if CONFIG_SELECT_FILTER
+
+DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
+AVFILTER_DEFINE_CLASS(select);
+
+static av_cold int select_init(AVFilterContext *ctx)
+{
+ int ret;
+
+ if ((ret = init(ctx)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static const AVFilterPad avfilter_vf_select_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_select = {
+ .name = "select",
+ .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
+ .init = select_init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(SelectContext),
+ .priv_class = &select_class,
+ .inputs = avfilter_vf_select_inputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif /* CONFIG_SELECT_FILTER */
diff --git a/libavfilter/f_sendcmd.c b/libavfilter/f_sendcmd.c
new file mode 100644
index 0000000..c30f49f
--- /dev/null
+++ b/libavfilter/f_sendcmd.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * send commands filter
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/file.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "avfiltergraph.h"
+#include "audio.h"
+#include "video.h"
+
+#define COMMAND_FLAG_ENTER 1
+#define COMMAND_FLAG_LEAVE 2
+
+static inline char *make_command_flags_str(AVBPrint *pbuf, int flags)
+{
+ static const char * const flag_strings[] = { "enter", "leave" };
+ int i, is_first = 1;
+
+ av_bprint_init(pbuf, 0, AV_BPRINT_SIZE_AUTOMATIC);
+ for (i = 0; i < FF_ARRAY_ELEMS(flag_strings); i++) {
+ if (flags & 1<<i) {
+ if (!is_first)
+ av_bprint_chars(pbuf, '+', 1);
+ av_bprintf(pbuf, "%s", flag_strings[i]);
+ is_first = 0;
+ }
+ }
+
+ return pbuf->str;
+}
+
+typedef struct {
+ int flags;
+ char *target, *command, *arg;
+ int index;
+} Command;
+
+typedef struct {
+ int64_t start_ts; ///< start timestamp expressed as microseconds units
+ int64_t end_ts; ///< end timestamp expressed as microseconds units
+ int index; ///< unique index for these interval commands
+ Command *commands;
+ int nb_commands;
+ int enabled; ///< current time detected inside this interval
+} Interval;
+
+typedef struct {
+ const AVClass *class;
+ Interval *intervals;
+ int nb_intervals;
+
+ char *commands_filename;
+ char *commands_str;
+} SendCmdContext;
+
+#define OFFSET(x) offsetof(SendCmdContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption options[] = {
+ { "commands", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "c", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "filename", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "f", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+#define SPACES " \f\t\n\r"
+
+static void skip_comments(const char **buf)
+{
+ while (**buf) {
+ /* skip leading spaces */
+ *buf += strspn(*buf, SPACES);
+ if (**buf != '#')
+ break;
+
+ (*buf)++;
+
+ /* skip comment until the end of line */
+ *buf += strcspn(*buf, "\n");
+ if (**buf)
+ (*buf)++;
+ }
+}
+
+#define COMMAND_DELIMS " \f\t\n\r,;"
+
+static int parse_command(Command *cmd, int cmd_count, int interval_count,
+ const char **buf, void *log_ctx)
+{
+ int ret;
+
+ memset(cmd, 0, sizeof(Command));
+ cmd->index = cmd_count;
+
+ /* format: [FLAGS] target command arg */
+ *buf += strspn(*buf, SPACES);
+
+ /* parse flags */
+ if (**buf == '[') {
+ (*buf)++; /* skip "[" */
+
+ while (**buf) {
+ int len = strcspn(*buf, "|+]");
+
+ if (!strncmp(*buf, "enter", strlen("enter"))) cmd->flags |= COMMAND_FLAG_ENTER;
+ else if (!strncmp(*buf, "leave", strlen("leave"))) cmd->flags |= COMMAND_FLAG_LEAVE;
+ else {
+ char flag_buf[64];
+ av_strlcpy(flag_buf, *buf, sizeof(flag_buf));
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Unknown flag '%s' in interval #%d, command #%d\n",
+ flag_buf, interval_count, cmd_count);
+ return AVERROR(EINVAL);
+ }
+ *buf += len;
+ if (**buf == ']')
+ break;
+ if (!strspn(*buf, "+|")) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid flags char '%c' in interval #%d, command #%d\n",
+ **buf, interval_count, cmd_count);
+ return AVERROR(EINVAL);
+ }
+ if (**buf)
+ (*buf)++;
+ }
+
+ if (**buf != ']') {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Missing flag terminator or extraneous data found at the end of flags "
+ "in interval #%d, command #%d\n", interval_count, cmd_count);
+ return AVERROR(EINVAL);
+ }
+ (*buf)++; /* skip "]" */
+ } else {
+ cmd->flags = COMMAND_FLAG_ENTER;
+ }
+
+ *buf += strspn(*buf, SPACES);
+ cmd->target = av_get_token(buf, COMMAND_DELIMS);
+ if (!cmd->target || !cmd->target[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No target specified in interval #%d, command #%d\n",
+ interval_count, cmd_count);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ *buf += strspn(*buf, SPACES);
+ cmd->command = av_get_token(buf, COMMAND_DELIMS);
+ if (!cmd->command || !cmd->command[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No command specified in interval #%d, command #%d\n",
+ interval_count, cmd_count);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ *buf += strspn(*buf, SPACES);
+ cmd->arg = av_get_token(buf, COMMAND_DELIMS);
+
+ return 1;
+
+fail:
+ av_freep(&cmd->target);
+ av_freep(&cmd->command);
+ av_freep(&cmd->arg);
+ return ret;
+}
+
+static int parse_commands(Command **cmds, int *nb_cmds, int interval_count,
+ const char **buf, void *log_ctx)
+{
+ int cmd_count = 0;
+ int ret, n = 0;
+ AVBPrint pbuf;
+
+ *cmds = NULL;
+ *nb_cmds = 0;
+
+ while (**buf) {
+ Command cmd;
+
+ if ((ret = parse_command(&cmd, cmd_count, interval_count, buf, log_ctx)) < 0)
+ return ret;
+ cmd_count++;
+
+ /* (re)allocate commands array if required */
+ if (*nb_cmds == n) {
+ n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */
+ *cmds = av_realloc_f(*cmds, n, 2*sizeof(Command));
+ if (!*cmds) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Could not (re)allocate command array\n");
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ (*cmds)[(*nb_cmds)++] = cmd;
+
+ *buf += strspn(*buf, SPACES);
+ if (**buf && **buf != ';' && **buf != ',') {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Missing separator or extraneous data found at the end of "
+ "interval #%d, in command #%d\n",
+ interval_count, cmd_count);
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Command was parsed as: flags:[%s] target:%s command:%s arg:%s\n",
+ make_command_flags_str(&pbuf, cmd.flags), cmd.target, cmd.command, cmd.arg);
+ return AVERROR(EINVAL);
+ }
+ if (**buf == ';')
+ break;
+ if (**buf == ',')
+ (*buf)++;
+ }
+
+ return 0;
+}
+
+#define DELIMS " \f\t\n\r,;"
+
+static int parse_interval(Interval *interval, int interval_count,
+ const char **buf, void *log_ctx)
+{
+ char *intervalstr;
+ int ret;
+
+ *buf += strspn(*buf, SPACES);
+ if (!**buf)
+ return 0;
+
+ /* reset data */
+ memset(interval, 0, sizeof(Interval));
+ interval->index = interval_count;
+
+ /* format: INTERVAL COMMANDS */
+
+ /* parse interval */
+ intervalstr = av_get_token(buf, DELIMS);
+ if (intervalstr && intervalstr[0]) {
+ char *start, *end;
+
+ start = av_strtok(intervalstr, "-", &end);
+ if ((ret = av_parse_time(&interval->start_ts, start, 1)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid start time specification '%s' in interval #%d\n",
+ start, interval_count);
+ goto end;
+ }
+
+ if (end) {
+ if ((ret = av_parse_time(&interval->end_ts, end, 1)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid end time specification '%s' in interval #%d\n",
+ end, interval_count);
+ goto end;
+ }
+ } else {
+ interval->end_ts = INT64_MAX;
+ }
+ if (interval->end_ts < interval->start_ts) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid end time '%s' in interval #%d: "
+ "cannot be lesser than start time '%s'\n",
+ end, interval_count, start);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ } else {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No interval specified for interval #%d\n", interval_count);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ /* parse commands */
+ ret = parse_commands(&interval->commands, &interval->nb_commands,
+ interval_count, buf, log_ctx);
+
+end:
+ av_free(intervalstr);
+ return ret;
+}
+
+static int parse_intervals(Interval **intervals, int *nb_intervals,
+ const char *buf, void *log_ctx)
+{
+ int interval_count = 0;
+ int ret, n = 0;
+
+ *intervals = NULL;
+ *nb_intervals = 0;
+
+ while (1) {
+ Interval interval;
+
+ skip_comments(&buf);
+ if (!(*buf))
+ break;
+
+ if ((ret = parse_interval(&interval, interval_count, &buf, log_ctx)) < 0)
+ return ret;
+
+ buf += strspn(buf, SPACES);
+ if (*buf) {
+ if (*buf != ';') {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Missing terminator or extraneous data found at the end of interval #%d\n",
+ interval_count);
+ return AVERROR(EINVAL);
+ }
+ buf++; /* skip ';' */
+ }
+ interval_count++;
+
+ /* (re)allocate commands array if required */
+ if (*nb_intervals == n) {
+ n = FFMAX(16, 2*n); /* first allocation = 16, or double the number */
+ *intervals = av_realloc_f(*intervals, n, 2*sizeof(Interval));
+ if (!*intervals) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Could not (re)allocate intervals array\n");
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ (*intervals)[(*nb_intervals)++] = interval;
+ }
+
+ return 0;
+}
+
+static int cmp_intervals(const void *a, const void *b)
+{
+ const Interval *i1 = a;
+ const Interval *i2 = b;
+ int64_t ts_diff = i1->start_ts - i2->start_ts;
+ int ret;
+
+ ret = ts_diff > 0 ? 1 : ts_diff < 0 ? -1 : 0;
+ return ret == 0 ? i1->index - i2->index : ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SendCmdContext *sendcmd = ctx->priv;
+ int ret, i, j;
+
+ if (sendcmd->commands_filename && sendcmd->commands_str) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Only one of the filename or commands options must be specified\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (sendcmd->commands_filename) {
+ uint8_t *file_buf, *buf;
+ size_t file_bufsize;
+ ret = av_file_map(sendcmd->commands_filename,
+ &file_buf, &file_bufsize, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ /* create a 0-terminated string based on the read file */
+ buf = av_malloc(file_bufsize + 1);
+ if (!buf) {
+ av_file_unmap(file_buf, file_bufsize);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(buf, file_buf, file_bufsize);
+ buf[file_bufsize] = 0;
+ av_file_unmap(file_buf, file_bufsize);
+ sendcmd->commands_str = buf;
+ }
+
+ if ((ret = parse_intervals(&sendcmd->intervals, &sendcmd->nb_intervals,
+ sendcmd->commands_str, ctx)) < 0)
+ return ret;
+
+ qsort(sendcmd->intervals, sendcmd->nb_intervals, sizeof(Interval), cmp_intervals);
+
+ av_log(ctx, AV_LOG_DEBUG, "Parsed commands:\n");
+ for (i = 0; i < sendcmd->nb_intervals; i++) {
+ AVBPrint pbuf;
+ Interval *interval = &sendcmd->intervals[i];
+ av_log(ctx, AV_LOG_VERBOSE, "start_time:%f end_time:%f index:%d\n",
+ (double)interval->start_ts/1000000, (double)interval->end_ts/1000000, interval->index);
+ for (j = 0; j < interval->nb_commands; j++) {
+ Command *cmd = &interval->commands[j];
+ av_log(ctx, AV_LOG_VERBOSE,
+ " [%s] target:%s command:%s arg:%s index:%d\n",
+ make_command_flags_str(&pbuf, cmd->flags), cmd->target, cmd->command, cmd->arg, cmd->index);
+ }
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SendCmdContext *sendcmd = ctx->priv;
+ int i, j;
+
+ for (i = 0; i < sendcmd->nb_intervals; i++) {
+ Interval *interval = &sendcmd->intervals[i];
+ for (j = 0; j < interval->nb_commands; j++) {
+ Command *cmd = &interval->commands[j];
+ av_free(cmd->target);
+ av_free(cmd->command);
+ av_free(cmd->arg);
+ }
+ av_free(interval->commands);
+ }
+ av_freep(&sendcmd->intervals);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SendCmdContext *sendcmd = ctx->priv;
+ int64_t ts;
+ int i, j, ret;
+
+ if (ref->pts == AV_NOPTS_VALUE)
+ goto end;
+
+ ts = av_rescale_q(ref->pts, inlink->time_base, AV_TIME_BASE_Q);
+
+#define WITHIN_INTERVAL(ts, start_ts, end_ts) ((ts) >= (start_ts) && (ts) < (end_ts))
+
+ for (i = 0; i < sendcmd->nb_intervals; i++) {
+ Interval *interval = &sendcmd->intervals[i];
+ int flags = 0;
+
+ if (!interval->enabled && WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) {
+ flags += COMMAND_FLAG_ENTER;
+ interval->enabled = 1;
+ }
+ if (interval->enabled && !WITHIN_INTERVAL(ts, interval->start_ts, interval->end_ts)) {
+ flags += COMMAND_FLAG_LEAVE;
+ interval->enabled = 0;
+ }
+
+ if (flags) {
+ AVBPrint pbuf;
+ av_log(ctx, AV_LOG_VERBOSE,
+ "[%s] interval #%d start_ts:%f end_ts:%f ts:%f\n",
+ make_command_flags_str(&pbuf, flags), interval->index,
+ (double)interval->start_ts/1000000, (double)interval->end_ts/1000000,
+ (double)ts/1000000);
+
+ for (j = 0; flags && j < interval->nb_commands; j++) {
+ Command *cmd = &interval->commands[j];
+ char buf[1024];
+
+ if (cmd->flags & flags) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Processing command #%d target:%s command:%s arg:%s\n",
+ cmd->index, cmd->target, cmd->command, cmd->arg);
+ ret = avfilter_graph_send_command(inlink->graph,
+ cmd->target, cmd->command, cmd->arg,
+ buf, sizeof(buf),
+ AVFILTER_CMD_FLAG_ONE);
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Command reply for command #%d: ret:%s res:%s\n",
+ cmd->index, av_err2str(ret), buf);
+ }
+ }
+ }
+ }
+
+end:
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ case AVMEDIA_TYPE_AUDIO:
+ return ff_filter_frame(inlink->dst->outputs[0], ref);
+ }
+
+ return AVERROR(ENOSYS);
+}
+
+#if CONFIG_SENDCMD_FILTER
+
+#define sendcmd_options options
+AVFILTER_DEFINE_CLASS(sendcmd);
+
+static const AVFilterPad sendcmd_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad sendcmd_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_sendcmd = {
+ .name = "sendcmd",
+ .description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SendCmdContext),
+ .inputs = sendcmd_inputs,
+ .outputs = sendcmd_outputs,
+ .priv_class = &sendcmd_class,
+};
+
+#endif
+
+#if CONFIG_ASENDCMD_FILTER
+
+#define asendcmd_options options
+AVFILTER_DEFINE_CLASS(asendcmd);
+
+static const AVFilterPad asendcmd_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad asendcmd_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_asendcmd = {
+ .name = "asendcmd",
+ .description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(SendCmdContext),
+ .inputs = asendcmd_inputs,
+ .outputs = asendcmd_outputs,
+ .priv_class = &asendcmd_class,
+};
+
+#endif
diff --git a/libavfilter/f_zmq.c b/libavfilter/f_zmq.c
new file mode 100644
index 0000000..d6c3c65
--- /dev/null
+++ b/libavfilter/f_zmq.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * receive commands through libzeromq and broker them to filters
+ */
+
+#include <zmq.h>
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "avfiltergraph.h"
+#include "audio.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ void *zmq;
+ void *responder;
+ char *bind_address;
+ int command_count;
+} ZMQContext;
+
+#define OFFSET(x) offsetof(ZMQContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption options[] = {
+ { "bind_address", "set bind address", OFFSET(bind_address), AV_OPT_TYPE_STRING, {.str = "tcp://*:5555"}, 0, 0, FLAGS },
+ { "b", "set bind address", OFFSET(bind_address), AV_OPT_TYPE_STRING, {.str = "tcp://*:5555"}, 0, 0, FLAGS },
+ { NULL }
+};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ZMQContext *zmq = ctx->priv;
+
+ zmq->zmq = zmq_ctx_new();
+ if (!zmq->zmq) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not create ZMQ context: %s\n", zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ zmq->responder = zmq_socket(zmq->zmq, ZMQ_REP);
+ if (!zmq->responder) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not create ZMQ socket: %s\n", zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ if (zmq_bind(zmq->responder, zmq->bind_address) == -1) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not bind ZMQ socket to address '%s': %s\n",
+ zmq->bind_address, zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ zmq->command_count = -1;
+ return 0;
+}
+
+static void av_cold uninit(AVFilterContext *ctx)
+{
+ ZMQContext *zmq = ctx->priv;
+
+ zmq_close(zmq->responder);
+ zmq_ctx_destroy(zmq->zmq);
+}
+
+typedef struct {
+ char *target, *command, *arg;
+} Command;
+
+#define SPACES " \f\t\n\r"
+
+static int parse_command(Command *cmd, const char *command_str, void *log_ctx)
+{
+ const char **buf = &command_str;
+
+ cmd->target = av_get_token(buf, SPACES);
+ if (!cmd->target || !cmd->target[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No target specified in command '%s'\n", command_str);
+ return AVERROR(EINVAL);
+ }
+
+ cmd->command = av_get_token(buf, SPACES);
+ if (!cmd->command || !cmd->command[0]) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "No command specified in command '%s'\n", command_str);
+ return AVERROR(EINVAL);
+ }
+
+ cmd->arg = av_get_token(buf, SPACES);
+ return 0;
+}
+
+static int recv_msg(AVFilterContext *ctx, char **buf, int *buf_size)
+{
+ ZMQContext *zmq = ctx->priv;
+ zmq_msg_t msg;
+ int ret = 0;
+
+ if (zmq_msg_init(&msg) == -1) {
+ av_log(ctx, AV_LOG_WARNING,
+ "Could not initialize receive message: %s\n", zmq_strerror(errno));
+ return AVERROR_EXTERNAL;
+ }
+
+ if (zmq_msg_recv(&msg, zmq->responder, ZMQ_DONTWAIT) == -1) {
+ if (errno != EAGAIN)
+ av_log(ctx, AV_LOG_WARNING,
+ "Could not receive message: %s\n", zmq_strerror(errno));
+ ret = AVERROR_EXTERNAL;
+ goto end;
+ }
+
+ *buf_size = zmq_msg_size(&msg) + 1;
+ *buf = av_malloc(*buf_size);
+ if (!*buf) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ memcpy(*buf, zmq_msg_data(&msg), *buf_size);
+ (*buf)[*buf_size-1] = 0;
+
+end:
+ zmq_msg_close(&msg);
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ZMQContext *zmq = ctx->priv;
+
+ while (1) {
+ char cmd_buf[1024];
+ char *recv_buf, *send_buf;
+ int recv_buf_size;
+ Command cmd = {0};
+ int ret;
+
+ /* receive command */
+ if (recv_msg(ctx, &recv_buf, &recv_buf_size) < 0)
+ break;
+ zmq->command_count++;
+
+ /* parse command */
+ if (parse_command(&cmd, recv_buf, ctx) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Could not parse command #%d\n", zmq->command_count);
+ goto end;
+ }
+
+ /* process command */
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Processing command #%d target:%s command:%s arg:%s\n",
+ zmq->command_count, cmd.target, cmd.command, cmd.arg);
+ ret = avfilter_graph_send_command(inlink->graph,
+ cmd.target, cmd.command, cmd.arg,
+ cmd_buf, sizeof(cmd_buf),
+ AVFILTER_CMD_FLAG_ONE);
+ send_buf = av_asprintf("%d %s%s%s",
+ -ret, av_err2str(ret), cmd_buf[0] ? "\n" : "", cmd_buf);
+ if (!send_buf) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Sending command reply for command #%d:\n%s\n",
+ zmq->command_count, send_buf);
+ if (zmq_send(zmq->responder, send_buf, strlen(send_buf), 0) == -1)
+ av_log(ctx, AV_LOG_ERROR, "Failed to send reply for command #%d: %s\n",
+ zmq->command_count, zmq_strerror(ret));
+
+ end:
+ av_freep(&send_buf);
+ av_freep(&recv_buf);
+ recv_buf_size = 0;
+ av_freep(&cmd.target);
+ av_freep(&cmd.command);
+ av_freep(&cmd.arg);
+ }
+
+ return ff_filter_frame(ctx->outputs[0], ref);
+}
+
+#if CONFIG_ZMQ_FILTER
+
+#define zmq_options options
+AVFILTER_DEFINE_CLASS(zmq);
+
+static const AVFilterPad zmq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad zmq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_zmq = {
+ .name = "zmq",
+ .description = NULL_IF_CONFIG_SMALL("Receive commands through ZMQ and broker them to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(ZMQContext),
+ .inputs = zmq_inputs,
+ .outputs = zmq_outputs,
+ .priv_class = &zmq_class,
+};
+
+#endif
+
+#if CONFIG_AZMQ_FILTER
+
+#define azmq_options options
+AVFILTER_DEFINE_CLASS(azmq);
+
+static const AVFilterPad azmq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad azmq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+AVFilter ff_af_azmq = {
+ .name = "azmq",
+ .description = NULL_IF_CONFIG_SMALL("Receive commands through ZMQ and broker them to filters."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(ZMQContext),
+ .inputs = azmq_inputs,
+ .outputs = azmq_outputs,
+ .priv_class = &azmq_class,
+};
+
+#endif
diff --git a/libavfilter/fifo.c b/libavfilter/fifo.c
index a414585..e477cff 100644
--- a/libavfilter/fifo.c
+++ b/libavfilter/fifo.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -201,6 +201,7 @@ static int return_audio_frame(AVFilterContext *ctx)
break;
} else if (ret < 0)
return ret;
+ av_assert0(s->root.next); // If ff_request_frame() succeeded then we should have a frame
}
head = s->root.next->frame;
@@ -236,6 +237,7 @@ static int request_frame(AVFilterLink *outlink)
return return_audio_frame(outlink->src);
return ret;
}
+ av_assert0(fifo->root.next);
}
if (outlink->request_samples) {
@@ -252,7 +254,6 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
.filter_frame = add_to_queue,
},
{ NULL }
@@ -284,7 +285,6 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = add_to_queue,
},
{ NULL }
diff --git a/libavfilter/filtfmts.c b/libavfilter/filtfmts.c
index 40649c7..c1025b9 100644
--- a/libavfilter/filtfmts.c
+++ b/libavfilter/filtfmts.c
@@ -1,31 +1,72 @@
/*
* Copyright (c) 2009 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
+#include "libavutil/channel_layout.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/samplefmt.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/formats.h"
+static void print_formats(AVFilterContext *filter_ctx)
+{
+ int i, j;
+
+#define PRINT_FMTS(inout, outin, INOUT) \
+ for (i = 0; i < filter_ctx->nb_##inout##puts; i++) { \
+ if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) { \
+ AVFilterFormats *fmts = \
+ filter_ctx->inout##puts[i]->outin##_formats; \
+ for (j = 0; j < fmts->nb_formats; j++) \
+ if(av_get_pix_fmt_name(fmts->formats[j])) \
+ printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
+ i, avfilter_pad_get_name(filter_ctx->inout##put_pads, i), \
+ av_get_pix_fmt_name(fmts->formats[j])); \
+ } else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \
+ AVFilterFormats *fmts; \
+ AVFilterChannelLayouts *layouts; \
+ \
+ fmts = filter_ctx->inout##puts[i]->outin##_formats; \
+ for (j = 0; j < fmts->nb_formats; j++) \
+ printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
+ i, avfilter_pad_get_name(filter_ctx->inout##put_pads, i), \
+ av_get_sample_fmt_name(fmts->formats[j])); \
+ \
+ layouts = filter_ctx->inout##puts[i]->outin##_channel_layouts; \
+ for (j = 0; j < layouts->nb_channel_layouts; j++) { \
+ char buf[256]; \
+ av_get_channel_layout_string(buf, sizeof(buf), -1, \
+ layouts->channel_layouts[j]); \
+ printf(#INOUT "PUT[%d] %s: chlayout:%s\n", \
+ i, avfilter_pad_get_name(filter_ctx->inout##put_pads, i), buf); \
+ } \
+ } \
+ } \
+
+ PRINT_FMTS(in, out, IN);
+ PRINT_FMTS(out, in, OUT);
+}
+
int main(int argc, char **argv)
{
AVFilter *filter;
@@ -33,17 +74,17 @@ int main(int argc, char **argv)
AVFilterGraph *graph_ctx;
const char *filter_name;
const char *filter_args = NULL;
- int i, j;
+ int i;
av_log_set_level(AV_LOG_DEBUG);
- if (!argv[1]) {
+ if (argc < 2) {
fprintf(stderr, "Missing filter name as argument\n");
return 1;
}
filter_name = argv[1];
- if (argv[2])
+ if (argc > 2)
filter_args = argv[2];
/* allocate graph */
@@ -74,12 +115,12 @@ int main(int argc, char **argv)
/* create a link for each of the input pads */
for (i = 0; i < filter_ctx->nb_inputs; i++) {
AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
- link->type = avfilter_pad_get_type(filter_ctx->filter->inputs, i);
+ link->type = avfilter_pad_get_type(filter_ctx->input_pads, i);
filter_ctx->inputs[i] = link;
}
for (i = 0; i < filter_ctx->nb_outputs; i++) {
AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
- link->type = avfilter_pad_get_type(filter_ctx->filter->outputs, i);
+ link->type = avfilter_pad_get_type(filter_ctx->output_pads, i);
filter_ctx->outputs[i] = link;
}
@@ -88,23 +129,7 @@ int main(int argc, char **argv)
else
ff_default_query_formats(filter_ctx);
- /* print the supported formats in input */
- for (i = 0; i < filter_ctx->nb_inputs; i++) {
- AVFilterFormats *fmts = filter_ctx->inputs[i]->out_formats;
- for (j = 0; j < fmts->nb_formats; j++)
- printf("INPUT[%d] %s: %s\n",
- i, avfilter_pad_get_name(filter_ctx->filter->inputs, i),
- av_get_pix_fmt_name(fmts->formats[j]));
- }
-
- /* print the supported formats in output */
- for (i = 0; i < filter_ctx->nb_outputs; i++) {
- AVFilterFormats *fmts = filter_ctx->outputs[i]->in_formats;
- for (j = 0; j < fmts->nb_formats; j++)
- printf("OUTPUT[%d] %s: %s\n",
- i, avfilter_pad_get_name(filter_ctx->filter->outputs, i),
- av_get_pix_fmt_name(fmts->formats[j]));
- }
+ print_formats(filter_ctx);
avfilter_free(filter_ctx);
avfilter_graph_free(&graph_ctx);
diff --git a/libavfilter/formats.c b/libavfilter/formats.c
index 24a4fab..8160429 100644
--- a/libavfilter/formats.c
+++ b/libavfilter/formats.c
@@ -2,29 +2,35 @@
* Filter layer - format negotiation
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "formats.h"
+#define KNOWN(l) (!FF_LAYOUT2COUNT(l)) /* for readability */
+
/**
* Add all refs from a to ret and destroy a.
*/
@@ -33,8 +39,8 @@ do { \
type ***tmp; \
int i; \
\
- if (!(tmp = av_realloc(ret->refs, \
- sizeof(*tmp) * (ret->refcount + a->refcount)))) \
+ if (!(tmp = av_realloc_array(ret->refs, ret->refcount + a->refcount, \
+ sizeof(*tmp)))) \
goto fail; \
ret->refs = tmp; \
\
@@ -60,15 +66,21 @@ do {
goto fail; \
\
if (count) { \
- if (!(ret->fmts = av_malloc(sizeof(*ret->fmts) * count))) \
+ if (!(ret->fmts = av_malloc_array(count, sizeof(*ret->fmts)))) \
goto fail; \
for (i = 0; i < a->nb; i++) \
for (j = 0; j < b->nb; j++) \
- if (a->fmts[i] == b->fmts[j]) \
+ if (a->fmts[i] == b->fmts[j]) { \
+ if(k >= FFMIN(a->nb, b->nb)){ \
+ av_log(NULL, AV_LOG_ERROR, "Duplicate formats in avfilter_merge_formats() detected\n"); \
+ av_free(ret->fmts); \
+ av_free(ret); \
+ return NULL; \
+ } \
ret->fmts[k++] = a->fmts[i]; \
- \
- ret->nb = k; \
+ } \
} \
+ ret->nb = k; \
/* check that there was at least one common format */ \
if (!ret->nb) \
goto fail; \
@@ -77,13 +89,41 @@ do {
MERGE_REF(ret, b, fmts, type, fail); \
} while (0)
-AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b)
+AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
+ enum AVMediaType type)
{
AVFilterFormats *ret = NULL;
+ int i, j;
+ int alpha1=0, alpha2=0;
+ int chroma1=0, chroma2=0;
if (a == b)
return a;
+ /* Do not lose chroma or alpha in merging.
+ It happens if both lists have formats with chroma (resp. alpha), but
+ the only formats in common do not have it (e.g. YUV+gray vs.
+ RGB+gray): in that case, the merging would select the gray format,
+ possibly causing a lossy conversion elsewhere in the graph.
+ To avoid that, pretend that there are no common formats to force the
+ insertion of a conversion filter. */
+ if (type == AVMEDIA_TYPE_VIDEO)
+ for (i = 0; i < a->nb_formats; i++)
+ for (j = 0; j < b->nb_formats; j++) {
+ const AVPixFmtDescriptor *adesc = av_pix_fmt_desc_get(a->formats[i]);
+ const AVPixFmtDescriptor *bdesc = av_pix_fmt_desc_get(b->formats[j]);
+ alpha2 |= adesc->flags & bdesc->flags & AV_PIX_FMT_FLAG_ALPHA;
+ chroma2|= adesc->nb_components > 1 && bdesc->nb_components > 1;
+ if (a->formats[i] == b->formats[j]) {
+ alpha1 |= adesc->flags & AV_PIX_FMT_FLAG_ALPHA;
+ chroma1|= adesc->nb_components > 1;
+ }
+ }
+
+ // If chroma or alpha can be lost through merging then do not merge
+ if (alpha2 > alpha1 || chroma2 > chroma1)
+ return NULL;
+
MERGE_FORMATS(ret, a, b, formats, nb_formats, AVFilterFormats, fail);
return ret;
@@ -127,21 +167,81 @@ AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a,
AVFilterChannelLayouts *b)
{
AVFilterChannelLayouts *ret = NULL;
+ unsigned a_all = a->all_layouts + a->all_counts;
+ unsigned b_all = b->all_layouts + b->all_counts;
+ int ret_max, ret_nb = 0, i, j, round;
if (a == b) return a;
- if (a->nb_channel_layouts && b->nb_channel_layouts) {
- MERGE_FORMATS(ret, a, b, channel_layouts, nb_channel_layouts,
- AVFilterChannelLayouts, fail);
- } else if (a->nb_channel_layouts) {
- MERGE_REF(a, b, channel_layouts, AVFilterChannelLayouts, fail);
- ret = a;
- } else {
+ /* Put the most generic set in a, to avoid doing everything twice */
+ if (a_all < b_all) {
+ FFSWAP(AVFilterChannelLayouts *, a, b);
+ FFSWAP(unsigned, a_all, b_all);
+ }
+ if (a_all) {
+ if (a_all == 1 && !b_all) {
+ /* keep only known layouts in b; works also for b_all = 1 */
+ for (i = j = 0; i < b->nb_channel_layouts; i++)
+ if (KNOWN(b->channel_layouts[i]))
+ b->channel_layouts[j++] = b->channel_layouts[i];
+ /* Not optimal: the unknown layouts of b may become known after
+ another merge. */
+ if (!j)
+ return NULL;
+ b->nb_channel_layouts = j;
+ }
MERGE_REF(b, a, channel_layouts, AVFilterChannelLayouts, fail);
- ret = b;
+ return b;
}
+ ret_max = a->nb_channel_layouts + b->nb_channel_layouts;
+ if (!(ret = av_mallocz(sizeof(*ret))) ||
+ !(ret->channel_layouts = av_malloc_array(ret_max,
+ sizeof(*ret->channel_layouts))))
+ goto fail;
+
+ /* a[known] intersect b[known] */
+ for (i = 0; i < a->nb_channel_layouts; i++) {
+ if (!KNOWN(a->channel_layouts[i]))
+ continue;
+ for (j = 0; j < b->nb_channel_layouts; j++) {
+ if (a->channel_layouts[i] == b->channel_layouts[j]) {
+ ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
+ a->channel_layouts[i] = b->channel_layouts[j] = 0;
+ }
+ }
+ }
+ /* 1st round: a[known] intersect b[generic]
+ 2nd round: a[generic] intersect b[known] */
+ for (round = 0; round < 2; round++) {
+ for (i = 0; i < a->nb_channel_layouts; i++) {
+ uint64_t fmt = a->channel_layouts[i], bfmt;
+ if (!fmt || !KNOWN(fmt))
+ continue;
+ bfmt = FF_COUNT2LAYOUT(av_get_channel_layout_nb_channels(fmt));
+ for (j = 0; j < b->nb_channel_layouts; j++)
+ if (b->channel_layouts[j] == bfmt)
+ ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
+ }
+ /* 1st round: swap to prepare 2nd round; 2nd round: put it back */
+ FFSWAP(AVFilterChannelLayouts *, a, b);
+ }
+ /* a[generic] intersect b[generic] */
+ for (i = 0; i < a->nb_channel_layouts; i++) {
+ if (KNOWN(a->channel_layouts[i]))
+ continue;
+ for (j = 0; j < b->nb_channel_layouts; j++)
+ if (a->channel_layouts[i] == b->channel_layouts[j])
+ ret->channel_layouts[ret_nb++] = a->channel_layouts[i];
+ }
+
+ ret->nb_channel_layouts = ret_nb;
+ if (!ret->nb_channel_layouts)
+ goto fail;
+ MERGE_REF(ret, a, channel_layouts, AVFilterChannelLayouts, fail);
+ MERGE_REF(ret, b, channel_layouts, AVFilterChannelLayouts, fail);
return ret;
+
fail:
if (ret) {
av_freep(&ret->refs);
@@ -155,26 +255,58 @@ int ff_fmt_is_in(int fmt, const int *fmts)
{
const int *p;
- for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
+ for (p = fmts; *p != -1; p++) {
if (fmt == *p)
return 1;
}
return 0;
}
+#define COPY_INT_LIST(list_copy, list, type) { \
+ int count = 0; \
+ if (list) \
+ for (count = 0; list[count] != -1; count++) \
+ ; \
+ list_copy = av_calloc(count+1, sizeof(type)); \
+ if (list_copy) { \
+ memcpy(list_copy, list, sizeof(type) * count); \
+ list_copy[count] = -1; \
+ } \
+}
+
+#define MAKE_FORMAT_LIST(type, field, count_field) \
+ type *formats; \
+ int count = 0; \
+ if (fmts) \
+ for (count = 0; fmts[count] != -1; count++) \
+ ; \
+ formats = av_mallocz(sizeof(*formats)); \
+ if (!formats) return NULL; \
+ formats->count_field = count; \
+ if (count) { \
+ formats->field = av_malloc_array(count, sizeof(*formats->field)); \
+ if (!formats->field) { \
+ av_free(formats); \
+ return NULL; \
+ } \
+ }
+
AVFilterFormats *ff_make_format_list(const int *fmts)
{
- AVFilterFormats *formats;
- int count;
+ MAKE_FORMAT_LIST(AVFilterFormats, formats, nb_formats);
+ while (count--)
+ formats->formats[count] = fmts[count];
- for (count = 0; fmts[count] != -1; count++)
- ;
+ return formats;
+}
- formats = av_mallocz(sizeof(*formats));
+AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts)
+{
+ MAKE_FORMAT_LIST(AVFilterChannelLayouts,
+ channel_layouts, nb_channel_layouts);
if (count)
- formats->formats = av_malloc(sizeof(*formats->formats) * count);
- formats->nb_formats = count;
- memcpy(formats->formats, fmts, sizeof(*formats->formats) * count);
+ memcpy(formats->channel_layouts, fmts,
+ sizeof(*formats->channel_layouts) * count);
return formats;
}
@@ -193,17 +325,19 @@ do { \
\
(*f)->list = fmts; \
(*f)->list[(*f)->nb++] = fmt; \
- return 0; \
} while (0)
-int ff_add_format(AVFilterFormats **avff, int fmt)
+int ff_add_format(AVFilterFormats **avff, int64_t fmt)
{
ADD_FORMAT(avff, fmt, int, formats, nb_formats);
+ return 0;
}
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
{
+ av_assert1(!(*l && (*l)->all_layouts));
ADD_FORMAT(l, channel_layout, uint64_t, channel_layouts, nb_channel_layouts);
+ return 0;
}
AVFilterFormats *ff_all_formats(enum AVMediaType type)
@@ -227,12 +361,22 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type)
return ret;
}
+const int64_t avfilter_all_channel_layouts[] = {
+#include "all_channel_layouts.inc"
+ -1
+};
+
+// AVFilterFormats *avfilter_make_all_channel_layouts(void)
+// {
+// return avfilter_make_format64_list(avfilter_all_channel_layouts);
+// }
+
AVFilterFormats *ff_planar_sample_fmts(void)
{
AVFilterFormats *ret = NULL;
int fmt;
- for (fmt = 0; fmt < AV_SAMPLE_FMT_NB; fmt++)
+ for (fmt = 0; av_get_bytes_per_sample(fmt)>0; fmt++)
if (av_sample_fmt_is_planar(fmt))
ff_add_format(&ret, fmt);
@@ -248,6 +392,18 @@ AVFilterFormats *ff_all_samplerates(void)
AVFilterChannelLayouts *ff_all_channel_layouts(void)
{
AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret));
+ if (!ret)
+ return NULL;
+ ret->all_layouts = 1;
+ return ret;
+}
+
+AVFilterChannelLayouts *ff_all_channel_counts(void)
+{
+ AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret));
+ if (!ret)
+ return NULL;
+ ret->all_layouts = ret->all_counts = 1;
return ret;
}
@@ -338,13 +494,13 @@ void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref)
int count = 0, i; \
\
for (i = 0; i < ctx->nb_inputs; i++) { \
- if (ctx->inputs[i]) { \
+ if (ctx->inputs[i] && !ctx->inputs[i]->out_fmts) { \
ref(fmts, &ctx->inputs[i]->out_fmts); \
count++; \
} \
} \
for (i = 0; i < ctx->nb_outputs; i++) { \
- if (ctx->outputs[i]) { \
+ if (ctx->outputs[i] && !ctx->outputs[i]->in_fmts) { \
ref(fmts, &ctx->outputs[i]->in_fmts); \
count++; \
} \
@@ -382,7 +538,8 @@ void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
ff_formats_ref, formats);
}
-int ff_default_query_formats(AVFilterContext *ctx)
+static int default_query_formats_common(AVFilterContext *ctx,
+ AVFilterChannelLayouts *(layouts)(void))
{
enum AVMediaType type = ctx->inputs && ctx->inputs [0] ? ctx->inputs [0]->type :
ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
@@ -390,9 +547,122 @@ int ff_default_query_formats(AVFilterContext *ctx)
ff_set_common_formats(ctx, ff_all_formats(type));
if (type == AVMEDIA_TYPE_AUDIO) {
- ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
+ ff_set_common_channel_layouts(ctx, layouts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
}
return 0;
}
+
+int ff_default_query_formats(AVFilterContext *ctx)
+{
+ return default_query_formats_common(ctx, ff_all_channel_layouts);
+}
+
+int ff_query_formats_all(AVFilterContext *ctx)
+{
+ return default_query_formats_common(ctx, ff_all_channel_counts);
+}
+
+/* internal functions for parsing audio format arguments */
+
+int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx)
+{
+ char *tail;
+ int pix_fmt = av_get_pix_fmt(arg);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ pix_fmt = strtol(arg, &tail, 0);
+ if (*tail || !av_pix_fmt_desc_get(pix_fmt)) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid pixel format '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ }
+ *ret = pix_fmt;
+ return 0;
+}
+
+int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx)
+{
+ char *tail;
+ int sfmt = av_get_sample_fmt(arg);
+ if (sfmt == AV_SAMPLE_FMT_NONE) {
+ sfmt = strtol(arg, &tail, 0);
+ if (*tail || av_get_bytes_per_sample(sfmt)<=0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ }
+ *ret = sfmt;
+ return 0;
+}
+
+int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx)
+{
+ AVRational r;
+ if(av_parse_ratio(&r, arg, INT_MAX, 0, log_ctx) < 0 ||r.num<=0 ||r.den<=0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid time base '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ *ret = r;
+ return 0;
+}
+
+int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx)
+{
+ char *tail;
+ double srate = av_strtod(arg, &tail);
+ if (*tail || srate < 1 || (int)srate != srate || srate > INT_MAX) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid sample rate '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ *ret = srate;
+ return 0;
+}
+
+int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
+ void *log_ctx)
+{
+ char *tail;
+ int64_t chlayout, count;
+
+ if (nret) {
+ count = strtol(arg, &tail, 10);
+ if (*tail == 'c' && !tail[1] && count > 0 && count < 63) {
+ *nret = count;
+ *ret = 0;
+ return 0;
+ }
+ }
+ chlayout = av_get_channel_layout(arg);
+ if (chlayout == 0) {
+ chlayout = strtol(arg, &tail, 10);
+ if (*tail || chlayout == 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n", arg);
+ return AVERROR(EINVAL);
+ }
+ }
+ *ret = chlayout;
+ if (nret)
+ *nret = av_get_channel_layout_nb_channels(chlayout);
+ return 0;
+}
+
+#ifdef TEST
+
+#undef printf
+
+int main(void)
+{
+ const int64_t *cl;
+ char buf[512];
+
+ for (cl = avfilter_all_channel_layouts; *cl != -1; cl++) {
+ av_get_channel_layout_string(buf, sizeof(buf), -1, *cl);
+ printf("%s\n", buf);
+ }
+
+ return 0;
+}
+
+#endif
+
diff --git a/libavfilter/formats.h b/libavfilter/formats.h
index 2e44792..468eac8 100644
--- a/libavfilter/formats.h
+++ b/libavfilter/formats.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -69,15 +69,46 @@ struct AVFilterFormats {
struct AVFilterFormats ***refs; ///< references to this list
};
+/**
+ * A list of supported channel layouts.
+ *
+ * The list works the same as AVFilterFormats, except for the following
+ * differences:
+ * - A list with all_layouts = 1 means all channel layouts with a known
+ * disposition; nb_channel_layouts must then be 0.
+ * - A list with all_counts = 1 means all channel counts, with a known or
+ * unknown disposition; nb_channel_layouts must then be 0 and all_layouts 1.
+ * - The list must not contain a layout with a known disposition and a
+ * channel count with unknown disposition with the same number of channels
+ * (e.g. AV_CH_LAYOUT_STEREO and FF_COUNT2LAYOUT(2).
+ */
typedef struct AVFilterChannelLayouts {
uint64_t *channel_layouts; ///< list of channel layouts
int nb_channel_layouts; ///< number of channel layouts
+ char all_layouts; ///< accept any known channel layout
+ char all_counts; ///< accept any channel layout or count
unsigned refcount; ///< number of references to this list
struct AVFilterChannelLayouts ***refs; ///< references to this list
} AVFilterChannelLayouts;
/**
+ * Encode a channel count as a channel layout.
+ * FF_COUNT2LAYOUT(c) means any channel layout with c channels, with a known
+ * or unknown disposition.
+ * The result is only valid inside AVFilterChannelLayouts and immediately
+ * related functions.
+ */
+#define FF_COUNT2LAYOUT(c) (0x8000000000000000ULL | (c))
+
+/**
+ * Decode a channel count encoded as a channel layout.
+ * Return 0 if the channel layout was a real one.
+ */
+#define FF_LAYOUT2COUNT(l) (((l) & 0x8000000000000000ULL) ? \
+ (int)((l) & 0x7FFFFFFF) : 0)
+
+/**
* Return a channel layouts/samplerates list which contains the intersection of
* the layouts/samplerates of a and b. Also, all the references of a, all the
* references of b, and a and b themselves will be deallocated.
@@ -92,12 +123,21 @@ AVFilterFormats *ff_merge_samplerates(AVFilterFormats *a,
/**
* Construct an empty AVFilterChannelLayouts/AVFilterFormats struct --
- * representing any channel layout/sample rate.
+ * representing any channel layout (with known disposition)/sample rate.
*/
AVFilterChannelLayouts *ff_all_channel_layouts(void);
AVFilterFormats *ff_all_samplerates(void);
/**
+ * Construct an AVFilterChannelLayouts coding for any channel layout, with
+ * known or unknown disposition.
+ */
+AVFilterChannelLayouts *ff_all_channel_counts(void);
+
+AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts);
+
+
+/**
* A helper for query_formats() which sets all links to the same list of channel
* layouts/sample rates. If there are no links hooked to this filter, the list
* is freed.
@@ -132,6 +172,14 @@ void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
int ff_default_query_formats(AVFilterContext *ctx);
+/**
+ * Set the formats list to all existing formats.
+ * This function behaves like ff_default_query_formats(), except it also
+ * accepts channel layouts with unknown disposition. It should only be used
+ * with audio filters.
+ */
+int ff_query_formats_all(AVFilterContext *ctx);
+
/**
* Create a list of supported formats. This is intended for use in
@@ -150,10 +198,10 @@ AVFilterFormats *ff_make_format_list(const int *fmts);
* @return a non negative value in case of success, or a negative
* value corresponding to an AVERROR code in case of error
*/
-int ff_add_format(AVFilterFormats **avff, int fmt);
+int ff_add_format(AVFilterFormats **avff, int64_t fmt);
/**
- * Return a list of all formats supported by Libav for the given media type.
+ * Return a list of all formats supported by FFmpeg for the given media type.
*/
AVFilterFormats *ff_all_formats(enum AVMediaType type);
@@ -170,7 +218,8 @@ AVFilterFormats *ff_planar_sample_fmts(void);
* If a and b do not share any common formats, neither is modified, and NULL
* is returned.
*/
-AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b);
+AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b,
+ enum AVMediaType type);
/**
* Add *ref as a new reference to formats.
diff --git a/libavfilter/framesync.c b/libavfilter/framesync.c
new file mode 100644
index 0000000..12db50c
--- /dev/null
+++ b/libavfilter/framesync.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "framesync.h"
+#include "internal.h"
+
+#define OFFSET(member) offsetof(FFFrameSync, member)
+
+static const char *framesync_name(void *ptr)
+{
+ return "framesync";
+}
+
+static const AVClass framesync_class = {
+ .version = LIBAVUTIL_VERSION_INT,
+ .class_name = "framesync",
+ .item_name = framesync_name,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ .option = NULL,
+ .parent_log_context_offset = OFFSET(parent),
+};
+
+enum {
+ STATE_BOF,
+ STATE_RUN,
+ STATE_EOF,
+};
+
+void ff_framesync_init(FFFrameSync *fs, void *parent, unsigned nb_in)
+{
+ fs->class = &framesync_class;
+ fs->parent = parent;
+ fs->nb_in = nb_in;
+}
+
+static void framesync_sync_level_update(FFFrameSync *fs)
+{
+ unsigned i, level = 0;
+
+ for (i = 0; i < fs->nb_in; i++)
+ if (fs->in[i].state != STATE_EOF)
+ level = FFMAX(level, fs->in[i].sync);
+ av_assert0(level <= fs->sync_level);
+ if (level < fs->sync_level)
+ av_log(fs, AV_LOG_VERBOSE, "Sync level %u\n", level);
+ if (level)
+ fs->sync_level = level;
+ else
+ fs->eof = 1;
+}
+
+int ff_framesync_configure(FFFrameSync *fs)
+{
+ unsigned i;
+ int64_t gcd, lcm;
+
+ if (!fs->time_base.num) {
+ for (i = 0; i < fs->nb_in; i++) {
+ if (fs->in[i].sync) {
+ if (fs->time_base.num) {
+ gcd = av_gcd(fs->time_base.den, fs->in[i].time_base.den);
+ lcm = (fs->time_base.den / gcd) * fs->in[i].time_base.den;
+ if (lcm < AV_TIME_BASE / 2) {
+ fs->time_base.den = lcm;
+ fs->time_base.num = av_gcd(fs->time_base.num,
+ fs->in[i].time_base.num);
+ } else {
+ fs->time_base.num = 1;
+ fs->time_base.den = AV_TIME_BASE;
+ break;
+ }
+ } else {
+ fs->time_base = fs->in[i].time_base;
+ }
+ }
+ }
+ if (!fs->time_base.num) {
+ av_log(fs, AV_LOG_ERROR, "Impossible to set time base\n");
+ return AVERROR(EINVAL);
+ }
+ av_log(fs, AV_LOG_VERBOSE, "Selected %d/%d time base\n",
+ fs->time_base.num, fs->time_base.den);
+ }
+
+ for (i = 0; i < fs->nb_in; i++)
+ fs->in[i].pts = fs->in[i].pts_next = AV_NOPTS_VALUE;
+ fs->sync_level = UINT_MAX;
+ framesync_sync_level_update(fs);
+
+ return 0;
+}
+
+static void framesync_advance(FFFrameSync *fs)
+{
+ int latest;
+ unsigned i;
+ int64_t pts;
+
+ if (fs->eof)
+ return;
+ while (!fs->frame_ready) {
+ latest = -1;
+ for (i = 0; i < fs->nb_in; i++) {
+ if (!fs->in[i].have_next) {
+ if (latest < 0 || fs->in[i].pts < fs->in[latest].pts)
+ latest = i;
+ }
+ }
+ if (latest >= 0) {
+ fs->in_request = latest;
+ break;
+ }
+
+ pts = fs->in[0].pts_next;
+ for (i = 1; i < fs->nb_in; i++)
+ if (fs->in[i].pts_next < pts)
+ pts = fs->in[i].pts_next;
+ if (pts == INT64_MAX) {
+ fs->eof = 1;
+ break;
+ }
+ for (i = 0; i < fs->nb_in; i++) {
+ if (fs->in[i].pts_next == pts ||
+ (fs->in[i].before == EXT_INFINITY &&
+ fs->in[i].state == STATE_BOF)) {
+ av_frame_free(&fs->in[i].frame);
+ fs->in[i].frame = fs->in[i].frame_next;
+ fs->in[i].pts = fs->in[i].pts_next;
+ fs->in[i].frame_next = NULL;
+ fs->in[i].pts_next = AV_NOPTS_VALUE;
+ fs->in[i].have_next = 0;
+ fs->in[i].state = fs->in[i].frame ? STATE_RUN : STATE_EOF;
+ if (fs->in[i].sync == fs->sync_level && fs->in[i].frame)
+ fs->frame_ready = 1;
+ if (fs->in[i].state == STATE_EOF &&
+ fs->in[i].after == EXT_STOP)
+ fs->eof = 1;
+ }
+ }
+ if (fs->eof)
+ fs->frame_ready = 0;
+ if (fs->frame_ready)
+ for (i = 0; i < fs->nb_in; i++)
+ if ((fs->in[i].state == STATE_BOF &&
+ fs->in[i].before == EXT_STOP))
+ fs->frame_ready = 0;
+ fs->pts = pts;
+ }
+}
+
+static int64_t framesync_pts_extrapolate(FFFrameSync *fs, unsigned in,
+ int64_t pts)
+{
+ /* Possible enhancement: use the link's frame rate */
+ return pts + 1;
+}
+
+static void framesync_inject_frame(FFFrameSync *fs, unsigned in, AVFrame *frame)
+{
+ int64_t pts;
+
+ av_assert0(!fs->in[in].have_next);
+ if (frame) {
+ pts = av_rescale_q(frame->pts, fs->in[in].time_base, fs->time_base);
+ frame->pts = pts;
+ } else {
+ pts = fs->in[in].state != STATE_RUN || fs->in[in].after == EXT_INFINITY
+ ? INT64_MAX : framesync_pts_extrapolate(fs, in, fs->in[in].pts);
+ fs->in[in].sync = 0;
+ framesync_sync_level_update(fs);
+ }
+ fs->in[in].frame_next = frame;
+ fs->in[in].pts_next = pts;
+ fs->in[in].have_next = 1;
+}
+
+int ff_framesync_add_frame(FFFrameSync *fs, unsigned in, AVFrame *frame)
+{
+ av_assert1(in < fs->nb_in);
+ if (!fs->in[in].have_next)
+ framesync_inject_frame(fs, in, frame);
+ else
+ ff_bufqueue_add(fs, &fs->in[in].queue, frame);
+ return 0;
+}
+
+void ff_framesync_next(FFFrameSync *fs)
+{
+ unsigned i;
+
+ av_assert0(!fs->frame_ready);
+ for (i = 0; i < fs->nb_in; i++)
+ if (!fs->in[i].have_next && fs->in[i].queue.available)
+ framesync_inject_frame(fs, i, ff_bufqueue_get(&fs->in[i].queue));
+ fs->frame_ready = 0;
+ framesync_advance(fs);
+}
+
+void ff_framesync_drop(FFFrameSync *fs)
+{
+ fs->frame_ready = 0;
+}
+
+int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
+ unsigned get)
+{
+ AVFrame *frame;
+ unsigned need_copy = 0, i;
+ int64_t pts_next;
+ int ret;
+
+ if (!fs->in[in].frame) {
+ *rframe = NULL;
+ return 0;
+ }
+ frame = fs->in[in].frame;
+ if (get) {
+ /* Find out if we need to copy the frame: is there another sync
+ stream, and do we know if its current frame will outlast this one? */
+ pts_next = fs->in[in].have_next ? fs->in[in].pts_next : INT64_MAX;
+ for (i = 0; i < fs->nb_in && !need_copy; i++)
+ if (i != in && fs->in[i].sync &&
+ (!fs->in[i].have_next || fs->in[i].pts_next < pts_next))
+ need_copy = 1;
+ if (need_copy) {
+ if (!(frame = av_frame_clone(frame)))
+ return AVERROR(ENOMEM);
+ if ((ret = av_frame_make_writable(frame)) < 0) {
+ av_frame_free(&frame);
+ return ret;
+ }
+ } else {
+ fs->in[in].frame = NULL;
+ }
+ fs->frame_ready = 0;
+ }
+ *rframe = frame;
+ return 0;
+}
+
+void ff_framesync_uninit(FFFrameSync *fs)
+{
+ unsigned i;
+
+ for (i = 0; i < fs->nb_in; i++) {
+ av_frame_free(&fs->in[i].frame);
+ av_frame_free(&fs->in[i].frame_next);
+ ff_bufqueue_discard_all(&fs->in[i].queue);
+ }
+}
+
+int ff_framesync_process_frame(FFFrameSync *fs, unsigned all)
+{
+ int ret, count = 0;
+
+ av_assert0(fs->on_event);
+ while (1) {
+ ff_framesync_next(fs);
+ if (fs->eof || !fs->frame_ready)
+ break;
+ if ((ret = fs->on_event(fs)) < 0)
+ return ret;
+ ff_framesync_drop(fs);
+ count++;
+ if (!all)
+ break;
+ }
+ if (!count && fs->eof)
+ return AVERROR_EOF;
+ return count;
+}
+
+int ff_framesync_filter_frame(FFFrameSync *fs, AVFilterLink *inlink,
+ AVFrame *in)
+{
+ int ret;
+
+ if ((ret = ff_framesync_process_frame(fs, 1)) < 0)
+ return ret;
+ if ((ret = ff_framesync_add_frame(fs, FF_INLINK_IDX(inlink), in)) < 0)
+ return ret;
+ if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
+ return ret;
+ return 0;
+}
+
+int ff_framesync_request_frame(FFFrameSync *fs, AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ int input, ret;
+
+ if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
+ if (fs->eof)
+ return AVERROR_EOF;
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ input = fs->in_request;
+ ret = ff_request_frame(ctx->inputs[input]);
+ if (ret == AVERROR_EOF) {
+ if ((ret = ff_framesync_add_frame(fs, input, NULL)) < 0)
+ return ret;
+ if ((ret = ff_framesync_process_frame(fs, 0)) < 0)
+ return ret;
+ ret = 0;
+ }
+ return ret;
+}
diff --git a/libavfilter/framesync.h b/libavfilter/framesync.h
new file mode 100644
index 0000000..2072781
--- /dev/null
+++ b/libavfilter/framesync.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2013 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_FRAMESYNC_H
+#define AVFILTER_FRAMESYNC_H
+
+#include "bufferqueue.h"
+
+/*
+ * TODO
+ * Callback-based API similar to dualinput.
+ * Export convenient options.
+ */
+
+/**
+ * This API is intended as a helper for filters that have several video
+ * input and need to combine them somehow. If the inputs have different or
+ * variable frame rate, getting the input frames to match requires a rather
+ * complex logic and a few user-tunable options.
+ *
+ * In this API, when a set of synchronized input frames is ready to be
+ * procesed is called a frame event. Frame event can be generated in
+ * response to input frames on any or all inputs and the handling of
+ * situations where some stream extend beyond the beginning or the end of
+ * others can be configured.
+ *
+ * The basic working of this API is the following:
+ *
+ * - When a frame is available on any input, add it using
+ * ff_framesync_add_frame().
+ *
+ * - When a frame event is ready to be processed (i.e. after adding a frame
+ * or when requested on input):
+ * - call ff_framesync_next();
+ * - if fs->frame_ready is true, process the frames;
+ * - call ff_framesync_drop().
+ */
+
+/**
+ * Stream extrapolation mode
+ *
+ * Describe how the frames of a stream are extrapolated before the first one
+ * and after EOF to keep sync with possibly longer other streams.
+ */
+enum FFFrameSyncExtMode {
+
+ /**
+ * Completely stop all streams with this one.
+ */
+ EXT_STOP,
+
+ /**
+ * Ignore this stream and continue processing the other ones.
+ */
+ EXT_NULL,
+
+ /**
+ * Extend the frame to infinity.
+ */
+ EXT_INFINITY,
+};
+
+/**
+ * Input stream structure
+ */
+typedef struct FFFrameSyncIn {
+
+ /**
+ * Queue of incoming AVFrame, and NULL to mark EOF
+ */
+ struct FFBufQueue queue;
+
+ /**
+ * Extrapolation mode for timestamps before the first frame
+ */
+ enum FFFrameSyncExtMode before;
+
+ /**
+ * Extrapolation mode for timestamps after the last frame
+ */
+ enum FFFrameSyncExtMode after;
+
+ /**
+ * Time base for the incoming frames
+ */
+ AVRational time_base;
+
+ /**
+ * Current frame, may be NULL before the first one or after EOF
+ */
+ AVFrame *frame;
+
+ /**
+ * Next frame, for internal use
+ */
+ AVFrame *frame_next;
+
+ /**
+ * PTS of the current frame
+ */
+ int64_t pts;
+
+ /**
+ * PTS of the next frame, for internal use
+ */
+ int64_t pts_next;
+
+ /**
+ * Boolean flagging the next frame, for internal use
+ */
+ uint8_t have_next;
+
+ /**
+ * State: before first, in stream or after EOF, for internal use
+ */
+ uint8_t state;
+
+ /**
+ * Synchronization level: frames on input at the highest sync level will
+ * generate output frame events.
+ *
+ * For example, if inputs #0 and #1 have sync level 2 and input #2 has
+ * sync level 1, then a frame on either input #0 or #1 will generate a
+ * frame event, but not a frame on input #2 until both inputs #0 and #1
+ * have reached EOF.
+ *
+ * If sync is 0, no frame event will be generated.
+ */
+ unsigned sync;
+
+} FFFrameSyncIn;
+
+/**
+ * Frame sync structure.
+ */
+typedef struct FFFrameSync {
+ const AVClass *class;
+ void *parent;
+
+ /**
+ * Number of input streams
+ */
+ unsigned nb_in;
+
+ /**
+ * Time base for the output events
+ */
+ AVRational time_base;
+
+ /**
+ * Timestamp of the current event
+ */
+ int64_t pts;
+
+ /**
+ * Callback called when a frame event is ready
+ */
+ int (*on_event)(struct FFFrameSync *fs);
+
+ /**
+ * Opaque pointer, not used by the API
+ */
+ void *opaque;
+
+ /**
+ * Index of the input that requires a request
+ */
+ unsigned in_request;
+
+ /**
+ * Synchronization level: only inputs with the same sync level are sync
+ * sources.
+ */
+ unsigned sync_level;
+
+ /**
+ * Flag indicating that a frame event is ready
+ */
+ uint8_t frame_ready;
+
+ /**
+ * Flag indicating that output has reached EOF.
+ */
+ uint8_t eof;
+
+ /**
+ * Array of inputs; all inputs must be in consecutive memory
+ */
+ FFFrameSyncIn in[1]; /* must be the last field */
+
+} FFFrameSync;
+
+/**
+ * Initialize a frame sync structure.
+ *
+ * The entire structure is expected to be already set to 0.
+ *
+ * @param fs frame sync structure to initialize
+ * @param parent parent object, used for logging
+ * @param nb_in number of inputs
+ */
+void ff_framesync_init(FFFrameSync *fs, void *parent, unsigned nb_in);
+
+/**
+ * Configure a frame sync structure.
+ *
+ * Must be called after all options are set but before all use.
+ *
+ * @return >= 0 for success or a negative error code
+ */
+int ff_framesync_configure(FFFrameSync *fs);
+
+/**
+ * Free all memory currently allocated.
+ */
+void ff_framesync_uninit(FFFrameSync *fs);
+
+/**
+ * Add a frame to an input
+ *
+ * Typically called from the filter_frame() method.
+ *
+ * @param fs frame sync structure
+ * @param in index of the input
+ * @param frame input frame, or NULL for EOF
+ */
+int ff_framesync_add_frame(FFFrameSync *fs, unsigned in, AVFrame *frame);
+
+/**
+ * Prepare the next frame event.
+ *
+ * The status of the operation can be found in fs->frame_ready and fs->eof.
+ */
+void ff_framesync_next(FFFrameSync *fs);
+
+/**
+ * Drop the current frame event.
+ */
+void ff_framesync_drop(FFFrameSync *fs);
+
+/**
+ * Get the current frame in an input.
+ *
+ * @param fs frame sync structure
+ * @param in index of the input
+ * @param rframe used to return the current frame (or NULL)
+ * @param get if not zero, the calling code needs to get ownership of
+ * the returned frame; the current frame will either be
+ * duplicated or removed from the framesync structure
+ */
+int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
+ unsigned get);
+
+/**
+ * Process one or several frame using the on_event callback.
+ *
+ * @return number of frames processed or negative error code
+ */
+int ff_framesync_process_frame(FFFrameSync *fs, unsigned all);
+
+
+/**
+ * Accept a frame on a filter input.
+ *
+ * This function can be the complete implementation of all filter_frame
+ * methods of a filter using framesync.
+ */
+int ff_framesync_filter_frame(FFFrameSync *fs, AVFilterLink *inlink,
+ AVFrame *in);
+
+/**
+ * Request a frame on the filter output.
+ *
+ * This function can be the complete implementation of all filter_frame
+ * methods of a filter using framesync if it has only one output.
+ */
+int ff_framesync_request_frame(FFFrameSync *fs, AVFilterLink *outlink);
+
+#endif /* AVFILTER_FRAMESYNC_H */
diff --git a/libavfilter/generate_wave_table.c b/libavfilter/generate_wave_table.c
new file mode 100644
index 0000000..bee9c00
--- /dev/null
+++ b/libavfilter/generate_wave_table.c
@@ -0,0 +1,84 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "generate_wave_table.h"
+
+void ff_generate_wave_table(enum WaveType wave_type,
+ enum AVSampleFormat sample_fmt,
+ void *table, int table_size,
+ double min, double max, double phase)
+{
+ uint32_t i, phase_offset = phase / M_PI / 2 * table_size + 0.5;
+
+ for (i = 0; i < table_size; i++) {
+ uint32_t point = (i + phase_offset) % table_size;
+ double d;
+
+ switch (wave_type) {
+ case WAVE_SIN:
+ d = (sin((double)point / table_size * 2 * M_PI) + 1) / 2;
+ break;
+ case WAVE_TRI:
+ d = (double)point * 2 / table_size;
+ switch (4 * point / table_size) {
+ case 0: d = d + 0.5; break;
+ case 1:
+ case 2: d = 1.5 - d; break;
+ case 3: d = d - 1.5; break;
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ d = d * (max - min) + min;
+ switch (sample_fmt) {
+ case AV_SAMPLE_FMT_FLT: {
+ float *fp = (float *)table;
+ *fp++ = (float)d;
+ table = fp;
+ continue; }
+ case AV_SAMPLE_FMT_DBL: {
+ double *dp = (double *)table;
+ *dp++ = d;
+ table = dp;
+ continue; }
+ }
+
+ d += d < 0 ? -0.5 : 0.5;
+ switch (sample_fmt) {
+ case AV_SAMPLE_FMT_S16: {
+ int16_t *sp = table;
+ *sp++ = (int16_t)d;
+ table = sp;
+ continue; }
+ case AV_SAMPLE_FMT_S32: {
+ int32_t *ip = table;
+ *ip++ = (int32_t)d;
+ table = ip;
+ continue; }
+ default:
+ av_assert0(0);
+ }
+ }
+}
+
+
diff --git a/libavfilter/generate_wave_table.h b/libavfilter/generate_wave_table.h
new file mode 100644
index 0000000..37ea2aa
--- /dev/null
+++ b/libavfilter/generate_wave_table.h
@@ -0,0 +1,33 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_GENERATE_WAVE_TABLE_H
+#define AVFILTER_GENERATE_WAVE_TABLE_H
+
+enum WaveType {
+ WAVE_SIN,
+ WAVE_TRI,
+ WAVE_NB,
+};
+
+void ff_generate_wave_table(enum WaveType wave_type,
+ enum AVSampleFormat sample_fmt,
+ void *table, int table_size,
+ double min, double max, double phase);
+
+#endif /* AVFILTER_GENERATE_WAVE_TABLE_H */
diff --git a/libavfilter/gradfun.h b/libavfilter/gradfun.h
index f6f7311..eb1f1eb 100644
--- a/libavfilter/gradfun.h
+++ b/libavfilter/gradfun.h
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
* Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,13 +35,13 @@ typedef struct GradFunContext {
int chroma_r; ///< blur radius for the chroma planes
uint16_t *buf; ///< holds image data for blur algorithm passed into filter.
/// DSP functions.
- void (*filter_line) (uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers);
- void (*blur_line) (uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width);
+ void (*filter_line) (uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers);
+ void (*blur_line) (uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width);
} GradFunContext;
void ff_gradfun_init_x86(GradFunContext *gf);
-void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers);
-void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width);
+void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers);
+void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width);
#endif /* AVFILTER_GRADFUN_H */
diff --git a/libavfilter/graphdump.c b/libavfilter/graphdump.c
new file mode 100644
index 0000000..3d702c6
--- /dev/null
+++ b/libavfilter/graphdump.c
@@ -0,0 +1,165 @@
+/*
+ * Filter graphs to bad ASCII-art
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include "libavutil/channel_layout.h"
+#include "libavutil/bprint.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "avfiltergraph.h"
+
+static int print_link_prop(AVBPrint *buf, AVFilterLink *link)
+{
+ char *format;
+ char layout[64];
+ AVBPrint dummy_buffer = { 0 };
+
+ if (!buf)
+ buf = &dummy_buffer;
+ switch (link->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ format = av_x_if_null(av_get_pix_fmt_name(link->format), "?");
+ av_bprintf(buf, "[%dx%d %d:%d %s]", link->w, link->h,
+ link->sample_aspect_ratio.num,
+ link->sample_aspect_ratio.den,
+ format);
+ break;
+
+ case AVMEDIA_TYPE_AUDIO:
+ av_get_channel_layout_string(layout, sizeof(layout),
+ link->channels, link->channel_layout);
+ format = av_x_if_null(av_get_sample_fmt_name(link->format), "?");
+ av_bprintf(buf, "[%dHz %s:%s]",
+ (int)link->sample_rate, format, layout);
+ break;
+
+ default:
+ av_bprintf(buf, "?");
+ break;
+ }
+ return buf->len;
+}
+
+static void avfilter_graph_dump_to_buf(AVBPrint *buf, AVFilterGraph *graph)
+{
+ unsigned i, j, x, e;
+
+ for (i = 0; i < graph->nb_filters; i++) {
+ AVFilterContext *filter = graph->filters[i];
+ unsigned max_src_name = 0, max_dst_name = 0;
+ unsigned max_in_name = 0, max_out_name = 0;
+ unsigned max_in_fmt = 0, max_out_fmt = 0;
+ unsigned width, height, in_indent;
+ unsigned lname = strlen(filter->name);
+ unsigned ltype = strlen(filter->filter->name);
+
+ for (j = 0; j < filter->nb_inputs; j++) {
+ AVFilterLink *l = filter->inputs[j];
+ unsigned ln = strlen(l->src->name) + 1 + strlen(l->srcpad->name);
+ max_src_name = FFMAX(max_src_name, ln);
+ max_in_name = FFMAX(max_in_name, strlen(l->dstpad->name));
+ max_in_fmt = FFMAX(max_in_fmt, print_link_prop(NULL, l));
+ }
+ for (j = 0; j < filter->nb_outputs; j++) {
+ AVFilterLink *l = filter->outputs[j];
+ unsigned ln = strlen(l->dst->name) + 1 + strlen(l->dstpad->name);
+ max_dst_name = FFMAX(max_dst_name, ln);
+ max_out_name = FFMAX(max_out_name, strlen(l->srcpad->name));
+ max_out_fmt = FFMAX(max_out_fmt, print_link_prop(NULL, l));
+ }
+ in_indent = max_src_name + max_in_name + max_in_fmt;
+ in_indent += in_indent ? 4 : 0;
+ width = FFMAX(lname + 2, ltype + 4);
+ height = FFMAX3(2, filter->nb_inputs, filter->nb_outputs);
+ av_bprint_chars(buf, ' ', in_indent);
+ av_bprintf(buf, "+");
+ av_bprint_chars(buf, '-', width);
+ av_bprintf(buf, "+\n");
+ for (j = 0; j < height; j++) {
+ unsigned in_no = j - (height - filter->nb_inputs ) / 2;
+ unsigned out_no = j - (height - filter->nb_outputs) / 2;
+
+ /* Input link */
+ if (in_no < filter->nb_inputs) {
+ AVFilterLink *l = filter->inputs[in_no];
+ e = buf->len + max_src_name + 2;
+ av_bprintf(buf, "%s:%s", l->src->name, l->srcpad->name);
+ av_bprint_chars(buf, '-', e - buf->len);
+ e = buf->len + max_in_fmt + 2 +
+ max_in_name - strlen(l->dstpad->name);
+ print_link_prop(buf, l);
+ av_bprint_chars(buf, '-', e - buf->len);
+ av_bprintf(buf, "%s", l->dstpad->name);
+ } else {
+ av_bprint_chars(buf, ' ', in_indent);
+ }
+
+ /* Filter */
+ av_bprintf(buf, "|");
+ if (j == (height - 2) / 2) {
+ x = (width - lname) / 2;
+ av_bprintf(buf, "%*s%-*s", x, "", width - x, filter->name);
+ } else if (j == (height - 2) / 2 + 1) {
+ x = (width - ltype - 2) / 2;
+ av_bprintf(buf, "%*s(%s)%*s", x, "", filter->filter->name,
+ width - ltype - 2 - x, "");
+ } else {
+ av_bprint_chars(buf, ' ', width);
+ }
+ av_bprintf(buf, "|");
+
+ /* Output link */
+ if (out_no < filter->nb_outputs) {
+ AVFilterLink *l = filter->outputs[out_no];
+ unsigned ln = strlen(l->dst->name) + 1 +
+ strlen(l->dstpad->name);
+ e = buf->len + max_out_name + 2;
+ av_bprintf(buf, "%s", l->srcpad->name);
+ av_bprint_chars(buf, '-', e - buf->len);
+ e = buf->len + max_out_fmt + 2 +
+ max_dst_name - ln;
+ print_link_prop(buf, l);
+ av_bprint_chars(buf, '-', e - buf->len);
+ av_bprintf(buf, "%s:%s", l->dst->name, l->dstpad->name);
+ }
+ av_bprintf(buf, "\n");
+ }
+ av_bprint_chars(buf, ' ', in_indent);
+ av_bprintf(buf, "+");
+ av_bprint_chars(buf, '-', width);
+ av_bprintf(buf, "+\n");
+ av_bprintf(buf, "\n");
+ }
+}
+
+char *avfilter_graph_dump(AVFilterGraph *graph, const char *options)
+{
+ AVBPrint buf;
+ char *dump;
+
+ av_bprint_init(&buf, 0, 0);
+ avfilter_graph_dump_to_buf(&buf, graph);
+ av_bprint_init(&buf, buf.len + 1, buf.len + 1);
+ avfilter_graph_dump_to_buf(&buf, graph);
+ av_bprint_finalize(&buf, &dump);
+ return dump;
+}
diff --git a/libavfilter/graphparser.c b/libavfilter/graphparser.c
index e20dd62..7e25282 100644
--- a/libavfilter/graphparser.c
+++ b/libavfilter/graphparser.c
@@ -3,20 +3,20 @@
* Copyright (c) 2008 Vitor Sessak
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -89,17 +89,17 @@ static char *parse_link_name(const char **buf, void *log_ctx)
* @param filt_name the name of the filter to create
* @param args the arguments provided to the filter during its initialization
* @param log_ctx the log context to use
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index,
const char *filt_name, const char *args, void *log_ctx)
{
AVFilter *filt;
char inst_name[30];
- char tmp_args[256];
+ char *tmp_args = NULL;
int ret;
- snprintf(inst_name, sizeof(inst_name), "Parsed filter %d %s", index, filt_name);
+ snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index);
filt = avfilter_get_by_name(filt_name);
@@ -118,8 +118,10 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") &&
ctx->scale_sws_opts) {
- snprintf(tmp_args, sizeof(tmp_args), "%s:%s",
+ tmp_args = av_asprintf("%s:%s",
args, ctx->scale_sws_opts);
+ if (!tmp_args)
+ return AVERROR(ENOMEM);
args = tmp_args;
}
@@ -131,10 +133,11 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
av_log(log_ctx, AV_LOG_ERROR, " with args '%s'", args);
av_log(log_ctx, AV_LOG_ERROR, "\n");
avfilter_free(*filt_ctx);
- return ret;
+ *filt_ctx = NULL;
}
- return 0;
+ av_free(tmp_args);
+ return ret;
}
/**
@@ -151,7 +154,7 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
* @param index an index which is assigned to the created filter
* instance, and which is supposed to be unique for each filter
* instance added to the filtergraph
- * @return 0 in case of success, a negative AVERROR code otherwise
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
*/
static int parse_filter(AVFilterContext **filt_ctx, const char **buf, AVFilterGraph *graph,
int index, void *log_ctx)
@@ -384,7 +387,7 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs,
AVFilterInOut **outputs)
{
- int index = 0, ret;
+ int index = 0, ret = 0;
char chr = 0;
AVFilterInOut *curr_inputs = NULL, *open_inputs = NULL, *open_outputs = NULL;
@@ -399,18 +402,17 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
filters += strspn(filters, WHITESPACES);
if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, graph)) < 0)
- goto fail;
-
+ goto end;
if ((ret = parse_filter(&filter, &filters, graph, index, graph)) < 0)
- goto fail;
+ goto end;
if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, graph)) < 0)
- goto fail;
+ goto end;
if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
graph)) < 0)
- goto fail;
+ goto end;
filters += strspn(filters, WHITESPACES);
chr = *filters++;
@@ -425,16 +427,17 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
"Unable to parse graph description substring: \"%s\"\n",
filters - 1);
ret = AVERROR(EINVAL);
- goto fail;
+ goto end;
}
append_inout(&open_outputs, &curr_inputs);
+
*inputs = open_inputs;
*outputs = open_outputs;
return 0;
- fail:
+ fail:end:
while (graph->nb_filters)
avfilter_free(graph->filters[0]);
av_freep(&graph->filters);
@@ -448,6 +451,7 @@ int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
return ret;
}
+#if HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
AVFilterInOut *open_inputs,
AVFilterInOut *open_outputs, void *log_ctx)
@@ -509,4 +513,95 @@ int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
avfilter_inout_free(&open_inputs);
avfilter_inout_free(&open_outputs);
return ret;
+#else
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx)
+{
+ return avfilter_graph_parse_ptr(graph, filters, inputs, outputs, log_ctx);
+#endif
+}
+
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **open_inputs_ptr, AVFilterInOut **open_outputs_ptr,
+ void *log_ctx)
+{
+ int index = 0, ret = 0;
+ char chr = 0;
+
+ AVFilterInOut *curr_inputs = NULL;
+ AVFilterInOut *open_inputs = open_inputs_ptr ? *open_inputs_ptr : NULL;
+ AVFilterInOut *open_outputs = open_outputs_ptr ? *open_outputs_ptr : NULL;
+
+ if ((ret = parse_sws_flags(&filters, graph)) < 0)
+ goto end;
+
+ do {
+ AVFilterContext *filter;
+ const char *filterchain = filters;
+ filters += strspn(filters, WHITESPACES);
+
+ if ((ret = parse_inputs(&filters, &curr_inputs, &open_outputs, log_ctx)) < 0)
+ goto end;
+
+ if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0)
+ goto end;
+
+ if (filter->nb_inputs == 1 && !curr_inputs && !index) {
+ /* First input pad, assume it is "[in]" if not specified */
+ const char *tmp = "[in]";
+ if ((ret = parse_inputs(&tmp, &curr_inputs, &open_outputs, log_ctx)) < 0)
+ goto end;
+ }
+
+ if ((ret = link_filter_inouts(filter, &curr_inputs, &open_inputs, log_ctx)) < 0)
+ goto end;
+
+ if ((ret = parse_outputs(&filters, &curr_inputs, &open_inputs, &open_outputs,
+ log_ctx)) < 0)
+ goto end;
+
+ filters += strspn(filters, WHITESPACES);
+ chr = *filters++;
+
+ if (chr == ';' && curr_inputs) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Invalid filterchain containing an unlabelled output pad: \"%s\"\n",
+ filterchain);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ index++;
+ } while (chr == ',' || chr == ';');
+
+ if (chr) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Unable to parse graph description substring: \"%s\"\n",
+ filters - 1);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ if (curr_inputs) {
+ /* Last output pad, assume it is "[out]" if not specified */
+ const char *tmp = "[out]";
+ if ((ret = parse_outputs(&tmp, &curr_inputs, &open_inputs, &open_outputs,
+ log_ctx)) < 0)
+ goto end;
+ }
+
+end:
+ /* clear open_in/outputs only if not passed as parameters */
+ if (open_inputs_ptr) *open_inputs_ptr = open_inputs;
+ else avfilter_inout_free(&open_inputs);
+ if (open_outputs_ptr) *open_outputs_ptr = open_outputs;
+ else avfilter_inout_free(&open_outputs);
+ avfilter_inout_free(&curr_inputs);
+
+ if (ret < 0) {
+ while (graph->nb_filters)
+ avfilter_free(graph->filters[0]);
+ av_freep(&graph->filters);
+ }
+ return ret;
}
diff --git a/libavfilter/interlace.h b/libavfilter/interlace.h
index 2d47050..44f1e06 100644
--- a/libavfilter/interlace.h
+++ b/libavfilter/interlace.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -49,7 +49,6 @@ typedef struct InterlaceContext {
enum ScanMode scan; // top or bottom field first scanning
int lowpass; // enable or disable low pass filterning
AVFrame *cur, *next; // the two frames from which the new one is obtained
- int got_output; // signal an output frame is reday to request_frame()
void (*lowpass_line)(uint8_t *dstp, ptrdiff_t linesize, const uint8_t *srcp,
const uint8_t *srcp_above, const uint8_t *srcp_below);
} InterlaceContext;
diff --git a/libavfilter/internal.h b/libavfilter/internal.h
index 6a752dc..308b115 100644
--- a/libavfilter/internal.h
+++ b/libavfilter/internal.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,8 +26,32 @@
#include "libavutil/internal.h"
#include "avfilter.h"
+#include "avfiltergraph.h"
+#include "formats.h"
#include "thread.h"
#include "version.h"
+#include "video.h"
+
+#define POOL_SIZE 32
+typedef struct AVFilterPool {
+ AVFilterBufferRef *pic[POOL_SIZE];
+ int count;
+ int refcount;
+ int draining;
+} AVFilterPool;
+
+typedef struct AVFilterCommand {
+ double time; ///< time expressed in seconds
+ char *command; ///< command
+ char *arg; ///< optional argument for the command
+ int flags;
+ struct AVFilterCommand *next;
+} AVFilterCommand;
+
+/**
+ * Update the position of a link in the age heap.
+ */
+void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link);
#if !FF_API_AVFILTERPAD_PUBLIC
/**
@@ -48,7 +72,7 @@ struct AVFilterPad {
/**
* Callback function to get a video buffer. If NULL, the filter system will
- * use avfilter_default_get_video_buffer().
+ * use ff_default_get_video_buffer().
*
* Input video pads only.
*/
@@ -56,7 +80,7 @@ struct AVFilterPad {
/**
* Callback function to get an audio buffer. If NULL, the filter system will
- * use avfilter_default_get_audio_buffer().
+ * use ff_default_get_audio_buffer().
*
* Input audio pads only.
*/
@@ -145,9 +169,82 @@ void ff_avfilter_default_free_buffer(AVFilterBuffer *buf);
/** Tell is a format is contained in the provided list terminated by -1. */
int ff_fmt_is_in(int fmt, const int *fmts);
-#define FF_DPRINTF_START(ctx, func) av_dlog(NULL, "%-16s: ", #func)
+/* Functions to parse audio format arguments */
+
+/**
+ * Parse a pixel format.
+ *
+ * @param ret pixel format pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a sample rate.
+ *
+ * @param ret unsigned integer pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a time base.
+ *
+ * @param ret unsigned AVRational pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+int ff_parse_time_base(AVRational *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a sample format name or a corresponding integer representation.
+ *
+ * @param ret integer pointer to where the value should be written
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx);
+
+/**
+ * Parse a channel layout or a corresponding integer representation.
+ *
+ * @param ret 64bit integer pointer to where the value should be written.
+ * @param nret integer pointer to the number of channels;
+ * if not NULL, then unknown channel layouts are accepted
+ * @param arg string to parse
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative AVERROR code on error
+ */
+int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
+ void *log_ctx);
+
+void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
+
+void ff_command_queue_pop(AVFilterContext *filter);
+
+/* misc trace functions */
+
+/* #define FF_AVFILTER_TRACE */
+
+#ifdef FF_AVFILTER_TRACE
+# define ff_tlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define ff_tlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
+#endif
+
+#define FF_TPRINTF_START(ctx, func) ff_tlog(NULL, "%-16s: ", #func)
+
+char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms);
-void ff_dlog_link(void *ctx, AVFilterLink *link, int end);
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end);
+
+void ff_tlog_link(void *ctx, AVFilterLink *link, int end);
/**
* Insert a new pad.
@@ -161,35 +258,38 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end);
* @param pads Pointer to the pointer to the beginning of the list of pads
* @param links Pointer to the pointer to the beginning of the list of links
* @param newpad The new pad to add. A copy is made when adding.
+ * @return >= 0 in case of success, a negative AVERROR code on error
*/
-void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
+int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad);
/** Insert a new input pad for the filter. */
-static inline void ff_insert_inpad(AVFilterContext *f, unsigned index,
+static inline int ff_insert_inpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
- ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
+ int ret = ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
&f->input_pads, &f->inputs, p);
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
f->input_count = f->nb_inputs;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
+ return ret;
}
/** Insert a new output pad for the filter. */
-static inline void ff_insert_outpad(AVFilterContext *f, unsigned index,
+static inline int ff_insert_outpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
- ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
+ int ret = ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
&f->output_pads, &f->outputs, p);
#if FF_API_FOO_COUNT
FF_DISABLE_DEPRECATION_WARNINGS
f->output_count = f->nb_outputs;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
+ return ret;
}
/**
@@ -209,6 +309,29 @@ int ff_poll_frame(AVFilterLink *link);
*/
int ff_request_frame(AVFilterLink *link);
+#define AVFILTER_DEFINE_CLASS(fname) \
+ static const AVClass fname##_class = { \
+ .class_name = #fname, \
+ .item_name = av_default_item_name, \
+ .option = fname##_options, \
+ .version = LIBAVUTIL_VERSION_INT, \
+ .category = AV_CLASS_CATEGORY_FILTER, \
+ }
+
+AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink,
+ AVFilterBufferRef *ref);
+
+/**
+ * Find the index of a link.
+ *
+ * I.e. find i such that link == ctx->(in|out)puts[i]
+ */
+#define FF_INLINK_IDX(link) ((int)((link)->dstpad - (link)->dst->input_pads))
+#define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads))
+
+int ff_buffersink_read_compat(AVFilterContext *ctx, AVFilterBufferRef **buf);
+int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
+ int nb_samples);
/**
* Send a frame of data to the next filter.
*
@@ -223,6 +346,20 @@ int ff_request_frame(AVFilterLink *link);
int ff_filter_frame(AVFilterLink *link, AVFrame *frame);
/**
+ * Flags for AVFilterLink.flags.
+ */
+enum {
+
+ /**
+ * Frame requests may need to loop in order to be fulfilled.
+ * A filter must set this flags on an output link if it may return 0 in
+ * request_frame() without filtering a frame.
+ */
+ FF_LINK_FLAG_REQUEST_LOOP = 1,
+
+};
+
+/**
* Allocate a new filter context and return it.
*
* @param filter what filter to create an instance of
diff --git a/libavfilter/lavfutils.c b/libavfilter/lavfutils.c
new file mode 100644
index 0000000..80310d2
--- /dev/null
+++ b/libavfilter/lavfutils.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Stefano Sabatini <stefasab gmail com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "lavfutils.h"
+
+int ff_load_image(uint8_t *data[4], int linesize[4],
+ int *w, int *h, enum AVPixelFormat *pix_fmt,
+ const char *filename, void *log_ctx)
+{
+ AVInputFormat *iformat = NULL;
+ AVFormatContext *format_ctx = NULL;
+ AVCodec *codec;
+ AVCodecContext *codec_ctx;
+ AVFrame *frame;
+ int frame_decoded, ret = 0;
+ AVPacket pkt;
+
+ av_init_packet(&pkt);
+
+ av_register_all();
+
+ iformat = av_find_input_format("image2");
+ if ((ret = avformat_open_input(&format_ctx, filename, iformat, NULL)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Failed to open input file '%s'\n", filename);
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(format_ctx, NULL)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Find stream info failed\n");
+ return ret;
+ }
+
+ codec_ctx = format_ctx->streams[0]->codec;
+ codec = avcodec_find_decoder(codec_ctx->codec_id);
+ if (!codec) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to find codec\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to open codec\n");
+ goto end;
+ }
+
+ if (!(frame = av_frame_alloc()) ) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to alloc frame\n");
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ ret = av_read_frame(format_ctx, &pkt);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to read frame from file\n");
+ goto end;
+ }
+
+ ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
+ if (ret < 0 || !frame_decoded) {
+ av_log(log_ctx, AV_LOG_ERROR, "Failed to decode image from file\n");
+ if (ret >= 0)
+ ret = -1;
+ goto end;
+ }
+
+ *w = frame->width;
+ *h = frame->height;
+ *pix_fmt = frame->format;
+
+ if ((ret = av_image_alloc(data, linesize, *w, *h, *pix_fmt, 16)) < 0)
+ goto end;
+ ret = 0;
+
+ av_image_copy(data, linesize, (const uint8_t **)frame->data, frame->linesize, *pix_fmt, *w, *h);
+
+end:
+ av_free_packet(&pkt);
+ avcodec_close(codec_ctx);
+ avformat_close_input(&format_ctx);
+ av_freep(&frame);
+
+ if (ret < 0)
+ av_log(log_ctx, AV_LOG_ERROR, "Error loading image file '%s'\n", filename);
+ return ret;
+}
diff --git a/libavfilter/lavfutils.h b/libavfilter/lavfutils.h
new file mode 100644
index 0000000..2d5308f
--- /dev/null
+++ b/libavfilter/lavfutils.h
@@ -0,0 +1,43 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Miscellaneous utilities which make use of the libavformat library
+ */
+
+#ifndef AVFILTER_LAVFUTILS_H
+#define AVFILTER_LAVFUTILS_H
+
+#include "libavformat/avformat.h"
+
+/**
+ * Load image from filename and put the resulting image in data.
+ *
+ * @param w pointer to the width of the loaded image
+ * @param h pointer to the height of the loaded image
+ * @param pix_fmt pointer to the pixel format of the loaded image
+ * @param filename the name of the image file to load
+ * @param log_ctx log context
+ * @return >= 0 in case of success, a negative error code otherwise.
+ */
+int ff_load_image(uint8_t *data[4], int linesize[4],
+ int *w, int *h, enum AVPixelFormat *pix_fmt,
+ const char *filename, void *log_ctx);
+
+#endif /* AVFILTER_LAVFUTILS_H */
diff --git a/libavfilter/libmpcodecs/av_helpers.h b/libavfilter/libmpcodecs/av_helpers.h
new file mode 100644
index 0000000..90b67d5
--- /dev/null
+++ b/libavfilter/libmpcodecs/av_helpers.h
@@ -0,0 +1,27 @@
+/*
+ * Generic libav* helpers
+ *
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_AV_HELPERS_H
+#define MPLAYER_AV_HELPERS_H
+
+void ff_init_avcodec(void);
+void ff_init_avformat(void);
+
+#endif /* MPLAYER_AV_HELPERS_H */
diff --git a/libavfilter/libmpcodecs/cpudetect.h b/libavfilter/libmpcodecs/cpudetect.h
new file mode 100644
index 0000000..710f6e6
--- /dev/null
+++ b/libavfilter/libmpcodecs/cpudetect.h
@@ -0,0 +1,60 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_CPUDETECT_H
+#define MPLAYER_CPUDETECT_H
+
+#define CPUTYPE_I386 3
+#define CPUTYPE_I486 4
+#define CPUTYPE_I586 5
+#define CPUTYPE_I686 6
+
+#include "libavutil/x86_cpu.h"
+
+typedef struct cpucaps_s {
+ int cpuType;
+ int cpuModel;
+ int cpuStepping;
+ int hasMMX;
+ int hasMMX2;
+ int has3DNow;
+ int has3DNowExt;
+ int hasSSE;
+ int hasSSE2;
+ int hasSSE3;
+ int hasSSSE3;
+ int hasSSE4;
+ int hasSSE42;
+ int hasSSE4a;
+ int hasAVX;
+ int isX86;
+ unsigned cl_size; /* size of cache line */
+ int hasAltiVec;
+ int hasTSC;
+} CpuCaps;
+
+extern CpuCaps ff_gCpuCaps;
+
+void ff_do_cpuid(unsigned int ax, unsigned int *p);
+
+void ff_GetCpuCaps(CpuCaps *caps);
+
+/* returned value is malloc()'ed so free() it after use */
+char *ff_GetCpuFriendlyName(unsigned int regs[], unsigned int regs2[]);
+
+#endif /* MPLAYER_CPUDETECT_H */
diff --git a/libavfilter/libmpcodecs/img_format.c b/libavfilter/libmpcodecs/img_format.c
new file mode 100644
index 0000000..dd07f00
--- /dev/null
+++ b/libavfilter/libmpcodecs/img_format.c
@@ -0,0 +1,244 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "config.h"
+#include "img_format.h"
+#include "stdio.h"
+#include "libavutil/bswap.h"
+
+const char *ff_vo_format_name(int format)
+{
+ static char unknown_format[20];
+ switch(format)
+ {
+ case IMGFMT_RGB1: return "RGB 1-bit";
+ case IMGFMT_RGB4: return "RGB 4-bit";
+ case IMGFMT_RG4B: return "RGB 4-bit per byte";
+ case IMGFMT_RGB8: return "RGB 8-bit";
+ case IMGFMT_RGB12: return "RGB 12-bit";
+ case IMGFMT_RGB15: return "RGB 15-bit";
+ case IMGFMT_RGB16: return "RGB 16-bit";
+ case IMGFMT_RGB24: return "RGB 24-bit";
+// case IMGFMT_RGB32: return "RGB 32-bit";
+ case IMGFMT_RGB48LE: return "RGB 48-bit LE";
+ case IMGFMT_RGB48BE: return "RGB 48-bit BE";
+ case IMGFMT_RGB64LE: return "RGB 64-bit LE";
+ case IMGFMT_RGB64BE: return "RGB 64-bit BE";
+ case IMGFMT_BGR1: return "BGR 1-bit";
+ case IMGFMT_BGR4: return "BGR 4-bit";
+ case IMGFMT_BG4B: return "BGR 4-bit per byte";
+ case IMGFMT_BGR8: return "BGR 8-bit";
+ case IMGFMT_BGR12: return "BGR 12-bit";
+ case IMGFMT_BGR15: return "BGR 15-bit";
+ case IMGFMT_BGR16: return "BGR 16-bit";
+ case IMGFMT_BGR24: return "BGR 24-bit";
+// case IMGFMT_BGR32: return "BGR 32-bit";
+ case IMGFMT_ABGR: return "ABGR";
+ case IMGFMT_BGRA: return "BGRA";
+ case IMGFMT_ARGB: return "ARGB";
+ case IMGFMT_RGBA: return "RGBA";
+ case IMGFMT_XYZ12LE: return "XYZ 36-bit LE";
+ case IMGFMT_XYZ12BE: return "XYZ 36-bit BE";
+ case IMGFMT_GBR24P: return "Planar GBR 24-bit";
+ case IMGFMT_GBR12P: return "Planar GBR 36-bit";
+ case IMGFMT_GBR14P: return "Planar GBR 42-bit";
+ case IMGFMT_YVU9: return "Planar YVU9";
+ case IMGFMT_IF09: return "Planar IF09";
+ case IMGFMT_YV12: return "Planar YV12";
+ case IMGFMT_I420: return "Planar I420";
+ case IMGFMT_IYUV: return "Planar IYUV";
+ case IMGFMT_CLPL: return "Planar CLPL";
+ case IMGFMT_Y800: return "Planar Y800";
+ case IMGFMT_Y8: return "Planar Y8";
+ case IMGFMT_Y8A: return "Planar Y8 with alpha";
+ case IMGFMT_Y16_LE: return "Planar Y16 little-endian";
+ case IMGFMT_Y16_BE: return "Planar Y16 big-endian";
+ case IMGFMT_420P16_LE: return "Planar 420P 16-bit little-endian";
+ case IMGFMT_420P16_BE: return "Planar 420P 16-bit big-endian";
+ case IMGFMT_420P14_LE: return "Planar 420P 14-bit little-endian";
+ case IMGFMT_420P14_BE: return "Planar 420P 14-bit big-endian";
+ case IMGFMT_420P12_LE: return "Planar 420P 12-bit little-endian";
+ case IMGFMT_420P12_BE: return "Planar 420P 12-bit big-endian";
+ case IMGFMT_420P10_LE: return "Planar 420P 10-bit little-endian";
+ case IMGFMT_420P10_BE: return "Planar 420P 10-bit big-endian";
+ case IMGFMT_420P9_LE: return "Planar 420P 9-bit little-endian";
+ case IMGFMT_420P9_BE: return "Planar 420P 9-bit big-endian";
+ case IMGFMT_422P16_LE: return "Planar 422P 16-bit little-endian";
+ case IMGFMT_422P16_BE: return "Planar 422P 16-bit big-endian";
+ case IMGFMT_422P14_LE: return "Planar 422P 14-bit little-endian";
+ case IMGFMT_422P14_BE: return "Planar 422P 14-bit big-endian";
+ case IMGFMT_422P12_LE: return "Planar 422P 12-bit little-endian";
+ case IMGFMT_422P12_BE: return "Planar 422P 12-bit big-endian";
+ case IMGFMT_422P10_LE: return "Planar 422P 10-bit little-endian";
+ case IMGFMT_422P10_BE: return "Planar 422P 10-bit big-endian";
+ case IMGFMT_422P9_LE: return "Planar 422P 9-bit little-endian";
+ case IMGFMT_422P9_BE: return "Planar 422P 9-bit big-endian";
+ case IMGFMT_444P16_LE: return "Planar 444P 16-bit little-endian";
+ case IMGFMT_444P16_BE: return "Planar 444P 16-bit big-endian";
+ case IMGFMT_444P14_LE: return "Planar 444P 14-bit little-endian";
+ case IMGFMT_444P14_BE: return "Planar 444P 14-bit big-endian";
+ case IMGFMT_444P12_LE: return "Planar 444P 12-bit little-endian";
+ case IMGFMT_444P12_BE: return "Planar 444P 12-bit big-endian";
+ case IMGFMT_444P10_LE: return "Planar 444P 10-bit little-endian";
+ case IMGFMT_444P10_BE: return "Planar 444P 10-bit big-endian";
+ case IMGFMT_444P9_LE: return "Planar 444P 9-bit little-endian";
+ case IMGFMT_444P9_BE: return "Planar 444P 9-bit big-endian";
+ case IMGFMT_420A: return "Planar 420P with alpha";
+ case IMGFMT_444P: return "Planar 444P";
+ case IMGFMT_444A: return "Planar 444P with alpha";
+ case IMGFMT_422P: return "Planar 422P";
+ case IMGFMT_422A: return "Planar 422P with alpha";
+ case IMGFMT_411P: return "Planar 411P";
+ case IMGFMT_440P: return "Planar 440P";
+ case IMGFMT_NV12: return "Planar NV12";
+ case IMGFMT_NV21: return "Planar NV21";
+ case IMGFMT_HM12: return "Planar NV12 Macroblock";
+ case IMGFMT_IUYV: return "Packed IUYV";
+ case IMGFMT_IY41: return "Packed IY41";
+ case IMGFMT_IYU1: return "Packed IYU1";
+ case IMGFMT_IYU2: return "Packed IYU2";
+ case IMGFMT_UYVY: return "Packed UYVY";
+ case IMGFMT_UYNV: return "Packed UYNV";
+ case IMGFMT_cyuv: return "Packed CYUV";
+ case IMGFMT_Y422: return "Packed Y422";
+ case IMGFMT_YUY2: return "Packed YUY2";
+ case IMGFMT_YUNV: return "Packed YUNV";
+ case IMGFMT_YVYU: return "Packed YVYU";
+ case IMGFMT_Y41P: return "Packed Y41P";
+ case IMGFMT_Y211: return "Packed Y211";
+ case IMGFMT_Y41T: return "Packed Y41T";
+ case IMGFMT_Y42T: return "Packed Y42T";
+ case IMGFMT_V422: return "Packed V422";
+ case IMGFMT_V655: return "Packed V655";
+ case IMGFMT_CLJR: return "Packed CLJR";
+ case IMGFMT_YUVP: return "Packed YUVP";
+ case IMGFMT_UYVP: return "Packed UYVP";
+ case IMGFMT_MPEGPES: return "Mpeg PES";
+ case IMGFMT_ZRMJPEGNI: return "Zoran MJPEG non-interlaced";
+ case IMGFMT_ZRMJPEGIT: return "Zoran MJPEG top field first";
+ case IMGFMT_ZRMJPEGIB: return "Zoran MJPEG bottom field first";
+ case IMGFMT_XVMC_MOCO_MPEG2: return "MPEG1/2 Motion Compensation";
+ case IMGFMT_XVMC_IDCT_MPEG2: return "MPEG1/2 Motion Compensation and IDCT";
+ case IMGFMT_VDPAU_MPEG1: return "MPEG1 VDPAU acceleration";
+ case IMGFMT_VDPAU_MPEG2: return "MPEG2 VDPAU acceleration";
+ case IMGFMT_VDPAU_H264: return "H.264 VDPAU acceleration";
+ case IMGFMT_VDPAU_MPEG4: return "MPEG-4 Part 2 VDPAU acceleration";
+ case IMGFMT_VDPAU_WMV3: return "WMV3 VDPAU acceleration";
+ case IMGFMT_VDPAU_VC1: return "VC1 VDPAU acceleration";
+ }
+ snprintf(unknown_format,20,"Unknown 0x%04x",format);
+ return unknown_format;
+}
+
+int ff_mp_get_chroma_shift(int format, int *x_shift, int *y_shift, int *component_bits)
+{
+ int xs = 0, ys = 0;
+ int bpp;
+ int err = 0;
+ int bits = 8;
+ if ((format & 0xff0000f0) == 0x34000050)
+ format = av_bswap32(format);
+ if ((format & 0xf00000ff) == 0x50000034) {
+ switch (format >> 24) {
+ case 0x50:
+ break;
+ case 0x51:
+ bits = 16;
+ break;
+ case 0x52:
+ bits = 10;
+ break;
+ case 0x53:
+ bits = 9;
+ break;
+ default:
+ err = 1;
+ break;
+ }
+ switch (format & 0x00ffffff) {
+ case 0x00343434: // 444
+ xs = 0;
+ ys = 0;
+ break;
+ case 0x00323234: // 422
+ xs = 1;
+ ys = 0;
+ break;
+ case 0x00303234: // 420
+ xs = 1;
+ ys = 1;
+ break;
+ case 0x00313134: // 411
+ xs = 2;
+ ys = 0;
+ break;
+ case 0x00303434: // 440
+ xs = 0;
+ ys = 1;
+ break;
+ default:
+ err = 1;
+ break;
+ }
+ } else switch (format) {
+ case IMGFMT_444A:
+ xs = 0;
+ ys = 0;
+ break;
+ case IMGFMT_422A:
+ xs = 1;
+ ys = 0;
+ break;
+ case IMGFMT_420A:
+ case IMGFMT_I420:
+ case IMGFMT_IYUV:
+ case IMGFMT_YV12:
+ xs = 1;
+ ys = 1;
+ break;
+ case IMGFMT_IF09:
+ case IMGFMT_YVU9:
+ xs = 2;
+ ys = 2;
+ break;
+ case IMGFMT_Y8:
+ case IMGFMT_Y800:
+ xs = 31;
+ ys = 31;
+ break;
+ case IMGFMT_NV12:
+ case IMGFMT_NV21:
+ xs = 1;
+ ys = 1;
+ // TODO: allowing this though currently breaks
+ // things all over the place.
+ err = 1;
+ break;
+ default:
+ err = 1;
+ break;
+ }
+ if (x_shift) *x_shift = xs;
+ if (y_shift) *y_shift = ys;
+ if (component_bits) *component_bits = bits;
+ bpp = 8 + ((16 >> xs) >> ys);
+ if (format == IMGFMT_420A || format == IMGFMT_422A || format == IMGFMT_444A)
+ bpp += 8;
+ bpp *= (bits + 7) >> 3;
+ return err ? 0 : bpp;
+}
diff --git a/libavfilter/libmpcodecs/img_format.h b/libavfilter/libmpcodecs/img_format.h
new file mode 100644
index 0000000..b5c0b90
--- /dev/null
+++ b/libavfilter/libmpcodecs/img_format.h
@@ -0,0 +1,309 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_IMG_FORMAT_H
+#define MPLAYER_IMG_FORMAT_H
+
+#include "config.h"
+
+/* RGB/BGR Formats */
+
+#define IMGFMT_RGB_MASK 0xFFFFFF00
+#define IMGFMT_RGB (('R'<<24)|('G'<<16)|('B'<<8))
+#define IMGFMT_RGB1 (IMGFMT_RGB|1)
+#define IMGFMT_RGB4 (IMGFMT_RGB|4)
+#define IMGFMT_RGB4_CHAR (IMGFMT_RGB|4|128) // RGB4 with 1 pixel per byte
+#define IMGFMT_RGB8 (IMGFMT_RGB|8)
+#define IMGFMT_RGB12 (IMGFMT_RGB|12)
+#define IMGFMT_RGB15 (IMGFMT_RGB|15)
+#define IMGFMT_RGB16 (IMGFMT_RGB|16)
+#define IMGFMT_RGB24 (IMGFMT_RGB|24)
+#define IMGFMT_RGB32 (IMGFMT_RGB|32)
+#define IMGFMT_RGB48LE (IMGFMT_RGB|48)
+#define IMGFMT_RGB48BE (IMGFMT_RGB|48|128)
+#define IMGFMT_RGB64LE (IMGFMT_RGB|64)
+#define IMGFMT_RGB64BE (IMGFMT_RGB|64|128)
+
+#define IMGFMT_BGR_MASK 0xFFFFFF00
+#define IMGFMT_BGR (('B'<<24)|('G'<<16)|('R'<<8))
+#define IMGFMT_BGR1 (IMGFMT_BGR|1)
+#define IMGFMT_BGR4 (IMGFMT_BGR|4)
+#define IMGFMT_BGR4_CHAR (IMGFMT_BGR|4|128) // BGR4 with 1 pixel per byte
+#define IMGFMT_BGR8 (IMGFMT_BGR|8)
+#define IMGFMT_BGR12 (IMGFMT_BGR|12)
+#define IMGFMT_BGR15 (IMGFMT_BGR|15)
+#define IMGFMT_BGR16 (IMGFMT_BGR|16)
+#define IMGFMT_BGR24 (IMGFMT_BGR|24)
+#define IMGFMT_BGR32 (IMGFMT_BGR|32)
+
+#define IMGFMT_XYZ_MASK 0xFFFFFF00
+#define IMGFMT_XYZ (('X'<<24)|('Y'<<16)|('Z'<<8))
+#define IMGFMT_XYZ12LE (IMGFMT_XYZ|12)
+#define IMGFMT_XYZ12BE (IMGFMT_XYZ|12|128)
+
+#define IMGFMT_GBR24P (('G'<<24)|('B'<<16)|('R'<<8)|24)
+#define IMGFMT_GBR12PLE (('G'<<24)|('B'<<16)|('R'<<8)|36)
+#define IMGFMT_GBR12PBE (('G'<<24)|('B'<<16)|('R'<<8)|36|128)
+#define IMGFMT_GBR14PLE (('G'<<24)|('B'<<16)|('R'<<8)|42)
+#define IMGFMT_GBR14PBE (('G'<<24)|('B'<<16)|('R'<<8)|42|128)
+
+#if HAVE_BIGENDIAN
+#define IMGFMT_ABGR IMGFMT_RGB32
+#define IMGFMT_BGRA (IMGFMT_RGB32|128)
+#define IMGFMT_ARGB IMGFMT_BGR32
+#define IMGFMT_RGBA (IMGFMT_BGR32|128)
+#define IMGFMT_RGB64NE IMGFMT_RGB64BE
+#define IMGFMT_RGB48NE IMGFMT_RGB48BE
+#define IMGFMT_RGB12BE IMGFMT_RGB12
+#define IMGFMT_RGB12LE (IMGFMT_RGB12|128)
+#define IMGFMT_RGB15BE IMGFMT_RGB15
+#define IMGFMT_RGB15LE (IMGFMT_RGB15|128)
+#define IMGFMT_RGB16BE IMGFMT_RGB16
+#define IMGFMT_RGB16LE (IMGFMT_RGB16|128)
+#define IMGFMT_BGR12BE IMGFMT_BGR12
+#define IMGFMT_BGR12LE (IMGFMT_BGR12|128)
+#define IMGFMT_BGR15BE IMGFMT_BGR15
+#define IMGFMT_BGR15LE (IMGFMT_BGR15|128)
+#define IMGFMT_BGR16BE IMGFMT_BGR16
+#define IMGFMT_BGR16LE (IMGFMT_BGR16|128)
+#define IMGFMT_XYZ12 IMGFMT_XYZ12BE
+#define IMGFMT_GBR12P IMGFMT_GBR12PBE
+#define IMGFMT_GBR14P IMGFMT_GBR14PBE
+#else
+#define IMGFMT_ABGR (IMGFMT_BGR32|128)
+#define IMGFMT_BGRA IMGFMT_BGR32
+#define IMGFMT_ARGB (IMGFMT_RGB32|128)
+#define IMGFMT_RGBA IMGFMT_RGB32
+#define IMGFMT_RGB64NE IMGFMT_RGB64LE
+#define IMGFMT_RGB48NE IMGFMT_RGB48LE
+#define IMGFMT_RGB12BE (IMGFMT_RGB12|128)
+#define IMGFMT_RGB12LE IMGFMT_RGB12
+#define IMGFMT_RGB15BE (IMGFMT_RGB15|128)
+#define IMGFMT_RGB15LE IMGFMT_RGB15
+#define IMGFMT_RGB16BE (IMGFMT_RGB16|128)
+#define IMGFMT_RGB16LE IMGFMT_RGB16
+#define IMGFMT_BGR12BE (IMGFMT_BGR12|128)
+#define IMGFMT_BGR12LE IMGFMT_BGR12
+#define IMGFMT_BGR15BE (IMGFMT_BGR15|128)
+#define IMGFMT_BGR15LE IMGFMT_BGR15
+#define IMGFMT_BGR16BE (IMGFMT_BGR16|128)
+#define IMGFMT_BGR16LE IMGFMT_BGR16
+#define IMGFMT_XYZ12 IMGFMT_XYZ12LE
+#define IMGFMT_GBR12P IMGFMT_GBR12PLE
+#define IMGFMT_GBR14P IMGFMT_GBR14PLE
+#endif
+
+/* old names for compatibility */
+#define IMGFMT_RG4B IMGFMT_RGB4_CHAR
+#define IMGFMT_BG4B IMGFMT_BGR4_CHAR
+
+#define IMGFMT_IS_RGB(fmt) (((fmt)&IMGFMT_RGB_MASK)==IMGFMT_RGB)
+#define IMGFMT_IS_BGR(fmt) (((fmt)&IMGFMT_BGR_MASK)==IMGFMT_BGR)
+#define IMGFMT_IS_XYZ(fmt) (((fmt)&IMGFMT_XYZ_MASK)==IMGFMT_XYZ)
+
+#define IMGFMT_RGB_DEPTH(fmt) ((fmt)&0x7F)
+#define IMGFMT_BGR_DEPTH(fmt) ((fmt)&0x7F)
+#define IMGFMT_XYZ_DEPTH(fmt) ((fmt)&0x7F)
+
+
+/* Planar YUV Formats */
+
+#define IMGFMT_YVU9 0x39555659
+#define IMGFMT_IF09 0x39304649
+#define IMGFMT_YV12 0x32315659
+#define IMGFMT_I420 0x30323449
+#define IMGFMT_IYUV 0x56555949
+#define IMGFMT_CLPL 0x4C504C43
+#define IMGFMT_Y800 0x30303859
+#define IMGFMT_Y8 0x20203859
+#define IMGFMT_NV12 0x3231564E
+#define IMGFMT_NV21 0x3132564E
+#define IMGFMT_Y16_LE 0x20363159
+
+/* unofficial Planar Formats, FIXME if official 4CC exists */
+#define IMGFMT_444P 0x50343434
+#define IMGFMT_422P 0x50323234
+#define IMGFMT_411P 0x50313134
+#define IMGFMT_440P 0x50303434
+#define IMGFMT_HM12 0x32314D48
+#define IMGFMT_Y16_BE 0x59313620
+
+// Gray with alpha
+#define IMGFMT_Y8A 0x59320008
+// 4:2:0 planar with alpha
+#define IMGFMT_420A 0x41303234
+// 4:2:2 planar with alpha
+#define IMGFMT_422A 0x41323234
+// 4:4:4 planar with alpha
+#define IMGFMT_444A 0x41343434
+
+#define IMGFMT_444P16_LE 0x51343434
+#define IMGFMT_444P16_BE 0x34343451
+#define IMGFMT_444P14_LE 0x54343434
+#define IMGFMT_444P14_BE 0x34343454
+#define IMGFMT_444P12_LE 0x55343434
+#define IMGFMT_444P12_BE 0x34343455
+#define IMGFMT_444P10_LE 0x52343434
+#define IMGFMT_444P10_BE 0x34343452
+#define IMGFMT_444P9_LE 0x53343434
+#define IMGFMT_444P9_BE 0x34343453
+#define IMGFMT_422P16_LE 0x51323234
+#define IMGFMT_422P16_BE 0x34323251
+#define IMGFMT_422P14_LE 0x54323234
+#define IMGFMT_422P14_BE 0x34323254
+#define IMGFMT_422P12_LE 0x55323234
+#define IMGFMT_422P12_BE 0x34323255
+#define IMGFMT_422P10_LE 0x52323234
+#define IMGFMT_422P10_BE 0x34323252
+#define IMGFMT_422P9_LE 0x53323234
+#define IMGFMT_422P9_BE 0x34323253
+#define IMGFMT_420P16_LE 0x51303234
+#define IMGFMT_420P16_BE 0x34323051
+#define IMGFMT_420P14_LE 0x54303234
+#define IMGFMT_420P14_BE 0x34323054
+#define IMGFMT_420P12_LE 0x55303234
+#define IMGFMT_420P12_BE 0x34323055
+#define IMGFMT_420P10_LE 0x52303234
+#define IMGFMT_420P10_BE 0x34323052
+#define IMGFMT_420P9_LE 0x53303234
+#define IMGFMT_420P9_BE 0x34323053
+#if HAVE_BIGENDIAN
+#define IMGFMT_444P16 IMGFMT_444P16_BE
+#define IMGFMT_444P14 IMGFMT_444P14_BE
+#define IMGFMT_444P12 IMGFMT_444P12_BE
+#define IMGFMT_444P10 IMGFMT_444P10_BE
+#define IMGFMT_444P9 IMGFMT_444P9_BE
+#define IMGFMT_422P16 IMGFMT_422P16_BE
+#define IMGFMT_422P14 IMGFMT_422P14_BE
+#define IMGFMT_422P12 IMGFMT_422P12_BE
+#define IMGFMT_422P10 IMGFMT_422P10_BE
+#define IMGFMT_422P9 IMGFMT_422P9_BE
+#define IMGFMT_420P16 IMGFMT_420P16_BE
+#define IMGFMT_420P14 IMGFMT_420P14_BE
+#define IMGFMT_420P12 IMGFMT_420P12_BE
+#define IMGFMT_420P10 IMGFMT_420P10_BE
+#define IMGFMT_420P9 IMGFMT_420P9_BE
+#define IMGFMT_Y16 IMGFMT_Y16_BE
+#define IMGFMT_IS_YUVP16_NE(fmt) IMGFMT_IS_YUVP16_BE(fmt)
+#else
+#define IMGFMT_444P16 IMGFMT_444P16_LE
+#define IMGFMT_444P14 IMGFMT_444P14_LE
+#define IMGFMT_444P12 IMGFMT_444P12_LE
+#define IMGFMT_444P10 IMGFMT_444P10_LE
+#define IMGFMT_444P9 IMGFMT_444P9_LE
+#define IMGFMT_422P16 IMGFMT_422P16_LE
+#define IMGFMT_422P14 IMGFMT_422P14_LE
+#define IMGFMT_422P12 IMGFMT_422P12_LE
+#define IMGFMT_422P10 IMGFMT_422P10_LE
+#define IMGFMT_422P9 IMGFMT_422P9_LE
+#define IMGFMT_420P16 IMGFMT_420P16_LE
+#define IMGFMT_420P14 IMGFMT_420P14_LE
+#define IMGFMT_420P12 IMGFMT_420P12_LE
+#define IMGFMT_420P10 IMGFMT_420P10_LE
+#define IMGFMT_420P9 IMGFMT_420P9_LE
+#define IMGFMT_Y16 IMGFMT_Y16_LE
+#define IMGFMT_IS_YUVP16_NE(fmt) IMGFMT_IS_YUVP16_LE(fmt)
+#endif
+
+#define IMGFMT_IS_YUVP16_LE(fmt) (((fmt - 0x51000034) & 0xfc0000ff) == 0)
+#define IMGFMT_IS_YUVP16_BE(fmt) (((fmt - 0x34000051) & 0xff0000fc) == 0)
+#define IMGFMT_IS_YUVP16(fmt) (IMGFMT_IS_YUVP16_LE(fmt) || IMGFMT_IS_YUVP16_BE(fmt))
+
+/**
+ * \brief Find the corresponding full 16 bit format, i.e. IMGFMT_420P10_LE -> IMGFMT_420P16_LE
+ * \return normalized format ID or 0 if none exists.
+ */
+static inline int normalize_yuvp16(int fmt) {
+ if (IMGFMT_IS_YUVP16_LE(fmt))
+ return (fmt & 0x00ffffff) | 0x51000000;
+ if (IMGFMT_IS_YUVP16_BE(fmt))
+ return (fmt & 0xffffff00) | 0x00000051;
+ return 0;
+}
+
+/* Packed YUV Formats */
+
+#define IMGFMT_IUYV 0x56595549 // Interlaced UYVY
+#define IMGFMT_IY41 0x31435949 // Interlaced Y41P
+#define IMGFMT_IYU1 0x31555949
+#define IMGFMT_IYU2 0x32555949
+#define IMGFMT_UYVY 0x59565955
+#define IMGFMT_UYNV 0x564E5955 // Exactly same as UYVY
+#define IMGFMT_cyuv 0x76757963 // upside-down UYVY
+#define IMGFMT_Y422 0x32323459 // Exactly same as UYVY
+#define IMGFMT_YUY2 0x32595559
+#define IMGFMT_YUNV 0x564E5559 // Exactly same as YUY2
+#define IMGFMT_YVYU 0x55595659
+#define IMGFMT_Y41P 0x50313459
+#define IMGFMT_Y211 0x31313259
+#define IMGFMT_Y41T 0x54313459 // Y41P, Y lsb = transparency
+#define IMGFMT_Y42T 0x54323459 // UYVY, Y lsb = transparency
+#define IMGFMT_V422 0x32323456 // upside-down UYVY?
+#define IMGFMT_V655 0x35353656
+#define IMGFMT_CLJR 0x524A4C43
+#define IMGFMT_YUVP 0x50565559 // 10-bit YUYV
+#define IMGFMT_UYVP 0x50565955 // 10-bit UYVY
+
+/* Compressed Formats */
+#define IMGFMT_MPEGPES (('M'<<24)|('P'<<16)|('E'<<8)|('S'))
+#define IMGFMT_MJPEG (('M')|('J'<<8)|('P'<<16)|('G'<<24))
+/* Formats that are understood by zoran chips, we include
+ * non-interlaced, interlaced top-first, interlaced bottom-first */
+#define IMGFMT_ZRMJPEGNI (('Z'<<24)|('R'<<16)|('N'<<8)|('I'))
+#define IMGFMT_ZRMJPEGIT (('Z'<<24)|('R'<<16)|('I'<<8)|('T'))
+#define IMGFMT_ZRMJPEGIB (('Z'<<24)|('R'<<16)|('I'<<8)|('B'))
+
+// I think that this code could not be used by any other codec/format
+#define IMGFMT_XVMC 0x1DC70000
+#define IMGFMT_XVMC_MASK 0xFFFF0000
+#define IMGFMT_IS_XVMC(fmt) (((fmt)&IMGFMT_XVMC_MASK)==IMGFMT_XVMC)
+//these are chroma420
+#define IMGFMT_XVMC_MOCO_MPEG2 (IMGFMT_XVMC|0x02)
+#define IMGFMT_XVMC_IDCT_MPEG2 (IMGFMT_XVMC|0x82)
+
+// VDPAU specific format.
+#define IMGFMT_VDPAU 0x1DC80000
+#define IMGFMT_VDPAU_MASK 0xFFFF0000
+#define IMGFMT_IS_VDPAU(fmt) (((fmt)&IMGFMT_VDPAU_MASK)==IMGFMT_VDPAU)
+#define IMGFMT_VDPAU_MPEG1 (IMGFMT_VDPAU|0x01)
+#define IMGFMT_VDPAU_MPEG2 (IMGFMT_VDPAU|0x02)
+#define IMGFMT_VDPAU_H264 (IMGFMT_VDPAU|0x03)
+#define IMGFMT_VDPAU_WMV3 (IMGFMT_VDPAU|0x04)
+#define IMGFMT_VDPAU_VC1 (IMGFMT_VDPAU|0x05)
+#define IMGFMT_VDPAU_MPEG4 (IMGFMT_VDPAU|0x06)
+
+#define IMGFMT_IS_HWACCEL(fmt) (IMGFMT_IS_VDPAU(fmt) || IMGFMT_IS_XVMC(fmt))
+
+typedef struct {
+ void* data;
+ int size;
+ int id; // stream id. usually 0x1E0
+ int timestamp; // pts, 90000 Hz counter based
+} vo_mpegpes_t;
+
+const char *ff_vo_format_name(int format);
+
+/**
+ * Calculates the scale shifts for the chroma planes for planar YUV
+ *
+ * \param component_bits bits per component
+ * \return bits-per-pixel for format if successful (i.e. format is 3 or 4-planes planar YUV), 0 otherwise
+ */
+int ff_mp_get_chroma_shift(int format, int *x_shift, int *y_shift, int *component_bits);
+
+#endif /* MPLAYER_IMG_FORMAT_H */
diff --git a/libavfilter/libmpcodecs/libvo/fastmemcpy.h b/libavfilter/libmpcodecs/libvo/fastmemcpy.h
new file mode 100644
index 0000000..5a17d01
--- /dev/null
+++ b/libavfilter/libmpcodecs/libvo/fastmemcpy.h
@@ -0,0 +1,99 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with MPlayer; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef MPLAYER_FASTMEMCPY_H
+#define MPLAYER_FASTMEMCPY_H
+
+#include <inttypes.h>
+#include <string.h>
+#include <stddef.h>
+
+void * fast_memcpy(void * to, const void * from, size_t len);
+void * mem2agpcpy(void * to, const void * from, size_t len);
+
+#if ! defined(CONFIG_FASTMEMCPY) || ! (HAVE_MMX || HAVE_MMX2 || HAVE_AMD3DNOW /* || HAVE_SSE || HAVE_SSE2 */)
+#define mem2agpcpy(a,b,c) memcpy(a,b,c)
+#define fast_memcpy(a,b,c) memcpy(a,b,c)
+#endif
+
+static inline void * mem2agpcpy_pic(void * dst, const void * src, int bytesPerLine, int height, int dstStride, int srcStride)
+{
+ int i;
+ void *retval=dst;
+
+ if(dstStride == srcStride)
+ {
+ if (srcStride < 0) {
+ src = (const uint8_t*)src + (height-1)*srcStride;
+ dst = (uint8_t*)dst + (height-1)*dstStride;
+ srcStride = -srcStride;
+ }
+
+ mem2agpcpy(dst, src, srcStride*height);
+ }
+ else
+ {
+ for(i=0; i<height; i++)
+ {
+ mem2agpcpy(dst, src, bytesPerLine);
+ src = (const uint8_t*)src + srcStride;
+ dst = (uint8_t*)dst + dstStride;
+ }
+ }
+
+ return retval;
+}
+
+#define memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 0)
+#define my_memcpy_pic(d, s, b, h, ds, ss) memcpy_pic2(d, s, b, h, ds, ss, 1)
+
+/**
+ * \param limit2width always skip data between end of line and start of next
+ * instead of copying the full block when strides are the same
+ */
+static inline void * memcpy_pic2(void * dst, const void * src,
+ int bytesPerLine, int height,
+ int dstStride, int srcStride, int limit2width)
+{
+ int i;
+ void *retval=dst;
+
+ if(!limit2width && dstStride == srcStride)
+ {
+ if (srcStride < 0) {
+ src = (const uint8_t*)src + (height-1)*srcStride;
+ dst = (uint8_t*)dst + (height-1)*dstStride;
+ srcStride = -srcStride;
+ }
+
+ fast_memcpy(dst, src, srcStride*height);
+ }
+ else
+ {
+ for(i=0; i<height; i++)
+ {
+ fast_memcpy(dst, src, bytesPerLine);
+ src = (const uint8_t*)src + srcStride;
+ dst = (uint8_t*)dst + dstStride;
+ }
+ }
+
+ return retval;
+}
+
+#endif /* MPLAYER_FASTMEMCPY_H */
diff --git a/libavfilter/libmpcodecs/libvo/video_out.h b/libavfilter/libmpcodecs/libvo/video_out.h
new file mode 100644
index 0000000..49d3098
--- /dev/null
+++ b/libavfilter/libmpcodecs/libvo/video_out.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) Aaron Holtzman - Aug 1999
+ * Strongly modified, most parts rewritten: A'rpi/ESP-team - 2000-2001
+ * (C) MPlayer developers
+ *
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_VIDEO_OUT_H
+#define MPLAYER_VIDEO_OUT_H
+
+#include <inttypes.h>
+#include <stdarg.h>
+
+//#include "sub/font_load.h"
+#include "../img_format.h"
+//#include "vidix/vidix.h"
+
+
+#define ROTATE(t, x, y) do { \
+ t rot_tmp = x; \
+ x = y; \
+ y = -rot_tmp; \
+} while(0)
+
+#define VO_EVENT_EXPOSE 1
+#define VO_EVENT_RESIZE 2
+#define VO_EVENT_KEYPRESS 4
+#define VO_EVENT_REINIT 8
+#define VO_EVENT_MOVE 16
+#define VO_EVENT_MOUSE 32
+
+/* Obsolete: VOCTRL_QUERY_VAA 1 */
+/* does the device support the required format */
+#define VOCTRL_QUERY_FORMAT 2
+/* signal a device reset seek */
+#define VOCTRL_RESET 3
+/* true if vo driver can use GUI created windows */
+#define VOCTRL_GUISUPPORT 4
+/* used to switch to fullscreen */
+#define VOCTRL_FULLSCREEN 5
+/* signal a device pause */
+#define VOCTRL_PAUSE 7
+/* start/resume playback */
+#define VOCTRL_RESUME 8
+/* libmpcodecs direct rendering: */
+#define VOCTRL_GET_IMAGE 9
+#define VOCTRL_DRAW_IMAGE 13
+#define VOCTRL_SET_SPU_PALETTE 14
+/* decoding ahead: */
+#define VOCTRL_GET_NUM_FRAMES 10
+#define VOCTRL_GET_FRAME_NUM 11
+#define VOCTRL_SET_FRAME_NUM 12
+#define VOCTRL_GET_PANSCAN 15
+#define VOCTRL_SET_PANSCAN 16
+/* equalizer controls */
+#define VOCTRL_SET_EQUALIZER 17
+#define VOCTRL_GET_EQUALIZER 18
+/* Frame duplication */
+#define VOCTRL_DUPLICATE_FRAME 20
+// ... 21
+#define VOCTRL_START_SLICE 21
+
+#define VOCTRL_ONTOP 25
+#define VOCTRL_ROOTWIN 26
+#define VOCTRL_BORDER 27
+#define VOCTRL_DRAW_EOSD 28
+#define VOCTRL_GET_EOSD_RES 29
+
+#define VOCTRL_SET_DEINTERLACE 30
+#define VOCTRL_GET_DEINTERLACE 31
+
+#define VOCTRL_UPDATE_SCREENINFO 32
+
+// Vo can be used by xover
+#define VOCTRL_XOVERLAY_SUPPORT 22
+
+#define VOCTRL_XOVERLAY_SET_COLORKEY 24
+typedef struct {
+ uint32_t x11; // The raw x11 color
+ uint16_t r,g,b;
+} mp_colorkey_t;
+
+#define VOCTRL_XOVERLAY_SET_WIN 23
+typedef struct {
+ int x,y;
+ int w,h;
+} mp_win_t;
+
+#define VO_TRUE 1
+#define VO_FALSE 0
+#define VO_ERROR -1
+#define VO_NOTAVAIL -2
+#define VO_NOTIMPL -3
+
+#define VOFLAG_FULLSCREEN 0x01
+#define VOFLAG_MODESWITCHING 0x02
+#define VOFLAG_SWSCALE 0x04
+#define VOFLAG_FLIPPING 0x08
+#define VOFLAG_HIDDEN 0x10 //< Use to create a hidden window
+#define VOFLAG_STEREO 0x20 //< Use to create a stereo-capable window
+#define VOFLAG_DEPTH 0x40 //< Request a depth buffer
+#define VOFLAG_XOVERLAY_SUB_VO 0x10000
+
+typedef struct vo_info_s
+{
+ /* driver name ("Matrox Millennium G200/G400" */
+ const char *name;
+ /* short name (for config strings) ("mga") */
+ const char *short_name;
+ /* author ("Aaron Holtzman <aholtzma@ess.engr.uvic.ca>") */
+ const char *author;
+ /* any additional comments */
+ const char *comment;
+} vo_info_t;
+
+typedef struct vo_functions_s
+{
+ const vo_info_t *info;
+ /*
+ * Preinitializes driver (real INITIALIZATION)
+ * arg - currently it's vo_subdevice
+ * returns: zero on successful initialization, non-zero on error.
+ */
+ int (*preinit)(const char *arg);
+ /*
+ * Initialize (means CONFIGURE) the display driver.
+ * params:
+ * width,height: image source size
+ * d_width,d_height: size of the requested window size, just a hint
+ * fullscreen: flag, 0=windowd 1=fullscreen, just a hint
+ * title: window title, if available
+ * format: fourcc of pixel format
+ * returns : zero on successful initialization, non-zero on error.
+ */
+ int (*config)(uint32_t width, uint32_t height, uint32_t d_width,
+ uint32_t d_height, uint32_t fullscreen, char *title,
+ uint32_t format);
+
+ /*
+ * Control interface
+ */
+ int (*control)(uint32_t request, void *data, ...);
+
+ /*
+ * Display a new RGB/BGR frame of the video to the screen.
+ * params:
+ * src[0] - pointer to the image
+ */
+ int (*draw_frame)(uint8_t *src[]);
+
+ /*
+ * Draw a planar YUV slice to the buffer:
+ * params:
+ * src[3] = source image planes (Y,U,V)
+ * stride[3] = source image planes line widths (in bytes)
+ * w,h = width*height of area to be copied (in Y pixels)
+ * x,y = position at the destination image (in Y pixels)
+ */
+ int (*draw_slice)(uint8_t *src[], int stride[], int w,int h, int x,int y);
+
+ /*
+ * Draws OSD to the screen buffer
+ */
+ void (*draw_osd)(void);
+
+ /*
+ * Blit/Flip buffer to the screen. Must be called after each frame!
+ */
+ void (*flip_page)(void);
+
+ /*
+ * This func is called after every frames to handle keyboard and
+ * other events. It's called in PAUSE mode too!
+ */
+ void (*check_events)(void);
+
+ /*
+ * Closes driver. Should restore the original state of the system.
+ */
+ void (*uninit)(void);
+} vo_functions_t;
+
+const vo_functions_t* init_best_video_out(char** vo_list);
+int config_video_out(const vo_functions_t *vo, uint32_t width, uint32_t height,
+ uint32_t d_width, uint32_t d_height, uint32_t flags,
+ char *title, uint32_t format);
+void list_video_out(void);
+
+// NULL terminated array of all drivers
+extern const vo_functions_t* const video_out_drivers[];
+
+extern int vo_flags;
+
+extern int vo_config_count;
+
+extern int xinerama_screen;
+extern int xinerama_x;
+extern int xinerama_y;
+
+// correct resolution/bpp on screen: (should be autodetected by vo_init())
+extern int vo_depthonscreen;
+extern int vo_screenwidth;
+extern int vo_screenheight;
+
+// requested resolution/bpp: (-x -y -bpp options)
+extern int vo_dx;
+extern int vo_dy;
+extern int vo_dwidth;
+extern int vo_dheight;
+extern int vo_dbpp;
+
+extern int vo_grabpointer;
+extern int vo_doublebuffering;
+extern int vo_directrendering;
+extern int vo_vsync;
+extern int vo_fsmode;
+extern float vo_panscan;
+extern float vo_border_pos_x;
+extern float vo_border_pos_y;
+extern int vo_rotate;
+extern int vo_adapter_num;
+extern int vo_refresh_rate;
+extern int vo_keepaspect;
+extern int vo_rootwin;
+extern int vo_ontop;
+extern int vo_border;
+
+extern int vo_gamma_gamma;
+extern int vo_gamma_brightness;
+extern int vo_gamma_saturation;
+extern int vo_gamma_contrast;
+extern int vo_gamma_hue;
+extern int vo_gamma_red_intensity;
+extern int vo_gamma_green_intensity;
+extern int vo_gamma_blue_intensity;
+
+extern int vo_nomouse_input;
+extern int enable_mouse_movements;
+
+extern int vo_pts;
+extern float vo_fps;
+
+extern char *vo_subdevice;
+
+extern int vo_colorkey;
+
+extern char *vo_winname;
+extern char *vo_wintitle;
+
+extern int64_t WinID;
+
+typedef struct {
+ float min;
+ float max;
+ } range_t;
+
+float range_max(range_t *r);
+int in_range(range_t *r, float f);
+range_t *str2range(char *s);
+extern char *monitor_hfreq_str;
+extern char *monitor_vfreq_str;
+extern char *monitor_dotclock_str;
+
+struct mp_keymap {
+ int from;
+ int to;
+};
+int lookup_keymap_table(const struct mp_keymap *map, int key);
+struct vo_rect {
+ int left, right, top, bottom, width, height;
+};
+void calc_src_dst_rects(int src_width, int src_height, struct vo_rect *src, struct vo_rect *dst,
+ struct vo_rect *borders, const struct vo_rect *crop);
+void vo_mouse_movement(int posx, int posy);
+
+static inline int apply_border_pos(int full, int part, float pos) {
+ if (pos >= 0.0 && pos <= 1.0) {
+ return pos*(full - part);
+ }
+ if (pos < 0)
+ return pos * part;
+ return full - part + (pos - 1) * part;
+}
+
+#endif /* MPLAYER_VIDEO_OUT_H */
diff --git a/libavfilter/libmpcodecs/mp_image.c b/libavfilter/libmpcodecs/mp_image.c
new file mode 100644
index 0000000..0e4d6d7
--- /dev/null
+++ b/libavfilter/libmpcodecs/mp_image.c
@@ -0,0 +1,257 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#include "img_format.h"
+#include "mp_image.h"
+
+#include "libvo/fastmemcpy.h"
+//#include "libavutil/mem.h"
+#include "libavutil/imgutils.h"
+
+void ff_mp_image_alloc_planes(mp_image_t *mpi) {
+ uint32_t temp[256];
+ if (avpriv_set_systematic_pal2(temp, ff_mp2ff_pix_fmt(mpi->imgfmt)) >= 0)
+ mpi->flags |= MP_IMGFLAG_RGB_PALETTE;
+
+ // IF09 - allocate space for 4. plane delta info - unused
+ if (mpi->imgfmt == IMGFMT_IF09) {
+ mpi->planes[0]=av_malloc(mpi->bpp*mpi->width*(mpi->height+2)/8+
+ mpi->chroma_width*mpi->chroma_height);
+ } else
+ mpi->planes[0]=av_malloc(mpi->bpp*mpi->width*(mpi->height+2)/8);
+ if (mpi->flags&MP_IMGFLAG_PLANAR) {
+ int bpp = IMGFMT_IS_YUVP16(mpi->imgfmt)? 2 : 1;
+ // YV12/I420/YVU9/IF09. feel free to add other planar formats here...
+ mpi->stride[0]=mpi->stride[3]=bpp*mpi->width;
+ if(mpi->num_planes > 2){
+ mpi->stride[1]=mpi->stride[2]=bpp*mpi->chroma_width;
+ if(mpi->flags&MP_IMGFLAG_SWAPPED){
+ // I420/IYUV (Y,U,V)
+ mpi->planes[1]=mpi->planes[0]+mpi->stride[0]*mpi->height;
+ mpi->planes[2]=mpi->planes[1]+mpi->stride[1]*mpi->chroma_height;
+ if (mpi->num_planes > 3)
+ mpi->planes[3]=mpi->planes[2]+mpi->stride[2]*mpi->chroma_height;
+ } else {
+ // YV12,YVU9,IF09 (Y,V,U)
+ mpi->planes[2]=mpi->planes[0]+mpi->stride[0]*mpi->height;
+ mpi->planes[1]=mpi->planes[2]+mpi->stride[1]*mpi->chroma_height;
+ if (mpi->num_planes > 3)
+ mpi->planes[3]=mpi->planes[1]+mpi->stride[1]*mpi->chroma_height;
+ }
+ } else {
+ // NV12/NV21
+ mpi->stride[1]=mpi->chroma_width;
+ mpi->planes[1]=mpi->planes[0]+mpi->stride[0]*mpi->height;
+ }
+ } else {
+ mpi->stride[0]=mpi->width*mpi->bpp/8;
+ if (mpi->flags & MP_IMGFLAG_RGB_PALETTE) {
+ mpi->planes[1] = av_malloc(1024);
+ memcpy(mpi->planes[1], temp, 1024);
+ }
+ }
+ mpi->flags|=MP_IMGFLAG_ALLOCATED;
+}
+
+mp_image_t* ff_alloc_mpi(int w, int h, unsigned long int fmt) {
+ mp_image_t* mpi = ff_new_mp_image(w,h);
+
+ ff_mp_image_setfmt(mpi,fmt);
+ ff_mp_image_alloc_planes(mpi);
+
+ return mpi;
+}
+
+void ff_copy_mpi(mp_image_t *dmpi, mp_image_t *mpi) {
+ if(mpi->flags&MP_IMGFLAG_PLANAR){
+ memcpy_pic(dmpi->planes[0],mpi->planes[0], mpi->w, mpi->h,
+ dmpi->stride[0],mpi->stride[0]);
+ memcpy_pic(dmpi->planes[1],mpi->planes[1], mpi->chroma_width, mpi->chroma_height,
+ dmpi->stride[1],mpi->stride[1]);
+ memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->chroma_width, mpi->chroma_height,
+ dmpi->stride[2],mpi->stride[2]);
+ } else {
+ memcpy_pic(dmpi->planes[0],mpi->planes[0],
+ mpi->w*(dmpi->bpp/8), mpi->h,
+ dmpi->stride[0],mpi->stride[0]);
+ }
+}
+
+void ff_mp_image_setfmt(mp_image_t* mpi,unsigned int out_fmt){
+ mpi->flags&=~(MP_IMGFLAG_PLANAR|MP_IMGFLAG_YUV|MP_IMGFLAG_SWAPPED);
+ mpi->imgfmt=out_fmt;
+ // compressed formats
+ if(out_fmt == IMGFMT_MPEGPES ||
+ out_fmt == IMGFMT_ZRMJPEGNI || out_fmt == IMGFMT_ZRMJPEGIT || out_fmt == IMGFMT_ZRMJPEGIB ||
+ IMGFMT_IS_HWACCEL(out_fmt)){
+ mpi->bpp=0;
+ return;
+ }
+ mpi->num_planes=1;
+ if (IMGFMT_IS_RGB(out_fmt)) {
+ if (IMGFMT_RGB_DEPTH(out_fmt) < 8 && !(out_fmt&128))
+ mpi->bpp = IMGFMT_RGB_DEPTH(out_fmt);
+ else
+ mpi->bpp=(IMGFMT_RGB_DEPTH(out_fmt)+7)&(~7);
+ return;
+ }
+ if (IMGFMT_IS_BGR(out_fmt)) {
+ if (IMGFMT_BGR_DEPTH(out_fmt) < 8 && !(out_fmt&128))
+ mpi->bpp = IMGFMT_BGR_DEPTH(out_fmt);
+ else
+ mpi->bpp=(IMGFMT_BGR_DEPTH(out_fmt)+7)&(~7);
+ mpi->flags|=MP_IMGFLAG_SWAPPED;
+ return;
+ }
+ if (IMGFMT_IS_XYZ(out_fmt)) {
+ mpi->bpp=3*((IMGFMT_XYZ_DEPTH(out_fmt) + 7) & ~7);
+ return;
+ }
+ mpi->num_planes=3;
+ if (out_fmt == IMGFMT_GBR24P) {
+ mpi->bpp=24;
+ mpi->flags|=MP_IMGFLAG_PLANAR;
+ return;
+ } else if (out_fmt == IMGFMT_GBR12P) {
+ mpi->bpp=36;
+ mpi->flags|=MP_IMGFLAG_PLANAR;
+ return;
+ } else if (out_fmt == IMGFMT_GBR14P) {
+ mpi->bpp=42;
+ mpi->flags|=MP_IMGFLAG_PLANAR;
+ return;
+ }
+ mpi->flags|=MP_IMGFLAG_YUV;
+ if (ff_mp_get_chroma_shift(out_fmt, NULL, NULL, NULL)) {
+ mpi->flags|=MP_IMGFLAG_PLANAR;
+ mpi->bpp = ff_mp_get_chroma_shift(out_fmt, &mpi->chroma_x_shift, &mpi->chroma_y_shift, NULL);
+ mpi->chroma_width = mpi->width >> mpi->chroma_x_shift;
+ mpi->chroma_height = mpi->height >> mpi->chroma_y_shift;
+ }
+ switch(out_fmt){
+ case IMGFMT_I420:
+ case IMGFMT_IYUV:
+ mpi->flags|=MP_IMGFLAG_SWAPPED;
+ case IMGFMT_YV12:
+ return;
+ case IMGFMT_420A:
+ case IMGFMT_422A:
+ case IMGFMT_444A:
+ case IMGFMT_IF09:
+ mpi->num_planes=4;
+ case IMGFMT_YVU9:
+ case IMGFMT_444P:
+ case IMGFMT_422P:
+ case IMGFMT_411P:
+ case IMGFMT_440P:
+ case IMGFMT_444P16_LE:
+ case IMGFMT_444P16_BE:
+ case IMGFMT_444P14_LE:
+ case IMGFMT_444P14_BE:
+ case IMGFMT_444P12_LE:
+ case IMGFMT_444P12_BE:
+ case IMGFMT_444P10_LE:
+ case IMGFMT_444P10_BE:
+ case IMGFMT_444P9_LE:
+ case IMGFMT_444P9_BE:
+ case IMGFMT_422P16_LE:
+ case IMGFMT_422P16_BE:
+ case IMGFMT_422P14_LE:
+ case IMGFMT_422P14_BE:
+ case IMGFMT_422P12_LE:
+ case IMGFMT_422P12_BE:
+ case IMGFMT_422P10_LE:
+ case IMGFMT_422P10_BE:
+ case IMGFMT_422P9_LE:
+ case IMGFMT_422P9_BE:
+ case IMGFMT_420P16_LE:
+ case IMGFMT_420P16_BE:
+ case IMGFMT_420P14_LE:
+ case IMGFMT_420P14_BE:
+ case IMGFMT_420P12_LE:
+ case IMGFMT_420P12_BE:
+ case IMGFMT_420P10_LE:
+ case IMGFMT_420P10_BE:
+ case IMGFMT_420P9_LE:
+ case IMGFMT_420P9_BE:
+ return;
+ case IMGFMT_Y16_LE:
+ case IMGFMT_Y16_BE:
+ mpi->bpp=16;
+ case IMGFMT_Y800:
+ case IMGFMT_Y8:
+ /* they're planar ones, but for easier handling use them as packed */
+ mpi->flags&=~MP_IMGFLAG_PLANAR;
+ mpi->num_planes=1;
+ return;
+ case IMGFMT_Y8A:
+ mpi->num_planes=2;
+ return;
+ case IMGFMT_UYVY:
+ mpi->flags|=MP_IMGFLAG_SWAPPED;
+ case IMGFMT_YUY2:
+ mpi->chroma_x_shift = 1;
+ mpi->bpp=16;
+ mpi->num_planes=1;
+ return;
+ case IMGFMT_NV12:
+ mpi->flags|=MP_IMGFLAG_SWAPPED;
+ case IMGFMT_NV21:
+ mpi->flags|=MP_IMGFLAG_PLANAR;
+ mpi->bpp=12;
+ mpi->num_planes=2;
+ mpi->chroma_width=(mpi->width>>0);
+ mpi->chroma_height=(mpi->height>>1);
+ mpi->chroma_x_shift=0;
+ mpi->chroma_y_shift=1;
+ return;
+ }
+ ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"mp_image: unknown out_fmt: 0x%X\n",out_fmt);
+ mpi->bpp=0;
+}
+
+mp_image_t* ff_new_mp_image(int w,int h){
+ mp_image_t* mpi = malloc(sizeof(mp_image_t));
+ if(!mpi) return NULL; // error!
+ memset(mpi,0,sizeof(mp_image_t));
+ mpi->width=mpi->w=w;
+ mpi->height=mpi->h=h;
+ return mpi;
+}
+
+void ff_free_mp_image(mp_image_t* mpi){
+ if(!mpi) return;
+ if(mpi->flags&MP_IMGFLAG_ALLOCATED){
+ /* because we allocate the whole image at once */
+ av_free(mpi->planes[0]);
+ if (mpi->flags & MP_IMGFLAG_RGB_PALETTE)
+ av_free(mpi->planes[1]);
+ }
+ free(mpi);
+}
+
diff --git a/libavfilter/libmpcodecs/mp_image.h b/libavfilter/libmpcodecs/mp_image.h
new file mode 100644
index 0000000..aedf451
--- /dev/null
+++ b/libavfilter/libmpcodecs/mp_image.h
@@ -0,0 +1,159 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_MP_IMAGE_H
+#define MPLAYER_MP_IMAGE_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#undef printf //FIXME
+#undef fprintf //FIXME
+#include "mp_msg.h"
+#include "libavutil/avutil.h"
+#include "libavutil/avassert.h"
+#undef realloc
+#undef malloc
+#undef free
+#undef rand
+#undef srand
+#undef printf
+#undef strncpy
+#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"
+#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC).
+
+enum AVPixelFormat ff_mp2ff_pix_fmt(int mp);
+
+//--------- codec's requirements (filled by the codec/vf) ---------
+
+//--- buffer content restrictions:
+// set if buffer content shouldn't be modified:
+#define MP_IMGFLAG_PRESERVE 0x01
+// set if buffer content will be READ.
+// This can be e.g. for next frame's MC: (I/P mpeg frames) -
+// then in combination with MP_IMGFLAG_PRESERVE - or it
+// can be because a video filter or codec will read a significant
+// amount of data while processing that frame (e.g. blending something
+// onto the frame, MV based intra prediction).
+// A frame marked like this should not be placed in to uncachable
+// video RAM for example.
+#define MP_IMGFLAG_READABLE 0x02
+
+//--- buffer width/stride/plane restrictions: (used for direct rendering)
+// stride _have_to_ be aligned to MB boundary: [for DR restrictions]
+#define MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE 0x4
+// stride should be aligned to MB boundary: [for buffer allocation]
+#define MP_IMGFLAG_PREFER_ALIGNED_STRIDE 0x8
+// codec accept any stride (>=width):
+#define MP_IMGFLAG_ACCEPT_STRIDE 0x10
+// codec accept any width (width*bpp=stride -> stride%bpp==0) (>=width):
+#define MP_IMGFLAG_ACCEPT_WIDTH 0x20
+//--- for planar formats only:
+// uses only stride[0], and stride[1]=stride[2]=stride[0]>>mpi->chroma_x_shift
+#define MP_IMGFLAG_COMMON_STRIDE 0x40
+// uses only planes[0], and calculates planes[1,2] from width,height,imgfmt
+#define MP_IMGFLAG_COMMON_PLANE 0x80
+
+#define MP_IMGFLAGMASK_RESTRICTIONS 0xFF
+
+//--------- color info (filled by ff_mp_image_setfmt() ) -----------
+// set if number of planes > 1
+#define MP_IMGFLAG_PLANAR 0x100
+// set if it's YUV colorspace
+#define MP_IMGFLAG_YUV 0x200
+// set if it's swapped (BGR or YVU) plane/byteorder
+#define MP_IMGFLAG_SWAPPED 0x400
+// set if you want memory for palette allocated and managed by ff_vf_get_image etc.
+#define MP_IMGFLAG_RGB_PALETTE 0x800
+
+#define MP_IMGFLAGMASK_COLORS 0xF00
+
+// codec uses drawing/rendering callbacks (draw_slice()-like thing, DR method 2)
+// [the codec will set this flag if it supports callbacks, and the vo _may_
+// clear it in get_image() if draw_slice() not implemented]
+#define MP_IMGFLAG_DRAW_CALLBACK 0x1000
+// set if it's in video buffer/memory: [set by vo/vf's get_image() !!!]
+#define MP_IMGFLAG_DIRECT 0x2000
+// set if buffer is allocated (used in destination images):
+#define MP_IMGFLAG_ALLOCATED 0x4000
+
+// buffer type was printed (do NOT set this flag - it's for INTERNAL USE!!!)
+#define MP_IMGFLAG_TYPE_DISPLAYED 0x8000
+
+// codec doesn't support any form of direct rendering - it has own buffer
+// allocation. so we just export its buffer pointers:
+#define MP_IMGTYPE_EXPORT 0
+// codec requires a static WO buffer, but it does only partial updates later:
+#define MP_IMGTYPE_STATIC 1
+// codec just needs some WO memory, where it writes/copies the whole frame to:
+#define MP_IMGTYPE_TEMP 2
+// I+P type, requires 2+ independent static R/W buffers
+#define MP_IMGTYPE_IP 3
+// I+P+B type, requires 2+ independent static R/W and 1+ temp WO buffers
+#define MP_IMGTYPE_IPB 4
+// Upper 16 bits give desired buffer number, -1 means get next available
+#define MP_IMGTYPE_NUMBERED 5
+// Doesn't need any buffer, incomplete image (probably a first field only)
+// we need this type to be able to differentiate between half frames and
+// all other cases
+#define MP_IMGTYPE_INCOMPLETE 6
+
+#define MP_MAX_PLANES 4
+
+#define MP_IMGFIELD_ORDERED 0x01
+#define MP_IMGFIELD_TOP_FIRST 0x02
+#define MP_IMGFIELD_REPEAT_FIRST 0x04
+#define MP_IMGFIELD_TOP 0x08
+#define MP_IMGFIELD_BOTTOM 0x10
+#define MP_IMGFIELD_INTERLACED 0x20
+
+typedef struct mp_image {
+ unsigned int flags;
+ unsigned char type;
+ int number;
+ unsigned char bpp; // bits/pixel. NOT depth! for RGB it will be n*8
+ unsigned int imgfmt;
+ int width,height; // stored dimensions
+ int x,y,w,h; // visible dimensions
+ unsigned char* planes[MP_MAX_PLANES];
+ int stride[MP_MAX_PLANES];
+ char * qscale;
+ int qstride;
+ int pict_type; // 0->unknown, 1->I, 2->P, 3->B
+ int fields;
+ int qscale_type; // 0->mpeg1/4/h263, 1->mpeg2
+ int num_planes;
+ /* these are only used by planar formats Y,U(Cb),V(Cr) */
+ int chroma_width;
+ int chroma_height;
+ int chroma_x_shift; // horizontal
+ int chroma_y_shift; // vertical
+ int usage_count;
+ /* for private use by filter or vo driver (to store buffer id or dmpi) */
+ void* priv;
+} mp_image_t;
+
+void ff_mp_image_setfmt(mp_image_t* mpi,unsigned int out_fmt);
+mp_image_t* ff_new_mp_image(int w,int h);
+void ff_free_mp_image(mp_image_t* mpi);
+
+mp_image_t* ff_alloc_mpi(int w, int h, unsigned long int fmt);
+void ff_mp_image_alloc_planes(mp_image_t *mpi);
+void ff_copy_mpi(mp_image_t *dmpi, mp_image_t *mpi);
+
+#endif /* MPLAYER_MP_IMAGE_H */
diff --git a/libavfilter/libmpcodecs/mp_msg.h b/libavfilter/libmpcodecs/mp_msg.h
new file mode 100644
index 0000000..51cdff3
--- /dev/null
+++ b/libavfilter/libmpcodecs/mp_msg.h
@@ -0,0 +1,166 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_MP_MSG_H
+#define MPLAYER_MP_MSG_H
+
+#include <stdarg.h>
+
+// defined in mplayer.c and mencoder.c
+extern int verbose;
+
+// verbosity elevel:
+
+/* Only messages level MSGL_FATAL-MSGL_STATUS should be translated,
+ * messages level MSGL_V and above should not be translated. */
+
+#define MSGL_FATAL 0 // will exit/abort
+#define MSGL_ERR 1 // continues
+#define MSGL_WARN 2 // only warning
+#define MSGL_HINT 3 // short help message
+#define MSGL_INFO 4 // -quiet
+#define MSGL_STATUS 5 // v=0
+#define MSGL_V 6 // v=1
+#define MSGL_DBG2 7 // v=2
+#define MSGL_DBG3 8 // v=3
+#define MSGL_DBG4 9 // v=4
+#define MSGL_DBG5 10 // v=5
+
+#define MSGL_FIXME 1 // for conversions from printf where the appropriate MSGL is not known; set equal to ERR for obtrusiveness
+#define MSGT_FIXME 0 // for conversions from printf where the appropriate MSGT is not known; set equal to GLOBAL for obtrusiveness
+
+// code/module:
+
+#define MSGT_GLOBAL 0 // common player stuff errors
+#define MSGT_CPLAYER 1 // console player (mplayer.c)
+#define MSGT_GPLAYER 2 // gui player
+
+#define MSGT_VO 3 // libvo
+#define MSGT_AO 4 // libao
+
+#define MSGT_DEMUXER 5 // demuxer.c (general stuff)
+#define MSGT_DS 6 // demux stream (add/read packet etc)
+#define MSGT_DEMUX 7 // fileformat-specific stuff (demux_*.c)
+#define MSGT_HEADER 8 // fileformat-specific header (*header.c)
+
+#define MSGT_AVSYNC 9 // mplayer.c timer stuff
+#define MSGT_AUTOQ 10 // mplayer.c auto-quality stuff
+
+#define MSGT_CFGPARSER 11 // cfgparser.c
+
+#define MSGT_DECAUDIO 12 // av decoder
+#define MSGT_DECVIDEO 13
+
+#define MSGT_SEEK 14 // seeking code
+#define MSGT_WIN32 15 // win32 dll stuff
+#define MSGT_OPEN 16 // open.c (stream opening)
+#define MSGT_DVD 17 // open.c (DVD init/read/seek)
+
+#define MSGT_PARSEES 18 // parse_es.c (mpeg stream parser)
+#define MSGT_LIRC 19 // lirc_mp.c and input lirc driver
+
+#define MSGT_STREAM 20 // stream.c
+#define MSGT_CACHE 21 // cache2.c
+
+#define MSGT_MENCODER 22
+
+#define MSGT_XACODEC 23 // XAnim codecs
+
+#define MSGT_TV 24 // TV input subsystem
+
+#define MSGT_OSDEP 25 // OS-dependent parts
+
+#define MSGT_SPUDEC 26 // spudec.c
+
+#define MSGT_PLAYTREE 27 // Playtree handeling (playtree.c, playtreeparser.c)
+
+#define MSGT_INPUT 28
+
+#define MSGT_VFILTER 29
+
+#define MSGT_OSD 30
+
+#define MSGT_NETWORK 31
+
+#define MSGT_CPUDETECT 32
+
+#define MSGT_CODECCFG 33
+
+#define MSGT_SWS 34
+
+#define MSGT_VOBSUB 35
+#define MSGT_SUBREADER 36
+
+#define MSGT_AFILTER 37 // Audio filter messages
+
+#define MSGT_NETST 38 // Netstream
+
+#define MSGT_MUXER 39 // muxer layer
+
+#define MSGT_OSD_MENU 40
+
+#define MSGT_IDENTIFY 41 // -identify output
+
+#define MSGT_RADIO 42
+
+#define MSGT_ASS 43 // libass messages
+
+#define MSGT_LOADER 44 // dll loader messages
+
+#define MSGT_STATUSLINE 45 // playback/encoding status line
+
+#define MSGT_TELETEXT 46 // Teletext decoder
+
+#define MSGT_MAX 64
+
+
+extern char *ff_mp_msg_charset;
+extern int ff_mp_msg_color;
+extern int ff_mp_msg_module;
+
+extern int ff_mp_msg_levels[MSGT_MAX];
+extern int ff_mp_msg_level_all;
+
+
+void ff_mp_msg_init(void);
+int ff_mp_msg_test(int mod, int lev);
+
+#include "config.h"
+
+void ff_mp_msg_va(int mod, int lev, const char *format, va_list va);
+#ifdef __GNUC__
+void ff_mp_msg(int mod, int lev, const char *format, ... ) __attribute__ ((format (printf, 3, 4)));
+# ifdef MP_DEBUG
+# define mp_dbg(mod,lev, args... ) ff_mp_msg(mod, lev, ## args )
+# else
+ // only useful for developers, disable but check syntax
+# define mp_dbg(mod,lev, args... ) do { if (0) ff_mp_msg(mod, lev, ## args ); } while (0)
+# endif
+#else // not GNU C
+void ff_mp_msg(int mod, int lev, const char *format, ... );
+# ifdef MP_DEBUG
+# define mp_dbg(mod,lev, ... ) ff_mp_msg(mod, lev, __VA_ARGS__)
+# else
+ // only useful for developers, disable but check syntax
+# define mp_dbg(mod,lev, ... ) do { if (0) ff_mp_msg(mod, lev, __VA_ARGS__); } while (0)
+# endif
+#endif /* __GNUC__ */
+
+const char* ff_filename_recode(const char* filename);
+
+#endif /* MPLAYER_MP_MSG_H */
diff --git a/libavfilter/libmpcodecs/mpc_info.h b/libavfilter/libmpcodecs/mpc_info.h
new file mode 100644
index 0000000..8554699
--- /dev/null
+++ b/libavfilter/libmpcodecs/mpc_info.h
@@ -0,0 +1,43 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_MPC_INFO_H
+#define MPLAYER_MPC_INFO_H
+
+typedef struct mp_codec_info_s
+{
+ /* codec long name ("Autodesk FLI/FLC Animation decoder" */
+ const char *name;
+ /* short name (same as driver name in codecs.conf) ("dshow") */
+ const char *short_name;
+ /* interface author/maintainer */
+ const char *maintainer;
+ /* codec author ("Aaron Holtzman <aholtzma@ess.engr.uvic.ca>") */
+ const char *author;
+ /* any additional comments */
+ const char *comment;
+} mp_codec_info_t;
+
+#define CONTROL_OK 1
+#define CONTROL_TRUE 1
+#define CONTROL_FALSE 0
+#define CONTROL_UNKNOWN -1
+#define CONTROL_ERROR -2
+#define CONTROL_NA -3
+
+#endif /* MPLAYER_MPC_INFO_H */
diff --git a/libavfilter/libmpcodecs/vf.h b/libavfilter/libmpcodecs/vf.h
new file mode 100644
index 0000000..d8fc66b
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf.h
@@ -0,0 +1,169 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_VF_H
+#define MPLAYER_VF_H
+
+//#include "m_option.h"
+#include "mp_image.h"
+
+//extern m_obj_settings_t* vf_settings;
+//extern const m_obj_list_t vf_obj_list;
+
+struct vf_instance;
+struct vf_priv_s;
+
+typedef struct vf_info_s {
+ const char *info;
+ const char *name;
+ const char *author;
+ const char *comment;
+ int (*vf_open)(struct vf_instance *vf,char* args);
+ // Ptr to a struct dscribing the options
+ const void* opts;
+} vf_info_t;
+
+#define NUM_NUMBERED_MPI 50
+
+typedef struct vf_image_context_s {
+ mp_image_t* static_images[2];
+ mp_image_t* temp_images[1];
+ mp_image_t* export_images[1];
+ mp_image_t* numbered_images[NUM_NUMBERED_MPI];
+ int static_idx;
+} vf_image_context_t;
+
+typedef struct vf_format_context_t {
+ int have_configured;
+ int orig_width, orig_height, orig_fmt;
+} vf_format_context_t;
+
+typedef struct vf_instance {
+ const vf_info_t* info;
+ // funcs:
+ int (*config)(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt);
+ int (*control)(struct vf_instance *vf,
+ int request, void* data);
+ int (*query_format)(struct vf_instance *vf,
+ unsigned int fmt);
+ void (*get_image)(struct vf_instance *vf,
+ mp_image_t *mpi);
+ int (*put_image)(struct vf_instance *vf,
+ mp_image_t *mpi, double pts);
+ void (*start_slice)(struct vf_instance *vf,
+ mp_image_t *mpi);
+ void (*draw_slice)(struct vf_instance *vf,
+ unsigned char** src, int* stride, int w,int h, int x, int y);
+ void (*uninit)(struct vf_instance *vf);
+
+ int (*continue_buffered_image)(struct vf_instance *vf);
+ // caps:
+ unsigned int default_caps; // used by default query_format()
+ unsigned int default_reqs; // used by default config()
+ // data:
+ int w, h;
+ vf_image_context_t imgctx;
+ vf_format_context_t fmt;
+ struct vf_instance *next;
+ mp_image_t *dmpi;
+ struct vf_priv_s* priv;
+} vf_instance_t;
+
+// control codes:
+#include "mpc_info.h"
+
+typedef struct vf_seteq_s
+{
+ const char *item;
+ int value;
+} vf_equalizer_t;
+
+#define VFCTRL_QUERY_MAX_PP_LEVEL 4 /* test for postprocessing support (max level) */
+#define VFCTRL_SET_PP_LEVEL 5 /* set postprocessing level */
+#define VFCTRL_SET_EQUALIZER 6 /* set color options (brightness,contrast etc) */
+#define VFCTRL_GET_EQUALIZER 8 /* gset color options (brightness,contrast etc) */
+#define VFCTRL_DRAW_OSD 7
+#define VFCTRL_CHANGE_RECTANGLE 9 /* Change the rectangle boundaries */
+#define VFCTRL_FLIP_PAGE 10 /* Tell the vo to flip pages */
+#define VFCTRL_DUPLICATE_FRAME 11 /* For encoding - encode zero-change frame */
+#define VFCTRL_SKIP_NEXT_FRAME 12 /* For encoding - drop the next frame that passes through */
+#define VFCTRL_FLUSH_FRAMES 13 /* For encoding - flush delayed frames */
+#define VFCTRL_SCREENSHOT 14 /* Make a screenshot */
+#define VFCTRL_INIT_EOSD 15 /* Select EOSD renderer */
+#define VFCTRL_DRAW_EOSD 16 /* Render EOSD */
+#define VFCTRL_GET_PTS 17 /* Return last pts value that reached vf_vo*/
+#define VFCTRL_SET_DEINTERLACE 18 /* Set deinterlacing status */
+#define VFCTRL_GET_DEINTERLACE 19 /* Get deinterlacing status */
+
+#include "vfcap.h"
+
+//FIXME this should be in a common header, but i dunno which
+#define MP_NOPTS_VALUE (-1LL<<63) //both int64_t and double should be able to represent this exactly
+
+
+// functions:
+void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h);
+mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h);
+
+vf_instance_t* vf_open_plugin(const vf_info_t* const* filter_list, vf_instance_t* next, const char *name, char **args);
+vf_instance_t* vf_open_filter(vf_instance_t* next, const char *name, char **args);
+vf_instance_t* ff_vf_add_before_vo(vf_instance_t **vf, char *name, char **args);
+vf_instance_t* vf_open_encoder(vf_instance_t* next, const char *name, char *args);
+
+unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred);
+void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src);
+void ff_vf_queue_frame(vf_instance_t *vf, int (*)(vf_instance_t *));
+int ff_vf_output_queued_frame(vf_instance_t *vf);
+
+// default wrappers:
+int ff_vf_next_config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt);
+int ff_vf_next_control(struct vf_instance *vf, int request, void* data);
+void ff_vf_extra_flip(struct vf_instance *vf);
+int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt);
+int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts);
+void ff_vf_next_draw_slice (struct vf_instance *vf, unsigned char** src, int* stride, int w,int h, int x, int y);
+
+vf_instance_t* ff_append_filters(vf_instance_t* last);
+
+void ff_vf_uninit_filter(vf_instance_t* vf);
+void ff_vf_uninit_filter_chain(vf_instance_t* vf);
+
+int ff_vf_config_wrapper(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt);
+
+static inline int norm_qscale(int qscale, int type)
+{
+ switch (type) {
+ case 0: // MPEG-1
+ return qscale;
+ case 1: // MPEG-2
+ return qscale >> 1;
+ case 2: // H264
+ return qscale >> 2;
+ case 3: // VP56
+ return (63 - qscale + 2) >> 2;
+ }
+ return qscale;
+}
+
+#endif /* MPLAYER_VF_H */
diff --git a/libavfilter/libmpcodecs/vf_eq.c b/libavfilter/libmpcodecs/vf_eq.c
new file mode 100644
index 0000000..f8efa84
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf_eq.c
@@ -0,0 +1,240 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "config.h"
+#include "mp_msg.h"
+#include "cpudetect.h"
+
+#include "img_format.h"
+#include "mp_image.h"
+#include "vf.h"
+
+#include "libvo/video_out.h"
+
+struct vf_priv_s {
+ unsigned char *buf;
+ int brightness;
+ int contrast;
+};
+
+#if HAVE_MMX && HAVE_6REGS
+static void process_MMX(unsigned char *dest, int dstride, unsigned char *src, int sstride,
+ int w, int h, int brightness, int contrast)
+{
+ int i;
+ int pel;
+ int dstep = dstride-w;
+ int sstep = sstride-w;
+ short brvec[4];
+ short contvec[4];
+
+ contrast = ((contrast+100)*256*16)/100;
+ brightness = ((brightness+100)*511)/200-128 - contrast/32;
+
+ brvec[0] = brvec[1] = brvec[2] = brvec[3] = brightness;
+ contvec[0] = contvec[1] = contvec[2] = contvec[3] = contrast;
+
+ while (h--) {
+ __asm__ volatile (
+ "movq (%5), %%mm3 \n\t"
+ "movq (%6), %%mm4 \n\t"
+ "pxor %%mm0, %%mm0 \n\t"
+ "movl %4, %%eax\n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0), %%mm1 \n\t"
+ "movq (%0), %%mm2 \n\t"
+ "punpcklbw %%mm0, %%mm1 \n\t"
+ "punpckhbw %%mm0, %%mm2 \n\t"
+ "psllw $4, %%mm1 \n\t"
+ "psllw $4, %%mm2 \n\t"
+ "pmulhw %%mm4, %%mm1 \n\t"
+ "pmulhw %%mm4, %%mm2 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "packuswb %%mm2, %%mm1 \n\t"
+ "add $8, %0 \n\t"
+ "movq %%mm1, (%1) \n\t"
+ "add $8, %1 \n\t"
+ "decl %%eax \n\t"
+ "jnz 1b \n\t"
+ : "=r" (src), "=r" (dest)
+ : "0" (src), "1" (dest), "r" (w>>3), "r" (brvec), "r" (contvec)
+ : "%eax"
+ );
+
+ for (i = w&7; i; i--)
+ {
+ pel = ((*src++* contrast)>>12) + brightness;
+ if(pel&768) pel = (-pel)>>31;
+ *dest++ = pel;
+ }
+
+ src += sstep;
+ dest += dstep;
+ }
+ __asm__ volatile ( "emms \n\t" ::: "memory" );
+}
+#endif
+
+static void process_C(unsigned char *dest, int dstride, unsigned char *src, int sstride,
+ int w, int h, int brightness, int contrast)
+{
+ int i;
+ int pel;
+ int dstep = dstride-w;
+ int sstep = sstride-w;
+
+ contrast = ((contrast+100)*256*256)/100;
+ brightness = ((brightness+100)*511)/200-128 - contrast/512;
+
+ while (h--) {
+ for (i = w; i; i--)
+ {
+ pel = ((*src++* contrast)>>16) + brightness;
+ if(pel&768) pel = (-pel)>>31;
+ *dest++ = pel;
+ }
+ src += sstep;
+ dest += dstep;
+ }
+}
+
+static void (*process)(unsigned char *dest, int dstride, unsigned char *src, int sstride,
+ int w, int h, int brightness, int contrast);
+
+/* FIXME: add packed yuv version of process */
+
+static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
+{
+ mp_image_t *dmpi;
+
+ dmpi=ff_vf_get_image(vf->next, mpi->imgfmt,
+ MP_IMGTYPE_EXPORT, 0,
+ mpi->w, mpi->h);
+
+ dmpi->stride[0] = mpi->stride[0];
+ dmpi->planes[1] = mpi->planes[1];
+ dmpi->planes[2] = mpi->planes[2];
+ dmpi->stride[1] = mpi->stride[1];
+ dmpi->stride[2] = mpi->stride[2];
+
+ if (!vf->priv->buf) vf->priv->buf = malloc(mpi->stride[0]*mpi->h);
+
+ if ((vf->priv->brightness == 0) && (vf->priv->contrast == 0))
+ dmpi->planes[0] = mpi->planes[0];
+ else {
+ dmpi->planes[0] = vf->priv->buf;
+ process(dmpi->planes[0], dmpi->stride[0],
+ mpi->planes[0], mpi->stride[0],
+ mpi->w, mpi->h, vf->priv->brightness,
+ vf->priv->contrast);
+ }
+
+ return ff_vf_next_put_image(vf,dmpi, pts);
+}
+
+static int control(struct vf_instance *vf, int request, void* data)
+{
+ vf_equalizer_t *eq;
+
+ switch (request) {
+ case VFCTRL_SET_EQUALIZER:
+ eq = data;
+ if (!strcmp(eq->item,"brightness")) {
+ vf->priv->brightness = eq->value;
+ return CONTROL_TRUE;
+ }
+ else if (!strcmp(eq->item,"contrast")) {
+ vf->priv->contrast = eq->value;
+ return CONTROL_TRUE;
+ }
+ break;
+ case VFCTRL_GET_EQUALIZER:
+ eq = data;
+ if (!strcmp(eq->item,"brightness")) {
+ eq->value = vf->priv->brightness;
+ return CONTROL_TRUE;
+ }
+ else if (!strcmp(eq->item,"contrast")) {
+ eq->value = vf->priv->contrast;
+ return CONTROL_TRUE;
+ }
+ break;
+ }
+ return ff_vf_next_control(vf, request, data);
+}
+
+static int query_format(struct vf_instance *vf, unsigned int fmt)
+{
+ switch (fmt) {
+ case IMGFMT_YVU9:
+ case IMGFMT_IF09:
+ case IMGFMT_YV12:
+ case IMGFMT_I420:
+ case IMGFMT_IYUV:
+ case IMGFMT_CLPL:
+ case IMGFMT_Y800:
+ case IMGFMT_Y8:
+ case IMGFMT_NV12:
+ case IMGFMT_NV21:
+ case IMGFMT_444P:
+ case IMGFMT_422P:
+ case IMGFMT_411P:
+ return ff_vf_next_query_format(vf, fmt);
+ }
+ return 0;
+}
+
+static void uninit(struct vf_instance *vf)
+{
+ free(vf->priv->buf);
+ free(vf->priv);
+}
+
+static int vf_open(vf_instance_t *vf, char *args)
+{
+ vf->control=control;
+ vf->query_format=query_format;
+ vf->put_image=put_image;
+ vf->uninit=uninit;
+
+ vf->priv = malloc(sizeof(struct vf_priv_s));
+ memset(vf->priv, 0, sizeof(struct vf_priv_s));
+ if (args) sscanf(args, "%d:%d", &vf->priv->brightness, &vf->priv->contrast);
+
+ process = process_C;
+#if HAVE_MMX && HAVE_6REGS
+ if(ff_gCpuCaps.hasMMX) process = process_MMX;
+#endif
+
+ return 1;
+}
+
+const vf_info_t ff_vf_info_eq = {
+ "soft video equalizer",
+ "eq",
+ "Richard Felker",
+ "",
+ vf_open,
+};
diff --git a/libavfilter/libmpcodecs/vf_eq2.c b/libavfilter/libmpcodecs/vf_eq2.c
new file mode 100644
index 0000000..0356813
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf_eq2.c
@@ -0,0 +1,519 @@
+/*
+ * Software equalizer (brightness, contrast, gamma, saturation)
+ *
+ * Hampa Hug <hampa@hampa.ch> (original LUT gamma/contrast/brightness filter)
+ * Daniel Moreno <comac@comac.darktech.org> (saturation, R/G/B gamma support)
+ * Richard Felker (original MMX contrast/brightness code (vf_eq.c))
+ * Michael Niedermayer <michalni@gmx.at> (LUT16)
+ *
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <inttypes.h>
+
+#include "config.h"
+#include "mp_msg.h"
+#include "cpudetect.h"
+
+#include "img_format.h"
+#include "mp_image.h"
+#include "vf.h"
+
+#define LUT16
+
+/* Per channel parameters */
+typedef struct eq2_param_t {
+ unsigned char lut[256];
+#ifdef LUT16
+ uint16_t lut16[256*256];
+#endif
+ int lut_clean;
+
+ void (*adjust) (struct eq2_param_t *par, unsigned char *dst, unsigned char *src,
+ unsigned w, unsigned h, unsigned dstride, unsigned sstride);
+
+ double c;
+ double b;
+ double g;
+ double w;
+} eq2_param_t;
+
+typedef struct vf_priv_s {
+ eq2_param_t param[3];
+
+ double contrast;
+ double brightness;
+ double saturation;
+
+ double gamma;
+ double gamma_weight;
+ double rgamma;
+ double ggamma;
+ double bgamma;
+
+ unsigned buf_w[3];
+ unsigned buf_h[3];
+ unsigned char *buf[3];
+} vf_eq2_t;
+
+
+static
+void create_lut (eq2_param_t *par)
+{
+ unsigned i;
+ double g, v;
+ double lw, gw;
+
+ g = par->g;
+ gw = par->w;
+ lw = 1.0 - gw;
+
+ if ((g < 0.001) || (g > 1000.0)) {
+ g = 1.0;
+ }
+
+ g = 1.0 / g;
+
+ for (i = 0; i < 256; i++) {
+ v = (double) i / 255.0;
+ v = par->c * (v - 0.5) + 0.5 + par->b;
+
+ if (v <= 0.0) {
+ par->lut[i] = 0;
+ }
+ else {
+ v = v*lw + pow(v, g)*gw;
+
+ if (v >= 1.0) {
+ par->lut[i] = 255;
+ }
+ else {
+ par->lut[i] = (unsigned char) (256.0 * v);
+ }
+ }
+ }
+
+#ifdef LUT16
+ for(i=0; i<256*256; i++){
+ par->lut16[i]= par->lut[i&0xFF] + (par->lut[i>>8]<<8);
+ }
+#endif
+
+ par->lut_clean = 1;
+}
+
+#if HAVE_MMX && HAVE_6REGS
+static
+void affine_1d_MMX (eq2_param_t *par, unsigned char *dst, unsigned char *src,
+ unsigned w, unsigned h, unsigned dstride, unsigned sstride)
+{
+ unsigned i;
+ int contrast, brightness;
+ unsigned dstep, sstep;
+ int pel;
+ short brvec[4];
+ short contvec[4];
+
+// printf("\nmmx: src=%p dst=%p w=%d h=%d ds=%d ss=%d\n",src,dst,w,h,dstride,sstride);
+
+ contrast = (int) (par->c * 256 * 16);
+ brightness = ((int) (100.0 * par->b + 100.0) * 511) / 200 - 128 - contrast / 32;
+
+ brvec[0] = brvec[1] = brvec[2] = brvec[3] = brightness;
+ contvec[0] = contvec[1] = contvec[2] = contvec[3] = contrast;
+
+ sstep = sstride - w;
+ dstep = dstride - w;
+
+ while (h-- > 0) {
+ __asm__ volatile (
+ "movq (%5), %%mm3 \n\t"
+ "movq (%6), %%mm4 \n\t"
+ "pxor %%mm0, %%mm0 \n\t"
+ "movl %4, %%eax\n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0), %%mm1 \n\t"
+ "movq (%0), %%mm2 \n\t"
+ "punpcklbw %%mm0, %%mm1 \n\t"
+ "punpckhbw %%mm0, %%mm2 \n\t"
+ "psllw $4, %%mm1 \n\t"
+ "psllw $4, %%mm2 \n\t"
+ "pmulhw %%mm4, %%mm1 \n\t"
+ "pmulhw %%mm4, %%mm2 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "packuswb %%mm2, %%mm1 \n\t"
+ "add $8, %0 \n\t"
+ "movq %%mm1, (%1) \n\t"
+ "add $8, %1 \n\t"
+ "decl %%eax \n\t"
+ "jnz 1b \n\t"
+ : "=r" (src), "=r" (dst)
+ : "0" (src), "1" (dst), "r" (w >> 3), "r" (brvec), "r" (contvec)
+ : "%eax"
+ );
+
+ for (i = w & 7; i > 0; i--) {
+ pel = ((*src++ * contrast) >> 12) + brightness;
+ if (pel & 768) {
+ pel = (-pel) >> 31;
+ }
+ *dst++ = pel;
+ }
+
+ src += sstep;
+ dst += dstep;
+ }
+
+ __asm__ volatile ( "emms \n\t" ::: "memory" );
+}
+#endif
+
+static
+void apply_lut (eq2_param_t *par, unsigned char *dst, unsigned char *src,
+ unsigned w, unsigned h, unsigned dstride, unsigned sstride)
+{
+ unsigned i, j, w2;
+ unsigned char *lut;
+ uint16_t *lut16;
+
+ if (!par->lut_clean) {
+ create_lut (par);
+ }
+
+ lut = par->lut;
+#ifdef LUT16
+ lut16 = par->lut16;
+ w2= (w>>3)<<2;
+ for (j = 0; j < h; j++) {
+ uint16_t *src16= (uint16_t*)src;
+ uint16_t *dst16= (uint16_t*)dst;
+ for (i = 0; i < w2; i+=4) {
+ dst16[i+0] = lut16[src16[i+0]];
+ dst16[i+1] = lut16[src16[i+1]];
+ dst16[i+2] = lut16[src16[i+2]];
+ dst16[i+3] = lut16[src16[i+3]];
+ }
+ i <<= 1;
+#else
+ w2= (w>>3)<<3;
+ for (j = 0; j < h; j++) {
+ for (i = 0; i < w2; i+=8) {
+ dst[i+0] = lut[src[i+0]];
+ dst[i+1] = lut[src[i+1]];
+ dst[i+2] = lut[src[i+2]];
+ dst[i+3] = lut[src[i+3]];
+ dst[i+4] = lut[src[i+4]];
+ dst[i+5] = lut[src[i+5]];
+ dst[i+6] = lut[src[i+6]];
+ dst[i+7] = lut[src[i+7]];
+ }
+#endif
+ for (; i < w; i++) {
+ dst[i] = lut[src[i]];
+ }
+
+ src += sstride;
+ dst += dstride;
+ }
+}
+
+static
+int put_image (vf_instance_t *vf, mp_image_t *src, double pts)
+{
+ unsigned i;
+ vf_eq2_t *eq2;
+ mp_image_t *dst;
+ unsigned long img_n,img_c;
+
+ eq2 = vf->priv;
+
+ if ((eq2->buf_w[0] != src->w) || (eq2->buf_h[0] != src->h)) {
+ eq2->buf_w[0] = src->w;
+ eq2->buf_h[0] = src->h;
+ eq2->buf_w[1] = eq2->buf_w[2] = src->w >> src->chroma_x_shift;
+ eq2->buf_h[1] = eq2->buf_h[2] = src->h >> src->chroma_y_shift;
+ img_n = eq2->buf_w[0]*eq2->buf_h[0];
+ if(src->num_planes>1){
+ img_c = eq2->buf_w[1]*eq2->buf_h[1];
+ eq2->buf[0] = realloc (eq2->buf[0], img_n + 2*img_c);
+ eq2->buf[1] = eq2->buf[0] + img_n;
+ eq2->buf[2] = eq2->buf[1] + img_c;
+ } else
+ eq2->buf[0] = realloc (eq2->buf[0], img_n);
+ }
+
+ dst = ff_vf_get_image (vf->next, src->imgfmt, MP_IMGTYPE_EXPORT, 0, src->w, src->h);
+
+ for (i = 0; i < ((src->num_planes>1)?3:1); i++) {
+ if (eq2->param[i].adjust) {
+ dst->planes[i] = eq2->buf[i];
+ dst->stride[i] = eq2->buf_w[i];
+
+ eq2->param[i].adjust (&eq2->param[i], dst->planes[i], src->planes[i],
+ eq2->buf_w[i], eq2->buf_h[i], dst->stride[i], src->stride[i]);
+ }
+ else {
+ dst->planes[i] = src->planes[i];
+ dst->stride[i] = src->stride[i];
+ }
+ }
+
+ return ff_vf_next_put_image (vf, dst, pts);
+}
+
+static
+void check_values (eq2_param_t *par)
+{
+ /* yuck! floating point comparisons... */
+
+ if ((par->c == 1.0) && (par->b == 0.0) && (par->g == 1.0)) {
+ par->adjust = NULL;
+ }
+#if HAVE_MMX && HAVE_6REGS
+ else if (par->g == 1.0 && ff_gCpuCaps.hasMMX) {
+ par->adjust = &affine_1d_MMX;
+ }
+#endif
+ else {
+ par->adjust = &apply_lut;
+ }
+}
+
+static
+void print_values (vf_eq2_t *eq2)
+{
+ ff_mp_msg (MSGT_VFILTER, MSGL_V, "vf_eq2: c=%.2f b=%.2f g=%.4f s=%.2f \n",
+ eq2->contrast, eq2->brightness, eq2->gamma, eq2->saturation
+ );
+}
+
+static
+void set_contrast (vf_eq2_t *eq2, double c)
+{
+ eq2->contrast = c;
+ eq2->param[0].c = c;
+ eq2->param[0].lut_clean = 0;
+ check_values (&eq2->param[0]);
+ print_values (eq2);
+}
+
+static
+void set_brightness (vf_eq2_t *eq2, double b)
+{
+ eq2->brightness = b;
+ eq2->param[0].b = b;
+ eq2->param[0].lut_clean = 0;
+ check_values (&eq2->param[0]);
+ print_values (eq2);
+}
+
+static
+void set_gamma (vf_eq2_t *eq2, double g)
+{
+ eq2->gamma = g;
+
+ eq2->param[0].g = eq2->gamma * eq2->ggamma;
+ eq2->param[1].g = sqrt (eq2->bgamma / eq2->ggamma);
+ eq2->param[2].g = sqrt (eq2->rgamma / eq2->ggamma);
+ eq2->param[0].w = eq2->param[1].w = eq2->param[2].w = eq2->gamma_weight;
+
+ eq2->param[0].lut_clean = 0;
+ eq2->param[1].lut_clean = 0;
+ eq2->param[2].lut_clean = 0;
+
+ check_values (&eq2->param[0]);
+ check_values (&eq2->param[1]);
+ check_values (&eq2->param[2]);
+
+ print_values (eq2);
+}
+
+static
+void set_saturation (vf_eq2_t *eq2, double s)
+{
+ eq2->saturation = s;
+
+ eq2->param[1].c = s;
+ eq2->param[2].c = s;
+
+ eq2->param[1].lut_clean = 0;
+ eq2->param[2].lut_clean = 0;
+
+ check_values (&eq2->param[1]);
+ check_values (&eq2->param[2]);
+
+ print_values (eq2);
+}
+
+static
+int control (vf_instance_t *vf, int request, void *data)
+{
+ vf_equalizer_t *eq;
+
+ switch (request) {
+ case VFCTRL_SET_EQUALIZER:
+ eq = (vf_equalizer_t *) data;
+
+ if (strcmp (eq->item, "gamma") == 0) {
+ set_gamma (vf->priv, exp (log (8.0) * eq->value / 100.0));
+ return CONTROL_TRUE;
+ }
+ else if (strcmp (eq->item, "contrast") == 0) {
+ set_contrast (vf->priv, (1.0 / 100.0) * (eq->value + 100));
+ return CONTROL_TRUE;
+ }
+ else if (strcmp (eq->item, "brightness") == 0) {
+ set_brightness (vf->priv, (1.0 / 100.0) * eq->value);
+ return CONTROL_TRUE;
+ }
+ else if (strcmp (eq->item, "saturation") == 0) {
+ set_saturation (vf->priv, (double) (eq->value + 100) / 100.0);
+ return CONTROL_TRUE;
+ }
+ break;
+
+ case VFCTRL_GET_EQUALIZER:
+ eq = (vf_equalizer_t *) data;
+ if (strcmp (eq->item, "gamma") == 0) {
+ eq->value = (int) (100.0 * log (vf->priv->gamma) / log (8.0));
+ return CONTROL_TRUE;
+ }
+ else if (strcmp (eq->item, "contrast") == 0) {
+ eq->value = (int) (100.0 * vf->priv->contrast) - 100;
+ return CONTROL_TRUE;
+ }
+ else if (strcmp (eq->item, "brightness") == 0) {
+ eq->value = (int) (100.0 * vf->priv->brightness);
+ return CONTROL_TRUE;
+ }
+ else if (strcmp (eq->item, "saturation") == 0) {
+ eq->value = (int) (100.0 * vf->priv->saturation) - 100;
+ return CONTROL_TRUE;
+ }
+ break;
+ }
+
+ return ff_vf_next_control (vf, request, data);
+}
+
+static
+int query_format (vf_instance_t *vf, unsigned fmt)
+{
+ switch (fmt) {
+ case IMGFMT_YVU9:
+ case IMGFMT_IF09:
+ case IMGFMT_YV12:
+ case IMGFMT_I420:
+ case IMGFMT_IYUV:
+ case IMGFMT_Y800:
+ case IMGFMT_Y8:
+ case IMGFMT_444P:
+ case IMGFMT_422P:
+ case IMGFMT_411P:
+ return ff_vf_next_query_format (vf, fmt);
+ }
+
+ return 0;
+}
+
+static
+void uninit (vf_instance_t *vf)
+{
+ if (vf->priv) {
+ free (vf->priv->buf[0]);
+ free (vf->priv);
+ }
+}
+
+static
+int vf_open(vf_instance_t *vf, char *args)
+{
+ unsigned i;
+ vf_eq2_t *eq2;
+ double par[8];
+
+ vf->control = control;
+ vf->query_format = query_format;
+ vf->put_image = put_image;
+ vf->uninit = uninit;
+
+ vf->priv = malloc (sizeof (vf_eq2_t));
+ eq2 = vf->priv;
+
+ for (i = 0; i < 3; i++) {
+ eq2->buf[i] = NULL;
+ eq2->buf_w[i] = 0;
+ eq2->buf_h[i] = 0;
+
+ eq2->param[i].adjust = NULL;
+ eq2->param[i].c = 1.0;
+ eq2->param[i].b = 0.0;
+ eq2->param[i].g = 1.0;
+ eq2->param[i].lut_clean = 0;
+ }
+
+ eq2->contrast = 1.0;
+ eq2->brightness = 0.0;
+ eq2->saturation = 1.0;
+
+ eq2->gamma = 1.0;
+ eq2->gamma_weight = 1.0;
+ eq2->rgamma = 1.0;
+ eq2->ggamma = 1.0;
+ eq2->bgamma = 1.0;
+
+ if (args) {
+ par[0] = 1.0;
+ par[1] = 1.0;
+ par[2] = 0.0;
+ par[3] = 1.0;
+ par[4] = 1.0;
+ par[5] = 1.0;
+ par[6] = 1.0;
+ par[7] = 1.0;
+ sscanf (args, "%lf:%lf:%lf:%lf:%lf:%lf:%lf:%lf",
+ par, par + 1, par + 2, par + 3, par + 4, par + 5, par + 6, par + 7
+ );
+
+ eq2->rgamma = par[4];
+ eq2->ggamma = par[5];
+ eq2->bgamma = par[6];
+ eq2->gamma_weight = par[7];
+
+ set_gamma (eq2, par[0]);
+ set_contrast (eq2, par[1]);
+ set_brightness (eq2, par[2]);
+ set_saturation (eq2, par[3]);
+ }
+
+ return 1;
+}
+
+const vf_info_t ff_vf_info_eq2 = {
+ "Software equalizer",
+ "eq2",
+ "Hampa Hug, Daniel Moreno, Richard Felker",
+ "",
+ &vf_open,
+ NULL
+};
diff --git a/libavfilter/libmpcodecs/vf_fspp.c b/libavfilter/libmpcodecs/vf_fspp.c
new file mode 100644
index 0000000..c4a36ef
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf_fspp.c
@@ -0,0 +1,2124 @@
+/*
+ * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2005 Nikolaj Poroshin <porosh3@psu.ru>
+ *
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * This implementation is based on an algorithm described in
+ * "Aria Nosratinia Embedded Post-Processing for
+ * Enhancement of Compressed Images (1999)"
+ * (http://citeseer.nj.nec.com/nosratinia99embedded.html)
+ * Further, with splitting (i)dct into hor/ver passes, one of them can be
+ * performed once per block, not pixel. This allows for much better speed.
+ */
+
+/*
+ Heavily optimized version of SPP filter by Nikolaj
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <math.h>
+
+#include "config.h"
+
+#include "mp_msg.h"
+#include "cpudetect.h"
+#include "img_format.h"
+#include "mp_image.h"
+#include "vf.h"
+#include "av_helpers.h"
+#include "libvo/fastmemcpy.h"
+
+#include "libavutil/internal.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavcodec/avcodec.h"
+
+#undef free
+#undef malloc
+
+//===========================================================================//
+#define BLOCKSZ 12
+
+static const short custom_threshold[64]=
+// values (296) can't be too high
+// -it causes too big quant dependence
+// or maybe overflow(check), which results in some flashing
+{ 71, 296, 295, 237, 71, 40, 38, 19,
+ 245, 193, 185, 121, 102, 73, 53, 27,
+ 158, 129, 141, 107, 97, 73, 50, 26,
+ 102, 116, 109, 98, 82, 66, 45, 23,
+ 71, 94, 95, 81, 70, 56, 38, 20,
+ 56, 77, 74, 66, 56, 44, 30, 15,
+ 38, 53, 50, 45, 38, 30, 21, 11,
+ 20, 27, 26, 23, 20, 15, 11, 5
+};
+
+DECLARE_ALIGNED(32, static const uint8_t, dither)[8][8] = {
+ { 0, 48, 12, 60, 3, 51, 15, 63, },
+ { 32, 16, 44, 28, 35, 19, 47, 31, },
+ { 8, 56, 4, 52, 11, 59, 7, 55, },
+ { 40, 24, 36, 20, 43, 27, 39, 23, },
+ { 2, 50, 14, 62, 1, 49, 13, 61, },
+ { 34, 18, 46, 30, 33, 17, 45, 29, },
+ { 10, 58, 6, 54, 9, 57, 5, 53, },
+ { 42, 26, 38, 22, 41, 25, 37, 21, },
+};
+
+struct vf_priv_s { //align 16 !
+ uint64_t threshold_mtx_noq[8*2];
+ uint64_t threshold_mtx[8*2];//used in both C & MMX (& later SSE2) versions
+
+ int log2_count;
+ int temp_stride;
+ int qp;
+ int mpeg2;
+ int prev_q;
+ uint8_t *src;
+ int16_t *temp;
+ int bframes;
+ char *non_b_qp;
+};
+
+
+#if !HAVE_MMX
+
+//This func reads from 1 slice, 1 and clears 0 & 1
+static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)
+{int y, x;
+#define STORE(pos) \
+ temp= (src[x + pos] + (d[pos]>>log2_scale))>>(6-log2_scale); \
+ src[x + pos]=src[x + pos - 8*src_stride]=0; \
+ if(temp & 0x100) temp= ~(temp>>31); \
+ dst[x + pos]= temp;
+
+ for(y=0; y<height; y++){
+ const uint8_t *d= dither[y];
+ for(x=0; x<width; x+=8){
+ int temp;
+ STORE(0);
+ STORE(1);
+ STORE(2);
+ STORE(3);
+ STORE(4);
+ STORE(5);
+ STORE(6);
+ STORE(7);
+ }
+ src+=src_stride;
+ dst+=dst_stride;
+ }
+}
+
+//This func reads from 2 slices, 0 & 2 and clears 2-nd
+static void store_slice2_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)
+{int y, x;
+#define STORE2(pos) \
+ temp= (src[x + pos] + src[x + pos + 16*src_stride] + (d[pos]>>log2_scale))>>(6-log2_scale); \
+ src[x + pos + 16*src_stride]=0; \
+ if(temp & 0x100) temp= ~(temp>>31); \
+ dst[x + pos]= temp;
+
+ for(y=0; y<height; y++){
+ const uint8_t *d= dither[y];
+ for(x=0; x<width; x+=8){
+ int temp;
+ STORE2(0);
+ STORE2(1);
+ STORE2(2);
+ STORE2(3);
+ STORE2(4);
+ STORE2(5);
+ STORE2(6);
+ STORE2(7);
+ }
+ src+=src_stride;
+ dst+=dst_stride;
+ }
+}
+
+static void mul_thrmat_c(struct vf_priv_s *p,int q)
+{
+ int a;
+ for(a=0;a<64;a++)
+ ((short*)p->threshold_mtx)[a]=q * ((short*)p->threshold_mtx_noq)[a];//ints faster in C
+}
+
+static void column_fidct_c(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt);
+static void row_idct_c(int16_t* workspace,
+ int16_t* output_adr, int output_stride, int cnt);
+static void row_fdct_c(int16_t *data, const uint8_t *pixels, int line_size, int cnt);
+
+//this is rather ugly, but there is no need for function pointers
+#define store_slice_s store_slice_c
+#define store_slice2_s store_slice2_c
+#define mul_thrmat_s mul_thrmat_c
+#define column_fidct_s column_fidct_c
+#define row_idct_s row_idct_c
+#define row_fdct_s row_fdct_c
+
+#else /* HAVE_MMX */
+
+//This func reads from 1 slice, 1 and clears 0 & 1
+static void store_slice_mmx(uint8_t *dst, int16_t *src, long dst_stride, long src_stride, long width, long height, long log2_scale)
+{
+ const uint8_t *od=&dither[0][0];
+ const uint8_t *end=&dither[height][0];
+ width = (width+7)&~7;
+ dst_stride-=width;
+ //src_stride=(src_stride-width)*2;
+ __asm__ volatile(
+ "mov %5, %%"REG_d" \n\t"
+ "mov %6, %%"REG_S" \n\t"
+ "mov %7, %%"REG_D" \n\t"
+ "mov %1, %%"REG_a" \n\t"
+ "movd %%"REG_d", %%mm5 \n\t"
+ "xor $-1, %%"REG_d" \n\t"
+ "mov %%"REG_a", %%"REG_c" \n\t"
+ "add $7, %%"REG_d" \n\t"
+ "neg %%"REG_a" \n\t"
+ "sub %0, %%"REG_c" \n\t"
+ "add %%"REG_c", %%"REG_c" \n\t"
+ "movd %%"REG_d", %%mm2 \n\t"
+ "mov %%"REG_c", %1 \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "shl $4, %%"REG_a" \n\t"
+
+ "2: \n\t"
+ "movq (%%"REG_d"), %%mm3 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "punpckhbw %%mm7, %%mm4 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "psraw %%mm5, %%mm3 \n\t"
+ "psraw %%mm5, %%mm4 \n\t"
+ "1: \n\t"
+ "movq %%mm7, (%%"REG_S",%%"REG_a") \n\t"
+ "movq (%%"REG_S"), %%mm0 \n\t"
+ "movq 8(%%"REG_S"), %%mm1 \n\t"
+
+ "movq %%mm7, 8(%%"REG_S",%%"REG_a") \n\t"
+ "paddw %%mm3, %%mm0 \n\t"
+ "paddw %%mm4, %%mm1 \n\t"
+
+ "movq %%mm7, (%%"REG_S") \n\t"
+ "psraw %%mm2, %%mm0 \n\t"
+ "psraw %%mm2, %%mm1 \n\t"
+
+ "movq %%mm7, 8(%%"REG_S") \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "add $16, %%"REG_S" \n\t"
+
+ "movq %%mm0, (%%"REG_D") \n\t"
+ "add $8, %%"REG_D" \n\t"
+ "sub $8, %%"REG_c" \n\t"
+ "jg 1b \n\t"
+ "add %1, %%"REG_S" \n\t"
+ "add $8, %%"REG_d" \n\t"
+ "add %3, %%"REG_D" \n\t"
+ "cmp %4, %%"REG_d" \n\t"
+ "jl 2b \n\t"
+
+ :
+ : "m" (width), "m" (src_stride), "erm" (od), "m" (dst_stride), "erm" (end),
+ "m" (log2_scale), "m" (src), "m" (dst) //input
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+ );
+}
+
+//This func reads from 2 slices, 0 & 2 and clears 2-nd
+static void store_slice2_mmx(uint8_t *dst, int16_t *src, long dst_stride, long src_stride, long width, long height, long log2_scale)
+{
+ const uint8_t *od=&dither[0][0];
+ const uint8_t *end=&dither[height][0];
+ width = (width+7)&~7;
+ dst_stride-=width;
+ //src_stride=(src_stride-width)*2;
+ __asm__ volatile(
+ "mov %5, %%"REG_d" \n\t"
+ "mov %6, %%"REG_S" \n\t"
+ "mov %7, %%"REG_D" \n\t"
+ "mov %1, %%"REG_a" \n\t"
+ "movd %%"REG_d", %%mm5 \n\t"
+ "xor $-1, %%"REG_d" \n\t"
+ "mov %%"REG_a", %%"REG_c" \n\t"
+ "add $7, %%"REG_d" \n\t"
+ "sub %0, %%"REG_c" \n\t"
+ "add %%"REG_c", %%"REG_c" \n\t"
+ "movd %%"REG_d", %%mm2 \n\t"
+ "mov %%"REG_c", %1 \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "shl $5, %%"REG_a" \n\t"
+
+ "2: \n\t"
+ "movq (%%"REG_d"), %%mm3 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "punpckhbw %%mm7, %%mm4 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "psraw %%mm5, %%mm3 \n\t"
+ "psraw %%mm5, %%mm4 \n\t"
+ "1: \n\t"
+ "movq (%%"REG_S"), %%mm0 \n\t"
+ "movq 8(%%"REG_S"), %%mm1 \n\t"
+ "paddw %%mm3, %%mm0 \n\t"
+
+ "paddw (%%"REG_S",%%"REG_a"), %%mm0 \n\t"
+ "paddw %%mm4, %%mm1 \n\t"
+ "movq 8(%%"REG_S",%%"REG_a"), %%mm6 \n\t"
+
+ "movq %%mm7, (%%"REG_S",%%"REG_a") \n\t"
+ "psraw %%mm2, %%mm0 \n\t"
+ "paddw %%mm6, %%mm1 \n\t"
+
+ "movq %%mm7, 8(%%"REG_S",%%"REG_a") \n\t"
+ "psraw %%mm2, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+
+ "movq %%mm0, (%%"REG_D") \n\t"
+ "add $16, %%"REG_S" \n\t"
+ "add $8, %%"REG_D" \n\t"
+ "sub $8, %%"REG_c" \n\t"
+ "jg 1b \n\t"
+ "add %1, %%"REG_S" \n\t"
+ "add $8, %%"REG_d" \n\t"
+ "add %3, %%"REG_D" \n\t"
+ "cmp %4, %%"REG_d" \n\t"
+ "jl 2b \n\t"
+
+ :
+ : "m" (width), "m" (src_stride), "erm" (od), "m" (dst_stride), "erm" (end),
+ "m" (log2_scale), "m" (src), "m" (dst) //input
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_D, "%"REG_S
+ );
+}
+
+static void mul_thrmat_mmx(struct vf_priv_s *p, int q)
+{
+ uint64_t *adr=&p->threshold_mtx_noq[0];
+ __asm__ volatile(
+ "movd %0, %%mm7 \n\t"
+ "add $8*8*2, %%"REG_D" \n\t"
+ "movq 0*8(%%"REG_S"), %%mm0 \n\t"
+ "punpcklwd %%mm7, %%mm7 \n\t"
+ "movq 1*8(%%"REG_S"), %%mm1 \n\t"
+ "punpckldq %%mm7, %%mm7 \n\t"
+ "pmullw %%mm7, %%mm0 \n\t"
+
+ "movq 2*8(%%"REG_S"), %%mm2 \n\t"
+ "pmullw %%mm7, %%mm1 \n\t"
+
+ "movq 3*8(%%"REG_S"), %%mm3 \n\t"
+ "pmullw %%mm7, %%mm2 \n\t"
+
+ "movq %%mm0, 0*8(%%"REG_D") \n\t"
+ "movq 4*8(%%"REG_S"), %%mm4 \n\t"
+ "pmullw %%mm7, %%mm3 \n\t"
+
+ "movq %%mm1, 1*8(%%"REG_D") \n\t"
+ "movq 5*8(%%"REG_S"), %%mm5 \n\t"
+ "pmullw %%mm7, %%mm4 \n\t"
+
+ "movq %%mm2, 2*8(%%"REG_D") \n\t"
+ "movq 6*8(%%"REG_S"), %%mm6 \n\t"
+ "pmullw %%mm7, %%mm5 \n\t"
+
+ "movq %%mm3, 3*8(%%"REG_D") \n\t"
+ "movq 7*8+0*8(%%"REG_S"), %%mm0 \n\t"
+ "pmullw %%mm7, %%mm6 \n\t"
+
+ "movq %%mm4, 4*8(%%"REG_D") \n\t"
+ "movq 7*8+1*8(%%"REG_S"), %%mm1 \n\t"
+ "pmullw %%mm7, %%mm0 \n\t"
+
+ "movq %%mm5, 5*8(%%"REG_D") \n\t"
+ "movq 7*8+2*8(%%"REG_S"), %%mm2 \n\t"
+ "pmullw %%mm7, %%mm1 \n\t"
+
+ "movq %%mm6, 6*8(%%"REG_D") \n\t"
+ "movq 7*8+3*8(%%"REG_S"), %%mm3 \n\t"
+ "pmullw %%mm7, %%mm2 \n\t"
+
+ "movq %%mm0, 7*8+0*8(%%"REG_D") \n\t"
+ "movq 7*8+4*8(%%"REG_S"), %%mm4 \n\t"
+ "pmullw %%mm7, %%mm3 \n\t"
+
+ "movq %%mm1, 7*8+1*8(%%"REG_D") \n\t"
+ "movq 7*8+5*8(%%"REG_S"), %%mm5 \n\t"
+ "pmullw %%mm7, %%mm4 \n\t"
+
+ "movq %%mm2, 7*8+2*8(%%"REG_D") \n\t"
+ "movq 7*8+6*8(%%"REG_S"), %%mm6 \n\t"
+ "pmullw %%mm7, %%mm5 \n\t"
+
+ "movq %%mm3, 7*8+3*8(%%"REG_D") \n\t"
+ "movq 14*8+0*8(%%"REG_S"), %%mm0 \n\t"
+ "pmullw %%mm7, %%mm6 \n\t"
+
+ "movq %%mm4, 7*8+4*8(%%"REG_D") \n\t"
+ "movq 14*8+1*8(%%"REG_S"), %%mm1 \n\t"
+ "pmullw %%mm7, %%mm0 \n\t"
+
+ "movq %%mm5, 7*8+5*8(%%"REG_D") \n\t"
+ "pmullw %%mm7, %%mm1 \n\t"
+
+ "movq %%mm6, 7*8+6*8(%%"REG_D") \n\t"
+ "movq %%mm0, 14*8+0*8(%%"REG_D") \n\t"
+ "movq %%mm1, 14*8+1*8(%%"REG_D") \n\t"
+
+ : "+g" (q), "+S" (adr), "+D" (adr)
+ :
+ );
+}
+
+static void column_fidct_mmx(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt);
+static void row_idct_mmx(int16_t* workspace,
+ int16_t* output_adr, int output_stride, int cnt);
+static void row_fdct_mmx(int16_t *data, const uint8_t *pixels, int line_size, int cnt);
+
+#define store_slice_s store_slice_mmx
+#define store_slice2_s store_slice2_mmx
+#define mul_thrmat_s mul_thrmat_mmx
+#define column_fidct_s column_fidct_mmx
+#define row_idct_s row_idct_mmx
+#define row_fdct_s row_fdct_mmx
+#endif // HAVE_MMX
+
+static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src,
+ int dst_stride, int src_stride,
+ int width, int height,
+ uint8_t *qp_store, int qp_stride, int is_luma)
+{
+ int x, x0, y, es, qy, t;
+ const int stride= is_luma ? p->temp_stride : (width+16);//((width+16+15)&(~15))
+ const int step=6-p->log2_count;
+ const int qps= 3 + is_luma;
+ DECLARE_ALIGNED(32, int32_t, block_align)[4*8*BLOCKSZ+ 4*8*BLOCKSZ];
+ int16_t *block= (int16_t *)block_align;
+ int16_t *block3=(int16_t *)(block_align+4*8*BLOCKSZ);
+
+ memset(block3, 0, 4*8*BLOCKSZ);
+
+ //p->src=src-src_stride*8-8;//!
+ if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
+ for(y=0; y<height; y++){
+ int index= 8 + 8*stride + y*stride;
+ fast_memcpy(p->src + index, src + y*src_stride, width);//this line can be avoided by using DR & user fr.buffers
+ for(x=0; x<8; x++){
+ p->src[index - x - 1]= p->src[index + x ];
+ p->src[index + width + x ]= p->src[index + width - x - 1];
+ }
+ }
+ for(y=0; y<8; y++){
+ fast_memcpy(p->src + ( 7-y)*stride, p->src + ( y+8)*stride, stride);
+ fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride);
+ }
+ //FIXME (try edge emu)
+
+ for(y=8; y<24; y++)
+ memset(p->temp+ 8 +y*stride, 0,width*sizeof(int16_t));
+
+ for(y=step; y<height+8; y+=step){ //step= 1,2
+ qy=y-4;
+ if (qy>height-1) qy=height-1;
+ if (qy<0) qy=0;
+ qy=(qy>>qps)*qp_stride;
+ row_fdct_s(block, p->src + y*stride +2-(y&1), stride, 2);
+ for(x0=0; x0<width+8-8*(BLOCKSZ-1); x0+=8*(BLOCKSZ-1)){
+ row_fdct_s(block+8*8, p->src + y*stride+8+x0 +2-(y&1), stride, 2*(BLOCKSZ-1));
+ if(p->qp)
+ column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block+0*8, block3+0*8, 8*(BLOCKSZ-1)); //yes, this is a HOTSPOT
+ else
+ for (x=0; x<8*(BLOCKSZ-1); x+=8) {
+ t=x+x0-2; //correct t=x+x0-2-(y&1), but its the same
+ if (t<0) t=0;//t always < width-2
+ t=qp_store[qy+(t>>qps)];
+ t=norm_qscale(t, p->mpeg2);
+ if (t!=p->prev_q) p->prev_q=t, mul_thrmat_s(p, t);
+ column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block+x*8, block3+x*8, 8); //yes, this is a HOTSPOT
+ }
+ row_idct_s(block3+0*8, p->temp + (y&15)*stride+x0+2-(y&1), stride, 2*(BLOCKSZ-1));
+ memmove(block, block+(BLOCKSZ-1)*64, 8*8*sizeof(int16_t)); //cycling
+ memmove(block3, block3+(BLOCKSZ-1)*64, 6*8*sizeof(int16_t));
+ }
+ //
+ es=width+8-x0; // 8, ...
+ if (es>8)
+ row_fdct_s(block+8*8, p->src + y*stride+8+x0 +2-(y&1), stride, (es-4)>>2);
+ column_fidct_s((int16_t*)(&p->threshold_mtx[0]), block, block3, es&(~1));
+ row_idct_s(block3+0*8, p->temp + (y&15)*stride+x0+2-(y&1), stride, es>>2);
+ {const int y1=y-8+step;//l5-7 l4-6
+ if (!(y1&7) && y1) {
+ if (y1&8) store_slice_s(dst + (y1-8)*dst_stride, p->temp+ 8 +8*stride,
+ dst_stride, stride, width, 8, 5-p->log2_count);
+ else store_slice2_s(dst + (y1-8)*dst_stride, p->temp+ 8 +0*stride,
+ dst_stride, stride, width, 8, 5-p->log2_count);
+ } }
+ }
+
+ if (y&7) { // == height & 7
+ if (y&8) store_slice_s(dst + ((y-8)&~7)*dst_stride, p->temp+ 8 +8*stride,
+ dst_stride, stride, width, y&7, 5-p->log2_count);
+ else store_slice2_s(dst + ((y-8)&~7)*dst_stride, p->temp+ 8 +0*stride,
+ dst_stride, stride, width, y&7, 5-p->log2_count);
+ }
+}
+
+static int config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt)
+{
+ int h= (height+16+15)&(~15);
+
+ vf->priv->temp_stride= (width+16+15)&(~15);
+ vf->priv->temp= (int16_t*)av_mallocz(vf->priv->temp_stride*3*8*sizeof(int16_t));
+ //this can also be avoided, see above
+ vf->priv->src = (uint8_t*)av_malloc(vf->priv->temp_stride*h*sizeof(uint8_t));
+
+ return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
+}
+
+static void get_image(struct vf_instance *vf, mp_image_t *mpi)
+{
+ if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
+ // ok, we can do pp in-place (or pp disabled):
+ vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
+ mpi->type, mpi->flags, mpi->width, mpi->height);
+ mpi->planes[0]=vf->dmpi->planes[0];
+ mpi->stride[0]=vf->dmpi->stride[0];
+ mpi->width=vf->dmpi->width;
+ if(mpi->flags&MP_IMGFLAG_PLANAR){
+ mpi->planes[1]=vf->dmpi->planes[1];
+ mpi->planes[2]=vf->dmpi->planes[2];
+ mpi->stride[1]=vf->dmpi->stride[1];
+ mpi->stride[2]=vf->dmpi->stride[2];
+ }
+ mpi->flags|=MP_IMGFLAG_DIRECT;
+}
+
+static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
+{
+ mp_image_t *dmpi;
+ if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
+ // no DR, so get a new image! hope we'll get DR buffer:
+ dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
+ MP_IMGTYPE_TEMP,
+ MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
+ mpi->width,mpi->height);
+ ff_vf_clone_mpi_attributes(dmpi, mpi);
+ }else{
+ dmpi=vf->dmpi;
+ }
+
+ vf->priv->mpeg2= mpi->qscale_type;
+ if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){
+ int w = mpi->qstride;
+ int h = (mpi->h + 15) >> 4;
+ if (!w) {
+ w = (mpi->w + 15) >> 4;
+ h = 1;
+ }
+ if(!vf->priv->non_b_qp)
+ vf->priv->non_b_qp= malloc(w*h);
+ fast_memcpy(vf->priv->non_b_qp, mpi->qscale, w*h);
+ }
+ if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
+ char *qp_tab= vf->priv->non_b_qp;
+ if(vf->priv->bframes || !qp_tab)
+ qp_tab= mpi->qscale;
+
+ if(qp_tab || vf->priv->qp){
+ filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0],
+ mpi->w, mpi->h, qp_tab, mpi->qstride, 1);
+ filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1],
+ mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
+ filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2],
+ mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
+ }else{
+ memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
+ memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
+ memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
+ }
+ }
+
+#if HAVE_MMX
+ if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
+#endif
+#if HAVE_MMX2
+ if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
+#endif
+ return ff_vf_next_put_image(vf,dmpi, pts);
+}
+
+static void uninit(struct vf_instance *vf)
+{
+ if(!vf->priv) return;
+
+ av_free(vf->priv->temp);
+ vf->priv->temp= NULL;
+ av_free(vf->priv->src);
+ vf->priv->src= NULL;
+ //free(vf->priv->avctx);
+ //vf->priv->avctx= NULL;
+ free(vf->priv->non_b_qp);
+ vf->priv->non_b_qp= NULL;
+
+ av_free(vf->priv);
+ vf->priv=NULL;
+}
+
+//===========================================================================//
+
+static int query_format(struct vf_instance *vf, unsigned int fmt)
+{
+ switch(fmt){
+ case IMGFMT_YVU9:
+ case IMGFMT_IF09:
+ case IMGFMT_YV12:
+ case IMGFMT_I420:
+ case IMGFMT_IYUV:
+ case IMGFMT_CLPL:
+ case IMGFMT_Y800:
+ case IMGFMT_Y8:
+ case IMGFMT_444P:
+ case IMGFMT_422P:
+ case IMGFMT_411P:
+ return ff_vf_next_query_format(vf,fmt);
+ }
+ return 0;
+}
+
+static int control(struct vf_instance *vf, int request, void* data)
+{
+ switch(request){
+ case VFCTRL_QUERY_MAX_PP_LEVEL:
+ return 5;
+ case VFCTRL_SET_PP_LEVEL:
+ vf->priv->log2_count= *((unsigned int*)data);
+ if (vf->priv->log2_count < 4) vf->priv->log2_count=4;
+ return CONTROL_TRUE;
+ }
+ return ff_vf_next_control(vf,request,data);
+}
+
+static int vf_open(vf_instance_t *vf, char *args)
+{
+ int i=0, bias;
+ int custom_threshold_m[64];
+ int log2c=-1;
+
+ vf->config=config;
+ vf->put_image=put_image;
+ vf->get_image=get_image;
+ vf->query_format=query_format;
+ vf->uninit=uninit;
+ vf->control= control;
+ vf->priv=av_mallocz(sizeof(struct vf_priv_s));//assumes align 16 !
+
+ ff_init_avcodec();
+
+ //vf->priv->avctx= avcodec_alloc_context();
+ //dsputil_init(&vf->priv->dsp, vf->priv->avctx);
+
+ vf->priv->log2_count= 4;
+ vf->priv->bframes = 0;
+
+ if (args) sscanf(args, "%d:%d:%d:%d", &log2c, &vf->priv->qp, &i, &vf->priv->bframes);
+
+ if( log2c >=4 && log2c <=5 )
+ vf->priv->log2_count = log2c;
+ else if( log2c >= 6 )
+ vf->priv->log2_count = 5;
+
+ if(vf->priv->qp < 0)
+ vf->priv->qp = 0;
+
+ if (i < -15) i = -15;
+ if (i > 32) i = 32;
+
+ bias= (1<<4)+i; //regulable
+ vf->priv->prev_q=0;
+ //
+ for(i=0;i<64;i++) //FIXME: tune custom_threshold[] and remove this !
+ custom_threshold_m[i]=(int)(custom_threshold[i]*(bias/71.)+ 0.5);
+ for(i=0;i<8;i++){
+ vf->priv->threshold_mtx_noq[2*i]=(uint64_t)custom_threshold_m[i*8+2]
+ |(((uint64_t)custom_threshold_m[i*8+6])<<16)
+ |(((uint64_t)custom_threshold_m[i*8+0])<<32)
+ |(((uint64_t)custom_threshold_m[i*8+4])<<48);
+ vf->priv->threshold_mtx_noq[2*i+1]=(uint64_t)custom_threshold_m[i*8+5]
+ |(((uint64_t)custom_threshold_m[i*8+3])<<16)
+ |(((uint64_t)custom_threshold_m[i*8+1])<<32)
+ |(((uint64_t)custom_threshold_m[i*8+7])<<48);
+ }
+
+ if (vf->priv->qp) vf->priv->prev_q=vf->priv->qp, mul_thrmat_s(vf->priv, vf->priv->qp);
+
+ return 1;
+}
+
+const vf_info_t ff_vf_info_fspp = {
+ "fast simple postprocess",
+ "fspp",
+ "Michael Niedermayer, Nikolaj Poroshin",
+ "",
+ vf_open,
+ NULL
+};
+
+//====================================================================
+//Specific spp's dct, idct and threshold functions
+//I'd prefer to have them in the separate file.
+
+//#define MANGLE(a) #a
+
+//typedef int16_t int16_t; //! only int16_t
+
+#define DCTSIZE 8
+#define DCTSIZE_S "8"
+
+#define FIX(x,s) ((int) ((x) * (1<<s) + 0.5)&0xffff)
+#define C64(x) ((uint64_t)((x)|(x)<<16))<<32 | (uint64_t)(x) | (uint64_t)(x)<<16
+#define FIX64(x,s) C64(FIX(x,s))
+
+#define MULTIPLY16H(x,k) (((x)*(k))>>16)
+#define THRESHOLD(r,x,t) if(((unsigned)((x)+t))>t*2) r=(x);else r=0;
+#define DESCALE(x,n) (((x) + (1 << ((n)-1))) >> n)
+
+#if HAVE_MMX
+
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_382683433)=FIX64(0.382683433, 14);
+DECLARE_ALIGNED(8, uint64_t, ff_MM_FIX_0_541196100)=FIX64(0.541196100, 14);
+DECLARE_ALIGNED(8, uint64_t, ff_MM_FIX_0_707106781)=FIX64(0.707106781, 14);
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_306562965)=FIX64(1.306562965, 14);
+
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_414213562_A)=FIX64(1.414213562, 14);
+
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_847759065)=FIX64(1.847759065, 13);
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_2_613125930)=FIX64(-2.613125930, 13); //-
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_414213562)=FIX64(1.414213562, 13);
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_1_082392200)=FIX64(1.082392200, 13);
+//for t3,t5,t7 == 0 shortcut
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_847759065)=FIX64(0.847759065, 14);
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_566454497)=FIX64(0.566454497, 14);
+DECLARE_ASM_CONST(8, uint64_t, MM_FIX_0_198912367)=FIX64(0.198912367, 14);
+
+DECLARE_ASM_CONST(8, uint64_t, MM_DESCALE_RND)=C64(4);
+DECLARE_ASM_CONST(8, uint64_t, MM_2)=C64(2);
+
+#else /* !HAVE_MMX */
+
+typedef int32_t int_simd16_t;
+static const int16_t FIX_0_382683433=FIX(0.382683433, 14);
+static const int16_t FIX_0_541196100=FIX(0.541196100, 14);
+static const int16_t FIX_0_707106781=FIX(0.707106781, 14);
+static const int16_t FIX_1_306562965=FIX(1.306562965, 14);
+static const int16_t FIX_1_414213562_A=FIX(1.414213562, 14);
+static const int16_t FIX_1_847759065=FIX(1.847759065, 13);
+static const int16_t FIX_2_613125930=FIX(-2.613125930, 13); //-
+static const int16_t FIX_1_414213562=FIX(1.414213562, 13);
+static const int16_t FIX_1_082392200=FIX(1.082392200, 13);
+
+#endif
+
+#if !HAVE_MMX
+
+static void column_fidct_c(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt)
+{
+ int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int_simd16_t tmp10, tmp11, tmp12, tmp13;
+ int_simd16_t z1,z2,z3,z4,z5, z10, z11, z12, z13;
+ int_simd16_t d0, d1, d2, d3, d4, d5, d6, d7;
+
+ int16_t* dataptr;
+ int16_t* wsptr;
+ int16_t *threshold;
+ int ctr;
+
+ dataptr = data;
+ wsptr = output;
+
+ for (; cnt > 0; cnt-=2) { //start positions
+ threshold=(int16_t*)thr_adr;//threshold_mtx
+ for (ctr = DCTSIZE; ctr > 0; ctr--) {
+ // Process columns from input, add to output.
+ tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7];
+ tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7];
+
+ tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6];
+ tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
+
+ tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5];
+ tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
+
+ tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4];
+ tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];
+
+ // Even part of FDCT
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+
+ d0 = tmp10 + tmp11;
+ d4 = tmp10 - tmp11;
+
+ z1 = MULTIPLY16H((tmp12 + tmp13) <<2, FIX_0_707106781);
+ d2 = tmp13 + z1;
+ d6 = tmp13 - z1;
+
+ // Even part of IDCT
+
+ THRESHOLD(tmp0, d0, threshold[0*8]);
+ THRESHOLD(tmp1, d2, threshold[2*8]);
+ THRESHOLD(tmp2, d4, threshold[4*8]);
+ THRESHOLD(tmp3, d6, threshold[6*8]);
+ tmp0+=2;
+ tmp10 = (tmp0 + tmp2)>>2;
+ tmp11 = (tmp0 - tmp2)>>2;
+
+ tmp13 = (tmp1 + tmp3)>>2; //+2 ! (psnr decides)
+ tmp12 = MULTIPLY16H((tmp1 - tmp3), FIX_1_414213562_A) - tmp13; //<<2
+
+ tmp0 = tmp10 + tmp13; //->temps
+ tmp3 = tmp10 - tmp13; //->temps
+ tmp1 = tmp11 + tmp12; //->temps
+ tmp2 = tmp11 - tmp12; //->temps
+
+ // Odd part of FDCT
+
+ tmp10 = tmp4 + tmp5;
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp6 + tmp7;
+
+ z5 = MULTIPLY16H((tmp10 - tmp12)<<2, FIX_0_382683433);
+ z2 = MULTIPLY16H(tmp10 <<2, FIX_0_541196100) + z5;
+ z4 = MULTIPLY16H(tmp12 <<2, FIX_1_306562965) + z5;
+ z3 = MULTIPLY16H(tmp11 <<2, FIX_0_707106781);
+
+ z11 = tmp7 + z3;
+ z13 = tmp7 - z3;
+
+ d5 = z13 + z2;
+ d3 = z13 - z2;
+ d1 = z11 + z4;
+ d7 = z11 - z4;
+
+ // Odd part of IDCT
+
+ THRESHOLD(tmp4, d1, threshold[1*8]);
+ THRESHOLD(tmp5, d3, threshold[3*8]);
+ THRESHOLD(tmp6, d5, threshold[5*8]);
+ THRESHOLD(tmp7, d7, threshold[7*8]);
+
+ //Simd version uses here a shortcut for the tmp5,tmp6,tmp7 == 0
+ z13 = tmp6 + tmp5;
+ z10 = (tmp6 - tmp5)<<1;
+ z11 = tmp4 + tmp7;
+ z12 = (tmp4 - tmp7)<<1;
+
+ tmp7 = (z11 + z13)>>2; //+2 !
+ tmp11 = MULTIPLY16H((z11 - z13)<<1, FIX_1_414213562);
+ z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065);
+ tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5;
+ tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - !!
+
+ tmp6 = tmp12 - tmp7;
+ tmp5 = tmp11 - tmp6;
+ tmp4 = tmp10 + tmp5;
+
+ wsptr[DCTSIZE*0]+= (tmp0 + tmp7);
+ wsptr[DCTSIZE*1]+= (tmp1 + tmp6);
+ wsptr[DCTSIZE*2]+= (tmp2 + tmp5);
+ wsptr[DCTSIZE*3]+= (tmp3 - tmp4);
+ wsptr[DCTSIZE*4]+= (tmp3 + tmp4);
+ wsptr[DCTSIZE*5]+= (tmp2 - tmp5);
+ wsptr[DCTSIZE*6]= (tmp1 - tmp6);
+ wsptr[DCTSIZE*7]= (tmp0 - tmp7);
+ //
+ dataptr++; //next column
+ wsptr++;
+ threshold++;
+ }
+ dataptr+=8; //skip each second start pos
+ wsptr +=8;
+ }
+}
+
+#else /* HAVE_MMX */
+
+static void column_fidct_mmx(int16_t* thr_adr, int16_t *data, int16_t *output, int cnt)
+{
+ DECLARE_ALIGNED(8, uint64_t, temps)[4];
+ __asm__ volatile(
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm1 \n\t"
+ //
+ "movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm7 \n\t"
+ "movq %%mm1, %%mm0 \n\t"
+
+ "paddw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm1 \n\t" //t0
+ "movq %%mm7, %%mm3 \n\t"
+
+ "paddw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm7 \n\t" //t3
+ "movq %%mm1, %%mm5 \n\t"
+
+ "movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm6 \n\t"
+ "psubw %%mm7, %%mm1 \n\t" //t13
+
+ "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
+ "movq %%mm6, %%mm4 \n\t"
+
+ "paddw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm6 \n\t" //t1
+ "paddw %%mm7, %%mm5 \n\t" //t10
+
+ "paddw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t2
+ "movq %%mm6, %%mm7 \n\t"
+
+ "paddw %%mm2, %%mm6 \n\t" //t11
+ "psubw %%mm2, %%mm7 \n\t" //t12
+
+ "movq %%mm5, %%mm2 \n\t"
+ "paddw %%mm6, %%mm5 \n\t" //d0
+ // i0 t13 t12 i3 i1 d0 - d4
+ "psubw %%mm6, %%mm2 \n\t" //d4
+ "paddw %%mm1, %%mm7 \n\t"
+
+ "movq 4*16(%%"REG_d"), %%mm6 \n\t"
+ "psllw $2, %%mm7 \n\t"
+
+ "psubw 0*16(%%"REG_d"), %%mm5 \n\t"
+ "psubw %%mm6, %%mm2 \n\t"
+
+ "paddusw 0*16(%%"REG_d"), %%mm5 \n\t"
+ "paddusw %%mm6, %%mm2 \n\t"
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm7 \n\t"
+ //
+ "paddw 0*16(%%"REG_d"), %%mm5 \n\t"
+ "paddw %%mm6, %%mm2 \n\t"
+
+ "psubusw 0*16(%%"REG_d"), %%mm5 \n\t"
+ "psubusw %%mm6, %%mm2 \n\t"
+
+//This func is totally compute-bound, operates at huge speed. So, DC shortcut
+// at this place isn't worthwhile due to BTB miss penalty (checked on Pent. 3).
+//However, typical numbers: nondc - 29%%, dc - 46%%, zero - 25%%. All <> 0 case is very rare.
+ "paddw "MANGLE(MM_2)", %%mm5 \n\t"
+ "movq %%mm2, %%mm6 \n\t"
+
+ "paddw %%mm5, %%mm2 \n\t"
+ "psubw %%mm6, %%mm5 \n\t"
+
+ "movq %%mm1, %%mm6 \n\t"
+ "paddw %%mm7, %%mm1 \n\t" //d2
+
+ "psubw 2*16(%%"REG_d"), %%mm1 \n\t"
+ "psubw %%mm7, %%mm6 \n\t" //d6
+
+ "movq 6*16(%%"REG_d"), %%mm7 \n\t"
+ "psraw $2, %%mm5 \n\t"
+
+ "paddusw 2*16(%%"REG_d"), %%mm1 \n\t"
+ "psubw %%mm7, %%mm6 \n\t"
+ // t7 d2 /t11 t4 t6 - d6 /t10
+
+ "paddw 2*16(%%"REG_d"), %%mm1 \n\t"
+ "paddusw %%mm7, %%mm6 \n\t"
+
+ "psubusw 2*16(%%"REG_d"), %%mm1 \n\t"
+ "paddw %%mm7, %%mm6 \n\t"
+
+ "psubw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm3 \n\t"
+ "psubusw %%mm7, %%mm6 \n\t"
+
+ //movq [edi+"DCTSIZE_S"*2*2], mm1
+ //movq [edi+"DCTSIZE_S"*6*2], mm6
+ "movq %%mm1, %%mm7 \n\t"
+ "psraw $2, %%mm2 \n\t"
+
+ "psubw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm4 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+
+ "psubw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm0 \n\t"
+ "paddw %%mm7, %%mm6 \n\t" //'t13
+
+ "psraw $2, %%mm6 \n\t" //paddw mm6, MM_2 !! ---
+ "movq %%mm2, %%mm7 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm1 \n\t"
+ "paddw %%mm6, %%mm2 \n\t" //'t0
+
+ "movq %%mm2, 0*8+%3 \n\t" //!
+ "psubw %%mm6, %%mm7 \n\t" //'t3
+
+ "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
+ "psubw %%mm6, %%mm1 \n\t" //'t12
+
+ "psubw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t5
+ "movq %%mm5, %%mm6 \n\t"
+
+ "movq %%mm7, 3*8+%3 \n\t"
+ "paddw %%mm2, %%mm3 \n\t" //t10
+
+ "paddw %%mm4, %%mm2 \n\t" //t11
+ "paddw %%mm0, %%mm4 \n\t" //t12
+
+ "movq %%mm3, %%mm7 \n\t"
+ "psubw %%mm4, %%mm3 \n\t"
+
+ "psllw $2, %%mm3 \n\t"
+ "psllw $2, %%mm7 \n\t" //opt for P6
+
+ "pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t"
+ "psllw $2, %%mm4 \n\t"
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm7 \n\t"
+ "psllw $2, %%mm2 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm4 \n\t"
+ "paddw %%mm1, %%mm5 \n\t" //'t1
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm2 \n\t"
+ "psubw %%mm1, %%mm6 \n\t" //'t2
+ // t7 't12 't11 t4 t6 - 't13 't10 ---
+
+ "paddw %%mm3, %%mm7 \n\t" //z2
+
+ "movq %%mm5, 1*8+%3 \n\t"
+ "paddw %%mm3, %%mm4 \n\t" //z4
+
+ "movq 3*16(%%"REG_d"), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+
+ "movq %%mm6, 2*8+%3 \n\t"
+ "psubw %%mm2, %%mm1 \n\t" //z13
+
+//===
+ "paddw %%mm2, %%mm0 \n\t" //z11
+ "movq %%mm1, %%mm5 \n\t"
+
+ "movq 5*16(%%"REG_d"), %%mm2 \n\t"
+ "psubw %%mm7, %%mm1 \n\t" //d3
+
+ "paddw %%mm7, %%mm5 \n\t" //d5
+ "psubw %%mm3, %%mm1 \n\t"
+
+ "movq 1*16(%%"REG_d"), %%mm7 \n\t"
+ "psubw %%mm2, %%mm5 \n\t"
+
+ "movq %%mm0, %%mm6 \n\t"
+ "paddw %%mm4, %%mm0 \n\t" //d1
+
+ "paddusw %%mm3, %%mm1 \n\t"
+ "psubw %%mm4, %%mm6 \n\t" //d7
+
+ // d1 d3 - - - d5 d7 -
+ "movq 7*16(%%"REG_d"), %%mm4 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+
+ "psubw %%mm4, %%mm6 \n\t"
+ "paddusw %%mm2, %%mm5 \n\t"
+
+ "paddusw %%mm4, %%mm6 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+
+ "paddw %%mm2, %%mm5 \n\t"
+ "paddw %%mm4, %%mm6 \n\t"
+
+ "psubusw %%mm3, %%mm1 \n\t"
+ "psubusw %%mm2, %%mm5 \n\t"
+
+ "psubusw %%mm4, %%mm6 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+
+ "por %%mm5, %%mm4 \n\t"
+ "paddusw %%mm7, %%mm0 \n\t"
+
+ "por %%mm6, %%mm4 \n\t"
+ "paddw %%mm7, %%mm0 \n\t"
+
+ "packssdw %%mm4, %%mm4 \n\t"
+ "psubusw %%mm7, %%mm0 \n\t"
+
+ "movd %%mm4, %%"REG_a" \n\t"
+ "or %%"REG_a", %%"REG_a" \n\t"
+ "jnz 2f \n\t"
+ //movq [edi+"DCTSIZE_S"*3*2], mm1
+ //movq [edi+"DCTSIZE_S"*5*2], mm5
+ //movq [edi+"DCTSIZE_S"*1*2], mm0
+ //movq [edi+"DCTSIZE_S"*7*2], mm6
+ // t4 t5 - - - t6 t7 -
+ //--- t4 (mm0) may be <>0; mm1, mm5, mm6 == 0
+//Typical numbers: nondc - 19%%, dc - 26%%, zero - 55%%. zero case alone isn't worthwhile
+ "movq 0*8+%3, %%mm4 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_0_847759065)", %%mm0 \n\t" //tmp6
+ "movq %%mm1, %%mm2 \n\t"
+
+ "movq "DCTSIZE_S"*0*2(%%"REG_D"), %%mm5 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_0_566454497)", %%mm1 \n\t" //tmp5
+ "paddw %%mm4, %%mm5 \n\t"
+
+ "movq 1*8+%3, %%mm6 \n\t"
+ //paddw mm3, MM_2
+ "psraw $2, %%mm3 \n\t" //tmp7
+
+ "pmulhw "MANGLE(MM_FIX_0_198912367)", %%mm2 \n\t" //-tmp4
+ "psubw %%mm3, %%mm4 \n\t"
+
+ "movq "DCTSIZE_S"*1*2(%%"REG_D"), %%mm7 \n\t"
+ "paddw %%mm3, %%mm5 \n\t"
+
+ "movq %%mm4, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
+ "paddw %%mm6, %%mm7 \n\t"
+
+ "movq 2*8+%3, %%mm3 \n\t"
+ "psubw %%mm0, %%mm6 \n\t"
+
+ "movq "DCTSIZE_S"*2*2(%%"REG_D"), %%mm4 \n\t"
+ "paddw %%mm0, %%mm7 \n\t"
+
+ "movq %%mm5, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
+ "psubw %%mm1, %%mm3 \n\t"
+
+ "movq "DCTSIZE_S"*5*2(%%"REG_D"), %%mm5 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+
+ "movq "DCTSIZE_S"*3*2(%%"REG_D"), %%mm6 \n\t"
+ "paddw %%mm3, %%mm5 \n\t"
+
+ "movq 3*8+%3, %%mm0 \n\t"
+ "add $8, %%"REG_S" \n\t"
+
+ "movq %%mm7, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
+ "paddw %%mm0, %%mm6 \n\t"
+
+ "movq %%mm4, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+
+ "movq "DCTSIZE_S"*4*2(%%"REG_D"), %%mm7 \n\t"
+ "paddw %%mm2, %%mm6 \n\t"
+
+ "movq %%mm5, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
+ "paddw %%mm0, %%mm7 \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
+
+ "movq %%mm7, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
+ "add $8, %%"REG_D" \n\t"
+ "jmp 4f \n\t"
+
+ "2: \n\t"
+ //--- non DC2
+ //psraw mm1, 2 w/o it -> offset. thr1, thr1, thr1 (actually thr1, thr1, thr1-1)
+ //psraw mm5, 2
+ //psraw mm0, 2
+ //psraw mm6, 2
+ "movq %%mm5, %%mm3 \n\t"
+ "psubw %%mm1, %%mm5 \n\t"
+
+ "psllw $1, %%mm5 \n\t" //'z10
+ "paddw %%mm1, %%mm3 \n\t" //'z13
+
+ "movq %%mm0, %%mm2 \n\t"
+ "psubw %%mm6, %%mm0 \n\t"
+
+ "movq %%mm5, %%mm1 \n\t"
+ "psllw $1, %%mm0 \n\t" //'z12
+
+ "pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm1 \n\t" //-
+ "paddw %%mm0, %%mm5 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm5 \n\t" //'z5
+ "paddw %%mm6, %%mm2 \n\t" //'z11
+
+ "pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm0 \n\t"
+ "movq %%mm2, %%mm7 \n\t"
+
+ //---
+ "movq 0*8+%3, %%mm4 \n\t"
+ "psubw %%mm3, %%mm2 \n\t"
+
+ "psllw $1, %%mm2 \n\t"
+ "paddw %%mm3, %%mm7 \n\t" //'t7
+
+ "pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //'t11
+ "movq %%mm4, %%mm6 \n\t"
+ //paddw mm7, MM_2
+ "psraw $2, %%mm7 \n\t"
+
+ "paddw "DCTSIZE_S"*0*2(%%"REG_D"), %%mm4 \n\t"
+ "psubw %%mm7, %%mm6 \n\t"
+
+ "movq 1*8+%3, %%mm3 \n\t"
+ "paddw %%mm7, %%mm4 \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
+ "paddw %%mm5, %%mm1 \n\t" //'t12
+
+ "movq %%mm4, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
+ "psubw %%mm7, %%mm1 \n\t" //'t6
+
+ "movq 2*8+%3, %%mm7 \n\t"
+ "psubw %%mm5, %%mm0 \n\t" //'t10
+
+ "movq 3*8+%3, %%mm6 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+
+ "paddw "DCTSIZE_S"*1*2(%%"REG_D"), %%mm3 \n\t"
+ "psubw %%mm1, %%mm5 \n\t"
+
+ "psubw %%mm1, %%mm2 \n\t" //'t5
+ "paddw %%mm1, %%mm3 \n\t"
+
+ "movq %%mm5, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
+ "movq %%mm7, %%mm4 \n\t"
+
+ "paddw "DCTSIZE_S"*2*2(%%"REG_D"), %%mm7 \n\t"
+ "psubw %%mm2, %%mm4 \n\t"
+
+ "paddw "DCTSIZE_S"*5*2(%%"REG_D"), %%mm4 \n\t"
+ "paddw %%mm2, %%mm7 \n\t"
+
+ "movq %%mm3, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
+ "paddw %%mm2, %%mm0 \n\t" //'t4
+
+ // 't4 't6 't5 - - - - 't7
+ "movq %%mm7, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
+ "movq %%mm6, %%mm1 \n\t"
+
+ "paddw "DCTSIZE_S"*4*2(%%"REG_D"), %%mm6 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+
+ "paddw "DCTSIZE_S"*3*2(%%"REG_D"), %%mm1 \n\t"
+ "paddw %%mm0, %%mm6 \n\t"
+
+ "movq %%mm4, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
+ "add $8, %%"REG_S" \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
+
+ "movq %%mm1, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
+ "add $8, %%"REG_D" \n\t"
+
+ "4: \n\t"
+//=part 2 (the same)===========================================================
+ "movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm1 \n\t"
+ //
+ "movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm7 \n\t"
+ "movq %%mm1, %%mm0 \n\t"
+
+ "paddw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm1 \n\t" //t0
+ "movq %%mm7, %%mm3 \n\t"
+
+ "paddw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm7 \n\t" //t3
+ "movq %%mm1, %%mm5 \n\t"
+
+ "movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm6 \n\t"
+ "psubw %%mm7, %%mm1 \n\t" //t13
+
+ "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
+ "movq %%mm6, %%mm4 \n\t"
+
+ "paddw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm6 \n\t" //t1
+ "paddw %%mm7, %%mm5 \n\t" //t10
+
+ "paddw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t2
+ "movq %%mm6, %%mm7 \n\t"
+
+ "paddw %%mm2, %%mm6 \n\t" //t11
+ "psubw %%mm2, %%mm7 \n\t" //t12
+
+ "movq %%mm5, %%mm2 \n\t"
+ "paddw %%mm6, %%mm5 \n\t" //d0
+ // i0 t13 t12 i3 i1 d0 - d4
+ "psubw %%mm6, %%mm2 \n\t" //d4
+ "paddw %%mm1, %%mm7 \n\t"
+
+ "movq 1*8+4*16(%%"REG_d"), %%mm6 \n\t"
+ "psllw $2, %%mm7 \n\t"
+
+ "psubw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
+ "psubw %%mm6, %%mm2 \n\t"
+
+ "paddusw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
+ "paddusw %%mm6, %%mm2 \n\t"
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm7 \n\t"
+ //
+ "paddw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
+ "paddw %%mm6, %%mm2 \n\t"
+
+ "psubusw 1*8+0*16(%%"REG_d"), %%mm5 \n\t"
+ "psubusw %%mm6, %%mm2 \n\t"
+
+//This func is totally compute-bound, operates at huge speed. So, DC shortcut
+// at this place isn't worthwhile due to BTB miss penalty (checked on Pent. 3).
+//However, typical numbers: nondc - 29%%, dc - 46%%, zero - 25%%. All <> 0 case is very rare.
+ "paddw "MANGLE(MM_2)", %%mm5 \n\t"
+ "movq %%mm2, %%mm6 \n\t"
+
+ "paddw %%mm5, %%mm2 \n\t"
+ "psubw %%mm6, %%mm5 \n\t"
+
+ "movq %%mm1, %%mm6 \n\t"
+ "paddw %%mm7, %%mm1 \n\t" //d2
+
+ "psubw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
+ "psubw %%mm7, %%mm6 \n\t" //d6
+
+ "movq 1*8+6*16(%%"REG_d"), %%mm7 \n\t"
+ "psraw $2, %%mm5 \n\t"
+
+ "paddusw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
+ "psubw %%mm7, %%mm6 \n\t"
+ // t7 d2 /t11 t4 t6 - d6 /t10
+
+ "paddw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
+ "paddusw %%mm7, %%mm6 \n\t"
+
+ "psubusw 1*8+2*16(%%"REG_d"), %%mm1 \n\t"
+ "paddw %%mm7, %%mm6 \n\t"
+
+ "psubw "DCTSIZE_S"*4*2(%%"REG_S"), %%mm3 \n\t"
+ "psubusw %%mm7, %%mm6 \n\t"
+
+ //movq [edi+"DCTSIZE_S"*2*2], mm1
+ //movq [edi+"DCTSIZE_S"*6*2], mm6
+ "movq %%mm1, %%mm7 \n\t"
+ "psraw $2, %%mm2 \n\t"
+
+ "psubw "DCTSIZE_S"*6*2(%%"REG_S"), %%mm4 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+
+ "psubw "DCTSIZE_S"*7*2(%%"REG_S"), %%mm0 \n\t"
+ "paddw %%mm7, %%mm6 \n\t" //'t13
+
+ "psraw $2, %%mm6 \n\t" //paddw mm6, MM_2 !! ---
+ "movq %%mm2, %%mm7 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm1 \n\t"
+ "paddw %%mm6, %%mm2 \n\t" //'t0
+
+ "movq %%mm2, 0*8+%3 \n\t" //!
+ "psubw %%mm6, %%mm7 \n\t" //'t3
+
+ "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
+ "psubw %%mm6, %%mm1 \n\t" //'t12
+
+ "psubw "DCTSIZE_S"*5*2(%%"REG_S"), %%mm2 \n\t" //t5
+ "movq %%mm5, %%mm6 \n\t"
+
+ "movq %%mm7, 3*8+%3 \n\t"
+ "paddw %%mm2, %%mm3 \n\t" //t10
+
+ "paddw %%mm4, %%mm2 \n\t" //t11
+ "paddw %%mm0, %%mm4 \n\t" //t12
+
+ "movq %%mm3, %%mm7 \n\t"
+ "psubw %%mm4, %%mm3 \n\t"
+
+ "psllw $2, %%mm3 \n\t"
+ "psllw $2, %%mm7 \n\t" //opt for P6
+
+ "pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t"
+ "psllw $2, %%mm4 \n\t"
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm7 \n\t"
+ "psllw $2, %%mm2 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm4 \n\t"
+ "paddw %%mm1, %%mm5 \n\t" //'t1
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm2 \n\t"
+ "psubw %%mm1, %%mm6 \n\t" //'t2
+ // t7 't12 't11 t4 t6 - 't13 't10 ---
+
+ "paddw %%mm3, %%mm7 \n\t" //z2
+
+ "movq %%mm5, 1*8+%3 \n\t"
+ "paddw %%mm3, %%mm4 \n\t" //z4
+
+ "movq 1*8+3*16(%%"REG_d"), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+
+ "movq %%mm6, 2*8+%3 \n\t"
+ "psubw %%mm2, %%mm1 \n\t" //z13
+
+//===
+ "paddw %%mm2, %%mm0 \n\t" //z11
+ "movq %%mm1, %%mm5 \n\t"
+
+ "movq 1*8+5*16(%%"REG_d"), %%mm2 \n\t"
+ "psubw %%mm7, %%mm1 \n\t" //d3
+
+ "paddw %%mm7, %%mm5 \n\t" //d5
+ "psubw %%mm3, %%mm1 \n\t"
+
+ "movq 1*8+1*16(%%"REG_d"), %%mm7 \n\t"
+ "psubw %%mm2, %%mm5 \n\t"
+
+ "movq %%mm0, %%mm6 \n\t"
+ "paddw %%mm4, %%mm0 \n\t" //d1
+
+ "paddusw %%mm3, %%mm1 \n\t"
+ "psubw %%mm4, %%mm6 \n\t" //d7
+
+ // d1 d3 - - - d5 d7 -
+ "movq 1*8+7*16(%%"REG_d"), %%mm4 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+
+ "psubw %%mm4, %%mm6 \n\t"
+ "paddusw %%mm2, %%mm5 \n\t"
+
+ "paddusw %%mm4, %%mm6 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+
+ "paddw %%mm2, %%mm5 \n\t"
+ "paddw %%mm4, %%mm6 \n\t"
+
+ "psubusw %%mm3, %%mm1 \n\t"
+ "psubusw %%mm2, %%mm5 \n\t"
+
+ "psubusw %%mm4, %%mm6 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+
+ "por %%mm5, %%mm4 \n\t"
+ "paddusw %%mm7, %%mm0 \n\t"
+
+ "por %%mm6, %%mm4 \n\t"
+ "paddw %%mm7, %%mm0 \n\t"
+
+ "packssdw %%mm4, %%mm4 \n\t"
+ "psubusw %%mm7, %%mm0 \n\t"
+
+ "movd %%mm4, %%"REG_a" \n\t"
+ "or %%"REG_a", %%"REG_a" \n\t"
+ "jnz 3f \n\t"
+ //movq [edi+"DCTSIZE_S"*3*2], mm1
+ //movq [edi+"DCTSIZE_S"*5*2], mm5
+ //movq [edi+"DCTSIZE_S"*1*2], mm0
+ //movq [edi+"DCTSIZE_S"*7*2], mm6
+ // t4 t5 - - - t6 t7 -
+ //--- t4 (mm0) may be <>0; mm1, mm5, mm6 == 0
+//Typical numbers: nondc - 19%%, dc - 26%%, zero - 55%%. zero case alone isn't worthwhile
+ "movq 0*8+%3, %%mm4 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_0_847759065)", %%mm0 \n\t" //tmp6
+ "movq %%mm1, %%mm2 \n\t"
+
+ "movq "DCTSIZE_S"*0*2(%%"REG_D"), %%mm5 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_0_566454497)", %%mm1 \n\t" //tmp5
+ "paddw %%mm4, %%mm5 \n\t"
+
+ "movq 1*8+%3, %%mm6 \n\t"
+ //paddw mm3, MM_2
+ "psraw $2, %%mm3 \n\t" //tmp7
+
+ "pmulhw "MANGLE(MM_FIX_0_198912367)", %%mm2 \n\t" //-tmp4
+ "psubw %%mm3, %%mm4 \n\t"
+
+ "movq "DCTSIZE_S"*1*2(%%"REG_D"), %%mm7 \n\t"
+ "paddw %%mm3, %%mm5 \n\t"
+
+ "movq %%mm4, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
+ "paddw %%mm6, %%mm7 \n\t"
+
+ "movq 2*8+%3, %%mm3 \n\t"
+ "psubw %%mm0, %%mm6 \n\t"
+
+ "movq "DCTSIZE_S"*2*2(%%"REG_D"), %%mm4 \n\t"
+ "paddw %%mm0, %%mm7 \n\t"
+
+ "movq %%mm5, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
+ "psubw %%mm1, %%mm3 \n\t"
+
+ "movq "DCTSIZE_S"*5*2(%%"REG_D"), %%mm5 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+
+ "movq "DCTSIZE_S"*3*2(%%"REG_D"), %%mm6 \n\t"
+ "paddw %%mm3, %%mm5 \n\t"
+
+ "movq 3*8+%3, %%mm0 \n\t"
+ "add $24, %%"REG_S" \n\t"
+
+ "movq %%mm7, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
+ "paddw %%mm0, %%mm6 \n\t"
+
+ "movq %%mm4, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+
+ "movq "DCTSIZE_S"*4*2(%%"REG_D"), %%mm7 \n\t"
+ "paddw %%mm2, %%mm6 \n\t"
+
+ "movq %%mm5, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
+ "paddw %%mm0, %%mm7 \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
+
+ "movq %%mm7, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
+ "add $24, %%"REG_D" \n\t"
+ "sub $2, %%"REG_c" \n\t"
+ "jnz 1b \n\t"
+ "jmp 5f \n\t"
+
+ "3: \n\t"
+ //--- non DC2
+ //psraw mm1, 2 w/o it -> offset. thr1, thr1, thr1 (actually thr1, thr1, thr1-1)
+ //psraw mm5, 2
+ //psraw mm0, 2
+ //psraw mm6, 2
+ "movq %%mm5, %%mm3 \n\t"
+ "psubw %%mm1, %%mm5 \n\t"
+
+ "psllw $1, %%mm5 \n\t" //'z10
+ "paddw %%mm1, %%mm3 \n\t" //'z13
+
+ "movq %%mm0, %%mm2 \n\t"
+ "psubw %%mm6, %%mm0 \n\t"
+
+ "movq %%mm5, %%mm1 \n\t"
+ "psllw $1, %%mm0 \n\t" //'z12
+
+ "pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm1 \n\t" //-
+ "paddw %%mm0, %%mm5 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm5 \n\t" //'z5
+ "paddw %%mm6, %%mm2 \n\t" //'z11
+
+ "pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm0 \n\t"
+ "movq %%mm2, %%mm7 \n\t"
+
+ //---
+ "movq 0*8+%3, %%mm4 \n\t"
+ "psubw %%mm3, %%mm2 \n\t"
+
+ "psllw $1, %%mm2 \n\t"
+ "paddw %%mm3, %%mm7 \n\t" //'t7
+
+ "pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //'t11
+ "movq %%mm4, %%mm6 \n\t"
+ //paddw mm7, MM_2
+ "psraw $2, %%mm7 \n\t"
+
+ "paddw "DCTSIZE_S"*0*2(%%"REG_D"), %%mm4 \n\t"
+ "psubw %%mm7, %%mm6 \n\t"
+
+ "movq 1*8+%3, %%mm3 \n\t"
+ "paddw %%mm7, %%mm4 \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*7*2(%%"REG_D") \n\t"
+ "paddw %%mm5, %%mm1 \n\t" //'t12
+
+ "movq %%mm4, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
+ "psubw %%mm7, %%mm1 \n\t" //'t6
+
+ "movq 2*8+%3, %%mm7 \n\t"
+ "psubw %%mm5, %%mm0 \n\t" //'t10
+
+ "movq 3*8+%3, %%mm6 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+
+ "paddw "DCTSIZE_S"*1*2(%%"REG_D"), %%mm3 \n\t"
+ "psubw %%mm1, %%mm5 \n\t"
+
+ "psubw %%mm1, %%mm2 \n\t" //'t5
+ "paddw %%mm1, %%mm3 \n\t"
+
+ "movq %%mm5, "DCTSIZE_S"*6*2(%%"REG_D") \n\t"
+ "movq %%mm7, %%mm4 \n\t"
+
+ "paddw "DCTSIZE_S"*2*2(%%"REG_D"), %%mm7 \n\t"
+ "psubw %%mm2, %%mm4 \n\t"
+
+ "paddw "DCTSIZE_S"*5*2(%%"REG_D"), %%mm4 \n\t"
+ "paddw %%mm2, %%mm7 \n\t"
+
+ "movq %%mm3, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
+ "paddw %%mm2, %%mm0 \n\t" //'t4
+
+ // 't4 't6 't5 - - - - 't7
+ "movq %%mm7, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
+ "movq %%mm6, %%mm1 \n\t"
+
+ "paddw "DCTSIZE_S"*4*2(%%"REG_D"), %%mm6 \n\t"
+ "psubw %%mm0, %%mm1 \n\t"
+
+ "paddw "DCTSIZE_S"*3*2(%%"REG_D"), %%mm1 \n\t"
+ "paddw %%mm0, %%mm6 \n\t"
+
+ "movq %%mm4, "DCTSIZE_S"*5*2(%%"REG_D") \n\t"
+ "add $24, %%"REG_S" \n\t"
+
+ "movq %%mm6, "DCTSIZE_S"*4*2(%%"REG_D") \n\t"
+
+ "movq %%mm1, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
+ "add $24, %%"REG_D" \n\t"
+ "sub $2, %%"REG_c" \n\t"
+ "jnz 1b \n\t"
+ "5: \n\t"
+
+ : "+S"(data), "+D"(output), "+c"(cnt), "=o"(temps)
+ : "d"(thr_adr)
+ NAMED_CONSTRAINTS_ADD(ff_MM_FIX_0_707106781,MM_2,MM_FIX_1_414213562_A,MM_FIX_1_414213562,MM_FIX_0_382683433,
+ ff_MM_FIX_0_541196100,MM_FIX_1_306562965,MM_FIX_0_847759065)
+ NAMED_CONSTRAINTS_ADD(MM_FIX_0_566454497,MM_FIX_0_198912367,MM_FIX_2_613125930,MM_FIX_1_847759065,
+ MM_FIX_1_082392200)
+ : "%"REG_a
+ );
+}
+
+#endif // HAVE_MMX
+
+#if !HAVE_MMX
+
+static void row_idct_c(int16_t* workspace,
+ int16_t* output_adr, int output_stride, int cnt)
+{
+ int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int_simd16_t tmp10, tmp11, tmp12, tmp13;
+ int_simd16_t z5, z10, z11, z12, z13;
+ int16_t* outptr;
+ int16_t* wsptr;
+
+ cnt*=4;
+ wsptr = workspace;
+ outptr = output_adr;
+ for (; cnt > 0; cnt--) {
+ // Even part
+ //Simd version reads 4x4 block and transposes it
+ tmp10 = ( wsptr[2] + wsptr[3]);
+ tmp11 = ( wsptr[2] - wsptr[3]);
+
+ tmp13 = ( wsptr[0] + wsptr[1]);
+ tmp12 = (MULTIPLY16H( wsptr[0] - wsptr[1], FIX_1_414213562_A)<<2) - tmp13;//this shift order to avoid overflow
+
+ tmp0 = tmp10 + tmp13; //->temps
+ tmp3 = tmp10 - tmp13; //->temps
+ tmp1 = tmp11 + tmp12;
+ tmp2 = tmp11 - tmp12;
+
+ // Odd part
+ //Also transpose, with previous:
+ // ---- ---- ||||
+ // ---- ---- idct ||||
+ // ---- ---- ---> ||||
+ // ---- ---- ||||
+ z13 = wsptr[4] + wsptr[5];
+ z10 = wsptr[4] - wsptr[5];
+ z11 = wsptr[6] + wsptr[7];
+ z12 = wsptr[6] - wsptr[7];
+
+ tmp7 = z11 + z13;
+ tmp11 = MULTIPLY16H(z11 - z13, FIX_1_414213562);
+
+ z5 = MULTIPLY16H(z10 + z12, FIX_1_847759065);
+ tmp10 = MULTIPLY16H(z12, FIX_1_082392200) - z5;
+ tmp12 = MULTIPLY16H(z10, FIX_2_613125930) + z5; // - FIX_
+
+ tmp6 = (tmp12<<3) - tmp7;
+ tmp5 = (tmp11<<3) - tmp6;
+ tmp4 = (tmp10<<3) + tmp5;
+
+ // Final output stage: descale and write column
+ outptr[0*output_stride]+= DESCALE(tmp0 + tmp7, 3);
+ outptr[1*output_stride]+= DESCALE(tmp1 + tmp6, 3);
+ outptr[2*output_stride]+= DESCALE(tmp2 + tmp5, 3);
+ outptr[3*output_stride]+= DESCALE(tmp3 - tmp4, 3);
+ outptr[4*output_stride]+= DESCALE(tmp3 + tmp4, 3);
+ outptr[5*output_stride]+= DESCALE(tmp2 - tmp5, 3);
+ outptr[6*output_stride]+= DESCALE(tmp1 - tmp6, 3); //no += ?
+ outptr[7*output_stride]+= DESCALE(tmp0 - tmp7, 3); //no += ?
+ outptr++;
+
+ wsptr += DCTSIZE; // advance pointer to next row
+ }
+}
+
+#else /* HAVE_MMX */
+
+static void row_idct_mmx (int16_t* workspace,
+ int16_t* output_adr, int output_stride, int cnt)
+{
+ DECLARE_ALIGNED(8, uint64_t, temps)[4];
+ __asm__ volatile(
+ "lea (%%"REG_a",%%"REG_a",2), %%"REG_d" \n\t"
+ "1: \n\t"
+ "movq "DCTSIZE_S"*0*2(%%"REG_S"), %%mm0 \n\t"
+ //
+
+ "movq "DCTSIZE_S"*1*2(%%"REG_S"), %%mm1 \n\t"
+ "movq %%mm0, %%mm4 \n\t"
+
+ "movq "DCTSIZE_S"*2*2(%%"REG_S"), %%mm2 \n\t"
+ "punpcklwd %%mm1, %%mm0 \n\t"
+
+ "movq "DCTSIZE_S"*3*2(%%"REG_S"), %%mm3 \n\t"
+ "punpckhwd %%mm1, %%mm4 \n\t"
+
+ //transpose 4x4
+ "movq %%mm2, %%mm7 \n\t"
+ "punpcklwd %%mm3, %%mm2 \n\t"
+
+ "movq %%mm0, %%mm6 \n\t"
+ "punpckldq %%mm2, %%mm0 \n\t" //0
+
+ "punpckhdq %%mm2, %%mm6 \n\t" //1
+ "movq %%mm0, %%mm5 \n\t"
+
+ "punpckhwd %%mm3, %%mm7 \n\t"
+ "psubw %%mm6, %%mm0 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_414213562_A)", %%mm0 \n\t"
+ "movq %%mm4, %%mm2 \n\t"
+
+ "punpckldq %%mm7, %%mm4 \n\t" //2
+ "paddw %%mm6, %%mm5 \n\t"
+
+ "punpckhdq %%mm7, %%mm2 \n\t" //3
+ "movq %%mm4, %%mm1 \n\t"
+
+ "psllw $2, %%mm0 \n\t"
+ "paddw %%mm2, %%mm4 \n\t" //t10
+
+ "movq "DCTSIZE_S"*0*2+"DCTSIZE_S"(%%"REG_S"), %%mm3 \n\t"
+ "psubw %%mm2, %%mm1 \n\t" //t11
+
+ "movq "DCTSIZE_S"*1*2+"DCTSIZE_S"(%%"REG_S"), %%mm2 \n\t"
+ "psubw %%mm5, %%mm0 \n\t"
+
+ "movq %%mm4, %%mm6 \n\t"
+ "paddw %%mm5, %%mm4 \n\t" //t0
+
+ "psubw %%mm5, %%mm6 \n\t" //t3
+ "movq %%mm1, %%mm7 \n\t"
+
+ "movq "DCTSIZE_S"*2*2+"DCTSIZE_S"(%%"REG_S"), %%mm5 \n\t"
+ "paddw %%mm0, %%mm1 \n\t" //t1
+
+ "movq %%mm4, 0*8+%3 \n\t" //t0
+ "movq %%mm3, %%mm4 \n\t"
+
+ "movq %%mm6, 1*8+%3 \n\t" //t3
+ "punpcklwd %%mm2, %%mm3 \n\t"
+
+ //transpose 4x4
+ "movq "DCTSIZE_S"*3*2+"DCTSIZE_S"(%%"REG_S"), %%mm6 \n\t"
+ "punpckhwd %%mm2, %%mm4 \n\t"
+
+ "movq %%mm5, %%mm2 \n\t"
+ "punpcklwd %%mm6, %%mm5 \n\t"
+
+ "psubw %%mm0, %%mm7 \n\t" //t2
+ "punpckhwd %%mm6, %%mm2 \n\t"
+
+ "movq %%mm3, %%mm0 \n\t"
+ "punpckldq %%mm5, %%mm3 \n\t" //4
+
+ "punpckhdq %%mm5, %%mm0 \n\t" //5
+ "movq %%mm4, %%mm5 \n\t"
+
+ //
+ "movq %%mm3, %%mm6 \n\t"
+ "punpckldq %%mm2, %%mm4 \n\t" //6
+
+ "psubw %%mm0, %%mm3 \n\t" //z10
+ "punpckhdq %%mm2, %%mm5 \n\t" //7
+
+ "paddw %%mm0, %%mm6 \n\t" //z13
+ "movq %%mm4, %%mm2 \n\t"
+
+ "movq %%mm3, %%mm0 \n\t"
+ "psubw %%mm5, %%mm4 \n\t" //z12
+
+ "pmulhw "MANGLE(MM_FIX_2_613125930)", %%mm0 \n\t" //-
+ "paddw %%mm4, %%mm3 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_847759065)", %%mm3 \n\t" //z5
+ "paddw %%mm5, %%mm2 \n\t" //z11 >
+
+ "pmulhw "MANGLE(MM_FIX_1_082392200)", %%mm4 \n\t"
+ "movq %%mm2, %%mm5 \n\t"
+
+ "psubw %%mm6, %%mm2 \n\t"
+ "paddw %%mm6, %%mm5 \n\t" //t7
+
+ "pmulhw "MANGLE(MM_FIX_1_414213562)", %%mm2 \n\t" //t11
+ "paddw %%mm3, %%mm0 \n\t" //t12
+
+ "psllw $3, %%mm0 \n\t"
+ "psubw %%mm3, %%mm4 \n\t" //t10
+
+ "movq 0*8+%3, %%mm6 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+
+ "psllw $3, %%mm4 \n\t"
+ "psubw %%mm5, %%mm0 \n\t" //t6
+
+ "psllw $3, %%mm2 \n\t"
+ "paddw %%mm0, %%mm1 \n\t" //d1
+
+ "psubw %%mm0, %%mm2 \n\t" //t5
+ "psubw %%mm0, %%mm3 \n\t" //d6
+
+ "paddw %%mm2, %%mm4 \n\t" //t4
+ "movq %%mm7, %%mm0 \n\t"
+
+ "paddw %%mm2, %%mm7 \n\t" //d2
+ "psubw %%mm2, %%mm0 \n\t" //d5
+
+ "movq "MANGLE(MM_DESCALE_RND)", %%mm2 \n\t" //4
+ "psubw %%mm5, %%mm6 \n\t" //d7
+
+ "paddw 0*8+%3, %%mm5 \n\t" //d0
+ "paddw %%mm2, %%mm1 \n\t"
+
+ "paddw %%mm2, %%mm5 \n\t"
+ "psraw $3, %%mm1 \n\t"
+
+ "paddw %%mm2, %%mm7 \n\t"
+ "psraw $3, %%mm5 \n\t"
+
+ "paddw (%%"REG_D"), %%mm5 \n\t"
+ "psraw $3, %%mm7 \n\t"
+
+ "paddw (%%"REG_D",%%"REG_a"), %%mm1 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+
+ "paddw (%%"REG_D",%%"REG_a",2), %%mm7 \n\t"
+ "paddw %%mm2, %%mm3 \n\t"
+
+ "movq %%mm5, (%%"REG_D") \n\t"
+ "paddw %%mm2, %%mm6 \n\t"
+
+ "movq %%mm1, (%%"REG_D",%%"REG_a") \n\t"
+ "psraw $3, %%mm0 \n\t"
+
+ "movq %%mm7, (%%"REG_D",%%"REG_a",2) \n\t"
+ "add %%"REG_d", %%"REG_D" \n\t" //3*ls
+
+ "movq 1*8+%3, %%mm5 \n\t" //t3
+ "psraw $3, %%mm3 \n\t"
+
+ "paddw (%%"REG_D",%%"REG_a",2), %%mm0 \n\t"
+ "psubw %%mm4, %%mm5 \n\t" //d3
+
+ "paddw (%%"REG_D",%%"REG_d"), %%mm3 \n\t"
+ "psraw $3, %%mm6 \n\t"
+
+ "paddw 1*8+%3, %%mm4 \n\t" //d4
+ "paddw %%mm2, %%mm5 \n\t"
+
+ "paddw (%%"REG_D",%%"REG_a",4), %%mm6 \n\t"
+ "paddw %%mm2, %%mm4 \n\t"
+
+ "movq %%mm0, (%%"REG_D",%%"REG_a",2) \n\t"
+ "psraw $3, %%mm5 \n\t"
+
+ "paddw (%%"REG_D"), %%mm5 \n\t"
+ "psraw $3, %%mm4 \n\t"
+
+ "paddw (%%"REG_D",%%"REG_a"), %%mm4 \n\t"
+ "add $"DCTSIZE_S"*2*4, %%"REG_S" \n\t" //4 rows
+
+ "movq %%mm3, (%%"REG_D",%%"REG_d") \n\t"
+ "movq %%mm6, (%%"REG_D",%%"REG_a",4) \n\t"
+ "movq %%mm5, (%%"REG_D") \n\t"
+ "movq %%mm4, (%%"REG_D",%%"REG_a") \n\t"
+
+ "sub %%"REG_d", %%"REG_D" \n\t"
+ "add $8, %%"REG_D" \n\t"
+ "dec %%"REG_c" \n\t"
+ "jnz 1b \n\t"
+
+ : "+S"(workspace), "+D"(output_adr), "+c"(cnt), "=o"(temps)
+ : "a"(output_stride*sizeof(short))
+ NAMED_CONSTRAINTS_ADD(MM_FIX_1_414213562_A,MM_FIX_2_613125930,MM_FIX_1_847759065,MM_FIX_1_082392200,
+ MM_FIX_1_414213562,MM_DESCALE_RND)
+ : "%"REG_d
+ );
+}
+
+#endif // HAVE_MMX
+
+#if !HAVE_MMX
+
+static void row_fdct_c(int16_t *data, const uint8_t *pixels, int line_size, int cnt)
+{
+ int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int_simd16_t tmp10, tmp11, tmp12, tmp13;
+ int_simd16_t z1, z2, z3, z4, z5, z11, z13;
+ int16_t *dataptr;
+
+ cnt*=4;
+ // Pass 1: process rows.
+
+ dataptr = data;
+ for (; cnt > 0; cnt--) {
+ tmp0 = pixels[line_size*0] + pixels[line_size*7];
+ tmp7 = pixels[line_size*0] - pixels[line_size*7];
+ tmp1 = pixels[line_size*1] + pixels[line_size*6];
+ tmp6 = pixels[line_size*1] - pixels[line_size*6];
+ tmp2 = pixels[line_size*2] + pixels[line_size*5];
+ tmp5 = pixels[line_size*2] - pixels[line_size*5];
+ tmp3 = pixels[line_size*3] + pixels[line_size*4];
+ tmp4 = pixels[line_size*3] - pixels[line_size*4];
+
+ // Even part
+
+ tmp10 = tmp0 + tmp3;
+ tmp13 = tmp0 - tmp3;
+ tmp11 = tmp1 + tmp2;
+ tmp12 = tmp1 - tmp2;
+ //Even columns are written first, this leads to different order of columns
+ //in column_fidct(), but they are processed independently, so all ok.
+ //Later in the row_idct() columns readed at the same order.
+ dataptr[2] = tmp10 + tmp11;
+ dataptr[3] = tmp10 - tmp11;
+
+ z1 = MULTIPLY16H((tmp12 + tmp13)<<2, FIX_0_707106781);
+ dataptr[0] = tmp13 + z1;
+ dataptr[1] = tmp13 - z1;
+
+ // Odd part
+
+ tmp10 = (tmp4 + tmp5) <<2;
+ tmp11 = (tmp5 + tmp6) <<2;
+ tmp12 = (tmp6 + tmp7) <<2;
+
+ z5 = MULTIPLY16H(tmp10 - tmp12, FIX_0_382683433);
+ z2 = MULTIPLY16H(tmp10, FIX_0_541196100) + z5;
+ z4 = MULTIPLY16H(tmp12, FIX_1_306562965) + z5;
+ z3 = MULTIPLY16H(tmp11, FIX_0_707106781);
+
+ z11 = tmp7 + z3;
+ z13 = tmp7 - z3;
+
+ dataptr[4] = z13 + z2;
+ dataptr[5] = z13 - z2;
+ dataptr[6] = z11 + z4;
+ dataptr[7] = z11 - z4;
+
+ pixels++; // advance pointer to next column
+ dataptr += DCTSIZE;
+ }
+}
+
+#else /* HAVE_MMX */
+
+static void row_fdct_mmx(int16_t *data, const uint8_t *pixels, int line_size, int cnt)
+{
+ DECLARE_ALIGNED(8, uint64_t, temps)[4];
+ __asm__ volatile(
+ "lea (%%"REG_a",%%"REG_a",2), %%"REG_d" \n\t"
+ "6: \n\t"
+ "movd (%%"REG_S"), %%mm0 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+
+ "movd (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+
+ "movd (%%"REG_S",%%"REG_a",2), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "add %%"REG_d", %%"REG_S" \n\t"
+
+ "movq %%mm0, %%mm5 \n\t"
+ //
+
+ "movd (%%"REG_S",%%"REG_a",4), %%mm3 \n\t" //7 ;prefetch!
+ "movq %%mm1, %%mm6 \n\t"
+
+ "movd (%%"REG_S",%%"REG_d"), %%mm4 \n\t" //6
+ "punpcklbw %%mm7, %%mm3 \n\t"
+
+ "psubw %%mm3, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+
+ "paddw %%mm3, %%mm0 \n\t"
+ "psubw %%mm4, %%mm6 \n\t"
+
+ "movd (%%"REG_S",%%"REG_a",2), %%mm3 \n\t" //5
+ "paddw %%mm4, %%mm1 \n\t"
+
+ "movq %%mm5, %3 \n\t" //t7
+ "punpcklbw %%mm7, %%mm3 \n\t"
+
+ "movq %%mm6, %4 \n\t" //t6
+ "movq %%mm2, %%mm4 \n\t"
+
+ "movd (%%"REG_S"), %%mm5 \n\t" //3
+ "paddw %%mm3, %%mm2 \n\t"
+
+ "movd (%%"REG_S",%%"REG_a"), %%mm6 \n\t" //4
+ "punpcklbw %%mm7, %%mm5 \n\t"
+
+ "psubw %%mm3, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t"
+
+ "movq %%mm5, %%mm3 \n\t"
+ "paddw %%mm6, %%mm5 \n\t" //t3
+
+ "psubw %%mm6, %%mm3 \n\t" //t4 ; t0 t1 t2 t4 t5 t3 - -
+ "movq %%mm0, %%mm6 \n\t"
+
+ "movq %%mm1, %%mm7 \n\t"
+ "psubw %%mm5, %%mm0 \n\t" //t13
+
+ "psubw %%mm2, %%mm1 \n\t"
+ "paddw %%mm2, %%mm7 \n\t" //t11
+
+ "paddw %%mm0, %%mm1 \n\t"
+ "movq %%mm7, %%mm2 \n\t"
+
+ "psllw $2, %%mm1 \n\t"
+ "paddw %%mm5, %%mm6 \n\t" //t10
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm1 \n\t"
+ "paddw %%mm6, %%mm7 \n\t" //d2
+
+ "psubw %%mm2, %%mm6 \n\t" //d3
+ "movq %%mm0, %%mm5 \n\t"
+
+ //transpose 4x4
+ "movq %%mm7, %%mm2 \n\t"
+ "punpcklwd %%mm6, %%mm7 \n\t"
+
+ "paddw %%mm1, %%mm0 \n\t" //d0
+ "punpckhwd %%mm6, %%mm2 \n\t"
+
+ "psubw %%mm1, %%mm5 \n\t" //d1
+ "movq %%mm0, %%mm6 \n\t"
+
+ "movq %4, %%mm1 \n\t"
+ "punpcklwd %%mm5, %%mm0 \n\t"
+
+ "punpckhwd %%mm5, %%mm6 \n\t"
+ "movq %%mm0, %%mm5 \n\t"
+
+ "punpckldq %%mm7, %%mm0 \n\t" //0
+ "paddw %%mm4, %%mm3 \n\t"
+
+ "punpckhdq %%mm7, %%mm5 \n\t" //1
+ "movq %%mm6, %%mm7 \n\t"
+
+ "movq %%mm0, "DCTSIZE_S"*0*2(%%"REG_D") \n\t"
+ "punpckldq %%mm2, %%mm6 \n\t" //2
+
+ "movq %%mm5, "DCTSIZE_S"*1*2(%%"REG_D") \n\t"
+ "punpckhdq %%mm2, %%mm7 \n\t" //3
+
+ "movq %%mm6, "DCTSIZE_S"*2*2(%%"REG_D") \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+
+ "movq %%mm7, "DCTSIZE_S"*3*2(%%"REG_D") \n\t"
+ "psllw $2, %%mm3 \n\t" //t10
+
+ "movq %3, %%mm2 \n\t"
+ "psllw $2, %%mm4 \n\t" //t11
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_707106781)", %%mm4 \n\t" //z3
+ "paddw %%mm2, %%mm1 \n\t"
+
+ "psllw $2, %%mm1 \n\t" //t12
+ "movq %%mm3, %%mm0 \n\t"
+
+ "pmulhw "MANGLE(ff_MM_FIX_0_541196100)", %%mm0 \n\t"
+ "psubw %%mm1, %%mm3 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_0_382683433)", %%mm3 \n\t" //z5
+ "movq %%mm2, %%mm5 \n\t"
+
+ "pmulhw "MANGLE(MM_FIX_1_306562965)", %%mm1 \n\t"
+ "psubw %%mm4, %%mm2 \n\t" //z13
+
+ "paddw %%mm4, %%mm5 \n\t" //z11
+ "movq %%mm2, %%mm6 \n\t"
+
+ "paddw %%mm3, %%mm0 \n\t" //z2
+ "movq %%mm5, %%mm7 \n\t"
+
+ "paddw %%mm0, %%mm2 \n\t" //d4
+ "psubw %%mm0, %%mm6 \n\t" //d5
+
+ "movq %%mm2, %%mm4 \n\t"
+ "paddw %%mm3, %%mm1 \n\t" //z4
+
+ //transpose 4x4
+ "punpcklwd %%mm6, %%mm2 \n\t"
+ "paddw %%mm1, %%mm5 \n\t" //d6
+
+ "punpckhwd %%mm6, %%mm4 \n\t"
+ "psubw %%mm1, %%mm7 \n\t" //d7
+
+ "movq %%mm5, %%mm6 \n\t"
+ "punpcklwd %%mm7, %%mm5 \n\t"
+
+ "punpckhwd %%mm7, %%mm6 \n\t"
+ "movq %%mm2, %%mm7 \n\t"
+
+ "punpckldq %%mm5, %%mm2 \n\t" //4
+ "sub %%"REG_d", %%"REG_S" \n\t"
+
+ "punpckhdq %%mm5, %%mm7 \n\t" //5
+ "movq %%mm4, %%mm5 \n\t"
+
+ "movq %%mm2, "DCTSIZE_S"*0*2+"DCTSIZE_S"(%%"REG_D") \n\t"
+ "punpckldq %%mm6, %%mm4 \n\t" //6
+
+ "movq %%mm7, "DCTSIZE_S"*1*2+"DCTSIZE_S"(%%"REG_D") \n\t"
+ "punpckhdq %%mm6, %%mm5 \n\t" //7
+
+ "movq %%mm4, "DCTSIZE_S"*2*2+"DCTSIZE_S"(%%"REG_D") \n\t"
+ "add $4, %%"REG_S" \n\t"
+
+ "movq %%mm5, "DCTSIZE_S"*3*2+"DCTSIZE_S"(%%"REG_D") \n\t"
+ "add $"DCTSIZE_S"*2*4, %%"REG_D" \n\t" //4 rows
+ "dec %%"REG_c" \n\t"
+ "jnz 6b \n\t"
+
+ : "+S"(pixels), "+D"(data), "+c"(cnt), "=o"(temps), "=o"(temps[1])
+ : "a"(line_size)
+ NAMED_CONSTRAINTS_ADD(ff_MM_FIX_0_707106781,ff_MM_FIX_0_541196100,MM_FIX_0_382683433,MM_FIX_1_306562965)
+ : "%"REG_d);
+}
+
+#endif // HAVE_MMX
diff --git a/libavfilter/libmpcodecs/vf_ilpack.c b/libavfilter/libmpcodecs/vf_ilpack.c
new file mode 100644
index 0000000..fbf5817
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf_ilpack.c
@@ -0,0 +1,458 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "config.h"
+#include "mp_msg.h"
+#include "cpudetect.h"
+
+#include "img_format.h"
+#include "mp_image.h"
+#include "vf.h"
+#include "libavutil/attributes.h"
+#include "libavutil/x86/asm.h"
+
+typedef void (pack_func_t)(unsigned char *dst, unsigned char *y,
+ unsigned char *u, unsigned char *v, int w, int us, int vs);
+
+struct vf_priv_s {
+ int mode;
+ pack_func_t *pack[2];
+};
+
+static void pack_nn_C(unsigned char *dst, unsigned char *y,
+ unsigned char *u, unsigned char *v, int w,
+ int av_unused us, int av_unused vs)
+{
+ int j;
+ for (j = w/2; j; j--) {
+ *dst++ = *y++;
+ *dst++ = *u++;
+ *dst++ = *y++;
+ *dst++ = *v++;
+ }
+}
+
+static void pack_li_0_C(unsigned char *dst, unsigned char *y,
+ unsigned char *u, unsigned char *v, int w, int us, int vs)
+{
+ int j;
+ for (j = w/2; j; j--) {
+ *dst++ = *y++;
+ *dst++ = (u[us+us] + 7*u[0])>>3;
+ *dst++ = *y++;
+ *dst++ = (v[vs+vs] + 7*v[0])>>3;
+ u++; v++;
+ }
+}
+
+static void pack_li_1_C(unsigned char *dst, unsigned char *y,
+ unsigned char *u, unsigned char *v, int w, int us, int vs)
+{
+ int j;
+ for (j = w/2; j; j--) {
+ *dst++ = *y++;
+ *dst++ = (3*u[us+us] + 5*u[0])>>3;
+ *dst++ = *y++;
+ *dst++ = (3*v[vs+vs] + 5*v[0])>>3;
+ u++; v++;
+ }
+}
+
+#if HAVE_MMX
+static void pack_nn_MMX(unsigned char *dst, unsigned char *y,
+ unsigned char *u, unsigned char *v, int w,
+ int av_unused us, int av_unused vs)
+{
+ __asm__ volatile (""
+ ASMALIGN(4)
+ "1: \n\t"
+ "movq (%0), %%mm1 \n\t"
+ "movq (%0), %%mm2 \n\t"
+ "movq (%1), %%mm4 \n\t"
+ "movq (%2), %%mm6 \n\t"
+ "punpcklbw %%mm6, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm1 \n\t"
+ "punpckhbw %%mm4, %%mm2 \n\t"
+
+ "add $8, %0 \n\t"
+ "add $4, %1 \n\t"
+ "add $4, %2 \n\t"
+ "movq %%mm1, (%3) \n\t"
+ "movq %%mm2, 8(%3) \n\t"
+ "add $16, %3 \n\t"
+ "decl %4 \n\t"
+ "jnz 1b \n\t"
+ "emms \n\t"
+ :
+ : "r" (y), "r" (u), "r" (v), "r" (dst), "r" (w/8)
+ : "memory"
+ );
+ pack_nn_C(dst, y, u, v, (w&7), 0, 0);
+}
+
+#if HAVE_EBX_AVAILABLE
+static void pack_li_0_MMX(unsigned char *dst, unsigned char *y,
+ unsigned char *u, unsigned char *v, int w, int us, int vs)
+{
+ __asm__ volatile (""
+ "push %%"REG_BP" \n\t"
+#if ARCH_X86_64
+ "mov %6, %%"REG_BP" \n\t"
+#else
+ "movl 4(%%"REG_d"), %%"REG_BP" \n\t"
+ "movl (%%"REG_d"), %%"REG_d" \n\t"
+#endif
+ "pxor %%mm0, %%mm0 \n\t"
+
+ ASMALIGN(4)
+ "2: \n\t"
+ "movq (%%"REG_S"), %%mm1 \n\t"
+ "movq (%%"REG_S"), %%mm2 \n\t"
+
+ "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+ "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
+ "punpcklbw %%mm0, %%mm4 \n\t"
+ "punpcklbw %%mm0, %%mm6 \n\t"
+ "movq (%%"REG_a"), %%mm3 \n\t"
+ "movq (%%"REG_b"), %%mm5 \n\t"
+ "punpcklbw %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm0, %%mm5 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "psrlw $3, %%mm4 \n\t"
+ "psrlw $3, %%mm6 \n\t"
+ "packuswb %%mm4, %%mm4 \n\t"
+ "packuswb %%mm6, %%mm6 \n\t"
+ "punpcklbw %%mm6, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm1 \n\t"
+ "punpckhbw %%mm4, %%mm2 \n\t"
+
+ "movq %%mm1, (%%"REG_D") \n\t"
+ "movq %%mm2, 8(%%"REG_D") \n\t"
+
+ "movq 8(%%"REG_S"), %%mm1 \n\t"
+ "movq 8(%%"REG_S"), %%mm2 \n\t"
+
+ "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+ "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
+ "punpckhbw %%mm0, %%mm4 \n\t"
+ "punpckhbw %%mm0, %%mm6 \n\t"
+ "movq (%%"REG_a"), %%mm3 \n\t"
+ "movq (%%"REG_b"), %%mm5 \n\t"
+ "punpckhbw %%mm0, %%mm3 \n\t"
+ "punpckhbw %%mm0, %%mm5 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "psrlw $3, %%mm4 \n\t"
+ "psrlw $3, %%mm6 \n\t"
+ "packuswb %%mm4, %%mm4 \n\t"
+ "packuswb %%mm6, %%mm6 \n\t"
+ "punpcklbw %%mm6, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm1 \n\t"
+ "punpckhbw %%mm4, %%mm2 \n\t"
+
+ "add $16, %%"REG_S" \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "add $8, %%"REG_b" \n\t"
+
+ "movq %%mm1, 16(%%"REG_D") \n\t"
+ "movq %%mm2, 24(%%"REG_D") \n\t"
+ "add $32, %%"REG_D" \n\t"
+
+ "decl %%ecx \n\t"
+ "jnz 2b \n\t"
+ "emms \n\t"
+ "pop %%"REG_BP" \n\t"
+ :
+ : "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
+#if ARCH_X86_64
+ "d" ((x86_reg)us), "r" ((x86_reg)vs)
+#else
+ "d" (&us)
+#endif
+ : "memory"
+ );
+ pack_li_0_C(dst, y, u, v, (w&15), us, vs);
+}
+
+static void pack_li_1_MMX(unsigned char *dst, unsigned char *y,
+ unsigned char *u, unsigned char *v, int w, int us, int vs)
+{
+ __asm__ volatile (""
+ "push %%"REG_BP" \n\t"
+#if ARCH_X86_64
+ "mov %6, %%"REG_BP" \n\t"
+#else
+ "movl 4(%%"REG_d"), %%"REG_BP" \n\t"
+ "movl (%%"REG_d"), %%"REG_d" \n\t"
+#endif
+ "pxor %%mm0, %%mm0 \n\t"
+
+ ASMALIGN(4)
+ "3: \n\t"
+ "movq (%%"REG_S"), %%mm1 \n\t"
+ "movq (%%"REG_S"), %%mm2 \n\t"
+
+ "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+ "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
+ "punpcklbw %%mm0, %%mm4 \n\t"
+ "punpcklbw %%mm0, %%mm6 \n\t"
+ "movq (%%"REG_a"), %%mm3 \n\t"
+ "movq (%%"REG_b"), %%mm5 \n\t"
+ "punpcklbw %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm0, %%mm5 \n\t"
+ "movq %%mm4, %%mm7 \n\t"
+ "paddw %%mm4, %%mm4 \n\t"
+ "paddw %%mm7, %%mm4 \n\t"
+ "movq %%mm6, %%mm7 \n\t"
+ "paddw %%mm6, %%mm6 \n\t"
+ "paddw %%mm7, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "psrlw $3, %%mm4 \n\t"
+ "psrlw $3, %%mm6 \n\t"
+ "packuswb %%mm4, %%mm4 \n\t"
+ "packuswb %%mm6, %%mm6 \n\t"
+ "punpcklbw %%mm6, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm1 \n\t"
+ "punpckhbw %%mm4, %%mm2 \n\t"
+
+ "movq %%mm1, (%%"REG_D") \n\t"
+ "movq %%mm2, 8(%%"REG_D") \n\t"
+
+ "movq 8(%%"REG_S"), %%mm1 \n\t"
+ "movq 8(%%"REG_S"), %%mm2 \n\t"
+
+ "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+ "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
+ "punpckhbw %%mm0, %%mm4 \n\t"
+ "punpckhbw %%mm0, %%mm6 \n\t"
+ "movq (%%"REG_a"), %%mm3 \n\t"
+ "movq (%%"REG_b"), %%mm5 \n\t"
+ "punpckhbw %%mm0, %%mm3 \n\t"
+ "punpckhbw %%mm0, %%mm5 \n\t"
+ "movq %%mm4, %%mm7 \n\t"
+ "paddw %%mm4, %%mm4 \n\t"
+ "paddw %%mm7, %%mm4 \n\t"
+ "movq %%mm6, %%mm7 \n\t"
+ "paddw %%mm6, %%mm6 \n\t"
+ "paddw %%mm7, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "paddw %%mm3, %%mm4 \n\t"
+ "paddw %%mm5, %%mm6 \n\t"
+ "psrlw $3, %%mm4 \n\t"
+ "psrlw $3, %%mm6 \n\t"
+ "packuswb %%mm4, %%mm4 \n\t"
+ "packuswb %%mm6, %%mm6 \n\t"
+ "punpcklbw %%mm6, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm1 \n\t"
+ "punpckhbw %%mm4, %%mm2 \n\t"
+
+ "add $16, %%"REG_S" \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "add $8, %%"REG_b" \n\t"
+
+ "movq %%mm1, 16(%%"REG_D") \n\t"
+ "movq %%mm2, 24(%%"REG_D") \n\t"
+ "add $32, %%"REG_D" \n\t"
+
+ "decl %%ecx \n\t"
+ "jnz 3b \n\t"
+ "emms \n\t"
+ "pop %%"REG_BP" \n\t"
+ :
+ : "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
+#if ARCH_X86_64
+ "d" ((x86_reg)us), "r" ((x86_reg)vs)
+#else
+ "d" (&us)
+#endif
+ : "memory"
+ );
+ pack_li_1_C(dst, y, u, v, (w&15), us, vs);
+}
+#endif /* HAVE_EBX_AVAILABLE */
+#endif
+
+static pack_func_t *pack_nn;
+static pack_func_t *pack_li_0;
+static pack_func_t *pack_li_1;
+
+static void ilpack(unsigned char *dst, unsigned char *src[3],
+ int dststride, int srcstride[3], int w, int h, pack_func_t *pack[2])
+{
+ int i;
+ unsigned char *y, *u, *v;
+ int ys = srcstride[0], us = srcstride[1], vs = srcstride[2];
+ int a, b;
+
+ y = src[0];
+ u = src[1];
+ v = src[2];
+
+ pack_nn(dst, y, u, v, w, 0, 0);
+ y += ys; dst += dststride;
+ pack_nn(dst, y, u+us, v+vs, w, 0, 0);
+ y += ys; dst += dststride;
+ for (i=2; i<h-2; i++) {
+ a = (i&2) ? 1 : -1;
+ b = (i&1) ^ ((i&2)>>1);
+ pack[b](dst, y, u, v, w, us*a, vs*a);
+ y += ys;
+ if ((i&3) == 1) {
+ u -= us;
+ v -= vs;
+ } else {
+ u += us;
+ v += vs;
+ }
+ dst += dststride;
+ }
+ pack_nn(dst, y, u, v, w, 0, 0);
+ y += ys; dst += dststride; u += us; v += vs;
+ pack_nn(dst, y, u, v, w, 0, 0);
+}
+
+
+static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
+{
+ mp_image_t *dmpi;
+
+ // hope we'll get DR buffer:
+ dmpi=ff_vf_get_image(vf->next, IMGFMT_YUY2,
+ MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
+ mpi->w, mpi->h);
+
+ ilpack(dmpi->planes[0], mpi->planes, dmpi->stride[0], mpi->stride, mpi->w, mpi->h, vf->priv->pack);
+
+ return ff_vf_next_put_image(vf,dmpi, pts);
+}
+
+static int config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt)
+{
+ /* FIXME - also support UYVY output? */
+ return ff_vf_next_config(vf, width, height, d_width, d_height, flags, IMGFMT_YUY2);
+}
+
+
+static int query_format(struct vf_instance *vf, unsigned int fmt)
+{
+ /* FIXME - really any YUV 4:2:0 input format should work */
+ switch (fmt) {
+ case IMGFMT_YV12:
+ case IMGFMT_IYUV:
+ case IMGFMT_I420:
+ return ff_vf_next_query_format(vf,IMGFMT_YUY2);
+ }
+ return 0;
+}
+
+static int vf_open(vf_instance_t *vf, char *args)
+{
+ vf->config=config;
+ vf->query_format=query_format;
+ vf->put_image=put_image;
+ vf->priv = calloc(1, sizeof(struct vf_priv_s));
+ vf->priv->mode = 1;
+ if (args) sscanf(args, "%d", &vf->priv->mode);
+
+ pack_nn = pack_nn_C;
+ pack_li_0 = pack_li_0_C;
+ pack_li_1 = pack_li_1_C;
+#if HAVE_MMX
+ if(ff_gCpuCaps.hasMMX) {
+ pack_nn = pack_nn_MMX;
+#if HAVE_EBX_AVAILABLE
+ pack_li_0 = pack_li_0_MMX;
+ pack_li_1 = pack_li_1_MMX;
+#endif
+ }
+#endif
+
+ switch(vf->priv->mode) {
+ case 0:
+ vf->priv->pack[0] = vf->priv->pack[1] = pack_nn;
+ break;
+ default:
+ ff_mp_msg(MSGT_VFILTER, MSGL_WARN,
+ "ilpack: unknown mode %d (fallback to linear)\n",
+ vf->priv->mode);
+ /* Fallthrough */
+ case 1:
+ vf->priv->pack[0] = pack_li_0;
+ vf->priv->pack[1] = pack_li_1;
+ break;
+ }
+
+ return 1;
+}
+
+const vf_info_t ff_vf_info_ilpack = {
+ "4:2:0 planar -> 4:2:2 packed reinterlacer",
+ "ilpack",
+ "Richard Felker",
+ "",
+ vf_open,
+ NULL
+};
diff --git a/libavfilter/libmpcodecs/vf_pp7.c b/libavfilter/libmpcodecs/vf_pp7.c
new file mode 100644
index 0000000..89ed4fe
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf_pp7.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <math.h>
+
+#include "config.h"
+
+#include "mp_msg.h"
+#include "cpudetect.h"
+
+#if HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#include "libavutil/mem.h"
+
+#include "img_format.h"
+#include "mp_image.h"
+#include "vf.h"
+#include "libvo/fastmemcpy.h"
+
+#define XMIN(a,b) ((a) < (b) ? (a) : (b))
+#define XMAX(a,b) ((a) > (b) ? (a) : (b))
+
+//===========================================================================//
+DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
+{ 0, 48, 12, 60, 3, 51, 15, 63, },
+{ 32, 16, 44, 28, 35, 19, 47, 31, },
+{ 8, 56, 4, 52, 11, 59, 7, 55, },
+{ 40, 24, 36, 20, 43, 27, 39, 23, },
+{ 2, 50, 14, 62, 1, 49, 13, 61, },
+{ 34, 18, 46, 30, 33, 17, 45, 29, },
+{ 10, 58, 6, 54, 9, 57, 5, 53, },
+{ 42, 26, 38, 22, 41, 25, 37, 21, },
+};
+
+struct vf_priv_s {
+ int qp;
+ int mode;
+ int mpeg2;
+ int temp_stride;
+ uint8_t *src;
+};
+#if 0
+static inline void dct7_c(int16_t *dst, int s0, int s1, int s2, int s3, int step){
+ int s, d;
+ int dst2[64];
+//#define S0 (1024/0.37796447300922719759)
+#define C0 ((int)(1024*0.37796447300922719759+0.5)) //sqrt(1/7)
+#define C1 ((int)(1024*0.53452248382484879308/6+0.5)) //sqrt(2/7)/6
+
+#define C2 ((int)(1024*0.45221175985034745004/2+0.5))
+#define C3 ((int)(1024*0.36264567479870879474/2+0.5))
+
+//0.1962505182412941918 0.0149276808419397944-0.2111781990832339584
+#define C4 ((int)(1024*0.1962505182412941918+0.5))
+#define C5 ((int)(1024*0.0149276808419397944+0.5))
+//#define C6 ((int)(1024*0.2111781990832339584+0.5))
+#if 0
+ s= s0 + s1 + s2;
+ dst[0*step] = ((s + s3)*C0 + 512) >> 10;
+ s= (s - 6*s3)*C1 + 512;
+ d= (s0-s2)*C4 + (s1-s2)*C5;
+ dst[1*step] = (s + 2*d)>>10;
+ s -= d;
+ d= (s1-s0)*C2 + (s1-s2)*C3;
+ dst[2*step] = (s + d)>>10;
+ dst[3*step] = (s - d)>>10;
+#elif 1
+ s = s3+s3;
+ s3= s-s0;
+ s0= s+s0;
+ s = s2+s1;
+ s2= s2-s1;
+ dst[0*step]= s0 + s;
+ dst[2*step]= s0 - s;
+ dst[1*step]= 2*s3 + s2;
+ dst[3*step]= s3 - 2*s2;
+#else
+ int i,j,n=7;
+ for(i=0; i<7; i+=2){
+ dst2[i*step/2]= 0;
+ for(j=0; j<4; j++)
+ dst2[i*step/2] += src[j*step] * cos(i*M_PI/n*(j+0.5)) * sqrt((i?2.0:1.0)/n);
+ if(fabs(dst2[i*step/2] - dst[i*step/2]) > 20)
+ printf("%d %d %d (%d %d %d %d) -> (%d %d %d %d)\n", i,dst2[i*step/2], dst[i*step/2],src[0*step], src[1*step], src[2*step], src[3*step], dst[0*step], dst[1*step],dst[2*step],dst[3*step]);
+ }
+#endif
+}
+#endif
+
+static inline void dctA_c(int16_t *dst, uint8_t *src, int stride){
+ int i;
+
+ for(i=0; i<4; i++){
+ int s0= src[0*stride] + src[6*stride];
+ int s1= src[1*stride] + src[5*stride];
+ int s2= src[2*stride] + src[4*stride];
+ int s3= src[3*stride];
+ int s= s3+s3;
+ s3= s-s0;
+ s0= s+s0;
+ s = s2+s1;
+ s2= s2-s1;
+ dst[0]= s0 + s;
+ dst[2]= s0 - s;
+ dst[1]= 2*s3 + s2;
+ dst[3]= s3 - 2*s2;
+ src++;
+ dst+=4;
+ }
+}
+
+static void dctB_c(int16_t *dst, int16_t *src){
+ int i;
+
+ for(i=0; i<4; i++){
+ int s0= src[0*4] + src[6*4];
+ int s1= src[1*4] + src[5*4];
+ int s2= src[2*4] + src[4*4];
+ int s3= src[3*4];
+ int s= s3+s3;
+ s3= s-s0;
+ s0= s+s0;
+ s = s2+s1;
+ s2= s2-s1;
+ dst[0*4]= s0 + s;
+ dst[2*4]= s0 - s;
+ dst[1*4]= 2*s3 + s2;
+ dst[3*4]= s3 - 2*s2;
+ src++;
+ dst++;
+ }
+}
+
+#if HAVE_MMX
+static void dctB_mmx(int16_t *dst, int16_t *src){
+ __asm__ volatile (
+ "movq (%0), %%mm0 \n\t"
+ "movq 1*4*2(%0), %%mm1 \n\t"
+ "paddw 6*4*2(%0), %%mm0 \n\t"
+ "paddw 5*4*2(%0), %%mm1 \n\t"
+ "movq 2*4*2(%0), %%mm2 \n\t"
+ "movq 3*4*2(%0), %%mm3 \n\t"
+ "paddw 4*4*2(%0), %%mm2 \n\t"
+ "paddw %%mm3, %%mm3 \n\t" //s
+ "movq %%mm3, %%mm4 \n\t" //s
+ "psubw %%mm0, %%mm3 \n\t" //s-s0
+ "paddw %%mm0, %%mm4 \n\t" //s+s0
+ "movq %%mm2, %%mm0 \n\t" //s2
+ "psubw %%mm1, %%mm2 \n\t" //s2-s1
+ "paddw %%mm1, %%mm0 \n\t" //s2+s1
+ "movq %%mm4, %%mm1 \n\t" //s0'
+ "psubw %%mm0, %%mm4 \n\t" //s0'-s'
+ "paddw %%mm0, %%mm1 \n\t" //s0'+s'
+ "movq %%mm3, %%mm0 \n\t" //s3'
+ "psubw %%mm2, %%mm3 \n\t"
+ "psubw %%mm2, %%mm3 \n\t"
+ "paddw %%mm0, %%mm2 \n\t"
+ "paddw %%mm0, %%mm2 \n\t"
+ "movq %%mm1, (%1) \n\t"
+ "movq %%mm4, 2*4*2(%1) \n\t"
+ "movq %%mm2, 1*4*2(%1) \n\t"
+ "movq %%mm3, 3*4*2(%1) \n\t"
+ :: "r" (src), "r"(dst)
+ );
+}
+#endif
+
+static void (*dctB)(int16_t *dst, int16_t *src)= dctB_c;
+
+#define N0 4
+#define N1 5
+#define N2 10
+#define SN0 2
+#define SN1 2.2360679775
+#define SN2 3.16227766017
+#define N (1<<16)
+
+static const int factor[16]={
+ N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
+ N/(N1*N0), N/(N1*N1), N/(N1*N0),N/(N1*N2),
+ N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
+ N/(N2*N0), N/(N2*N1), N/(N2*N0),N/(N2*N2),
+};
+
+static const int thres[16]={
+ N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2),
+ N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2),
+ N/(SN0*SN0), N/(SN0*SN2), N/(SN0*SN0),N/(SN0*SN2),
+ N/(SN2*SN0), N/(SN2*SN2), N/(SN2*SN0),N/(SN2*SN2),
+};
+
+static int thres2[99][16];
+
+static void init_thres2(void){
+ int qp, i;
+ int bias= 0; //FIXME
+
+ for(qp=0; qp<99; qp++){
+ for(i=0; i<16; i++){
+ thres2[qp][i]= ((i&1)?SN2:SN0) * ((i&4)?SN2:SN0) * XMAX(1,qp) * (1<<2) - 1 - bias;
+ }
+ }
+}
+
+static int hardthresh_c(int16_t *src, int qp){
+ int i;
+ int a;
+
+ a= src[0] * factor[0];
+ for(i=1; i<16; i++){
+ unsigned int threshold1= thres2[qp][i];
+ unsigned int threshold2= (threshold1<<1);
+ int level= src[i];
+ if(((unsigned)(level+threshold1))>threshold2){
+ a += level * factor[i];
+ }
+ }
+ return (a + (1<<11))>>12;
+}
+
+static int mediumthresh_c(int16_t *src, int qp){
+ int i;
+ int a;
+
+ a= src[0] * factor[0];
+ for(i=1; i<16; i++){
+ unsigned int threshold1= thres2[qp][i];
+ unsigned int threshold2= (threshold1<<1);
+ int level= src[i];
+ if(((unsigned)(level+threshold1))>threshold2){
+ if(((unsigned)(level+2*threshold1))>2*threshold2){
+ a += level * factor[i];
+ }else{
+ if(level>0) a+= 2*(level - (int)threshold1)*factor[i];
+ else a+= 2*(level + (int)threshold1)*factor[i];
+ }
+ }
+ }
+ return (a + (1<<11))>>12;
+}
+
+static int softthresh_c(int16_t *src, int qp){
+ int i;
+ int a;
+
+ a= src[0] * factor[0];
+ for(i=1; i<16; i++){
+ unsigned int threshold1= thres2[qp][i];
+ unsigned int threshold2= (threshold1<<1);
+ int level= src[i];
+ if(((unsigned)(level+threshold1))>threshold2){
+ if(level>0) a+= (level - (int)threshold1)*factor[i];
+ else a+= (level + (int)threshold1)*factor[i];
+ }
+ }
+ return (a + (1<<11))>>12;
+}
+
+static int (*requantize)(int16_t *src, int qp)= hardthresh_c;
+
+static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){
+ int x, y;
+ const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15));
+ uint8_t *p_src= p->src + 8*stride;
+ int16_t *block= (int16_t *)p->src;
+ int16_t *temp= (int16_t *)(p->src + 32);
+
+ if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
+ for(y=0; y<height; y++){
+ int index= 8 + 8*stride + y*stride;
+ fast_memcpy(p_src + index, src + y*src_stride, width);
+ for(x=0; x<8; x++){
+ p_src[index - x - 1]= p_src[index + x ];
+ p_src[index + width + x ]= p_src[index + width - x - 1];
+ }
+ }
+ for(y=0; y<8; y++){
+ fast_memcpy(p_src + ( 7-y)*stride, p_src + ( y+8)*stride, stride);
+ fast_memcpy(p_src + (height+8+y)*stride, p_src + (height-y+7)*stride, stride);
+ }
+ //FIXME (try edge emu)
+
+ for(y=0; y<height; y++){
+ for(x=-8; x<0; x+=4){
+ const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset
+ uint8_t *src = p_src + index;
+ int16_t *tp= temp+4*x;
+
+ dctA_c(tp+4*8, src, stride);
+ }
+ for(x=0; x<width; ){
+ const int qps= 3 + is_luma;
+ int qp;
+ int end= XMIN(x+8, width);
+
+ if(p->qp)
+ qp= p->qp;
+ else{
+ qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride];
+ qp=norm_qscale(qp, p->mpeg2);
+ }
+ for(; x<end; x++){
+ const int index= x + y*stride + (8-3)*(1+stride) + 8; //FIXME silly offset
+ uint8_t *src = p_src + index;
+ int16_t *tp= temp+4*x;
+ int v;
+
+ if((x&3)==0)
+ dctA_c(tp+4*8, src, stride);
+
+ dctB(block, tp);
+
+ v= requantize(block, qp);
+ v= (v + dither[y&7][x&7])>>6;
+ if((unsigned)v > 255)
+ v= (-v)>>31;
+ dst[x + y*dst_stride]= v;
+ }
+ }
+ }
+}
+
+static int config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt){
+ int h= (height+16+15)&(~15);
+
+ vf->priv->temp_stride= (width+16+15)&(~15);
+ vf->priv->src = av_malloc(vf->priv->temp_stride*(h+8)*sizeof(uint8_t));
+
+ return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
+}
+
+static void get_image(struct vf_instance *vf, mp_image_t *mpi){
+ if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
+ // ok, we can do pp in-place (or pp disabled):
+ vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
+ mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
+ mpi->planes[0]=vf->dmpi->planes[0];
+ mpi->stride[0]=vf->dmpi->stride[0];
+ mpi->width=vf->dmpi->width;
+ if(mpi->flags&MP_IMGFLAG_PLANAR){
+ mpi->planes[1]=vf->dmpi->planes[1];
+ mpi->planes[2]=vf->dmpi->planes[2];
+ mpi->stride[1]=vf->dmpi->stride[1];
+ mpi->stride[2]=vf->dmpi->stride[2];
+ }
+ mpi->flags|=MP_IMGFLAG_DIRECT;
+}
+
+static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
+ mp_image_t *dmpi;
+
+ if(mpi->flags&MP_IMGFLAG_DIRECT){
+ dmpi=vf->dmpi;
+ }else{
+ // no DR, so get a new image! hope we'll get DR buffer:
+ dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
+ MP_IMGTYPE_TEMP,
+ MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
+ mpi->width,mpi->height);
+ ff_vf_clone_mpi_attributes(dmpi, mpi);
+ }
+
+ vf->priv->mpeg2= mpi->qscale_type;
+ if(mpi->qscale || vf->priv->qp){
+ filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, mpi->qscale, mpi->qstride, 1);
+ filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0);
+ filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, mpi->qscale, mpi->qstride, 0);
+ }else{
+ memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
+ memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
+ memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
+ }
+
+#if HAVE_MMX
+ if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
+#endif
+#if HAVE_MMX2
+ if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
+#endif
+
+ return ff_vf_next_put_image(vf,dmpi, pts);
+}
+
+static void uninit(struct vf_instance *vf){
+ if(!vf->priv) return;
+
+ av_free(vf->priv->src);
+ vf->priv->src= NULL;
+
+ free(vf->priv);
+ vf->priv=NULL;
+}
+
+//===========================================================================//
+static int query_format(struct vf_instance *vf, unsigned int fmt){
+ switch(fmt){
+ case IMGFMT_YVU9:
+ case IMGFMT_IF09:
+ case IMGFMT_YV12:
+ case IMGFMT_I420:
+ case IMGFMT_IYUV:
+ case IMGFMT_CLPL:
+ case IMGFMT_Y800:
+ case IMGFMT_Y8:
+ case IMGFMT_444P:
+ case IMGFMT_422P:
+ case IMGFMT_411P:
+ return ff_vf_next_query_format(vf,fmt);
+ }
+ return 0;
+}
+
+static int control(struct vf_instance *vf, int request, void* data){
+ return ff_vf_next_control(vf,request,data);
+}
+
+static int vf_open(vf_instance_t *vf, char *args){
+ vf->config=config;
+ vf->put_image=put_image;
+ vf->get_image=get_image;
+ vf->query_format=query_format;
+ vf->uninit=uninit;
+ vf->control= control;
+ vf->priv=malloc(sizeof(struct vf_priv_s));
+ memset(vf->priv, 0, sizeof(struct vf_priv_s));
+
+ if (args) sscanf(args, "%d:%d", &vf->priv->qp, &vf->priv->mode);
+
+ if(vf->priv->qp < 0)
+ vf->priv->qp = 0;
+
+ init_thres2();
+
+ switch(vf->priv->mode){
+ case 0: requantize= hardthresh_c; break;
+ case 1: requantize= softthresh_c; break;
+ default:
+ case 2: requantize= mediumthresh_c; break;
+ }
+
+#if HAVE_MMX
+ if(ff_gCpuCaps.hasMMX){
+ dctB= dctB_mmx;
+ }
+#endif
+#if 0
+ if(ff_gCpuCaps.hasMMX){
+ switch(vf->priv->mode){
+ case 0: requantize= hardthresh_mmx; break;
+ case 1: requantize= softthresh_mmx; break;
+ }
+ }
+#endif
+
+ return 1;
+}
+
+const vf_info_t ff_vf_info_pp7 = {
+ "postprocess 7",
+ "pp7",
+ "Michael Niedermayer",
+ "",
+ vf_open,
+ NULL
+};
diff --git a/libavfilter/libmpcodecs/vf_softpulldown.c b/libavfilter/libmpcodecs/vf_softpulldown.c
new file mode 100644
index 0000000..556374e
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf_softpulldown.c
@@ -0,0 +1,163 @@
+/*
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "config.h"
+#include "mp_msg.h"
+
+#include "img_format.h"
+#include "mp_image.h"
+#include "vf.h"
+
+#include "libvo/fastmemcpy.h"
+
+struct vf_priv_s {
+ int state;
+ long long in;
+ long long out;
+};
+
+static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
+{
+ mp_image_t *dmpi;
+ int ret = 0;
+ int flags = mpi->fields;
+ int state = vf->priv->state;
+
+ dmpi = ff_vf_get_image(vf->next, mpi->imgfmt,
+ MP_IMGTYPE_STATIC, MP_IMGFLAG_ACCEPT_STRIDE |
+ MP_IMGFLAG_PRESERVE, mpi->width, mpi->height);
+
+ vf->priv->in++;
+
+ if ((state == 0 &&
+ !(flags & MP_IMGFIELD_TOP_FIRST)) ||
+ (state == 1 &&
+ flags & MP_IMGFIELD_TOP_FIRST)) {
+ ff_mp_msg(MSGT_VFILTER, MSGL_WARN,
+ "softpulldown: Unexpected field flags: state=%d top_field_first=%d repeat_first_field=%d\n",
+ state,
+ (flags & MP_IMGFIELD_TOP_FIRST) != 0,
+ (flags & MP_IMGFIELD_REPEAT_FIRST) != 0);
+ state ^= 1;
+ }
+
+ if (state == 0) {
+ ret = ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE);
+ vf->priv->out++;
+ if (flags & MP_IMGFIELD_REPEAT_FIRST) {
+ my_memcpy_pic(dmpi->planes[0],
+ mpi->planes[0], mpi->w, mpi->h/2,
+ dmpi->stride[0]*2, mpi->stride[0]*2);
+ if (mpi->flags & MP_IMGFLAG_PLANAR) {
+ my_memcpy_pic(dmpi->planes[1],
+ mpi->planes[1],
+ mpi->chroma_width,
+ mpi->chroma_height/2,
+ dmpi->stride[1]*2,
+ mpi->stride[1]*2);
+ my_memcpy_pic(dmpi->planes[2],
+ mpi->planes[2],
+ mpi->chroma_width,
+ mpi->chroma_height/2,
+ dmpi->stride[2]*2,
+ mpi->stride[2]*2);
+ }
+ state=1;
+ }
+ } else {
+ my_memcpy_pic(dmpi->planes[0]+dmpi->stride[0],
+ mpi->planes[0]+mpi->stride[0], mpi->w, mpi->h/2,
+ dmpi->stride[0]*2, mpi->stride[0]*2);
+ if (mpi->flags & MP_IMGFLAG_PLANAR) {
+ my_memcpy_pic(dmpi->planes[1]+dmpi->stride[1],
+ mpi->planes[1]+mpi->stride[1],
+ mpi->chroma_width, mpi->chroma_height/2,
+ dmpi->stride[1]*2, mpi->stride[1]*2);
+ my_memcpy_pic(dmpi->planes[2]+dmpi->stride[2],
+ mpi->planes[2]+mpi->stride[2],
+ mpi->chroma_width, mpi->chroma_height/2,
+ dmpi->stride[2]*2, mpi->stride[2]*2);
+ }
+ ret = ff_vf_next_put_image(vf, dmpi, MP_NOPTS_VALUE);
+ vf->priv->out++;
+ if (flags & MP_IMGFIELD_REPEAT_FIRST) {
+ ret |= ff_vf_next_put_image(vf, mpi, MP_NOPTS_VALUE);
+ vf->priv->out++;
+ state=0;
+ } else {
+ my_memcpy_pic(dmpi->planes[0],
+ mpi->planes[0], mpi->w, mpi->h/2,
+ dmpi->stride[0]*2, mpi->stride[0]*2);
+ if (mpi->flags & MP_IMGFLAG_PLANAR) {
+ my_memcpy_pic(dmpi->planes[1],
+ mpi->planes[1],
+ mpi->chroma_width,
+ mpi->chroma_height/2,
+ dmpi->stride[1]*2,
+ mpi->stride[1]*2);
+ my_memcpy_pic(dmpi->planes[2],
+ mpi->planes[2],
+ mpi->chroma_width,
+ mpi->chroma_height/2,
+ dmpi->stride[2]*2,
+ mpi->stride[2]*2);
+ }
+ }
+ }
+
+ vf->priv->state = state;
+
+ return ret;
+}
+
+static int config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt)
+{
+ return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
+}
+
+static void uninit(struct vf_instance *vf)
+{
+ ff_mp_msg(MSGT_VFILTER, MSGL_INFO, "softpulldown: %lld frames in, %lld frames out\n", vf->priv->in, vf->priv->out);
+ free(vf->priv);
+}
+
+static int vf_open(vf_instance_t *vf, char *args)
+{
+ vf->config = config;
+ vf->put_image = put_image;
+ vf->uninit = uninit;
+ vf->default_reqs = VFCAP_ACCEPT_STRIDE;
+ vf->priv = calloc(1, sizeof(struct vf_priv_s));
+ vf->priv->state = 0;
+ return 1;
+}
+
+const vf_info_t ff_vf_info_softpulldown = {
+ "mpeg2 soft 3:2 pulldown",
+ "softpulldown",
+ "Tobias Diedrich <ranma+mplayer@tdiedrich.de>",
+ "",
+ vf_open,
+ NULL
+};
diff --git a/libavfilter/libmpcodecs/vf_uspp.c b/libavfilter/libmpcodecs/vf_uspp.c
new file mode 100644
index 0000000..c9d9c1f
--- /dev/null
+++ b/libavfilter/libmpcodecs/vf_uspp.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <math.h>
+#include <assert.h>
+
+#include "config.h"
+
+#include "mp_msg.h"
+#include "cpudetect.h"
+
+#include "libavutil/mem.h"
+#include "libavcodec/avcodec.h"
+
+#include "img_format.h"
+#include "mp_image.h"
+#include "vf.h"
+#include "av_helpers.h"
+#include "libvo/fastmemcpy.h"
+
+#define XMIN(a,b) ((a) < (b) ? (a) : (b))
+
+#define BLOCK 16
+
+//===========================================================================//
+DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
+{ 0*4, 48*4, 12*4, 60*4, 3*4, 51*4, 15*4, 63*4, },
+{ 32*4, 16*4, 44*4, 28*4, 35*4, 19*4, 47*4, 31*4, },
+{ 8*4, 56*4, 4*4, 52*4, 11*4, 59*4, 7*4, 55*4, },
+{ 40*4, 24*4, 36*4, 20*4, 43*4, 27*4, 39*4, 23*4, },
+{ 2*4, 50*4, 14*4, 62*4, 1*4, 49*4, 13*4, 61*4, },
+{ 34*4, 18*4, 46*4, 30*4, 33*4, 17*4, 45*4, 29*4, },
+{ 10*4, 58*4, 6*4, 54*4, 9*4, 57*4, 5*4, 53*4, },
+{ 42*4, 26*4, 38*4, 22*4, 41*4, 25*4, 37*4, 21*4, },
+};
+
+static const uint8_t offset[511][2]= {
+{ 0, 0},
+{ 0, 0}, { 8, 8},
+{ 0, 0}, { 4, 4}, {12, 8}, { 8,12},
+{ 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14},
+
+{ 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14},
+{ 5, 1}, {15, 3}, { 9, 5}, { 3, 7}, {13, 9}, { 7,11}, { 1,13}, {11,15},
+
+{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+{ 2, 2}, {10, 2}, { 2,10}, {10,10}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+{ 4, 4}, {12, 4}, { 4,12}, {12,12}, { 1, 5}, { 9, 5}, { 1,13}, { 9,13},
+{ 6, 6}, {14, 6}, { 6,14}, {14,14}, { 3, 7}, {11, 7}, { 3,15}, {11,15},
+
+{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8},
+{ 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+{ 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 2}, {15, 2}, { 7,10}, {15,10},
+{ 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 3}, {14, 3}, { 6,11}, {14,11},
+{ 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 4}, {12, 4}, { 4,12}, {12,12},
+{ 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 5}, {13, 5}, { 5,13}, {13,13},
+{ 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 6}, {15, 6}, { 7,14}, {15,14},
+{ 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 7}, {14, 7}, { 6,15}, {14,15},
+
+{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 0, 2}, { 8, 2}, { 0,10}, { 8,10},
+{ 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 0, 6}, { 8, 6}, { 0,14}, { 8,14},
+{ 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 1, 3}, { 9, 3}, { 1,11}, { 9,11},
+{ 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 1, 7}, { 9, 7}, { 1,15}, { 9,15},
+{ 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 2, 2}, {10, 2}, { 2,10}, {10,10},
+{ 2, 4}, {10, 4}, { 2,12}, {10,12}, { 2, 6}, {10, 6}, { 2,14}, {10,14},
+{ 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 3, 3}, {11, 3}, { 3,11}, {11,11},
+{ 3, 5}, {11, 5}, { 3,13}, {11,13}, { 3, 7}, {11, 7}, { 3,15}, {11,15},
+{ 4, 0}, {12, 0}, { 4, 8}, {12, 8}, { 4, 2}, {12, 2}, { 4,10}, {12,10},
+{ 4, 4}, {12, 4}, { 4,12}, {12,12}, { 4, 6}, {12, 6}, { 4,14}, {12,14},
+{ 5, 1}, {13, 1}, { 5, 9}, {13, 9}, { 5, 3}, {13, 3}, { 5,11}, {13,11},
+{ 5, 5}, {13, 5}, { 5,13}, {13,13}, { 5, 7}, {13, 7}, { 5,15}, {13,15},
+{ 6, 0}, {14, 0}, { 6, 8}, {14, 8}, { 6, 2}, {14, 2}, { 6,10}, {14,10},
+{ 6, 4}, {14, 4}, { 6,12}, {14,12}, { 6, 6}, {14, 6}, { 6,14}, {14,14},
+{ 7, 1}, {15, 1}, { 7, 9}, {15, 9}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+{ 7, 5}, {15, 5}, { 7,13}, {15,13}, { 7, 7}, {15, 7}, { 7,15}, {15,15},
+
+{ 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 4}, {12, 4}, { 4,12}, {12,12}, { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8}, { 2, 2}, {10, 2}, { 2,10}, {10,10}, { 6, 6}, {14, 6}, { 6,14}, {14,14}, { 2, 6}, {10, 6}, { 2,14}, {10,14}, { 6, 2}, {14, 2}, { 6,10}, {14,10}, { 0, 2}, { 8, 2}, { 0,10}, { 8,10}, { 4, 6}, {12, 6}, { 4,14}, {12,14}, { 0, 6}, { 8, 6}, { 0,14}, { 8,14}, { 4, 2}, {12, 2}, { 4,10}, {12,10}, { 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 6, 4}, {14, 4}, { 6,12}, {14,12}, { 2, 4}, {10, 4}, { 2,12}, {10,12}, { 6, 0}, {14, 0}, { 6, 8}, {14, 8}, { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 5}, {13, 5}, { 5,13}, {13,13}, { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9}, { 3, 3}, {11, 3}, { 3,11}, {11,11}, { 7, 7}, {15, 7}, { 7,15}, {15,15}, { 3, 7}, {11, 7}, { 3,15}, {11,15}, { 7, 3}, {15, 3}, { 7,11}, {15,11}, { 1, 3}, { 9, 3}, { 1,11}, { 9,11}, { 5, 7}, {13, 7}, { 5,15}, {13,15}, { 1, 7}, { 9, 7}, { 1,15}, { 9,15}, { 5, 3}, {13, 3}, { 5,11}, {13,11}, { 3, 1}, {11, 1}
+, { 3, 9}, {11, 9}, { 7, 5}, {15, 5}, { 7,13}, {15,13}, { 3, 5}, {11, 5}, { 3,13}, {11,13}, { 7, 1}, {15, 1}, { 7, 9}, {15, 9}, { 0, 1}, { 8, 1}, { 0, 9}, { 8, 9}, { 4, 5}, {12, 5}, { 4,13}, {12,13}, { 0, 5}, { 8, 5}, { 0,13}, { 8,13}, { 4, 1}, {12, 1}, { 4, 9}, {12, 9}, { 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 7}, {14, 7}, { 6,15}, {14,15}, { 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 3}, {14, 3}, { 6,11}, {14,11}, { 0, 3}, { 8, 3}, { 0,11}, { 8,11}, { 4, 7}, {12, 7}, { 4,15}, {12,15}, { 0, 7}, { 8, 7}, { 0,15}, { 8,15}, { 4, 3}, {12, 3}, { 4,11}, {12,11}, { 2, 1}, {10, 1}, { 2, 9}, {10, 9}, { 6, 5}, {14, 5}, { 6,13}, {14,13}, { 2, 5}, {10, 5}, { 2,13}, {10,13}, { 6, 1}, {14, 1}, { 6, 9}, {14, 9}, { 1, 0}, { 9, 0}, { 1, 8}, { 9, 8}, { 5, 4}, {13, 4}, { 5,12}, {13,12}, { 1, 4}, { 9, 4}, { 1,12}, { 9,12}, { 5, 0}, {13, 0}, { 5, 8}, {13, 8}, { 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 6}, {15, 6}, { 7,14}, {15,14}, { 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 2}, {15, 2}, { 7,10}, {15,10}, { 1, 2}, { 9, 2}, { 1,10}, { 9,
+10}, { 5, 6}, {13, 6}, { 5,14}, {13,14}, { 1, 6}, { 9, 6}, { 1,14}, { 9,14}, { 5, 2}, {13, 2}, { 5,10}, {13,10}, { 3, 0}, {11, 0}, { 3, 8}, {11, 8}, { 7, 4}, {15, 4}, { 7,12}, {15,12}, { 3, 4}, {11, 4}, { 3,12}, {11,12}, { 7, 0}, {15, 0}, { 7, 8}, {15, 8},
+};
+
+struct vf_priv_s {
+ int log2_count;
+ int qp;
+ int mode;
+ int mpeg2;
+ int temp_stride[3];
+ uint8_t *src[3];
+ int16_t *temp[3];
+ int outbuf_size;
+ uint8_t *outbuf;
+ AVCodecContext *avctx_enc[BLOCK*BLOCK];
+ AVFrame *frame;
+ AVFrame *frame_dec;
+};
+
+static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
+ int y, x;
+
+#define STORE(pos) \
+ temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>8;\
+ if(temp & 0x100) temp= ~(temp>>31);\
+ dst[x + y*dst_stride + pos]= temp;
+
+ for(y=0; y<height; y++){
+ const uint8_t *d= dither[y&7];
+ for(x=0; x<width; x+=8){
+ int temp;
+ STORE(0);
+ STORE(1);
+ STORE(2);
+ STORE(3);
+ STORE(4);
+ STORE(5);
+ STORE(6);
+ STORE(7);
+ }
+ }
+}
+
+static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height, uint8_t *qp_store, int qp_stride){
+ int x, y, i, j;
+ const int count= 1<<p->log2_count;
+
+ for(i=0; i<3; i++){
+ int is_chroma= !!i;
+ int w= width >>is_chroma;
+ int h= height>>is_chroma;
+ int stride= p->temp_stride[i];
+ int block= BLOCK>>is_chroma;
+
+ if (!src[i] || !dst[i])
+ continue; // HACK avoid crash for Y8 colourspace
+ for(y=0; y<h; y++){
+ int index= block + block*stride + y*stride;
+ fast_memcpy(p->src[i] + index, src[i] + y*src_stride[i], w);
+ for(x=0; x<block; x++){
+ p->src[i][index - x - 1]= p->src[i][index + x ];
+ p->src[i][index + w + x ]= p->src[i][index + w - x - 1];
+ }
+ }
+ for(y=0; y<block; y++){
+ fast_memcpy(p->src[i] + ( block-1-y)*stride, p->src[i] + ( y+block )*stride, stride);
+ fast_memcpy(p->src[i] + (h+block +y)*stride, p->src[i] + (h-y+block-1)*stride, stride);
+ }
+
+ p->frame->linesize[i]= stride;
+ memset(p->temp[i], 0, (h+2*block)*stride*sizeof(int16_t));
+ }
+
+ if(p->qp)
+ p->frame->quality= p->qp * FF_QP2LAMBDA;
+ else
+ p->frame->quality= norm_qscale(qp_store[0], p->mpeg2) * FF_QP2LAMBDA;
+// init per MB qscale stuff FIXME
+
+ for(i=0; i<count; i++){
+ const int x1= offset[i+count-1][0];
+ const int y1= offset[i+count-1][1];
+ int offset;
+ p->frame->data[0]= p->src[0] + x1 + y1 * p->frame->linesize[0];
+ p->frame->data[1]= p->src[1] + x1/2 + y1/2 * p->frame->linesize[1];
+ p->frame->data[2]= p->src[2] + x1/2 + y1/2 * p->frame->linesize[2];
+
+ avcodec_encode_video(p->avctx_enc[i], p->outbuf, p->outbuf_size, p->frame);
+ p->frame_dec = p->avctx_enc[i]->coded_frame;
+
+ offset= (BLOCK-x1) + (BLOCK-y1)*p->frame_dec->linesize[0];
+ //FIXME optimize
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ p->temp[0][ x + y*p->temp_stride[0] ] += p->frame_dec->data[0][ x + y*p->frame_dec->linesize[0] + offset ];
+ }
+ }
+ offset= (BLOCK/2-x1/2) + (BLOCK/2-y1/2)*p->frame_dec->linesize[1];
+ for(y=0; y<height/2; y++){
+ for(x=0; x<width/2; x++){
+ p->temp[1][ x + y*p->temp_stride[1] ] += p->frame_dec->data[1][ x + y*p->frame_dec->linesize[1] + offset ];
+ p->temp[2][ x + y*p->temp_stride[2] ] += p->frame_dec->data[2][ x + y*p->frame_dec->linesize[2] + offset ];
+ }
+ }
+ }
+
+ for(j=0; j<3; j++){
+ int is_chroma= !!j;
+ if (!dst[j])
+ continue; // HACK avoid crash for Y8 colourspace
+ store_slice_c(dst[j], p->temp[j], dst_stride[j], p->temp_stride[j], width>>is_chroma, height>>is_chroma, 8-p->log2_count);
+ }
+}
+
+static int config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int flags, unsigned int outfmt){
+ int i;
+ AVCodec *enc= avcodec_find_encoder(AV_CODEC_ID_SNOW);
+
+ for(i=0; i<3; i++){
+ int is_chroma= !!i;
+ int w= ((width + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma;
+ int h= ((height + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma;
+
+ vf->priv->temp_stride[i]= w;
+ vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
+ vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
+ }
+ for(i=0; i< (1<<vf->priv->log2_count); i++){
+ AVCodecContext *avctx_enc;
+ AVDictionary *opts = NULL;
+
+ avctx_enc=
+ vf->priv->avctx_enc[i]= avcodec_alloc_context3(NULL);
+ avctx_enc->width = width + BLOCK;
+ avctx_enc->height = height + BLOCK;
+ avctx_enc->time_base= (AVRational){1,25}; // meaningless
+ avctx_enc->gop_size = 300;
+ avctx_enc->max_b_frames= 0;
+ avctx_enc->pix_fmt = AV_PIX_FMT_YUV420P;
+ avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
+ avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ avctx_enc->global_quality= 123;
+ av_dict_set(&opts, "no_bitstream", "1", 0);
+ if (avcodec_open2(avctx_enc, enc, &opts) < 0)
+ return 0;
+ av_dict_free(&opts);
+ assert(avctx_enc->codec);
+ }
+ vf->priv->frame= av_frame_alloc();
+ vf->priv->frame_dec= av_frame_alloc();
+
+ vf->priv->outbuf_size= (width + BLOCK)*(height + BLOCK)*10;
+ vf->priv->outbuf= malloc(vf->priv->outbuf_size);
+
+ return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
+}
+
+static void get_image(struct vf_instance *vf, mp_image_t *mpi){
+ if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
+ // ok, we can do pp in-place (or pp disabled):
+ vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
+ mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
+ mpi->planes[0]=vf->dmpi->planes[0];
+ mpi->stride[0]=vf->dmpi->stride[0];
+ mpi->width=vf->dmpi->width;
+ if(mpi->flags&MP_IMGFLAG_PLANAR){
+ mpi->planes[1]=vf->dmpi->planes[1];
+ mpi->planes[2]=vf->dmpi->planes[2];
+ mpi->stride[1]=vf->dmpi->stride[1];
+ mpi->stride[2]=vf->dmpi->stride[2];
+ }
+ mpi->flags|=MP_IMGFLAG_DIRECT;
+}
+
+static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
+ mp_image_t *dmpi;
+
+ if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
+ // no DR, so get a new image! hope we'll get DR buffer:
+ dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
+ MP_IMGTYPE_TEMP,
+ MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
+ mpi->width,mpi->height);
+ ff_vf_clone_mpi_attributes(dmpi, mpi);
+ }else{
+ dmpi=vf->dmpi;
+ }
+
+ vf->priv->mpeg2= mpi->qscale_type;
+ if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
+ if(mpi->qscale || vf->priv->qp){
+ filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h, mpi->qscale, mpi->qstride);
+ }else{
+ memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
+ memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
+ memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
+ }
+ }
+
+#if HAVE_MMX
+ if(ff_gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
+#endif
+#if HAVE_MMX2
+ if(ff_gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
+#endif
+
+ return ff_vf_next_put_image(vf,dmpi, pts);
+}
+
+static void uninit(struct vf_instance *vf){
+ int i;
+ if(!vf->priv) return;
+
+ for(i=0; i<3; i++){
+ free(vf->priv->temp[i]);
+ vf->priv->temp[i]= NULL;
+ free(vf->priv->src[i]);
+ vf->priv->src[i]= NULL;
+ }
+ for(i=0; i<BLOCK*BLOCK; i++){
+ av_freep(&vf->priv->avctx_enc[i]);
+ }
+
+ free(vf->priv);
+ vf->priv=NULL;
+}
+
+//===========================================================================//
+static int query_format(struct vf_instance *vf, unsigned int fmt){
+ switch(fmt){
+ case IMGFMT_YV12:
+ case IMGFMT_I420:
+ case IMGFMT_IYUV:
+ case IMGFMT_Y800:
+ case IMGFMT_Y8:
+ return ff_vf_next_query_format(vf,fmt);
+ }
+ return 0;
+}
+
+static int control(struct vf_instance *vf, int request, void* data){
+ switch(request){
+ case VFCTRL_QUERY_MAX_PP_LEVEL:
+ return 8;
+ case VFCTRL_SET_PP_LEVEL:
+ vf->priv->log2_count= *((unsigned int*)data);
+ //FIXME we have to realloc a few things here
+ return CONTROL_TRUE;
+ }
+ return ff_vf_next_control(vf,request,data);
+}
+
+static int vf_open(vf_instance_t *vf, char *args){
+
+ int log2c=-1;
+
+ vf->config=config;
+ vf->put_image=put_image;
+ vf->get_image=get_image;
+ vf->query_format=query_format;
+ vf->uninit=uninit;
+ vf->control= control;
+ vf->priv=malloc(sizeof(struct vf_priv_s));
+ memset(vf->priv, 0, sizeof(struct vf_priv_s));
+
+ ff_init_avcodec();
+
+ vf->priv->log2_count= 4;
+
+ if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode);
+
+ if( log2c >=0 && log2c <=8 )
+ vf->priv->log2_count = log2c;
+
+ if(vf->priv->qp < 0)
+ vf->priv->qp = 0;
+
+// #if HAVE_MMX
+// if(ff_gCpuCaps.hasMMX){
+// store_slice= store_slice_mmx;
+// }
+// #endif
+
+ return 1;
+}
+
+const vf_info_t ff_vf_info_uspp = {
+ "ultra simple/slow postprocess",
+ "uspp",
+ "Michael Niedermayer",
+ "",
+ vf_open,
+ NULL
+};
diff --git a/libavfilter/libmpcodecs/vfcap.h b/libavfilter/libmpcodecs/vfcap.h
new file mode 100644
index 0000000..611d642
--- /dev/null
+++ b/libavfilter/libmpcodecs/vfcap.h
@@ -0,0 +1,56 @@
+/* VFCAP_* values: they are flags, returned by query_format():
+ *
+ * This file is part of MPlayer.
+ *
+ * MPlayer is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * MPlayer is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with MPlayer; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MPLAYER_VFCAP_H
+#define MPLAYER_VFCAP_H
+
+// set, if the given colorspace is supported (with or without conversion)
+#define VFCAP_CSP_SUPPORTED 0x1
+// set, if the given colorspace is supported _without_ conversion
+#define VFCAP_CSP_SUPPORTED_BY_HW 0x2
+// set if the driver/filter can draw OSD
+#define VFCAP_OSD 0x4
+// set if the driver/filter can handle compressed SPU stream
+#define VFCAP_SPU 0x8
+// scaling up/down by hardware, or software:
+#define VFCAP_HWSCALE_UP 0x10
+#define VFCAP_HWSCALE_DOWN 0x20
+#define VFCAP_SWSCALE 0x40
+// driver/filter can do vertical flip (upside-down)
+#define VFCAP_FLIP 0x80
+
+// driver/hardware handles timing (blocking)
+#define VFCAP_TIMER 0x100
+// driver _always_ flip image upside-down (for ve_vfw)
+#define VFCAP_FLIPPED 0x200
+// vf filter: accepts stride (put_image)
+// vo driver: has draw_slice() support for the given csp
+#define VFCAP_ACCEPT_STRIDE 0x400
+// filter does postprocessing (so you shouldn't scale/filter image before it)
+#define VFCAP_POSTPROC 0x800
+// filter cannot be reconfigured to different size & format
+#define VFCAP_CONSTANT 0x1000
+// filter can draw EOSD
+#define VFCAP_EOSD 0x2000
+// filter will draw EOSD at screen resolution (without scaling)
+#define VFCAP_EOSD_UNSCALED 0x4000
+// used by libvo and vf_vo, indicates the VO does not support draw_slice for this format
+#define VOCAP_NOSLICES 0x8000
+
+#endif /* MPLAYER_VFCAP_H */
diff --git a/libavfilter/log2_tab.c b/libavfilter/log2_tab.c
new file mode 100644
index 0000000..47a1df0
--- /dev/null
+++ b/libavfilter/log2_tab.c
@@ -0,0 +1 @@
+#include "libavutil/log2_tab.c"
diff --git a/libavfilter/lswsutils.c b/libavfilter/lswsutils.c
new file mode 100644
index 0000000..ebb4f93
--- /dev/null
+++ b/libavfilter/lswsutils.c
@@ -0,0 +1,50 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "lswsutils.h"
+
+int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4],
+ int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt,
+ uint8_t * const src_data[4], int src_linesize[4],
+ int src_w, int src_h, enum AVPixelFormat src_pix_fmt,
+ void *log_ctx)
+{
+ int ret;
+ struct SwsContext *sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
+ dst_w, dst_h, dst_pix_fmt,
+ 0, NULL, NULL, NULL);
+ if (!sws_ctx) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Impossible to create scale context for the conversion "
+ "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
+ av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
+ av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ if ((ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pix_fmt, 16)) < 0)
+ goto end;
+ ret = 0;
+ sws_scale(sws_ctx, (const uint8_t * const*)src_data, src_linesize, 0, src_h, dst_data, dst_linesize);
+
+end:
+ sws_freeContext(sws_ctx);
+ return ret;
+}
diff --git a/libavfilter/lswsutils.h b/libavfilter/lswsutils.h
new file mode 100644
index 0000000..f5f5320
--- /dev/null
+++ b/libavfilter/lswsutils.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Miscellaneous utilities which make use of the libswscale library
+ */
+
+#ifndef AVFILTER_LSWSUTILS_H
+#define AVFILTER_LSWSUTILS_H
+
+#include "libswscale/swscale.h"
+
+/**
+ * Scale image using libswscale.
+ */
+int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4],
+ int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt,
+ uint8_t *const src_data[4], int src_linesize[4],
+ int src_w, int src_h, enum AVPixelFormat src_pix_fmt,
+ void *log_ctx);
+
+#endif /* AVFILTER_LSWSUTILS_H */
diff --git a/libavfilter/opencl_allkernels.c b/libavfilter/opencl_allkernels.c
new file mode 100644
index 0000000..6d80fa8
--- /dev/null
+++ b/libavfilter/opencl_allkernels.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "opencl_allkernels.h"
+#if CONFIG_OPENCL
+#include "libavutil/opencl.h"
+#include "deshake_opencl_kernel.h"
+#include "unsharp_opencl_kernel.h"
+#endif
+
+#define OPENCL_REGISTER_KERNEL_CODE(X, x) \
+ { \
+ if (CONFIG_##X##_FILTER) { \
+ av_opencl_register_kernel_code(ff_kernel_##x##_opencl); \
+ } \
+ }
+
+void ff_opencl_register_filter_kernel_code_all(void)
+{
+ #if CONFIG_OPENCL
+ OPENCL_REGISTER_KERNEL_CODE(DESHAKE, deshake);
+ OPENCL_REGISTER_KERNEL_CODE(UNSHARP, unsharp);
+ #endif
+}
diff --git a/libavfilter/opencl_allkernels.h b/libavfilter/opencl_allkernels.h
new file mode 100644
index 0000000..aca02e0
--- /dev/null
+++ b/libavfilter/opencl_allkernels.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_OPENCL_ALLKERNEL_H
+#define AVFILTER_OPENCL_ALLKERNEL_H
+
+#include "avfilter.h"
+#include "config.h"
+
+void ff_opencl_register_filter_kernel_code_all(void);
+
+#endif /* AVFILTER_OPENCL_ALLKERNEL_H */
diff --git a/libavfilter/pthread.c b/libavfilter/pthread.c
index dd3b174..070b3bd 100644
--- a/libavfilter/pthread.c
+++ b/libavfilter/pthread.c
@@ -1,19 +1,19 @@
/*
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,6 +34,8 @@
#if HAVE_PTHREADS
#include <pthread.h>
+#elif HAVE_OS2THREADS
+#include "compat/os2threads.h"
#elif HAVE_W32THREADS
#include "compat/w32pthreads.h"
#endif
@@ -157,7 +159,6 @@ static int thread_init_internal(ThreadContext *c, int nb_threads)
if (!nb_threads) {
int nb_cpus = av_cpu_count();
- av_log(c->graph, AV_LOG_DEBUG, "Detected %d logical cores.\n", nb_cpus);
// use number of cores + 1 as thread count if there is more than one
if (nb_cpus > 1)
nb_threads = nb_cpus + 1;
@@ -169,7 +170,7 @@ static int thread_init_internal(ThreadContext *c, int nb_threads)
return 1;
c->nb_threads = nb_threads;
- c->workers = av_mallocz(sizeof(*c->workers) * nb_threads);
+ c->workers = av_mallocz_array(sizeof(*c->workers), nb_threads);
if (!c->workers)
return AVERROR(ENOMEM);
diff --git a/libavfilter/setpts.c b/libavfilter/setpts.c
index fa7a0be..92b07fb 100644
--- a/libavfilter/setpts.c
+++ b/libavfilter/setpts.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2008 Victor Paesa
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -31,24 +31,27 @@
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
-
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
-#include "config.h"
-
static const char *const var_names[] = {
- "E", ///< Euler number
+ "FRAME_RATE", ///< defined only for constant frame-rate video
"INTERLACED", ///< tell if the current frame is interlaced
"N", ///< frame / sample number (starting at zero)
- "PHI", ///< golden ratio
- "PI", ///< greek pi
+ "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
+ "NB_SAMPLES", ///< number of samples in the current frame (only audio)
+ "POS", ///< original position in the file of the frame
"PREV_INPTS", ///< previous input PTS
+ "PREV_INT", ///< previous input time in seconds
"PREV_OUTPTS", ///< previous output PTS
+ "PREV_OUTT", ///< previous output time in seconds
"PTS", ///< original pts in the file of the frame
+ "SAMPLE_RATE", ///< sample rate (only audio)
"STARTPTS", ///< PTS at start of movie
+ "STARTT", ///< time at start of movie
+ "T", ///< original time in the file of the frame
"TB", ///< timebase
"RTCTIME", ///< wallclock (RTC) time in micro seconds
"RTCSTART", ///< wallclock (RTC) time at the start of the movie in micro seconds
@@ -58,15 +61,21 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
+ VAR_FRAME_RATE,
VAR_INTERLACED,
VAR_N,
- VAR_PHI,
- VAR_PI,
+ VAR_NB_CONSUMED_SAMPLES,
+ VAR_NB_SAMPLES,
+ VAR_POS,
VAR_PREV_INPTS,
+ VAR_PREV_INT,
VAR_PREV_OUTPTS,
+ VAR_PREV_OUTT,
VAR_PTS,
+ VAR_SAMPLE_RATE,
VAR_STARTPTS,
+ VAR_STARTT,
+ VAR_T,
VAR_TB,
VAR_RTCTIME,
VAR_RTCSTART,
@@ -80,6 +89,7 @@ typedef struct SetPTSContext {
char *expr_str;
AVExpr *expr;
double var_values[VAR_VARS_NB];
+ enum AVMediaType type;
} SetPTSContext;
static av_cold int init(AVFilterContext *ctx)
@@ -93,34 +103,54 @@ static av_cold int init(AVFilterContext *ctx)
return ret;
}
- setpts->var_values[VAR_E] = M_E;
setpts->var_values[VAR_N] = 0.0;
setpts->var_values[VAR_S] = 0.0;
- setpts->var_values[VAR_PHI] = M_PHI;
- setpts->var_values[VAR_PI] = M_PI;
setpts->var_values[VAR_PREV_INPTS] = NAN;
+ setpts->var_values[VAR_PREV_INT] = NAN;
setpts->var_values[VAR_PREV_OUTPTS] = NAN;
+ setpts->var_values[VAR_PREV_OUTT] = NAN;
setpts->var_values[VAR_STARTPTS] = NAN;
+ setpts->var_values[VAR_STARTT] = NAN;
return 0;
}
static int config_input(AVFilterLink *inlink)
{
- SetPTSContext *setpts = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ SetPTSContext *setpts = ctx->priv;
+ setpts->type = inlink->type;
setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
setpts->var_values[VAR_RTCSTART] = av_gettime();
- if (inlink->type == AVMEDIA_TYPE_AUDIO) {
- setpts->var_values[VAR_SR] = inlink->sample_rate;
- }
+ setpts->var_values[VAR_SR] =
+ setpts->var_values[VAR_SAMPLE_RATE] =
+ setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
+
+ setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ?
+ av_q2d(inlink->frame_rate) : NAN;
- av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f\n", setpts->var_values[VAR_TB]);
+ av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
+ setpts->var_values[VAR_TB],
+ setpts->var_values[VAR_FRAME_RATE],
+ setpts->var_values[VAR_SAMPLE_RATE]);
return 0;
}
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
+#define BUF_SIZE 64
+
+static inline char *double2int64str(char *buf, double v)
+{
+ if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
+ else snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
+ return buf;
+}
+
+#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
@@ -128,27 +158,43 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
int64_t in_pts = frame->pts;
double d;
- if (isnan(setpts->var_values[VAR_STARTPTS]))
+ if (isnan(setpts->var_values[VAR_STARTPTS])) {
setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
-
+ setpts->var_values[VAR_STARTT ] = TS2T(frame->pts, inlink->time_base);
+ }
setpts->var_values[VAR_PTS ] = TS2D(frame->pts);
+ setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base);
+ setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
setpts->var_values[VAR_RTCTIME ] = av_gettime();
if (inlink->type == AVMEDIA_TYPE_VIDEO) {
setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
- } else {
+ } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
setpts->var_values[VAR_S] = frame->nb_samples;
+ setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
}
d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
frame->pts = D2TS(d);
av_dlog(inlink->dst,
- "n:%"PRId64" interlaced:%d pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
+ "N:%"PRId64" PTS:%s T:%f POS:%s",
(int64_t)setpts->var_values[VAR_N],
- (int)setpts->var_values[VAR_INTERLACED],
- in_pts, in_pts * av_q2d(inlink->time_base),
- frame->pts, frame->pts * av_q2d(inlink->time_base));
+ d2istr(setpts->var_values[VAR_PTS]),
+ setpts->var_values[VAR_T],
+ d2istr(setpts->var_values[VAR_POS]));
+ switch (inlink->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ av_dlog(inlink->dst, " INTERLACED:%"PRId64,
+ (int64_t)setpts->var_values[VAR_INTERLACED]);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_dlog(inlink->dst, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
+ (int64_t)setpts->var_values[VAR_NB_SAMPLES],
+ (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
+ break;
+ }
+ av_dlog(inlink->dst, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));
if (inlink->type == AVMEDIA_TYPE_VIDEO) {
setpts->var_values[VAR_N] += 1.0;
@@ -157,7 +203,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
+ setpts->var_values[VAR_PREV_INT ] = TS2T(in_pts, inlink->time_base);
setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
+ setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base);
+ if (setpts->type == AVMEDIA_TYPE_AUDIO) {
+ setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
+ }
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
@@ -169,27 +220,22 @@ static av_cold void uninit(AVFilterContext *ctx)
}
#define OFFSET(x) offsetof(SetPTSContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
{ "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
- { NULL },
+ { NULL }
};
#if CONFIG_SETPTS_FILTER
-static const AVClass setpts_class = {
- .class_name = "setpts",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+#define setpts_options options
+AVFILTER_DEFINE_CLASS(setpts);
static const AVFilterPad avfilter_vf_setpts_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -214,23 +260,19 @@ AVFilter ff_vf_setpts = {
.inputs = avfilter_vf_setpts_inputs,
.outputs = avfilter_vf_setpts_outputs,
};
-#endif
+#endif /* CONFIG_SETPTS_FILTER */
#if CONFIG_ASETPTS_FILTER
-static const AVClass asetpts_class = {
- .class_name = "asetpts",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+#define asetpts_options options
+AVFILTER_DEFINE_CLASS(asetpts);
static const AVFilterPad asetpts_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -248,11 +290,9 @@ AVFilter ff_af_asetpts = {
.description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
.init = init,
.uninit = uninit,
-
- .priv_size = sizeof(SetPTSContext),
- .priv_class = &asetpts_class,
-
- .inputs = asetpts_inputs,
- .outputs = asetpts_outputs,
+ .priv_size = sizeof(SetPTSContext),
+ .priv_class = &asetpts_class,
+ .inputs = asetpts_inputs,
+ .outputs = asetpts_outputs,
};
-#endif
+#endif /* CONFIG_ASETPTS_FILTER */
diff --git a/libavfilter/settb.c b/libavfilter/settb.c
index 169037f..83616c1 100644
--- a/libavfilter/settb.c
+++ b/libavfilter/settb.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2010 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,9 +38,6 @@
#include "video.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"AVTB", /* default timebase 1/AV_TIME_BASE */
"intb", /* input timebase */
"sr", /* sample rate */
@@ -48,9 +45,6 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_AVTB,
VAR_INTB,
VAR_SR,
@@ -63,6 +57,16 @@ typedef struct SetTBContext {
double var_values[VAR_VARS_NB];
} SetTBContext;
+#define OFFSET(x) offsetof(SetTBContext, x)
+#define DEFINE_OPTIONS(filt_name, filt_type) \
+static const AVOption filt_name##_options[] = { \
+ { "expr", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
+ .flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
+ { "tb", "set expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, {.str="intb"}, \
+ .flags=AV_OPT_FLAG_##filt_type##_PARAM|AV_OPT_FLAG_FILTERING_PARAM }, \
+ { NULL } \
+}
+
static int config_output_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
@@ -72,9 +76,6 @@ static int config_output_props(AVFilterLink *outlink)
int ret;
double res;
- settb->var_values[VAR_E] = M_E;
- settb->var_values[VAR_PHI] = M_PHI;
- settb->var_values[VAR_PI] = M_PI;
settb->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q);
settb->var_values[VAR_INTB] = av_q2d(inlink->time_base);
settb->var_values[VAR_SR] = inlink->sample_rate;
@@ -119,27 +120,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return ff_filter_frame(outlink, frame);
}
-#define OFFSET(x) offsetof(SetTBContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM
-static const AVOption options[] = {
- { "expr", "Expression determining the output timebase", OFFSET(tb_expr), AV_OPT_TYPE_STRING, { .str = "intb" }, .flags = FLAGS },
- { NULL },
-};
-
#if CONFIG_SETTB_FILTER
-static const AVClass settb_class = {
- .class_name = "settb",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+DEFINE_OPTIONS(settb, VIDEO);
+AVFILTER_DEFINE_CLASS(settb);
static const AVFilterPad avfilter_vf_settb_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -154,31 +144,24 @@ static const AVFilterPad avfilter_vf_settb_outputs[] = {
};
AVFilter ff_vf_settb = {
- .name = "settb",
+ .name = "settb",
.description = NULL_IF_CONFIG_SMALL("Set timebase for the video output link."),
-
- .priv_size = sizeof(SetTBContext),
- .priv_class = &settb_class,
-
- .inputs = avfilter_vf_settb_inputs,
-
- .outputs = avfilter_vf_settb_outputs,
+ .priv_size = sizeof(SetTBContext),
+ .priv_class = &settb_class,
+ .inputs = avfilter_vf_settb_inputs,
+ .outputs = avfilter_vf_settb_outputs,
};
#endif /* CONFIG_SETTB_FILTER */
#if CONFIG_ASETTB_FILTER
-static const AVClass asettb_class = {
- .class_name = "asettb",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+DEFINE_OPTIONS(asettb, AUDIO);
+AVFILTER_DEFINE_CLASS(asettb);
static const AVFilterPad avfilter_af_asettb_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = filter_frame,
},
{ NULL }
diff --git a/libavfilter/split.c b/libavfilter/split.c
index 41395e7..6abd5ee 100644
--- a/libavfilter/split.c
+++ b/libavfilter/split.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -70,10 +70,14 @@ static av_cold void split_uninit(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
- int i, ret = 0;
+ int i, ret = AVERROR_EOF;
for (i = 0; i < ctx->nb_outputs; i++) {
- AVFrame *buf_out = av_frame_clone(frame);
+ AVFrame *buf_out;
+
+ if (ctx->outputs[i]->closed)
+ continue;
+ buf_out = av_frame_clone(frame);
if (!buf_out) {
ret = AVERROR(ENOMEM);
break;
@@ -90,56 +94,42 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
#define OFFSET(x) offsetof(SplitContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
- { "outputs", "Number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, FLAGS },
- { NULL },
+ { "outputs", "set number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass split_class = {
- .class_name = "split",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+#define split_options options
+AVFILTER_DEFINE_CLASS(split);
-static const AVClass asplit_class = {
- .class_name = "asplit",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+#define asplit_options options
+AVFILTER_DEFINE_CLASS(asplit);
static const AVFilterPad avfilter_vf_split_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
AVFilter ff_vf_split = {
- .name = "split",
+ .name = "split",
.description = NULL_IF_CONFIG_SMALL("Pass on the input to N video outputs."),
-
- .priv_size = sizeof(SplitContext),
- .priv_class = &split_class,
-
- .init = split_init,
- .uninit = split_uninit,
-
- .inputs = avfilter_vf_split_inputs,
- .outputs = NULL,
-
- .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .priv_size = sizeof(SplitContext),
+ .priv_class = &split_class,
+ .init = split_init,
+ .uninit = split_uninit,
+ .inputs = avfilter_vf_split_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
static const AVFilterPad avfilter_af_asplit_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .get_audio_buffer = ff_null_get_audio_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -147,15 +137,11 @@ static const AVFilterPad avfilter_af_asplit_inputs[] = {
AVFilter ff_af_asplit = {
.name = "asplit",
.description = NULL_IF_CONFIG_SMALL("Pass on the audio input to N audio outputs."),
-
- .priv_size = sizeof(SplitContext),
- .priv_class = &asplit_class,
-
- .init = split_init,
- .uninit = split_uninit,
-
- .inputs = avfilter_af_asplit_inputs,
- .outputs = NULL,
-
- .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+ .priv_size = sizeof(SplitContext),
+ .priv_class = &asplit_class,
+ .init = split_init,
+ .uninit = split_uninit,
+ .inputs = avfilter_af_asplit_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
diff --git a/libavfilter/src_movie.c b/libavfilter/src_movie.c
new file mode 100644
index 0000000..0b97b82
--- /dev/null
+++ b/libavfilter/src_movie.c
@@ -0,0 +1,609 @@
+/*
+ * Copyright (c) 2010 Stefano Sabatini
+ * Copyright (c) 2008 Victor Paesa
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * movie video source
+ *
+ * @todo use direct rendering (no allocation of a new frame)
+ * @todo support a PTS correction mechanism
+ */
+
+#include <float.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/avstring.h"
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/timestamp.h"
+#include "libavformat/avformat.h"
+#include "audio.h"
+#include "avcodec.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct MovieStream {
+ AVStream *st;
+ int done;
+} MovieStream;
+
+typedef struct MovieContext {
+ /* common A/V fields */
+ const AVClass *class;
+ int64_t seek_point; ///< seekpoint in microseconds
+ double seek_point_d;
+ char *format_name;
+ char *file_name;
+ char *stream_specs; /**< user-provided list of streams, separated by + */
+ int stream_index; /**< for compatibility */
+ int loop_count;
+
+ AVFormatContext *format_ctx;
+ int eof;
+ AVPacket pkt, pkt0;
+
+ int max_stream_index; /**< max stream # actually used for output */
+ MovieStream *st; /**< array of all streams, one per output */
+ int *out_index; /**< stream number -> output number map, or -1 */
+} MovieContext;
+
+#define OFFSET(x) offsetof(MovieContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption movie_options[]= {
+ { "filename", NULL, OFFSET(file_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
+ { "seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
+ { "sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
+ { "streams", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
+ { "s", "set streams", OFFSET(stream_specs), AV_OPT_TYPE_STRING, {.str = 0}, CHAR_MAX, CHAR_MAX, FLAGS },
+ { "loop", "set loop count", OFFSET(loop_count), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, FLAGS },
+ { NULL },
+};
+
+static int movie_config_output_props(AVFilterLink *outlink);
+static int movie_request_frame(AVFilterLink *outlink);
+
+static AVStream *find_stream(void *log, AVFormatContext *avf, const char *spec)
+{
+ int i, ret, already = 0, stream_id = -1;
+ char type_char[2], dummy;
+ AVStream *found = NULL;
+ enum AVMediaType type;
+
+ ret = sscanf(spec, "d%1[av]%d%c", type_char, &stream_id, &dummy);
+ if (ret >= 1 && ret <= 2) {
+ type = type_char[0] == 'v' ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
+ ret = av_find_best_stream(avf, type, stream_id, -1, NULL, 0);
+ if (ret < 0) {
+ av_log(log, AV_LOG_ERROR, "No %s stream with index '%d' found\n",
+ av_get_media_type_string(type), stream_id);
+ return NULL;
+ }
+ return avf->streams[ret];
+ }
+ for (i = 0; i < avf->nb_streams; i++) {
+ ret = avformat_match_stream_specifier(avf, avf->streams[i], spec);
+ if (ret < 0) {
+ av_log(log, AV_LOG_ERROR,
+ "Invalid stream specifier \"%s\"\n", spec);
+ return NULL;
+ }
+ if (!ret)
+ continue;
+ if (avf->streams[i]->discard != AVDISCARD_ALL) {
+ already++;
+ continue;
+ }
+ if (found) {
+ av_log(log, AV_LOG_WARNING,
+ "Ambiguous stream specifier \"%s\", using #%d\n", spec, i);
+ break;
+ }
+ found = avf->streams[i];
+ }
+ if (!found) {
+ av_log(log, AV_LOG_WARNING, "Stream specifier \"%s\" %s\n", spec,
+ already ? "matched only already used streams" :
+ "did not match any stream");
+ return NULL;
+ }
+ if (found->codec->codec_type != AVMEDIA_TYPE_VIDEO &&
+ found->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
+ av_log(log, AV_LOG_ERROR, "Stream specifier \"%s\" matched a %s stream,"
+ "currently unsupported by libavfilter\n", spec,
+ av_get_media_type_string(found->codec->codec_type));
+ return NULL;
+ }
+ return found;
+}
+
+static int open_stream(void *log, MovieStream *st)
+{
+ AVCodec *codec;
+ int ret;
+
+ codec = avcodec_find_decoder(st->st->codec->codec_id);
+ if (!codec) {
+ av_log(log, AV_LOG_ERROR, "Failed to find any codec\n");
+ return AVERROR(EINVAL);
+ }
+
+ st->st->codec->refcounted_frames = 1;
+
+ if ((ret = avcodec_open2(st->st->codec, codec, NULL)) < 0) {
+ av_log(log, AV_LOG_ERROR, "Failed to open codec\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int guess_channel_layout(MovieStream *st, int st_index, void *log_ctx)
+{
+ AVCodecContext *dec_ctx = st->st->codec;
+ char buf[256];
+ int64_t chl = av_get_default_channel_layout(dec_ctx->channels);
+
+ if (!chl) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Channel layout is not set in stream %d, and could not "
+ "be guessed from the number of channels (%d)\n",
+ st_index, dec_ctx->channels);
+ return AVERROR(EINVAL);
+ }
+
+ av_get_channel_layout_string(buf, sizeof(buf), dec_ctx->channels, chl);
+ av_log(log_ctx, AV_LOG_WARNING,
+ "Channel layout is not set in output stream %d, "
+ "guessed channel layout is '%s'\n",
+ st_index, buf);
+ dec_ctx->channel_layout = chl;
+ return 0;
+}
+
+static av_cold int movie_common_init(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ AVInputFormat *iformat = NULL;
+ int64_t timestamp;
+ int nb_streams = 1, ret, i;
+ char default_streams[16], *stream_specs, *spec, *cursor;
+ char name[16];
+ AVStream *st;
+
+ if (!movie->file_name) {
+ av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
+ return AVERROR(EINVAL);
+ }
+
+ movie->seek_point = movie->seek_point_d * 1000000 + 0.5;
+
+ stream_specs = movie->stream_specs;
+ if (!stream_specs) {
+ snprintf(default_streams, sizeof(default_streams), "d%c%d",
+ !strcmp(ctx->filter->name, "amovie") ? 'a' : 'v',
+ movie->stream_index);
+ stream_specs = default_streams;
+ }
+ for (cursor = stream_specs; *cursor; cursor++)
+ if (*cursor == '+')
+ nb_streams++;
+
+ if (movie->loop_count != 1 && nb_streams != 1) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Loop with several streams is currently unsupported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ av_register_all();
+
+ // Try to find the movie format (container)
+ iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL;
+
+ movie->format_ctx = NULL;
+ if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Failed to avformat_open_input '%s'\n", movie->file_name);
+ return ret;
+ }
+ if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0)
+ av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n");
+
+ // if seeking requested, we execute it
+ if (movie->seek_point > 0) {
+ timestamp = movie->seek_point;
+ // add the stream start time, should it exist
+ if (movie->format_ctx->start_time != AV_NOPTS_VALUE) {
+ if (timestamp > INT64_MAX - movie->format_ctx->start_time) {
+ av_log(ctx, AV_LOG_ERROR,
+ "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n",
+ movie->file_name, movie->format_ctx->start_time, movie->seek_point);
+ return AVERROR(EINVAL);
+ }
+ timestamp += movie->format_ctx->start_time;
+ }
+ if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n",
+ movie->file_name, timestamp);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < movie->format_ctx->nb_streams; i++)
+ movie->format_ctx->streams[i]->discard = AVDISCARD_ALL;
+
+ movie->st = av_calloc(nb_streams, sizeof(*movie->st));
+ if (!movie->st)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < nb_streams; i++) {
+ spec = av_strtok(stream_specs, "+", &cursor);
+ if (!spec)
+ return AVERROR_BUG;
+ stream_specs = NULL; /* for next strtok */
+ st = find_stream(ctx, movie->format_ctx, spec);
+ if (!st)
+ return AVERROR(EINVAL);
+ st->discard = AVDISCARD_DEFAULT;
+ movie->st[i].st = st;
+ movie->max_stream_index = FFMAX(movie->max_stream_index, st->index);
+ }
+ if (av_strtok(NULL, "+", &cursor))
+ return AVERROR_BUG;
+
+ movie->out_index = av_calloc(movie->max_stream_index + 1,
+ sizeof(*movie->out_index));
+ if (!movie->out_index)
+ return AVERROR(ENOMEM);
+ for (i = 0; i <= movie->max_stream_index; i++)
+ movie->out_index[i] = -1;
+ for (i = 0; i < nb_streams; i++) {
+ AVFilterPad pad = { 0 };
+ movie->out_index[movie->st[i].st->index] = i;
+ snprintf(name, sizeof(name), "out%d", i);
+ pad.type = movie->st[i].st->codec->codec_type;
+ pad.name = av_strdup(name);
+ pad.config_props = movie_config_output_props;
+ pad.request_frame = movie_request_frame;
+ ff_insert_outpad(ctx, i, &pad);
+ ret = open_stream(ctx, &movie->st[i]);
+ if (ret < 0)
+ return ret;
+ if ( movie->st[i].st->codec->codec->type == AVMEDIA_TYPE_AUDIO &&
+ !movie->st[i].st->codec->channel_layout) {
+ ret = guess_channel_layout(&movie->st[i], i, ctx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
+ movie->seek_point, movie->format_name, movie->file_name,
+ movie->stream_index);
+
+ return 0;
+}
+
+static av_cold void movie_uninit(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ int i;
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ av_freep(&ctx->output_pads[i].name);
+ if (movie->st[i].st)
+ avcodec_close(movie->st[i].st->codec);
+ }
+ av_freep(&movie->st);
+ av_freep(&movie->out_index);
+ if (movie->format_ctx)
+ avformat_close_input(&movie->format_ctx);
+}
+
+static int movie_query_formats(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ int list[] = { 0, -1 };
+ int64_t list64[] = { 0, -1 };
+ int i;
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ MovieStream *st = &movie->st[i];
+ AVCodecContext *c = st->st->codec;
+ AVFilterLink *outlink = ctx->outputs[i];
+
+ switch (c->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ list[0] = c->pix_fmt;
+ ff_formats_ref(ff_make_format_list(list), &outlink->in_formats);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ list[0] = c->sample_fmt;
+ ff_formats_ref(ff_make_format_list(list), &outlink->in_formats);
+ list[0] = c->sample_rate;
+ ff_formats_ref(ff_make_format_list(list), &outlink->in_samplerates);
+ list64[0] = c->channel_layout;
+ ff_channel_layouts_ref(avfilter_make_format64_list(list64),
+ &outlink->in_channel_layouts);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int movie_config_output_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MovieContext *movie = ctx->priv;
+ unsigned out_id = FF_OUTLINK_IDX(outlink);
+ MovieStream *st = &movie->st[out_id];
+ AVCodecContext *c = st->st->codec;
+
+ outlink->time_base = st->st->time_base;
+
+ switch (c->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ outlink->w = c->width;
+ outlink->h = c->height;
+ outlink->frame_rate = st->st->r_frame_rate;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ break;
+ }
+
+ return 0;
+}
+
+static char *describe_frame_to_str(char *dst, size_t dst_size,
+ AVFrame *frame, enum AVMediaType frame_type,
+ AVFilterLink *link)
+{
+ switch (frame_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ snprintf(dst, dst_size,
+ "video pts:%s time:%s size:%dx%d aspect:%d/%d",
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
+ frame->width, frame->height,
+ frame->sample_aspect_ratio.num,
+ frame->sample_aspect_ratio.den);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ snprintf(dst, dst_size,
+ "audio pts:%s time:%s samples:%d",
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
+ frame->nb_samples);
+ break;
+ default:
+ snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(frame_type));
+ break;
+ }
+ return dst;
+}
+
+static int rewind_file(AVFilterContext *ctx)
+{
+ MovieContext *movie = ctx->priv;
+ int64_t timestamp = movie->seek_point;
+ int ret, i;
+
+ if (movie->format_ctx->start_time != AV_NOPTS_VALUE)
+ timestamp += movie->format_ctx->start_time;
+ ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to loop: %s\n", av_err2str(ret));
+ movie->loop_count = 1; /* do not try again */
+ return ret;
+ }
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ avcodec_flush_buffers(movie->st[i].st->codec);
+ movie->st[i].done = 0;
+ }
+ movie->eof = 0;
+ return 0;
+}
+
+/**
+ * Try to push a frame to the requested output.
+ *
+ * @param ctx filter context
+ * @param out_id number of output where a frame is wanted;
+ * if the frame is read from file, used to set the return value;
+ * if the codec is being flushed, flush the corresponding stream
+ * @return 1 if a frame was pushed on the requested output,
+ * 0 if another attempt is possible,
+ * <0 AVERROR code
+ */
+static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
+{
+ MovieContext *movie = ctx->priv;
+ AVPacket *pkt = &movie->pkt;
+ enum AVMediaType frame_type;
+ MovieStream *st;
+ int ret, got_frame = 0, pkt_out_id;
+ AVFilterLink *outlink;
+ AVFrame *frame;
+
+ if (!pkt->size) {
+ if (movie->eof) {
+ if (movie->st[out_id].done) {
+ if (movie->loop_count != 1) {
+ ret = rewind_file(ctx);
+ if (ret < 0)
+ return ret;
+ movie->loop_count -= movie->loop_count > 1;
+ av_log(ctx, AV_LOG_VERBOSE, "Stream finished, looping.\n");
+ return 0; /* retry */
+ }
+ return AVERROR_EOF;
+ }
+ pkt->stream_index = movie->st[out_id].st->index;
+ /* packet is already ready for flushing */
+ } else {
+ ret = av_read_frame(movie->format_ctx, &movie->pkt0);
+ if (ret < 0) {
+ av_init_packet(&movie->pkt0); /* ready for flushing */
+ *pkt = movie->pkt0;
+ if (ret == AVERROR_EOF) {
+ movie->eof = 1;
+ return 0; /* start flushing */
+ }
+ return ret;
+ }
+ *pkt = movie->pkt0;
+ }
+ }
+
+ pkt_out_id = pkt->stream_index > movie->max_stream_index ? -1 :
+ movie->out_index[pkt->stream_index];
+ if (pkt_out_id < 0) {
+ av_free_packet(&movie->pkt0);
+ pkt->size = 0; /* ready for next run */
+ pkt->data = NULL;
+ return 0;
+ }
+ st = &movie->st[pkt_out_id];
+ outlink = ctx->outputs[pkt_out_id];
+
+ frame = av_frame_alloc();
+ if (!frame)
+ return AVERROR(ENOMEM);
+
+ frame_type = st->st->codec->codec_type;
+ switch (frame_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ ret = avcodec_decode_video2(st->st->codec, frame, &got_frame, pkt);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ ret = avcodec_decode_audio4(st->st->codec, frame, &got_frame, pkt);
+ break;
+ default:
+ ret = AVERROR(ENOSYS);
+ break;
+ }
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret));
+ av_frame_free(&frame);
+ av_free_packet(&movie->pkt0);
+ movie->pkt.size = 0;
+ movie->pkt.data = NULL;
+ return 0;
+ }
+ if (!ret || st->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ ret = pkt->size;
+
+ pkt->data += ret;
+ pkt->size -= ret;
+ if (pkt->size <= 0) {
+ av_free_packet(&movie->pkt0);
+ pkt->size = 0; /* ready for next run */
+ pkt->data = NULL;
+ }
+ if (!got_frame) {
+ if (!ret)
+ st->done = 1;
+ av_frame_free(&frame);
+ return 0;
+ }
+
+ frame->pts = av_frame_get_best_effort_timestamp(frame);
+ av_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name,
+ describe_frame_to_str((char[1024]){0}, 1024, frame, frame_type, outlink));
+
+ if (st->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (frame->format != outlink->format) {
+ av_log(ctx, AV_LOG_ERROR, "Format changed %s -> %s, discarding frame\n",
+ av_get_pix_fmt_name(outlink->format),
+ av_get_pix_fmt_name(frame->format)
+ );
+ av_frame_free(&frame);
+ return 0;
+ }
+ }
+ ret = ff_filter_frame(outlink, frame);
+
+ if (ret < 0)
+ return ret;
+ return pkt_out_id == out_id;
+}
+
+static int movie_request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ unsigned out_id = FF_OUTLINK_IDX(outlink);
+ int ret;
+
+ while (1) {
+ ret = movie_push_frame(ctx, out_id);
+ if (ret)
+ return FFMIN(ret, 0);
+ }
+}
+
+#if CONFIG_MOVIE_FILTER
+
+AVFILTER_DEFINE_CLASS(movie);
+
+AVFilter ff_avsrc_movie = {
+ .name = "movie",
+ .description = NULL_IF_CONFIG_SMALL("Read from a movie source."),
+ .priv_size = sizeof(MovieContext),
+ .priv_class = &movie_class,
+ .init = movie_common_init,
+ .uninit = movie_uninit,
+ .query_formats = movie_query_formats,
+
+ .inputs = NULL,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+
+#endif /* CONFIG_MOVIE_FILTER */
+
+#if CONFIG_AMOVIE_FILTER
+
+#define amovie_options movie_options
+AVFILTER_DEFINE_CLASS(amovie);
+
+AVFilter ff_avsrc_amovie = {
+ .name = "amovie",
+ .description = NULL_IF_CONFIG_SMALL("Read audio from a movie source."),
+ .priv_size = sizeof(MovieContext),
+ .init = movie_common_init,
+ .uninit = movie_uninit,
+ .query_formats = movie_query_formats,
+
+ .inputs = NULL,
+ .outputs = NULL,
+ .priv_class = &amovie_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+
+#endif /* CONFIG_AMOVIE_FILTER */
diff --git a/libavfilter/thread.h b/libavfilter/thread.h
index 1cfea3e..5f347e8 100644
--- a/libavfilter/thread.h
+++ b/libavfilter/thread.h
@@ -1,19 +1,19 @@
/*
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/transform.c b/libavfilter/transform.c
new file mode 100644
index 0000000..3fc547e
--- /dev/null
+++ b/libavfilter/transform.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
+ * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * transform input video
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/avassert.h"
+
+#include "transform.h"
+
+#define INTERPOLATE_METHOD(name) \
+ static uint8_t name(float x, float y, const uint8_t *src, \
+ int width, int height, int stride, uint8_t def)
+
+#define PIXEL(img, x, y, w, h, stride, def) \
+ ((x) < 0 || (y) < 0) ? (def) : \
+ (((x) >= (w) || (y) >= (h)) ? (def) : \
+ img[(x) + (y) * (stride)])
+
+/**
+ * Nearest neighbor interpolation
+ */
+INTERPOLATE_METHOD(interpolate_nearest)
+{
+ return PIXEL(src, (int)(x + 0.5), (int)(y + 0.5), width, height, stride, def);
+}
+
+/**
+ * Bilinear interpolation
+ */
+INTERPOLATE_METHOD(interpolate_bilinear)
+{
+ int x_c, x_f, y_c, y_f;
+ int v1, v2, v3, v4;
+
+ if (x < -1 || x > width || y < -1 || y > height) {
+ return def;
+ } else {
+ x_f = (int)x;
+ x_c = x_f + 1;
+
+ y_f = (int)y;
+ y_c = y_f + 1;
+
+ v1 = PIXEL(src, x_c, y_c, width, height, stride, def);
+ v2 = PIXEL(src, x_c, y_f, width, height, stride, def);
+ v3 = PIXEL(src, x_f, y_c, width, height, stride, def);
+ v4 = PIXEL(src, x_f, y_f, width, height, stride, def);
+
+ return (v1*(x - x_f)*(y - y_f) + v2*((x - x_f)*(y_c - y)) +
+ v3*(x_c - x)*(y - y_f) + v4*((x_c - x)*(y_c - y)));
+ }
+}
+
+/**
+ * Biquadratic interpolation
+ */
+INTERPOLATE_METHOD(interpolate_biquadratic)
+{
+ int x_c, x_f, y_c, y_f;
+ uint8_t v1, v2, v3, v4;
+ float f1, f2, f3, f4;
+
+ if (x < - 1 || x > width || y < -1 || y > height)
+ return def;
+ else {
+ x_f = (int)x;
+ x_c = x_f + 1;
+ y_f = (int)y;
+ y_c = y_f + 1;
+
+ v1 = PIXEL(src, x_c, y_c, width, height, stride, def);
+ v2 = PIXEL(src, x_c, y_f, width, height, stride, def);
+ v3 = PIXEL(src, x_f, y_c, width, height, stride, def);
+ v4 = PIXEL(src, x_f, y_f, width, height, stride, def);
+
+ f1 = 1 - sqrt((x_c - x) * (y_c - y));
+ f2 = 1 - sqrt((x_c - x) * (y - y_f));
+ f3 = 1 - sqrt((x - x_f) * (y_c - y));
+ f4 = 1 - sqrt((x - x_f) * (y - y_f));
+ return (v1 * f1 + v2 * f2 + v3 * f3 + v4 * f4) / (f1 + f2 + f3 + f4);
+ }
+}
+
+void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix) {
+ matrix[0] = zoom * cos(angle);
+ matrix[1] = -sin(angle);
+ matrix[2] = x_shift;
+ matrix[3] = -matrix[1];
+ matrix[4] = matrix[0];
+ matrix[5] = y_shift;
+ matrix[6] = 0;
+ matrix[7] = 0;
+ matrix[8] = 1;
+}
+
+void avfilter_add_matrix(const float *m1, const float *m2, float *result)
+{
+ int i;
+ for (i = 0; i < 9; i++)
+ result[i] = m1[i] + m2[i];
+}
+
+void avfilter_sub_matrix(const float *m1, const float *m2, float *result)
+{
+ int i;
+ for (i = 0; i < 9; i++)
+ result[i] = m1[i] - m2[i];
+}
+
+void avfilter_mul_matrix(const float *m1, float scalar, float *result)
+{
+ int i;
+ for (i = 0; i < 9; i++)
+ result[i] = m1[i] * scalar;
+}
+
+static inline int mirror(int v, int m)
+{
+ while ((unsigned)v > (unsigned)m) {
+ v = -v;
+ if (v < 0)
+ v += 2 * m;
+ }
+ return v;
+}
+
+int avfilter_transform(const uint8_t *src, uint8_t *dst,
+ int src_stride, int dst_stride,
+ int width, int height, const float *matrix,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill)
+{
+ int x, y;
+ float x_s, y_s;
+ uint8_t def = 0;
+ uint8_t (*func)(float, float, const uint8_t *, int, int, int, uint8_t) = NULL;
+
+ switch(interpolate) {
+ case INTERPOLATE_NEAREST:
+ func = interpolate_nearest;
+ break;
+ case INTERPOLATE_BILINEAR:
+ func = interpolate_bilinear;
+ break;
+ case INTERPOLATE_BIQUADRATIC:
+ func = interpolate_biquadratic;
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ for (y = 0; y < height; y++) {
+ for(x = 0; x < width; x++) {
+ x_s = x * matrix[0] + y * matrix[1] + matrix[2];
+ y_s = x * matrix[3] + y * matrix[4] + matrix[5];
+
+ switch(fill) {
+ case FILL_ORIGINAL:
+ def = src[y * src_stride + x];
+ break;
+ case FILL_CLAMP:
+ y_s = av_clipf(y_s, 0, height - 1);
+ x_s = av_clipf(x_s, 0, width - 1);
+ def = src[(int)y_s * src_stride + (int)x_s];
+ break;
+ case FILL_MIRROR:
+ x_s = mirror(x_s, width-1);
+ y_s = mirror(y_s, height-1);
+
+ av_assert2(x_s >= 0 && y_s >= 0);
+ av_assert2(x_s < width && y_s < height);
+ def = src[(int)y_s * src_stride + (int)x_s];
+ }
+
+ dst[y * dst_stride + x] = func(x_s, y_s, src, width, height, src_stride, def);
+ }
+ }
+ return 0;
+}
diff --git a/libavfilter/transform.h b/libavfilter/transform.h
new file mode 100644
index 0000000..07436bf
--- /dev/null
+++ b/libavfilter/transform.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
+ * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_TRANSFORM_H
+#define AVFILTER_TRANSFORM_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * transform input video
+ *
+ * All matrices are defined as a single 9-item block of contiguous memory. For
+ * example, the identity matrix would be:
+ *
+ * float *matrix = {1, 0, 0,
+ * 0, 1, 0,
+ * 0, 0, 1};
+ */
+
+enum InterpolateMethod {
+ INTERPOLATE_NEAREST, //< Nearest-neighbor (fast)
+ INTERPOLATE_BILINEAR, //< Bilinear
+ INTERPOLATE_BIQUADRATIC, //< Biquadratic (best)
+ INTERPOLATE_COUNT, //< Number of interpolation methods
+};
+
+// Shortcuts for the fastest and best interpolation methods
+#define INTERPOLATE_DEFAULT INTERPOLATE_BILINEAR
+#define INTERPOLATE_FAST INTERPOLATE_NEAREST
+#define INTERPOLATE_BEST INTERPOLATE_BIQUADRATIC
+
+enum FillMethod {
+ FILL_BLANK, //< Fill zeroes at blank locations
+ FILL_ORIGINAL, //< Original image at blank locations
+ FILL_CLAMP, //< Extruded edge value at blank locations
+ FILL_MIRROR, //< Mirrored edge at blank locations
+ FILL_COUNT, //< Number of edge fill methods
+};
+
+// Shortcuts for fill methods
+#define FILL_DEFAULT FILL_ORIGINAL
+
+/**
+ * Get an affine transformation matrix from a given translation, rotation, and
+ * zoom factor. The matrix will look like:
+ *
+ * [ zoom * cos(angle), -sin(angle), x_shift,
+ * sin(angle), zoom * cos(angle), y_shift,
+ * 0, 0, 1 ]
+ *
+ * @param x_shift horizontal translation
+ * @param y_shift vertical translation
+ * @param angle rotation in radians
+ * @param zoom scale percent (1.0 = 100%)
+ * @param matrix 9-item affine transformation matrix
+ */
+void avfilter_get_matrix(float x_shift, float y_shift, float angle, float zoom, float *matrix);
+
+/**
+ * Add two matrices together. result = m1 + m2.
+ *
+ * @param m1 9-item transformation matrix
+ * @param m2 9-item transformation matrix
+ * @param result 9-item transformation matrix
+ */
+void avfilter_add_matrix(const float *m1, const float *m2, float *result);
+
+/**
+ * Subtract one matrix from another. result = m1 - m2.
+ *
+ * @param m1 9-item transformation matrix
+ * @param m2 9-item transformation matrix
+ * @param result 9-item transformation matrix
+ */
+void avfilter_sub_matrix(const float *m1, const float *m2, float *result);
+
+/**
+ * Multiply a matrix by a scalar value. result = m1 * scalar.
+ *
+ * @param m1 9-item transformation matrix
+ * @param scalar a number
+ * @param result 9-item transformation matrix
+ */
+void avfilter_mul_matrix(const float *m1, float scalar, float *result);
+
+/**
+ * Do an affine transformation with the given interpolation method. This
+ * multiplies each vector [x,y,1] by the matrix and then interpolates to
+ * get the final value.
+ *
+ * @param src source image
+ * @param dst destination image
+ * @param src_stride source image line size in bytes
+ * @param dst_stride destination image line size in bytes
+ * @param width image width in pixels
+ * @param height image height in pixels
+ * @param matrix 9-item affine transformation matrix
+ * @param interpolate pixel interpolation method
+ * @param fill edge fill method
+ * @return negative on error
+ */
+int avfilter_transform(const uint8_t *src, uint8_t *dst,
+ int src_stride, int dst_stride,
+ int width, int height, const float *matrix,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill);
+
+#endif /* AVFILTER_TRANSFORM_H */
diff --git a/libavfilter/trim.c b/libavfilter/trim.c
index 2b57540..468dc03 100644
--- a/libavfilter/trim.c
+++ b/libavfilter/trim.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -40,9 +40,12 @@ typedef struct TrimContext {
/*
* AVOptions
*/
- double duration;
- double start_time, end_time;
+ int64_t duration;
+ int64_t start_time, end_time;
int64_t start_frame, end_frame;
+
+ double duration_dbl;
+ double start_time_dbl, end_time_dbl;
/*
* in the link timebase for video,
* in 1/samplerate for audio
@@ -70,10 +73,9 @@ typedef struct TrimContext {
int64_t next_pts;
int eof;
- int got_output;
} TrimContext;
-static int init(AVFilterContext *ctx)
+static av_cold int init(AVFilterContext *ctx)
{
TrimContext *s = ctx->priv;
@@ -89,52 +91,53 @@ static int config_input(AVFilterLink *inlink)
AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
inlink->time_base : (AVRational){ 1, inlink->sample_rate };
- if (s->start_time != DBL_MAX) {
- int64_t start_pts = lrintf(s->start_time / av_q2d(tb));
+ if (s->start_time_dbl != DBL_MAX)
+ s->start_time = s->start_time_dbl * 1e6;
+ if (s->end_time_dbl != DBL_MAX)
+ s->end_time = s->end_time_dbl * 1e6;
+ if (s->duration_dbl != 0)
+ s->duration = s->duration_dbl * 1e6;
+
+ if (s->start_time != INT64_MAX) {
+ int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
s->start_pts = start_pts;
}
- if (s->end_time != DBL_MAX) {
- int64_t end_pts = lrintf(s->end_time / av_q2d(tb));
+ if (s->end_time != INT64_MAX) {
+ int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
s->end_pts = end_pts;
}
if (s->duration)
- s->duration_tb = lrintf(s->duration / av_q2d(tb));
+ s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);
return 0;
}
-static int request_frame(AVFilterLink *outlink)
+static int config_output(AVFilterLink *outlink)
{
- AVFilterContext *ctx = outlink->src;
- TrimContext *s = ctx->priv;
- int ret;
-
- s->got_output = 0;
- while (!s->got_output) {
- if (s->eof)
- return AVERROR_EOF;
-
- ret = ff_request_frame(ctx->inputs[0]);
- if (ret < 0)
- return ret;
- }
-
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
return 0;
}
#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
- { "start", "Timestamp in seconds of the first frame that " \
- "should be passed", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
- { "end", "Timestamp in seconds of the first frame that " \
- "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
+ { "starti", "Timestamp of the first frame that " \
+ "should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
+ { "endi", "Timestamp of the first frame that " \
+ "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "start_pts", "Timestamp of the first frame that should be " \
" passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
{ "end_pts", "Timestamp of the first frame that should be " \
"dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
- { "duration", "Maximum duration of the output in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
+ { "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },
+
+#define COMPAT_OPTS \
+ { "start", "Timestamp in seconds of the first frame that " \
+ "should be passed", OFFSET(start_time_dbl),AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
+ { "end", "Timestamp in seconds of the first frame that " \
+ "should be dropped again", OFFSET(end_time_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
+ { "duration", "Maximum duration of the output in seconds", OFFSET(duration_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
#if CONFIG_TRIM_FILTER
@@ -177,13 +180,12 @@ static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
drop = 0;
if (drop) {
- s->eof = 1;
+ s->eof = inlink->closed = 1;
goto drop;
}
}
s->nb_frames++;
- s->got_output = 1;
return ff_filter_frame(ctx->outputs[0], frame);
@@ -193,23 +195,19 @@ drop:
return 0;
}
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
COMMON_OPTS
{ "start_frame", "Number of the first frame that should be passed "
"to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
{ "end_frame", "Number of the first frame that should be dropped "
"again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
- { NULL },
+ COMPAT_OPTS
+ { NULL }
};
#undef FLAGS
-static const AVClass trim_class = {
- .class_name = "trim",
- .item_name = av_default_item_name,
- .option = trim_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(trim);
static const AVFilterPad trim_inputs[] = {
{
@@ -223,9 +221,9 @@ static const AVFilterPad trim_inputs[] = {
static const AVFilterPad trim_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
},
{ NULL }
};
@@ -233,12 +231,9 @@ static const AVFilterPad trim_outputs[] = {
AVFilter ff_vf_trim = {
.name = "trim",
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
-
.init = init,
-
.priv_size = sizeof(TrimContext),
.priv_class = &trim_class,
-
.inputs = trim_inputs,
.outputs = trim_outputs,
};
@@ -249,7 +244,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
TrimContext *s = ctx->priv;
- int64_t start_sample, end_sample = frame->nb_samples;
+ int64_t start_sample, end_sample;
int64_t pts;
int drop;
@@ -317,7 +312,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
if (drop) {
- s->eof = 1;
+ s->eof = inlink->closed = 1;
goto drop;
}
}
@@ -325,7 +320,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
s->nb_samples += frame->nb_samples;
start_sample = FFMAX(0, start_sample);
end_sample = FFMIN(frame->nb_samples, end_sample);
- av_assert0(start_sample < end_sample);
+ av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));
if (start_sample) {
AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
@@ -336,7 +331,7 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
av_frame_copy_props(out, frame);
av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
- out->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout),
+ out->nb_samples, inlink->channels,
frame->format);
if (out->pts != AV_NOPTS_VALUE)
out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
@@ -347,7 +342,6 @@ static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
} else
frame->nb_samples = end_sample;
- s->got_output = 1;
return ff_filter_frame(ctx->outputs[0], frame);
drop:
@@ -356,23 +350,19 @@ drop:
return 0;
}
-#define FLAGS AV_OPT_FLAG_AUDIO_PARAM
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
COMMON_OPTS
{ "start_sample", "Number of the first audio sample that should be "
"passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
{ "end_sample", "Number of the first audio sample that should be "
"dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
- { NULL },
+ COMPAT_OPTS
+ { NULL }
};
#undef FLAGS
-static const AVClass atrim_class = {
- .class_name = "atrim",
- .item_name = av_default_item_name,
- .option = atrim_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(atrim);
static const AVFilterPad atrim_inputs[] = {
{
@@ -386,9 +376,9 @@ static const AVFilterPad atrim_inputs[] = {
static const AVFilterPad atrim_outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_AUDIO,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output,
},
{ NULL }
};
@@ -396,12 +386,9 @@ static const AVFilterPad atrim_outputs[] = {
AVFilter ff_af_atrim = {
.name = "atrim",
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
-
.init = init,
-
.priv_size = sizeof(TrimContext),
.priv_class = &atrim_class,
-
.inputs = atrim_inputs,
.outputs = atrim_outputs,
};
diff --git a/libavfilter/unsharp.h b/libavfilter/unsharp.h
new file mode 100644
index 0000000..c2aed64
--- /dev/null
+++ b/libavfilter/unsharp.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_UNSHARP_H
+#define AVFILTER_UNSHARP_H
+
+#include "config.h"
+#include "avfilter.h"
+#if CONFIG_OPENCL
+#include "libavutil/opencl.h"
+#endif
+
+#define MIN_MATRIX_SIZE 3
+#define MAX_MATRIX_SIZE 63
+
+#if CONFIG_OPENCL
+
+typedef struct {
+ cl_command_queue command_queue;
+ cl_program program;
+ cl_kernel kernel_default;
+ cl_kernel kernel_luma;
+ cl_kernel kernel_chroma;
+ cl_mem cl_luma_mask;
+ cl_mem cl_chroma_mask;
+ int in_plane_size[8];
+ int out_plane_size[8];
+ int plane_num;
+ cl_mem cl_inbuf;
+ size_t cl_inbuf_size;
+ cl_mem cl_outbuf;
+ size_t cl_outbuf_size;
+ int use_fast_kernels;
+} UnsharpOpenclContext;
+
+#endif
+
+typedef struct UnsharpFilterParam {
+ int msize_x; ///< matrix width
+ int msize_y; ///< matrix height
+ int amount; ///< effect amount
+ int steps_x; ///< horizontal step count
+ int steps_y; ///< vertical step count
+ int scalebits; ///< bits to shift pixel
+ int32_t halfscale; ///< amount to add to pixel
+ uint32_t *sc[MAX_MATRIX_SIZE - 1]; ///< finite state machine storage
+} UnsharpFilterParam;
+
+typedef struct UnsharpContext {
+ const AVClass *class;
+ int lmsize_x, lmsize_y, cmsize_x, cmsize_y;
+ float lamount, camount;
+ UnsharpFilterParam luma; ///< luma parameters (width, height, amount)
+ UnsharpFilterParam chroma; ///< chroma parameters (width, height, amount)
+ int hsub, vsub;
+ int opencl;
+#if CONFIG_OPENCL
+ UnsharpOpenclContext opencl_ctx;
+#endif
+ int (* apply_unsharp)(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+} UnsharpContext;
+
+#endif /* AVFILTER_UNSHARP_H */
diff --git a/libavfilter/unsharp_opencl.c b/libavfilter/unsharp_opencl.c
new file mode 100644
index 0000000..5c6b5ef
--- /dev/null
+++ b/libavfilter/unsharp_opencl.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * unsharp input video
+ */
+
+#include "unsharp_opencl.h"
+#include "libavutil/common.h"
+#include "libavutil/opencl_internal.h"
+
+#define PLANE_NUM 3
+#define ROUND_TO_16(a) (((((a) - 1)/16)+1)*16)
+
+static inline void add_mask_counter(uint32_t *dst, uint32_t *counter1, uint32_t *counter2, int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ dst[i] = counter1[i] + counter2[i];
+ }
+}
+
+static int compute_mask(int step, uint32_t *mask)
+{
+ int i, z, ret = 0;
+ int counter_size = sizeof(uint32_t) * (2 * step + 1);
+ uint32_t *temp1_counter, *temp2_counter, **counter;
+ temp1_counter = av_mallocz(counter_size);
+ if (!temp1_counter) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ temp2_counter = av_mallocz(counter_size);
+ if (!temp2_counter) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ counter = av_mallocz_array(2 * step + 1, sizeof(uint32_t *));
+ if (!counter) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ for (i = 0; i < 2 * step + 1; i++) {
+ counter[i] = av_mallocz(counter_size);
+ if (!counter[i]) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ }
+ for (i = 0; i < 2 * step + 1; i++) {
+ memset(temp1_counter, 0, counter_size);
+ temp1_counter[i] = 1;
+ for (z = 0; z < step * 2; z += 2) {
+ add_mask_counter(temp2_counter, counter[z], temp1_counter, step * 2);
+ memcpy(counter[z], temp1_counter, counter_size);
+ add_mask_counter(temp1_counter, counter[z + 1], temp2_counter, step * 2);
+ memcpy(counter[z + 1], temp2_counter, counter_size);
+ }
+ }
+ memcpy(mask, temp1_counter, counter_size);
+end:
+ av_freep(&temp1_counter);
+ av_freep(&temp2_counter);
+ for (i = 0; i < 2 * step + 1; i++) {
+ av_freep(&counter[i]);
+ }
+ av_freep(&counter);
+ return ret;
+}
+
+static int compute_mask_matrix(cl_mem cl_mask_matrix, int step_x, int step_y)
+{
+ int i, j, ret = 0;
+ uint32_t *mask_matrix, *mask_x, *mask_y;
+ size_t size_matrix = sizeof(uint32_t) * (2 * step_x + 1) * (2 * step_y + 1);
+ mask_x = av_mallocz_array(2 * step_x + 1, sizeof(uint32_t));
+ if (!mask_x) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ mask_y = av_mallocz_array(2 * step_y + 1, sizeof(uint32_t));
+ if (!mask_y) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ mask_matrix = av_mallocz(size_matrix);
+ if (!mask_matrix) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ ret = compute_mask(step_x, mask_x);
+ if (ret < 0)
+ goto end;
+ ret = compute_mask(step_y, mask_y);
+ if (ret < 0)
+ goto end;
+ for (j = 0; j < 2 * step_y + 1; j++) {
+ for (i = 0; i < 2 * step_x + 1; i++) {
+ mask_matrix[i + j * (2 * step_x + 1)] = mask_y[j] * mask_x[i];
+ }
+ }
+ ret = av_opencl_buffer_write(cl_mask_matrix, (uint8_t *)mask_matrix, size_matrix);
+end:
+ av_freep(&mask_x);
+ av_freep(&mask_y);
+ av_freep(&mask_matrix);
+ return ret;
+}
+
+static int generate_mask(AVFilterContext *ctx)
+{
+ UnsharpContext *unsharp = ctx->priv;
+ int i, ret = 0, step_x[2], step_y[2];
+ cl_mem mask_matrix[2];
+ mask_matrix[0] = unsharp->opencl_ctx.cl_luma_mask;
+ mask_matrix[1] = unsharp->opencl_ctx.cl_chroma_mask;
+ step_x[0] = unsharp->luma.steps_x;
+ step_x[1] = unsharp->chroma.steps_x;
+ step_y[0] = unsharp->luma.steps_y;
+ step_y[1] = unsharp->chroma.steps_y;
+
+ /* use default kernel if any matrix dim larger than 8 due to limited local mem size */
+ if (step_x[0]>8 || step_x[1]>8 || step_y[0]>8 || step_y[1]>8)
+ unsharp->opencl_ctx.use_fast_kernels = 0;
+ else
+ unsharp->opencl_ctx.use_fast_kernels = 1;
+
+ if (!mask_matrix[0] || !mask_matrix[1]) {
+ av_log(ctx, AV_LOG_ERROR, "Luma mask and chroma mask should not be NULL\n");
+ return AVERROR(EINVAL);
+ }
+ for (i = 0; i < 2; i++) {
+ ret = compute_mask_matrix(mask_matrix[i], step_x[i], step_y[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+int ff_opencl_apply_unsharp(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ int ret;
+ AVFilterLink *link = ctx->inputs[0];
+ UnsharpContext *unsharp = ctx->priv;
+ cl_int status;
+ FFOpenclParam kernel1 = {0};
+ FFOpenclParam kernel2 = {0};
+ int width = link->w;
+ int height = link->h;
+ int cw = FF_CEIL_RSHIFT(link->w, unsharp->hsub);
+ int ch = FF_CEIL_RSHIFT(link->h, unsharp->vsub);
+ size_t globalWorkSize1d = width * height + 2 * ch * cw;
+ size_t globalWorkSize2dLuma[2];
+ size_t globalWorkSize2dChroma[2];
+ size_t localWorkSize2d[2] = {16, 16};
+
+ if (unsharp->opencl_ctx.use_fast_kernels) {
+ globalWorkSize2dLuma[0] = (size_t)ROUND_TO_16(width);
+ globalWorkSize2dLuma[1] = (size_t)ROUND_TO_16(height);
+ globalWorkSize2dChroma[0] = (size_t)ROUND_TO_16(cw);
+ globalWorkSize2dChroma[1] = (size_t)(2*ROUND_TO_16(ch));
+
+ kernel1.ctx = ctx;
+ kernel1.kernel = unsharp->opencl_ctx.kernel_luma;
+ ret = avpriv_opencl_set_parameter(&kernel1,
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_luma_mask),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.halfscale),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(width),
+ FF_OPENCL_PARAM_INFO(height),
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ kernel2.ctx = ctx;
+ kernel2.kernel = unsharp->opencl_ctx.kernel_chroma;
+ ret = avpriv_opencl_set_parameter(&kernel2,
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_chroma_mask),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.halfscale),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(in->linesize[1]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[1]),
+ FF_OPENCL_PARAM_INFO(link->w),
+ FF_OPENCL_PARAM_INFO(link->h),
+ FF_OPENCL_PARAM_INFO(cw),
+ FF_OPENCL_PARAM_INFO(ch),
+ NULL);
+ if (ret < 0)
+ return ret;
+ status = clEnqueueNDRangeKernel(unsharp->opencl_ctx.command_queue,
+ unsharp->opencl_ctx.kernel_luma, 2, NULL,
+ globalWorkSize2dLuma, localWorkSize2d, 0, NULL, NULL);
+ status |=clEnqueueNDRangeKernel(unsharp->opencl_ctx.command_queue,
+ unsharp->opencl_ctx.kernel_chroma, 2, NULL,
+ globalWorkSize2dChroma, localWorkSize2d, 0, NULL, NULL);
+ if (status != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
+ return AVERROR_EXTERNAL;
+ }
+ } else { /* use default kernel */
+ kernel1.ctx = ctx;
+ kernel1.kernel = unsharp->opencl_ctx.kernel_default;
+
+ ret = avpriv_opencl_set_parameter(&kernel1,
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_inbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_outbuf),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_luma_mask),
+ FF_OPENCL_PARAM_INFO(unsharp->opencl_ctx.cl_chroma_mask),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.amount),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.steps_x),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.steps_y),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.steps_x),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.steps_y),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.scalebits),
+ FF_OPENCL_PARAM_INFO(unsharp->luma.halfscale),
+ FF_OPENCL_PARAM_INFO(unsharp->chroma.halfscale),
+ FF_OPENCL_PARAM_INFO(in->linesize[0]),
+ FF_OPENCL_PARAM_INFO(in->linesize[1]),
+ FF_OPENCL_PARAM_INFO(out->linesize[0]),
+ FF_OPENCL_PARAM_INFO(out->linesize[1]),
+ FF_OPENCL_PARAM_INFO(link->h),
+ FF_OPENCL_PARAM_INFO(link->w),
+ FF_OPENCL_PARAM_INFO(ch),
+ FF_OPENCL_PARAM_INFO(cw),
+ NULL);
+ if (ret < 0)
+ return ret;
+ status = clEnqueueNDRangeKernel(unsharp->opencl_ctx.command_queue,
+ unsharp->opencl_ctx.kernel_default, 1, NULL,
+ &globalWorkSize1d, NULL, 0, NULL, NULL);
+ if (status != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL run kernel error occurred: %s\n", av_opencl_errstr(status));
+ return AVERROR_EXTERNAL;
+ }
+ }
+ clFinish(unsharp->opencl_ctx.command_queue);
+ return av_opencl_buffer_read_image(out->data, unsharp->opencl_ctx.out_plane_size,
+ unsharp->opencl_ctx.plane_num, unsharp->opencl_ctx.cl_outbuf,
+ unsharp->opencl_ctx.cl_outbuf_size);
+}
+
+int ff_opencl_unsharp_init(AVFilterContext *ctx)
+{
+ int ret = 0;
+ char build_opts[96];
+ UnsharpContext *unsharp = ctx->priv;
+ ret = av_opencl_init(NULL);
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_luma_mask,
+ sizeof(uint32_t) * (2 * unsharp->luma.steps_x + 1) * (2 * unsharp->luma.steps_y + 1),
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_chroma_mask,
+ sizeof(uint32_t) * (2 * unsharp->chroma.steps_x + 1) * (2 * unsharp->chroma.steps_y + 1),
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ ret = generate_mask(ctx);
+ if (ret < 0)
+ return ret;
+ unsharp->opencl_ctx.plane_num = PLANE_NUM;
+ unsharp->opencl_ctx.command_queue = av_opencl_get_command_queue();
+ if (!unsharp->opencl_ctx.command_queue) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to get OpenCL command queue in filter 'unsharp'\n");
+ return AVERROR(EINVAL);
+ }
+ snprintf(build_opts, 96, "-D LU_RADIUS_X=%d -D LU_RADIUS_Y=%d -D CH_RADIUS_X=%d -D CH_RADIUS_Y=%d",
+ 2*unsharp->luma.steps_x+1, 2*unsharp->luma.steps_y+1, 2*unsharp->chroma.steps_x+1, 2*unsharp->chroma.steps_y+1);
+ unsharp->opencl_ctx.program = av_opencl_compile("unsharp", build_opts);
+ if (!unsharp->opencl_ctx.program) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to compile program 'unsharp'\n");
+ return AVERROR(EINVAL);
+ }
+ if (unsharp->opencl_ctx.use_fast_kernels) {
+ if (!unsharp->opencl_ctx.kernel_luma) {
+ unsharp->opencl_ctx.kernel_luma = clCreateKernel(unsharp->opencl_ctx.program, "unsharp_luma", &ret);
+ if (ret != CL_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'unsharp_luma'\n");
+ return ret;
+ }
+ }
+ if (!unsharp->opencl_ctx.kernel_chroma) {
+ unsharp->opencl_ctx.kernel_chroma = clCreateKernel(unsharp->opencl_ctx.program, "unsharp_chroma", &ret);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'unsharp_chroma'\n");
+ return ret;
+ }
+ }
+ }
+ else {
+ if (!unsharp->opencl_ctx.kernel_default) {
+ unsharp->opencl_ctx.kernel_default = clCreateKernel(unsharp->opencl_ctx.program, "unsharp_default", &ret);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL failed to create kernel 'unsharp_default'\n");
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+void ff_opencl_unsharp_uninit(AVFilterContext *ctx)
+{
+ UnsharpContext *unsharp = ctx->priv;
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_inbuf);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_outbuf);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_luma_mask);
+ av_opencl_buffer_release(&unsharp->opencl_ctx.cl_chroma_mask);
+ clReleaseKernel(unsharp->opencl_ctx.kernel_default);
+ clReleaseKernel(unsharp->opencl_ctx.kernel_luma);
+ clReleaseKernel(unsharp->opencl_ctx.kernel_chroma);
+ clReleaseProgram(unsharp->opencl_ctx.program);
+ unsharp->opencl_ctx.command_queue = NULL;
+ av_opencl_uninit();
+}
+
+int ff_opencl_unsharp_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ int ret = 0;
+ AVFilterLink *link = ctx->inputs[0];
+ UnsharpContext *unsharp = ctx->priv;
+ int ch = FF_CEIL_RSHIFT(link->h, unsharp->vsub);
+
+ if ((!unsharp->opencl_ctx.cl_inbuf) || (!unsharp->opencl_ctx.cl_outbuf)) {
+ unsharp->opencl_ctx.in_plane_size[0] = (in->linesize[0] * in->height);
+ unsharp->opencl_ctx.in_plane_size[1] = (in->linesize[1] * ch);
+ unsharp->opencl_ctx.in_plane_size[2] = (in->linesize[2] * ch);
+ unsharp->opencl_ctx.out_plane_size[0] = (out->linesize[0] * out->height);
+ unsharp->opencl_ctx.out_plane_size[1] = (out->linesize[1] * ch);
+ unsharp->opencl_ctx.out_plane_size[2] = (out->linesize[2] * ch);
+ unsharp->opencl_ctx.cl_inbuf_size = unsharp->opencl_ctx.in_plane_size[0] +
+ unsharp->opencl_ctx.in_plane_size[1] +
+ unsharp->opencl_ctx.in_plane_size[2];
+ unsharp->opencl_ctx.cl_outbuf_size = unsharp->opencl_ctx.out_plane_size[0] +
+ unsharp->opencl_ctx.out_plane_size[1] +
+ unsharp->opencl_ctx.out_plane_size[2];
+ if (!unsharp->opencl_ctx.cl_inbuf) {
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_inbuf,
+ unsharp->opencl_ctx.cl_inbuf_size,
+ CL_MEM_READ_ONLY, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ if (!unsharp->opencl_ctx.cl_outbuf) {
+ ret = av_opencl_buffer_create(&unsharp->opencl_ctx.cl_outbuf,
+ unsharp->opencl_ctx.cl_outbuf_size,
+ CL_MEM_READ_WRITE, NULL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return av_opencl_buffer_write_image(unsharp->opencl_ctx.cl_inbuf,
+ unsharp->opencl_ctx.cl_inbuf_size,
+ 0, in->data, unsharp->opencl_ctx.in_plane_size,
+ unsharp->opencl_ctx.plane_num);
+}
diff --git a/libavfilter/unsharp_opencl.h b/libavfilter/unsharp_opencl.h
new file mode 100644
index 0000000..3aefab6
--- /dev/null
+++ b/libavfilter/unsharp_opencl.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_UNSHARP_OPENCL_H
+#define AVFILTER_UNSHARP_OPENCL_H
+
+#include "unsharp.h"
+
+int ff_opencl_unsharp_init(AVFilterContext *ctx);
+
+void ff_opencl_unsharp_uninit(AVFilterContext *ctx);
+
+int ff_opencl_unsharp_process_inout_buf(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+
+int ff_opencl_apply_unsharp(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
+
+#endif /* AVFILTER_UNSHARP_OPENCL_H */
diff --git a/libavfilter/unsharp_opencl_kernel.h b/libavfilter/unsharp_opencl_kernel.h
new file mode 100644
index 0000000..9c4fd65
--- /dev/null
+++ b/libavfilter/unsharp_opencl_kernel.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2013 Wei Gao <weigao@multicorewareinc.com>
+ * Copyright (C) 2013 Lenny Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_UNSHARP_OPENCL_KERNEL_H
+#define AVFILTER_UNSHARP_OPENCL_KERNEL_H
+
+#include "libavutil/opencl.h"
+
+const char *ff_kernel_unsharp_opencl = AV_OPENCL_KERNEL(
+inline unsigned char clip_uint8(int a)
+{
+ if (a & (~0xFF))
+ return (-a)>>31;
+ else
+ return a;
+}
+
+kernel void unsharp_luma(
+ global unsigned char *src,
+ global unsigned char *dst,
+ global int *mask,
+ int amount,
+ int scalebits,
+ int halfscale,
+ int src_stride,
+ int dst_stride,
+ int width,
+ int height)
+{
+ int2 threadIdx, blockIdx, globalIdx;
+ threadIdx.x = get_local_id(0);
+ threadIdx.y = get_local_id(1);
+ blockIdx.x = get_group_id(0);
+ blockIdx.y = get_group_id(1);
+ globalIdx.x = get_global_id(0);
+ globalIdx.y = get_global_id(1);
+
+ if (!amount) {
+ if (globalIdx.x < width && globalIdx.y < height)
+ dst[globalIdx.x + globalIdx.y*dst_stride] = src[globalIdx.x + globalIdx.y*src_stride];
+ return;
+ }
+
+ local uchar l[32][32];
+ local int lc[LU_RADIUS_X*LU_RADIUS_Y];
+ int indexIx, indexIy, i, j;
+
+ for(i = 0; i <= 1; i++) {
+ indexIy = -8 + (blockIdx.y + i) * 16 + threadIdx.y;
+ indexIy = indexIy < 0 ? 0 : indexIy;
+ indexIy = indexIy >= height ? height - 1: indexIy;
+ for(j = 0; j <= 1; j++) {
+ indexIx = -8 + (blockIdx.x + j) * 16 + threadIdx.x;
+ indexIx = indexIx < 0 ? 0 : indexIx;
+ indexIx = indexIx >= width ? width - 1: indexIx;
+ l[i*16 + threadIdx.y][j*16 + threadIdx.x] = src[indexIy*src_stride + indexIx];
+ }
+ }
+
+ int indexL = threadIdx.y*16 + threadIdx.x;
+ if (indexL < LU_RADIUS_X*LU_RADIUS_Y)
+ lc[indexL] = mask[indexL];
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ int idx, idy, maskIndex;
+ int sum = 0;
+ int steps_x = LU_RADIUS_X/2;
+ int steps_y = LU_RADIUS_Y/2;
+
+ \n#pragma unroll\n
+ for (i = -steps_y; i <= steps_y; i++) {
+ idy = 8 + i + threadIdx.y;
+ \n#pragma unroll\n
+ for (j = -steps_x; j <= steps_x; j++) {
+ idx = 8 + j + threadIdx.x;
+ maskIndex = (i + steps_y)*LU_RADIUS_X + j + steps_x;
+ sum += (int)l[idy][idx] * lc[maskIndex];
+ }
+ }
+ int temp = (int)l[threadIdx.y + 8][threadIdx.x + 8];
+ int res = temp + (((temp - (int)((sum + halfscale) >> scalebits)) * amount) >> 16);
+ if (globalIdx.x < width && globalIdx.y < height)
+ dst[globalIdx.x + globalIdx.y*dst_stride] = clip_uint8(res);
+}
+
+kernel void unsharp_chroma(
+ global unsigned char *src_y,
+ global unsigned char *dst_y,
+ global int *mask,
+ int amount,
+ int scalebits,
+ int halfscale,
+ int src_stride_lu,
+ int src_stride_ch,
+ int dst_stride_lu,
+ int dst_stride_ch,
+ int width,
+ int height,
+ int cw,
+ int ch)
+{
+ global unsigned char *dst_u = dst_y + height * dst_stride_lu;
+ global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
+ global unsigned char *src_u = src_y + height * src_stride_lu;
+ global unsigned char *src_v = src_u + ch * src_stride_ch;
+ int2 threadIdx, blockIdx, globalIdx;
+ threadIdx.x = get_local_id(0);
+ threadIdx.y = get_local_id(1);
+ blockIdx.x = get_group_id(0);
+ blockIdx.y = get_group_id(1);
+ globalIdx.x = get_global_id(0);
+ globalIdx.y = get_global_id(1);
+ int padch = get_global_size(1)/2;
+ global unsigned char *src = globalIdx.y>=padch ? src_v : src_u;
+ global unsigned char *dst = globalIdx.y>=padch ? dst_v : dst_u;
+
+ blockIdx.y = globalIdx.y>=padch ? blockIdx.y - get_num_groups(1)/2 : blockIdx.y;
+ globalIdx.y = globalIdx.y>=padch ? globalIdx.y - padch : globalIdx.y;
+
+ if (!amount) {
+ if (globalIdx.x < cw && globalIdx.y < ch)
+ dst[globalIdx.x + globalIdx.y*dst_stride_ch] = src[globalIdx.x + globalIdx.y*src_stride_ch];
+ return;
+ }
+
+ local uchar l[32][32];
+ local int lc[CH_RADIUS_X*CH_RADIUS_Y];
+ int indexIx, indexIy, i, j;
+ for(i = 0; i <= 1; i++) {
+ indexIy = -8 + (blockIdx.y + i) * 16 + threadIdx.y;
+ indexIy = indexIy < 0 ? 0 : indexIy;
+ indexIy = indexIy >= ch ? ch - 1: indexIy;
+ for(j = 0; j <= 1; j++) {
+ indexIx = -8 + (blockIdx.x + j) * 16 + threadIdx.x;
+ indexIx = indexIx < 0 ? 0 : indexIx;
+ indexIx = indexIx >= cw ? cw - 1: indexIx;
+ l[i*16 + threadIdx.y][j*16 + threadIdx.x] = src[indexIy * src_stride_ch + indexIx];
+ }
+ }
+
+ int indexL = threadIdx.y*16 + threadIdx.x;
+ if (indexL < CH_RADIUS_X*CH_RADIUS_Y)
+ lc[indexL] = mask[indexL];
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ int idx, idy, maskIndex;
+ int sum = 0;
+ int steps_x = CH_RADIUS_X/2;
+ int steps_y = CH_RADIUS_Y/2;
+
+ \n#pragma unroll\n
+ for (i = -steps_y; i <= steps_y; i++) {
+ idy = 8 + i + threadIdx.y;
+ \n#pragma unroll\n
+ for (j = -steps_x; j <= steps_x; j++) {
+ idx = 8 + j + threadIdx.x;
+ maskIndex = (i + steps_y)*CH_RADIUS_X + j + steps_x;
+ sum += (int)l[idy][idx] * lc[maskIndex];
+ }
+ }
+ int temp = (int)l[threadIdx.y + 8][threadIdx.x + 8];
+ int res = temp + (((temp - (int)((sum + halfscale) >> scalebits)) * amount) >> 16);
+ if (globalIdx.x < cw && globalIdx.y < ch)
+ dst[globalIdx.x + globalIdx.y*dst_stride_ch] = clip_uint8(res);
+}
+
+kernel void unsharp_default(global unsigned char *src,
+ global unsigned char *dst,
+ const global unsigned int *mask_lu,
+ const global unsigned int *mask_ch,
+ int amount_lu,
+ int amount_ch,
+ int step_x_lu,
+ int step_y_lu,
+ int step_x_ch,
+ int step_y_ch,
+ int scalebits_lu,
+ int scalebits_ch,
+ int halfscale_lu,
+ int halfscale_ch,
+ int src_stride_lu,
+ int src_stride_ch,
+ int dst_stride_lu,
+ int dst_stride_ch,
+ int height,
+ int width,
+ int ch,
+ int cw)
+{
+ global unsigned char *dst_y = dst;
+ global unsigned char *dst_u = dst_y + height * dst_stride_lu;
+ global unsigned char *dst_v = dst_u + ch * dst_stride_ch;
+
+ global unsigned char *src_y = src;
+ global unsigned char *src_u = src_y + height * src_stride_lu;
+ global unsigned char *src_v = src_u + ch * src_stride_ch;
+
+ global unsigned char *temp_dst;
+ global unsigned char *temp_src;
+ const global unsigned int *temp_mask;
+ int global_id = get_global_id(0);
+ int i, j, x, y, temp_src_stride, temp_dst_stride, temp_height, temp_width, temp_steps_x, temp_steps_y,
+ temp_amount, temp_scalebits, temp_halfscale, sum, idx_x, idx_y, temp, res;
+ if (global_id < width * height) {
+ y = global_id / width;
+ x = global_id % width;
+ temp_dst = dst_y;
+ temp_src = src_y;
+ temp_src_stride = src_stride_lu;
+ temp_dst_stride = dst_stride_lu;
+ temp_height = height;
+ temp_width = width;
+ temp_steps_x = step_x_lu;
+ temp_steps_y = step_y_lu;
+ temp_mask = mask_lu;
+ temp_amount = amount_lu;
+ temp_scalebits = scalebits_lu;
+ temp_halfscale = halfscale_lu;
+ } else if ((global_id >= width * height) && (global_id < width * height + ch * cw)) {
+ y = (global_id - width * height) / cw;
+ x = (global_id - width * height) % cw;
+ temp_dst = dst_u;
+ temp_src = src_u;
+ temp_src_stride = src_stride_ch;
+ temp_dst_stride = dst_stride_ch;
+ temp_height = ch;
+ temp_width = cw;
+ temp_steps_x = step_x_ch;
+ temp_steps_y = step_y_ch;
+ temp_mask = mask_ch;
+ temp_amount = amount_ch;
+ temp_scalebits = scalebits_ch;
+ temp_halfscale = halfscale_ch;
+ } else {
+ y = (global_id - width * height - ch * cw) / cw;
+ x = (global_id - width * height - ch * cw) % cw;
+ temp_dst = dst_v;
+ temp_src = src_v;
+ temp_src_stride = src_stride_ch;
+ temp_dst_stride = dst_stride_ch;
+ temp_height = ch;
+ temp_width = cw;
+ temp_steps_x = step_x_ch;
+ temp_steps_y = step_y_ch;
+ temp_mask = mask_ch;
+ temp_amount = amount_ch;
+ temp_scalebits = scalebits_ch;
+ temp_halfscale = halfscale_ch;
+ }
+ if (temp_amount) {
+ sum = 0;
+ for (j = 0; j <= 2 * temp_steps_y; j++) {
+ idx_y = (y - temp_steps_y + j) <= 0 ? 0 : (y - temp_steps_y + j) >= temp_height ? temp_height-1 : y - temp_steps_y + j;
+ for (i = 0; i <= 2 * temp_steps_x; i++) {
+ idx_x = (x - temp_steps_x + i) <= 0 ? 0 : (x - temp_steps_x + i) >= temp_width ? temp_width-1 : x - temp_steps_x + i;
+ sum += temp_mask[i + j * (2 * temp_steps_x + 1)] * temp_src[idx_x + idx_y * temp_src_stride];
+ }
+ }
+ temp = (int)temp_src[x + y * temp_src_stride];
+ res = temp + (((temp - (int)((sum + temp_halfscale) >> temp_scalebits)) * temp_amount) >> 16);
+ temp_dst[x + y * temp_dst_stride] = clip_uint8(res);
+ } else {
+ temp_dst[x + y * temp_dst_stride] = temp_src[x + y * temp_src_stride];
+ }
+}
+);
+
+#endif /* AVFILTER_UNSHARP_OPENCL_KERNEL_H */
diff --git a/libavfilter/version.h b/libavfilter/version.h
index 70b08e5..6f61aee 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -1,20 +1,20 @@
/*
* Version macros.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,8 +30,8 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 5
-#define LIBAVFILTER_VERSION_MINOR 0
-#define LIBAVFILTER_VERSION_MICRO 0
+#define LIBAVFILTER_VERSION_MINOR 2
+#define LIBAVFILTER_VERSION_MICRO 103
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
@@ -41,6 +41,8 @@
LIBAVFILTER_VERSION_MICRO)
#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT
+#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION)
+
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
@@ -68,6 +70,9 @@
#ifndef FF_API_OLD_FILTER_REGISTER
#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 6)
#endif
+#ifndef FF_API_OLD_GRAPH_PARSE
+#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
#ifndef FF_API_NOCONST_GET_NAME
#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 6)
#endif
diff --git a/libavfilter/vf_alphamerge.c b/libavfilter/vf_alphamerge.c
new file mode 100644
index 0000000..5f0da35
--- /dev/null
+++ b/libavfilter/vf_alphamerge.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2012 Steven Robertson
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * copy an alpha component from another video's luma
+ */
+
+#include <string.h>
+
+#include "libavutil/pixfmt.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum { Y, U, V, A };
+
+typedef struct {
+ int frame_requested;
+ int is_packed_rgb;
+ uint8_t rgba_map[4];
+ struct FFBufQueue queue_main;
+ struct FFBufQueue queue_alpha;
+} AlphaMergeContext;
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AlphaMergeContext *merge = ctx->priv;
+ ff_bufqueue_discard_all(&merge->queue_main);
+ ff_bufqueue_discard_all(&merge->queue_alpha);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat main_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat alpha_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+ AVFilterFormats *main_formats = ff_make_format_list(main_fmts);
+ AVFilterFormats *alpha_formats = ff_make_format_list(alpha_fmts);
+ ff_formats_ref(main_formats, &ctx->inputs[0]->out_formats);
+ ff_formats_ref(alpha_formats, &ctx->inputs[1]->out_formats);
+ ff_formats_ref(main_formats, &ctx->outputs[0]->in_formats);
+ return 0;
+}
+
+static int config_input_main(AVFilterLink *inlink)
+{
+ AlphaMergeContext *merge = inlink->dst->priv;
+ merge->is_packed_rgb =
+ ff_fill_rgba_map(merge->rgba_map, inlink->format) >= 0;
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *mainlink = ctx->inputs[0];
+ AVFilterLink *alphalink = ctx->inputs[1];
+ if (mainlink->w != alphalink->w || mainlink->h != alphalink->h) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Input frame sizes do not match (%dx%d vs %dx%d).\n",
+ mainlink->w, mainlink->h,
+ alphalink->w, alphalink->h);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = mainlink->w;
+ outlink->h = mainlink->h;
+ outlink->time_base = mainlink->time_base;
+ outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
+ outlink->frame_rate = mainlink->frame_rate;
+ return 0;
+}
+
+static void draw_frame(AVFilterContext *ctx,
+ AVFrame *main_buf,
+ AVFrame *alpha_buf)
+{
+ AlphaMergeContext *merge = ctx->priv;
+ int h = main_buf->height;
+
+ if (merge->is_packed_rgb) {
+ int x, y;
+ uint8_t *pin, *pout;
+ for (y = 0; y < h; y++) {
+ pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
+ pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
+ for (x = 0; x < main_buf->width; x++) {
+ *pout = *pin;
+ pin += 1;
+ pout += 4;
+ }
+ }
+ } else {
+ int y;
+ const int main_linesize = main_buf->linesize[A];
+ const int alpha_linesize = alpha_buf->linesize[Y];
+ for (y = 0; y < h && y < alpha_buf->height; y++) {
+ memcpy(main_buf->data[A] + y * main_linesize,
+ alpha_buf->data[Y] + y * alpha_linesize,
+ FFMIN(main_linesize, alpha_linesize));
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AlphaMergeContext *merge = ctx->priv;
+
+ int ret = 0;
+ int is_alpha = (inlink == ctx->inputs[1]);
+ struct FFBufQueue *queue =
+ (is_alpha ? &merge->queue_alpha : &merge->queue_main);
+ ff_bufqueue_add(ctx, queue, buf);
+
+ do {
+ AVFrame *main_buf, *alpha_buf;
+
+ if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
+ !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;
+
+ main_buf = ff_bufqueue_get(&merge->queue_main);
+ alpha_buf = ff_bufqueue_get(&merge->queue_alpha);
+
+ merge->frame_requested = 0;
+ draw_frame(ctx, main_buf, alpha_buf);
+ ret = ff_filter_frame(ctx->outputs[0], main_buf);
+ av_frame_free(&alpha_buf);
+ } while (ret >= 0);
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AlphaMergeContext *merge = ctx->priv;
+ int in, ret;
+
+ merge->frame_requested = 1;
+ while (merge->frame_requested) {
+ in = ff_bufqueue_peek(&merge->queue_main, 0) ? 1 : 0;
+ ret = ff_request_frame(ctx->inputs[in]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static const AVFilterPad alphamerge_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input_main,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
+ },{
+ .name = "alpha",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad alphamerge_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_alphamerge = {
+ .name = "alphamerge",
+ .description = NULL_IF_CONFIG_SMALL("Copy the luma value of the second "
+ "input into the alpha channel of the first input."),
+ .uninit = uninit,
+ .priv_size = sizeof(AlphaMergeContext),
+ .query_formats = query_formats,
+ .inputs = alphamerge_inputs,
+ .outputs = alphamerge_outputs,
+};
diff --git a/libavfilter/vf_aspect.c b/libavfilter/vf_aspect.c
index 2c28213..84dbee9 100644
--- a/libavfilter/vf_aspect.c
+++ b/libavfilter/vf_aspect.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2010 Bobby Bingham
-
- * This file is part of Libav.
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -37,9 +37,6 @@
#include "video.h"
static const char *const var_names[] = {
- "PI",
- "PHI",
- "E",
"w",
"h",
"a", "dar",
@@ -50,9 +47,6 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_PI,
- VAR_PHI,
- VAR_E,
VAR_W,
VAR_H,
VAR_A, VAR_DAR,
@@ -66,26 +60,35 @@ typedef struct AspectContext {
const AVClass *class;
AVRational dar;
AVRational sar;
+ int max;
#if FF_API_OLD_FILTER_OPTS
- float aspect_num, aspect_den;
+ float aspect_den;
#endif
char *ratio_expr;
} AspectContext;
-#if FF_API_OLD_FILTER_OPTS
static av_cold int init(AVFilterContext *ctx)
{
AspectContext *s = ctx->priv;
+ int ret;
- if (s->aspect_num > 0 && s->aspect_den > 0) {
- av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use "
- "dar=<number> or dar=num/den.\n");
- s->sar = s->dar = av_d2q(s->aspect_num / s->aspect_den, INT_MAX);
+#if FF_API_OLD_FILTER_OPTS
+ if (s->ratio_expr && s->aspect_den > 0) {
+ double num;
+ av_log(ctx, AV_LOG_WARNING,
+ "num:den syntax is deprecated, please use num/den or named options instead\n");
+ ret = av_expr_parse_and_eval(&num, s->ratio_expr, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse ratio numerator \"%s\"\n", s->ratio_expr);
+ return AVERROR(EINVAL);
+ }
+ s->sar = s->dar = av_d2q(num / s->aspect_den, s->max);
}
+#endif
return 0;
}
-#endif
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
@@ -96,7 +99,16 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
}
#define OFFSET(x) offsetof(AspectContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static inline void compute_dar(AVRational *dar, AVRational sar, int w, int h)
+{
+ if (sar.num && sar.den) {
+ av_reduce(&dar->num, &dar->den, sar.num * w, sar.den * h, INT_MAX);
+ } else {
+ av_reduce(&dar->num, &dar->den, w, h, INT_MAX);
+ }
+}
static int get_aspect_ratio(AVFilterLink *inlink, AVRational *aspect_ratio)
{
@@ -106,9 +118,6 @@ static int get_aspect_ratio(AVFilterLink *inlink, AVRational *aspect_ratio)
double var_values[VARS_NB], res;
int ret;
- var_values[VAR_PI] = M_PI;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_E] = M_E;
var_values[VAR_W] = inlink->w;
var_values[VAR_H] = inlink->h;
var_values[VAR_A] = (double) inlink->w / inlink->h;
@@ -119,27 +128,39 @@ static int get_aspect_ratio(AVFilterLink *inlink, AVRational *aspect_ratio)
var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
/* evaluate new aspect ratio*/
- if ((ret = av_expr_parse_and_eval(&res, s->ratio_expr,
+ ret = av_expr_parse_and_eval(&res, s->ratio_expr,
var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
- av_log(NULL, AV_LOG_ERROR,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ ret = av_parse_ratio(aspect_ratio, s->ratio_expr, s->max, 0, ctx);
+ } else
+ *aspect_ratio = av_d2q(res, s->max);
+
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s'\n", s->ratio_expr);
return ret;
}
- *aspect_ratio = av_d2q(res, INT_MAX);
+ if (aspect_ratio->num < 0 || aspect_ratio->den <= 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid string '%s' for aspect ratio\n", s->ratio_expr);
+ return AVERROR(EINVAL);
+ }
return 0;
}
#if CONFIG_SETDAR_FILTER
-/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */
+
static int setdar_config_props(AVFilterLink *inlink)
{
AspectContext *s = inlink->dst->priv;
AVRational dar;
+ AVRational old_dar;
+ AVRational old_sar = inlink->sample_aspect_ratio;
int ret;
#if FF_API_OLD_FILTER_OPTS
- if (!(s->aspect_num > 0 && s->aspect_den > 0)) {
+ if (!(s->ratio_expr && s->aspect_den > 0)) {
#endif
if ((ret = get_aspect_ratio(inlink, &s->dar)))
return ret;
@@ -150,7 +171,7 @@ static int setdar_config_props(AVFilterLink *inlink)
if (s->dar.num && s->dar.den) {
av_reduce(&s->sar.num, &s->sar.den,
s->dar.num * inlink->h,
- s->dar.den * inlink->w, 100);
+ s->dar.den * inlink->w, INT_MAX);
inlink->sample_aspect_ratio = s->sar;
dar = s->dar;
} else {
@@ -158,36 +179,33 @@ static int setdar_config_props(AVFilterLink *inlink)
dar = (AVRational){ inlink->w, inlink->h };
}
- av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d -> dar:%d/%d sar:%d/%d\n",
- inlink->w, inlink->h, dar.num, dar.den,
- inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);
+ compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
+ av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d dar:%d/%d sar:%d/%d -> dar:%d/%d sar:%d/%d\n",
+ inlink->w, inlink->h, old_dar.num, old_dar.den, old_sar.num, old_sar.den,
+ dar.num, dar.den, inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den);
return 0;
}
static const AVOption setdar_options[] = {
+ { "dar", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "ratio", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "r", "set display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
#if FF_API_OLD_FILTER_OPTS
- { "dar_num", NULL, OFFSET(aspect_num), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
{ "dar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
#endif
- { "dar", "display aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { NULL },
+ { "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass setdar_class = {
- .class_name = "setdar",
- .item_name = av_default_item_name,
- .option = setdar_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(setdar);
static const AVFilterPad avfilter_vf_setdar_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = setdar_config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = setdar_config_props,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -201,31 +219,28 @@ static const AVFilterPad avfilter_vf_setdar_outputs[] = {
};
AVFilter ff_vf_setdar = {
- .name = "setdar",
+ .name = "setdar",
.description = NULL_IF_CONFIG_SMALL("Set the frame display aspect ratio."),
-
-#if FF_API_OLD_FILTER_OPTS
- .init = init,
-#endif
-
- .priv_size = sizeof(AspectContext),
- .priv_class = &setdar_class,
-
- .inputs = avfilter_vf_setdar_inputs,
-
- .outputs = avfilter_vf_setdar_outputs,
+ .init = init,
+ .priv_size = sizeof(AspectContext),
+ .priv_class = &setdar_class,
+ .inputs = avfilter_vf_setdar_inputs,
+ .outputs = avfilter_vf_setdar_outputs,
};
+
#endif /* CONFIG_SETDAR_FILTER */
#if CONFIG_SETSAR_FILTER
-/* for setdar filter, convert from frame aspect ratio to pixel aspect ratio */
+
static int setsar_config_props(AVFilterLink *inlink)
{
AspectContext *s = inlink->dst->priv;
+ AVRational old_sar = inlink->sample_aspect_ratio;
+ AVRational old_dar, dar;
int ret;
#if FF_API_OLD_FILTER_OPTS
- if (!(s->aspect_num > 0 && s->aspect_den > 0)) {
+ if (!(s->ratio_expr && s->aspect_den > 0)) {
#endif
if ((ret = get_aspect_ratio(inlink, &s->sar)))
return ret;
@@ -235,32 +250,34 @@ static int setsar_config_props(AVFilterLink *inlink)
inlink->sample_aspect_ratio = s->sar;
+ compute_dar(&old_dar, old_sar, inlink->w, inlink->h);
+ compute_dar(&dar, s->sar, inlink->w, inlink->h);
+ av_log(inlink->dst, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d dar:%d/%d -> sar:%d/%d dar:%d/%d\n",
+ inlink->w, inlink->h, old_sar.num, old_sar.den, old_dar.num, old_dar.den,
+ inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den, dar.num, dar.den);
+
return 0;
}
static const AVOption setsar_options[] = {
+ { "sar", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "ratio", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+ { "r", "set sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
#if FF_API_OLD_FILTER_OPTS
- { "sar_num", NULL, OFFSET(aspect_num), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
{ "sar_den", NULL, OFFSET(aspect_den), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, 0, FLT_MAX, FLAGS },
#endif
- { "sar", "sample (pixel) aspect ratio", OFFSET(ratio_expr), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { NULL },
+ { "max", "set max value for nominator or denominator in the ratio", OFFSET(max), AV_OPT_TYPE_INT, {.i64=100}, 1, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass setsar_class = {
- .class_name = "setsar",
- .item_name = av_default_item_name,
- .option = setsar_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(setsar);
static const AVFilterPad avfilter_vf_setsar_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = setsar_config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = setsar_config_props,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -274,18 +291,13 @@ static const AVFilterPad avfilter_vf_setsar_outputs[] = {
};
AVFilter ff_vf_setsar = {
- .name = "setsar",
+ .name = "setsar",
.description = NULL_IF_CONFIG_SMALL("Set the pixel sample aspect ratio."),
-
-#if FF_API_OLD_FILTER_OPTS
- .init = init,
-#endif
-
- .priv_size = sizeof(AspectContext),
- .priv_class = &setsar_class,
-
- .inputs = avfilter_vf_setsar_inputs,
-
- .outputs = avfilter_vf_setsar_outputs,
+ .init = init,
+ .priv_size = sizeof(AspectContext),
+ .priv_class = &setsar_class,
+ .inputs = avfilter_vf_setsar_inputs,
+ .outputs = avfilter_vf_setsar_outputs,
};
+
#endif /* CONFIG_SETSAR_FILTER */
diff --git a/libavfilter/vf_bbox.c b/libavfilter/vf_bbox.c
new file mode 100644
index 0000000..1e6feff
--- /dev/null
+++ b/libavfilter/vf_bbox.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * bounding box detection filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "bbox.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int min_val;
+} BBoxContext;
+
+#define OFFSET(x) offsetof(BBoxContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption bbox_options[] = {
+ { "min_val", "set minimum luminance value for bounding box", OFFSET(min_val), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, 254, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(bbox);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE,
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+#define SET_META(key, value) \
+ av_dict_set_int(metadata, key, value, 0);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BBoxContext *bbox = ctx->priv;
+ FFBoundingBox box;
+ int has_bbox, w, h;
+
+ has_bbox =
+ ff_calculate_bounding_box(&box,
+ frame->data[0], frame->linesize[0],
+ inlink->w, inlink->h, bbox->min_val);
+ w = box.x2 - box.x1 + 1;
+ h = box.y2 - box.y1 + 1;
+
+ av_log(ctx, AV_LOG_INFO,
+ "n:%"PRId64" pts:%s pts_time:%s", inlink->frame_count,
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
+
+ if (has_bbox) {
+ AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
+
+ SET_META("lavfi.bbox.x1", box.x1)
+ SET_META("lavfi.bbox.x2", box.x2)
+ SET_META("lavfi.bbox.y1", box.y1)
+ SET_META("lavfi.bbox.y2", box.y2)
+ SET_META("lavfi.bbox.w", w)
+ SET_META("lavfi.bbox.h", h)
+
+ av_log(ctx, AV_LOG_INFO,
+ " x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
+ " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
+ box.x1, box.x2, box.y1, box.y2, w, h,
+ w, h, box.x1, box.y1, /* crop params */
+ box.x1, box.y1, w, h); /* drawbox params */
+ }
+ av_log(ctx, AV_LOG_INFO, "\n");
+
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+static const AVFilterPad bbox_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad bbox_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_bbox = {
+ .name = "bbox",
+ .description = NULL_IF_CONFIG_SMALL("Compute bounding box for each frame."),
+ .priv_size = sizeof(BBoxContext),
+ .priv_class = &bbox_class,
+ .query_formats = query_formats,
+ .inputs = bbox_inputs,
+ .outputs = bbox_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_blackdetect.c b/libavfilter/vf_blackdetect.c
new file mode 100644
index 0000000..87a7a23
--- /dev/null
+++ b/libavfilter/vf_blackdetect.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Video black detector, loosely based on blackframe with extended
+ * syntax and features
+ */
+
+#include <float.h>
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ double black_min_duration_time; ///< minimum duration of detected black, in seconds
+ int64_t black_min_duration; ///< minimum duration of detected black, expressed in timebase units
+ int64_t black_start; ///< pts start time of the first black picture
+ int64_t black_end; ///< pts end time of the last black picture
+ int64_t last_picref_pts; ///< pts of the last input picture
+ int black_started;
+
+ double picture_black_ratio_th;
+ double pixel_black_th;
+ unsigned int pixel_black_th_i;
+
+ unsigned int nb_black_pixels; ///< number of black pixels counted so far
+} BlackDetectContext;
+
+#define OFFSET(x) offsetof(BlackDetectContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption blackdetect_options[] = {
+ { "d", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
+ { "black_min_duration", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
+ { "picture_black_ratio_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
+ { "pic_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
+ { "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
+ { "pix_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(blackdetect);
+
+#define YUVJ_FORMATS \
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
+
+static const enum AVPixelFormat yuvj_formats[] = {
+ YUVJ_FORMATS, AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
+ YUVJ_FORMATS,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BlackDetectContext *blackdetect = ctx->priv;
+
+ blackdetect->black_min_duration =
+ blackdetect->black_min_duration_time / av_q2d(inlink->time_base);
+
+ blackdetect->pixel_black_th_i = ff_fmt_is_in(inlink->format, yuvj_formats) ?
+ // luminance_minimum_value + pixel_black_th * luminance_range_size
+ blackdetect->pixel_black_th * 255 :
+ 16 + blackdetect->pixel_black_th * (235 - 16);
+
+ av_log(blackdetect, AV_LOG_VERBOSE,
+ "black_min_duration:%s pixel_black_th:%f pixel_black_th_i:%d picture_black_ratio_th:%f\n",
+ av_ts2timestr(blackdetect->black_min_duration, &inlink->time_base),
+ blackdetect->pixel_black_th, blackdetect->pixel_black_th_i,
+ blackdetect->picture_black_ratio_th);
+ return 0;
+}
+
+static void check_black_end(AVFilterContext *ctx)
+{
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ if ((blackdetect->black_end - blackdetect->black_start) >= blackdetect->black_min_duration) {
+ av_log(blackdetect, AV_LOG_INFO,
+ "black_start:%s black_end:%s black_duration:%s\n",
+ av_ts2timestr(blackdetect->black_start, &inlink->time_base),
+ av_ts2timestr(blackdetect->black_end, &inlink->time_base),
+ av_ts2timestr(blackdetect->black_end - blackdetect->black_start, &inlink->time_base));
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ BlackDetectContext *blackdetect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int ret = ff_request_frame(inlink);
+
+ if (ret == AVERROR_EOF && blackdetect->black_started) {
+ // FIXME: black_end should be set to last_picref_pts + last_picref_duration
+ blackdetect->black_end = blackdetect->last_picref_pts;
+ check_black_end(ctx);
+ }
+ return ret;
+}
+
+// TODO: document metadata
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ BlackDetectContext *blackdetect = ctx->priv;
+ double picture_black_ratio = 0;
+ const uint8_t *p = picref->data[0];
+ int x, i;
+
+ for (i = 0; i < inlink->h; i++) {
+ for (x = 0; x < inlink->w; x++)
+ blackdetect->nb_black_pixels += p[x] <= blackdetect->pixel_black_th_i;
+ p += picref->linesize[0];
+ }
+
+ picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
+
+ av_log(ctx, AV_LOG_DEBUG,
+ "frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
+ inlink->frame_count, picture_black_ratio,
+ av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
+ av_get_picture_type_char(picref->pict_type));
+
+ if (picture_black_ratio >= blackdetect->picture_black_ratio_th) {
+ if (!blackdetect->black_started) {
+ /* black starts here */
+ blackdetect->black_started = 1;
+ blackdetect->black_start = picref->pts;
+ av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_start",
+ av_ts2timestr(blackdetect->black_start, &inlink->time_base), 0);
+ }
+ } else if (blackdetect->black_started) {
+ /* black ends here */
+ blackdetect->black_started = 0;
+ blackdetect->black_end = picref->pts;
+ check_black_end(ctx);
+ av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_end",
+ av_ts2timestr(blackdetect->black_end, &inlink->time_base), 0);
+ }
+
+ blackdetect->last_picref_pts = picref->pts;
+ blackdetect->nb_black_pixels = 0;
+ return ff_filter_frame(inlink->dst->outputs[0], picref);
+}
+
+static const AVFilterPad blackdetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad blackdetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_blackdetect = {
+ .name = "blackdetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect video intervals that are (almost) black."),
+ .priv_size = sizeof(BlackDetectContext),
+ .query_formats = query_formats,
+ .inputs = blackdetect_inputs,
+ .outputs = blackdetect_outputs,
+ .priv_class = &blackdetect_class,
+};
diff --git a/libavfilter/vf_blackframe.c b/libavfilter/vf_blackframe.c
index 8cbcc00..1be9fcc 100644
--- a/libavfilter/vf_blackframe.c
+++ b/libavfilter/vf_blackframe.c
@@ -4,20 +4,20 @@
* Copyright (c) 2006 Julian Hall
* Copyright (c) 2002-2003 Brian J. Murrell
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -32,7 +32,6 @@
#include "libavutil/internal.h"
#include "libavutil/opt.h"
-
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -44,6 +43,7 @@ typedef struct BlackFrameContext {
int bthresh; ///< black threshold
unsigned int frame; ///< frame number
unsigned int nblack; ///< number of black pixels counted so far
+ unsigned int last_keyframe; ///< frame number of the last received key-frame
} BlackFrameContext;
static int query_formats(AVFilterContext *ctx)
@@ -58,6 +58,10 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
+#define SET_META(key, format, value) \
+ snprintf(buf, sizeof(buf), format, value); \
+ av_dict_set(metadata, key, buf, 0)
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
@@ -65,6 +69,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
int x, i;
int pblack = 0;
uint8_t *p = frame->data[0];
+ AVDictionary **metadata;
+ char buf[32];
for (i = 0; i < frame->height; i++) {
for (x = 0; x < inlink->w; x++)
@@ -72,11 +78,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
p += frame->linesize[0];
}
+ if (frame->key_frame)
+ s->last_keyframe = s->frame;
+
pblack = s->nblack * 100 / (inlink->w * inlink->h);
- if (pblack >= s->bamount)
- av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f\n",
+ if (pblack >= s->bamount) {
+ metadata = avpriv_frame_get_metadatap(frame);
+
+ av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
+ "type:%c last_keyframe:%d\n",
s->frame, pblack, frame->pts,
- frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base));
+ frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
+ av_get_picture_type_char(frame->pict_type), s->last_keyframe);
+
+ SET_META("lavfi.blackframe.pblack", "%u", pblack);
+ }
s->frame++;
s->nblack = 0;
@@ -84,28 +100,24 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
#define OFFSET(x) offsetof(BlackFrameContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption blackframe_options[] = {
{ "amount", "Percentage of the pixels that have to be below the threshold "
"for the frame to be considered black.", OFFSET(bamount), AV_OPT_TYPE_INT, { .i64 = 98 }, 0, 100, FLAGS },
{ "threshold", "threshold below which a pixel value is considered black",
- OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, INT_MAX, FLAGS },
- { NULL },
+ OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
+ { "thresh", "threshold below which a pixel value is considered black",
+ OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
+ { NULL }
};
-static const AVClass blackframe_class = {
- .class_name = "blackframe",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(blackframe);
static const AVFilterPad avfilter_vf_blackframe_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -119,15 +131,11 @@ static const AVFilterPad avfilter_vf_blackframe_outputs[] = {
};
AVFilter ff_vf_blackframe = {
- .name = "blackframe",
- .description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
-
- .priv_size = sizeof(BlackFrameContext),
- .priv_class = &blackframe_class,
-
+ .name = "blackframe",
+ .description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
+ .priv_size = sizeof(BlackFrameContext),
+ .priv_class = &blackframe_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_blackframe_inputs,
-
- .outputs = avfilter_vf_blackframe_outputs,
+ .inputs = avfilter_vf_blackframe_inputs,
+ .outputs = avfilter_vf_blackframe_outputs,
};
diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c
new file mode 100644
index 0000000..8bf19ff
--- /dev/null
+++ b/libavfilter/vf_blend.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixfmt.h"
+#include "avfilter.h"
+#include "bufferqueue.h"
+#include "formats.h"
+#include "internal.h"
+#include "dualinput.h"
+#include "video.h"
+
+#define TOP 0
+#define BOTTOM 1
+
+enum BlendMode {
+ BLEND_UNSET = -1,
+ BLEND_NORMAL,
+ BLEND_ADDITION,
+ BLEND_AND,
+ BLEND_AVERAGE,
+ BLEND_BURN,
+ BLEND_DARKEN,
+ BLEND_DIFFERENCE,
+ BLEND_DIVIDE,
+ BLEND_DODGE,
+ BLEND_EXCLUSION,
+ BLEND_HARDLIGHT,
+ BLEND_LIGHTEN,
+ BLEND_MULTIPLY,
+ BLEND_NEGATION,
+ BLEND_OR,
+ BLEND_OVERLAY,
+ BLEND_PHOENIX,
+ BLEND_PINLIGHT,
+ BLEND_REFLECT,
+ BLEND_SCREEN,
+ BLEND_SOFTLIGHT,
+ BLEND_SUBTRACT,
+ BLEND_VIVIDLIGHT,
+ BLEND_XOR,
+ BLEND_NB
+};
+
+static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
+enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
+
+typedef struct FilterParams {
+ enum BlendMode mode;
+ double opacity;
+ AVExpr *e;
+ char *expr_str;
+ void (*blend)(const uint8_t *top, int top_linesize,
+ const uint8_t *bottom, int bottom_linesize,
+ uint8_t *dst, int dst_linesize,
+ int width, int start, int end,
+ struct FilterParams *param, double *values);
+} FilterParams;
+
+typedef struct ThreadData {
+ const AVFrame *top, *bottom;
+ AVFrame *dst;
+ AVFilterLink *inlink;
+ int plane;
+ int w, h;
+ FilterParams *param;
+} ThreadData;
+
+typedef struct {
+ const AVClass *class;
+ FFDualInputContext dinput;
+ int hsub, vsub; ///< chroma subsampling values
+ int nb_planes;
+ char *all_expr;
+ enum BlendMode all_mode;
+ double all_opacity;
+
+ FilterParams params[4];
+} BlendContext;
+
+#define OFFSET(x) offsetof(BlendContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption blend_options[] = {
+ { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
+ { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
+ { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
+ { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
+ { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},
+ { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },
+ { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },
+ { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },
+ { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },
+ { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },
+ { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },
+ { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },
+ { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },
+ { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },
+ { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },
+ { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },
+ { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },
+ { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },
+ { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },
+ { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },
+ { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },
+ { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },
+ { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },
+ { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },
+ { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },
+ { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },
+ { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },
+ { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },
+ { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },
+ { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
+ { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(blend);
+
+static void blend_normal(const uint8_t *top, int top_linesize,
+ const uint8_t *bottom, int bottom_linesize,
+ uint8_t *dst, int dst_linesize,
+ int width, int start, int end,
+ FilterParams *param, double *values)
+{
+ av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, end - start);
+}
+
+#define DEFINE_BLEND(name, expr) \
+static void blend_## name(const uint8_t *top, int top_linesize, \
+ const uint8_t *bottom, int bottom_linesize, \
+ uint8_t *dst, int dst_linesize, \
+ int width, int start, int end, \
+ FilterParams *param, double *values) \
+{ \
+ double opacity = param->opacity; \
+ int i, j; \
+ \
+ for (i = start; i < end; i++) { \
+ for (j = 0; j < width; j++) { \
+ dst[j] = top[j] + ((expr) - top[j]) * opacity; \
+ } \
+ dst += dst_linesize; \
+ top += top_linesize; \
+ bottom += bottom_linesize; \
+ } \
+}
+
+#define A top[j]
+#define B bottom[j]
+
+#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
+#define SCREEN(x, a, b) (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
+#define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
+#define DODGE(a, b) (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))
+
+DEFINE_BLEND(addition, FFMIN(255, A + B))
+DEFINE_BLEND(average, (A + B) / 2)
+DEFINE_BLEND(subtract, FFMAX(0, A - B))
+DEFINE_BLEND(multiply, MULTIPLY(1, A, B))
+DEFINE_BLEND(negation, 255 - FFABS(255 - A - B))
+DEFINE_BLEND(difference, FFABS(A - B))
+DEFINE_BLEND(screen, SCREEN(1, A, B))
+DEFINE_BLEND(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
+DEFINE_BLEND(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
+DEFINE_BLEND(darken, FFMIN(A, B))
+DEFINE_BLEND(lighten, FFMAX(A, B))
+DEFINE_BLEND(divide, ((float)A / ((float)B) * 255))
+DEFINE_BLEND(dodge, DODGE(A, B))
+DEFINE_BLEND(burn, BURN(A, B))
+DEFINE_BLEND(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5)/255))
+DEFINE_BLEND(exclusion, A + B - 2 * A * B / 255)
+DEFINE_BLEND(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
+DEFINE_BLEND(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
+DEFINE_BLEND(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
+DEFINE_BLEND(and, A & B)
+DEFINE_BLEND(or, A | B)
+DEFINE_BLEND(xor, A ^ B)
+DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128)))
+
+static void blend_expr(const uint8_t *top, int top_linesize,
+ const uint8_t *bottom, int bottom_linesize,
+ uint8_t *dst, int dst_linesize,
+ int width, int start, int end,
+ FilterParams *param, double *values)
+{
+ AVExpr *e = param->e;
+ int y, x;
+
+ for (y = start; y < end; y++) {
+ values[VAR_Y] = y;
+ for (x = 0; x < width; x++) {
+ values[VAR_X] = x;
+ values[VAR_TOP] = values[VAR_A] = top[x];
+ values[VAR_BOTTOM] = values[VAR_B] = bottom[x];
+ dst[x] = av_expr_eval(e, values, NULL);
+ }
+ dst += dst_linesize;
+ top += top_linesize;
+ bottom += bottom_linesize;
+ }
+}
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ ThreadData *td = arg;
+ int slice_start = (td->h * jobnr ) / nb_jobs;
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs;
+ const uint8_t *top = td->top->data[td->plane];
+ const uint8_t *bottom = td->bottom->data[td->plane];
+ uint8_t *dst = td->dst->data[td->plane];
+ double values[VAR_VARS_NB];
+
+ values[VAR_N] = td->inlink->frame_count;
+ values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
+ values[VAR_W] = td->w;
+ values[VAR_H] = td->h;
+ values[VAR_SW] = td->w / (double)td->dst->width;
+ values[VAR_SH] = td->h / (double)td->dst->height;
+
+ td->param->blend(top + slice_start * td->top->linesize[td->plane],
+ td->top->linesize[td->plane],
+ bottom + slice_start * td->bottom->linesize[td->plane],
+ td->bottom->linesize[td->plane],
+ dst + slice_start * td->dst->linesize[td->plane],
+ td->dst->linesize[td->plane],
+ td->w, slice_start, slice_end, td->param, &values[0]);
+ return 0;
+}
+
+static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
+ const AVFrame *bottom_buf)
+{
+ BlendContext *b = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *dst_buf;
+ int plane;
+
+ dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!dst_buf)
+ return top_buf;
+ av_frame_copy_props(dst_buf, top_buf);
+
+ for (plane = 0; plane < b->nb_planes; plane++) {
+ int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
+ int outw = FF_CEIL_RSHIFT(dst_buf->width, hsub);
+ int outh = FF_CEIL_RSHIFT(dst_buf->height, vsub);
+ FilterParams *param = &b->params[plane];
+ ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
+ .w = outw, .h = outh, .param = param, .plane = plane,
+ .inlink = inlink };
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
+ }
+
+ av_frame_free(&top_buf);
+
+ return dst_buf;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ BlendContext *b = ctx->priv;
+ int ret, plane;
+
+ for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
+ FilterParams *param = &b->params[plane];
+
+ if (b->all_mode >= 0)
+ param->mode = b->all_mode;
+ if (b->all_opacity < 1)
+ param->opacity = b->all_opacity;
+
+ switch (param->mode) {
+ case BLEND_ADDITION: param->blend = blend_addition; break;
+ case BLEND_AND: param->blend = blend_and; break;
+ case BLEND_AVERAGE: param->blend = blend_average; break;
+ case BLEND_BURN: param->blend = blend_burn; break;
+ case BLEND_DARKEN: param->blend = blend_darken; break;
+ case BLEND_DIFFERENCE: param->blend = blend_difference; break;
+ case BLEND_DIVIDE: param->blend = blend_divide; break;
+ case BLEND_DODGE: param->blend = blend_dodge; break;
+ case BLEND_EXCLUSION: param->blend = blend_exclusion; break;
+ case BLEND_HARDLIGHT: param->blend = blend_hardlight; break;
+ case BLEND_LIGHTEN: param->blend = blend_lighten; break;
+ case BLEND_MULTIPLY: param->blend = blend_multiply; break;
+ case BLEND_NEGATION: param->blend = blend_negation; break;
+ case BLEND_NORMAL: param->blend = blend_normal; break;
+ case BLEND_OR: param->blend = blend_or; break;
+ case BLEND_OVERLAY: param->blend = blend_overlay; break;
+ case BLEND_PHOENIX: param->blend = blend_phoenix; break;
+ case BLEND_PINLIGHT: param->blend = blend_pinlight; break;
+ case BLEND_REFLECT: param->blend = blend_reflect; break;
+ case BLEND_SCREEN: param->blend = blend_screen; break;
+ case BLEND_SOFTLIGHT: param->blend = blend_softlight; break;
+ case BLEND_SUBTRACT: param->blend = blend_subtract; break;
+ case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break;
+ case BLEND_XOR: param->blend = blend_xor; break;
+ }
+
+ if (b->all_expr && !param->expr_str) {
+ param->expr_str = av_strdup(b->all_expr);
+ if (!param->expr_str)
+ return AVERROR(ENOMEM);
+ }
+ if (param->expr_str) {
+ ret = av_expr_parse(&param->e, param->expr_str, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ return ret;
+ param->blend = blend_expr;
+ }
+ }
+
+ b->dinput.process = blend_frame;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *toplink = ctx->inputs[TOP];
+ AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
+ BlendContext *b = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
+ int ret;
+
+ if (toplink->format != bottomlink->format) {
+ av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
+ return AVERROR(EINVAL);
+ }
+ if (toplink->w != bottomlink->w ||
+ toplink->h != bottomlink->h ||
+ toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
+ toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
+ "(size %dx%d, SAR %d:%d) do not match the corresponding "
+ "second input link %s parameters (%dx%d, SAR %d:%d)\n",
+ ctx->input_pads[TOP].name, toplink->w, toplink->h,
+ toplink->sample_aspect_ratio.num,
+ toplink->sample_aspect_ratio.den,
+ ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
+ bottomlink->sample_aspect_ratio.num,
+ bottomlink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = toplink->w;
+ outlink->h = toplink->h;
+ outlink->time_base = toplink->time_base;
+ outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
+ outlink->frame_rate = toplink->frame_rate;
+
+ b->hsub = pix_desc->log2_chroma_w;
+ b->vsub = pix_desc->log2_chroma_h;
+ b->nb_planes = av_pix_fmt_count_planes(toplink->format);
+
+ if ((ret = ff_dualinput_init(ctx, &b->dinput)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ BlendContext *b = ctx->priv;
+ int i;
+
+ ff_dualinput_uninit(&b->dinput);
+ for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
+ av_expr_free(b->params[i].e);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ BlendContext *b = outlink->src->priv;
+ return ff_dualinput_request_frame(&b->dinput, outlink);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
+{
+ BlendContext *b = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
+}
+
+static const AVFilterPad blend_inputs[] = {
+ {
+ .name = "top",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },{
+ .name = "bottom",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad blend_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_blend = {
+ .name = "blend",
+ .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(BlendContext),
+ .query_formats = query_formats,
+ .inputs = blend_inputs,
+ .outputs = blend_outputs,
+ .priv_class = &blend_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_boxblur.c b/libavfilter/vf_boxblur.c
index 4cbfe2c..1fa5135 100644
--- a/libavfilter/vf_boxblur.c
+++ b/libavfilter/vf_boxblur.c
@@ -2,20 +2,20 @@
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -58,6 +58,7 @@ enum var_name {
typedef struct FilterParam {
int radius;
int power;
+ char *radius_expr;
} FilterParam;
typedef struct BoxBlurContext {
@@ -65,9 +66,6 @@ typedef struct BoxBlurContext {
FilterParam luma_param;
FilterParam chroma_param;
FilterParam alpha_param;
- char *luma_radius_expr;
- char *chroma_radius_expr;
- char *alpha_radius_expr;
int hsub, vsub;
int radius[4];
@@ -84,23 +82,27 @@ static av_cold int init(AVFilterContext *ctx)
{
BoxBlurContext *s = ctx->priv;
- if (!s->luma_radius_expr) {
+ if (!s->luma_param.radius_expr) {
av_log(ctx, AV_LOG_ERROR, "Luma radius expression is not set.\n");
return AVERROR(EINVAL);
}
- if (!s->chroma_radius_expr) {
- s->chroma_radius_expr = av_strdup(s->luma_radius_expr);
- if (!s->chroma_radius_expr)
+ /* fill missing params */
+ if (!s->chroma_param.radius_expr) {
+ s->chroma_param.radius_expr = av_strdup(s->luma_param.radius_expr);
+ if (!s->chroma_param.radius_expr)
return AVERROR(ENOMEM);
- s->chroma_param.power = s->luma_param.power;
}
- if (!s->alpha_radius_expr) {
- s->alpha_radius_expr = av_strdup(s->luma_radius_expr);
- if (!s->alpha_radius_expr)
+ if (s->chroma_param.power < 0)
+ s->chroma_param.power = s->luma_param.power;
+
+ if (!s->alpha_param.radius_expr) {
+ s->alpha_param.radius_expr = av_strdup(s->luma_param.radius_expr);
+ if (!s->alpha_param.radius_expr)
return AVERROR(ENOMEM);
- s->alpha_param.power = s->luma_param.power;
}
+ if (s->alpha_param.power < 0)
+ s->alpha_param.power = s->luma_param.power;
return 0;
}
@@ -115,7 +117,7 @@ static av_cold void uninit(AVFilterContext *ctx)
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
+ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
@@ -139,14 +141,9 @@ static int config_input(AVFilterLink *inlink)
char *expr;
int ret;
- av_freep(&s->temp[0]);
- av_freep(&s->temp[1]);
- if (!(s->temp[0] = av_malloc(FFMAX(w, h))))
- return AVERROR(ENOMEM);
- if (!(s->temp[1] = av_malloc(FFMAX(w, h)))) {
- av_freep(&s->temp[0]);
+ if (!(s->temp[0] = av_malloc(FFMAX(w, h))) ||
+ !(s->temp[1] = av_malloc(FFMAX(w, h))))
return AVERROR(ENOMEM);
- }
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
@@ -159,7 +156,7 @@ static int config_input(AVFilterLink *inlink)
var_values[VAR_VSUB] = 1<<s->vsub;
#define EVAL_RADIUS_EXPR(comp) \
- expr = s->comp##_radius_expr; \
+ expr = s->comp##_param.radius_expr; \
ret = av_expr_parse_and_eval(&res, expr, var_names, var_values, \
NULL, NULL, NULL, NULL, NULL, 0, ctx); \
s->comp##_param.radius = res; \
@@ -172,7 +169,7 @@ static int config_input(AVFilterLink *inlink)
EVAL_RADIUS_EXPR(chroma);
EVAL_RADIUS_EXPR(alpha);
- av_log(ctx, AV_LOG_DEBUG,
+ av_log(ctx, AV_LOG_VERBOSE,
"luma_radius:%d luma_power:%d "
"chroma_radius:%d chroma_power:%d "
"alpha_radius:%d alpha_power:%d "
@@ -305,7 +302,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int plane;
- int cw = inlink->w >> s->hsub, ch = in->height >> s->vsub;
+ int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub), ch = FF_CEIL_RSHIFT(in->height, s->vsub);
int w[4] = { inlink->w, cw, cw, inlink->w };
int h[4] = { in->height, ch, ch, in->height };
@@ -316,13 +313,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- for (plane = 0; in->data[plane] && plane < 4; plane++)
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
hblur(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
s->temp);
- for (plane = 0; in->data[plane] && plane < 4; plane++)
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
vblur(out->data[plane], out->linesize[plane],
out->data[plane], out->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
@@ -334,27 +331,29 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(BoxBlurContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "luma_radius", "Radius of the luma blurring box", OFFSET(luma_radius_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "luma_power", "How many times should the boxblur be applied to luma",
- OFFSET(luma_param.power), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_radius_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "chroma_power", "How many times should the boxblur be applied to chroma",
- OFFSET(chroma_param.power), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_radius_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "alpha_power", "How many times should the boxblur be applied to alpha",
- OFFSET(alpha_param.power), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption boxblur_options[] = {
+ { "luma_radius", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
+ { "lr", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
+ { "luma_power", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
+ { "lp", "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
+
+ { "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "cr", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "chroma_power", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "cp", "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+
+ { "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "ar", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "alpha_power", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "ap", "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
-static const AVClass boxblur_class = {
- .class_name = "boxblur",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(boxblur);
+
static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
{
.name = "default",
@@ -381,7 +380,7 @@ AVFilter ff_vf_boxblur = {
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_boxblur_inputs,
- .outputs = avfilter_vf_boxblur_outputs,
+ .inputs = avfilter_vf_boxblur_inputs,
+ .outputs = avfilter_vf_boxblur_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_codecview.c b/libavfilter/vf_codecview.c
new file mode 100644
index 0000000..d777f97
--- /dev/null
+++ b/libavfilter/vf_codecview.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2014 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Codec debug viewer filter.
+ *
+ * All the MV drawing code from Michael Niedermayer is extracted from
+ * libavcodec/mpegvideo.c.
+ *
+ * TODO: segmentation
+ * TODO: quantization
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/motion_vector.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define MV_P_FOR (1<<0)
+#define MV_B_FOR (1<<1)
+#define MV_B_BACK (1<<2)
+
+typedef struct {
+ const AVClass *class;
+ unsigned mv;
+} CodecViewContext;
+
+#define OFFSET(x) offsetof(CodecViewContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption codecview_options[] = {
+ { "mv", "set motion vectors to visualize", OFFSET(mv), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "mv" },
+ {"pf", "forward predicted MVs of P-frames", 0, AV_OPT_TYPE_CONST, {.i64 = MV_P_FOR }, INT_MIN, INT_MAX, FLAGS, "mv"},
+ {"bf", "forward predicted MVs of B-frames", 0, AV_OPT_TYPE_CONST, {.i64 = MV_B_FOR }, INT_MIN, INT_MAX, FLAGS, "mv"},
+ {"bb", "backward predicted MVs of B-frames", 0, AV_OPT_TYPE_CONST, {.i64 = MV_B_BACK }, INT_MIN, INT_MAX, FLAGS, "mv"},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(codecview);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // TODO: we can probably add way more pixel formats without any other
+ // changes; anything with 8-bit luma in first plane should be working
+ static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
+{
+ if(*sx > *ex)
+ return clip_line(ex, ey, sx, sy, maxx);
+
+ if (*sx < 0) {
+ if (*ex < 0)
+ return 1;
+ *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
+ *sx = 0;
+ }
+
+ if (*ex > maxx) {
+ if (*sx > maxx)
+ return 1;
+ *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
+ *ex = maxx;
+ }
+ return 0;
+}
+
+/**
+ * Draw a line from (ex, ey) -> (sx, sy).
+ * @param w width of the image
+ * @param h height of the image
+ * @param stride stride/linesize of the image
+ * @param color color of the arrow
+ */
+static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
+ int w, int h, int stride, int color)
+{
+ int x, y, fr, f;
+
+ if (clip_line(&sx, &sy, &ex, &ey, w - 1))
+ return;
+ if (clip_line(&sy, &sx, &ey, &ex, h - 1))
+ return;
+
+ sx = av_clip(sx, 0, w - 1);
+ sy = av_clip(sy, 0, h - 1);
+ ex = av_clip(ex, 0, w - 1);
+ ey = av_clip(ey, 0, h - 1);
+
+ buf[sy * stride + sx] += color;
+
+ if (FFABS(ex - sx) > FFABS(ey - sy)) {
+ if (sx > ex) {
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
+ }
+ buf += sx + sy * stride;
+ ex -= sx;
+ f = ((ey - sy) << 16) / ex;
+ for (x = 0; x <= ex; x++) {
+ y = (x * f) >> 16;
+ fr = (x * f) & 0xFFFF;
+ buf[ y * stride + x] += (color * (0x10000 - fr)) >> 16;
+ if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
+ }
+ } else {
+ if (sy > ey) {
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
+ }
+ buf += sx + sy * stride;
+ ey -= sy;
+ if (ey)
+ f = ((ex - sx) << 16) / ey;
+ else
+ f = 0;
+ for(y= 0; y <= ey; y++){
+ x = (y*f) >> 16;
+ fr = (y*f) & 0xFFFF;
+ buf[y * stride + x ] += (color * (0x10000 - fr)) >> 16;
+ if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
+ }
+ }
+}
+
+/**
+ * Draw an arrow from (ex, ey) -> (sx, sy).
+ * @param w width of the image
+ * @param h height of the image
+ * @param stride stride/linesize of the image
+ * @param color color of the arrow
+ */
+static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
+ int ey, int w, int h, int stride, int color, int tail, int direction)
+{
+ int dx,dy;
+
+ if (direction) {
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
+ }
+
+ sx = av_clip(sx, -100, w + 100);
+ sy = av_clip(sy, -100, h + 100);
+ ex = av_clip(ex, -100, w + 100);
+ ey = av_clip(ey, -100, h + 100);
+
+ dx = ex - sx;
+ dy = ey - sy;
+
+ if (dx * dx + dy * dy > 3 * 3) {
+ int rx = dx + dy;
+ int ry = -dx + dy;
+ int length = sqrt((rx * rx + ry * ry) << 8);
+
+ // FIXME subpixel accuracy
+ rx = ROUNDED_DIV(rx * 3 << 4, length);
+ ry = ROUNDED_DIV(ry * 3 << 4, length);
+
+ if (tail) {
+ rx = -rx;
+ ry = -ry;
+ }
+
+ draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
+ draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
+ }
+ draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CodecViewContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
+ if (sd) {
+ int i;
+ const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
+ for (i = 0; i < sd->size / sizeof(*mvs); i++) {
+ const AVMotionVector *mv = &mvs[i];
+ const int direction = mv->source > 0;
+ if ((direction == 0 && (s->mv & MV_P_FOR) && frame->pict_type == AV_PICTURE_TYPE_P) ||
+ (direction == 0 && (s->mv & MV_B_FOR) && frame->pict_type == AV_PICTURE_TYPE_B) ||
+ (direction == 1 && (s->mv & MV_B_BACK) && frame->pict_type == AV_PICTURE_TYPE_B))
+ draw_arrow(frame->data[0], mv->dst_x, mv->dst_y, mv->src_x, mv->src_y,
+ frame->width, frame->height, frame->linesize[0],
+ 100, 0, mv->source > 0);
+ }
+ }
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad codecview_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad codecview_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_codecview = {
+ .name = "codecview",
+ .description = NULL_IF_CONFIG_SMALL("Visualize information about some codecs"),
+ .priv_size = sizeof(CodecViewContext),
+ .query_formats = query_formats,
+ .inputs = codecview_inputs,
+ .outputs = codecview_outputs,
+ .priv_class = &codecview_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_colorbalance.c b/libavfilter/vf_colorbalance.c
new file mode 100644
index 0000000..c151c33
--- /dev/null
+++ b/libavfilter/vf_colorbalance.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+typedef struct {
+ double shadows;
+ double midtones;
+ double highlights;
+} Range;
+
+typedef struct {
+ const AVClass *class;
+ Range cyan_red;
+ Range magenta_green;
+ Range yellow_blue;
+
+ uint8_t lut[3][256];
+
+ uint8_t rgba_map[4];
+ int step;
+} ColorBalanceContext;
+
+#define OFFSET(x) offsetof(ColorBalanceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption colorbalance_options[] = {
+ { "rs", "set red shadows", OFFSET(cyan_red.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "gs", "set green shadows", OFFSET(magenta_green.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "bs", "set blue shadows", OFFSET(yellow_blue.shadows), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "rm", "set red midtones", OFFSET(cyan_red.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "gm", "set green midtones", OFFSET(magenta_green.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "bm", "set blue midtones", OFFSET(yellow_blue.midtones), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "rh", "set red highlights", OFFSET(cyan_red.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "gh", "set green highlights", OFFSET(magenta_green.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { "bh", "set blue highlights", OFFSET(yellow_blue.highlights), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorbalance);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_ARGB,
+ AV_PIX_FMT_0BGR, AV_PIX_FMT_0RGB,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ColorBalanceContext *cb = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ double *shadows, *midtones, *highlights, *buffer;
+ int i, r, g, b;
+
+ buffer = av_malloc(256 * 3 * sizeof(*buffer));
+ if (!buffer)
+ return AVERROR(ENOMEM);
+
+ shadows = buffer + 256 * 0;
+ midtones = buffer + 256 * 1;
+ highlights = buffer + 256 * 2;
+
+ for (i = 0; i < 256; i++) {
+ double low = av_clipd((i - 85.0) / -64.0 + 0.5, 0, 1) * 178.5;
+ double mid = av_clipd((i - 85.0) / 64.0 + 0.5, 0, 1) *
+ av_clipd((i + 85.0 - 255.0) / -64.0 + 0.5, 0, 1) * 178.5;
+
+ shadows[i] = low;
+ midtones[i] = mid;
+ highlights[255 - i] = low;
+ }
+
+ for (i = 0; i < 256; i++) {
+ r = g = b = i;
+
+ r = av_clip_uint8(r + cb->cyan_red.shadows * shadows[r]);
+ r = av_clip_uint8(r + cb->cyan_red.midtones * midtones[r]);
+ r = av_clip_uint8(r + cb->cyan_red.highlights * highlights[r]);
+
+ g = av_clip_uint8(g + cb->magenta_green.shadows * shadows[g]);
+ g = av_clip_uint8(g + cb->magenta_green.midtones * midtones[g]);
+ g = av_clip_uint8(g + cb->magenta_green.highlights * highlights[g]);
+
+ b = av_clip_uint8(b + cb->yellow_blue.shadows * shadows[b]);
+ b = av_clip_uint8(b + cb->yellow_blue.midtones * midtones[b]);
+ b = av_clip_uint8(b + cb->yellow_blue.highlights * highlights[b]);
+
+ cb->lut[R][i] = r;
+ cb->lut[G][i] = g;
+ cb->lut[B][i] = b;
+ }
+
+ av_free(buffer);
+
+ ff_fill_rgba_map(cb->rgba_map, outlink->format);
+ cb->step = av_get_padded_bits_per_pixel(desc) >> 3;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorBalanceContext *cb = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const uint8_t roffset = cb->rgba_map[R];
+ const uint8_t goffset = cb->rgba_map[G];
+ const uint8_t boffset = cb->rgba_map[B];
+ const uint8_t aoffset = cb->rgba_map[A];
+ const int step = cb->step;
+ const uint8_t *srcrow = in->data[0];
+ uint8_t *dstrow;
+ AVFrame *out;
+ int i, j;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ dstrow = out->data[0];
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * step; j += step) {
+ dst[j + roffset] = cb->lut[R][src[j + roffset]];
+ dst[j + goffset] = cb->lut[G][src[j + goffset]];
+ dst[j + boffset] = cb->lut[B][src[j + boffset]];
+ if (in != out && step == 4)
+ dst[j + aoffset] = src[j + aoffset];
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static const AVFilterPad colorbalance_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colorbalance_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colorbalance = {
+ .name = "colorbalance",
+ .description = NULL_IF_CONFIG_SMALL("Adjust the color balance."),
+ .priv_size = sizeof(ColorBalanceContext),
+ .priv_class = &colorbalance_class,
+ .query_formats = query_formats,
+ .inputs = colorbalance_inputs,
+ .outputs = colorbalance_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_colorchannelmixer.c b/libavfilter/vf_colorchannelmixer.c
new file mode 100644
index 0000000..c7e63b5
--- /dev/null
+++ b/libavfilter/vf_colorchannelmixer.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+typedef struct {
+ const AVClass *class;
+ double rr, rg, rb, ra;
+ double gr, gg, gb, ga;
+ double br, bg, bb, ba;
+ double ar, ag, ab, aa;
+
+ int *lut[4][4];
+
+ int *buffer;
+
+ uint8_t rgba_map[4];
+} ColorChannelMixerContext;
+
+#define OFFSET(x) offsetof(ColorChannelMixerContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption colorchannelmixer_options[] = {
+ { "rr", "set the red gain for the red channel", OFFSET(rr), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { "rg", "set the green gain for the red channel", OFFSET(rg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "rb", "set the blue gain for the red channel", OFFSET(rb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ra", "set the alpha gain for the red channel", OFFSET(ra), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "gr", "set the red gain for the green channel", OFFSET(gr), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "gg", "set the green gain for the green channel", OFFSET(gg), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { "gb", "set the blue gain for the green channel", OFFSET(gb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ga", "set the alpha gain for the green channel", OFFSET(ga), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "br", "set the red gain for the blue channel", OFFSET(br), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "bg", "set the green gain for the blue channel", OFFSET(bg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "bb", "set the blue gain for the blue channel", OFFSET(bb), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { "ba", "set the alpha gain for the blue channel", OFFSET(ba), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ar", "set the red gain for the alpha channel", OFFSET(ar), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ag", "set the green gain for the alpha channel", OFFSET(ag), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "ab", "set the blue gain for the alpha channel", OFFSET(ab), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
+ { "aa", "set the alpha gain for the alpha channel", OFFSET(aa), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colorchannelmixer);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ColorChannelMixerContext *cm = ctx->priv;
+ int i, j, size, *buffer;
+
+ ff_fill_rgba_map(cm->rgba_map, outlink->format);
+
+ switch (outlink->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ size = 65536;
+ break;
+ default:
+ size = 256;
+ }
+
+ cm->buffer = buffer = av_malloc(16 * size * sizeof(*cm->buffer));
+ if (!cm->buffer)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++, buffer += size)
+ cm->lut[i][j] = buffer;
+
+ for (i = 0; i < size; i++) {
+ cm->lut[R][R][i] = round(i * cm->rr);
+ cm->lut[R][G][i] = round(i * cm->rg);
+ cm->lut[R][B][i] = round(i * cm->rb);
+ cm->lut[R][A][i] = round(i * cm->ra);
+
+ cm->lut[G][R][i] = round(i * cm->gr);
+ cm->lut[G][G][i] = round(i * cm->gg);
+ cm->lut[G][B][i] = round(i * cm->gb);
+ cm->lut[G][A][i] = round(i * cm->ga);
+
+ cm->lut[B][R][i] = round(i * cm->br);
+ cm->lut[B][G][i] = round(i * cm->bg);
+ cm->lut[B][B][i] = round(i * cm->bb);
+ cm->lut[B][A][i] = round(i * cm->ba);
+
+ cm->lut[A][R][i] = round(i * cm->ar);
+ cm->lut[A][G][i] = round(i * cm->ag);
+ cm->lut[A][B][i] = round(i * cm->ab);
+ cm->lut[A][A][i] = round(i * cm->aa);
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorChannelMixerContext *cm = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const uint8_t roffset = cm->rgba_map[R];
+ const uint8_t goffset = cm->rgba_map[G];
+ const uint8_t boffset = cm->rgba_map[B];
+ const uint8_t aoffset = cm->rgba_map[A];
+ const uint8_t *srcrow = in->data[0];
+ uint8_t *dstrow;
+ AVFrame *out;
+ int i, j;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ dstrow = out->data[0];
+ switch (outlink->format) {
+ case AV_PIX_FMT_BGR24:
+ case AV_PIX_FMT_RGB24:
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * 3; j += 3) {
+ const uint8_t rin = src[j + roffset];
+ const uint8_t gin = src[j + goffset];
+ const uint8_t bin = src[j + boffset];
+
+ dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
+ cm->lut[R][G][gin] +
+ cm->lut[R][B][bin]);
+ dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
+ cm->lut[G][G][gin] +
+ cm->lut[G][B][bin]);
+ dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
+ cm->lut[B][G][gin] +
+ cm->lut[B][B][bin]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_0BGR:
+ case AV_PIX_FMT_0RGB:
+ case AV_PIX_FMT_BGR0:
+ case AV_PIX_FMT_RGB0:
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * 4; j += 4) {
+ const uint8_t rin = src[j + roffset];
+ const uint8_t gin = src[j + goffset];
+ const uint8_t bin = src[j + boffset];
+
+ dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
+ cm->lut[R][G][gin] +
+ cm->lut[R][B][bin]);
+ dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
+ cm->lut[G][G][gin] +
+ cm->lut[G][B][bin]);
+ dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
+ cm->lut[B][G][gin] +
+ cm->lut[B][B][bin]);
+ if (in != out)
+ dst[j + aoffset] = 0;
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_ABGR:
+ case AV_PIX_FMT_ARGB:
+ case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_RGBA:
+ for (i = 0; i < outlink->h; i++) {
+ const uint8_t *src = srcrow;
+ uint8_t *dst = dstrow;
+
+ for (j = 0; j < outlink->w * 4; j += 4) {
+ const uint8_t rin = src[j + roffset];
+ const uint8_t gin = src[j + goffset];
+ const uint8_t bin = src[j + boffset];
+ const uint8_t ain = src[j + aoffset];
+
+ dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
+ cm->lut[R][G][gin] +
+ cm->lut[R][B][bin] +
+ cm->lut[R][A][ain]);
+ dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
+ cm->lut[G][G][gin] +
+ cm->lut[G][B][bin] +
+ cm->lut[G][A][ain]);
+ dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
+ cm->lut[B][G][gin] +
+ cm->lut[B][B][bin] +
+ cm->lut[B][A][ain]);
+ dst[j + aoffset] = av_clip_uint8(cm->lut[A][R][rin] +
+ cm->lut[A][G][gin] +
+ cm->lut[A][B][bin] +
+ cm->lut[A][A][ain]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGB48:
+ for (i = 0; i < outlink->h; i++) {
+ const uint16_t *src = (const uint16_t *)srcrow;
+ uint16_t *dst = (uint16_t *)dstrow;
+
+ for (j = 0; j < outlink->w * 3; j += 3) {
+ const uint16_t rin = src[j + roffset];
+ const uint16_t gin = src[j + goffset];
+ const uint16_t bin = src[j + boffset];
+
+ dst[j + roffset] = av_clip_uint16(cm->lut[R][R][rin] +
+ cm->lut[R][G][gin] +
+ cm->lut[R][B][bin]);
+ dst[j + goffset] = av_clip_uint16(cm->lut[G][R][rin] +
+ cm->lut[G][G][gin] +
+ cm->lut[G][B][bin]);
+ dst[j + boffset] = av_clip_uint16(cm->lut[B][R][rin] +
+ cm->lut[B][G][gin] +
+ cm->lut[B][B][bin]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ break;
+ case AV_PIX_FMT_BGRA64:
+ case AV_PIX_FMT_RGBA64:
+ for (i = 0; i < outlink->h; i++) {
+ const uint16_t *src = (const uint16_t *)srcrow;
+ uint16_t *dst = (uint16_t *)dstrow;
+
+ for (j = 0; j < outlink->w * 4; j += 4) {
+ const uint16_t rin = src[j + roffset];
+ const uint16_t gin = src[j + goffset];
+ const uint16_t bin = src[j + boffset];
+ const uint16_t ain = src[j + aoffset];
+
+ dst[j + roffset] = av_clip_uint16(cm->lut[R][R][rin] +
+ cm->lut[R][G][gin] +
+ cm->lut[R][B][bin] +
+ cm->lut[R][A][ain]);
+ dst[j + goffset] = av_clip_uint16(cm->lut[G][R][rin] +
+ cm->lut[G][G][gin] +
+ cm->lut[G][B][bin] +
+ cm->lut[G][A][ain]);
+ dst[j + boffset] = av_clip_uint16(cm->lut[B][R][rin] +
+ cm->lut[B][G][gin] +
+ cm->lut[B][B][bin] +
+ cm->lut[B][A][ain]);
+ dst[j + aoffset] = av_clip_uint16(cm->lut[A][R][rin] +
+ cm->lut[A][G][gin] +
+ cm->lut[A][B][bin] +
+ cm->lut[A][A][ain]);
+ }
+
+ srcrow += in->linesize[0];
+ dstrow += out->linesize[0];
+ }
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(ctx->outputs[0], out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ColorChannelMixerContext *cm = ctx->priv;
+
+ av_freep(&cm->buffer);
+}
+
+static const AVFilterPad colorchannelmixer_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colorchannelmixer_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colorchannelmixer = {
+ .name = "colorchannelmixer",
+ .description = NULL_IF_CONFIG_SMALL("Adjust colors by mixing color channels."),
+ .priv_size = sizeof(ColorChannelMixerContext),
+ .priv_class = &colorchannelmixer_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = colorchannelmixer_inputs,
+ .outputs = colorchannelmixer_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_colormatrix.c b/libavfilter/vf_colormatrix.c
new file mode 100644
index 0000000..df78391
--- /dev/null
+++ b/libavfilter/vf_colormatrix.c
@@ -0,0 +1,411 @@
+/*
+ * ColorMatrix v2.2 for Avisynth 2.5.x
+ *
+ * Copyright (C) 2006-2007 Kevin Stone
+ *
+ * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
+ * Dijkhof. It adds the ability to convert between any of: Rec.709, FCC,
+ * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
+ * adds an option to use scaled or non-scaled coefficients, and more...
+ */
+
+#include <float.h>
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+
+#define NS(n) ((n) < 0 ? (int)((n)*65536.0-0.5+DBL_EPSILON) : (int)((n)*65536.0+0.5))
+#define CB(n) av_clip_uint8(n)
+
+static const double yuv_coeff[4][3][3] = {
+ { { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0)
+ { -0.3850, +0.5000, -0.1150 },
+ { -0.4540, -0.0460, +0.5000 } },
+ { { +0.5900, +0.1100, +0.3000 }, // FCC (1)
+ { -0.3310, +0.5000, -0.1690 },
+ { -0.4210, -0.0790, +0.5000 } },
+ { { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
+ { -0.3313, +0.5000, -0.1687 },
+ { -0.4187, -0.0813, +0.5000 } },
+ { { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3)
+ { -0.3840, +0.5000, -0.1160 },
+ { -0.4450, -0.0550, +0.5000 } },
+};
+
+enum ColorMode {
+ COLOR_MODE_NONE = -1,
+ COLOR_MODE_BT709,
+ COLOR_MODE_FCC,
+ COLOR_MODE_BT601,
+ COLOR_MODE_SMPTE240M,
+ COLOR_MODE_COUNT
+};
+
+typedef struct {
+ const AVClass *class;
+ int yuv_convert[16][3][3];
+ int interlaced;
+ enum ColorMode source, dest;
+ int mode;
+ int hsub, vsub;
+} ColorMatrixContext;
+
+#define OFFSET(x) offsetof(ColorMatrixContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption colormatrix_options[] = {
+ { "src", "set source color matrix", OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
+ { "dst", "set destination color matrix", OFFSET(dest), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
+ { "bt709", "set BT.709 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709}, .flags=FLAGS, .unit="color_mode" },
+ { "fcc", "set FCC colorspace ", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC}, .flags=FLAGS, .unit="color_mode" },
+ { "bt601", "set BT.601 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
+ { "smpte240m", "set SMPTE-240M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M}, .flags=FLAGS, .unit="color_mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(colormatrix);
+
+#define ma m[0][0]
+#define mb m[0][1]
+#define mc m[0][2]
+#define md m[1][0]
+#define me m[1][1]
+#define mf m[1][2]
+#define mg m[2][0]
+#define mh m[2][1]
+#define mi m[2][2]
+
+#define ima im[0][0]
+#define imb im[0][1]
+#define imc im[0][2]
+#define imd im[1][0]
+#define ime im[1][1]
+#define imf im[1][2]
+#define img im[2][0]
+#define imh im[2][1]
+#define imi im[2][2]
+
+static void inverse3x3(double im[3][3], const double m[3][3])
+{
+ double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg);
+ det = 1.0 / det;
+ ima = det * (me * mi - mf * mh);
+ imb = det * (mc * mh - mb * mi);
+ imc = det * (mb * mf - mc * me);
+ imd = det * (mf * mg - md * mi);
+ ime = det * (ma * mi - mc * mg);
+ imf = det * (mc * md - ma * mf);
+ img = det * (md * mh - me * mg);
+ imh = det * (mb * mg - ma * mh);
+ imi = det * (ma * me - mb * md);
+}
+
+static void solve_coefficients(double cm[3][3], double rgb[3][3], const double yuv[3][3])
+{
+ int i, j;
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 3; j++)
+ cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
+}
+
+static void calc_coefficients(AVFilterContext *ctx)
+{
+ ColorMatrixContext *color = ctx->priv;
+ double rgb_coeffd[4][3][3];
+ double yuv_convertd[16][3][3];
+ int v = 0;
+ int i, j, k;
+
+ for (i = 0; i < 4; i++)
+ inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
+ for (k = 0; k < 3; k++) {
+ color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
+ color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
+ color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
+ }
+ if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
+ color->yuv_convert[v][2][0] != 0) {
+ av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
+ }
+ v++;
+ }
+ }
+}
+
+static const char * const color_modes[] = {"bt709", "fcc", "bt601", "smpte240m"};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ColorMatrixContext *color = ctx->priv;
+
+ if (color->dest == COLOR_MODE_NONE) {
+ av_log(ctx, AV_LOG_ERROR, "Unspecified destination color space\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (color->source == color->dest) {
+ av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static void process_frame_uyvy422(ColorMatrixContext *color,
+ AVFrame *dst, AVFrame *src)
+{
+ const unsigned char *srcp = src->data[0];
+ const int src_pitch = src->linesize[0];
+ const int height = src->height;
+ const int width = src->width*2;
+ unsigned char *dstp = dst->data[0];
+ const int dst_pitch = dst->linesize[0];
+ const int c2 = color->yuv_convert[color->mode][0][1];
+ const int c3 = color->yuv_convert[color->mode][0][2];
+ const int c4 = color->yuv_convert[color->mode][1][1];
+ const int c5 = color->yuv_convert[color->mode][1][2];
+ const int c6 = color->yuv_convert[color->mode][2][1];
+ const int c7 = color->yuv_convert[color->mode][2][2];
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 4) {
+ const int u = srcp[x + 0] - 128;
+ const int v = srcp[x + 2] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
+ dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
+ }
+ srcp += src_pitch;
+ dstp += dst_pitch;
+ }
+}
+
+static void process_frame_yuv422p(ColorMatrixContext *color,
+ AVFrame *dst, AVFrame *src)
+{
+ const unsigned char *srcpU = src->data[1];
+ const unsigned char *srcpV = src->data[2];
+ const unsigned char *srcpY = src->data[0];
+ const int src_pitchY = src->linesize[0];
+ const int src_pitchUV = src->linesize[1];
+ const int height = src->height;
+ const int width = src->width;
+ unsigned char *dstpU = dst->data[1];
+ unsigned char *dstpV = dst->data[2];
+ unsigned char *dstpY = dst->data[0];
+ const int dst_pitchY = dst->linesize[0];
+ const int dst_pitchUV = dst->linesize[1];
+ const int c2 = color->yuv_convert[color->mode][0][1];
+ const int c3 = color->yuv_convert[color->mode][0][2];
+ const int c4 = color->yuv_convert[color->mode][1][1];
+ const int c5 = color->yuv_convert[color->mode][1][2];
+ const int c6 = color->yuv_convert[color->mode][2][1];
+ const int c7 = color->yuv_convert[color->mode][2][2];
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 2) {
+ const int u = srcpU[x >> 1] - 128;
+ const int v = srcpV[x >> 1] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
+ dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
+ dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ }
+ srcpY += src_pitchY;
+ dstpY += dst_pitchY;
+ srcpU += src_pitchUV;
+ srcpV += src_pitchUV;
+ dstpU += dst_pitchUV;
+ dstpV += dst_pitchUV;
+ }
+}
+
+static void process_frame_yuv420p(ColorMatrixContext *color,
+ AVFrame *dst, AVFrame *src)
+{
+ const unsigned char *srcpU = src->data[1];
+ const unsigned char *srcpV = src->data[2];
+ const unsigned char *srcpY = src->data[0];
+ const unsigned char *srcpN = src->data[0] + src->linesize[0];
+ const int src_pitchY = src->linesize[0];
+ const int src_pitchUV = src->linesize[1];
+ const int height = src->height;
+ const int width = src->width;
+ unsigned char *dstpU = dst->data[1];
+ unsigned char *dstpV = dst->data[2];
+ unsigned char *dstpY = dst->data[0];
+ unsigned char *dstpN = dst->data[0] + dst->linesize[0];
+ const int dst_pitchY = dst->linesize[0];
+ const int dst_pitchUV = dst->linesize[1];
+ const int c2 = color->yuv_convert[color->mode][0][1];
+ const int c3 = color->yuv_convert[color->mode][0][2];
+ const int c4 = color->yuv_convert[color->mode][1][1];
+ const int c5 = color->yuv_convert[color->mode][1][2];
+ const int c6 = color->yuv_convert[color->mode][2][1];
+ const int c7 = color->yuv_convert[color->mode][2][2];
+ int x, y;
+
+ for (y = 0; y < height; y += 2) {
+ for (x = 0; x < width; x += 2) {
+ const int u = srcpU[x >> 1] - 128;
+ const int v = srcpV[x >> 1] - 128;
+ const int uvval = c2 * u + c3 * v + 1081344;
+ dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
+ dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
+ dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
+ dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
+ dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
+ dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
+ }
+ srcpY += src_pitchY << 1;
+ dstpY += dst_pitchY << 1;
+ srcpN += src_pitchY << 1;
+ dstpN += dst_pitchY << 1;
+ srcpU += src_pitchUV;
+ srcpV += src_pitchUV;
+ dstpU += dst_pitchUV;
+ dstpV += dst_pitchUV;
+ }
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ColorMatrixContext *color = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+ color->hsub = pix_desc->log2_chroma_w;
+ color->vsub = pix_desc->log2_chroma_h;
+
+ av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
+ color_modes[color->source], color_modes[color->dest]);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_UYVY422,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ AVFilterContext *ctx = link->dst;
+ ColorMatrixContext *color = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (color->source == COLOR_MODE_NONE) {
+ enum AVColorSpace cs = av_frame_get_colorspace(in);
+ enum ColorMode source;
+
+ switch(cs) {
+ case AVCOL_SPC_BT709 : source = COLOR_MODE_BT709 ; break;
+ case AVCOL_SPC_FCC : source = COLOR_MODE_FCC ; break;
+ case AVCOL_SPC_SMPTE240M : source = COLOR_MODE_SMPTE240M ; break;
+ case AVCOL_SPC_BT470BG : source = COLOR_MODE_BT601 ; break;
+ default :
+ av_log(ctx, AV_LOG_ERROR, "Input frame does not specify a supported colorspace, and none has been specified as source either\n");
+ av_frame_free(&out);
+ return AVERROR(EINVAL);
+ }
+ color->mode = source * 4 + color->dest;
+ } else
+ color->mode = color->source * 4 + color->dest;
+
+ switch(color->dest) {
+ case COLOR_MODE_BT709 : av_frame_set_colorspace(out, AVCOL_SPC_BT709) ; break;
+ case COLOR_MODE_FCC : av_frame_set_colorspace(out, AVCOL_SPC_FCC) ; break;
+ case COLOR_MODE_SMPTE240M: av_frame_set_colorspace(out, AVCOL_SPC_SMPTE240M); break;
+ case COLOR_MODE_BT601 : av_frame_set_colorspace(out, AVCOL_SPC_BT470BG) ; break;
+ }
+
+ calc_coefficients(ctx);
+
+ if (in->format == AV_PIX_FMT_YUV422P)
+ process_frame_yuv422p(color, out, in);
+ else if (in->format == AV_PIX_FMT_YUV420P)
+ process_frame_yuv420p(color, out, in);
+ else
+ process_frame_uyvy422(color, out, in);
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad colormatrix_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad colormatrix_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_colormatrix = {
+ .name = "colormatrix",
+ .description = NULL_IF_CONFIG_SMALL("Convert color matrix."),
+ .priv_size = sizeof(ColorMatrixContext),
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = colormatrix_inputs,
+ .outputs = colormatrix_outputs,
+ .priv_class = &colormatrix_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_copy.c b/libavfilter/vf_copy.c
index 5e60f20..fb9a906 100644
--- a/libavfilter/vf_copy.c
+++ b/libavfilter/vf_copy.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -44,10 +44,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static const AVFilterPad avfilter_vf_copy_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -61,9 +60,8 @@ static const AVFilterPad avfilter_vf_copy_outputs[] = {
};
AVFilter ff_vf_copy = {
- .name = "copy",
+ .name = "copy",
.description = NULL_IF_CONFIG_SMALL("Copy the input video unchanged to the output."),
-
- .inputs = avfilter_vf_copy_inputs,
- .outputs = avfilter_vf_copy_outputs,
+ .inputs = avfilter_vf_copy_inputs,
+ .outputs = avfilter_vf_copy_outputs,
};
diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c
index 9e820d7..a2f029a 100644
--- a/libavfilter/vf_crop.c
+++ b/libavfilter/vf_crop.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,13 +38,15 @@
#include "libavutil/opt.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"in_w", "iw", ///< width of the input video
"in_h", "ih", ///< height of the input video
"out_w", "ow", ///< width of the cropped video
"out_h", "oh", ///< height of the cropped video
+ "a",
+ "sar",
+ "dar",
+ "hsub",
+ "vsub",
"x",
"y",
"n", ///< number of frame
@@ -54,16 +56,19 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
+ VAR_A,
+ VAR_SAR,
+ VAR_DAR,
+ VAR_HSUB,
+ VAR_VSUB,
VAR_X,
VAR_Y,
VAR_N,
+ VAR_POS,
VAR_T,
VAR_VARS_NB
};
@@ -75,43 +80,29 @@ typedef struct CropContext {
int w; ///< width of the cropped area
int h; ///< height of the cropped area
+ AVRational out_sar; ///< output sample aspect ratio
+ int keep_aspect; ///< keep display aspect ratio when cropping
+
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
int hsub, vsub; ///< chroma subsampling
- char *x_expr, *y_expr, *ow_expr, *oh_expr;
+ char *x_expr, *y_expr, *w_expr, *h_expr;
AVExpr *x_pexpr, *y_pexpr; /* parsed expressions for x and y */
double var_values[VAR_VARS_NB];
} CropContext;
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGB48LE,
- AV_PIX_FMT_BGR48BE, AV_PIX_FMT_BGR48LE,
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE,
- AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE,
- AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE,
- AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE,
- AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE,
- AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE,
- AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE,
- AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8,
- AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
- AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ AVFilterFormats *formats = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM)) &&
+ !((desc->log2_chroma_w || desc->log2_chroma_h) && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR)))
+ ff_add_format(&formats, fmt);
+ }
+ ff_set_common_formats(ctx, formats);
return 0;
}
@@ -149,34 +140,37 @@ static int config_input(AVFilterLink *link)
const char *expr;
double res;
- s->var_values[VAR_E] = M_E;
- s->var_values[VAR_PHI] = M_PHI;
- s->var_values[VAR_PI] = M_PI;
s->var_values[VAR_IN_W] = s->var_values[VAR_IW] = ctx->inputs[0]->w;
s->var_values[VAR_IN_H] = s->var_values[VAR_IH] = ctx->inputs[0]->h;
+ s->var_values[VAR_A] = (float) link->w / link->h;
+ s->var_values[VAR_SAR] = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
+ s->var_values[VAR_DAR] = s->var_values[VAR_A] * s->var_values[VAR_SAR];
+ s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
+ s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
s->var_values[VAR_X] = NAN;
s->var_values[VAR_Y] = NAN;
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = NAN;
s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = NAN;
s->var_values[VAR_N] = 0;
s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_POS] = NAN;
av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
s->hsub = pix_desc->log2_chroma_w;
s->vsub = pix_desc->log2_chroma_h;
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->ow_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->oh_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = res;
/* evaluate again ow as it may depend on oh */
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->ow_expr),
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
var_names, s->var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto fail_expr;
@@ -187,7 +181,7 @@ static int config_input(AVFilterLink *link)
av_log(ctx, AV_LOG_ERROR,
"Too big value or invalid expression for out_w/ow or out_h/oh. "
"Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
- s->ow_expr, s->oh_expr);
+ s->w_expr, s->h_expr);
return AVERROR(EINVAL);
}
s->w &= ~((1 << s->hsub) - 1);
@@ -202,8 +196,17 @@ static int config_input(AVFilterLink *link)
NULL, NULL, NULL, NULL, 0, ctx)) < 0)
return AVERROR(EINVAL);
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
- link->w, link->h, s->w, s->h);
+ if (s->keep_aspect) {
+ AVRational dar = av_mul_q(link->sample_aspect_ratio,
+ (AVRational){ link->w, link->h });
+ av_reduce(&s->out_sar.num, &s->out_sar.den,
+ dar.num * s->h, dar.den * s->w, INT_MAX);
+ } else
+ s->out_sar = link->sample_aspect_ratio;
+
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n",
+ link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den,
+ s->w, s->h, s->out_sar.num, s->out_sar.den);
if (s->w <= 0 || s->h <= 0 ||
s->w > link->w || s->h > link->h) {
@@ -231,6 +234,7 @@ static int config_output(AVFilterLink *link)
link->w = s->w;
link->h = s->h;
+ link->sample_aspect_ratio = s->out_sar;
return 0;
}
@@ -245,8 +249,11 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
frame->width = s->w;
frame->height = s->h;
+ s->var_values[VAR_N] = link->frame_count;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
+ s->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
+ NAN : av_frame_get_pkt_pos(frame);
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
@@ -265,9 +272,9 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
s->x &= ~((1 << s->hsub) - 1);
s->y &= ~((1 << s->vsub) - 1);
- av_dlog(ctx, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
- (int)s->var_values[VAR_N], s->var_values[VAR_T], s->x,
- s->y, s->x+s->w, s->y+s->h);
+ av_dlog(ctx, "n:%d t:%f pos:%f x:%d y:%d x+w:%d y+h:%d\n",
+ (int)s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
+ s->x, s->y, s->x+s->w, s->y+s->h);
frame->data[0] += s->y * frame->linesize[0];
frame->data[0] += s->x * s->max_step[0];
@@ -287,37 +294,31 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
frame->data[3] += s->x * s->max_step[3];
}
- s->var_values[VAR_N] += 1.0;
-
return ff_filter_frame(link->dst->outputs[0], frame);
}
#define OFFSET(x) offsetof(CropContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "out_w", "Output video width", OFFSET(ow_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
- { "out_h", "Output video height", OFFSET(oh_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
- { "x", "Horizontal position in the input video of the left edge of the cropped output video",
- OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "(in_w - out_w) / 2" }, .flags = FLAGS },
- { "y", "Vertical position in the input video of the top edge of the cropped output video",
- OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "(in_h - out_h) / 2" }, .flags = FLAGS },
- { NULL },
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption crop_options[] = {
+ { "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
};
-static const AVClass crop_class = {
- .class_name = "crop",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(crop);
static const AVFilterPad avfilter_vf_crop_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
},
{ NULL }
};
@@ -332,15 +333,12 @@ static const AVFilterPad avfilter_vf_crop_outputs[] = {
};
AVFilter ff_vf_crop = {
- .name = "crop",
- .description = NULL_IF_CONFIG_SMALL("Crop the input video to width:height:x:y."),
-
- .priv_size = sizeof(CropContext),
- .priv_class = &crop_class,
-
+ .name = "crop",
+ .description = NULL_IF_CONFIG_SMALL("Crop the input video."),
+ .priv_size = sizeof(CropContext),
+ .priv_class = &crop_class,
.query_formats = query_formats,
.uninit = uninit,
-
- .inputs = avfilter_vf_crop_inputs,
- .outputs = avfilter_vf_crop_outputs,
+ .inputs = avfilter_vf_crop_inputs,
+ .outputs = avfilter_vf_crop_outputs,
};
diff --git a/libavfilter/vf_cropdetect.c b/libavfilter/vf_cropdetect.c
index 14c26c7..76aa7b2 100644
--- a/libavfilter/vf_cropdetect.c
+++ b/libavfilter/vf_cropdetect.c
@@ -1,19 +1,19 @@
/*
* Copyright (c) 2002 A'rpi
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -23,8 +23,6 @@
* Ported from MPlayer libmpcodecs/vf_cropdetect.c.
*/
-#include <stdio.h>
-
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
@@ -114,15 +112,21 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
+#define SET_META(key, value) \
+ av_dict_set_int(metadata, key, value, 0)
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
int bpp = s->max_pixsteps[0];
int w, h, x, y, shrink_by;
+ AVDictionary **metadata;
// ignore first 2 frames - they may be empty
if (++s->frame_nb > 0) {
+ metadata = avpriv_frame_get_metadatap(frame);
+
// Reset the crop area every reset_count frames, if reset_count is > 0
if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
s->x1 = frame->width - 1;
@@ -139,7 +143,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
}
- for (y = frame->height - 1; y > s->y2; y--) {
+ for (y = frame->height - 1; y > FFMAX(s->y2, s->y1); y--) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
s->y2 = y;
break;
@@ -153,7 +157,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
}
- for (y = frame->width - 1; y > s->x2; y--) {
+ for (y = frame->width - 1; y > FFMAX(s->x2, s->x1); y--) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
s->x2 = y;
break;
@@ -183,6 +187,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
h -= shrink_by;
y += (shrink_by/2 + 1) & ~1;
+ SET_META("lavfi.cropdetect.x1", s->x1);
+ SET_META("lavfi.cropdetect.x2", s->x2);
+ SET_META("lavfi.cropdetect.y1", s->y1);
+ SET_META("lavfi.cropdetect.y2", s->y2);
+ SET_META("lavfi.cropdetect.w", w);
+ SET_META("lavfi.cropdetect.h", h);
+ SET_META("lavfi.cropdetect.x", x);
+ SET_META("lavfi.cropdetect.y", y);
+
av_log(ctx, AV_LOG_INFO,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
@@ -194,28 +207,24 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
#define OFFSET(x) offsetof(CropDetectContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "limit", "Threshold below which the pixel is considered black", OFFSET(limit), AV_OPT_TYPE_INT, { .i64 = 24 }, 0, INT_MAX, FLAGS },
- { "round", "Value by which the width/height should be divisible", OFFSET(round), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption cropdetect_options[] = {
+ { "limit", "Threshold below which the pixel is considered black", OFFSET(limit), AV_OPT_TYPE_INT, { .i64 = 24 }, 0, 255, FLAGS },
+ { "round", "Value by which the width/height should be divisible", OFFSET(round), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, INT_MAX, FLAGS },
{ "reset", "Recalculate the crop area after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { NULL },
+ { "reset_count", "Recalculate the crop area after this many frames",OFFSET(reset_count),AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { NULL }
};
-static const AVClass cropdetect_class = {
- .class_name = "cropdetect",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(cropdetect);
static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -229,16 +238,13 @@ static const AVFilterPad avfilter_vf_cropdetect_outputs[] = {
};
AVFilter ff_vf_cropdetect = {
- .name = "cropdetect",
- .description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
-
- .priv_size = sizeof(CropDetectContext),
- .priv_class = &cropdetect_class,
- .init = init,
-
+ .name = "cropdetect",
+ .description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
+ .priv_size = sizeof(CropDetectContext),
+ .priv_class = &cropdetect_class,
+ .init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_cropdetect_inputs,
-
- .outputs = avfilter_vf_cropdetect_outputs,
+ .inputs = avfilter_vf_cropdetect_inputs,
+ .outputs = avfilter_vf_cropdetect_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_curves.c b/libavfilter/vf_curves.c
new file mode 100644
index 0000000..b17c391
--- /dev/null
+++ b/libavfilter/vf_curves.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/bprint.h"
+#include "libavutil/eval.h"
+#include "libavutil/file.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+struct keypoint {
+ double x, y;
+ struct keypoint *next;
+};
+
+#define NB_COMP 3
+
+enum preset {
+ PRESET_NONE,
+ PRESET_COLOR_NEGATIVE,
+ PRESET_CROSS_PROCESS,
+ PRESET_DARKER,
+ PRESET_INCREASE_CONTRAST,
+ PRESET_LIGHTER,
+ PRESET_LINEAR_CONTRAST,
+ PRESET_MEDIUM_CONTRAST,
+ PRESET_NEGATIVE,
+ PRESET_STRONG_CONTRAST,
+ PRESET_VINTAGE,
+ NB_PRESETS,
+};
+
+typedef struct {
+ const AVClass *class;
+ enum preset preset;
+ char *comp_points_str[NB_COMP + 1];
+ char *comp_points_str_all;
+ uint8_t graph[NB_COMP + 1][256];
+ char *psfile;
+ uint8_t rgba_map[4];
+ int step;
+} CurvesContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+#define OFFSET(x) offsetof(CurvesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption curves_options[] = {
+ { "preset", "select a color curves preset", OFFSET(preset), AV_OPT_TYPE_INT, {.i64=PRESET_NONE}, PRESET_NONE, NB_PRESETS-1, FLAGS, "preset_name" },
+ { "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NONE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "color_negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_COLOR_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "cross_process", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_CROSS_PROCESS}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "darker", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_DARKER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "increase_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_INCREASE_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "lighter", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LIGHTER}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "linear_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_LINEAR_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "medium_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_MEDIUM_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "negative", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_NEGATIVE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "strong_contrast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_STRONG_CONTRAST}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "vintage", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PRESET_VINTAGE}, INT_MIN, INT_MAX, FLAGS, "preset_name" },
+ { "master","set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "m", "set master points coordinates",OFFSET(comp_points_str[NB_COMP]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "red", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "r", "set red points coordinates", OFFSET(comp_points_str[0]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "green", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "g", "set green points coordinates", OFFSET(comp_points_str[1]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "blue", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "b", "set blue points coordinates", OFFSET(comp_points_str[2]), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "all", "set points coordinates for all components", OFFSET(comp_points_str_all), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "psfile", "set Photoshop curves file name", OFFSET(psfile), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(curves);
+
+static const struct {
+ const char *r;
+ const char *g;
+ const char *b;
+ const char *master;
+} curves_presets[] = {
+ [PRESET_COLOR_NEGATIVE] = {
+ "0/1 0.129/1 0.466/0.498 0.725/0 1/0",
+ "0/1 0.109/1 0.301/0.498 0.517/0 1/0",
+ "0/1 0.098/1 0.235/0.498 0.423/0 1/0",
+ },
+ [PRESET_CROSS_PROCESS] = {
+ "0.25/0.156 0.501/0.501 0.686/0.745",
+ "0.25/0.188 0.38/0.501 0.745/0.815 1/0.815",
+ "0.231/0.094 0.709/0.874",
+ },
+ [PRESET_DARKER] = { .master = "0.5/0.4" },
+ [PRESET_INCREASE_CONTRAST] = { .master = "0.149/0.066 0.831/0.905 0.905/0.98" },
+ [PRESET_LIGHTER] = { .master = "0.4/0.5" },
+ [PRESET_LINEAR_CONTRAST] = { .master = "0.305/0.286 0.694/0.713" },
+ [PRESET_MEDIUM_CONTRAST] = { .master = "0.286/0.219 0.639/0.643" },
+ [PRESET_NEGATIVE] = { .master = "0/1 1/0" },
+ [PRESET_STRONG_CONTRAST] = { .master = "0.301/0.196 0.592/0.6 0.686/0.737" },
+ [PRESET_VINTAGE] = {
+ "0/0.11 0.42/0.51 1/0.95",
+ "0.50/0.48",
+ "0/0.22 0.49/0.44 1/0.8",
+ }
+};
+
+static struct keypoint *make_point(double x, double y, struct keypoint *next)
+{
+ struct keypoint *point = av_mallocz(sizeof(*point));
+
+ if (!point)
+ return NULL;
+ point->x = x;
+ point->y = y;
+ point->next = next;
+ return point;
+}
+
+static int parse_points_str(AVFilterContext *ctx, struct keypoint **points, const char *s)
+{
+ char *p = (char *)s; // strtod won't alter the string
+ struct keypoint *last = NULL;
+
+ /* construct a linked list based on the key points string */
+ while (p && *p) {
+ struct keypoint *point = make_point(0, 0, NULL);
+ if (!point)
+ return AVERROR(ENOMEM);
+ point->x = av_strtod(p, &p); if (p && *p) p++;
+ point->y = av_strtod(p, &p); if (p && *p) p++;
+ if (point->x < 0 || point->x > 1 || point->y < 0 || point->y > 1) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid key point coordinates (%f;%f), "
+ "x and y must be in the [0;1] range.\n", point->x, point->y);
+ return AVERROR(EINVAL);
+ }
+ if (!*points)
+ *points = point;
+ if (last) {
+ if ((int)(last->x * 255) >= (int)(point->x * 255)) {
+ av_log(ctx, AV_LOG_ERROR, "Key point coordinates (%f;%f) "
+ "and (%f;%f) are too close from each other or not "
+ "strictly increasing on the x-axis\n",
+ last->x, last->y, point->x, point->y);
+ return AVERROR(EINVAL);
+ }
+ last->next = point;
+ }
+ last = point;
+ }
+
+ /* auto insert first key point if missing at x=0 */
+ if (!*points) {
+ last = make_point(0, 0, NULL);
+ if (!last)
+ return AVERROR(ENOMEM);
+ last->x = last->y = 0;
+ *points = last;
+ } else if ((*points)->x != 0.) {
+ struct keypoint *newfirst = make_point(0, 0, *points);
+ if (!newfirst)
+ return AVERROR(ENOMEM);
+ *points = newfirst;
+ }
+
+ av_assert0(last);
+
+ /* auto insert last key point if missing at x=1 */
+ if (last->x != 1.) {
+ struct keypoint *point = make_point(1, 1, NULL);
+ if (!point)
+ return AVERROR(ENOMEM);
+ last->next = point;
+ }
+
+ return 0;
+}
+
+static int get_nb_points(const struct keypoint *d)
+{
+ int n = 0;
+ while (d) {
+ n++;
+ d = d->next;
+ }
+ return n;
+}
+
+/**
+ * Natural cubic spline interpolation
+ * Finding curves using Cubic Splines notes by Steven Rauch and John Stockie.
+ * @see http://people.math.sfu.ca/~stockie/teaching/macm316/notes/splines.pdf
+ */
+static int interpolate(AVFilterContext *ctx, uint8_t *y, const struct keypoint *points)
+{
+ int i, ret = 0;
+ const struct keypoint *point;
+ double xprev = 0;
+
+ int n = get_nb_points(points); // number of splines
+
+ double (*matrix)[3] = av_calloc(n, sizeof(*matrix));
+ double *h = av_malloc((n - 1) * sizeof(*h));
+ double *r = av_calloc(n, sizeof(*r));
+
+ if (!matrix || !h || !r) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* h(i) = x(i+1) - x(i) */
+ i = -1;
+ for (point = points; point; point = point->next) {
+ if (i != -1)
+ h[i] = point->x - xprev;
+ xprev = point->x;
+ i++;
+ }
+
+ /* right-side of the polynomials, will be modified to contains the solution */
+ point = points;
+ for (i = 1; i < n - 1; i++) {
+ double yp = point->y,
+ yc = point->next->y,
+ yn = point->next->next->y;
+ r[i] = 6 * ((yn-yc)/h[i] - (yc-yp)/h[i-1]);
+ point = point->next;
+ }
+
+#define BD 0 /* sub diagonal (below main) */
+#define MD 1 /* main diagonal (center) */
+#define AD 2 /* sup diagonal (above main) */
+
+ /* left side of the polynomials into a tridiagonal matrix. */
+ matrix[0][MD] = matrix[n - 1][MD] = 1;
+ for (i = 1; i < n - 1; i++) {
+ matrix[i][BD] = h[i-1];
+ matrix[i][MD] = 2 * (h[i-1] + h[i]);
+ matrix[i][AD] = h[i];
+ }
+
+ /* tridiagonal solving of the linear system */
+ for (i = 1; i < n; i++) {
+ double den = matrix[i][MD] - matrix[i][BD] * matrix[i-1][AD];
+ double k = den ? 1./den : 1.;
+ matrix[i][AD] *= k;
+ r[i] = (r[i] - matrix[i][BD] * r[i - 1]) * k;
+ }
+ for (i = n - 2; i >= 0; i--)
+ r[i] = r[i] - matrix[i][AD] * r[i + 1];
+
+ /* compute the graph with x=[0..255] */
+ i = 0;
+ point = points;
+ av_assert0(point->next); // always at least 2 key points
+ while (point->next) {
+ double yc = point->y;
+ double yn = point->next->y;
+
+ double a = yc;
+ double b = (yn-yc)/h[i] - h[i]*r[i]/2. - h[i]*(r[i+1]-r[i])/6.;
+ double c = r[i] / 2.;
+ double d = (r[i+1] - r[i]) / (6.*h[i]);
+
+ int x;
+ int x_start = point->x * 255;
+ int x_end = point->next->x * 255;
+
+ av_assert0(x_start >= 0 && x_start <= 255 &&
+ x_end >= 0 && x_end <= 255);
+
+ for (x = x_start; x <= x_end; x++) {
+ double xx = (x - x_start) * 1/255.;
+ double yy = a + b*xx + c*xx*xx + d*xx*xx*xx;
+ y[x] = av_clipf(yy, 0, 1) * 255;
+ av_log(ctx, AV_LOG_DEBUG, "f(%f)=%f -> y[%d]=%d\n", xx, yy, x, y[x]);
+ }
+
+ point = point->next;
+ i++;
+ }
+
+end:
+ av_free(matrix);
+ av_free(h);
+ av_free(r);
+ return ret;
+}
+
+static int parse_psfile(AVFilterContext *ctx, const char *fname)
+{
+ CurvesContext *curves = ctx->priv;
+ uint8_t *buf;
+ size_t size;
+ int i, ret, av_unused(version), nb_curves;
+ AVBPrint ptstr;
+ static const int comp_ids[] = {3, 0, 1, 2};
+
+ av_bprint_init(&ptstr, 0, AV_BPRINT_SIZE_AUTOMATIC);
+
+ ret = av_file_map(fname, &buf, &size, 0, NULL);
+ if (ret < 0)
+ return ret;
+
+#define READ16(dst) do { \
+ if (size < 2) { \
+ ret = AVERROR_INVALIDDATA; \
+ goto end; \
+ } \
+ dst = AV_RB16(buf); \
+ buf += 2; \
+ size -= 2; \
+} while (0)
+
+ READ16(version);
+ READ16(nb_curves);
+ for (i = 0; i < FFMIN(nb_curves, FF_ARRAY_ELEMS(comp_ids)); i++) {
+ int nb_points, n;
+ av_bprint_clear(&ptstr);
+ READ16(nb_points);
+ for (n = 0; n < nb_points; n++) {
+ int y, x;
+ READ16(y);
+ READ16(x);
+ av_bprintf(&ptstr, "%f/%f ", x / 255., y / 255.);
+ }
+ if (*ptstr.str) {
+ char **pts = &curves->comp_points_str[comp_ids[i]];
+ if (!*pts) {
+ *pts = av_strdup(ptstr.str);
+ av_log(ctx, AV_LOG_DEBUG, "curves %d (intid=%d) [%d points]: [%s]\n",
+ i, comp_ids[i], nb_points, *pts);
+ if (!*pts) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ }
+ }
+ }
+end:
+ av_bprint_finalize(&ptstr, NULL);
+ av_file_unmap(buf, size);
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ int i, j, ret;
+ CurvesContext *curves = ctx->priv;
+ struct keypoint *comp_points[NB_COMP + 1] = {0};
+ char **pts = curves->comp_points_str;
+ const char *allp = curves->comp_points_str_all;
+
+ //if (!allp && curves->preset != PRESET_NONE && curves_presets[curves->preset].all)
+ // allp = curves_presets[curves->preset].all;
+
+ if (allp) {
+ for (i = 0; i < NB_COMP; i++) {
+ if (!pts[i])
+ pts[i] = av_strdup(allp);
+ if (!pts[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ if (curves->psfile) {
+ ret = parse_psfile(ctx, curves->psfile);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (curves->preset != PRESET_NONE) {
+#define SET_COMP_IF_NOT_SET(n, name) do { \
+ if (!pts[n] && curves_presets[curves->preset].name) { \
+ pts[n] = av_strdup(curves_presets[curves->preset].name); \
+ if (!pts[n]) \
+ return AVERROR(ENOMEM); \
+ } \
+} while (0)
+ SET_COMP_IF_NOT_SET(0, r);
+ SET_COMP_IF_NOT_SET(1, g);
+ SET_COMP_IF_NOT_SET(2, b);
+ SET_COMP_IF_NOT_SET(3, master);
+ }
+
+ for (i = 0; i < NB_COMP + 1; i++) {
+ ret = parse_points_str(ctx, comp_points + i, curves->comp_points_str[i]);
+ if (ret < 0)
+ return ret;
+ ret = interpolate(ctx, curves->graph[i], comp_points[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pts[NB_COMP]) {
+ for (i = 0; i < NB_COMP; i++)
+ for (j = 0; j < 256; j++)
+ curves->graph[i][j] = curves->graph[NB_COMP][curves->graph[i][j]];
+ }
+
+ if (av_log_get_level() >= AV_LOG_VERBOSE) {
+ for (i = 0; i < NB_COMP; i++) {
+ struct keypoint *point = comp_points[i];
+ av_log(ctx, AV_LOG_VERBOSE, "#%d points:", i);
+ while (point) {
+ av_log(ctx, AV_LOG_VERBOSE, " (%f;%f)", point->x, point->y);
+ point = point->next;
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "\n");
+ av_log(ctx, AV_LOG_VERBOSE, "#%d values:", i);
+ for (j = 0; j < 256; j++)
+ av_log(ctx, AV_LOG_VERBOSE, " %02X", curves->graph[i][j]);
+ av_log(ctx, AV_LOG_VERBOSE, "\n");
+ }
+ }
+
+ for (i = 0; i < NB_COMP + 1; i++) {
+ struct keypoint *point = comp_points[i];
+ while (point) {
+ struct keypoint *next = point->next;
+ av_free(point);
+ point = next;
+ }
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ CurvesContext *curves = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ ff_fill_rgba_map(curves->rgba_map, inlink->format);
+ curves->step = av_get_padded_bits_per_pixel(desc) >> 3;
+
+ return 0;
+}
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ int x, y;
+ const CurvesContext *curves = ctx->priv;
+ const ThreadData *td = arg;
+ const AVFrame *in = td->in;
+ const AVFrame *out = td->out;
+ const int direct = out == in;
+ const int step = curves->step;
+ const uint8_t r = curves->rgba_map[R];
+ const uint8_t g = curves->rgba_map[G];
+ const uint8_t b = curves->rgba_map[B];
+ const uint8_t a = curves->rgba_map[A];
+ const int slice_start = (in->height * jobnr ) / nb_jobs;
+ const int slice_end = (in->height * (jobnr+1)) / nb_jobs;
+ uint8_t *dst = out->data[0] + slice_start * out->linesize[0];
+ const uint8_t *src = in->data[0] + slice_start * in->linesize[0];
+
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < in->width * step; x += step) {
+ dst[x + r] = curves->graph[R][src[x + r]];
+ dst[x + g] = curves->graph[G][src[x + g]];
+ dst[x + b] = curves->graph[B][src[x + b]];
+ if (!direct && step == 4)
+ dst[x + a] = src[x + a];
+ }
+ dst += out->linesize[0];
+ src += in ->linesize[0];
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ ThreadData td;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ td.in = in;
+ td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad curves_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad curves_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_curves = {
+ .name = "curves",
+ .description = NULL_IF_CONFIG_SMALL("Adjust components curves."),
+ .priv_size = sizeof(CurvesContext),
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = curves_inputs,
+ .outputs = curves_outputs,
+ .priv_class = &curves_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_dctdnoiz.c b/libavfilter/vf_dctdnoiz.c
new file mode 100644
index 0000000..a9017b1
--- /dev/null
+++ b/libavfilter/vf_dctdnoiz.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2013-2014 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * A simple, relatively efficient and slow DCT image denoiser.
+ *
+ * @see http://www.ipol.im/pub/art/2011/ys-dct/
+ *
+ * The DCT factorization used is based on "Fast and numerically stable
+ * algorithms for discrete cosine transforms" from Gerlind Plonkaa & Manfred
+ * Tasche (DOI: 10.1016/j.laa.2004.07.015).
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+static const char *const var_names[] = { "c", NULL };
+enum { VAR_C, VAR_VARS_NB };
+
+#define MAX_THREADS 8
+
+typedef struct DCTdnoizContext {
+ const AVClass *class;
+
+ /* coefficient factor expression */
+ char *expr_str;
+ AVExpr *expr[MAX_THREADS];
+ double var_values[MAX_THREADS][VAR_VARS_NB];
+
+ int nb_threads;
+ int pr_width, pr_height; // width and height to process
+ float sigma; // used when no expression are st
+ float th; // threshold (3*sigma)
+ float *cbuf[2][3]; // two planar rgb color buffers
+ float *slices[MAX_THREADS]; // slices buffers (1 slice buffer per thread)
+ float *weights; // dct coeff are cumulated with overlapping; these values are used for averaging
+ int p_linesize; // line sizes for color and weights
+ int overlap; // number of block overlapping pixels
+ int step; // block step increment (blocksize - overlap)
+ int n; // 1<<n is the block size
+ int bsize; // block size, 1<<n
+ void (*filter_freq_func)(struct DCTdnoizContext *s,
+ const float *src, int src_linesize,
+ float *dst, int dst_linesize,
+ int thread_id);
+ void (*color_decorrelation)(float **dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int w, int h);
+ void (*color_correlation)(uint8_t *dst, int dst_linesize,
+ float **src, int src_linesize,
+ int w, int h);
+} DCTdnoizContext;
+
+#define MIN_NBITS 3 /* blocksize = 1<<3 = 8 */
+#define MAX_NBITS 4 /* blocksize = 1<<4 = 16 */
+#define DEFAULT_NBITS 3
+
+#define OFFSET(x) offsetof(DCTdnoizContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption dctdnoiz_options[] = {
+ { "sigma", "set noise sigma constant", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 999, .flags = FLAGS },
+ { "s", "set noise sigma constant", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 999, .flags = FLAGS },
+ { "overlap", "set number of block overlapping pixels", OFFSET(overlap), AV_OPT_TYPE_INT, {.i64=-1}, -1, (1<<MAX_NBITS)-1, .flags = FLAGS },
+ { "expr", "set coefficient factor expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "e", "set coefficient factor expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "n", "set the block size, expressed in bits", OFFSET(n), AV_OPT_TYPE_INT, {.i64=DEFAULT_NBITS}, MIN_NBITS, MAX_NBITS, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(dctdnoiz);
+
+static void av_always_inline fdct8_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ const float x00 = src[0*src_stridea] + src[7*src_stridea];
+ const float x01 = src[1*src_stridea] + src[6*src_stridea];
+ const float x02 = src[2*src_stridea] + src[5*src_stridea];
+ const float x03 = src[3*src_stridea] + src[4*src_stridea];
+ const float x04 = src[0*src_stridea] - src[7*src_stridea];
+ const float x05 = src[1*src_stridea] - src[6*src_stridea];
+ const float x06 = src[2*src_stridea] - src[5*src_stridea];
+ const float x07 = src[3*src_stridea] - src[4*src_stridea];
+ const float x08 = x00 + x03;
+ const float x09 = x01 + x02;
+ const float x0a = x00 - x03;
+ const float x0b = x01 - x02;
+ const float x0c = 1.38703984532215f*x04 + 0.275899379282943f*x07;
+ const float x0d = 1.17587560241936f*x05 + 0.785694958387102f*x06;
+ const float x0e = -0.785694958387102f*x05 + 1.17587560241936f*x06;
+ const float x0f = 0.275899379282943f*x04 - 1.38703984532215f*x07;
+ const float x10 = 0.353553390593274f * (x0c - x0d);
+ const float x11 = 0.353553390593274f * (x0e - x0f);
+ dst[0*dst_stridea] = 0.353553390593274f * (x08 + x09);
+ dst[1*dst_stridea] = 0.353553390593274f * (x0c + x0d);
+ dst[2*dst_stridea] = 0.461939766255643f*x0a + 0.191341716182545f*x0b;
+ dst[3*dst_stridea] = 0.707106781186547f * (x10 - x11);
+ dst[4*dst_stridea] = 0.353553390593274f * (x08 - x09);
+ dst[5*dst_stridea] = 0.707106781186547f * (x10 + x11);
+ dst[6*dst_stridea] = 0.191341716182545f*x0a - 0.461939766255643f*x0b;
+ dst[7*dst_stridea] = 0.353553390593274f * (x0e + x0f);
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+static void av_always_inline idct8_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb,
+ int add)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ const float x00 = 1.4142135623731f *src[0*src_stridea];
+ const float x01 = 1.38703984532215f *src[1*src_stridea] + 0.275899379282943f*src[7*src_stridea];
+ const float x02 = 1.30656296487638f *src[2*src_stridea] + 0.541196100146197f*src[6*src_stridea];
+ const float x03 = 1.17587560241936f *src[3*src_stridea] + 0.785694958387102f*src[5*src_stridea];
+ const float x04 = 1.4142135623731f *src[4*src_stridea];
+ const float x05 = -0.785694958387102f*src[3*src_stridea] + 1.17587560241936f*src[5*src_stridea];
+ const float x06 = 0.541196100146197f*src[2*src_stridea] - 1.30656296487638f*src[6*src_stridea];
+ const float x07 = -0.275899379282943f*src[1*src_stridea] + 1.38703984532215f*src[7*src_stridea];
+ const float x09 = x00 + x04;
+ const float x0a = x01 + x03;
+ const float x0b = 1.4142135623731f*x02;
+ const float x0c = x00 - x04;
+ const float x0d = x01 - x03;
+ const float x0e = 0.353553390593274f * (x09 - x0b);
+ const float x0f = 0.353553390593274f * (x0c + x0d);
+ const float x10 = 0.353553390593274f * (x0c - x0d);
+ const float x11 = 1.4142135623731f*x06;
+ const float x12 = x05 + x07;
+ const float x13 = x05 - x07;
+ const float x14 = 0.353553390593274f * (x11 + x12);
+ const float x15 = 0.353553390593274f * (x11 - x12);
+ const float x16 = 0.5f*x13;
+ dst[0*dst_stridea] = (add ? dst[ 0*dst_stridea] : 0) + 0.25f * (x09 + x0b) + 0.353553390593274f*x0a;
+ dst[1*dst_stridea] = (add ? dst[ 1*dst_stridea] : 0) + 0.707106781186547f * (x0f + x15);
+ dst[2*dst_stridea] = (add ? dst[ 2*dst_stridea] : 0) + 0.707106781186547f * (x0f - x15);
+ dst[3*dst_stridea] = (add ? dst[ 3*dst_stridea] : 0) + 0.707106781186547f * (x0e + x16);
+ dst[4*dst_stridea] = (add ? dst[ 4*dst_stridea] : 0) + 0.707106781186547f * (x0e - x16);
+ dst[5*dst_stridea] = (add ? dst[ 5*dst_stridea] : 0) + 0.707106781186547f * (x10 - x14);
+ dst[6*dst_stridea] = (add ? dst[ 6*dst_stridea] : 0) + 0.707106781186547f * (x10 + x14);
+ dst[7*dst_stridea] = (add ? dst[ 7*dst_stridea] : 0) + 0.25f * (x09 + x0b) - 0.353553390593274f*x0a;
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+
+static void av_always_inline fdct16_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ const float x00 = src[ 0*src_stridea] + src[15*src_stridea];
+ const float x01 = src[ 1*src_stridea] + src[14*src_stridea];
+ const float x02 = src[ 2*src_stridea] + src[13*src_stridea];
+ const float x03 = src[ 3*src_stridea] + src[12*src_stridea];
+ const float x04 = src[ 4*src_stridea] + src[11*src_stridea];
+ const float x05 = src[ 5*src_stridea] + src[10*src_stridea];
+ const float x06 = src[ 6*src_stridea] + src[ 9*src_stridea];
+ const float x07 = src[ 7*src_stridea] + src[ 8*src_stridea];
+ const float x08 = src[ 0*src_stridea] - src[15*src_stridea];
+ const float x09 = src[ 1*src_stridea] - src[14*src_stridea];
+ const float x0a = src[ 2*src_stridea] - src[13*src_stridea];
+ const float x0b = src[ 3*src_stridea] - src[12*src_stridea];
+ const float x0c = src[ 4*src_stridea] - src[11*src_stridea];
+ const float x0d = src[ 5*src_stridea] - src[10*src_stridea];
+ const float x0e = src[ 6*src_stridea] - src[ 9*src_stridea];
+ const float x0f = src[ 7*src_stridea] - src[ 8*src_stridea];
+ const float x10 = x00 + x07;
+ const float x11 = x01 + x06;
+ const float x12 = x02 + x05;
+ const float x13 = x03 + x04;
+ const float x14 = x00 - x07;
+ const float x15 = x01 - x06;
+ const float x16 = x02 - x05;
+ const float x17 = x03 - x04;
+ const float x18 = x10 + x13;
+ const float x19 = x11 + x12;
+ const float x1a = x10 - x13;
+ const float x1b = x11 - x12;
+ const float x1c = 1.38703984532215f*x14 + 0.275899379282943f*x17;
+ const float x1d = 1.17587560241936f*x15 + 0.785694958387102f*x16;
+ const float x1e = -0.785694958387102f*x15 + 1.17587560241936f *x16;
+ const float x1f = 0.275899379282943f*x14 - 1.38703984532215f *x17;
+ const float x20 = 0.25f * (x1c - x1d);
+ const float x21 = 0.25f * (x1e - x1f);
+ const float x22 = 1.40740373752638f *x08 + 0.138617169199091f*x0f;
+ const float x23 = 1.35331800117435f *x09 + 0.410524527522357f*x0e;
+ const float x24 = 1.24722501298667f *x0a + 0.666655658477747f*x0d;
+ const float x25 = 1.09320186700176f *x0b + 0.897167586342636f*x0c;
+ const float x26 = -0.897167586342636f*x0b + 1.09320186700176f *x0c;
+ const float x27 = 0.666655658477747f*x0a - 1.24722501298667f *x0d;
+ const float x28 = -0.410524527522357f*x09 + 1.35331800117435f *x0e;
+ const float x29 = 0.138617169199091f*x08 - 1.40740373752638f *x0f;
+ const float x2a = x22 + x25;
+ const float x2b = x23 + x24;
+ const float x2c = x22 - x25;
+ const float x2d = x23 - x24;
+ const float x2e = 0.25f * (x2a - x2b);
+ const float x2f = 0.326640741219094f*x2c + 0.135299025036549f*x2d;
+ const float x30 = 0.135299025036549f*x2c - 0.326640741219094f*x2d;
+ const float x31 = x26 + x29;
+ const float x32 = x27 + x28;
+ const float x33 = x26 - x29;
+ const float x34 = x27 - x28;
+ const float x35 = 0.25f * (x31 - x32);
+ const float x36 = 0.326640741219094f*x33 + 0.135299025036549f*x34;
+ const float x37 = 0.135299025036549f*x33 - 0.326640741219094f*x34;
+ dst[ 0*dst_stridea] = 0.25f * (x18 + x19);
+ dst[ 1*dst_stridea] = 0.25f * (x2a + x2b);
+ dst[ 2*dst_stridea] = 0.25f * (x1c + x1d);
+ dst[ 3*dst_stridea] = 0.707106781186547f * (x2f - x37);
+ dst[ 4*dst_stridea] = 0.326640741219094f*x1a + 0.135299025036549f*x1b;
+ dst[ 5*dst_stridea] = 0.707106781186547f * (x2f + x37);
+ dst[ 6*dst_stridea] = 0.707106781186547f * (x20 - x21);
+ dst[ 7*dst_stridea] = 0.707106781186547f * (x2e + x35);
+ dst[ 8*dst_stridea] = 0.25f * (x18 - x19);
+ dst[ 9*dst_stridea] = 0.707106781186547f * (x2e - x35);
+ dst[10*dst_stridea] = 0.707106781186547f * (x20 + x21);
+ dst[11*dst_stridea] = 0.707106781186547f * (x30 - x36);
+ dst[12*dst_stridea] = 0.135299025036549f*x1a - 0.326640741219094f*x1b;
+ dst[13*dst_stridea] = 0.707106781186547f * (x30 + x36);
+ dst[14*dst_stridea] = 0.25f * (x1e + x1f);
+ dst[15*dst_stridea] = 0.25f * (x31 + x32);
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+static void av_always_inline idct16_1d(float *dst, const float *src,
+ int dst_stridea, int dst_strideb,
+ int src_stridea, int src_strideb,
+ int add)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ const float x00 = 1.4142135623731f *src[ 0*src_stridea];
+ const float x01 = 1.40740373752638f *src[ 1*src_stridea] + 0.138617169199091f*src[15*src_stridea];
+ const float x02 = 1.38703984532215f *src[ 2*src_stridea] + 0.275899379282943f*src[14*src_stridea];
+ const float x03 = 1.35331800117435f *src[ 3*src_stridea] + 0.410524527522357f*src[13*src_stridea];
+ const float x04 = 1.30656296487638f *src[ 4*src_stridea] + 0.541196100146197f*src[12*src_stridea];
+ const float x05 = 1.24722501298667f *src[ 5*src_stridea] + 0.666655658477747f*src[11*src_stridea];
+ const float x06 = 1.17587560241936f *src[ 6*src_stridea] + 0.785694958387102f*src[10*src_stridea];
+ const float x07 = 1.09320186700176f *src[ 7*src_stridea] + 0.897167586342636f*src[ 9*src_stridea];
+ const float x08 = 1.4142135623731f *src[ 8*src_stridea];
+ const float x09 = -0.897167586342636f*src[ 7*src_stridea] + 1.09320186700176f*src[ 9*src_stridea];
+ const float x0a = 0.785694958387102f*src[ 6*src_stridea] - 1.17587560241936f*src[10*src_stridea];
+ const float x0b = -0.666655658477747f*src[ 5*src_stridea] + 1.24722501298667f*src[11*src_stridea];
+ const float x0c = 0.541196100146197f*src[ 4*src_stridea] - 1.30656296487638f*src[12*src_stridea];
+ const float x0d = -0.410524527522357f*src[ 3*src_stridea] + 1.35331800117435f*src[13*src_stridea];
+ const float x0e = 0.275899379282943f*src[ 2*src_stridea] - 1.38703984532215f*src[14*src_stridea];
+ const float x0f = -0.138617169199091f*src[ 1*src_stridea] + 1.40740373752638f*src[15*src_stridea];
+ const float x12 = x00 + x08;
+ const float x13 = x01 + x07;
+ const float x14 = x02 + x06;
+ const float x15 = x03 + x05;
+ const float x16 = 1.4142135623731f*x04;
+ const float x17 = x00 - x08;
+ const float x18 = x01 - x07;
+ const float x19 = x02 - x06;
+ const float x1a = x03 - x05;
+ const float x1d = x12 + x16;
+ const float x1e = x13 + x15;
+ const float x1f = 1.4142135623731f*x14;
+ const float x20 = x12 - x16;
+ const float x21 = x13 - x15;
+ const float x22 = 0.25f * (x1d - x1f);
+ const float x23 = 0.25f * (x20 + x21);
+ const float x24 = 0.25f * (x20 - x21);
+ const float x25 = 1.4142135623731f*x17;
+ const float x26 = 1.30656296487638f*x18 + 0.541196100146197f*x1a;
+ const float x27 = 1.4142135623731f*x19;
+ const float x28 = -0.541196100146197f*x18 + 1.30656296487638f*x1a;
+ const float x29 = 0.176776695296637f * (x25 + x27) + 0.25f*x26;
+ const float x2a = 0.25f * (x25 - x27);
+ const float x2b = 0.176776695296637f * (x25 + x27) - 0.25f*x26;
+ const float x2c = 0.353553390593274f*x28;
+ const float x1b = 0.707106781186547f * (x2a - x2c);
+ const float x1c = 0.707106781186547f * (x2a + x2c);
+ const float x2d = 1.4142135623731f*x0c;
+ const float x2e = x0b + x0d;
+ const float x2f = x0a + x0e;
+ const float x30 = x09 + x0f;
+ const float x31 = x09 - x0f;
+ const float x32 = x0a - x0e;
+ const float x33 = x0b - x0d;
+ const float x37 = 1.4142135623731f*x2d;
+ const float x38 = 1.30656296487638f*x2e + 0.541196100146197f*x30;
+ const float x39 = 1.4142135623731f*x2f;
+ const float x3a = -0.541196100146197f*x2e + 1.30656296487638f*x30;
+ const float x3b = 0.176776695296637f * (x37 + x39) + 0.25f*x38;
+ const float x3c = 0.25f * (x37 - x39);
+ const float x3d = 0.176776695296637f * (x37 + x39) - 0.25f*x38;
+ const float x3e = 0.353553390593274f*x3a;
+ const float x34 = 0.707106781186547f * (x3c - x3e);
+ const float x35 = 0.707106781186547f * (x3c + x3e);
+ const float x3f = 1.4142135623731f*x32;
+ const float x40 = x31 + x33;
+ const float x41 = x31 - x33;
+ const float x42 = 0.25f * (x3f + x40);
+ const float x43 = 0.25f * (x3f - x40);
+ const float x44 = 0.353553390593274f*x41;
+ dst[ 0*dst_stridea] = (add ? dst[ 0*dst_stridea] : 0) + 0.176776695296637f * (x1d + x1f) + 0.25f*x1e;
+ dst[ 1*dst_stridea] = (add ? dst[ 1*dst_stridea] : 0) + 0.707106781186547f * (x29 + x3d);
+ dst[ 2*dst_stridea] = (add ? dst[ 2*dst_stridea] : 0) + 0.707106781186547f * (x29 - x3d);
+ dst[ 3*dst_stridea] = (add ? dst[ 3*dst_stridea] : 0) + 0.707106781186547f * (x23 - x43);
+ dst[ 4*dst_stridea] = (add ? dst[ 4*dst_stridea] : 0) + 0.707106781186547f * (x23 + x43);
+ dst[ 5*dst_stridea] = (add ? dst[ 5*dst_stridea] : 0) + 0.707106781186547f * (x1b - x35);
+ dst[ 6*dst_stridea] = (add ? dst[ 6*dst_stridea] : 0) + 0.707106781186547f * (x1b + x35);
+ dst[ 7*dst_stridea] = (add ? dst[ 7*dst_stridea] : 0) + 0.707106781186547f * (x22 + x44);
+ dst[ 8*dst_stridea] = (add ? dst[ 8*dst_stridea] : 0) + 0.707106781186547f * (x22 - x44);
+ dst[ 9*dst_stridea] = (add ? dst[ 9*dst_stridea] : 0) + 0.707106781186547f * (x1c + x34);
+ dst[10*dst_stridea] = (add ? dst[10*dst_stridea] : 0) + 0.707106781186547f * (x1c - x34);
+ dst[11*dst_stridea] = (add ? dst[11*dst_stridea] : 0) + 0.707106781186547f * (x24 + x42);
+ dst[12*dst_stridea] = (add ? dst[12*dst_stridea] : 0) + 0.707106781186547f * (x24 - x42);
+ dst[13*dst_stridea] = (add ? dst[13*dst_stridea] : 0) + 0.707106781186547f * (x2b - x3b);
+ dst[14*dst_stridea] = (add ? dst[14*dst_stridea] : 0) + 0.707106781186547f * (x2b + x3b);
+ dst[15*dst_stridea] = (add ? dst[15*dst_stridea] : 0) + 0.176776695296637f * (x1d + x1f) - 0.25f*x1e;
+ dst += dst_strideb;
+ src += src_strideb;
+ }
+}
+
+#define DEF_FILTER_FREQ_FUNCS(bsize) \
+static av_always_inline void filter_freq_##bsize(const float *src, int src_linesize, \
+ float *dst, int dst_linesize, \
+ AVExpr *expr, double *var_values, \
+ int sigma_th) \
+{ \
+ unsigned i; \
+ DECLARE_ALIGNED(32, float, tmp_block1)[bsize * bsize]; \
+ DECLARE_ALIGNED(32, float, tmp_block2)[bsize * bsize]; \
+ \
+ /* forward DCT */ \
+ fdct##bsize##_1d(tmp_block1, src, 1, bsize, 1, src_linesize); \
+ fdct##bsize##_1d(tmp_block2, tmp_block1, bsize, 1, bsize, 1); \
+ \
+ for (i = 0; i < bsize*bsize; i++) { \
+ float *b = &tmp_block2[i]; \
+ /* frequency filtering */ \
+ if (expr) { \
+ var_values[VAR_C] = FFABS(*b); \
+ *b *= av_expr_eval(expr, var_values, NULL); \
+ } else { \
+ if (FFABS(*b) < sigma_th) \
+ *b = 0; \
+ } \
+ } \
+ \
+ /* inverse DCT */ \
+ idct##bsize##_1d(tmp_block1, tmp_block2, 1, bsize, 1, bsize, 0); \
+ idct##bsize##_1d(dst, tmp_block1, dst_linesize, 1, bsize, 1, 1); \
+} \
+ \
+static void filter_freq_sigma_##bsize(DCTdnoizContext *s, \
+ const float *src, int src_linesize, \
+ float *dst, int dst_linesize, int thread_id) \
+{ \
+ filter_freq_##bsize(src, src_linesize, dst, dst_linesize, NULL, NULL, s->th); \
+} \
+ \
+static void filter_freq_expr_##bsize(DCTdnoizContext *s, \
+ const float *src, int src_linesize, \
+ float *dst, int dst_linesize, int thread_id) \
+{ \
+ filter_freq_##bsize(src, src_linesize, dst, dst_linesize, \
+ s->expr[thread_id], s->var_values[thread_id], 0); \
+}
+
+DEF_FILTER_FREQ_FUNCS(8)
+DEF_FILTER_FREQ_FUNCS(16)
+
+#define DCT3X3_0_0 0.5773502691896258f /* 1/sqrt(3) */
+#define DCT3X3_0_1 0.5773502691896258f /* 1/sqrt(3) */
+#define DCT3X3_0_2 0.5773502691896258f /* 1/sqrt(3) */
+#define DCT3X3_1_0 0.7071067811865475f /* 1/sqrt(2) */
+#define DCT3X3_1_2 -0.7071067811865475f /* -1/sqrt(2) */
+#define DCT3X3_2_0 0.4082482904638631f /* 1/sqrt(6) */
+#define DCT3X3_2_1 -0.8164965809277261f /* -2/sqrt(6) */
+#define DCT3X3_2_2 0.4082482904638631f /* 1/sqrt(6) */
+
+static av_always_inline void color_decorrelation(float **dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int w, int h,
+ int r, int g, int b)
+{
+ int x, y;
+ float *dstp_r = dst[0];
+ float *dstp_g = dst[1];
+ float *dstp_b = dst[2];
+
+ for (y = 0; y < h; y++) {
+ const uint8_t *srcp = src;
+
+ for (x = 0; x < w; x++) {
+ dstp_r[x] = srcp[r] * DCT3X3_0_0 + srcp[g] * DCT3X3_0_1 + srcp[b] * DCT3X3_0_2;
+ dstp_g[x] = srcp[r] * DCT3X3_1_0 + srcp[b] * DCT3X3_1_2;
+ dstp_b[x] = srcp[r] * DCT3X3_2_0 + srcp[g] * DCT3X3_2_1 + srcp[b] * DCT3X3_2_2;
+ srcp += 3;
+ }
+ src += src_linesize;
+ dstp_r += dst_linesize;
+ dstp_g += dst_linesize;
+ dstp_b += dst_linesize;
+ }
+}
+
+static av_always_inline void color_correlation(uint8_t *dst, int dst_linesize,
+ float **src, int src_linesize,
+ int w, int h,
+ int r, int g, int b)
+{
+ int x, y;
+ const float *src_r = src[0];
+ const float *src_g = src[1];
+ const float *src_b = src[2];
+
+ for (y = 0; y < h; y++) {
+ uint8_t *dstp = dst;
+
+ for (x = 0; x < w; x++) {
+ dstp[r] = av_clip_uint8(src_r[x] * DCT3X3_0_0 + src_g[x] * DCT3X3_1_0 + src_b[x] * DCT3X3_2_0);
+ dstp[g] = av_clip_uint8(src_r[x] * DCT3X3_0_1 + src_b[x] * DCT3X3_2_1);
+ dstp[b] = av_clip_uint8(src_r[x] * DCT3X3_0_2 + src_g[x] * DCT3X3_1_2 + src_b[x] * DCT3X3_2_2);
+ dstp += 3;
+ }
+ dst += dst_linesize;
+ src_r += src_linesize;
+ src_g += src_linesize;
+ src_b += src_linesize;
+ }
+}
+
+#define DECLARE_COLOR_FUNCS(name, r, g, b) \
+static void color_decorrelation_##name(float **dst, int dst_linesize, \
+ const uint8_t *src, int src_linesize, \
+ int w, int h) \
+{ \
+ color_decorrelation(dst, dst_linesize, src, src_linesize, w, h, r, g, b); \
+} \
+ \
+static void color_correlation_##name(uint8_t *dst, int dst_linesize, \
+ float **src, int src_linesize, \
+ int w, int h) \
+{ \
+ color_correlation(dst, dst_linesize, src, src_linesize, w, h, r, g, b); \
+}
+
+DECLARE_COLOR_FUNCS(rgb, 0, 1, 2)
+DECLARE_COLOR_FUNCS(bgr, 2, 1, 0)
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DCTdnoizContext *s = ctx->priv;
+ int i, x, y, bx, by, linesize, *iweights, max_slice_h, slice_h;
+ const int bsize = 1 << s->n;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_BGR24:
+ s->color_decorrelation = color_decorrelation_bgr;
+ s->color_correlation = color_correlation_bgr;
+ break;
+ case AV_PIX_FMT_RGB24:
+ s->color_decorrelation = color_decorrelation_rgb;
+ s->color_correlation = color_correlation_rgb;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ s->pr_width = inlink->w - (inlink->w - bsize) % s->step;
+ s->pr_height = inlink->h - (inlink->h - bsize) % s->step;
+ if (s->pr_width != inlink->w)
+ av_log(ctx, AV_LOG_WARNING, "The last %d horizontal pixels won't be denoised\n",
+ inlink->w - s->pr_width);
+ if (s->pr_height != inlink->h)
+ av_log(ctx, AV_LOG_WARNING, "The last %d vertical pixels won't be denoised\n",
+ inlink->h - s->pr_height);
+
+ max_slice_h = s->pr_height / ((s->bsize - 1) * 2);
+ s->nb_threads = FFMIN3(MAX_THREADS, ctx->graph->nb_threads, max_slice_h);
+ av_log(ctx, AV_LOG_DEBUG, "threads: [max=%d hmax=%d user=%d] => %d\n",
+ MAX_THREADS, max_slice_h, ctx->graph->nb_threads, s->nb_threads);
+
+ s->p_linesize = linesize = FFALIGN(s->pr_width, 32);
+ for (i = 0; i < 2; i++) {
+ s->cbuf[i][0] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][0]));
+ s->cbuf[i][1] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][1]));
+ s->cbuf[i][2] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][2]));
+ if (!s->cbuf[i][0] || !s->cbuf[i][1] || !s->cbuf[i][2])
+ return AVERROR(ENOMEM);
+ }
+
+ /* eval expressions are probably not thread safe when the eval internal
+ * state can be changed (typically through load & store operations) */
+ if (s->expr_str) {
+ for (i = 0; i < s->nb_threads; i++) {
+ int ret = av_expr_parse(&s->expr[i], s->expr_str, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* each slice will need to (pre & re)process the top and bottom block of
+ * the previous one in in addition to its processing area. This is because
+ * each pixel is averaged by all the surrounding blocks */
+ slice_h = (int)ceilf(s->pr_height / s->nb_threads) + (s->bsize - 1) * 2;
+ for (i = 0; i < s->nb_threads; i++) {
+ s->slices[i] = av_malloc_array(linesize, slice_h * sizeof(*s->slices[i]));
+ if (!s->slices[i])
+ return AVERROR(ENOMEM);
+ }
+
+ s->weights = av_malloc(s->pr_height * linesize * sizeof(*s->weights));
+ if (!s->weights)
+ return AVERROR(ENOMEM);
+ iweights = av_calloc(s->pr_height, linesize * sizeof(*iweights));
+ if (!iweights)
+ return AVERROR(ENOMEM);
+ for (y = 0; y < s->pr_height - bsize + 1; y += s->step)
+ for (x = 0; x < s->pr_width - bsize + 1; x += s->step)
+ for (by = 0; by < bsize; by++)
+ for (bx = 0; bx < bsize; bx++)
+ iweights[(y + by)*linesize + x + bx]++;
+ for (y = 0; y < s->pr_height; y++)
+ for (x = 0; x < s->pr_width; x++)
+ s->weights[y*linesize + x] = 1. / iweights[y*linesize + x];
+ av_free(iweights);
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DCTdnoizContext *s = ctx->priv;
+
+ s->bsize = 1 << s->n;
+ if (s->overlap == -1)
+ s->overlap = s->bsize - 1;
+
+ if (s->overlap > s->bsize - 1) {
+ av_log(s, AV_LOG_ERROR, "Overlap value can not except %d "
+ "with a block size of %dx%d\n",
+ s->bsize - 1, s->bsize, s->bsize);
+ return AVERROR(EINVAL);
+ }
+
+ if (s->expr_str) {
+ switch (s->n) {
+ case 3: s->filter_freq_func = filter_freq_expr_8; break;
+ case 4: s->filter_freq_func = filter_freq_expr_16; break;
+ default: av_assert0(0);
+ }
+ } else {
+ switch (s->n) {
+ case 3: s->filter_freq_func = filter_freq_sigma_8; break;
+ case 4: s->filter_freq_func = filter_freq_sigma_16; break;
+ default: av_assert0(0);
+ }
+ }
+
+ s->th = s->sigma * 3.;
+ s->step = s->bsize - s->overlap;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+typedef struct ThreadData {
+ float *src, *dst;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx,
+ void *arg, int jobnr, int nb_jobs)
+{
+ int x, y;
+ DCTdnoizContext *s = ctx->priv;
+ const ThreadData *td = arg;
+ const int w = s->pr_width;
+ const int h = s->pr_height;
+ const int slice_start = (h * jobnr ) / nb_jobs;
+ const int slice_end = (h * (jobnr+1)) / nb_jobs;
+ const int slice_start_ctx = FFMAX(slice_start - s->bsize + 1, 0);
+ const int slice_end_ctx = FFMIN(slice_end, h - s->bsize + 1);
+ const int slice_h = slice_end_ctx - slice_start_ctx;
+ const int src_linesize = s->p_linesize;
+ const int dst_linesize = s->p_linesize;
+ const int slice_linesize = s->p_linesize;
+ float *dst;
+ const float *src = td->src + slice_start_ctx * src_linesize;
+ const float *weights = s->weights + slice_start * dst_linesize;
+ float *slice = s->slices[jobnr];
+
+ // reset block sums
+ memset(slice, 0, (slice_h + s->bsize - 1) * dst_linesize * sizeof(*slice));
+
+ // block dct sums
+ for (y = 0; y < slice_h; y += s->step) {
+ for (x = 0; x < w - s->bsize + 1; x += s->step)
+ s->filter_freq_func(s, src + x, src_linesize,
+ slice + x, slice_linesize,
+ jobnr);
+ src += s->step * src_linesize;
+ slice += s->step * slice_linesize;
+ }
+
+ // average blocks
+ slice = s->slices[jobnr] + (slice_start - slice_start_ctx) * slice_linesize;
+ dst = td->dst + slice_start * dst_linesize;
+ for (y = slice_start; y < slice_end; y++) {
+ for (x = 0; x < w; x++)
+ dst[x] = slice[x] * weights[x];
+ slice += slice_linesize;
+ dst += dst_linesize;
+ weights += dst_linesize;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DCTdnoizContext *s = ctx->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int direct, plane;
+ AVFrame *out;
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ direct = 0;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ s->color_decorrelation(s->cbuf[0], s->p_linesize,
+ in->data[0], in->linesize[0],
+ s->pr_width, s->pr_height);
+ for (plane = 0; plane < 3; plane++) {
+ ThreadData td = {
+ .src = s->cbuf[0][plane],
+ .dst = s->cbuf[1][plane],
+ };
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, s->nb_threads);
+ }
+ s->color_correlation(out->data[0], out->linesize[0],
+ s->cbuf[1], s->p_linesize,
+ s->pr_width, s->pr_height);
+
+ if (!direct) {
+ int y;
+ uint8_t *dst = out->data[0];
+ const uint8_t *src = in->data[0];
+ const int dst_linesize = out->linesize[0];
+ const int src_linesize = in->linesize[0];
+ const int hpad = (inlink->w - s->pr_width) * 3;
+ const int vpad = (inlink->h - s->pr_height);
+
+ if (hpad) {
+ uint8_t *dstp = dst + s->pr_width * 3;
+ const uint8_t *srcp = src + s->pr_width * 3;
+
+ for (y = 0; y < s->pr_height; y++) {
+ memcpy(dstp, srcp, hpad);
+ dstp += dst_linesize;
+ srcp += src_linesize;
+ }
+ }
+ if (vpad) {
+ uint8_t *dstp = dst + s->pr_height * dst_linesize;
+ const uint8_t *srcp = src + s->pr_height * src_linesize;
+
+ for (y = 0; y < vpad; y++) {
+ memcpy(dstp, srcp, inlink->w * 3);
+ dstp += dst_linesize;
+ srcp += src_linesize;
+ }
+ }
+
+ av_frame_free(&in);
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ DCTdnoizContext *s = ctx->priv;
+
+ av_free(s->weights);
+ for (i = 0; i < 2; i++) {
+ av_free(s->cbuf[i][0]);
+ av_free(s->cbuf[i][1]);
+ av_free(s->cbuf[i][2]);
+ }
+ for (i = 0; i < s->nb_threads; i++) {
+ av_free(s->slices[i]);
+ av_expr_free(s->expr[i]);
+ }
+}
+
+static const AVFilterPad dctdnoiz_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad dctdnoiz_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_dctdnoiz = {
+ .name = "dctdnoiz",
+ .description = NULL_IF_CONFIG_SMALL("Denoise frames using 2D DCT."),
+ .priv_size = sizeof(DCTdnoizContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = dctdnoiz_inputs,
+ .outputs = dctdnoiz_outputs,
+ .priv_class = &dctdnoiz_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_decimate.c b/libavfilter/vf_decimate.c
new file mode 100644
index 0000000..ffb9320
--- /dev/null
+++ b/libavfilter/vf_decimate.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2012 Fredrik Mellbin
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define INPUT_MAIN 0
+#define INPUT_CLEANSRC 1
+
+struct qitem {
+ AVFrame *frame;
+ int64_t maxbdiff;
+ int64_t totdiff;
+};
+
+typedef struct {
+ const AVClass *class;
+ struct qitem *queue; ///< window of cycle frames and the associated data diff
+ int fid; ///< current frame id in the queue
+ int filled; ///< 1 if the queue is filled, 0 otherwise
+ AVFrame *last; ///< last frame from the previous queue
+ AVFrame **clean_src; ///< frame queue for the clean source
+ int got_frame[2]; ///< frame request flag for each input stream
+ double ts_unit; ///< timestamp units for the output frames
+ int64_t start_pts; ///< base for output timestamps
+ uint32_t eof; ///< bitmask for end of stream
+ int hsub, vsub; ///< chroma subsampling values
+ int depth;
+ int nxblocks, nyblocks;
+ int bdiffsize;
+ int64_t *bdiffs;
+
+ /* options */
+ int cycle;
+ double dupthresh_flt;
+ double scthresh_flt;
+ int64_t dupthresh;
+ int64_t scthresh;
+ int blockx, blocky;
+ int ppsrc;
+ int chroma;
+} DecimateContext;
+
+#define OFFSET(x) offsetof(DecimateContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption decimate_options[] = {
+ { "cycle", "set the number of frame from which one will be dropped", OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 5}, 2, 25, FLAGS },
+ { "dupthresh", "set duplicate threshold", OFFSET(dupthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 1.1}, 0, 100, FLAGS },
+ { "scthresh", "set scene change threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 15.0}, 0, 100, FLAGS },
+ { "blockx", "set the size of the x-axis blocks used during metric calculations", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
+ { "blocky", "set the size of the y-axis blocks used during metric calculations", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
+ { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "chroma", "set whether or not chroma is considered in the metric calculations", OFFSET(chroma), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(decimate);
+
+static void calc_diffs(const DecimateContext *dm, struct qitem *q,
+ const AVFrame *f1, const AVFrame *f2)
+{
+ int64_t maxdiff = -1;
+ int64_t *bdiffs = dm->bdiffs;
+ int plane, i, j;
+
+ memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs));
+
+ for (plane = 0; plane < (dm->chroma && f1->data[2] ? 3 : 1); plane++) {
+ int x, y, xl;
+ const int linesize1 = f1->linesize[plane];
+ const int linesize2 = f2->linesize[plane];
+ const uint8_t *f1p = f1->data[plane];
+ const uint8_t *f2p = f2->data[plane];
+ int width = plane ? FF_CEIL_RSHIFT(f1->width, dm->hsub) : f1->width;
+ int height = plane ? FF_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height;
+ int hblockx = dm->blockx / 2;
+ int hblocky = dm->blocky / 2;
+
+ if (plane) {
+ hblockx >>= dm->hsub;
+ hblocky >>= dm->vsub;
+ }
+
+ for (y = 0; y < height; y++) {
+ int ydest = y / hblocky;
+ int xdest = 0;
+
+#define CALC_DIFF(nbits) do { \
+ for (x = 0; x < width; x += hblockx) { \
+ int64_t acc = 0; \
+ int m = FFMIN(width, x + hblockx); \
+ for (xl = x; xl < m; xl++) \
+ acc += abs(((const uint##nbits##_t *)f1p)[xl] - \
+ ((const uint##nbits##_t *)f2p)[xl]); \
+ bdiffs[ydest * dm->nxblocks + xdest] += acc; \
+ xdest++; \
+ } \
+} while (0)
+ if (dm->depth == 8) CALC_DIFF(8);
+ else CALC_DIFF(16);
+
+ f1p += linesize1;
+ f2p += linesize2;
+ }
+ }
+
+ for (i = 0; i < dm->nyblocks - 1; i++) {
+ for (j = 0; j < dm->nxblocks - 1; j++) {
+ int64_t tmp = bdiffs[ i * dm->nxblocks + j ]
+ + bdiffs[ i * dm->nxblocks + j + 1]
+ + bdiffs[(i + 1) * dm->nxblocks + j ]
+ + bdiffs[(i + 1) * dm->nxblocks + j + 1];
+ if (tmp > maxdiff)
+ maxdiff = tmp;
+ }
+ }
+
+ q->totdiff = 0;
+ for (i = 0; i < dm->bdiffsize; i++)
+ q->totdiff += bdiffs[i];
+ q->maxbdiff = maxdiff;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ int scpos = -1, duppos = -1;
+ int drop = INT_MIN, i, lowest = 0, ret;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DecimateContext *dm = ctx->priv;
+ AVFrame *prv;
+
+ /* update frames queue(s) */
+ if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
+ dm->queue[dm->fid].frame = in;
+ dm->got_frame[INPUT_MAIN] = 1;
+ } else {
+ dm->clean_src[dm->fid] = in;
+ dm->got_frame[INPUT_CLEANSRC] = 1;
+ }
+ if (!dm->got_frame[INPUT_MAIN] || (dm->ppsrc && !dm->got_frame[INPUT_CLEANSRC]))
+ return 0;
+ dm->got_frame[INPUT_MAIN] = dm->got_frame[INPUT_CLEANSRC] = 0;
+
+ if (in) {
+ /* update frame metrics */
+ prv = dm->fid ? dm->queue[dm->fid - 1].frame : dm->last;
+ if (!prv)
+ prv = in;
+ calc_diffs(dm, &dm->queue[dm->fid], prv, in);
+ if (++dm->fid != dm->cycle)
+ return 0;
+ av_frame_free(&dm->last);
+ dm->last = av_frame_clone(in);
+ dm->fid = 0;
+
+ /* we have a complete cycle, select the frame to drop */
+ lowest = 0;
+ for (i = 0; i < dm->cycle; i++) {
+ if (dm->queue[i].totdiff > dm->scthresh)
+ scpos = i;
+ if (dm->queue[i].maxbdiff < dm->queue[lowest].maxbdiff)
+ lowest = i;
+ }
+ if (dm->queue[lowest].maxbdiff < dm->dupthresh)
+ duppos = lowest;
+ drop = scpos >= 0 && duppos < 0 ? scpos : lowest;
+ }
+
+ /* metrics debug */
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ av_log(ctx, AV_LOG_DEBUG, "1/%d frame drop:\n", dm->cycle);
+ for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
+ av_log(ctx, AV_LOG_DEBUG," #%d: totdiff=%08"PRIx64" maxbdiff=%08"PRIx64"%s%s%s%s\n",
+ i + 1, dm->queue[i].totdiff, dm->queue[i].maxbdiff,
+ i == scpos ? " sc" : "",
+ i == duppos ? " dup" : "",
+ i == lowest ? " lowest" : "",
+ i == drop ? " [DROP]" : "");
+ }
+ }
+
+ /* push all frames except the drop */
+ ret = 0;
+ for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
+ if (i == drop) {
+ if (dm->ppsrc)
+ av_frame_free(&dm->clean_src[i]);
+ av_frame_free(&dm->queue[i].frame);
+ } else {
+ AVFrame *frame = dm->queue[i].frame;
+ if (frame->pts != AV_NOPTS_VALUE && dm->start_pts == AV_NOPTS_VALUE)
+ dm->start_pts = frame->pts;
+ if (dm->ppsrc) {
+ av_frame_free(&frame);
+ frame = dm->clean_src[i];
+ }
+ frame->pts = outlink->frame_count * dm->ts_unit +
+ (dm->start_pts == AV_NOPTS_VALUE ? 0 : dm->start_pts);
+ ret = ff_filter_frame(outlink, frame);
+ if (ret < 0)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int max_value;
+ AVFilterContext *ctx = inlink->dst;
+ DecimateContext *dm = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int w = inlink->w;
+ const int h = inlink->h;
+
+ dm->hsub = pix_desc->log2_chroma_w;
+ dm->vsub = pix_desc->log2_chroma_h;
+ dm->depth = pix_desc->comp[0].depth_minus1 + 1;
+ max_value = (1 << dm->depth) - 1;
+ dm->scthresh = (int64_t)(((int64_t)max_value * w * h * dm->scthresh_flt) / 100);
+ dm->dupthresh = (int64_t)(((int64_t)max_value * dm->blockx * dm->blocky * dm->dupthresh_flt) / 100);
+ dm->nxblocks = (w + dm->blockx/2 - 1) / (dm->blockx/2);
+ dm->nyblocks = (h + dm->blocky/2 - 1) / (dm->blocky/2);
+ dm->bdiffsize = dm->nxblocks * dm->nyblocks;
+ dm->bdiffs = av_malloc_array(dm->bdiffsize, sizeof(*dm->bdiffs));
+ dm->queue = av_calloc(dm->cycle, sizeof(*dm->queue));
+
+ if (!dm->bdiffs || !dm->queue)
+ return AVERROR(ENOMEM);
+
+ if (dm->ppsrc) {
+ dm->clean_src = av_calloc(dm->cycle, sizeof(*dm->clean_src));
+ if (!dm->clean_src)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static av_cold int decimate_init(AVFilterContext *ctx)
+{
+ DecimateContext *dm = ctx->priv;
+ AVFilterPad pad = {
+ .name = av_strdup("main"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ };
+
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_MAIN, &pad);
+
+ if (dm->ppsrc) {
+ pad.name = av_strdup("clean_src");
+ pad.config_props = NULL;
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
+ }
+
+ if ((dm->blockx & (dm->blockx - 1)) ||
+ (dm->blocky & (dm->blocky - 1))) {
+ av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
+ return AVERROR(EINVAL);
+ }
+
+ dm->start_pts = AV_NOPTS_VALUE;
+
+ return 0;
+}
+
+static av_cold void decimate_uninit(AVFilterContext *ctx)
+{
+ int i;
+ DecimateContext *dm = ctx->priv;
+
+ av_frame_free(&dm->last);
+ av_freep(&dm->bdiffs);
+ av_freep(&dm->queue);
+ av_freep(&dm->clean_src);
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+static int request_inlink(AVFilterContext *ctx, int lid)
+{
+ int ret = 0;
+ DecimateContext *dm = ctx->priv;
+
+ if (!dm->got_frame[lid]) {
+ AVFilterLink *inlink = ctx->inputs[lid];
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF) { // flushing
+ dm->eof |= 1 << lid;
+ ret = filter_frame(inlink, NULL);
+ }
+ }
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ DecimateContext *dm = ctx->priv;
+ const uint32_t eof_mask = 1<<INPUT_MAIN | dm->ppsrc<<INPUT_CLEANSRC;
+
+ if ((dm->eof & eof_mask) == eof_mask) // flush done?
+ return AVERROR_EOF;
+ if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
+ return ret;
+ if (dm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
+ return ret;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
+#define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
+#define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
+ PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DecimateContext *dm = ctx->priv;
+ const AVFilterLink *inlink =
+ ctx->inputs[dm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
+ AVRational fps = inlink->frame_rate;
+
+ if (!fps.num || !fps.den) {
+ av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
+ "current rate of %d/%d is invalid\n", fps.num, fps.den);
+ return AVERROR(EINVAL);
+ }
+ fps = av_mul_q(fps, (AVRational){dm->cycle - 1, dm->cycle});
+ av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
+ inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ outlink->time_base = inlink->time_base;
+ outlink->frame_rate = fps;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ dm->ts_unit = av_q2d(av_inv_q(av_mul_q(fps, outlink->time_base)));
+ return 0;
+}
+
+static const AVFilterPad decimate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_decimate = {
+ .name = "decimate",
+ .description = NULL_IF_CONFIG_SMALL("Decimate frames (post field matching filter)."),
+ .init = decimate_init,
+ .uninit = decimate_uninit,
+ .priv_size = sizeof(DecimateContext),
+ .query_formats = query_formats,
+ .outputs = decimate_outputs,
+ .priv_class = &decimate_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/vf_dejudder.c b/libavfilter/vf_dejudder.c
new file mode 100644
index 0000000..ab525b6
--- /dev/null
+++ b/libavfilter/vf_dejudder.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2014 Nicholas Robbins
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * remove judder in video stream
+ *
+ * Algorithm:
+ * - If the old packets had PTS of old_pts[i]. Replace these with new
+ * value based on the running average of the last n=cycle frames. So
+ *
+ * new_pts[i] = Sum(k=i-n+1, i, old_pts[k])/n
+ * + (old_pts[i]-old_pts[i-n])*(n-1)/2n
+ *
+ * For any repeating pattern of length n of judder this will produce
+ * an even progression of PTS's.
+ *
+ * - In order to avoid calculating this sum ever frame, a running tally
+ * is maintained in ctx->new_pts. Each frame the new term at the start
+ * of the sum is added, the one and the end is removed, and the offset
+ * terms (second line in formula above) are recalculated.
+ *
+ * - To aid in this a ringbuffer of the last n-2 PTS's is maintained in
+ * ctx->ringbuff. With the indices of the first two and last two entries
+ * stored in i1, i2, i3, & i4.
+ *
+ * - To ensure that the new PTS's are integers, time_base is divided
+ * by 2n. This removes the division in the new_pts calculation.
+ *
+ * - frame_rate is also multiplied by 2n to allow the frames to fall
+ * where they may in what may now be a VFR output. This produces more
+ * even output then setting frame_rate=1/0 in practice.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/mathematics.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int64_t *ringbuff;
+ int i1, i2, i3, i4;
+ int64_t new_pts;
+ int start_count;
+
+ /* options */
+ int cycle;
+} DejudderContext;
+
+#define OFFSET(x) offsetof(DejudderContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption dejudder_options[] = {
+ {"cycle", "set the length of the cycle to use for dejuddering",
+ OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 4}, 2, 240, .flags = FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(dejudder);
+
+static int config_out_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ DejudderContext *dj = ctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ outlink->time_base = av_mul_q(inlink->time_base, av_make_q(1, 2 * dj->cycle));
+ outlink->frame_rate = av_mul_q(inlink->frame_rate, av_make_q(2 * dj->cycle, 1));
+
+ av_log(ctx, AV_LOG_VERBOSE, "cycle:%d\n", dj->cycle);
+
+ return 0;
+}
+
+static av_cold int dejudder_init(AVFilterContext *ctx)
+{
+ DejudderContext *dj = ctx->priv;
+
+ dj->ringbuff = av_mallocz_array(dj->cycle+2, sizeof(*dj->ringbuff));
+ if (!dj->ringbuff)
+ return AVERROR(ENOMEM);
+
+ dj->new_pts = 0;
+ dj->i1 = 0;
+ dj->i2 = 1;
+ dj->i3 = 2;
+ dj->i4 = 3;
+ dj->start_count = dj->cycle + 2;
+
+ return 0;
+}
+
+static av_cold void dejudder_uninit(AVFilterContext *ctx)
+{
+ DejudderContext *dj = ctx->priv;
+
+ av_freep(&(dj->ringbuff));
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ int k;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ DejudderContext *dj = ctx->priv;
+ int64_t *judbuff = dj->ringbuff;
+ int64_t next_pts = frame->pts;
+ int64_t offset;
+
+ if (next_pts == AV_NOPTS_VALUE)
+ return ff_filter_frame(outlink, frame);
+
+ if (dj->start_count) {
+ dj->start_count--;
+ dj->new_pts = next_pts * 2 * dj->cycle;
+ } else {
+ if (next_pts < judbuff[dj->i2]) {
+ offset = next_pts + judbuff[dj->i3] - judbuff[dj->i4] - judbuff[dj->i1];
+ for (k = 0; k < dj->cycle + 2; k++)
+ judbuff[k] += offset;
+ }
+ dj->new_pts += (dj->cycle - 1) * (judbuff[dj->i3] - judbuff[dj->i1])
+ + (dj->cycle + 1) * (next_pts - judbuff[dj->i4]);
+ }
+
+ judbuff[dj->i2] = next_pts;
+ dj->i1 = dj->i2;
+ dj->i2 = dj->i3;
+ dj->i3 = dj->i4;
+ dj->i4 = (dj->i4 + 1) % (dj->cycle + 2);
+
+ frame->pts = dj->new_pts;
+
+ for (k = 0; k < dj->cycle + 2; k++)
+ av_log(ctx, AV_LOG_DEBUG, "%"PRId64"\t", judbuff[k]);
+ av_log(ctx, AV_LOG_DEBUG, "next=%"PRId64", new=%"PRId64"\n", next_pts, frame->pts);
+
+ return ff_filter_frame(outlink, frame);
+}
+
+static const AVFilterPad dejudder_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad dejudder_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_dejudder = {
+ .name = "dejudder",
+ .description = NULL_IF_CONFIG_SMALL("Remove judder produced by pullup."),
+ .priv_size = sizeof(DejudderContext),
+ .priv_class = &dejudder_class,
+ .inputs = dejudder_inputs,
+ .outputs = dejudder_outputs,
+ .init = dejudder_init,
+ .uninit = dejudder_uninit,
+};
diff --git a/libavfilter/vf_delogo.c b/libavfilter/vf_delogo.c
index dc58078..6ccdfb2 100644
--- a/libavfilter/vf_delogo.c
+++ b/libavfilter/vf_delogo.c
@@ -1,28 +1,30 @@
/*
* Copyright (c) 2002 Jindrich Makovicka <makovick@gmail.com>
* Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2013 Jean Delvare <khali@linux-fr.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/**
* @file
* A very simple tv station logo remover
- * Ported from MPlayer libmpcodecs/vf_delogo.c.
+ * Originally imported from MPlayer libmpcodecs/vf_delogo.c,
+ * the algorithm was later improved.
*/
#include "libavutil/common.h"
@@ -35,8 +37,8 @@
#include "video.h"
/**
- * Apply a simple delogo algorithm to the image in dst and put the
- * result in src.
+ * Apply a simple delogo algorithm to the image in src and put the
+ * result in dst.
*
* The algorithm is only applied to the region specified by the logo
* parameters.
@@ -54,15 +56,16 @@
*/
static void apply_delogo(uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
- int w, int h,
+ int w, int h, AVRational sar,
int logo_x, int logo_y, int logo_w, int logo_h,
- int band, int show, int direct)
+ unsigned int band, int show, int direct)
{
int x, y;
- int interp, dist;
+ uint64_t interp, weightl, weightr, weightt, weightb;
uint8_t *xdst, *xsrc;
uint8_t *topleft, *botleft, *topright;
+ unsigned int left_sample, right_sample;
int xclipl, xclipr, yclipt, yclipb;
int logo_x1, logo_x2, logo_y1, logo_y2;
@@ -87,28 +90,43 @@ static void apply_delogo(uint8_t *dst, int dst_linesize,
src += (logo_y1 + 1) * src_linesize;
for (y = logo_y1+1; y < logo_y2-1; y++) {
+ left_sample = topleft[src_linesize*(y-logo_y1)] +
+ topleft[src_linesize*(y-logo_y1-1)] +
+ topleft[src_linesize*(y-logo_y1+1)];
+ right_sample = topright[src_linesize*(y-logo_y1)] +
+ topright[src_linesize*(y-logo_y1-1)] +
+ topright[src_linesize*(y-logo_y1+1)];
+
for (x = logo_x1+1,
xdst = dst+logo_x1+1,
xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {
- interp = (topleft[src_linesize*(y-logo_y -yclipt)] +
- topleft[src_linesize*(y-logo_y-1-yclipt)] +
- topleft[src_linesize*(y-logo_y+1-yclipt)]) * (logo_w-(x-logo_x))/logo_w
- + (topright[src_linesize*(y-logo_y-yclipt)] +
- topright[src_linesize*(y-logo_y-1-yclipt)] +
- topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w
- + (topleft[x-logo_x-xclipl] +
- topleft[x-logo_x-1-xclipl] +
- topleft[x-logo_x+1-xclipl]) * (logo_h-(y-logo_y))/logo_h
- + (botleft[x-logo_x-xclipl] +
- botleft[x-logo_x-1-xclipl] +
- botleft[x-logo_x+1-xclipl]) * (y-logo_y)/logo_h;
- interp /= 6;
+
+ /* Weighted interpolation based on relative distances, taking SAR into account */
+ weightl = (uint64_t) (logo_x2-1-x) * (y-logo_y1) * (logo_y2-1-y) * sar.den;
+ weightr = (uint64_t)(x-logo_x1) * (y-logo_y1) * (logo_y2-1-y) * sar.den;
+ weightt = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (logo_y2-1-y) * sar.num;
+ weightb = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (y-logo_y1) * sar.num;
+
+ interp =
+ left_sample * weightl
+ +
+ right_sample * weightr
+ +
+ (topleft[x-logo_x1] +
+ topleft[x-logo_x1-1] +
+ topleft[x-logo_x1+1]) * weightt
+ +
+ (botleft[x-logo_x1] +
+ botleft[x-logo_x1-1] +
+ botleft[x-logo_x1+1]) * weightb;
+ interp /= (weightl + weightr + weightt + weightb) * 3U;
if (y >= logo_y+band && y < logo_y+logo_h-band &&
x >= logo_x+band && x < logo_x+logo_w-band) {
*xdst = interp;
} else {
- dist = 0;
+ unsigned dist = 0;
+
if (x < logo_x+band)
dist = FFMAX(dist, logo_x-x+band);
else if (x >= logo_x+logo_w-band)
@@ -136,33 +154,24 @@ typedef struct DelogoContext {
} DelogoContext;
#define OFFSET(x) offsetof(DelogoContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption delogo_options[]= {
{ "x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
{ "h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
- { "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, -1, INT_MAX, FLAGS },
- { "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, -1, INT_MAX, FLAGS },
+ { "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
+ { "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
{ "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
- { NULL },
+ { NULL }
};
-static const char *delogo_get_name(void *ctx)
-{
- return "delogo";
-}
-
-static const AVClass delogo_class = {
- .class_name = "DelogoContext",
- .item_name = delogo_get_name,
- .option = delogo_options,
-};
+AVFILTER_DEFINE_CLASS(delogo);
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
+ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8,
@@ -187,10 +196,7 @@ static av_cold int init(AVFilterContext *ctx)
CHECK_UNSET_OPT(w);
CHECK_UNSET_OPT(h);
- if (s->show)
- s->band = 4;
-
- av_log(ctx, AV_LOG_DEBUG, "x:%d y:%d, w:%d h:%d band:%d show:%d\n",
+ av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d, w:%d h:%d band:%d show:%d\n",
s->x, s->y, s->w, s->h, s->band, s->show);
s->w += s->band*2;
@@ -211,6 +217,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
+ AVRational sar;
if (av_frame_is_writable(in)) {
direct = 1;
@@ -223,19 +230,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- out->width = outlink->w;
- out->height = outlink->h;
}
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
+ sar = in->sample_aspect_ratio;
+ /* Assume square pixels if SAR is unknown */
+ if (!sar.num)
+ sar.num = sar.den = 1;
+
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
int vsub = plane == 1 || plane == 2 ? vsub0 : 0;
apply_delogo(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
- inlink->w>>hsub, inlink->h>>vsub,
- s->x>>hsub, s->y>>vsub,
- s->w>>hsub, s->h>>vsub,
+ FF_CEIL_RSHIFT(inlink->w, hsub),
+ FF_CEIL_RSHIFT(inlink->h, vsub),
+ sar, s->x>>hsub, s->y>>vsub,
+ /* Up and left borders were rounded down, inject lost bits
+ * into width and height to avoid error accumulation */
+ FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
+ FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
s->band>>FFMIN(hsub, vsub),
s->show, direct);
}
@@ -248,10 +262,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static const AVFilterPad avfilter_vf_delogo_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -271,7 +284,7 @@ AVFilter ff_vf_delogo = {
.priv_class = &delogo_class,
.init = init,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_delogo_inputs,
- .outputs = avfilter_vf_delogo_outputs,
+ .inputs = avfilter_vf_delogo_inputs,
+ .outputs = avfilter_vf_delogo_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_deshake.c b/libavfilter/vf_deshake.c
new file mode 100644
index 0000000..b5d5457
--- /dev/null
+++ b/libavfilter/vf_deshake.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2010 Georg Martius <georg.martius@web.de>
+ * Copyright (C) 2010 Daniel G. Taylor <dan@programmer-art.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * fast deshake / depan video filter
+ *
+ * SAD block-matching motion compensation to fix small changes in
+ * horizontal and/or vertical shift. This filter helps remove camera shake
+ * from hand-holding a camera, bumping a tripod, moving on a vehicle, etc.
+ *
+ * Algorithm:
+ * - For each frame with one previous reference frame
+ * - For each block in the frame
+ * - If contrast > threshold then find likely motion vector
+ * - For all found motion vectors
+ * - Find most common, store as global motion vector
+ * - Find most likely rotation angle
+ * - Transform image along global motion
+ *
+ * TODO:
+ * - Fill frame edges based on previous/next reference frames
+ * - Fill frame edges by stretching image near the edges?
+ * - Can this be done quickly and look decent?
+ *
+ * Dark Shikari links to http://wiki.videolan.org/SoC_x264_2010#GPU_Motion_Estimation_2
+ * for an algorithm similar to what could be used here to get the gmv
+ * It requires only a couple diamond searches + fast downscaling
+ *
+ * Special thanks to Jason Kotenko for his help with the algorithm and my
+ * inability to see simple errors in C code.
+ */
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libavutil/common.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "deshake.h"
+#include "deshake_opencl.h"
+
+#define CHROMA_WIDTH(link) (-((-(link)->w) >> av_pix_fmt_desc_get((link)->format)->log2_chroma_w))
+#define CHROMA_HEIGHT(link) (-((-(link)->h) >> av_pix_fmt_desc_get((link)->format)->log2_chroma_h))
+
+#define OFFSET(x) offsetof(DeshakeContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption deshake_options[] = {
+ { "x", "set x for the rectangular search area", OFFSET(cx), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "y", "set y for the rectangular search area", OFFSET(cy), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "w", "set width for the rectangular search area", OFFSET(cw), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "h", "set height for the rectangular search area", OFFSET(ch), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
+ { "rx", "set x for the rectangular search area", OFFSET(rx), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
+ { "ry", "set y for the rectangular search area", OFFSET(ry), AV_OPT_TYPE_INT, {.i64=16}, 0, MAX_R, .flags = FLAGS },
+ { "edge", "set edge mode", OFFSET(edge), AV_OPT_TYPE_INT, {.i64=FILL_MIRROR}, FILL_BLANK, FILL_COUNT-1, FLAGS, "edge"},
+ { "blank", "fill zeroes at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_BLANK}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "original", "original image at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_ORIGINAL}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "clamp", "extruded edge value at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_CLAMP}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "mirror", "mirrored edge at blank locations", 0, AV_OPT_TYPE_CONST, {.i64=FILL_MIRROR}, INT_MIN, INT_MAX, FLAGS, "edge" },
+ { "blocksize", "set motion search blocksize", OFFSET(blocksize), AV_OPT_TYPE_INT, {.i64=8}, 4, 128, .flags = FLAGS },
+ { "contrast", "set contrast threshold for blocks", OFFSET(contrast), AV_OPT_TYPE_INT, {.i64=125}, 1, 255, .flags = FLAGS },
+ { "search", "set search strategy", OFFSET(search), AV_OPT_TYPE_INT, {.i64=EXHAUSTIVE}, EXHAUSTIVE, SEARCH_COUNT-1, FLAGS, "smode" },
+ { "exhaustive", "exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
+ { "less", "less exhaustive search", 0, AV_OPT_TYPE_CONST, {.i64=SMART_EXHAUSTIVE}, INT_MIN, INT_MAX, FLAGS, "smode" },
+ { "filename", "set motion search detailed log file name", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(deshake);
+
+static int cmp(const double *a, const double *b)
+{
+ return *a < *b ? -1 : ( *a > *b ? 1 : 0 );
+}
+
+/**
+ * Cleaned mean (cuts off 20% of values to remove outliers and then averages)
+ */
+static double clean_mean(double *values, int count)
+{
+ double mean = 0;
+ int cut = count / 5;
+ int x;
+
+ qsort(values, count, sizeof(double), (void*)cmp);
+
+ for (x = cut; x < count - cut; x++) {
+ mean += values[x];
+ }
+
+ return mean / (count - cut * 2);
+}
+
+/**
+ * Find the most likely shift in motion between two frames for a given
+ * macroblock. Test each block against several shifts given by the rx
+ * and ry attributes. Searches using a simple matrix of those shifts and
+ * chooses the most likely shift by the smallest difference in blocks.
+ */
+static void find_block_motion(DeshakeContext *deshake, uint8_t *src1,
+ uint8_t *src2, int cx, int cy, int stride,
+ IntMotionVector *mv)
+{
+ int x, y;
+ int diff;
+ int smallest = INT_MAX;
+ int tmp, tmp2;
+
+ #define CMP(i, j) deshake->sad(src1 + cy * stride + cx, stride,\
+ src2 + (j) * stride + (i), stride)
+
+ if (deshake->search == EXHAUSTIVE) {
+ // Compare every possible position - this is sloooow!
+ for (y = -deshake->ry; y <= deshake->ry; y++) {
+ for (x = -deshake->rx; x <= deshake->rx; x++) {
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+ } else if (deshake->search == SMART_EXHAUSTIVE) {
+ // Compare every other possible position and find the best match
+ for (y = -deshake->ry + 1; y < deshake->ry; y += 2) {
+ for (x = -deshake->rx + 1; x < deshake->rx; x += 2) {
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+
+ // Hone in on the specific best match around the match we found above
+ tmp = mv->x;
+ tmp2 = mv->y;
+
+ for (y = tmp2 - 1; y <= tmp2 + 1; y++) {
+ for (x = tmp - 1; x <= tmp + 1; x++) {
+ if (x == tmp && y == tmp2)
+ continue;
+
+ diff = CMP(cx - x, cy - y);
+ if (diff < smallest) {
+ smallest = diff;
+ mv->x = x;
+ mv->y = y;
+ }
+ }
+ }
+ }
+
+ if (smallest > 512) {
+ mv->x = -1;
+ mv->y = -1;
+ }
+ emms_c();
+ //av_log(NULL, AV_LOG_ERROR, "%d\n", smallest);
+ //av_log(NULL, AV_LOG_ERROR, "Final: (%d, %d) = %d x %d\n", cx, cy, mv->x, mv->y);
+}
+
+/**
+ * Find the contrast of a given block. When searching for global motion we
+ * really only care about the high contrast blocks, so using this method we
+ * can actually skip blocks we don't care much about.
+ */
+static int block_contrast(uint8_t *src, int x, int y, int stride, int blocksize)
+{
+ int highest = 0;
+ int lowest = 255;
+ int i, j, pos;
+
+ for (i = 0; i <= blocksize * 2; i++) {
+ // We use a width of 16 here to match the sad function
+ for (j = 0; j <= 15; j++) {
+ pos = (y - i) * stride + (x - j);
+ if (src[pos] < lowest)
+ lowest = src[pos];
+ else if (src[pos] > highest) {
+ highest = src[pos];
+ }
+ }
+ }
+
+ return highest - lowest;
+}
+
+/**
+ * Find the rotation for a given block.
+ */
+static double block_angle(int x, int y, int cx, int cy, IntMotionVector *shift)
+{
+ double a1, a2, diff;
+
+ a1 = atan2(y - cy, x - cx);
+ a2 = atan2(y - cy + shift->y, x - cx + shift->x);
+
+ diff = a2 - a1;
+
+ return (diff > M_PI) ? diff - 2 * M_PI :
+ (diff < -M_PI) ? diff + 2 * M_PI :
+ diff;
+}
+
+/**
+ * Find the estimated global motion for a scene given the most likely shift
+ * for each block in the frame. The global motion is estimated to be the
+ * same as the motion from most blocks in the frame, so if most blocks
+ * move one pixel to the right and two pixels down, this would yield a
+ * motion vector (1, -2).
+ */
+static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
+ int width, int height, int stride, Transform *t)
+{
+ int x, y;
+ IntMotionVector mv = {0, 0};
+ int count_max_value = 0;
+ int contrast;
+
+ int pos;
+ int center_x = 0, center_y = 0;
+ double p_x, p_y;
+
+ av_fast_malloc(&deshake->angles, &deshake->angles_size, width * height / (16 * deshake->blocksize) * sizeof(*deshake->angles));
+
+ // Reset counts to zero
+ for (x = 0; x < deshake->rx * 2 + 1; x++) {
+ for (y = 0; y < deshake->ry * 2 + 1; y++) {
+ deshake->counts[x][y] = 0;
+ }
+ }
+
+ pos = 0;
+ // Find motion for every block and store the motion vector in the counts
+ for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
+ // We use a width of 16 here to match the sad function
+ for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
+ // If the contrast is too low, just skip this block as it probably
+ // won't be very useful to us.
+ contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
+ if (contrast > deshake->contrast) {
+ //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
+ find_block_motion(deshake, src1, src2, x, y, stride, &mv);
+ if (mv.x != -1 && mv.y != -1) {
+ deshake->counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
+ if (x > deshake->rx && y > deshake->ry)
+ deshake->angles[pos++] = block_angle(x, y, 0, 0, &mv);
+
+ center_x += mv.x;
+ center_y += mv.y;
+ }
+ }
+ }
+ }
+
+ if (pos) {
+ center_x /= pos;
+ center_y /= pos;
+ t->angle = clean_mean(deshake->angles, pos);
+ if (t->angle < 0.001)
+ t->angle = 0;
+ } else {
+ t->angle = 0;
+ }
+
+ // Find the most common motion vector in the frame and use it as the gmv
+ for (y = deshake->ry * 2; y >= 0; y--) {
+ for (x = 0; x < deshake->rx * 2 + 1; x++) {
+ //av_log(NULL, AV_LOG_ERROR, "%5d ", deshake->counts[x][y]);
+ if (deshake->counts[x][y] > count_max_value) {
+ t->vec.x = x - deshake->rx;
+ t->vec.y = y - deshake->ry;
+ count_max_value = deshake->counts[x][y];
+ }
+ }
+ //av_log(NULL, AV_LOG_ERROR, "\n");
+ }
+
+ p_x = (center_x - width / 2.0);
+ p_y = (center_y - height / 2.0);
+ t->vec.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
+ t->vec.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;
+
+ // Clamp max shift & rotation?
+ t->vec.x = av_clipf(t->vec.x, -deshake->rx * 2, deshake->rx * 2);
+ t->vec.y = av_clipf(t->vec.y, -deshake->ry * 2, deshake->ry * 2);
+ t->angle = av_clipf(t->angle, -0.1, 0.1);
+
+ //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
+}
+
+static int deshake_transform_c(AVFilterContext *ctx,
+ int width, int height, int cw, int ch,
+ const float *matrix_y, const float *matrix_uv,
+ enum InterpolateMethod interpolate,
+ enum FillMethod fill, AVFrame *in, AVFrame *out)
+{
+ int i = 0, ret = 0;
+ const float *matrixs[3];
+ int plane_w[3], plane_h[3];
+ matrixs[0] = matrix_y;
+ matrixs[1] = matrixs[2] = matrix_uv;
+ plane_w[0] = width;
+ plane_w[1] = plane_w[2] = cw;
+ plane_h[0] = height;
+ plane_h[1] = plane_h[2] = ch;
+
+ for (i = 0; i < 3; i++) {
+ // Transform the luma and chroma planes
+ ret = avfilter_transform(in->data[i], out->data[i], in->linesize[i], out->linesize[i],
+ plane_w[i], plane_h[i], matrixs[i], interpolate, fill);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ int ret;
+ DeshakeContext *deshake = ctx->priv;
+
+ deshake->sad = av_pixelutils_get_sad_fn(4, 4, 1, deshake); // 16x16, 2nd source unaligned
+ if (!deshake->sad)
+ return AVERROR(EINVAL);
+
+ deshake->refcount = 20; // XXX: add to options?
+ deshake->blocksize /= 2;
+ deshake->blocksize = av_clip(deshake->blocksize, 4, 128);
+
+ if (deshake->rx % 16) {
+ av_log(ctx, AV_LOG_ERROR, "rx must be a multiple of 16\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ if (deshake->filename)
+ deshake->fp = fopen(deshake->filename, "w");
+ if (deshake->fp)
+ fwrite("Ori x, Avg x, Fin x, Ori y, Avg y, Fin y, Ori angle, Avg angle, Fin angle, Ori zoom, Avg zoom, Fin zoom\n", sizeof(char), 104, deshake->fp);
+
+ // Quadword align left edge of box for MMX code, adjust width if necessary
+ // to keep right margin
+ if (deshake->cx > 0) {
+ deshake->cw += deshake->cx - (deshake->cx & ~15);
+ deshake->cx &= ~15;
+ }
+ deshake->transform = deshake_transform_c;
+ if (!CONFIG_OPENCL && deshake->opencl) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (CONFIG_OPENCL && deshake->opencl) {
+ deshake->transform = ff_opencl_transform;
+ ret = ff_opencl_deshake_init(ctx);
+ if (ret < 0)
+ return ret;
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "cx: %d, cy: %d, cw: %d, ch: %d, rx: %d, ry: %d, edge: %d blocksize: %d contrast: %d search: %d\n",
+ deshake->cx, deshake->cy, deshake->cw, deshake->ch,
+ deshake->rx, deshake->ry, deshake->edge, deshake->blocksize * 2, deshake->contrast, deshake->search);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *link)
+{
+ DeshakeContext *deshake = link->dst->priv;
+
+ deshake->ref = NULL;
+ deshake->last.vec.x = 0;
+ deshake->last.vec.y = 0;
+ deshake->last.angle = 0;
+ deshake->last.zoom = 0;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DeshakeContext *deshake = ctx->priv;
+ if (CONFIG_OPENCL && deshake->opencl) {
+ ff_opencl_deshake_uninit(ctx);
+ }
+ av_frame_free(&deshake->ref);
+ av_freep(&deshake->angles);
+ deshake->angles_size = 0;
+ if (deshake->fp)
+ fclose(deshake->fp);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ DeshakeContext *deshake = link->dst->priv;
+ AVFilterLink *outlink = link->dst->outputs[0];
+ AVFrame *out;
+ Transform t = {{0},0}, orig = {{0},0};
+ float matrix_y[9], matrix_uv[9];
+ float alpha = 2.0 / deshake->refcount;
+ char tmp[256];
+ int ret = 0;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (CONFIG_OPENCL && deshake->opencl) {
+ ret = ff_opencl_deshake_process_inout_buf(link->dst,in, out);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
+ // Find the most likely global motion for the current frame
+ find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
+ } else {
+ uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
+ uint8_t *src2 = in->data[0];
+
+ deshake->cx = FFMIN(deshake->cx, link->w);
+ deshake->cy = FFMIN(deshake->cy, link->h);
+
+ if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
+ if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;
+
+ // Quadword align right margin
+ deshake->cw &= ~15;
+
+ src1 += deshake->cy * in->linesize[0] + deshake->cx;
+ src2 += deshake->cy * in->linesize[0] + deshake->cx;
+
+ find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
+ }
+
+
+ // Copy transform so we can output it later to compare to the smoothed value
+ orig.vec.x = t.vec.x;
+ orig.vec.y = t.vec.y;
+ orig.angle = t.angle;
+ orig.zoom = t.zoom;
+
+ // Generate a one-sided moving exponential average
+ deshake->avg.vec.x = alpha * t.vec.x + (1.0 - alpha) * deshake->avg.vec.x;
+ deshake->avg.vec.y = alpha * t.vec.y + (1.0 - alpha) * deshake->avg.vec.y;
+ deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
+ deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;
+
+ // Remove the average from the current motion to detect the motion that
+ // is not on purpose, just as jitter from bumping the camera
+ t.vec.x -= deshake->avg.vec.x;
+ t.vec.y -= deshake->avg.vec.y;
+ t.angle -= deshake->avg.angle;
+ t.zoom -= deshake->avg.zoom;
+
+ // Invert the motion to undo it
+ t.vec.x *= -1;
+ t.vec.y *= -1;
+ t.angle *= -1;
+
+ // Write statistics to file
+ if (deshake->fp) {
+ snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vec.x, deshake->avg.vec.x, t.vec.x, orig.vec.y, deshake->avg.vec.y, t.vec.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
+ fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
+ }
+
+ // Turn relative current frame motion into absolute by adding it to the
+ // last absolute motion
+ t.vec.x += deshake->last.vec.x;
+ t.vec.y += deshake->last.vec.y;
+ t.angle += deshake->last.angle;
+ t.zoom += deshake->last.zoom;
+
+ // Shrink motion by 10% to keep things centered in the camera frame
+ t.vec.x *= 0.9;
+ t.vec.y *= 0.9;
+ t.angle *= 0.9;
+
+ // Store the last absolute motion information
+ deshake->last.vec.x = t.vec.x;
+ deshake->last.vec.y = t.vec.y;
+ deshake->last.angle = t.angle;
+ deshake->last.zoom = t.zoom;
+
+ // Generate a luma transformation matrix
+ avfilter_get_matrix(t.vec.x, t.vec.y, t.angle, 1.0 + t.zoom / 100.0, matrix_y);
+ // Generate a chroma transformation matrix
+ avfilter_get_matrix(t.vec.x / (link->w / CHROMA_WIDTH(link)), t.vec.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix_uv);
+ // Transform the luma and chroma planes
+ ret = deshake->transform(link->dst, link->w, link->h, CHROMA_WIDTH(link), CHROMA_HEIGHT(link),
+ matrix_y, matrix_uv, INTERPOLATE_BILINEAR, deshake->edge, in, out);
+
+ // Cleanup the old reference frame
+ av_frame_free(&deshake->ref);
+
+ if (ret < 0)
+ return ret;
+
+ // Store the current frame as the reference frame for calculating the
+ // motion of the next frame
+ deshake->ref = in;
+
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad deshake_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad deshake_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_deshake = {
+ .name = "deshake",
+ .description = NULL_IF_CONFIG_SMALL("Stabilize shaky video."),
+ .priv_size = sizeof(DeshakeContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = deshake_inputs,
+ .outputs = deshake_outputs,
+ .priv_class = &deshake_class,
+};
diff --git a/libavfilter/vf_drawbox.c b/libavfilter/vf_drawbox.c
index ab14af2..115df88 100644
--- a/libavfilter/vf_drawbox.c
+++ b/libavfilter/vf_drawbox.c
@@ -1,32 +1,34 @@
/*
* Copyright (c) 2008 Affine Systems, Inc (Michael Sullivan, Bobby Impollonia)
+ * Copyright (c) 2013 Andrey Utkin <andrey.krieger.utkin gmail com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
- * Box drawing filter. Also a nice template for a filter that needs to
- * write in the input frame.
+ * Box and grid drawing filters. Also a nice template for a filter
+ * that needs to write in the input frame.
*/
#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
+#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
@@ -34,35 +36,74 @@
#include "internal.h"
#include "video.h"
+static const char *const var_names[] = {
+ "dar",
+ "hsub", "vsub",
+ "in_h", "ih", ///< height of the input video
+ "in_w", "iw", ///< width of the input video
+ "sar",
+ "x",
+ "y",
+ "h", ///< height of the rendered box
+ "w", ///< width of the rendered box
+ "t",
+ NULL
+};
+
enum { Y, U, V, A };
+enum var_name {
+ VAR_DAR,
+ VAR_HSUB, VAR_VSUB,
+ VAR_IN_H, VAR_IH,
+ VAR_IN_W, VAR_IW,
+ VAR_SAR,
+ VAR_X,
+ VAR_Y,
+ VAR_H,
+ VAR_W,
+ VAR_T,
+ VARS_NB
+};
+
typedef struct DrawBoxContext {
const AVClass *class;
- int x, y, w_opt, h_opt, w, h;
+ int x, y, w, h;
+ int thickness;
char *color_str;
unsigned char yuv_color[4];
+ int invert_color; ///< invert luma color
int vsub, hsub; ///< chroma subsampling
+ char *x_expr, *y_expr; ///< expression for x and y
+ char *w_expr, *h_expr; ///< expression for width and height
+ char *t_expr; ///< expression for thickness
} DrawBoxContext;
+static const int NUM_EXPR_EVALS = 5;
+
static av_cold int init(AVFilterContext *ctx)
{
DrawBoxContext *s = ctx->priv;
uint8_t rgba_color[4];
- if (av_parse_color(rgba_color, s->color_str, -1, ctx) < 0)
+ if (!strcmp(s->color_str, "invert"))
+ s->invert_color = 1;
+ else if (av_parse_color(rgba_color, s->color_str, -1, ctx) < 0)
return AVERROR(EINVAL);
- s->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
- s->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
- s->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
- s->yuv_color[A] = rgba_color[3];
+ if (!s->invert_color) {
+ s->yuv_color[Y] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
+ s->yuv_color[U] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
+ s->yuv_color[V] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
+ s->yuv_color[A] = rgba_color[3];
+ }
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
+ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
@@ -76,20 +117,83 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink)
{
- DrawBoxContext *s = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ DrawBoxContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ double var_values[VARS_NB], res;
+ char *expr;
+ int ret;
+ int i;
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
- s->w = (s->w_opt > 0) ? s->w_opt : inlink->w;
- s->h = (s->h_opt > 0) ? s->h_opt : inlink->h;
+ var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
+ var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
+ var_values[VAR_DAR] = (double)inlink->w / inlink->h * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = s->hsub;
+ var_values[VAR_VSUB] = s->vsub;
+ var_values[VAR_X] = NAN;
+ var_values[VAR_Y] = NAN;
+ var_values[VAR_H] = NAN;
+ var_values[VAR_W] = NAN;
+ var_values[VAR_T] = NAN;
+
+ for (i = 0; i <= NUM_EXPR_EVALS; i++) {
+ /* evaluate expressions, fail on last iteration */
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->x = var_values[VAR_X] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->y = var_values[VAR_Y] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->w = var_values[VAR_W] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->h = var_values[VAR_H] = res;
+
+ if ((ret = av_expr_parse_and_eval(&res, (expr = s->t_expr),
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0 && i == NUM_EXPR_EVALS)
+ goto fail;
+ s->thickness = var_values[VAR_T] = res;
+ }
+
+ /* if w or h are zero, use the input w/h */
+ s->w = (s->w > 0) ? s->w : inlink->w;
+ s->h = (s->h > 0) ? s->h : inlink->h;
+
+ /* sanity check width and height */
+ if (s->w < 0 || s->h < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Size values less than 0 are not acceptable.\n");
+ return AVERROR(EINVAL);
+ }
- av_log(inlink->dst, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
- s->w, s->y, s->w, s->h,
+ av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d w:%d h:%d color:0x%02X%02X%02X%02X\n",
+ s->x, s->y, s->w, s->h,
s->yuv_color[Y], s->yuv_color[U], s->yuv_color[V], s->yuv_color[A]);
return 0;
+
+fail:
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s'.\n",
+ expr);
+ return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
@@ -105,14 +209,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
row[plane] = frame->data[plane] +
frame->linesize[plane] * (y >> s->vsub);
- for (x = FFMAX(xb, 0); x < (xb + s->w) && x < frame->width; x++) {
- double alpha = (double)s->yuv_color[A] / 255;
+ if (s->invert_color) {
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++)
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness))
+ row[0][x] = 0xff - row[0][x];
+ } else {
+ for (x = FFMAX(xb, 0); x < xb + s->w && x < frame->width; x++) {
+ double alpha = (double)s->yuv_color[A] / 255;
- if ((y - yb < 3) || (yb + s->h - y < 4) ||
- (x - xb < 3) || (xb + s->w - x < 4)) {
- row[0][x ] = (1 - alpha) * row[0][x ] + alpha * s->yuv_color[Y];
- row[1][x >> s->hsub] = (1 - alpha) * row[1][x >> s->hsub] + alpha * s->yuv_color[U];
- row[2][x >> s->hsub] = (1 - alpha) * row[2][x >> s->hsub] + alpha * s->yuv_color[V];
+ if ((y - yb < s->thickness) || (yb + s->h - 1 - y < s->thickness) ||
+ (x - xb < s->thickness) || (xb + s->w - 1 - x < s->thickness)) {
+ row[0][x ] = (1 - alpha) * row[0][x ] + alpha * s->yuv_color[Y];
+ row[1][x >> s->hsub] = (1 - alpha) * row[1][x >> s->hsub] + alpha * s->yuv_color[U];
+ row[2][x >> s->hsub] = (1 - alpha) * row[2][x >> s->hsub] + alpha * s->yuv_color[V];
+ }
}
}
}
@@ -121,36 +232,38 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
#define OFFSET(x) offsetof(DrawBoxContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "x", "Horizontal position of the left box edge", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "y", "Vertical position of the top box edge", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "width", "Width of the box", OFFSET(w_opt), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { "height", "Height of the box", OFFSET(h_opt), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
- { "color", "Color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-static const AVClass drawbox_class = {
- .class_name = "drawbox",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+#if CONFIG_DRAWBOX_FILTER
+
+static const AVOption drawbox_options[] = {
+ { "x", "set horizontal position of the left box edge", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set vertical position of the top box edge", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "width", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set width of the box", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set height of the box", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color of the box", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "thickness", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "t", "set the box thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, { .str="3" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
};
-static const AVFilterPad avfilter_vf_drawbox_inputs[] = {
+AVFILTER_DEFINE_CLASS(drawbox);
+
+static const AVFilterPad drawbox_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
},
{ NULL }
};
-static const AVFilterPad avfilter_vf_drawbox_outputs[] = {
+static const AVFilterPad drawbox_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
@@ -159,13 +272,121 @@ static const AVFilterPad avfilter_vf_drawbox_outputs[] = {
};
AVFilter ff_vf_drawbox = {
- .name = "drawbox",
- .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
- .priv_size = sizeof(DrawBoxContext),
- .priv_class = &drawbox_class,
- .init = init,
-
- .query_formats = query_formats,
- .inputs = avfilter_vf_drawbox_inputs,
- .outputs = avfilter_vf_drawbox_outputs,
+ .name = "drawbox",
+ .description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
+ .priv_size = sizeof(DrawBoxContext),
+ .priv_class = &drawbox_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = drawbox_inputs,
+ .outputs = drawbox_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+#endif /* CONFIG_DRAWBOX_FILTER */
+
+#if CONFIG_DRAWGRID_FILTER
+static av_pure av_always_inline int pixel_belongs_to_grid(DrawBoxContext *drawgrid, int x, int y)
+{
+ // x is horizontal (width) coord,
+ // y is vertical (height) coord
+ int x_modulo;
+ int y_modulo;
+
+ // Abstract from the offset
+ x -= drawgrid->x;
+ y -= drawgrid->y;
+
+ x_modulo = x % drawgrid->w;
+ y_modulo = y % drawgrid->h;
+
+ // If x or y got negative, fix values to preserve logics
+ if (x_modulo < 0)
+ x_modulo += drawgrid->w;
+ if (y_modulo < 0)
+ y_modulo += drawgrid->h;
+
+ return x_modulo < drawgrid->thickness // Belongs to vertical line
+ || y_modulo < drawgrid->thickness; // Belongs to horizontal line
+}
+
+static int drawgrid_filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ DrawBoxContext *drawgrid = inlink->dst->priv;
+ int plane, x, y;
+ uint8_t *row[4];
+
+ for (y = 0; y < frame->height; y++) {
+ row[0] = frame->data[0] + y * frame->linesize[0];
+
+ for (plane = 1; plane < 3; plane++)
+ row[plane] = frame->data[plane] +
+ frame->linesize[plane] * (y >> drawgrid->vsub);
+
+ if (drawgrid->invert_color) {
+ for (x = 0; x < frame->width; x++)
+ if (pixel_belongs_to_grid(drawgrid, x, y))
+ row[0][x] = 0xff - row[0][x];
+ } else {
+ for (x = 0; x < frame->width; x++) {
+ double alpha = (double)drawgrid->yuv_color[A] / 255;
+
+ if (pixel_belongs_to_grid(drawgrid, x, y)) {
+ row[0][x ] = (1 - alpha) * row[0][x ] + alpha * drawgrid->yuv_color[Y];
+ row[1][x >> drawgrid->hsub] = (1 - alpha) * row[1][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[U];
+ row[2][x >> drawgrid->hsub] = (1 - alpha) * row[2][x >> drawgrid->hsub] + alpha * drawgrid->yuv_color[V];
+ }
+ }
+ }
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+static const AVOption drawgrid_options[] = {
+ { "x", "set horizontal offset", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set vertical offset", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "width", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set width of grid cell", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set height of grid cell", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str="0" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color of the grid", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "thickness", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "t", "set grid line thickness", OFFSET(t_expr), AV_OPT_TYPE_STRING, {.str="1"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
};
+
+AVFILTER_DEFINE_CLASS(drawgrid);
+
+static const AVFilterPad drawgrid_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = drawgrid_filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad drawgrid_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_drawgrid = {
+ .name = "drawgrid",
+ .description = NULL_IF_CONFIG_SMALL("Draw a colored grid on the input video."),
+ .priv_size = sizeof(DrawBoxContext),
+ .priv_class = &drawgrid_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = drawgrid_inputs,
+ .outputs = drawgrid_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
+
+#endif /* CONFIG_DRAWGRID_FILTER */
diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index 54a8847..d20f805 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -3,20 +3,20 @@
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
* Copyright (c) 2003 Gustavo Sverzut Barbieri <gsbarbieri@yahoo.com.br>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,25 +28,30 @@
#include "config.h"
-#include <sys/types.h>
+#if HAVE_SYS_TIME_H
#include <sys/time.h>
+#endif
+#include <sys/types.h>
#include <sys/stat.h>
#include <time.h>
+#if HAVE_UNISTD_H
#include <unistd.h>
+#endif
+#include <fenv.h>
#if CONFIG_LIBFONTCONFIG
#include <fontconfig/fontconfig.h>
#endif
-#include "libavutil/colorspace.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
-#include "libavutil/mathematics.h"
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
-#include "libavutil/pixdesc.h"
+#include "libavutil/timecode.h"
#include "libavutil/time_internal.h"
#include "libavutil/tree.h"
#include "libavutil/lfg.h"
@@ -56,22 +61,33 @@
#include "internal.h"
#include "video.h"
+#if CONFIG_LIBFRIBIDI
+#include <fribidi.h>
+#endif
+
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_GLYPH_H
+#include FT_STROKER_H
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
- "main_w", "W", ///< width of the main video
- "main_h", "H", ///< height of the main video
- "text_w", "w", ///< width of the overlay text
- "text_h", "h", ///< height of the overlay text
+ "dar",
+ "hsub", "vsub",
+ "line_h", "lh", ///< line height, same as max_glyph_h
+ "main_h", "h", "H", ///< height of the input video
+ "main_w", "w", "W", ///< width of the input video
+ "max_glyph_a", "ascent", ///< max glyph ascent
+ "max_glyph_d", "descent", ///< min glyph descent
+ "max_glyph_h", ///< max glyph height
+ "max_glyph_w", ///< max glyph width
+ "n", ///< number of frame
+ "sar",
+ "t", ///< timestamp expressed in seconds
+ "text_h", "th", ///< height of the rendered text
+ "text_w", "tw", ///< width of the rendered text
"x",
"y",
- "n", ///< number of processed frames
- "t", ///< timestamp expressed in seconds
+ "pict_type",
NULL
};
@@ -92,95 +108,134 @@ static const eval_func2 fun2[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
- VAR_MAIN_W, VAR_MW,
- VAR_MAIN_H, VAR_MH,
- VAR_TEXT_W, VAR_TW,
+ VAR_DAR,
+ VAR_HSUB, VAR_VSUB,
+ VAR_LINE_H, VAR_LH,
+ VAR_MAIN_H, VAR_h, VAR_H,
+ VAR_MAIN_W, VAR_w, VAR_W,
+ VAR_MAX_GLYPH_A, VAR_ASCENT,
+ VAR_MAX_GLYPH_D, VAR_DESCENT,
+ VAR_MAX_GLYPH_H,
+ VAR_MAX_GLYPH_W,
+ VAR_N,
+ VAR_SAR,
+ VAR_T,
VAR_TEXT_H, VAR_TH,
+ VAR_TEXT_W, VAR_TW,
VAR_X,
VAR_Y,
- VAR_N,
- VAR_T,
+ VAR_PICT_TYPE,
VAR_VARS_NB
};
+enum expansion_mode {
+ EXP_NONE,
+ EXP_NORMAL,
+ EXP_STRFTIME,
+};
+
typedef struct DrawTextContext {
const AVClass *class;
+ enum expansion_mode exp_mode; ///< expansion mode to use for the text
+ int reinit; ///< tells if the filter is being reinited
#if CONFIG_LIBFONTCONFIG
uint8_t *font; ///< font to be used
#endif
uint8_t *fontfile; ///< font to be used
uint8_t *text; ///< text to be drawn
- uint8_t *expanded_text; ///< used to contain the strftime()-expanded text
- size_t expanded_text_size; ///< size in bytes of the expanded_text buffer
+ AVBPrint expanded_text; ///< used to contain the expanded text
+ uint8_t *fontcolor_expr; ///< fontcolor expression to evaluate
+ AVBPrint expanded_fontcolor; ///< used to contain the expanded fontcolor spec
int ft_load_flags; ///< flags used for loading fonts, see FT_LOAD_*
FT_Vector *positions; ///< positions for each element in the text
size_t nb_positions; ///< number of elements of positions array
char *textfile; ///< file with text to be drawn
- int x, y; ///< position to start drawing text
- int w, h; ///< dimension of the text block
+ int x; ///< x position to start drawing text
+ int y; ///< y position to start drawing text
+ int max_glyph_w; ///< max glyph width
+ int max_glyph_h; ///< max glyph height
int shadowx, shadowy;
+ int borderw; ///< border width
unsigned int fontsize; ///< font size to use
- char *fontcolor_string; ///< font color as string
- char *boxcolor_string; ///< box color as string
- char *shadowcolor_string; ///< shadow color as string
- uint8_t fontcolor[4]; ///< foreground color
- uint8_t boxcolor[4]; ///< background color
- uint8_t shadowcolor[4]; ///< shadow color
- uint8_t fontcolor_rgba[4]; ///< foreground color in RGBA
- uint8_t boxcolor_rgba[4]; ///< background color in RGBA
- uint8_t shadowcolor_rgba[4]; ///< shadow color in RGBA
short int draw_box; ///< draw box around text - true or false
int use_kerning; ///< font kerning is used - true/false
int tabsize; ///< tab size
int fix_bounds; ///< do we let it go out of frame bounds - t/f
+ FFDrawContext dc;
+ FFDrawColor fontcolor; ///< foreground color
+ FFDrawColor shadowcolor; ///< shadow color
+ FFDrawColor bordercolor; ///< border color
+ FFDrawColor boxcolor; ///< background color
+
FT_Library library; ///< freetype font library handle
FT_Face face; ///< freetype font face handle
+ FT_Stroker stroker; ///< freetype stroker handle
struct AVTreeNode *glyphs; ///< rendered glyphs, stored using the UTF-32 char code
- int hsub, vsub; ///< chroma subsampling values
- int is_packed_rgb;
- int pixel_step[4]; ///< distance in bytes between the component of each pixel
- uint8_t rgba_map[4]; ///< map RGBA offsets to the positions in the packed RGBA format
- uint8_t *box_line[4]; ///< line used for filling the box background
- char *x_expr, *y_expr;
+ char *x_expr; ///< expression for x position
+ char *y_expr; ///< expression for y position
AVExpr *x_pexpr, *y_pexpr; ///< parsed expressions for x and y
+ int64_t basetime; ///< base pts time in the real world for display
double var_values[VAR_VARS_NB];
- char *d_expr;
- AVExpr *d_pexpr;
- int draw; ///< set to zero to prevent drawing
AVLFG prng; ///< random
+ char *tc_opt_string; ///< specified timecode option string
+ AVRational tc_rate; ///< frame rate for timecode
+ AVTimecode tc; ///< timecode context
+ int tc24hmax; ///< 1 if timecode is wrapped to 24 hours, 0 otherwise
+ int reload; ///< reload text file for each frame
+ int start_number; ///< starting frame number for n/frame_num var
+#if CONFIG_LIBFRIBIDI
+ int text_shaping; ///< 1 to shape the text before drawing it
+#endif
+ AVDictionary *metadata;
} DrawTextContext;
#define OFFSET(x) offsetof(DrawTextContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption drawtext_options[]= {
+ {"fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"text", "set text", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"textfile", "set text file", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"fontcolor", "set foreground color", OFFSET(fontcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"fontcolor_expr", "set foreground color expression", OFFSET(fontcolor_expr), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"boxcolor", "set box color", OFFSET(boxcolor.rgba), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"bordercolor", "set border color", OFFSET(bordercolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"shadowcolor", "set shadow color", OFFSET(shadowcolor.rgba), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"box", "set box", OFFSET(draw_box), AV_OPT_TYPE_INT, {.i64=0}, 0, 1 , FLAGS},
+ {"fontsize", "set font size", OFFSET(fontsize), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX , FLAGS},
+ {"x", "set x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"y", "set y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"shadowx", "set x", OFFSET(shadowx), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"shadowy", "set y", OFFSET(shadowy), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"borderw", "set border width", OFFSET(borderw), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX , FLAGS},
+ {"tabsize", "set tab size", OFFSET(tabsize), AV_OPT_TYPE_INT, {.i64=4}, 0, INT_MAX , FLAGS},
+ {"basetime", "set base time", OFFSET(basetime), AV_OPT_TYPE_INT64, {.i64=AV_NOPTS_VALUE}, INT64_MIN, INT64_MAX , FLAGS},
#if CONFIG_LIBFONTCONFIG
{ "font", "Font name", OFFSET(font), AV_OPT_TYPE_STRING, { .str = "Sans" }, .flags = FLAGS },
#endif
- { "fontfile", NULL, OFFSET(fontfile), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "text", NULL, OFFSET(text), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "textfile", NULL, OFFSET(textfile), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "fontcolor", NULL, OFFSET(fontcolor_string), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { "boxcolor", NULL, OFFSET(boxcolor_string), AV_OPT_TYPE_STRING, { .str = "white" }, .flags = FLAGS },
- { "shadowcolor", NULL, OFFSET(shadowcolor_string), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { "box", NULL, OFFSET(draw_box), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
- { "fontsize", NULL, OFFSET(fontsize), AV_OPT_TYPE_INT, { .i64 = 16 }, 1, 72, FLAGS },
- { "x", NULL, OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "y", NULL, OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "shadowx", NULL, OFFSET(shadowx), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "shadowy", NULL, OFFSET(shadowy), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
- { "tabsize", NULL, OFFSET(tabsize), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, INT_MAX, FLAGS },
- { "draw", "if false do not draw", OFFSET(d_expr), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { "fix_bounds", "if true, check and fix text coords to avoid clipping",
- OFFSET(fix_bounds), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS },
+
+ {"expansion", "set the expansion mode", OFFSET(exp_mode), AV_OPT_TYPE_INT, {.i64=EXP_NORMAL}, 0, 2, FLAGS, "expansion"},
+ {"none", "set no expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NONE}, 0, 0, FLAGS, "expansion"},
+ {"normal", "set normal expansion", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_NORMAL}, 0, 0, FLAGS, "expansion"},
+ {"strftime", "set strftime expansion (deprecated)", OFFSET(exp_mode), AV_OPT_TYPE_CONST, {.i64=EXP_STRFTIME}, 0, 0, FLAGS, "expansion"},
+
+ {"timecode", "set initial timecode", OFFSET(tc_opt_string), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"tc24hmax", "set 24 hours max (timecode only)", OFFSET(tc24hmax), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"timecode_rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"r", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"rate", "set rate (timecode only)", OFFSET(tc_rate), AV_OPT_TYPE_RATIONAL, {.dbl=0}, 0, INT_MAX, FLAGS},
+ {"reload", "reload text file for each frame", OFFSET(reload), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"fix_bounds", "if true, check and fix text coords to avoid clipping", OFFSET(fix_bounds), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS},
+ {"start_number", "start frame number for n/frame_num variable", OFFSET(start_number), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS},
+
+#if CONFIG_LIBFRIBIDI
+ {"text_shaping", "attempt to shape text before drawing", OFFSET(text_shaping), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS},
+#endif
/* FT_LOAD_* flags */
- { "ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, { .i64 = FT_LOAD_DEFAULT | FT_LOAD_RENDER}, 0, INT_MAX, FLAGS, "ft_load_flags" },
+ { "ft_load_flags", "set font loading flags for libfreetype", OFFSET(ft_load_flags), AV_OPT_TYPE_FLAGS, { .i64 = FT_LOAD_DEFAULT }, 0, INT_MAX, FLAGS, "ft_load_flags" },
{ "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_DEFAULT }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_scale", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_SCALE }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_hinting", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_HINTING }, .flags = FLAGS, .unit = "ft_load_flags" },
@@ -196,38 +251,31 @@ static const AVOption drawtext_options[]= {
{ "monochrome", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_MONOCHROME }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "linear_design", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_LINEAR_DESIGN }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_autohint", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_AUTOHINT }, .flags = FLAGS, .unit = "ft_load_flags" },
- { NULL},
+ { NULL }
};
-static const char *drawtext_get_name(void *ctx)
-{
- return "drawtext";
-}
-
-static const AVClass drawtext_class = {
- "DrawTextContext",
- drawtext_get_name,
- drawtext_options
-};
+AVFILTER_DEFINE_CLASS(drawtext);
#undef __FTERRORS_H__
#define FT_ERROR_START_LIST {
#define FT_ERRORDEF(e, v, s) { (e), (s) },
#define FT_ERROR_END_LIST { 0, NULL } };
-struct ft_error
+static const struct ft_error
{
int err;
const char *err_msg;
-} static ft_errors[] =
+} ft_errors[] =
#include FT_ERRORS_H
#define FT_ERRMSG(e) ft_errors[e].err_msg
typedef struct Glyph {
- FT_Glyph *glyph;
+ FT_Glyph glyph;
+ FT_Glyph border_glyph;
uint32_t code;
FT_Bitmap bitmap; ///< array holding bitmaps of font
+ FT_Bitmap border_bitmap; ///< array holding bitmaps of font border
FT_BBox bbox;
int advance;
int bitmap_left;
@@ -247,6 +295,7 @@ static int glyph_cmp(void *key, const void *b)
static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
{
DrawTextContext *s = ctx->priv;
+ FT_BitmapGlyph bitmapglyph;
Glyph *glyph;
struct AVTreeNode *node = NULL;
int ret;
@@ -255,26 +304,40 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
if (FT_Load_Char(s->face, code, s->ft_load_flags))
return AVERROR(EINVAL);
- /* save glyph */
- if (!(glyph = av_mallocz(sizeof(*glyph))) ||
- !(glyph->glyph = av_mallocz(sizeof(*glyph->glyph)))) {
+ glyph = av_mallocz(sizeof(*glyph));
+ if (!glyph) {
ret = AVERROR(ENOMEM);
goto error;
}
glyph->code = code;
- if (FT_Get_Glyph(s->face->glyph, glyph->glyph)) {
+ if (FT_Get_Glyph(s->face->glyph, &glyph->glyph)) {
ret = AVERROR(EINVAL);
goto error;
}
+ if (s->borderw) {
+ glyph->border_glyph = glyph->glyph;
+ if (FT_Glyph_StrokeBorder(&glyph->border_glyph, s->stroker, 0, 0) ||
+ FT_Glyph_To_Bitmap(&glyph->border_glyph, FT_RENDER_MODE_NORMAL, 0, 1)) {
+ ret = AVERROR_EXTERNAL;
+ goto error;
+ }
+ bitmapglyph = (FT_BitmapGlyph) glyph->border_glyph;
+ glyph->border_bitmap = bitmapglyph->bitmap;
+ }
+ if (FT_Glyph_To_Bitmap(&glyph->glyph, FT_RENDER_MODE_NORMAL, 0, 1)) {
+ ret = AVERROR_EXTERNAL;
+ goto error;
+ }
+ bitmapglyph = (FT_BitmapGlyph) glyph->glyph;
- glyph->bitmap = s->face->glyph->bitmap;
- glyph->bitmap_left = s->face->glyph->bitmap_left;
- glyph->bitmap_top = s->face->glyph->bitmap_top;
+ glyph->bitmap = bitmapglyph->bitmap;
+ glyph->bitmap_left = bitmapglyph->left;
+ glyph->bitmap_top = bitmapglyph->top;
glyph->advance = s->face->glyph->advance.x >> 6;
/* measure text height to calculate text_height (or the maximum text height) */
- FT_Glyph_Get_CBox(*glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
+ FT_Glyph_Get_CBox(glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
/* cache the newly created glyph */
if (!(node = av_tree_node_alloc())) {
@@ -290,89 +353,235 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
error:
if (glyph)
av_freep(&glyph->glyph);
+
av_freep(&glyph);
av_freep(&node);
return ret;
}
-static int parse_font(AVFilterContext *ctx)
+static int load_font_file(AVFilterContext *ctx, const char *path, int index)
{
DrawTextContext *s = ctx->priv;
-#if !CONFIG_LIBFONTCONFIG
- if (!s->fontfile) {
- av_log(ctx, AV_LOG_ERROR, "No font filename provided\n");
+ int err;
+
+ err = FT_New_Face(s->library, path, index, &s->face);
+ if (err) {
+ av_log(ctx, AV_LOG_ERROR, "Could not load font \"%s\": %s\n",
+ s->fontfile, FT_ERRMSG(err));
return AVERROR(EINVAL);
}
-
return 0;
-#else
+}
+
+#if CONFIG_LIBFONTCONFIG
+static int load_font_fontconfig(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ FcConfig *fontconfig;
FcPattern *pat, *best;
FcResult result = FcResultMatch;
-
- FcBool fc_bool;
- FcChar8* fc_string;
+ FcChar8 *filename;
+ int index;
+ double size;
int err = AVERROR(ENOENT);
- if (s->fontfile)
- return 0;
-
- if (!FcInit())
+ fontconfig = FcInitLoadConfigAndFonts();
+ if (!fontconfig) {
+ av_log(ctx, AV_LOG_ERROR, "impossible to init fontconfig\n");
return AVERROR_UNKNOWN;
-
- if (!(pat = FcPatternCreate()))
- return AVERROR(ENOMEM);
+ }
+ pat = FcNameParse(s->fontfile ? s->fontfile :
+ (uint8_t *)(intptr_t)"default");
+ if (!pat) {
+ av_log(ctx, AV_LOG_ERROR, "could not parse fontconfig pat");
+ return AVERROR(EINVAL);
+ }
FcPatternAddString(pat, FC_FAMILY, s->font);
- FcPatternAddBool(pat, FC_OUTLINE, FcTrue);
- FcPatternAddDouble(pat, FC_SIZE, (double)s->fontsize);
+ if (s->fontsize)
+ FcPatternAddDouble(pat, FC_SIZE, (double)s->fontsize);
FcDefaultSubstitute(pat);
- if (!FcConfigSubstitute(NULL, pat, FcMatchPattern)) {
+ if (!FcConfigSubstitute(fontconfig, pat, FcMatchPattern)) {
+ av_log(ctx, AV_LOG_ERROR, "could not substitue fontconfig options"); /* very unlikely */
FcPatternDestroy(pat);
return AVERROR(ENOMEM);
}
- best = FcFontMatch(NULL, pat, &result);
+ best = FcFontMatch(fontconfig, pat, &result);
FcPatternDestroy(pat);
- if (!best || result == FcResultNoMatch) {
- av_log(ctx, AV_LOG_ERROR,
- "Cannot find a valid font for the family %s\n",
- s->font);
- goto fail;
- }
-
- if (FcPatternGetBool(best, FC_OUTLINE, 0, &fc_bool) != FcResultMatch ||
- !fc_bool) {
- av_log(ctx, AV_LOG_ERROR, "Outline not available for %s\n",
+ if (!best || result != FcResultMatch) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Cannot find a valid font for the family %s\n",
s->font);
goto fail;
}
- if (FcPatternGetString(best, FC_FAMILY, 0, &fc_string) != FcResultMatch) {
- av_log(ctx, AV_LOG_ERROR, "No matches for %s\n",
- s->font);
- goto fail;
+ if (
+ FcPatternGetInteger(best, FC_INDEX, 0, &index ) != FcResultMatch ||
+ FcPatternGetDouble (best, FC_SIZE, 0, &size ) != FcResultMatch) {
+ av_log(ctx, AV_LOG_ERROR, "impossible to find font information");
+ return AVERROR(EINVAL);
}
- if (FcPatternGetString(best, FC_FILE, 0, &fc_string) != FcResultMatch) {
+ if (FcPatternGetString(best, FC_FILE, 0, &filename) != FcResultMatch) {
av_log(ctx, AV_LOG_ERROR, "No file path for %s\n",
s->font);
goto fail;
}
- s->fontfile = av_strdup(fc_string);
- if (!s->fontfile)
- err = AVERROR(ENOMEM);
- else
- err = 0;
+ av_log(ctx, AV_LOG_INFO, "Using \"%s\"\n", filename);
+ if (!s->fontsize)
+ s->fontsize = size + 0.5;
+ err = load_font_file(ctx, filename, index);
+ if (err)
+ return err;
+ FcConfigDestroy(fontconfig);
fail:
FcPatternDestroy(best);
return err;
+}
+#endif
+
+static int load_font(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ int err;
+
+ /* load the face, and set up the encoding, which is by default UTF-8 */
+ err = load_font_file(ctx, s->fontfile, 0);
+ if (!err)
+ return 0;
+#if CONFIG_LIBFONTCONFIG
+ err = load_font_fontconfig(ctx);
+ if (!err)
+ return 0;
#endif
+ return err;
+}
+
+static int load_textfile(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ int err;
+ uint8_t *textbuf;
+ uint8_t *tmp;
+ size_t textbuf_size;
+
+ if ((err = av_file_map(s->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The text file '%s' could not be read or is empty\n",
+ s->textfile);
+ return err;
+ }
+
+ if (textbuf_size > SIZE_MAX - 1 || !(tmp = av_realloc(s->text, textbuf_size + 1))) {
+ av_file_unmap(textbuf, textbuf_size);
+ return AVERROR(ENOMEM);
+ }
+ s->text = tmp;
+ memcpy(s->text, textbuf, textbuf_size);
+ s->text[textbuf_size] = 0;
+ av_file_unmap(textbuf, textbuf_size);
+
+ return 0;
+}
+
+static inline int is_newline(uint32_t c)
+{
+ return c == '\n' || c == '\r' || c == '\f' || c == '\v';
+}
+
+#if CONFIG_LIBFRIBIDI
+static int shape_text(AVFilterContext *ctx)
+{
+ DrawTextContext *s = ctx->priv;
+ uint8_t *tmp;
+ int ret = AVERROR(ENOMEM);
+ static const FriBidiFlags flags = FRIBIDI_FLAGS_DEFAULT |
+ FRIBIDI_FLAGS_ARABIC;
+ FriBidiChar *unicodestr = NULL;
+ FriBidiStrIndex len;
+ FriBidiParType direction = FRIBIDI_PAR_LTR;
+ FriBidiStrIndex line_start = 0;
+ FriBidiStrIndex line_end = 0;
+ FriBidiLevel *embedding_levels = NULL;
+ FriBidiArabicProp *ar_props = NULL;
+ FriBidiCharType *bidi_types = NULL;
+ FriBidiStrIndex i,j;
+
+ len = strlen(s->text);
+ if (!(unicodestr = av_malloc_array(len, sizeof(*unicodestr)))) {
+ goto out;
+ }
+ len = fribidi_charset_to_unicode(FRIBIDI_CHAR_SET_UTF8,
+ s->text, len, unicodestr);
+
+ bidi_types = av_malloc_array(len, sizeof(*bidi_types));
+ if (!bidi_types) {
+ goto out;
+ }
+
+ fribidi_get_bidi_types(unicodestr, len, bidi_types);
+
+ embedding_levels = av_malloc_array(len, sizeof(*embedding_levels));
+ if (!embedding_levels) {
+ goto out;
+ }
+
+ if (!fribidi_get_par_embedding_levels(bidi_types, len, &direction,
+ embedding_levels)) {
+ goto out;
+ }
+
+ ar_props = av_malloc_array(len, sizeof(*ar_props));
+ if (!ar_props) {
+ goto out;
+ }
+
+ fribidi_get_joining_types(unicodestr, len, ar_props);
+ fribidi_join_arabic(bidi_types, len, embedding_levels, ar_props);
+ fribidi_shape(flags, embedding_levels, len, ar_props, unicodestr);
+
+ for (line_end = 0, line_start = 0; line_end < len; line_end++) {
+ if (is_newline(unicodestr[line_end]) || line_end == len - 1) {
+ if (!fribidi_reorder_line(flags, bidi_types,
+ line_end - line_start + 1, line_start,
+ direction, embedding_levels, unicodestr,
+ NULL)) {
+ goto out;
+ }
+ line_start = line_end + 1;
+ }
+ }
+
+ /* Remove zero-width fill chars put in by libfribidi */
+ for (i = 0, j = 0; i < len; i++)
+ if (unicodestr[i] != FRIBIDI_CHAR_FILL)
+ unicodestr[j++] = unicodestr[i];
+ len = j;
+
+ if (!(tmp = av_realloc(s->text, (len * 4 + 1) * sizeof(*s->text)))) {
+ /* Use len * 4, as a unicode character can be up to 4 bytes in UTF-8 */
+ goto out;
+ }
+
+ s->text = tmp;
+ len = fribidi_unicode_to_charset(FRIBIDI_CHAR_SET_UTF8,
+ unicodestr, len, s->text);
+ ret = 0;
+
+out:
+ av_free(unicodestr);
+ av_free(embedding_levels);
+ av_free(ar_props);
+ av_free(bidi_types);
+ return ret;
}
+#endif
static av_cold int init(AVFilterContext *ctx)
{
@@ -380,57 +589,45 @@ static av_cold int init(AVFilterContext *ctx)
DrawTextContext *s = ctx->priv;
Glyph *glyph;
- if ((err = parse_font(ctx)) < 0)
- return err;
+ if (!s->fontfile && !CONFIG_LIBFONTCONFIG) {
+ av_log(ctx, AV_LOG_ERROR, "No font filename provided\n");
+ return AVERROR(EINVAL);
+ }
if (s->textfile) {
- uint8_t *textbuf;
- size_t textbuf_size;
-
if (s->text) {
av_log(ctx, AV_LOG_ERROR,
"Both text and text file provided. Please provide only one\n");
return AVERROR(EINVAL);
}
- if ((err = av_file_map(s->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR,
- "The text file '%s' could not be read or is empty\n",
- s->textfile);
+ if ((err = load_textfile(ctx)) < 0)
return err;
- }
-
- if (textbuf_size > SIZE_MAX - 1 ||
- !(s->text = av_malloc(textbuf_size + 1))) {
- av_file_unmap(textbuf, textbuf_size);
- return AVERROR(ENOMEM);
- }
- memcpy(s->text, textbuf, textbuf_size);
- s->text[textbuf_size] = 0;
- av_file_unmap(textbuf, textbuf_size);
}
- if (!s->text) {
- av_log(ctx, AV_LOG_ERROR,
- "Either text or a valid file must be provided\n");
- return AVERROR(EINVAL);
- }
+#if CONFIG_LIBFRIBIDI
+ if (s->text_shaping)
+ if ((err = shape_text(ctx)) < 0)
+ return err;
+#endif
- if ((err = av_parse_color(s->fontcolor_rgba, s->fontcolor_string, -1, ctx))) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid font color '%s'\n", s->fontcolor_string);
- return err;
- }
+ if (s->reload && !s->textfile)
+ av_log(ctx, AV_LOG_WARNING, "No file to reload\n");
- if ((err = av_parse_color(s->boxcolor_rgba, s->boxcolor_string, -1, ctx))) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid box color '%s'\n", s->boxcolor_string);
- return err;
+ if (s->tc_opt_string) {
+ int ret = av_timecode_init_from_string(&s->tc, s->tc_rate,
+ s->tc_opt_string, ctx);
+ if (ret < 0)
+ return ret;
+ if (s->tc24hmax)
+ s->tc.flags |= AV_TIMECODE_FLAG_24HOURSMAX;
+ if (!s->text)
+ s->text = av_strdup("");
}
- if ((err = av_parse_color(s->shadowcolor_rgba, s->shadowcolor_string, -1, ctx))) {
+ if (!s->text) {
av_log(ctx, AV_LOG_ERROR,
- "Invalid shadow color '%s'\n", s->shadowcolor_string);
- return err;
+ "Either text, a valid file or a timecode must be provided\n");
+ return AVERROR(EINVAL);
}
if ((err = FT_Init_FreeType(&(s->library)))) {
@@ -439,55 +636,60 @@ static av_cold int init(AVFilterContext *ctx)
return AVERROR(EINVAL);
}
- /* load the face, and set up the encoding, which is by default UTF-8 */
- if ((err = FT_New_Face(s->library, s->fontfile, 0, &s->face))) {
- av_log(ctx, AV_LOG_ERROR, "Could not load fontface from file '%s': %s\n",
- s->fontfile, FT_ERRMSG(err));
- return AVERROR(EINVAL);
- }
+ err = load_font(ctx);
+ if (err)
+ return err;
+ if (!s->fontsize)
+ s->fontsize = 16;
if ((err = FT_Set_Pixel_Sizes(s->face, 0, s->fontsize))) {
av_log(ctx, AV_LOG_ERROR, "Could not set font size to %d pixels: %s\n",
s->fontsize, FT_ERRMSG(err));
return AVERROR(EINVAL);
}
+ if (s->borderw) {
+ if (FT_Stroker_New(s->library, &s->stroker)) {
+ av_log(ctx, AV_LOG_ERROR, "Coult not init FT stroker\n");
+ return AVERROR_EXTERNAL;
+ }
+ FT_Stroker_Set(s->stroker, s->borderw << 6, FT_STROKER_LINECAP_ROUND,
+ FT_STROKER_LINEJOIN_ROUND, 0);
+ }
+
s->use_kerning = FT_HAS_KERNING(s->face);
/* load the fallback glyph with code 0 */
load_glyph(ctx, NULL, 0);
/* set the tabsize in pixels */
- if ((err = load_glyph(ctx, &glyph, ' ') < 0)) {
+ if ((err = load_glyph(ctx, &glyph, ' ')) < 0) {
av_log(ctx, AV_LOG_ERROR, "Could not set tabsize.\n");
return err;
}
s->tabsize *= glyph->advance;
-#if !HAVE_LOCALTIME_R
- av_log(ctx, AV_LOG_WARNING, "strftime() expansion unavailable!\n");
-#endif
+ if (s->exp_mode == EXP_STRFTIME &&
+ (strchr(s->text, '%') || strchr(s->text, '\\')))
+ av_log(ctx, AV_LOG_WARNING, "expansion=strftime is deprecated.\n");
+
+ av_bprint_init(&s->expanded_text, 0, AV_BPRINT_SIZE_UNLIMITED);
+ av_bprint_init(&s->expanded_fontcolor, 0, AV_BPRINT_SIZE_UNLIMITED);
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
- AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0;
}
static int glyph_enu_free(void *opaque, void *elem)
{
+ Glyph *glyph = elem;
+
+ FT_Done_Glyph(glyph->glyph);
+ FT_Done_Glyph(glyph->border_glyph);
av_free(elem);
return 0;
}
@@ -495,325 +697,370 @@ static int glyph_enu_free(void *opaque, void *elem)
static av_cold void uninit(AVFilterContext *ctx)
{
DrawTextContext *s = ctx->priv;
- int i;
av_expr_free(s->x_pexpr);
av_expr_free(s->y_pexpr);
- av_expr_free(s->d_pexpr);
- s->x_pexpr = s->y_pexpr = s->d_pexpr = NULL;
- av_freep(&s->expanded_text);
+ s->x_pexpr = s->y_pexpr = NULL;
av_freep(&s->positions);
+ s->nb_positions = 0;
+
+
av_tree_enumerate(s->glyphs, NULL, NULL, glyph_enu_free);
av_tree_destroy(s->glyphs);
- s->glyphs = 0;
+ s->glyphs = NULL;
+
FT_Done_Face(s->face);
+ FT_Stroker_Done(s->stroker);
FT_Done_FreeType(s->library);
- for (i = 0; i < 4; i++) {
- av_freep(&s->box_line[i]);
- s->pixel_step[i] = 0;
- }
-
+ av_bprint_finalize(&s->expanded_text, NULL);
+ av_bprint_finalize(&s->expanded_fontcolor, NULL);
}
-static inline int is_newline(uint32_t c)
-{
- return c == '\n' || c == '\r' || c == '\f' || c == '\v';
-}
-
-static int dtext_prepare_text(AVFilterContext *ctx)
+static int config_input(AVFilterLink *inlink)
{
+ AVFilterContext *ctx = inlink->dst;
DrawTextContext *s = ctx->priv;
- uint32_t code = 0, prev_code = 0;
- int x = 0, y = 0, i = 0, ret;
- int text_height, baseline;
- char *text = s->text;
- uint8_t *p;
- int str_w = 0, len;
- int y_min = 32000, y_max = -32000;
- FT_Vector delta;
- Glyph *glyph = NULL, *prev_glyph = NULL;
- Glyph dummy = { 0 };
- int width = ctx->inputs[0]->w;
- int height = ctx->inputs[0]->h;
- time_t now = time(0);
- struct tm ltime;
- uint8_t *buf = s->expanded_text;
- int buf_size = s->expanded_text_size;
+ int ret;
- if (!buf)
- buf_size = 2*strlen(s->text)+1;
+ ff_draw_init(&s->dc, inlink->format, 0);
+ ff_draw_color(&s->dc, &s->fontcolor, s->fontcolor.rgba);
+ ff_draw_color(&s->dc, &s->shadowcolor, s->shadowcolor.rgba);
+ ff_draw_color(&s->dc, &s->bordercolor, s->bordercolor.rgba);
+ ff_draw_color(&s->dc, &s->boxcolor, s->boxcolor.rgba);
+
+ s->var_values[VAR_w] = s->var_values[VAR_W] = s->var_values[VAR_MAIN_W] = inlink->w;
+ s->var_values[VAR_h] = s->var_values[VAR_H] = s->var_values[VAR_MAIN_H] = inlink->h;
+ s->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? av_q2d(inlink->sample_aspect_ratio) : 1;
+ s->var_values[VAR_DAR] = (double)inlink->w / inlink->h * s->var_values[VAR_SAR];
+ s->var_values[VAR_HSUB] = 1 << s->dc.hsub_max;
+ s->var_values[VAR_VSUB] = 1 << s->dc.vsub_max;
+ s->var_values[VAR_X] = NAN;
+ s->var_values[VAR_Y] = NAN;
+ s->var_values[VAR_T] = NAN;
- localtime_r(&now, &ltime);
+ av_lfg_init(&s->prng, av_get_random_seed());
- while ((buf = av_realloc(buf, buf_size))) {
- *buf = 1;
- if (strftime(buf, buf_size, s->text, &ltime) != 0 || *buf == 0)
- break;
- buf_size *= 2;
- }
+ av_expr_free(s->x_pexpr);
+ av_expr_free(s->y_pexpr);
+ s->x_pexpr = s->y_pexpr = NULL;
- if (!buf)
- return AVERROR(ENOMEM);
- text = s->expanded_text = buf;
- s->expanded_text_size = buf_size;
-
- if ((len = strlen(text)) > s->nb_positions) {
- FT_Vector *p = av_realloc(s->positions,
- len * sizeof(*s->positions));
- if (!p) {
- av_freep(s->positions);
- s->nb_positions = 0;
- return AVERROR(ENOMEM);
- } else {
- s->positions = p;
- s->nb_positions = len;
- }
- }
+ if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
+ NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
+ (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
+ NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
- /* load and cache glyphs */
- for (i = 0, p = text; *p; i++) {
- GET_UTF8(code, *p++, continue;);
+ return AVERROR(EINVAL);
- /* get glyph */
- dummy.code = code;
- glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
- if (!glyph) {
- ret = load_glyph(ctx, &glyph, code);
- if (ret)
- return ret;
- }
+ return 0;
+}
- y_min = FFMIN(glyph->bbox.yMin, y_min);
- y_max = FFMAX(glyph->bbox.yMax, y_max);
+static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
+{
+ DrawTextContext *s = ctx->priv;
+
+ if (!strcmp(cmd, "reinit")) {
+ int ret;
+ uninit(ctx);
+ s->reinit = 1;
+ if ((ret = av_set_options_string(ctx, arg, "=", ":")) < 0)
+ return ret;
+ if ((ret = init(ctx)) < 0)
+ return ret;
+ return config_input(ctx->inputs[0]);
}
- text_height = y_max - y_min;
- baseline = y_max;
- /* compute and save position for each glyph */
- glyph = NULL;
- for (i = 0, p = text; *p; i++) {
- GET_UTF8(code, *p++, continue;);
+ return AVERROR(ENOSYS);
+}
- /* skip the \n in the sequence \r\n */
- if (prev_code == '\r' && code == '\n')
- continue;
+static int func_pict_type(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
- prev_code = code;
- if (is_newline(code)) {
- str_w = FFMAX(str_w, x - s->x);
- y += text_height;
- x = 0;
- continue;
- }
+ av_bprintf(bp, "%c", av_get_picture_type_char(s->var_values[VAR_PICT_TYPE]));
+ return 0;
+}
- /* get glyph */
- prev_glyph = glyph;
- dummy.code = code;
- glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
+static int func_pts(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+ const char *fmt;
+ double pts = s->var_values[VAR_T];
+ int ret;
- /* kerning */
- if (s->use_kerning && prev_glyph && glyph->code) {
- FT_Get_Kerning(s->face, prev_glyph->code, glyph->code,
- ft_kerning_default, &delta);
- x += delta.x >> 6;
+ fmt = argc >= 1 ? argv[0] : "flt";
+ if (argc >= 2) {
+ int64_t delta;
+ if ((ret = av_parse_time(&delta, argv[1], 1)) < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid delta '%s'\n", argv[1]);
+ return ret;
}
-
- if (x + glyph->bbox.xMax >= width) {
- str_w = FFMAX(str_w, x);
- y += text_height;
- x = 0;
+ pts += (double)delta / AV_TIME_BASE;
+ }
+ if (!strcmp(fmt, "flt")) {
+ av_bprintf(bp, "%.6f", s->var_values[VAR_T]);
+ } else if (!strcmp(fmt, "hms")) {
+ if (isnan(pts)) {
+ av_bprintf(bp, " ??:??:??.???");
+ } else {
+ int64_t ms = round(pts * 1000);
+ char sign = ' ';
+ if (ms < 0) {
+ sign = '-';
+ ms = -ms;
+ }
+ av_bprintf(bp, "%c%02d:%02d:%02d.%03d", sign,
+ (int)(ms / (60 * 60 * 1000)),
+ (int)(ms / (60 * 1000)) % 60,
+ (int)(ms / 1000) % 60,
+ (int)ms % 1000);
}
-
- /* save position */
- s->positions[i].x = x + glyph->bitmap_left;
- s->positions[i].y = y - glyph->bitmap_top + baseline;
- if (code == '\t') x = (x / s->tabsize + 1)*s->tabsize;
- else x += glyph->advance;
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid format '%s'\n", fmt);
+ return AVERROR(EINVAL);
}
+ return 0;
+}
- str_w = FFMIN(width - 1, FFMAX(str_w, x));
- y = FFMIN(y + text_height, height - 1);
+static int func_frame_num(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+
+ av_bprintf(bp, "%d", (int)s->var_values[VAR_N]);
+ return 0;
+}
- s->w = str_w;
- s->var_values[VAR_TEXT_W] = s->var_values[VAR_TW] = s->w;
- s->h = y;
- s->var_values[VAR_TEXT_H] = s->var_values[VAR_TH] = s->h;
+static int func_metadata(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+ AVDictionaryEntry *e = av_dict_get(s->metadata, argv[0], NULL, 0);
+ if (e && e->value)
+ av_bprintf(bp, "%s", e->value);
return 0;
}
+static int func_strftime(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ const char *fmt = argc ? argv[0] : "%Y-%m-%d %H:%M:%S";
+ time_t now;
+ struct tm tm;
-static int config_input(AVFilterLink *inlink)
+ time(&now);
+ if (tag == 'L')
+ localtime_r(&now, &tm);
+ else
+ tm = *gmtime_r(&now, &tm);
+ av_bprint_strftime(bp, fmt, &tm);
+ return 0;
+}
+
+static int func_eval_expr(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
{
- AVFilterContext *ctx = inlink->dst;
DrawTextContext *s = ctx->priv;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ double res;
int ret;
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ ret = av_expr_parse_and_eval(&res, argv[0], var_names, s->var_values,
+ NULL, NULL, fun2_names, fun2,
+ &s->prng, 0, ctx);
+ if (ret < 0)
+ av_log(ctx, AV_LOG_ERROR,
+ "Expression '%s' for the expr text expansion function is not valid\n",
+ argv[0]);
+ else
+ av_bprintf(bp, "%f", res);
- s->var_values[VAR_E ] = M_E;
- s->var_values[VAR_PHI] = M_PHI;
- s->var_values[VAR_PI ] = M_PI;
+ return ret;
+}
- s->var_values[VAR_MAIN_W] =
- s->var_values[VAR_MW] = ctx->inputs[0]->w;
- s->var_values[VAR_MAIN_H] =
- s->var_values[VAR_MH] = ctx->inputs[0]->h;
+static int func_eval_expr_int_format(AVFilterContext *ctx, AVBPrint *bp,
+ char *fct, unsigned argc, char **argv, int tag)
+{
+ DrawTextContext *s = ctx->priv;
+ double res;
+ int intval;
+ int ret;
+ unsigned int positions = 0;
+ char fmt_str[30] = "%";
+
+ /*
+ * argv[0] expression to be converted to `int`
+ * argv[1] format: 'x', 'X', 'd' or 'u'
+ * argv[2] positions printed (optional)
+ */
+
+ ret = av_expr_parse_and_eval(&res, argv[0], var_names, s->var_values,
+ NULL, NULL, fun2_names, fun2,
+ &s->prng, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Expression '%s' for the expr text expansion function is not valid\n",
+ argv[0]);
+ return ret;
+ }
- s->var_values[VAR_X] = 0;
- s->var_values[VAR_Y] = 0;
- s->var_values[VAR_T] = NAN;
+ if (!strchr("xXdu", argv[1][0])) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid format '%c' specified,"
+ " allowed values: 'x', 'X', 'd', 'u'\n", argv[1][0]);
+ return AVERROR(EINVAL);
+ }
- av_lfg_init(&s->prng, av_get_random_seed());
+ if (argc == 3) {
+ ret = sscanf(argv[2], "%u", &positions);
+ if (ret != 1) {
+ av_log(ctx, AV_LOG_ERROR, "expr_int_format(): Invalid number of positions"
+ " to print: '%s'\n", argv[2]);
+ return AVERROR(EINVAL);
+ }
+ }
- av_expr_free(s->x_pexpr);
- av_expr_free(s->y_pexpr);
- av_expr_free(s->d_pexpr);
- s->x_pexpr = s->y_pexpr = s->d_pexpr = NULL;
- if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 ||
- (ret = av_expr_parse(&s->d_pexpr, s->d_expr, var_names,
- NULL, NULL, fun2_names, fun2, 0, ctx)) < 0)
+ feclearexcept(FE_ALL_EXCEPT);
+ intval = res;
+ if ((ret = fetestexcept(FE_INVALID|FE_OVERFLOW|FE_UNDERFLOW))) {
+ av_log(ctx, AV_LOG_ERROR, "Conversion of floating-point result to int failed. Control register: 0x%08x. Conversion result: %d\n", ret, intval);
return AVERROR(EINVAL);
+ }
- if ((ret =
- ff_fill_line_with_color(s->box_line, s->pixel_step,
- inlink->w, s->boxcolor,
- inlink->format, s->boxcolor_rgba,
- &s->is_packed_rgb, s->rgba_map)) < 0)
- return ret;
+ if (argc == 3)
+ av_strlcatf(fmt_str, sizeof(fmt_str), "0%u", positions);
+ av_strlcatf(fmt_str, sizeof(fmt_str), "%c", argv[1][0]);
- if (!s->is_packed_rgb) {
- uint8_t *rgba = s->fontcolor_rgba;
- s->fontcolor[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
- s->fontcolor[1] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->fontcolor[2] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->fontcolor[3] = rgba[3];
- rgba = s->shadowcolor_rgba;
- s->shadowcolor[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
- s->shadowcolor[1] = RGB_TO_U_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->shadowcolor[2] = RGB_TO_V_CCIR(rgba[0], rgba[1], rgba[2], 0);
- s->shadowcolor[3] = rgba[3];
- }
+ av_log(ctx, AV_LOG_DEBUG, "Formatting value %f (expr '%s') with spec '%s'\n",
+ res, argv[0], fmt_str);
- s->draw = 1;
+ av_bprintf(bp, fmt_str, intval);
- return dtext_prepare_text(ctx);
+ return 0;
}
-#define GET_BITMAP_VAL(r, c) \
- bitmap->pixel_mode == FT_PIXEL_MODE_MONO ? \
- (bitmap->buffer[(r) * bitmap->pitch + ((c)>>3)] & (0x80 >> ((c)&7))) * 255 : \
- bitmap->buffer[(r) * bitmap->pitch + (c)]
-
-#define SET_PIXEL_YUV(frame, yuva_color, val, x, y, hsub, vsub) { \
- luma_pos = ((x) ) + ((y) ) * frame->linesize[0]; \
- alpha = yuva_color[3] * (val) * 129; \
- frame->data[0][luma_pos] = (alpha * yuva_color[0] + (255*255*129 - alpha) * frame->data[0][luma_pos] ) >> 23; \
- if (((x) & ((1<<(hsub)) - 1)) == 0 && ((y) & ((1<<(vsub)) - 1)) == 0) {\
- chroma_pos1 = ((x) >> (hsub)) + ((y) >> (vsub)) * frame->linesize[1]; \
- chroma_pos2 = ((x) >> (hsub)) + ((y) >> (vsub)) * frame->linesize[2]; \
- frame->data[1][chroma_pos1] = (alpha * yuva_color[1] + (255*255*129 - alpha) * frame->data[1][chroma_pos1]) >> 23; \
- frame->data[2][chroma_pos2] = (alpha * yuva_color[2] + (255*255*129 - alpha) * frame->data[2][chroma_pos2]) >> 23; \
- }\
-}
+static const struct drawtext_function {
+ const char *name;
+ unsigned argc_min, argc_max;
+ int tag; /**< opaque argument to func */
+ int (*func)(AVFilterContext *, AVBPrint *, char *, unsigned, char **, int);
+} functions[] = {
+ { "expr", 1, 1, 0, func_eval_expr },
+ { "e", 1, 1, 0, func_eval_expr },
+ { "expr_int_format", 2, 3, 0, func_eval_expr_int_format },
+ { "eif", 2, 3, 0, func_eval_expr_int_format },
+ { "pict_type", 0, 0, 0, func_pict_type },
+ { "pts", 0, 2, 0, func_pts },
+ { "gmtime", 0, 1, 'G', func_strftime },
+ { "localtime", 0, 1, 'L', func_strftime },
+ { "frame_num", 0, 0, 0, func_frame_num },
+ { "n", 0, 0, 0, func_frame_num },
+ { "metadata", 1, 1, 0, func_metadata },
+};
-static inline int draw_glyph_yuv(AVFrame *frame, FT_Bitmap *bitmap, unsigned int x,
- unsigned int y, unsigned int width, unsigned int height,
- const uint8_t yuva_color[4], int hsub, int vsub)
+static int eval_function(AVFilterContext *ctx, AVBPrint *bp, char *fct,
+ unsigned argc, char **argv)
{
- int r, c, alpha;
- unsigned int luma_pos, chroma_pos1, chroma_pos2;
- uint8_t src_val;
-
- for (r = 0; r < bitmap->rows && r+y < height; r++) {
- for (c = 0; c < bitmap->width && c+x < width; c++) {
- /* get intensity value in the glyph bitmap (source) */
- src_val = GET_BITMAP_VAL(r, c);
- if (!src_val)
- continue;
-
- SET_PIXEL_YUV(frame, yuva_color, src_val, c+x, y+r, hsub, vsub);
+ unsigned i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(functions); i++) {
+ if (strcmp(fct, functions[i].name))
+ continue;
+ if (argc < functions[i].argc_min) {
+ av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at least %d arguments\n",
+ fct, functions[i].argc_min);
+ return AVERROR(EINVAL);
}
+ if (argc > functions[i].argc_max) {
+ av_log(ctx, AV_LOG_ERROR, "%%{%s} requires at most %d arguments\n",
+ fct, functions[i].argc_max);
+ return AVERROR(EINVAL);
+ }
+ break;
}
-
- return 0;
-}
-
-#define SET_PIXEL_RGB(frame, rgba_color, val, x, y, pixel_step, r_off, g_off, b_off, a_off) { \
- p = frame->data[0] + (x) * pixel_step + ((y) * frame->linesize[0]); \
- alpha = rgba_color[3] * (val) * 129; \
- *(p+r_off) = (alpha * rgba_color[0] + (255*255*129 - alpha) * *(p+r_off)) >> 23; \
- *(p+g_off) = (alpha * rgba_color[1] + (255*255*129 - alpha) * *(p+g_off)) >> 23; \
- *(p+b_off) = (alpha * rgba_color[2] + (255*255*129 - alpha) * *(p+b_off)) >> 23; \
+ if (i >= FF_ARRAY_ELEMS(functions)) {
+ av_log(ctx, AV_LOG_ERROR, "%%{%s} is not known\n", fct);
+ return AVERROR(EINVAL);
+ }
+ return functions[i].func(ctx, bp, fct, argc, argv, functions[i].tag);
}
-static inline int draw_glyph_rgb(AVFrame *frame, FT_Bitmap *bitmap,
- unsigned int x, unsigned int y,
- unsigned int width, unsigned int height, int pixel_step,
- const uint8_t rgba_color[4], const uint8_t rgba_map[4])
+static int expand_function(AVFilterContext *ctx, AVBPrint *bp, char **rtext)
{
- int r, c, alpha;
- uint8_t *p;
- uint8_t src_val;
-
- for (r = 0; r < bitmap->rows && r+y < height; r++) {
- for (c = 0; c < bitmap->width && c+x < width; c++) {
- /* get intensity value in the glyph bitmap (source) */
- src_val = GET_BITMAP_VAL(r, c);
- if (!src_val)
- continue;
+ const char *text = *rtext;
+ char *argv[16] = { NULL };
+ unsigned argc = 0, i;
+ int ret;
- SET_PIXEL_RGB(frame, rgba_color, src_val, c+x, y+r, pixel_step,
- rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]);
+ if (*text != '{') {
+ av_log(ctx, AV_LOG_ERROR, "Stray %% near '%s'\n", text);
+ return AVERROR(EINVAL);
+ }
+ text++;
+ while (1) {
+ if (!(argv[argc++] = av_get_token(&text, ":}"))) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ if (!*text) {
+ av_log(ctx, AV_LOG_ERROR, "Unterminated %%{} near '%s'\n", *rtext);
+ ret = AVERROR(EINVAL);
+ goto end;
}
+ if (argc == FF_ARRAY_ELEMS(argv))
+ av_freep(&argv[--argc]); /* error will be caught later */
+ if (*text == '}')
+ break;
+ text++;
}
- return 0;
+ if ((ret = eval_function(ctx, bp, argv[0], argc - 1, argv + 1)) < 0)
+ goto end;
+ ret = 0;
+ *rtext = (char *)text + 1;
+
+end:
+ for (i = 0; i < argc; i++)
+ av_freep(&argv[i]);
+ return ret;
}
-static inline void drawbox(AVFrame *frame, unsigned int x, unsigned int y,
- unsigned int width, unsigned int height,
- uint8_t *line[4], int pixel_step[4], uint8_t color[4],
- int hsub, int vsub, int is_rgba_packed, uint8_t rgba_map[4])
+static int expand_text(AVFilterContext *ctx, char *text, AVBPrint *bp)
{
- int i, j, alpha;
-
- if (color[3] != 0xFF) {
- if (is_rgba_packed) {
- uint8_t *p;
- for (j = 0; j < height; j++)
- for (i = 0; i < width; i++)
- SET_PIXEL_RGB(frame, color, 255, i+x, y+j, pixel_step[0],
- rgba_map[0], rgba_map[1], rgba_map[2], rgba_map[3]);
+ int ret;
+
+ av_bprint_clear(bp);
+ while (*text) {
+ if (*text == '\\' && text[1]) {
+ av_bprint_chars(bp, text[1], 1);
+ text += 2;
+ } else if (*text == '%') {
+ text++;
+ if ((ret = expand_function(ctx, bp, &text)) < 0)
+ return ret;
} else {
- unsigned int luma_pos, chroma_pos1, chroma_pos2;
- for (j = 0; j < height; j++)
- for (i = 0; i < width; i++)
- SET_PIXEL_YUV(frame, color, 255, i+x, y+j, hsub, vsub);
+ av_bprint_chars(bp, *text, 1);
+ text++;
}
- } else {
- ff_draw_rectangle(frame->data, frame->linesize,
- line, pixel_step, hsub, vsub,
- x, y, width, height);
}
+ if (!av_bprint_is_complete(bp))
+ return AVERROR(ENOMEM);
+ return 0;
}
static int draw_glyphs(DrawTextContext *s, AVFrame *frame,
- int width, int height, const uint8_t rgbcolor[4], const uint8_t yuvcolor[4], int x, int y)
+ int width, int height,
+ FFDrawColor *color, int x, int y, int borderw)
{
- char *text = HAVE_LOCALTIME_R ? s->expanded_text : s->text;
+ char *text = s->expanded_text.str;
uint32_t code = 0;
- int i;
+ int i, x1, y1;
uint8_t *p;
Glyph *glyph = NULL;
for (i = 0, p = text; *p; i++) {
+ FT_Bitmap bitmap;
Glyph dummy = { 0 };
GET_UTF8(code, *p++, continue;);
@@ -824,19 +1071,21 @@ static int draw_glyphs(DrawTextContext *s, AVFrame *frame,
dummy.code = code;
glyph = av_tree_find(s->glyphs, &dummy, (void *)glyph_cmp, NULL);
+ bitmap = borderw ? glyph->border_bitmap : glyph->bitmap;
+
if (glyph->bitmap.pixel_mode != FT_PIXEL_MODE_MONO &&
glyph->bitmap.pixel_mode != FT_PIXEL_MODE_GRAY)
return AVERROR(EINVAL);
- if (s->is_packed_rgb) {
- draw_glyph_rgb(frame, &glyph->bitmap,
- s->positions[i].x+x, s->positions[i].y+y, width, height,
- s->pixel_step[0], rgbcolor, s->rgba_map);
- } else {
- draw_glyph_yuv(frame, &glyph->bitmap,
- s->positions[i].x+x, s->positions[i].y+y, width, height,
- yuvcolor, s->hsub, s->vsub);
- }
+ x1 = s->positions[i].x+s->x+x - borderw;
+ y1 = s->positions[i].y+s->y+y - borderw;
+
+ ff_blend_mask(&s->dc, color,
+ frame->data, frame->linesize, width, height,
+ bitmap.buffer, bitmap.pitch,
+ bitmap.width, bitmap.rows,
+ bitmap.pixel_mode == FT_PIXEL_MODE_MONO ? 0 : 3,
+ 0, x1, y1);
}
return 0;
@@ -846,107 +1095,217 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
int width, int height)
{
DrawTextContext *s = ctx->priv;
- int ret;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ uint32_t code = 0, prev_code = 0;
+ int x = 0, y = 0, i = 0, ret;
+ int max_text_line_w = 0, len;
+ int box_w, box_h;
+ char *text;
+ uint8_t *p;
+ int y_min = 32000, y_max = -32000;
+ int x_min = 32000, x_max = -32000;
+ FT_Vector delta;
+ Glyph *glyph = NULL, *prev_glyph = NULL;
+ Glyph dummy = { 0 };
+
+ time_t now = time(0);
+ struct tm ltime;
+ AVBPrint *bp = &s->expanded_text;
+
+ av_bprint_clear(bp);
+
+ if(s->basetime != AV_NOPTS_VALUE)
+ now= frame->pts*av_q2d(ctx->inputs[0]->time_base) + s->basetime/1000000;
+
+ switch (s->exp_mode) {
+ case EXP_NONE:
+ av_bprintf(bp, "%s", s->text);
+ break;
+ case EXP_NORMAL:
+ if ((ret = expand_text(ctx, s->text, &s->expanded_text)) < 0)
+ return ret;
+ break;
+ case EXP_STRFTIME:
+ localtime_r(&now, &ltime);
+ av_bprint_strftime(bp, s->text, &ltime);
+ break;
+ }
+
+ if (s->tc_opt_string) {
+ char tcbuf[AV_TIMECODE_STR_SIZE];
+ av_timecode_make_string(&s->tc, tcbuf, inlink->frame_count);
+ av_bprint_clear(bp);
+ av_bprintf(bp, "%s%s", s->text, tcbuf);
+ }
+
+ if (!av_bprint_is_complete(bp))
+ return AVERROR(ENOMEM);
+ text = s->expanded_text.str;
+ if ((len = s->expanded_text.len) > s->nb_positions) {
+ if (!(s->positions =
+ av_realloc(s->positions, len*sizeof(*s->positions))))
+ return AVERROR(ENOMEM);
+ s->nb_positions = len;
+ }
+
+ if (s->fontcolor_expr[0]) {
+ /* If expression is set, evaluate and replace the static value */
+ av_bprint_clear(&s->expanded_fontcolor);
+ if ((ret = expand_text(ctx, s->fontcolor_expr, &s->expanded_fontcolor)) < 0)
+ return ret;
+ if (!av_bprint_is_complete(&s->expanded_fontcolor))
+ return AVERROR(ENOMEM);
+ av_log(s, AV_LOG_DEBUG, "Evaluated fontcolor is '%s'\n", s->expanded_fontcolor.str);
+ ret = av_parse_color(s->fontcolor.rgba, s->expanded_fontcolor.str, -1, s);
+ if (ret)
+ return ret;
+ ff_draw_color(&s->dc, &s->fontcolor, s->fontcolor.rgba);
+ }
+
+ x = 0;
+ y = 0;
+
+ /* load and cache glyphs */
+ for (i = 0, p = text; *p; i++) {
+ GET_UTF8(code, *p++, continue;);
+
+ /* get glyph */
+ dummy.code = code;
+ glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
+ if (!glyph) {
+ load_glyph(ctx, &glyph, code);
+ }
+
+ y_min = FFMIN(glyph->bbox.yMin, y_min);
+ y_max = FFMAX(glyph->bbox.yMax, y_max);
+ x_min = FFMIN(glyph->bbox.xMin, x_min);
+ x_max = FFMAX(glyph->bbox.xMax, x_max);
+ }
+ s->max_glyph_h = y_max - y_min;
+ s->max_glyph_w = x_max - x_min;
+
+ /* compute and save position for each glyph */
+ glyph = NULL;
+ for (i = 0, p = text; *p; i++) {
+ GET_UTF8(code, *p++, continue;);
+
+ /* skip the \n in the sequence \r\n */
+ if (prev_code == '\r' && code == '\n')
+ continue;
+
+ prev_code = code;
+ if (is_newline(code)) {
+
+ max_text_line_w = FFMAX(max_text_line_w, x);
+ y += s->max_glyph_h;
+ x = 0;
+ continue;
+ }
+
+ /* get glyph */
+ prev_glyph = glyph;
+ dummy.code = code;
+ glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL);
+
+ /* kerning */
+ if (s->use_kerning && prev_glyph && glyph->code) {
+ FT_Get_Kerning(s->face, prev_glyph->code, glyph->code,
+ ft_kerning_default, &delta);
+ x += delta.x >> 6;
+ }
+
+ /* save position */
+ s->positions[i].x = x + glyph->bitmap_left;
+ s->positions[i].y = y - glyph->bitmap_top + y_max;
+ if (code == '\t') x = (x / s->tabsize + 1)*s->tabsize;
+ else x += glyph->advance;
+ }
+
+ max_text_line_w = FFMAX(x, max_text_line_w);
+
+ s->var_values[VAR_TW] = s->var_values[VAR_TEXT_W] = max_text_line_w;
+ s->var_values[VAR_TH] = s->var_values[VAR_TEXT_H] = y + s->max_glyph_h;
+
+ s->var_values[VAR_MAX_GLYPH_W] = s->max_glyph_w;
+ s->var_values[VAR_MAX_GLYPH_H] = s->max_glyph_h;
+ s->var_values[VAR_MAX_GLYPH_A] = s->var_values[VAR_ASCENT ] = y_max;
+ s->var_values[VAR_MAX_GLYPH_D] = s->var_values[VAR_DESCENT] = y_min;
+
+ s->var_values[VAR_LINE_H] = s->var_values[VAR_LH] = s->max_glyph_h;
+
+ s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
+ s->y = s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, &s->prng);
+ s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
+
+ box_w = FFMIN(width - 1 , max_text_line_w);
+ box_h = FFMIN(height - 1, y + s->max_glyph_h);
/* draw box */
if (s->draw_box)
- drawbox(frame, s->x, s->y, s->w, s->h,
- s->box_line, s->pixel_step, s->boxcolor,
- s->hsub, s->vsub, s->is_packed_rgb,
- s->rgba_map);
+ ff_blend_rectangle(&s->dc, &s->boxcolor,
+ frame->data, frame->linesize, width, height,
+ s->x, s->y, box_w, box_h);
if (s->shadowx || s->shadowy) {
if ((ret = draw_glyphs(s, frame, width, height,
- s->shadowcolor_rgba,
- s->shadowcolor,
- s->x + s->shadowx,
- s->y + s->shadowy)) < 0)
+ &s->shadowcolor, s->shadowx, s->shadowy, 0)) < 0)
return ret;
}
+ if (s->borderw) {
+ if ((ret = draw_glyphs(s, frame, width, height,
+ &s->bordercolor, 0, 0, s->borderw)) < 0)
+ return ret;
+ }
if ((ret = draw_glyphs(s, frame, width, height,
- s->fontcolor_rgba,
- s->fontcolor,
- s->x,
- s->y)) < 0)
+ &s->fontcolor, 0, 0, 0)) < 0)
return ret;
return 0;
}
-static inline int normalize_double(int *n, double d)
-{
- int ret = 0;
-
- if (isnan(d)) {
- ret = AVERROR(EINVAL);
- } else if (d > INT_MAX || d < INT_MIN) {
- *n = d > INT_MAX ? INT_MAX : INT_MIN;
- ret = AVERROR(EINVAL);
- } else
- *n = round(d);
-
- return ret;
-}
-
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
DrawTextContext *s = ctx->priv;
- int ret = 0;
+ int ret;
- if ((ret = dtext_prepare_text(ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Can't draw text\n");
- av_frame_free(&frame);
- return ret;
+ if (s->reload) {
+ if ((ret = load_textfile(ctx)) < 0)
+ return ret;
+#if CONFIG_LIBFRIBIDI
+ if (s->text_shaping)
+ if ((ret = shape_text(ctx)) < 0)
+ return ret;
+#endif
}
+ s->var_values[VAR_N] = inlink->frame_count+s->start_number;
s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(inlink->time_base);
- s->var_values[VAR_X] =
- av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
- s->var_values[VAR_Y] =
- av_expr_eval(s->y_pexpr, s->var_values, &s->prng);
- s->var_values[VAR_X] =
- av_expr_eval(s->x_pexpr, s->var_values, &s->prng);
- s->draw = av_expr_eval(s->d_pexpr, s->var_values, &s->prng);
+ s->var_values[VAR_PICT_TYPE] = frame->pict_type;
+ s->metadata = av_frame_get_metadata(frame);
- normalize_double(&s->x, s->var_values[VAR_X]);
- normalize_double(&s->y, s->var_values[VAR_Y]);
+ draw_text(ctx, frame, frame->width, frame->height);
- if (s->fix_bounds) {
- if (s->x < 0) s->x = 0;
- if (s->y < 0) s->y = 0;
- if ((unsigned)s->x + (unsigned)s->w > inlink->w)
- s->x = inlink->w - s->w;
- if ((unsigned)s->y + (unsigned)s->h > inlink->h)
- s->y = inlink->h - s->h;
- }
-
- s->x &= ~((1 << s->hsub) - 1);
- s->y &= ~((1 << s->vsub) - 1);
-
- av_dlog(ctx, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
- (int)s->var_values[VAR_N], s->var_values[VAR_T],
- s->x, s->y, s->x+s->w, s->y+s->h);
+ av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n",
+ (int)s->var_values[VAR_N], s->var_values[VAR_T],
+ (int)s->var_values[VAR_TEXT_W], (int)s->var_values[VAR_TEXT_H],
+ s->x, s->y);
- if (s->draw)
- draw_text(inlink->dst, frame, frame->width, frame->height);
-
- s->var_values[VAR_N] += 1.0;
-
- return ff_filter_frame(inlink->dst->outputs[0], frame);
+ return ff_filter_frame(outlink, frame);
}
static const AVFilterPad avfilter_vf_drawtext_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .config_props = config_input,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -967,7 +1326,8 @@ AVFilter ff_vf_drawtext = {
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_drawtext_inputs,
- .outputs = avfilter_vf_drawtext_outputs,
+ .inputs = avfilter_vf_drawtext_inputs,
+ .outputs = avfilter_vf_drawtext_outputs,
+ .process_command = command,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_edgedetect.c b/libavfilter/vf_edgedetect.c
new file mode 100644
index 0000000..7316412
--- /dev/null
+++ b/libavfilter/vf_edgedetect.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2012-2014 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Edge detection filter
+ *
+ * @see https://en.wikipedia.org/wiki/Canny_edge_detector
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum FilterMode {
+ MODE_WIRES,
+ MODE_COLORMIX,
+ NB_MODE
+};
+
+struct plane_info {
+ uint8_t *tmpbuf;
+ uint16_t *gradients;
+ char *directions;
+};
+
+typedef struct {
+ const AVClass *class;
+ struct plane_info planes[3];
+ int nb_planes;
+ double low, high;
+ uint8_t low_u8, high_u8;
+ enum FilterMode mode;
+} EdgeDetectContext;
+
+#define OFFSET(x) offsetof(EdgeDetectContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption edgedetect_options[] = {
+ { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
+ { "low", "set low threshold", OFFSET(low), AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_WIRES}, 0, NB_MODE-1, FLAGS, "mode" },
+ { "wires", "white/gray wires on black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_WIRES}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "colormix", "mix colors", 0, AV_OPT_TYPE_CONST, {.i64=MODE_COLORMIX}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(edgedetect);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ EdgeDetectContext *edgedetect = ctx->priv;
+
+ edgedetect->low_u8 = edgedetect->low * 255. + .5;
+ edgedetect->high_u8 = edgedetect->high * 255. + .5;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ const EdgeDetectContext *edgedetect = ctx->priv;
+
+ if (edgedetect->mode == MODE_WIRES) {
+ static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ } else if (edgedetect->mode == MODE_COLORMIX) {
+ static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GBRP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ } else {
+ av_assert0(0);
+ }
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ int p;
+ AVFilterContext *ctx = inlink->dst;
+ EdgeDetectContext *edgedetect = ctx->priv;
+
+ edgedetect->nb_planes = inlink->format == AV_PIX_FMT_GRAY8 ? 1 : 3;
+ for (p = 0; p < edgedetect->nb_planes; p++) {
+ struct plane_info *plane = &edgedetect->planes[p];
+
+ plane->tmpbuf = av_malloc(inlink->w * inlink->h);
+ plane->gradients = av_calloc(inlink->w * inlink->h, sizeof(*plane->gradients));
+ plane->directions = av_malloc(inlink->w * inlink->h);
+ if (!plane->tmpbuf || !plane->gradients || !plane->directions)
+ return AVERROR(ENOMEM);
+ }
+ return 0;
+}
+
+static void gaussian_blur(AVFilterContext *ctx, int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
+ memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
+ for (j = 2; j < h - 2; j++) {
+ dst[0] = src[0];
+ dst[1] = src[1];
+ for (i = 2; i < w - 2; i++) {
+ /* Gaussian mask of size 5x5 with sigma = 1.4 */
+ dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
+ + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
+ + (src[-2*src_linesize + i ] + src[2*src_linesize + i ]) * 5
+ + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
+ + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2
+
+ + (src[ -src_linesize + i-2] + src[ src_linesize + i-2]) * 4
+ + (src[ -src_linesize + i-1] + src[ src_linesize + i-1]) * 9
+ + (src[ -src_linesize + i ] + src[ src_linesize + i ]) * 12
+ + (src[ -src_linesize + i+1] + src[ src_linesize + i+1]) * 9
+ + (src[ -src_linesize + i+2] + src[ src_linesize + i+2]) * 4
+
+ + src[i-2] * 5
+ + src[i-1] * 12
+ + src[i ] * 15
+ + src[i+1] * 12
+ + src[i+2] * 5) / 159;
+ }
+ dst[i ] = src[i ];
+ dst[i + 1] = src[i + 1];
+
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+ memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
+ memcpy(dst, src, w);
+}
+
+enum {
+ DIRECTION_45UP,
+ DIRECTION_45DOWN,
+ DIRECTION_HORIZONTAL,
+ DIRECTION_VERTICAL,
+};
+
+static int get_rounded_direction(int gx, int gy)
+{
+ /* reference angles:
+ * tan( pi/8) = sqrt(2)-1
+ * tan(3pi/8) = sqrt(2)+1
+ * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
+ * <ref-angle>, or more simply Gy against <ref-angle>*Gx
+ *
+ * Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic:
+ * round((sqrt(2)-1) * (1<<16)) = 27146
+ * round((sqrt(2)+1) * (1<<16)) = 158218
+ */
+ if (gx) {
+ int tanpi8gx, tan3pi8gx;
+
+ if (gx < 0)
+ gx = -gx, gy = -gy;
+ gy <<= 16;
+ tanpi8gx = 27146 * gx;
+ tan3pi8gx = 158218 * gx;
+ if (gy > -tan3pi8gx && gy < -tanpi8gx) return DIRECTION_45UP;
+ if (gy > -tanpi8gx && gy < tanpi8gx) return DIRECTION_HORIZONTAL;
+ if (gy > tanpi8gx && gy < tan3pi8gx) return DIRECTION_45DOWN;
+ }
+ return DIRECTION_VERTICAL;
+}
+
+static void sobel(int w, int h,
+ uint16_t *dst, int dst_linesize,
+ int8_t *dir, int dir_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ for (j = 1; j < h - 1; j++) {
+ dst += dst_linesize;
+ dir += dir_linesize;
+ src += src_linesize;
+ for (i = 1; i < w - 1; i++) {
+ const int gx =
+ -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
+ -2*src[ i-1] + 2*src[ i+1]
+ -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
+ const int gy =
+ -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
+ -2*src[-src_linesize + i ] + 2*src[ src_linesize + i ]
+ -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];
+
+ dst[i] = FFABS(gx) + FFABS(gy);
+ dir[i] = get_rounded_direction(gx, gy);
+ }
+ }
+}
+
+static void non_maximum_suppression(int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const int8_t *dir, int dir_linesize,
+ const uint16_t *src, int src_linesize)
+{
+ int i, j;
+
+#define COPY_MAXIMA(ay, ax, by, bx) do { \
+ if (src[i] > src[(ay)*src_linesize + i+(ax)] && \
+ src[i] > src[(by)*src_linesize + i+(bx)]) \
+ dst[i] = av_clip_uint8(src[i]); \
+} while (0)
+
+ for (j = 1; j < h - 1; j++) {
+ dst += dst_linesize;
+ dir += dir_linesize;
+ src += src_linesize;
+ for (i = 1; i < w - 1; i++) {
+ switch (dir[i]) {
+ case DIRECTION_45UP: COPY_MAXIMA( 1, -1, -1, 1); break;
+ case DIRECTION_45DOWN: COPY_MAXIMA(-1, -1, 1, 1); break;
+ case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1, 0, 1); break;
+ case DIRECTION_VERTICAL: COPY_MAXIMA(-1, 0, 1, 0); break;
+ }
+ }
+ }
+}
+
+static void double_threshold(int low, int high, int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ for (j = 0; j < h; j++) {
+ for (i = 0; i < w; i++) {
+ if (src[i] > high) {
+ dst[i] = src[i];
+ continue;
+ }
+
+ if ((!i || i == w - 1 || !j || j == h - 1) &&
+ src[i] > low &&
+ (src[-src_linesize + i-1] > high ||
+ src[-src_linesize + i ] > high ||
+ src[-src_linesize + i+1] > high ||
+ src[ i-1] > high ||
+ src[ i+1] > high ||
+ src[ src_linesize + i-1] > high ||
+ src[ src_linesize + i ] > high ||
+ src[ src_linesize + i+1] > high))
+ dst[i] = src[i];
+ else
+ dst[i] = 0;
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static void color_mix(int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize)
+{
+ int i, j;
+
+ for (j = 0; j < h; j++) {
+ for (i = 0; i < w; i++)
+ dst[i] = (dst[i] + src[i]) >> 1;
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ EdgeDetectContext *edgedetect = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int p, direct = 0;
+ AVFrame *out;
+
+ if (edgedetect->mode != MODE_COLORMIX && av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ for (p = 0; p < edgedetect->nb_planes; p++) {
+ struct plane_info *plane = &edgedetect->planes[p];
+ uint8_t *tmpbuf = plane->tmpbuf;
+ uint16_t *gradients = plane->gradients;
+ int8_t *directions = plane->directions;
+
+ /* gaussian filter to reduce noise */
+ gaussian_blur(ctx, inlink->w, inlink->h,
+ tmpbuf, inlink->w,
+ in->data[p], in->linesize[p]);
+
+ /* compute the 16-bits gradients and directions for the next step */
+ sobel(inlink->w, inlink->h,
+ gradients, inlink->w,
+ directions,inlink->w,
+ tmpbuf, inlink->w);
+
+ /* non_maximum_suppression() will actually keep & clip what's necessary and
+ * ignore the rest, so we need a clean output buffer */
+ memset(tmpbuf, 0, inlink->w * inlink->h);
+ non_maximum_suppression(inlink->w, inlink->h,
+ tmpbuf, inlink->w,
+ directions,inlink->w,
+ gradients, inlink->w);
+
+ /* keep high values, or low values surrounded by high values */
+ double_threshold(edgedetect->low_u8, edgedetect->high_u8,
+ inlink->w, inlink->h,
+ out->data[p], out->linesize[p],
+ tmpbuf, inlink->w);
+
+ if (edgedetect->mode == MODE_COLORMIX) {
+ color_mix(inlink->w, inlink->h,
+ out->data[p], out->linesize[p],
+ in->data[p], in->linesize[p]);
+ }
+ }
+
+ if (!direct)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int p;
+ EdgeDetectContext *edgedetect = ctx->priv;
+
+ for (p = 0; p < edgedetect->nb_planes; p++) {
+ struct plane_info *plane = &edgedetect->planes[p];
+ av_freep(&plane->tmpbuf);
+ av_freep(&plane->gradients);
+ av_freep(&plane->directions);
+ }
+}
+
+static const AVFilterPad edgedetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad edgedetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_edgedetect = {
+ .name = "edgedetect",
+ .description = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
+ .priv_size = sizeof(EdgeDetectContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = edgedetect_inputs,
+ .outputs = edgedetect_outputs,
+ .priv_class = &edgedetect_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_elbg.c b/libavfilter/vf_elbg.c
new file mode 100644
index 0000000..be0885d
--- /dev/null
+++ b/libavfilter/vf_elbg.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * video quantizer filter based on ELBG
+ */
+
+#include "libavcodec/elbg.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/random_seed.h"
+
+#include "avfilter.h"
+#include "drawutils.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct ColorContext {
+ const AVClass *class;
+ AVLFG lfg;
+ unsigned int lfg_seed;
+ int max_steps_nb;
+ int *codeword;
+ int codeword_length;
+ int *codeword_closest_codebook_idxs;
+ int *codebook;
+ int codebook_length;
+ const AVPixFmtDescriptor *pix_desc;
+ uint8_t rgba_map[4];
+} ELBGContext;
+
+#define OFFSET(x) offsetof(ELBGContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption elbg_options[] = {
+ { "codebook_length", "set codebook length", OFFSET(codebook_length), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, INT_MAX, FLAGS },
+ { "l", "set codebook length", OFFSET(codebook_length), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, INT_MAX, FLAGS },
+ { "nb_steps", "set max number of steps used to compute the mapping", OFFSET(max_steps_nb), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, INT_MAX, FLAGS },
+ { "n", "set max number of steps used to compute the mapping", OFFSET(max_steps_nb), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, INT_MAX, FLAGS },
+ { "seed", "set the random seed", OFFSET(lfg_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { "s", "set the random seed", OFFSET(lfg_seed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, UINT32_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(elbg);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ELBGContext *elbg = ctx->priv;
+
+ if (elbg->lfg_seed == -1)
+ elbg->lfg_seed = av_get_random_seed();
+
+ av_lfg_init(&elbg->lfg, elbg->lfg_seed);
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+#define NB_COMPONENTS 3
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ELBGContext *elbg = ctx->priv;
+
+ elbg->pix_desc = av_pix_fmt_desc_get(inlink->format);
+ elbg->codeword_length = inlink->w * inlink->h;
+ elbg->codeword = av_realloc_f(elbg->codeword, elbg->codeword_length,
+ NB_COMPONENTS * sizeof(*elbg->codeword));
+ if (!elbg->codeword)
+ return AVERROR(ENOMEM);
+
+ elbg->codeword_closest_codebook_idxs =
+ av_realloc_f(elbg->codeword_closest_codebook_idxs, elbg->codeword_length,
+ sizeof(*elbg->codeword_closest_codebook_idxs));
+ if (!elbg->codeword_closest_codebook_idxs)
+ return AVERROR(ENOMEM);
+
+ elbg->codebook = av_realloc_f(elbg->codebook, elbg->codebook_length,
+ NB_COMPONENTS * sizeof(*elbg->codebook));
+ if (!elbg->codebook)
+ return AVERROR(ENOMEM);
+
+ ff_fill_rgba_map(elbg->rgba_map, inlink->format);
+
+ return 0;
+}
+
+#define R 0
+#define G 1
+#define B 2
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ ELBGContext *elbg = inlink->dst->priv;
+ int i, j, k;
+ uint8_t *p, *p0;
+
+ const uint8_t r_idx = elbg->rgba_map[R];
+ const uint8_t g_idx = elbg->rgba_map[G];
+ const uint8_t b_idx = elbg->rgba_map[B];
+
+ /* build the codeword */
+ p0 = frame->data[0];
+ k = 0;
+ for (i = 0; i < inlink->h; i++) {
+ p = p0;
+ for (j = 0; j < inlink->w; j++) {
+ elbg->codeword[k++] = p[r_idx];
+ elbg->codeword[k++] = p[g_idx];
+ elbg->codeword[k++] = p[b_idx];
+ p += elbg->pix_desc->nb_components;
+ }
+ p0 += frame->linesize[0];
+ }
+
+ /* compute the codebook */
+ avpriv_init_elbg(elbg->codeword, NB_COMPONENTS, elbg->codeword_length,
+ elbg->codebook, elbg->codebook_length, elbg->max_steps_nb,
+ elbg->codeword_closest_codebook_idxs, &elbg->lfg);
+ avpriv_do_elbg(elbg->codeword, NB_COMPONENTS, elbg->codeword_length,
+ elbg->codebook, elbg->codebook_length, elbg->max_steps_nb,
+ elbg->codeword_closest_codebook_idxs, &elbg->lfg);
+
+ /* fill the output with the codebook values */
+ p0 = frame->data[0];
+
+ k = 0;
+ for (i = 0; i < inlink->h; i++) {
+ p = p0;
+ for (j = 0; j < inlink->w; j++) {
+ int cb_idx = NB_COMPONENTS * elbg->codeword_closest_codebook_idxs[k++];
+ p[r_idx] = elbg->codebook[cb_idx];
+ p[g_idx] = elbg->codebook[cb_idx+1];
+ p[b_idx] = elbg->codebook[cb_idx+2];
+ p += elbg->pix_desc->nb_components;
+ }
+ p0 += frame->linesize[0];
+ }
+
+ return ff_filter_frame(inlink->dst->outputs[0], frame);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ELBGContext *elbg = ctx->priv;
+
+ av_freep(&elbg->codebook);
+ av_freep(&elbg->codeword);
+ av_freep(&elbg->codeword_closest_codebook_idxs);
+}
+
+static const AVFilterPad elbg_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad elbg_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_elbg = {
+ .name = "elbg",
+ .description = NULL_IF_CONFIG_SMALL("Apply posterize effect, using the ELBG algorithm."),
+ .priv_size = sizeof(ELBGContext),
+ .priv_class = &elbg_class,
+ .query_formats = query_formats,
+ .init = init,
+ .uninit = uninit,
+ .inputs = elbg_inputs,
+ .outputs = elbg_outputs,
+};
diff --git a/libavfilter/vf_extractplanes.c b/libavfilter/vf_extractplanes.c
new file mode 100644
index 0000000..fadd2dd
--- /dev/null
+++ b/libavfilter/vf_extractplanes.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "internal.h"
+
+#define PLANE_R 0x01
+#define PLANE_G 0x02
+#define PLANE_B 0x04
+#define PLANE_A 0x08
+#define PLANE_Y 0x10
+#define PLANE_U 0x20
+#define PLANE_V 0x40
+
+typedef struct {
+ const AVClass *class;
+ int requested_planes;
+ int map[4];
+ int linesize[4];
+ int is_packed_rgb;
+ int depth;
+ int step;
+} ExtractPlanesContext;
+
+#define OFFSET(x) offsetof(ExtractPlanesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption extractplanes_options[] = {
+ { "planes", "set planes", OFFSET(requested_planes), AV_OPT_TYPE_FLAGS, {.i64=1}, 1, 0xff, FLAGS, "flags"},
+ { "y", "set luma plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_Y}, 0, 0, FLAGS, "flags"},
+ { "u", "set u plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_U}, 0, 0, FLAGS, "flags"},
+ { "v", "set v plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_V}, 0, 0, FLAGS, "flags"},
+ { "r", "set red plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_R}, 0, 0, FLAGS, "flags"},
+ { "g", "set green plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_G}, 0, 0, FLAGS, "flags"},
+ { "b", "set blue plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_B}, 0, 0, FLAGS, "flags"},
+ { "a", "set alpha plane", 0, AV_OPT_TYPE_CONST, {.i64=PLANE_A}, 0, 0, FLAGS, "flags"},
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(extractplanes);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat in_pixfmts[] = {
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
+ AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUVA420P16BE,
+ AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUVA422P16LE,
+ AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUVA422P16BE,
+ AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
+ AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUVA444P16BE,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY8A,
+ AV_PIX_FMT_GRAY16LE, AV_PIX_FMT_GRAY16BE,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB48LE, AV_PIX_FMT_BGR48LE,
+ AV_PIX_FMT_RGB48BE, AV_PIX_FMT_BGR48BE,
+ AV_PIX_FMT_RGBA64LE, AV_PIX_FMT_BGRA64LE,
+ AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_BGRA64BE,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GBRP16LE, AV_PIX_FMT_GBRP16BE,
+ AV_PIX_FMT_GBRAP16LE, AV_PIX_FMT_GBRAP16BE,
+ AV_PIX_FMT_NONE,
+ };
+ static const enum AVPixelFormat out8_pixfmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out16le_pixfmts[] = { AV_PIX_FMT_GRAY16LE, AV_PIX_FMT_NONE };
+ static const enum AVPixelFormat out16be_pixfmts[] = { AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_NONE };
+ const enum AVPixelFormat *out_pixfmts;
+ const AVPixFmtDescriptor *desc;
+ AVFilterFormats *avff;
+ int i, depth = 0, be = 0;
+
+ if (!ctx->inputs[0]->in_formats ||
+ !ctx->inputs[0]->in_formats->nb_formats) {
+ return AVERROR(EAGAIN);
+ }
+
+ if (!ctx->inputs[0]->out_formats)
+ ff_formats_ref(ff_make_format_list(in_pixfmts), &ctx->inputs[0]->out_formats);
+
+ avff = ctx->inputs[0]->in_formats;
+ desc = av_pix_fmt_desc_get(avff->formats[0]);
+ depth = desc->comp[0].depth_minus1;
+ be = desc->flags & AV_PIX_FMT_FLAG_BE;
+ for (i = 1; i < avff->nb_formats; i++) {
+ desc = av_pix_fmt_desc_get(avff->formats[i]);
+ if (depth != desc->comp[0].depth_minus1 ||
+ be != (desc->flags & AV_PIX_FMT_FLAG_BE)) {
+ return AVERROR(EAGAIN);
+ }
+ }
+
+ if (depth == 7)
+ out_pixfmts = out8_pixfmts;
+ else if (be)
+ out_pixfmts = out16be_pixfmts;
+ else
+ out_pixfmts = out16le_pixfmts;
+
+ for (i = 0; i < ctx->nb_outputs; i++)
+ ff_formats_ref(ff_make_format_list(out_pixfmts), &ctx->outputs[i]->in_formats);
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ExtractPlanesContext *e = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int plane_avail, ret, i;
+ uint8_t rgba_map[4];
+
+ plane_avail = ((desc->flags & AV_PIX_FMT_FLAG_RGB) ? PLANE_R|PLANE_G|PLANE_B :
+ PLANE_Y |
+ ((desc->nb_components > 2) ? PLANE_U|PLANE_V : 0)) |
+ ((desc->flags & AV_PIX_FMT_FLAG_ALPHA) ? PLANE_A : 0);
+ if (e->requested_planes & ~plane_avail) {
+ av_log(ctx, AV_LOG_ERROR, "Requested planes not available.\n");
+ return AVERROR(EINVAL);
+ }
+ if ((ret = av_image_fill_linesizes(e->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ e->depth = (desc->comp[0].depth_minus1 + 1) >> 3;
+ e->step = av_get_padded_bits_per_pixel(desc) >> 3;
+ e->is_packed_rgb = !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
+ if (desc->flags & AV_PIX_FMT_FLAG_RGB) {
+ ff_fill_rgba_map(rgba_map, inlink->format);
+ for (i = 0; i < 4; i++)
+ e->map[i] = rgba_map[e->map[i]];
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ ExtractPlanesContext *e = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int output = outlink->srcpad - ctx->output_pads;
+
+ if (e->map[output] == 1 || e->map[output] == 2) {
+ outlink->h = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ outlink->w = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ }
+
+ return 0;
+}
+
+static void extract_from_packed(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int width, int height,
+ int depth, int step, int comp)
+{
+ int x, y;
+
+ for (y = 0; y < height; y++) {
+ switch (depth) {
+ case 1:
+ for (x = 0; x < width; x++)
+ dst[x] = src[x * step + comp];
+ break;
+ case 2:
+ for (x = 0; x < width; x++) {
+ dst[x * 2 ] = src[x * step + comp * 2 ];
+ dst[x * 2 + 1] = src[x * step + comp * 2 + 1];
+ }
+ break;
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ExtractPlanesContext *e = ctx->priv;
+ int i, eof = 0, ret = 0;
+
+ for (i = 0; i < ctx->nb_outputs; i++) {
+ AVFilterLink *outlink = ctx->outputs[i];
+ const int idx = e->map[i];
+ AVFrame *out;
+
+ if (outlink->closed)
+ continue;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_frame_copy_props(out, frame);
+
+ if (e->is_packed_rgb) {
+ extract_from_packed(out->data[0], out->linesize[0],
+ frame->data[0], frame->linesize[0],
+ outlink->w, outlink->h,
+ e->depth,
+ e->step, idx);
+ } else {
+ av_image_copy_plane(out->data[0], out->linesize[0],
+ frame->data[idx], frame->linesize[idx],
+ e->linesize[idx], outlink->h);
+ }
+
+ ret = ff_filter_frame(outlink, out);
+ if (ret == AVERROR_EOF)
+ eof++;
+ else if (ret < 0)
+ break;
+ }
+ av_frame_free(&frame);
+
+ if (eof == ctx->nb_outputs)
+ ret = AVERROR_EOF;
+ else if (ret == AVERROR_EOF)
+ ret = 0;
+ return ret;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ExtractPlanesContext *e = ctx->priv;
+ int planes = (e->requested_planes & 0xf) | (e->requested_planes >> 4);
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ char *name;
+ AVFilterPad pad = { 0 };
+
+ if (!(planes & (1 << i)))
+ continue;
+
+ name = av_asprintf("out%d", ctx->nb_outputs);
+ if (!name)
+ return AVERROR(ENOMEM);
+ e->map[ctx->nb_outputs] = i;
+ pad.name = name;
+ pad.type = AVMEDIA_TYPE_VIDEO;
+ pad.config_props = config_output;
+
+ ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->nb_outputs; i++)
+ av_freep(&ctx->output_pads[i].name);
+}
+
+static const AVFilterPad extractplanes_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_extractplanes = {
+ .name = "extractplanes",
+ .description = NULL_IF_CONFIG_SMALL("Extract planes as grayscale frames."),
+ .priv_size = sizeof(ExtractPlanesContext),
+ .priv_class = &extractplanes_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = extractplanes_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+
+#if CONFIG_ALPHAEXTRACT_FILTER
+
+static av_cold int init_alphaextract(AVFilterContext *ctx)
+{
+ ExtractPlanesContext *e = ctx->priv;
+
+ e->requested_planes = PLANE_A;
+
+ return init(ctx);
+}
+
+AVFilter ff_vf_alphaextract = {
+ .name = "alphaextract",
+ .description = NULL_IF_CONFIG_SMALL("Extract an alpha channel as a "
+ "grayscale image component."),
+ .priv_size = sizeof(ExtractPlanesContext),
+ .init = init_alphaextract,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = extractplanes_inputs,
+ .outputs = NULL,
+ .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif /* CONFIG_ALPHAEXTRACT_FILTER */
diff --git a/libavfilter/vf_fade.c b/libavfilter/vf_fade.c
index eac0c2c..80ce75d 100644
--- a/libavfilter/vf_fade.c
+++ b/libavfilter/vf_fade.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Brandon Mintern
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,14 +25,27 @@
* based heavily on vf_negate.c by Bobby Bingham
*/
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
#include "libavutil/common.h"
+#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+#define Y 0
+#define U 1
+#define V 2
+
#define FADE_IN 0
#define FADE_OUT 1
@@ -41,8 +54,15 @@ typedef struct FadeContext {
int type;
int factor, fade_per_frame;
int start_frame, nb_frames;
- unsigned int frame_index, stop_frame;
int hsub, vsub, bpp;
+ unsigned int black_level, black_level_scaled;
+ uint8_t is_packed_rgb;
+ uint8_t rgba_map[4];
+ int alpha;
+ uint64_t start_time, duration;
+ enum {VF_FADE_WAITING=0, VF_FADE_FADING, VF_FADE_DONE} fade_state;
+ uint8_t color_rgba[4]; ///< fade color
+ int black_fade; ///< if color_rgba is black
} FadeContext;
static av_cold int init(AVFilterContext *ctx)
@@ -50,36 +70,66 @@ static av_cold int init(AVFilterContext *ctx)
FadeContext *s = ctx->priv;
s->fade_per_frame = (1 << 16) / s->nb_frames;
- if (s->type == FADE_IN) {
- s->factor = 0;
- } else if (s->type == FADE_OUT) {
- s->fade_per_frame = -s->fade_per_frame;
- s->factor = (1 << 16);
+ s->fade_state = VF_FADE_WAITING;
+
+ if (s->duration != 0) {
+ // If duration (seconds) is non-zero, assume that we are not fading based on frames
+ s->nb_frames = 0; // Mostly to clean up logging
}
- s->stop_frame = s->start_frame + s->nb_frames;
- av_log(ctx, AV_LOG_VERBOSE,
- "type:%s start_frame:%d nb_frames:%d\n",
- s->type == FADE_IN ? "in" : "out", s->start_frame,
- s->nb_frames);
+ // Choose what to log. If both time-based and frame-based options, both lines will be in the log
+ if (s->start_frame || s->nb_frames) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "type:%s start_frame:%d nb_frames:%d alpha:%d\n",
+ s->type == FADE_IN ? "in" : "out", s->start_frame,
+ s->nb_frames,s->alpha);
+ }
+ if (s->start_time || s->duration) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "type:%s start_time:%f duration:%f alpha:%d\n",
+ s->type == FADE_IN ? "in" : "out", (s->start_time / (double)AV_TIME_BASE),
+ (s->duration / (double)AV_TIME_BASE),s->alpha);
+ }
+
+ s->black_fade = !memcmp(s->color_rgba, "\x00\x00\x00\xff", 4);
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
+ const FadeContext *s = ctx->priv;
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat pix_fmts_rgb[] = {
AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
AV_PIX_FMT_NONE
};
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ if (s->black_fade)
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ else
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts_rgb));
return 0;
}
+const static enum AVPixelFormat studio_level_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_NONE
+};
+
static int config_props(AVFilterLink *inlink)
{
FadeContext *s = inlink->dst->priv;
@@ -89,6 +139,56 @@ static int config_props(AVFilterLink *inlink)
s->vsub = pixdesc->log2_chroma_h;
s->bpp = av_get_bits_per_pixel(pixdesc) >> 3;
+ s->alpha &= !!(pixdesc->flags & AV_PIX_FMT_FLAG_ALPHA);
+ s->is_packed_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
+
+ /* use CCIR601/709 black level for studio-level pixel non-alpha components */
+ s->black_level =
+ ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !s->alpha ? 16 : 0;
+ /* 32768 = 1 << 15, it is an integer representation
+ * of 0.5 and is for rounding. */
+ s->black_level_scaled = (s->black_level << 16) + 32768;
+ return 0;
+}
+
+static av_always_inline void filter_rgb(FadeContext *s, const AVFrame *frame,
+ int slice_start, int slice_end,
+ int do_alpha, int step)
+{
+ int i, j;
+ const uint8_t r_idx = s->rgba_map[R];
+ const uint8_t g_idx = s->rgba_map[G];
+ const uint8_t b_idx = s->rgba_map[B];
+ const uint8_t a_idx = s->rgba_map[A];
+ const uint8_t *c = s->color_rgba;
+
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[0] + i * frame->linesize[0];
+ for (j = 0; j < frame->width; j++) {
+#define INTERP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)p[c_name] - (int)c[c_idx]) * s->factor + (1<<15)) >> 16)
+ p[r_idx] = INTERP(r_idx, 0);
+ p[g_idx] = INTERP(g_idx, 1);
+ p[b_idx] = INTERP(b_idx, 2);
+ if (do_alpha)
+ p[a_idx] = INTERP(a_idx, 3);
+ p += step;
+ }
+ }
+}
+
+static int filter_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
+
+ if (s->alpha) filter_rgb(s, frame, slice_start, slice_end, 1, 4);
+ else if (s->bpp == 3) filter_rgb(s, frame, slice_start, slice_end, 0, 3);
+ else if (s->bpp == 4) filter_rgb(s, frame, slice_start, slice_end, 0, 4);
+ else av_assert0(0);
+
return 0;
}
@@ -97,9 +197,8 @@ static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr,
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
- int slice_h = frame->height / nb_jobs;
- int slice_start = jobnr * slice_h;
- int slice_end = (jobnr == nb_jobs - 1) ? frame->height : (jobnr + 1) * slice_h;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
int i, j;
for (i = slice_start; i < slice_end; i++) {
@@ -108,7 +207,7 @@ static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr,
/* s->factor is using 16 lower-order bits for decimal
* places. 32768 = 1 << 15, it is an integer representation
* of 0.5 and is for rounding. */
- *p = (*p * s->factor + 32768) >> 16;
+ *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
p++;
}
}
@@ -121,15 +220,16 @@ static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
- int slice_h = FFALIGN(frame->height / nb_jobs, 1 << s->vsub);
- int slice_start = jobnr * slice_h;
- int slice_end = (jobnr == nb_jobs - 1) ? frame->height : (jobnr + 1) * slice_h;
int i, j, plane;
+ const int width = FF_CEIL_RSHIFT(frame->width, s->hsub);
+ const int height= FF_CEIL_RSHIFT(frame->height, s->vsub);
+ int slice_start = (height * jobnr ) / nb_jobs;
+ int slice_end = (height * (jobnr+1)) / nb_jobs;
for (plane = 1; plane < 3; plane++) {
for (i = slice_start; i < slice_end; i++) {
- uint8_t *p = frame->data[plane] + (i >> s->vsub) * frame->linesize[plane];
- for (j = 0; j < frame->width >> s->hsub; j++) {
+ uint8_t *p = frame->data[plane] + i * frame->linesize[plane];
+ for (j = 0; j < width; j++) {
/* 8421367 = ((128 << 1) + 1) << 15. It is an integer
* representation of 128.5. The .5 is for rounding
* purposes. */
@@ -142,60 +242,148 @@ static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
return 0;
}
+static int filter_slice_alpha(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ FadeContext *s = ctx->priv;
+ AVFrame *frame = arg;
+ int plane = s->is_packed_rgb ? 0 : A;
+ int slice_start = (frame->height * jobnr ) / nb_jobs;
+ int slice_end = (frame->height * (jobnr+1)) / nb_jobs;
+ int i, j;
+
+ for (i = slice_start; i < slice_end; i++) {
+ uint8_t *p = frame->data[plane] + i * frame->linesize[plane] + s->is_packed_rgb*s->rgba_map[A];
+ int step = s->is_packed_rgb ? 4 : 1;
+ for (j = 0; j < frame->width; j++) {
+ /* s->factor is using 16 lower-order bits for decimal
+ * places. 32768 = 1 << 15, it is an integer representation
+ * of 0.5 and is for rounding. */
+ *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16;
+ p += step;
+ }
+ }
+
+ return 0;
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FadeContext *s = ctx->priv;
+ double frame_timestamp = frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base);
- if (s->factor < UINT16_MAX) {
- /* luma or rgb plane */
- ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
- FFMIN(frame->height, ctx->graph->nb_threads));
+ // Calculate Fade assuming this is a Fade In
+ if (s->fade_state == VF_FADE_WAITING) {
+ s->factor=0;
+ if (frame_timestamp >= s->start_time/(double)AV_TIME_BASE
+ && inlink->frame_count >= s->start_frame) {
+ // Time to start fading
+ s->fade_state = VF_FADE_FADING;
- if (frame->data[1] && frame->data[2]) {
- /* chroma planes */
- ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
- FFMIN(frame->height, ctx->graph->nb_threads));
+ // Save start time in case we are starting based on frames and fading based on time
+ if (s->start_time == 0 && s->start_frame != 0) {
+ s->start_time = frame_timestamp*(double)AV_TIME_BASE;
+ }
+
+ // Save start frame in case we are starting based on time and fading based on frames
+ if (s->start_time != 0 && s->start_frame == 0) {
+ s->start_frame = inlink->frame_count;
+ }
}
}
+ if (s->fade_state == VF_FADE_FADING) {
+ if (s->duration == 0) {
+ // Fading based on frame count
+ s->factor = (inlink->frame_count - s->start_frame) * s->fade_per_frame;
+ if (inlink->frame_count > s->start_frame + s->nb_frames) {
+ s->fade_state = VF_FADE_DONE;
+ }
+
+ } else {
+ // Fading based on duration
+ s->factor = (frame_timestamp - s->start_time/(double)AV_TIME_BASE)
+ * (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
+ if (frame_timestamp > s->start_time/(double)AV_TIME_BASE
+ + s->duration/(double)AV_TIME_BASE) {
+ s->fade_state = VF_FADE_DONE;
+ }
+ }
+ }
+ if (s->fade_state == VF_FADE_DONE) {
+ s->factor=UINT16_MAX;
+ }
- if (s->frame_index >= s->start_frame &&
- s->frame_index <= s->stop_frame)
- s->factor += s->fade_per_frame;
s->factor = av_clip_uint16(s->factor);
- s->frame_index++;
+
+ // Invert fade_factor if Fading Out
+ if (s->type == FADE_OUT) {
+ s->factor=UINT16_MAX-s->factor;
+ }
+
+ if (s->factor < UINT16_MAX) {
+ if (s->alpha) {
+ ctx->internal->execute(ctx, filter_slice_alpha, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
+ } else if (s->is_packed_rgb && !s->black_fade) {
+ ctx->internal->execute(ctx, filter_slice_rgb, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
+ } else {
+ /* luma, or rgb plane in case of black */
+ ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
+
+ if (frame->data[1] && frame->data[2]) {
+ /* chroma planes */
+ ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
+ FFMIN(frame->height, ctx->graph->nb_threads));
+ }
+ }
+ }
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
+
#define OFFSET(x) offsetof(FadeContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption fade_options[] = {
{ "type", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
+ { "t", "'in' or 'out' for fade-in/fade-out", OFFSET(type), AV_OPT_TYPE_INT, { .i64 = FADE_IN }, FADE_IN, FADE_OUT, FLAGS, "type" },
{ "in", "fade-in", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_IN }, .unit = "type" },
{ "out", "fade-out", 0, AV_OPT_TYPE_CONST, { .i64 = FADE_OUT }, .unit = "type" },
{ "start_frame", "Number of the first frame to which to apply the effect.",
OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "s", "Number of the first frame to which to apply the effect.",
+ OFFSET(start_frame), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "nb_frames", "Number of frames to which the effect should be applied.",
- OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, FLAGS },
- { NULL },
+ OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
+ { "n", "Number of frames to which the effect should be applied.",
+ OFFSET(nb_frames), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, FLAGS },
+ { "alpha", "fade alpha if it is available on the input", OFFSET(alpha), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS },
+ { "start_time", "Number of seconds of the beginning of the effect.",
+ OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "st", "Number of seconds of the beginning of the effect.",
+ OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "duration", "Duration of the effect in seconds.",
+ OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "d", "Duration of the effect in seconds.",
+ OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
+ { "color", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
};
-static const AVClass fade_class = {
- .class_name = "fade",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(fade);
static const AVFilterPad avfilter_vf_fade_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ .needs_writable = 1,
},
{ NULL }
};
@@ -210,13 +398,12 @@ static const AVFilterPad avfilter_vf_fade_outputs[] = {
AVFilter ff_vf_fade = {
.name = "fade",
- .description = NULL_IF_CONFIG_SMALL("Fade in/out input video"),
+ .description = NULL_IF_CONFIG_SMALL("Fade in/out input video."),
.init = init,
.priv_size = sizeof(FadeContext),
.priv_class = &fade_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_fade_inputs,
- .outputs = avfilter_vf_fade_outputs,
- .flags = AVFILTER_FLAG_SLICE_THREADS,
+ .inputs = avfilter_vf_fade_inputs,
+ .outputs = avfilter_vf_fade_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/libavfilter/vf_field.c b/libavfilter/vf_field.c
new file mode 100644
index 0000000..ed12379
--- /dev/null
+++ b/libavfilter/vf_field.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2003 Rich Felker
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * field filter, based on libmpcodecs/vf_field.c by Rich Felker
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum FieldType { FIELD_TYPE_TOP = 0, FIELD_TYPE_BOTTOM };
+
+typedef struct {
+ const AVClass *class;
+ enum FieldType type;
+ int nb_planes; ///< number of planes of the current format
+} FieldContext;
+
+#define OFFSET(x) offsetof(FieldContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption field_options[] = {
+ {"type", "set field type (top or bottom)", OFFSET(type), AV_OPT_TYPE_INT, {.i64=FIELD_TYPE_TOP}, 0, 1, FLAGS, "field_type" },
+ {"top", "select top field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_TOP}, INT_MIN, INT_MAX, FLAGS, "field_type"},
+ {"bottom", "select bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field_type"},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(field);
+
+static int config_props_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FieldContext *field = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ field->nb_planes = av_pix_fmt_count_planes(outlink->format);
+
+ outlink->w = inlink->w;
+ outlink->h = (inlink->h + (field->type == FIELD_TYPE_TOP)) / 2;
+
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d type:%s -> w:%d h:%d\n",
+ inlink->w, inlink->h, field->type == FIELD_TYPE_BOTTOM ? "bottom" : "top",
+ outlink->w, outlink->h);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ FieldContext *field = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int i;
+
+ inpicref->height = outlink->h;
+ inpicref->interlaced_frame = 0;
+
+ for (i = 0; i < field->nb_planes; i++) {
+ if (field->type == FIELD_TYPE_BOTTOM)
+ inpicref->data[i] = inpicref->data[i] + inpicref->linesize[i];
+ inpicref->linesize[i] = 2 * inpicref->linesize[i];
+ }
+ return ff_filter_frame(outlink, inpicref);
+}
+
+static const AVFilterPad field_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad field_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_field = {
+ .name = "field",
+ .description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."),
+ .priv_size = sizeof(FieldContext),
+ .inputs = field_inputs,
+ .outputs = field_outputs,
+ .priv_class = &field_class,
+};
diff --git a/libavfilter/vf_fieldmatch.c b/libavfilter/vf_fieldmatch.c
new file mode 100644
index 0000000..e2aa60b
--- /dev/null
+++ b/libavfilter/vf_fieldmatch.c
@@ -0,0 +1,984 @@
+/*
+ * Copyright (c) 2012 Fredrik Mellbin
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Fieldmatching filter, ported from VFM filter (VapouSsynth) by Clément.
+ * Fredrik Mellbin is the author of the VIVTC/VFM filter, which is itself a
+ * light clone of the TIVTC/TFM (AviSynth) filter written by Kevin Stone
+ * (tritical), the original author.
+ *
+ * @see http://bengal.missouri.edu/~kes25c/
+ * @see http://www.vapoursynth.com/about/
+ */
+
+#include <inttypes.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define INPUT_MAIN 0
+#define INPUT_CLEANSRC 1
+
+enum fieldmatch_parity {
+ FM_PARITY_AUTO = -1,
+ FM_PARITY_BOTTOM = 0,
+ FM_PARITY_TOP = 1,
+};
+
+enum matching_mode {
+ MODE_PC,
+ MODE_PC_N,
+ MODE_PC_U,
+ MODE_PC_N_UB,
+ MODE_PCN,
+ MODE_PCN_UB,
+ NB_MODE
+};
+
+enum comb_matching_mode {
+ COMBMATCH_NONE,
+ COMBMATCH_SC,
+ COMBMATCH_FULL,
+ NB_COMBMATCH
+};
+
+enum comb_dbg {
+ COMBDBG_NONE,
+ COMBDBG_PCN,
+ COMBDBG_PCNUB,
+ NB_COMBDBG
+};
+
+typedef struct {
+ const AVClass *class;
+
+ AVFrame *prv, *src, *nxt; ///< main sliding window of 3 frames
+ AVFrame *prv2, *src2, *nxt2; ///< sliding window of the optional second stream
+ int got_frame[2]; ///< frame request flag for each input stream
+ int hsub, vsub; ///< chroma subsampling values
+ uint32_t eof; ///< bitmask for end of stream
+ int64_t lastscdiff;
+ int64_t lastn;
+
+ /* options */
+ int order;
+ int ppsrc;
+ enum matching_mode mode;
+ int field;
+ int mchroma;
+ int y0, y1;
+ int64_t scthresh;
+ double scthresh_flt;
+ enum comb_matching_mode combmatch;
+ int combdbg;
+ int cthresh;
+ int chroma;
+ int blockx, blocky;
+ int combpel;
+
+ /* misc buffers */
+ uint8_t *map_data[4];
+ int map_linesize[4];
+ uint8_t *cmask_data[4];
+ int cmask_linesize[4];
+ int *c_array;
+ int tpitchy, tpitchuv;
+ uint8_t *tbuffer;
+} FieldMatchContext;
+
+#define OFFSET(x) offsetof(FieldMatchContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption fieldmatch_options[] = {
+ { "order", "specify the assumed field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "order" },
+ { "auto", "auto detect parity", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "order" },
+ { "bff", "assume bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "order" },
+ { "tff", "assume top field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "order" },
+ { "mode", "set the matching mode or strategy to use", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_PC_N}, MODE_PC, NB_MODE-1, FLAGS, "mode" },
+ { "pc", "2-way match (p/c)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pc_n", "2-way match + 3rd match on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pc_u", "2-way match + 3rd match (same order) on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_U}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pcn", "3-way match (p/c/n)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "pcn_ub", "3-way match + 4th/5th matches on combed (p/c/n + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "field", "set the field to match from", OFFSET(field), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "field" },
+ { "auto", "automatic (same value as 'order')", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "field" },
+ { "bottom", "bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field" },
+ { "top", "top field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "field" },
+ { "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
+ { "scthresh", "set scene change detection threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl=12}, 0, 100, FLAGS },
+ { "combmatch", "set combmatching mode", OFFSET(combmatch), AV_OPT_TYPE_INT, {.i64=COMBMATCH_SC}, COMBMATCH_NONE, NB_COMBMATCH-1, FLAGS, "combmatching" },
+ { "none", "disable combmatching", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_NONE}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
+ { "sc", "enable combmatching only on scene change", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_SC}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
+ { "full", "enable combmatching all the time", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_FULL}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
+ { "combdbg", "enable comb debug", OFFSET(combdbg), AV_OPT_TYPE_INT, {.i64=COMBDBG_NONE}, COMBDBG_NONE, NB_COMBDBG-1, FLAGS, "dbglvl" },
+ { "none", "no forced calculation", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_NONE}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
+ { "pcn", "calculate p/c/n", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCN}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
+ { "pcnub", "calculate p/c/n/u/b", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCNUB}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
+ { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS },
+ { "chroma", "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_INT, {.i64= 0}, 0, 1, FLAGS },
+ { "blockx", "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
+ { "blocky", "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
+ { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(fieldmatch);
+
+static int get_width(const FieldMatchContext *fm, const AVFrame *f, int plane)
+{
+ return plane ? FF_CEIL_RSHIFT(f->width, fm->hsub) : f->width;
+}
+
+static int get_height(const FieldMatchContext *fm, const AVFrame *f, int plane)
+{
+ return plane ? FF_CEIL_RSHIFT(f->height, fm->vsub) : f->height;
+}
+
+static int64_t luma_abs_diff(const AVFrame *f1, const AVFrame *f2)
+{
+ int x, y;
+ const uint8_t *srcp1 = f1->data[0];
+ const uint8_t *srcp2 = f2->data[0];
+ const int src1_linesize = f1->linesize[0];
+ const int src2_linesize = f2->linesize[0];
+ const int width = f1->width;
+ const int height = f1->height;
+ int64_t acc = 0;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ acc += abs(srcp1[x] - srcp2[x]);
+ srcp1 += src1_linesize;
+ srcp2 += src2_linesize;
+ }
+ return acc;
+}
+
+static void fill_buf(uint8_t *data, int w, int h, int linesize, uint8_t v)
+{
+ int y;
+
+ for (y = 0; y < h; y++) {
+ memset(data, v, w);
+ data += linesize;
+ }
+}
+
+static int calc_combed_score(const FieldMatchContext *fm, const AVFrame *src)
+{
+ int x, y, plane, max_v = 0;
+ const int cthresh = fm->cthresh;
+ const int cthresh6 = cthresh * 6;
+
+ for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
+ const uint8_t *srcp = src->data[plane];
+ const int src_linesize = src->linesize[plane];
+ const int width = get_width (fm, src, plane);
+ const int height = get_height(fm, src, plane);
+ uint8_t *cmkp = fm->cmask_data[plane];
+ const int cmk_linesize = fm->cmask_linesize[plane];
+
+ if (cthresh < 0) {
+ fill_buf(cmkp, width, height, cmk_linesize, 0xff);
+ continue;
+ }
+ fill_buf(cmkp, width, height, cmk_linesize, 0);
+
+ /* [1 -3 4 -3 1] vertical filter */
+#define FILTER(xm2, xm1, xp1, xp2) \
+ abs( 4 * srcp[x] \
+ -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
+ + (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6
+
+ /* first line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && FILTER(2, 1, 1, 2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+
+ /* second line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+
+ /* all lines minus first two and last two */
+ for (y = 2; y < height-2; y++) {
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+ }
+
+ /* before-last line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
+ if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
+ cmkp[x] = 0xff;
+ }
+ srcp += src_linesize;
+ cmkp += cmk_linesize;
+
+ /* last line */
+ for (x = 0; x < width; x++) {
+ const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
+ if (s1 > cthresh && FILTER(-2, -1, -1, -2))
+ cmkp[x] = 0xff;
+ }
+ }
+
+ if (fm->chroma) {
+ uint8_t *cmkp = fm->cmask_data[0];
+ uint8_t *cmkpU = fm->cmask_data[1];
+ uint8_t *cmkpV = fm->cmask_data[2];
+ const int width = FF_CEIL_RSHIFT(src->width, fm->hsub);
+ const int height = FF_CEIL_RSHIFT(src->height, fm->vsub);
+ const int cmk_linesize = fm->cmask_linesize[0] << 1;
+ const int cmk_linesizeUV = fm->cmask_linesize[2];
+ uint8_t *cmkpp = cmkp - (cmk_linesize>>1);
+ uint8_t *cmkpn = cmkp + (cmk_linesize>>1);
+ uint8_t *cmkpnn = cmkp + cmk_linesize;
+ for (y = 1; y < height - 1; y++) {
+ cmkpp += cmk_linesize;
+ cmkp += cmk_linesize;
+ cmkpn += cmk_linesize;
+ cmkpnn += cmk_linesize;
+ cmkpV += cmk_linesizeUV;
+ cmkpU += cmk_linesizeUV;
+ for (x = 1; x < width - 1; x++) {
+#define HAS_FF_AROUND(p, lz) (p[x-1 - lz] == 0xff || p[x - lz] == 0xff || p[x+1 - lz] == 0xff || \
+ p[x-1 ] == 0xff || p[x+1 ] == 0xff || \
+ p[x-1 + lz] == 0xff || p[x + lz] == 0xff || p[x+1 + lz] == 0xff)
+ if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
+ (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
+ ((uint16_t*)cmkp)[x] = 0xffff;
+ ((uint16_t*)cmkpn)[x] = 0xffff;
+ if (y&1) ((uint16_t*)cmkpp)[x] = 0xffff;
+ else ((uint16_t*)cmkpnn)[x] = 0xffff;
+ }
+ }
+ }
+ }
+
+ {
+ const int blockx = fm->blockx;
+ const int blocky = fm->blocky;
+ const int xhalf = blockx/2;
+ const int yhalf = blocky/2;
+ const int cmk_linesize = fm->cmask_linesize[0];
+ const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
+ const int width = src->width;
+ const int height = src->height;
+ const int xblocks = ((width+xhalf)/blockx) + 1;
+ const int xblocks4 = xblocks<<2;
+ const int yblocks = ((height+yhalf)/blocky) + 1;
+ int *c_array = fm->c_array;
+ const int arraysize = (xblocks*yblocks)<<2;
+ int heighta = (height/(blocky/2))*(blocky/2);
+ const int widtha = (width /(blockx/2))*(blockx/2);
+ if (heighta == height)
+ heighta = height - yhalf;
+ memset(c_array, 0, arraysize * sizeof(*c_array));
+
+#define C_ARRAY_ADD(v) do { \
+ const int box1 = (x / blockx) * 4; \
+ const int box2 = ((x + xhalf) / blockx) * 4; \
+ c_array[temp1 + box1 ] += v; \
+ c_array[temp1 + box2 + 1] += v; \
+ c_array[temp2 + box1 + 2] += v; \
+ c_array[temp2 + box2 + 3] += v; \
+} while (0)
+
+#define VERTICAL_HALF(y_start, y_end) do { \
+ for (y = y_start; y < y_end; y++) { \
+ const int temp1 = (y / blocky) * xblocks4; \
+ const int temp2 = ((y + yhalf) / blocky) * xblocks4; \
+ for (x = 0; x < width; x++) \
+ if (cmkp[x - cmk_linesize] == 0xff && \
+ cmkp[x ] == 0xff && \
+ cmkp[x + cmk_linesize] == 0xff) \
+ C_ARRAY_ADD(1); \
+ cmkp += cmk_linesize; \
+ } \
+} while (0)
+
+ VERTICAL_HALF(1, yhalf);
+
+ for (y = yhalf; y < heighta; y += yhalf) {
+ const int temp1 = (y / blocky) * xblocks4;
+ const int temp2 = ((y + yhalf) / blocky) * xblocks4;
+
+ for (x = 0; x < widtha; x += xhalf) {
+ const uint8_t *cmkp_tmp = cmkp + x;
+ int u, v, sum = 0;
+ for (u = 0; u < yhalf; u++) {
+ for (v = 0; v < xhalf; v++)
+ if (cmkp_tmp[v - cmk_linesize] == 0xff &&
+ cmkp_tmp[v ] == 0xff &&
+ cmkp_tmp[v + cmk_linesize] == 0xff)
+ sum++;
+ cmkp_tmp += cmk_linesize;
+ }
+ if (sum)
+ C_ARRAY_ADD(sum);
+ }
+
+ for (x = widtha; x < width; x++) {
+ const uint8_t *cmkp_tmp = cmkp + x;
+ int u, sum = 0;
+ for (u = 0; u < yhalf; u++) {
+ if (cmkp_tmp[-cmk_linesize] == 0xff &&
+ cmkp_tmp[ 0] == 0xff &&
+ cmkp_tmp[ cmk_linesize] == 0xff)
+ sum++;
+ cmkp_tmp += cmk_linesize;
+ }
+ if (sum)
+ C_ARRAY_ADD(sum);
+ }
+
+ cmkp += cmk_linesize * yhalf;
+ }
+
+ VERTICAL_HALF(heighta, height - 1);
+
+ for (x = 0; x < arraysize; x++)
+ if (c_array[x] > max_v)
+ max_v = c_array[x];
+ }
+ return max_v;
+}
+
+// the secret is that tbuffer is an interlaced, offset subset of all the lines
+static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
+ const uint8_t *nxtp, int nxt_linesize,
+ uint8_t *tbuffer, int tbuf_linesize,
+ int width, int height)
+{
+ int y, x;
+
+ prvp -= prv_linesize;
+ nxtp -= nxt_linesize;
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++)
+ tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
+ prvp += prv_linesize;
+ nxtp += nxt_linesize;
+ tbuffer += tbuf_linesize;
+ }
+}
+
+/**
+ * Build a map over which pixels differ a lot/a little
+ */
+static void build_diff_map(FieldMatchContext *fm,
+ const uint8_t *prvp, int prv_linesize,
+ const uint8_t *nxtp, int nxt_linesize,
+ uint8_t *dstp, int dst_linesize, int height,
+ int width, int plane)
+{
+ int x, y, u, diff, count;
+ int tpitch = plane ? fm->tpitchuv : fm->tpitchy;
+ const uint8_t *dp = fm->tbuffer + tpitch;
+
+ build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize,
+ fm->tbuffer, tpitch, width, height>>1);
+
+ for (y = 2; y < height - 2; y += 2) {
+ for (x = 1; x < width - 1; x++) {
+ diff = dp[x];
+ if (diff > 3) {
+ for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
+ count += dp[u-tpitch] > 3;
+ count += dp[u ] > 3;
+ count += dp[u+tpitch] > 3;
+ }
+ if (count > 1) {
+ dstp[x] = 1;
+ if (diff > 19) {
+ int upper = 0, lower = 0;
+ for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
+ if (dp[u-tpitch] > 19) { count++; upper = 1; }
+ if (dp[u ] > 19) count++;
+ if (dp[u+tpitch] > 19) { count++; lower = 1; }
+ }
+ if (count > 3) {
+ if (upper && lower) {
+ dstp[x] |= 1<<1;
+ } else {
+ int upper2 = 0, lower2 = 0;
+ for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) {
+ if (y != 2 && dp[u-2*tpitch] > 19) upper2 = 1;
+ if ( dp[u- tpitch] > 19) upper = 1;
+ if ( dp[u+ tpitch] > 19) lower = 1;
+ if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1;
+ }
+ if ((upper && (lower || upper2)) ||
+ (lower && (upper || lower2)))
+ dstp[x] |= 1<<1;
+ else if (count > 5)
+ dstp[x] |= 1<<2;
+ }
+ }
+ }
+ }
+ }
+ }
+ dp += tpitch;
+ dstp += dst_linesize;
+ }
+}
+
+enum { mP, mC, mN, mB, mU };
+
+static int get_field_base(int match, int field)
+{
+ return match < 3 ? 2 - field : 1 + field;
+}
+
+static AVFrame *select_frame(FieldMatchContext *fm, int match)
+{
+ if (match == mP || match == mB) return fm->prv;
+ else if (match == mN || match == mU) return fm->nxt;
+ else /* match == mC */ return fm->src;
+}
+
+static int compare_fields(FieldMatchContext *fm, int match1, int match2, int field)
+{
+ int plane, ret;
+ uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
+ uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
+ int norm1, norm2, mtn1, mtn2;
+ float c1, c2, mr;
+ const AVFrame *src = fm->src;
+
+ for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
+ int x, y, temp1, temp2, fbase;
+ const AVFrame *prev, *next;
+ uint8_t *mapp = fm->map_data[plane];
+ int map_linesize = fm->map_linesize[plane];
+ const uint8_t *srcp = src->data[plane];
+ const int src_linesize = src->linesize[plane];
+ const int srcf_linesize = src_linesize << 1;
+ int prv_linesize, nxt_linesize;
+ int prvf_linesize, nxtf_linesize;
+ const int width = get_width (fm, src, plane);
+ const int height = get_height(fm, src, plane);
+ const int y0a = fm->y0 >> (plane != 0);
+ const int y1a = fm->y1 >> (plane != 0);
+ const int startx = (plane == 0 ? 8 : 4);
+ const int stopx = width - startx;
+ const uint8_t *srcpf, *srcf, *srcnf;
+ const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;
+
+ fill_buf(mapp, width, height, map_linesize, 0);
+
+ /* match1 */
+ fbase = get_field_base(match1, field);
+ srcf = srcp + (fbase + 1) * src_linesize;
+ srcpf = srcf - srcf_linesize;
+ srcnf = srcf + srcf_linesize;
+ mapp = mapp + fbase * map_linesize;
+ prev = select_frame(fm, match1);
+ prv_linesize = prev->linesize[plane];
+ prvf_linesize = prv_linesize << 1;
+ prvpf = prev->data[plane] + fbase * prv_linesize; // previous frame, previous field
+ prvnf = prvpf + prvf_linesize; // previous frame, next field
+
+ /* match2 */
+ fbase = get_field_base(match2, field);
+ next = select_frame(fm, match2);
+ nxt_linesize = next->linesize[plane];
+ nxtf_linesize = nxt_linesize << 1;
+ nxtpf = next->data[plane] + fbase * nxt_linesize; // next frame, previous field
+ nxtnf = nxtpf + nxtf_linesize; // next frame, next field
+
+ map_linesize <<= 1;
+ if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
+ build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize,
+ mapp, map_linesize, height, width, plane);
+ else
+ build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize,
+ mapp + map_linesize, map_linesize, height, width, plane);
+
+ for (y = 2; y < height - 2; y += 2) {
+ if (y0a == y1a || y < y0a || y > y1a) {
+ for (x = startx; x < stopx; x++) {
+ if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
+ temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x]; // [1 4 1]
+
+ temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
+ if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
+ accumPc += temp2;
+ if (temp2 > 42) {
+ if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
+ accumPm += temp2;
+ if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
+ accumPml += temp2;
+ }
+
+ temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
+ if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
+ accumNc += temp2;
+ if (temp2 > 42) {
+ if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
+ accumNm += temp2;
+ if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
+ accumNml += temp2;
+ }
+ }
+ }
+ }
+ prvpf += prvf_linesize;
+ prvnf += prvf_linesize;
+ srcpf += srcf_linesize;
+ srcf += srcf_linesize;
+ srcnf += srcf_linesize;
+ nxtpf += nxtf_linesize;
+ nxtnf += nxtf_linesize;
+ mapp += map_linesize;
+ }
+ }
+
+ if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
+ FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
+ accumPm = accumPml;
+ accumNm = accumNml;
+ }
+
+ norm1 = (int)((accumPc / 6.0f) + 0.5f);
+ norm2 = (int)((accumNc / 6.0f) + 0.5f);
+ mtn1 = (int)((accumPm / 6.0f) + 0.5f);
+ mtn2 = (int)((accumNm / 6.0f) + 0.5f);
+ c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1));
+ c2 = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMIN(mtn1, mtn2), 1));
+ mr = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMAX(norm1,norm2),1));
+ if (((mtn1 >= 500 || mtn2 >= 500) && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
+ ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
+ ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
+ ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
+ ret = mtn1 > mtn2 ? match2 : match1;
+ else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
+ ret = mtn1 > mtn2 ? match2 : match1;
+ else
+ ret = norm1 > norm2 ? match2 : match1;
+ return ret;
+}
+
+static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
+ const AVFrame *src, int field)
+{
+ int plane;
+ for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++)
+ av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
+ src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
+ get_width(fm, src, plane), get_height(fm, src, plane) / 2);
+}
+
+static AVFrame *create_weave_frame(AVFilterContext *ctx, int match, int field,
+ const AVFrame *prv, AVFrame *src, const AVFrame *nxt)
+{
+ AVFrame *dst;
+ FieldMatchContext *fm = ctx->priv;
+
+ if (match == mC) {
+ dst = av_frame_clone(src);
+ } else {
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!dst)
+ return NULL;
+ av_frame_copy_props(dst, src);
+
+ switch (match) {
+ case mP: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, prv, field); break;
+ case mN: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, nxt, field); break;
+ case mB: copy_fields(fm, dst, src, field); copy_fields(fm, dst, prv, 1-field); break;
+ case mU: copy_fields(fm, dst, src, field); copy_fields(fm, dst, nxt, 1-field); break;
+ default: av_assert0(0);
+ }
+ }
+ return dst;
+}
+
+static int checkmm(AVFilterContext *ctx, int *combs, int m1, int m2,
+ AVFrame **gen_frames, int field)
+{
+ const FieldMatchContext *fm = ctx->priv;
+
+#define LOAD_COMB(mid) do { \
+ if (combs[mid] < 0) { \
+ if (!gen_frames[mid]) \
+ gen_frames[mid] = create_weave_frame(ctx, mid, field, \
+ fm->prv, fm->src, fm->nxt); \
+ combs[mid] = calc_combed_score(fm, gen_frames[mid]); \
+ } \
+} while (0)
+
+ LOAD_COMB(m1);
+ LOAD_COMB(m2);
+
+ if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
+ abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
+ return m2;
+ else
+ return m1;
+}
+
+static const int fxo0m[] = { mP, mC, mN, mB, mU };
+static const int fxo1m[] = { mN, mC, mP, mU, mB };
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ FieldMatchContext *fm = ctx->priv;
+ int combs[] = { -1, -1, -1, -1, -1 };
+ int order, field, i, match, sc = 0;
+ const int *fxo;
+ AVFrame *gen_frames[] = { NULL, NULL, NULL, NULL, NULL };
+ AVFrame *dst;
+
+ /* update frames queue(s) */
+#define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \
+ if (prv != src) /* 2nd loop exception (1st has prv==src and we don't want to loose src) */ \
+ av_frame_free(&prv); \
+ prv = src; \
+ src = nxt; \
+ if (in) \
+ nxt = in; \
+ if (!prv) \
+ prv = src; \
+ if (!prv) /* received only one frame at that point */ \
+ return 0; \
+ av_assert0(prv && src && nxt); \
+} while (0)
+ if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
+ SLIDING_FRAME_WINDOW(fm->prv, fm->src, fm->nxt);
+ fm->got_frame[INPUT_MAIN] = 1;
+ } else {
+ SLIDING_FRAME_WINDOW(fm->prv2, fm->src2, fm->nxt2);
+ fm->got_frame[INPUT_CLEANSRC] = 1;
+ }
+ if (!fm->got_frame[INPUT_MAIN] || (fm->ppsrc && !fm->got_frame[INPUT_CLEANSRC]))
+ return 0;
+ fm->got_frame[INPUT_MAIN] = fm->got_frame[INPUT_CLEANSRC] = 0;
+ in = fm->src;
+
+ /* parity */
+ order = fm->order != FM_PARITY_AUTO ? fm->order : (in->interlaced_frame ? in->top_field_first : 1);
+ field = fm->field != FM_PARITY_AUTO ? fm->field : order;
+ av_assert0(order == 0 || order == 1 || field == 0 || field == 1);
+ fxo = field ^ order ? fxo1m : fxo0m;
+
+ /* debug mode: we generate all the fields combinations and their associated
+ * combed score. XXX: inject as frame metadata? */
+ if (fm->combdbg) {
+ for (i = 0; i < FF_ARRAY_ELEMS(combs); i++) {
+ if (i > mN && fm->combdbg == COMBDBG_PCN)
+ break;
+ gen_frames[i] = create_weave_frame(ctx, i, field, fm->prv, fm->src, fm->nxt);
+ if (!gen_frames[i])
+ return AVERROR(ENOMEM);
+ combs[i] = calc_combed_score(fm, gen_frames[i]);
+ }
+ av_log(ctx, AV_LOG_INFO, "COMBS: %3d %3d %3d %3d %3d\n",
+ combs[0], combs[1], combs[2], combs[3], combs[4]);
+ } else {
+ gen_frames[mC] = av_frame_clone(fm->src);
+ if (!gen_frames[mC])
+ return AVERROR(ENOMEM);
+ }
+
+ /* p/c selection and optional 3-way p/c/n matches */
+ match = compare_fields(fm, fxo[mC], fxo[mP], field);
+ if (fm->mode == MODE_PCN || fm->mode == MODE_PCN_UB)
+ match = compare_fields(fm, match, fxo[mN], field);
+
+ /* scene change check */
+ if (fm->combmatch == COMBMATCH_SC) {
+ if (fm->lastn == outlink->frame_count - 1) {
+ if (fm->lastscdiff > fm->scthresh)
+ sc = 1;
+ } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
+ sc = 1;
+ }
+
+ if (!sc) {
+ fm->lastn = outlink->frame_count;
+ fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
+ sc = fm->lastscdiff > fm->scthresh;
+ }
+ }
+
+ if (fm->combmatch == COMBMATCH_FULL || (fm->combmatch == COMBMATCH_SC && sc)) {
+ switch (fm->mode) {
+ /* 2-way p/c matches */
+ case MODE_PC:
+ match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
+ break;
+ case MODE_PC_N:
+ match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
+ break;
+ case MODE_PC_U:
+ match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
+ break;
+ case MODE_PC_N_UB:
+ match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
+ match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
+ match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
+ break;
+ /* 3-way p/c/n matches */
+ case MODE_PCN:
+ match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
+ break;
+ case MODE_PCN_UB:
+ match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
+ match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
+ break;
+ default:
+ av_assert0(0);
+ }
+ }
+
+ /* get output frame and drop the others */
+ if (fm->ppsrc) {
+ /* field matching was based on a filtered/post-processed input, we now
+ * pick the untouched fields from the clean source */
+ dst = create_weave_frame(ctx, match, field, fm->prv2, fm->src2, fm->nxt2);
+ } else {
+ if (!gen_frames[match]) { // XXX: is that possible?
+ dst = create_weave_frame(ctx, match, field, fm->prv, fm->src, fm->nxt);
+ } else {
+ dst = gen_frames[match];
+ gen_frames[match] = NULL;
+ }
+ }
+ if (!dst)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < FF_ARRAY_ELEMS(gen_frames); i++)
+ av_frame_free(&gen_frames[i]);
+
+ /* mark the frame we are unable to match properly as interlaced so a proper
+ * de-interlacer can take the relay */
+ dst->interlaced_frame = combs[match] >= fm->combpel;
+ if (dst->interlaced_frame) {
+ av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
+ outlink->frame_count, av_ts2timestr(in->pts, &inlink->time_base));
+ dst->top_field_first = field;
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
+ " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
+ fm->combpel, match, dst->interlaced_frame ? "YES" : "NO");
+
+ return ff_filter_frame(outlink, dst);
+}
+
+static int request_inlink(AVFilterContext *ctx, int lid)
+{
+ int ret = 0;
+ FieldMatchContext *fm = ctx->priv;
+
+ if (!fm->got_frame[lid]) {
+ AVFilterLink *inlink = ctx->inputs[lid];
+ ret = ff_request_frame(inlink);
+ if (ret == AVERROR_EOF) { // flushing
+ fm->eof |= 1 << lid;
+ ret = filter_frame(inlink, NULL);
+ }
+ }
+ return ret;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ int ret;
+ AVFilterContext *ctx = outlink->src;
+ FieldMatchContext *fm = ctx->priv;
+ const uint32_t eof_mask = 1<<INPUT_MAIN | fm->ppsrc<<INPUT_CLEANSRC;
+
+ if ((fm->eof & eof_mask) == eof_mask) // flush done?
+ return AVERROR_EOF;
+ if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
+ return ret;
+ if (fm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
+ return ret;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // TODO: second input source can support >8bit depth
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int ret;
+ AVFilterContext *ctx = inlink->dst;
+ FieldMatchContext *fm = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int w = inlink->w;
+ const int h = inlink->h;
+
+ fm->scthresh = (int64_t)((w * h * 255.0 * fm->scthresh_flt) / 100.0);
+
+ if ((ret = av_image_alloc(fm->map_data, fm->map_linesize, w, h, inlink->format, 32)) < 0 ||
+ (ret = av_image_alloc(fm->cmask_data, fm->cmask_linesize, w, h, inlink->format, 32)) < 0)
+ return ret;
+
+ fm->hsub = pix_desc->log2_chroma_w;
+ fm->vsub = pix_desc->log2_chroma_h;
+
+ fm->tpitchy = FFALIGN(w, 16);
+ fm->tpitchuv = FFALIGN(w >> 1, 16);
+
+ fm->tbuffer = av_malloc(h/2 * fm->tpitchy);
+ fm->c_array = av_malloc((((w + fm->blockx/2)/fm->blockx)+1) *
+ (((h + fm->blocky/2)/fm->blocky)+1) *
+ 4 * sizeof(*fm->c_array));
+ if (!fm->tbuffer || !fm->c_array)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold int fieldmatch_init(AVFilterContext *ctx)
+{
+ const FieldMatchContext *fm = ctx->priv;
+ AVFilterPad pad = {
+ .name = av_strdup("main"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ };
+
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_MAIN, &pad);
+
+ if (fm->ppsrc) {
+ pad.name = av_strdup("clean_src");
+ pad.config_props = NULL;
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
+ }
+
+ if ((fm->blockx & (fm->blockx - 1)) ||
+ (fm->blocky & (fm->blocky - 1))) {
+ av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (fm->combpel > fm->blockx * fm->blocky) {
+ av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static av_cold void fieldmatch_uninit(AVFilterContext *ctx)
+{
+ int i;
+ FieldMatchContext *fm = ctx->priv;
+
+ if (fm->prv != fm->src)
+ av_frame_free(&fm->prv);
+ if (fm->nxt != fm->src)
+ av_frame_free(&fm->nxt);
+ av_frame_free(&fm->src);
+ av_freep(&fm->map_data[0]);
+ av_freep(&fm->cmask_data[0]);
+ av_freep(&fm->tbuffer);
+ av_freep(&fm->c_array);
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ const FieldMatchContext *fm = ctx->priv;
+ const AVFilterLink *inlink =
+ ctx->inputs[fm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ outlink->time_base = inlink->time_base;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->frame_rate = inlink->frame_rate;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+ return 0;
+}
+
+static const AVFilterPad fieldmatch_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_fieldmatch = {
+ .name = "fieldmatch",
+ .description = NULL_IF_CONFIG_SMALL("Field matching for inverse telecine."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(FieldMatchContext),
+ .init = fieldmatch_init,
+ .uninit = fieldmatch_uninit,
+ .inputs = NULL,
+ .outputs = fieldmatch_outputs,
+ .priv_class = &fieldmatch_class,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/vf_fieldorder.c b/libavfilter/vf_fieldorder.c
index c05d081..5cc612c 100644
--- a/libavfilter/vf_fieldorder.c
+++ b/libavfilter/vf_fieldorder.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Mark Himsley
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -23,9 +23,6 @@
* video field order filter, heavily influenced by vf_pad.c
*/
-#include <stdio.h>
-#include <string.h>
-
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
@@ -55,6 +52,7 @@ static int query_formats(AVFilterContext *ctx)
while ((desc = av_pix_fmt_desc_next(desc))) {
pix_fmt = av_pix_fmt_desc_get_id(desc);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
desc->nb_components && !desc->log2_chroma_h &&
(ret = ff_add_format(&formats, pix_fmt)) < 0) {
@@ -73,16 +71,8 @@ static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
- int plane;
-
- /** full an array with the number of bytes that the video
- * data occupies per line for each plane of the input video */
- for (plane = 0; plane < 4; plane++) {
- s->line_size[plane] = av_image_get_linesize(inlink->format, inlink->w,
- plane);
- }
- return 0;
+ return av_image_fill_linesizes(s->line_size, inlink->format, inlink->w);
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
@@ -90,8 +80,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
- int h, plane, line_step, line_size, line;
- uint8_t *data;
+ int h, plane, src_line_step, dst_line_step, line_size, line;
+ uint8_t *dst, *src;
+ AVFrame *out;
if (!frame->interlaced_frame ||
frame->top_field_first == s->dst_tff) {
@@ -102,14 +93,27 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
return ff_filter_frame(outlink, frame);
}
+ if (av_frame_is_writable(frame)) {
+ out = frame;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, frame);
+ }
+
av_dlog(ctx,
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->height;
- for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
- line_step = frame->linesize[plane];
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
+ dst_line_step = out->linesize[plane];
+ src_line_step = frame->linesize[plane];
line_size = s->line_size[plane];
- data = frame->data[plane];
+ dst = out->data[plane];
+ src = frame->data[plane];
if (s->dst_tff) {
/** Move every line up one line, working from
* the top to the bottom of the frame.
@@ -118,11 +122,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
* penultimate line from that field. */
for (line = 0; line < h; line++) {
if (1 + line < frame->height) {
- memcpy(data, data + line_step, line_size);
+ memcpy(dst, src + src_line_step, line_size);
} else {
- memcpy(data, data - line_step - line_step, line_size);
+ memcpy(dst, src - 2 * src_line_step, line_size);
}
- data += line_step;
+ dst += dst_line_step;
+ src += src_line_step;
}
} else {
/** Move every line down one line, working from
@@ -130,45 +135,44 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
* The original bottom line is lost.
* The new first line is created as a copy of the
* second line from that field. */
- data += (h - 1) * line_step;
+ dst += (h - 1) * dst_line_step;
+ src += (h - 1) * src_line_step;
for (line = h - 1; line >= 0 ; line--) {
if (line > 0) {
- memcpy(data, data - line_step, line_size);
+ memcpy(dst, src - src_line_step, line_size);
} else {
- memcpy(data, data + line_step + line_step, line_size);
+ memcpy(dst, src + 2 * src_line_step, line_size);
}
- data -= line_step;
+ dst -= dst_line_step;
+ src -= src_line_step;
}
}
}
- frame->top_field_first = s->dst_tff;
+ out->top_field_first = s->dst_tff;
- return ff_filter_frame(outlink, frame);
+ if (frame != out)
+ av_frame_free(&frame);
+ return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(FieldOrderContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption fieldorder_options[] = {
{ "order", "output field order", OFFSET(dst_tff), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS, "order" },
- { "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .unit = "order" },
- { "tff", "top field first", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .unit = "order" },
- { NULL },
+ { "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags=FLAGS, .unit = "order" },
+ { "tff", "top field first", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags=FLAGS, .unit = "order" },
+ { NULL }
};
-static const AVClass fieldorder_class = {
- .class_name = "fieldorder",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(fieldorder);
static const AVFilterPad avfilter_vf_fieldorder_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_input,
- .filter_frame = filter_frame,
- .needs_writable = 1,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -189,4 +193,5 @@ AVFilter ff_vf_fieldorder = {
.query_formats = query_formats,
.inputs = avfilter_vf_fieldorder_inputs,
.outputs = avfilter_vf_fieldorder_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_format.c b/libavfilter/vf_format.c
index 914089d..96cb7fd 100644
--- a/libavfilter/vf_format.c
+++ b/libavfilter/vf_format.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -58,6 +58,7 @@ static av_cold int init(AVFilterContext *ctx)
char *cur, *sep;
int nb_formats = 1;
int i;
+ int ret;
if (!s->pix_fmts) {
av_log(ctx, AV_LOG_ERROR, "Empty output format string.\n");
@@ -83,11 +84,8 @@ static av_cold int init(AVFilterContext *ctx)
if (sep)
*sep++ = 0;
- s->formats[i] = av_get_pix_fmt(cur);
- if (s->formats[i] == AV_PIX_FMT_NONE) {
- av_log(ctx, AV_LOG_ERROR, "Unknown pixel format: %s\n", cur);
- return AVERROR(EINVAL);
- }
+ if ((ret = ff_parse_pixel_format(&s->formats[i], cur, ctx)) < 0)
+ return ret;
cur = sep;
}
@@ -96,7 +94,7 @@ static av_cold int init(AVFilterContext *ctx)
if (!strcmp(ctx->filter->name, "noformat")) {
const AVPixFmtDescriptor *desc = NULL;
enum AVPixelFormat *formats_allowed;
- int nb_formats_lavu = 0, nb_formats_allowed = 0;;
+ int nb_formats_lavu = 0, nb_formats_allowed = 0;
/* count the formats known to lavu */
while ((desc = av_pix_fmt_desc_next(desc)))
@@ -144,16 +142,13 @@ static int query_formats(AVFilterContext *ctx)
#define OFFSET(x) offsetof(FormatContext, x)
static const AVOption options[] = {
{ "pix_fmts", "A '|'-separated list of pixel formats", OFFSET(pix_fmts), AV_OPT_TYPE_STRING, .flags = AV_OPT_FLAG_VIDEO_PARAM },
- { NULL },
+ { NULL }
};
#if CONFIG_FORMAT_FILTER
-static const AVClass format_class = {
- .class_name = "format",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+#define format_options options
+AVFILTER_DEFINE_CLASS(format);
static const AVFilterPad avfilter_vf_format_inputs[] = {
{
@@ -173,29 +168,26 @@ static const AVFilterPad avfilter_vf_format_outputs[] = {
};
AVFilter ff_vf_format = {
- .name = "format",
- .description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
+ .name = "format",
+ .description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
- .init = init,
- .uninit = uninit,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
- .priv_size = sizeof(FormatContext),
- .priv_class = &format_class,
+ .priv_size = sizeof(FormatContext),
+ .priv_class = &format_class,
- .inputs = avfilter_vf_format_inputs,
- .outputs = avfilter_vf_format_outputs,
+ .inputs = avfilter_vf_format_inputs,
+ .outputs = avfilter_vf_format_outputs,
};
#endif /* CONFIG_FORMAT_FILTER */
#if CONFIG_NOFORMAT_FILTER
-static const AVClass noformat_class = {
- .class_name = "noformat",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+
+#define noformat_options options
+AVFILTER_DEFINE_CLASS(noformat);
static const AVFilterPad avfilter_vf_noformat_inputs[] = {
{
@@ -215,18 +207,18 @@ static const AVFilterPad avfilter_vf_noformat_outputs[] = {
};
AVFilter ff_vf_noformat = {
- .name = "noformat",
- .description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
+ .name = "noformat",
+ .description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
- .init = init,
- .uninit = uninit,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
- .priv_size = sizeof(FormatContext),
- .priv_class = &noformat_class,
+ .priv_size = sizeof(FormatContext),
+ .priv_class = &noformat_class,
- .inputs = avfilter_vf_noformat_inputs,
- .outputs = avfilter_vf_noformat_outputs,
+ .inputs = avfilter_vf_noformat_inputs,
+ .outputs = avfilter_vf_noformat_outputs,
};
#endif /* CONFIG_NOFORMAT_FILTER */
diff --git a/libavfilter/vf_fps.c b/libavfilter/vf_fps.c
index ea22d37..a38633d 100644
--- a/libavfilter/vf_fps.c
+++ b/libavfilter/vf_fps.c
@@ -1,18 +1,22 @@
/*
- * This file is part of Libav.
+ * Copyright 2007 Bobby Bingham
+ * Copyright 2012 Robert Nagy <ronag89 gmail com>
+ * Copyright 2012 Anton Khirnov <anton khirnov net>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -41,12 +45,11 @@ typedef struct FPSContext {
/* timestamps in input timebase */
int64_t first_pts; ///< pts of the first frame that arrived on this filter
- int64_t pts; ///< pts of the first frame currently in the fifo
double start_time; ///< pts, in seconds, of the expected first frame
AVRational framerate; ///< target framerate
- char *fps; ///< a string describing target framerate
+ int rounding; ///< AVRounding method for timestamps
/* statistics */
int frames_in; ///< number of frames on input
@@ -57,33 +60,28 @@ typedef struct FPSContext {
#define OFFSET(x) offsetof(FPSContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "fps", "A string describing desired output framerate", OFFSET(fps), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = V },
+#define F AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption fps_options[] = {
+ { "fps", "A string describing desired output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = V|F },
{ "start_time", "Assume the first PTS should be this value.", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX}, -DBL_MAX, DBL_MAX, V },
- { NULL },
+ { "round", "set rounding method for timestamps", OFFSET(rounding), AV_OPT_TYPE_INT, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
+ { "zero", "round towards 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_ZERO }, 0, 5, V|F, "round" },
+ { "inf", "round away from 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_INF }, 0, 5, V|F, "round" },
+ { "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" },
+ { "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" },
+ { "near", "round to nearest", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
+ { NULL }
};
-static const AVClass class = {
- .class_name = "FPS filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(fps);
static av_cold int init(AVFilterContext *ctx)
{
FPSContext *s = ctx->priv;
- int ret;
-
- if ((ret = av_parse_video_rate(&s->framerate, s->fps)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error parsing framerate %s.\n", s->fps);
- return ret;
- }
- if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*))))
+ if (!(s->fifo = av_fifo_alloc_array(2, sizeof(AVFrame*))))
return AVERROR(ENOMEM);
- s->pts = AV_NOPTS_VALUE;
s->first_pts = AV_NOPTS_VALUE;
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den);
@@ -105,7 +103,7 @@ static av_cold void uninit(AVFilterContext *ctx)
if (s->fifo) {
s->drop += av_fifo_size(s->fifo) / sizeof(AVFrame*);
flush_fifo(s->fifo);
- av_fifo_free(s->fifo);
+ av_fifo_freep(&s->fifo);
}
av_log(ctx, AV_LOG_VERBOSE, "%d frames in, %d frames out; %d frames dropped, "
@@ -116,7 +114,8 @@ static int config_props(AVFilterLink* link)
{
FPSContext *s = link->src->priv;
- link->time_base = (AVRational){ s->framerate.den, s->framerate.num };
+ link->time_base = av_inv_q(s->framerate);
+ link->frame_rate= s->framerate;
link->w = link->src->inputs[0]->w;
link->h = link->src->inputs[0]->h;
@@ -178,22 +177,22 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
s->frames_in++;
/* discard frames until we get the first timestamp */
- if (s->pts == AV_NOPTS_VALUE) {
+ if (s->first_pts == AV_NOPTS_VALUE) {
if (buf->pts != AV_NOPTS_VALUE) {
ret = write_to_fifo(s->fifo, buf);
if (ret < 0)
return ret;
- if (s->start_time != DBL_MAX) {
+ if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
double first_pts = s->start_time * AV_TIME_BASE;
first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX);
- s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
+ s->first_pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
inlink->time_base);
av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n",
s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q,
outlink->time_base));
} else {
- s->first_pts = s->pts = buf->pts;
+ s->first_pts = buf->pts;
}
} else {
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
@@ -205,13 +204,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}
/* now wait for the next timestamp */
- if (buf->pts == AV_NOPTS_VALUE) {
+ if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) {
return write_to_fifo(s->fifo, buf);
}
/* number of output frames */
- delta = av_rescale_q(buf->pts - s->pts, inlink->time_base,
- outlink->time_base);
+ delta = av_rescale_q_rnd(buf->pts - s->first_pts, inlink->time_base,
+ outlink->time_base, s->rounding) - s->frames_out ;
if (delta < 1) {
/* drop the frame and everything buffered except the first */
@@ -266,15 +265,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
flush_fifo(s->fifo);
ret = write_to_fifo(s->fifo, buf);
- s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base);
return ret;
}
static const AVFilterPad avfilter_vf_fps_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -292,14 +290,11 @@ static const AVFilterPad avfilter_vf_fps_outputs[] = {
AVFilter ff_vf_fps = {
.name = "fps",
- .description = NULL_IF_CONFIG_SMALL("Force constant framerate"),
-
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(FPSContext),
- .priv_class = &class,
-
- .inputs = avfilter_vf_fps_inputs,
- .outputs = avfilter_vf_fps_outputs,
+ .description = NULL_IF_CONFIG_SMALL("Force constant framerate."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(FPSContext),
+ .priv_class = &fps_class,
+ .inputs = avfilter_vf_fps_inputs,
+ .outputs = avfilter_vf_fps_outputs,
};
diff --git a/libavfilter/vf_framepack.c b/libavfilter/vf_framepack.c
index f5c761a..8a7d4e8 100644
--- a/libavfilter/vf_framepack.c
+++ b/libavfilter/vf_framepack.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2013 Vittorio Giovara
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/vf_framestep.c b/libavfilter/vf_framestep.c
new file mode 100644
index 0000000..09945e1
--- /dev/null
+++ b/libavfilter/vf_framestep.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file framestep filter, inspired on libmpcodecs/vf_framestep.c by
+ * Daniele Fornighieri <guru AT digitalfantasy it>.
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct NullContext {
+ const AVClass *class;
+ int frame_step;
+} FrameStepContext;
+
+#define OFFSET(x) offsetof(FrameStepContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption framestep_options[] = {
+ { "step", "set frame step", OFFSET(frame_step), AV_OPT_TYPE_INT, {.i64=1}, 1, INT_MAX, FLAGS},
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(framestep);
+
+static int config_output_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ FrameStepContext *framestep = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ outlink->frame_rate =
+ av_div_q(inlink->frame_rate, (AVRational){framestep->frame_step, 1});
+
+ av_log(ctx, AV_LOG_VERBOSE, "step:%d frame_rate:%d/%d(%f) -> frame_rate:%d/%d(%f)\n",
+ framestep->frame_step,
+ inlink->frame_rate.num, inlink->frame_rate.den, av_q2d(inlink->frame_rate),
+ outlink->frame_rate.num, outlink->frame_rate.den, av_q2d(outlink->frame_rate));
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
+{
+ FrameStepContext *framestep = inlink->dst->priv;
+
+ if (!(inlink->frame_count % framestep->frame_step)) {
+ return ff_filter_frame(inlink->dst->outputs[0], ref);
+ } else {
+ av_frame_free(&ref);
+ return 0;
+ }
+}
+
+static const AVFilterPad framestep_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad framestep_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_framestep = {
+ .name = "framestep",
+ .description = NULL_IF_CONFIG_SMALL("Select one frame every N frames."),
+ .priv_size = sizeof(FrameStepContext),
+ .priv_class = &framestep_class,
+ .inputs = framestep_inputs,
+ .outputs = framestep_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c
index 771443d..9f86838 100644
--- a/libavfilter/vf_frei0r.c
+++ b/libavfilter/vf_frei0r.c
@@ -1,19 +1,19 @@
/*
* Copyright (c) 2010 Stefano Sabatini
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,6 +29,7 @@
#include <stdlib.h>
#include "config.h"
#include "libavutil/avstring.h"
+#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
@@ -67,8 +68,7 @@ typedef struct Frei0rContext {
char *dl_name;
char *params;
- char *size;
- char *framerate;
+ AVRational framerate;
/* only used by the source */
int w, h;
@@ -139,6 +139,9 @@ static int set_params(AVFilterContext *ctx, const char *params)
Frei0rContext *s = ctx->priv;
int i;
+ if (!params)
+ return 0;
+
for (i = 0; i < s->plugin_info.num_params; i++) {
f0r_param_info_t info;
char *param;
@@ -149,7 +152,8 @@ static int set_params(AVFilterContext *ctx, const char *params)
if (*params) {
if (!(param = av_get_token(&params, "|")))
return AVERROR(ENOMEM);
- params++; /* skip ':' */
+ if (*params)
+ params++; /* skip ':' */
ret = set_param(ctx, info, i, param);
av_free(param);
if (ret < 0)
@@ -208,13 +212,15 @@ static int set_params(AVFilterContext *ctx, const char *params)
return 0;
}
-static void *load_path(AVFilterContext *ctx, const char *prefix, const char *name)
+static int load_path(AVFilterContext *ctx, void **handle_ptr, const char *prefix, const char *name)
{
- char path[1024];
-
- snprintf(path, sizeof(path), "%s%s%s", prefix, name, SLIBSUF);
+ char *path = av_asprintf("%s%s%s", prefix, name, SLIBSUF);
+ if (!path)
+ return AVERROR(ENOMEM);
av_log(ctx, AV_LOG_DEBUG, "Looking for frei0r effect in '%s'.\n", path);
- return dlopen(path, RTLD_NOW|RTLD_LOCAL);
+ *handle_ptr = dlopen(path, RTLD_NOW|RTLD_LOCAL);
+ av_free(path);
+ return 0;
}
static av_cold int frei0r_init(AVFilterContext *ctx,
@@ -225,35 +231,62 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
f0r_get_plugin_info_f f0r_get_plugin_info;
f0r_plugin_info_t *pi;
char *path;
+ int ret = 0;
+ int i;
+ static const char* const frei0r_pathlist[] = {
+ "/usr/local/lib/frei0r-1/",
+ "/usr/lib/frei0r-1/",
+ "/usr/local/lib64/frei0r-1/",
+ "/usr/lib64/frei0r-1/"
+ };
if (!dl_name) {
av_log(ctx, AV_LOG_ERROR, "No filter name provided.\n");
return AVERROR(EINVAL);
}
- /* see: http://piksel.org/frei0r/1.2/spec/1.2/spec/group__pluglocations.html */
- if (path = getenv("FREI0R_PATH")) {
- while(*path) {
- char *ptr = av_get_token((const char **)&path, ":");
- if (!ptr)
- return AVERROR(ENOMEM);
- s->dl_handle = load_path(ctx, ptr, dl_name);
- av_freep(&ptr);
+ /* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */
+ if ((path = av_strdup(getenv("FREI0R_PATH")))) {
+#ifdef _WIN32
+ const char *separator = ";";
+#else
+ const char *separator = ":";
+#endif
+ char *p, *ptr = NULL;
+ for (p = path; p = av_strtok(p, separator, &ptr); p = NULL) {
+ /* add additional trailing slash in case it is missing */
+ char *p1 = av_asprintf("%s/", p);
+ if (!p1) {
+ ret = AVERROR(ENOMEM);
+ goto check_path_end;
+ }
+ ret = load_path(ctx, &s->dl_handle, p1, dl_name);
+ av_free(p1);
+ if (ret < 0)
+ goto check_path_end;
if (s->dl_handle)
- break; /* found */
- if (*path)
- path++; /* skip ':' */
+ break;
}
+
+ check_path_end:
+ av_free(path);
+ if (ret < 0)
+ return ret;
}
if (!s->dl_handle && (path = getenv("HOME"))) {
- char prefix[1024];
- snprintf(prefix, sizeof(prefix), "%s/.frei0r-1/lib/", path);
- s->dl_handle = load_path(ctx, prefix, dl_name);
+ char *prefix = av_asprintf("%s/.frei0r-1/lib/", path);
+ if (!prefix)
+ return AVERROR(ENOMEM);
+ ret = load_path(ctx, &s->dl_handle, prefix, dl_name);
+ av_free(prefix);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; !s->dl_handle && i < FF_ARRAY_ELEMS(frei0r_pathlist); i++) {
+ ret = load_path(ctx, &s->dl_handle, frei0r_pathlist[i], dl_name);
+ if (ret < 0)
+ return ret;
}
- if (!s->dl_handle)
- s->dl_handle = load_path(ctx, "/usr/local/lib/frei0r-1/", dl_name);
- if (!s->dl_handle)
- s->dl_handle = load_path(ctx, "/usr/lib/frei0r-1/", dl_name);
if (!s->dl_handle) {
av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'.\n", dl_name);
return AVERROR(EINVAL);
@@ -379,19 +412,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(Frei0rContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption filter_options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption frei0r_options[] = {
{ "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass filter_class = {
- .class_name = "frei0r",
- .item_name = av_default_item_name,
- .option = filter_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(frei0r);
static const AVFilterPad avfilter_vf_frei0r_inputs[] = {
{
@@ -412,38 +440,23 @@ static const AVFilterPad avfilter_vf_frei0r_outputs[] = {
};
AVFilter ff_vf_frei0r = {
- .name = "frei0r",
- .description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
-
+ .name = "frei0r",
+ .description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
.query_formats = query_formats,
- .init = filter_init,
- .uninit = uninit,
-
- .priv_size = sizeof(Frei0rContext),
- .priv_class = &filter_class,
-
- .inputs = avfilter_vf_frei0r_inputs,
-
- .outputs = avfilter_vf_frei0r_outputs,
+ .init = filter_init,
+ .uninit = uninit,
+ .priv_size = sizeof(Frei0rContext),
+ .priv_class = &frei0r_class,
+ .inputs = avfilter_vf_frei0r_inputs,
+ .outputs = avfilter_vf_frei0r_outputs,
};
static av_cold int source_init(AVFilterContext *ctx)
{
Frei0rContext *s = ctx->priv;
- AVRational frame_rate_q;
- if (av_parse_video_size(&s->w, &s->h, s->size) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'.\n", s->size);
- return AVERROR(EINVAL);
- }
-
- if (av_parse_video_rate(&frame_rate_q, s->framerate) < 0 ||
- frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'.\n", s->framerate);
- return AVERROR(EINVAL);
- }
- s->time_base.num = frame_rate_q.den;
- s->time_base.den = frame_rate_q.num;
+ s->time_base.num = s->framerate.den;
+ s->time_base.den = s->framerate.num;
return frei0r_init(ctx, s->dl_name, F0R_PLUGIN_TYPE_SOURCE);
}
@@ -458,6 +471,7 @@ static int source_config_props(AVFilterLink *outlink)
outlink->w = s->w;
outlink->h = s->h;
outlink->time_base = s->time_base;
+ outlink->sample_aspect_ratio = (AVRational){1,1};
if (s->destruct && s->instance)
s->destruct(s->instance);
@@ -490,20 +504,15 @@ static int source_request_frame(AVFilterLink *outlink)
return ff_filter_frame(outlink, frame);
}
-static const AVOption src_options[] = {
- { "size", "Dimensions of the generated video.", OFFSET(size), AV_OPT_TYPE_STRING, { .str = "" }, .flags = FLAGS },
- { "framerate", NULL, OFFSET(framerate), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = FLAGS },
+static const AVOption frei0r_src_options[] = {
+ { "size", "Dimensions of the generated video.", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, { .str = "320x240" }, .flags = FLAGS },
+ { "framerate", NULL, OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = FLAGS },
{ "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ NULL },
};
-static const AVClass src_class = {
- .class_name = "frei0r_src",
- .item_name = av_default_item_name,
- .option = src_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(frei0r_src);
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
{
@@ -516,17 +525,13 @@ static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
};
AVFilter ff_vsrc_frei0r_src = {
- .name = "frei0r_src",
- .description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
-
- .priv_size = sizeof(Frei0rContext),
- .priv_class = &src_class,
- .init = source_init,
- .uninit = uninit,
-
+ .name = "frei0r_src",
+ .description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
+ .priv_size = sizeof(Frei0rContext),
+ .priv_class = &frei0r_src_class,
+ .init = source_init,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_frei0r_src_outputs,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_frei0r_src_outputs,
};
diff --git a/libavfilter/vf_geq.c b/libavfilter/vf_geq.c
new file mode 100644
index 0000000..49a3e62
--- /dev/null
+++ b/libavfilter/vf_geq.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2012 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Generic equation change filter
+ * Originally written by Michael Niedermayer for the MPlayer project, and
+ * ported by Clément Bœsch for FFmpeg.
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ AVExpr *e[4]; ///< expressions for each plane
+ char *expr_str[4+3]; ///< expression strings for each plane
+ AVFrame *picref; ///< current input buffer
+ int hsub, vsub; ///< chroma subsampling
+ int planes; ///< number of planes
+ int is_rgb;
+} GEQContext;
+
+enum { Y = 0, U, V, A, G, B, R };
+
+#define OFFSET(x) offsetof(GEQContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption geq_options[] = {
+ { "lum_expr", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "lum", "set luminance expression", OFFSET(expr_str[Y]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cb_expr", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cb", "set chroma blue expression", OFFSET(expr_str[U]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cr_expr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "cr", "set chroma red expression", OFFSET(expr_str[V]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "alpha_expr", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "a", "set alpha expression", OFFSET(expr_str[A]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "red_expr", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "r", "set red expression", OFFSET(expr_str[R]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "green_expr", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "g", "set green expression", OFFSET(expr_str[G]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "blue_expr", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "b", "set blue expression", OFFSET(expr_str[B]), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(geq);
+
+static inline double getpix(void *priv, double x, double y, int plane)
+{
+ int xi, yi;
+ GEQContext *geq = priv;
+ AVFrame *picref = geq->picref;
+ const uint8_t *src = picref->data[plane];
+ const int linesize = picref->linesize[plane];
+ const int w = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width;
+ const int h = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height;
+
+ if (!src)
+ return 0;
+
+ xi = x = av_clipf(x, 0, w - 2);
+ yi = y = av_clipf(y, 0, h - 2);
+
+ x -= xi;
+ y -= yi;
+
+ return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
+ + y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
+}
+
+//TODO: cubic interpolate
+//TODO: keep the last few frames
+static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); }
+static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1); }
+static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); }
+static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); }
+
+static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", "T", NULL };
+enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_T, VAR_VARS_NB };
+
+static av_cold int geq_init(AVFilterContext *ctx)
+{
+ GEQContext *geq = ctx->priv;
+ int plane, ret = 0;
+
+ if (!geq->expr_str[Y] && !geq->expr_str[G] && !geq->expr_str[B] && !geq->expr_str[R]) {
+ av_log(ctx, AV_LOG_ERROR, "A luminance or RGB expression is mandatory\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ geq->is_rgb = !geq->expr_str[Y];
+
+ if ((geq->expr_str[Y] || geq->expr_str[U] || geq->expr_str[V]) && (geq->expr_str[G] || geq->expr_str[B] || geq->expr_str[R])) {
+ av_log(ctx, AV_LOG_ERROR, "Either YCbCr or RGB but not both must be specified\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ if (!geq->expr_str[U] && !geq->expr_str[V]) {
+ /* No chroma at all: fallback on luma */
+ geq->expr_str[U] = av_strdup(geq->expr_str[Y]);
+ geq->expr_str[V] = av_strdup(geq->expr_str[Y]);
+ } else {
+ /* One chroma unspecified, fallback on the other */
+ if (!geq->expr_str[U]) geq->expr_str[U] = av_strdup(geq->expr_str[V]);
+ if (!geq->expr_str[V]) geq->expr_str[V] = av_strdup(geq->expr_str[U]);
+ }
+
+ if (!geq->expr_str[A])
+ geq->expr_str[A] = av_strdup("255");
+ if (!geq->expr_str[G])
+ geq->expr_str[G] = av_strdup("g(X,Y)");
+ if (!geq->expr_str[B])
+ geq->expr_str[B] = av_strdup("b(X,Y)");
+ if (!geq->expr_str[R])
+ geq->expr_str[R] = av_strdup("r(X,Y)");
+
+ if (geq->is_rgb ?
+ (!geq->expr_str[G] || !geq->expr_str[B] || !geq->expr_str[R])
+ :
+ (!geq->expr_str[U] || !geq->expr_str[V] || !geq->expr_str[A])) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ for (plane = 0; plane < 4; plane++) {
+ static double (*p[])(void *, double, double) = { lum, cb, cr, alpha };
+ static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL };
+ static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL };
+ const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names;
+ double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL };
+
+ ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names,
+ NULL, NULL, func2_names, func2, 0, ctx);
+ if (ret < 0)
+ break;
+ }
+
+end:
+ return ret;
+}
+
+static int geq_query_formats(AVFilterContext *ctx)
+{
+ GEQContext *geq = ctx->priv;
+ static const enum PixelFormat yuv_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+ static const enum PixelFormat rgb_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_NONE
+ };
+ if (geq->is_rgb) {
+ ff_set_common_formats(ctx, ff_make_format_list(rgb_pix_fmts));
+ } else
+ ff_set_common_formats(ctx, ff_make_format_list(yuv_pix_fmts));
+ return 0;
+}
+
+static int geq_config_props(AVFilterLink *inlink)
+{
+ GEQContext *geq = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ geq->hsub = desc->log2_chroma_w;
+ geq->vsub = desc->log2_chroma_h;
+ geq->planes = desc->nb_components;
+ return 0;
+}
+
+static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ int plane;
+ GEQContext *geq = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out;
+ double values[VAR_VARS_NB] = {
+ [VAR_N] = inlink->frame_count,
+ [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
+ };
+
+ geq->picref = in;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
+ int x, y;
+ uint8_t *dst = out->data[plane];
+ const int linesize = out->linesize[plane];
+ const int w = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
+ const int h = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
+
+ values[VAR_W] = w;
+ values[VAR_H] = h;
+ values[VAR_SW] = w / (double)inlink->w;
+ values[VAR_SH] = h / (double)inlink->h;
+
+ for (y = 0; y < h; y++) {
+ values[VAR_Y] = y;
+ for (x = 0; x < w; x++) {
+ values[VAR_X] = x;
+ dst[x] = av_expr_eval(geq->e[plane], values, geq);
+ }
+ dst += linesize;
+ }
+ }
+
+ av_frame_free(&geq->picref);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void geq_uninit(AVFilterContext *ctx)
+{
+ int i;
+ GEQContext *geq = ctx->priv;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
+ av_expr_free(geq->e[i]);
+}
+
+static const AVFilterPad geq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = geq_config_props,
+ .filter_frame = geq_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad geq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_geq = {
+ .name = "geq",
+ .description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
+ .priv_size = sizeof(GEQContext),
+ .init = geq_init,
+ .uninit = geq_uninit,
+ .query_formats = geq_query_formats,
+ .inputs = geq_inputs,
+ .outputs = geq_outputs,
+ .priv_class = &geq_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_gradfun.c b/libavfilter/vf_gradfun.c
index f7c4372..0da9e0b 100644
--- a/libavfilter/vf_gradfun.c
+++ b/libavfilter/vf_gradfun.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
* Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -54,7 +54,7 @@ DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
{0x54,0x34,0x4C,0x2C,0x52,0x32,0x4A,0x2A},
};
-void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers)
+void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
int x;
for (x = 0; x < width; dc += x & 1, x++) {
@@ -68,7 +68,7 @@ void ff_gradfun_filter_line_c(uint8_t *dst, uint8_t *src, uint16_t *dc, int widt
}
}
-void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width)
+void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width)
{
int x, v, old;
for (x = 0; x < width; x++) {
@@ -79,7 +79,7 @@ void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t
}
}
-static void filter(GradFunContext *ctx, uint8_t *dst, uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r)
+static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r)
{
int bstride = FFALIGN(width, 16) / 2;
int y;
@@ -126,9 +126,9 @@ static av_cold int init(AVFilterContext *ctx)
GradFunContext *s = ctx->priv;
s->thresh = (1 << 15) / s->strength;
- s->radius &= ~1;
+ s->radius = av_clip((s->radius + 1) & ~1, 4, 32);
- s->blur_line = ff_gradfun_blur_line_c;
+ s->blur_line = ff_gradfun_blur_line_c;
s->filter_line = ff_gradfun_filter_line_c;
if (ARCH_X86)
@@ -149,9 +149,10 @@ static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P,
- AV_PIX_FMT_GRAY8, AV_PIX_FMT_NV12,
- AV_PIX_FMT_NV21, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_GBRP,
AV_PIX_FMT_NONE
};
@@ -168,12 +169,12 @@ static int config_input(AVFilterLink *inlink)
int vsub = desc->log2_chroma_h;
av_freep(&s->buf);
- s->buf = av_mallocz((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32) * sizeof(uint16_t));
+ s->buf = av_calloc((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32), sizeof(*s->buf));
if (!s->buf)
return AVERROR(ENOMEM);
- s->chroma_w = -((-inlink->w) >> hsub);
- s->chroma_h = -((-inlink->h) >> vsub);
+ s->chroma_w = FF_CEIL_RSHIFT(inlink->w, hsub);
+ s->chroma_h = FF_CEIL_RSHIFT(inlink->h, vsub);
s->chroma_r = av_clip(((((s->radius >> hsub) + (s->radius >> vsub)) / 2 ) + 1) & ~1, 4, 32);
return 0;
@@ -196,13 +197,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_free(&in);
return AVERROR(ENOMEM);
}
-
av_frame_copy_props(out, in);
- out->width = outlink->w;
- out->height = outlink->h;
}
- for (p = 0; p < 4 && in->data[p]; p++) {
+ for (p = 0; p < 4 && in->data[p] && in->linesize[p]; p++) {
int w = inlink->w;
int h = inlink->h;
int r = s->radius;
@@ -225,19 +223,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(GradFunContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption gradfun_options[] = {
{ "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
{ "radius", "The neighborhood to fit the gradient to.", OFFSET(radius), AV_OPT_TYPE_INT, { .i64 = 16 }, 4, 32, FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass gradfun_class = {
- .class_name = "gradfun",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(gradfun);
static const AVFilterPad avfilter_vf_gradfun_inputs[] = {
{
@@ -265,7 +259,7 @@ AVFilter ff_vf_gradfun = {
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_gradfun_inputs,
- .outputs = avfilter_vf_gradfun_outputs,
+ .inputs = avfilter_vf_gradfun_inputs,
+ .outputs = avfilter_vf_gradfun_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_hflip.c b/libavfilter/vf_hflip.c
index 1eb8d69..519a609 100644
--- a/libavfilter/vf_hflip.c
+++ b/libavfilter/vf_hflip.c
@@ -2,20 +2,20 @@
* Copyright (c) 2007 Benoit Fouet
* Copyright (c) 2010 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -37,38 +37,25 @@
typedef struct FlipContext {
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
- int hsub, vsub; ///< chroma subsampling
+ int planewidth[4]; ///< width of each plane
+ int planeheight[4]; ///< height of each plane
} FlipContext;
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGB48LE,
- AV_PIX_FMT_BGR48BE, AV_PIX_FMT_BGR48LE,
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE,
- AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE,
- AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE,
- AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE,
- AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE,
- AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE,
- AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE,
- AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8,
- AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
- AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
+ (desc->log2_chroma_w != desc->log2_chroma_h &&
+ desc->comp[0].plane == desc->comp[1].plane)))
+ ff_add_format(&pix_fmts, fmt);
+ }
+
+ ff_set_common_formats(ctx, pix_fmts);
return 0;
}
@@ -76,41 +63,45 @@ static int config_props(AVFilterLink *inlink)
{
FlipContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int hsub = pix_desc->log2_chroma_w;
+ const int vsub = pix_desc->log2_chroma_h;
av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, hsub);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, vsub);
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
- AVFilterContext *ctx = inlink->dst;
- FlipContext *s = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- AVFrame *out;
+ FlipContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
uint8_t *inrow, *outrow;
- int i, j, plane, step, hsub, vsub;
+ int i, j, plane, step;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
- }
- av_frame_copy_props(out, in);
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
+ const int width = s->planewidth[plane];
+ const int height = s->planeheight[plane];
+ const int start = (height * job ) / nb_jobs;
+ const int end = (height * (job+1)) / nb_jobs;
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
step = s->max_step[plane];
- hsub = (plane == 1 || plane == 2) ? s->hsub : 0;
- vsub = (plane == 1 || plane == 2) ? s->vsub : 0;
- outrow = out->data[plane];
- inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step;
- for (i = 0; i < in->height >> vsub; i++) {
+ outrow = out->data[plane] + start * out->linesize[plane];
+ inrow = in ->data[plane] + start * in->linesize[plane] + (width - 1) * step;
+ for (i = start; i < end; i++) {
switch (step) {
case 1:
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow[j] = inrow[-j];
break;
@@ -118,7 +109,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint16_t *outrow16 = (uint16_t *)outrow;
uint16_t * inrow16 = (uint16_t *) inrow;
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow16[j] = inrow16[-j];
}
break;
@@ -127,7 +118,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint8_t *in = inrow;
uint8_t *out = outrow;
- for (j = 0; j < (inlink->w >> hsub); j++, out += 3, in -= 3) {
+ for (j = 0; j < width; j++, out += 3, in -= 3) {
int32_t v = AV_RB24(in);
AV_WB24(out, v);
}
@@ -138,13 +129,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
uint32_t *outrow32 = (uint32_t *)outrow;
uint32_t * inrow32 = (uint32_t *) inrow;
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
outrow32[j] = inrow32[-j];
}
break;
default:
- for (j = 0; j < (inlink->w >> hsub); j++)
+ for (j = 0; j < width; j++)
memcpy(outrow + j*step, inrow - j*step, step);
}
@@ -153,6 +144,30 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
}
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td;
+ AVFrame *out;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ /* copy palette if required */
+ if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
+ memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
+
+ td.in = in, td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
+
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@@ -176,11 +191,11 @@ static const AVFilterPad avfilter_vf_hflip_outputs[] = {
};
AVFilter ff_vf_hflip = {
- .name = "hflip",
- .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
- .priv_size = sizeof(FlipContext),
+ .name = "hflip",
+ .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
+ .priv_size = sizeof(FlipContext),
.query_formats = query_formats,
-
- .inputs = avfilter_vf_hflip_inputs,
- .outputs = avfilter_vf_hflip_outputs,
+ .inputs = avfilter_vf_hflip_inputs,
+ .outputs = avfilter_vf_hflip_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/libavfilter/vf_histeq.c b/libavfilter/vf_histeq.c
new file mode 100644
index 0000000..6fdb7be
--- /dev/null
+++ b/libavfilter/vf_histeq.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2012 Jeremy Tran
+ * Copyright (c) 2001 Donald A. Graft
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Histogram equalization filter, based on the VirtualDub filter by
+ * Donald A. Graft <neuron2 AT home DOT com>.
+ * Implements global automatic contrast adjustment by means of
+ * histogram equalization.
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+// #define DEBUG
+
+// Linear Congruential Generator, see "Numerical Recipes"
+#define LCG_A 4096
+#define LCG_C 150889
+#define LCG_M 714025
+#define LCG(x) (((x) * LCG_A + LCG_C) % LCG_M)
+#define LCG_SEED 739187
+
+enum HisteqAntibanding {
+ HISTEQ_ANTIBANDING_NONE = 0,
+ HISTEQ_ANTIBANDING_WEAK = 1,
+ HISTEQ_ANTIBANDING_STRONG = 2,
+ HISTEQ_ANTIBANDING_NB,
+};
+
+typedef struct {
+ const AVClass *class;
+ float strength;
+ float intensity;
+ enum HisteqAntibanding antibanding;
+ int in_histogram [256]; ///< input histogram
+ int out_histogram[256]; ///< output histogram
+ int LUT[256]; ///< lookup table derived from histogram[]
+ uint8_t rgba_map[4]; ///< components position
+ int bpp; ///< bytes per pixel
+} HisteqContext;
+
+#define OFFSET(x) offsetof(HisteqContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption histeq_options[] = {
+ { "strength", "set the strength", OFFSET(strength), AV_OPT_TYPE_FLOAT, {.dbl=0.2}, 0, 1, FLAGS },
+ { "intensity", "set the intensity", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0.21}, 0, 1, FLAGS },
+ { "antibanding", "set the antibanding level", OFFSET(antibanding), AV_OPT_TYPE_INT, {.i64=HISTEQ_ANTIBANDING_NONE}, 0, HISTEQ_ANTIBANDING_NB-1, FLAGS, "antibanding" },
+ CONST("none", "apply no antibanding", HISTEQ_ANTIBANDING_NONE, "antibanding"),
+ CONST("weak", "apply weak antibanding", HISTEQ_ANTIBANDING_WEAK, "antibanding"),
+ CONST("strong", "apply strong antibanding", HISTEQ_ANTIBANDING_STRONG, "antibanding"),
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(histeq);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ HisteqContext *histeq = ctx->priv;
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "strength:%0.3f intensity:%0.3f antibanding:%d\n",
+ histeq->strength, histeq->intensity, histeq->antibanding);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ HisteqContext *histeq = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+ histeq->bpp = av_get_bits_per_pixel(pix_desc) / 8;
+ ff_fill_rgba_map(histeq->rgba_map, inlink->format);
+
+ return 0;
+}
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+#define GET_RGB_VALUES(r, g, b, src, map) do { \
+ r = src[x + map[R]]; \
+ g = src[x + map[G]]; \
+ b = src[x + map[B]]; \
+} while (0)
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ AVFilterContext *ctx = inlink->dst;
+ HisteqContext *histeq = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int strength = histeq->strength * 1000;
+ int intensity = histeq->intensity * 1000;
+ int x, y, i, luthi, lutlo, lut, luma, oluma, m;
+ AVFrame *outpic;
+ unsigned int r, g, b, jran;
+ uint8_t *src, *dst;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+
+ /* Seed random generator for antibanding. */
+ jran = LCG_SEED;
+
+ /* Calculate and store the luminance and calculate the global histogram
+ based on the luminance. */
+ memset(histeq->in_histogram, 0, sizeof(histeq->in_histogram));
+ src = inpic->data[0];
+ dst = outpic->data[0];
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) {
+ GET_RGB_VALUES(r, g, b, src, histeq->rgba_map);
+ luma = (55 * r + 182 * g + 19 * b) >> 8;
+ dst[x + histeq->rgba_map[A]] = luma;
+ histeq->in_histogram[luma]++;
+ }
+ src += inpic->linesize[0];
+ dst += outpic->linesize[0];
+ }
+
+#ifdef DEBUG
+ for (x = 0; x < 256; x++)
+ av_dlog(ctx, "in[%d]: %u\n", x, histeq->in_histogram[x]);
+#endif
+
+ /* Calculate the lookup table. */
+ histeq->LUT[0] = histeq->in_histogram[0];
+ /* Accumulate */
+ for (x = 1; x < 256; x++)
+ histeq->LUT[x] = histeq->LUT[x-1] + histeq->in_histogram[x];
+
+ /* Normalize */
+ for (x = 0; x < 256; x++)
+ histeq->LUT[x] = (histeq->LUT[x] * intensity) / (inlink->h * inlink->w);
+
+ /* Adjust the LUT based on the selected strength. This is an alpha
+ mix of the calculated LUT and a linear LUT with gain 1. */
+ for (x = 0; x < 256; x++)
+ histeq->LUT[x] = (strength * histeq->LUT[x]) / 255 +
+ ((255 - strength) * x) / 255;
+
+ /* Output the equalized frame. */
+ memset(histeq->out_histogram, 0, sizeof(histeq->out_histogram));
+
+ src = inpic->data[0];
+ dst = outpic->data[0];
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w * histeq->bpp; x += histeq->bpp) {
+ luma = dst[x + histeq->rgba_map[A]];
+ if (luma == 0) {
+ for (i = 0; i < histeq->bpp; ++i)
+ dst[x + i] = 0;
+ histeq->out_histogram[0]++;
+ } else {
+ lut = histeq->LUT[luma];
+ if (histeq->antibanding != HISTEQ_ANTIBANDING_NONE) {
+ if (luma > 0) {
+ lutlo = histeq->antibanding == HISTEQ_ANTIBANDING_WEAK ?
+ (histeq->LUT[luma] + histeq->LUT[luma - 1]) / 2 :
+ histeq->LUT[luma - 1];
+ } else
+ lutlo = lut;
+
+ if (luma < 255) {
+ luthi = (histeq->antibanding == HISTEQ_ANTIBANDING_WEAK) ?
+ (histeq->LUT[luma] + histeq->LUT[luma + 1]) / 2 :
+ histeq->LUT[luma + 1];
+ } else
+ luthi = lut;
+
+ if (lutlo != luthi) {
+ jran = LCG(jran);
+ lut = lutlo + ((luthi - lutlo + 1) * jran) / LCG_M;
+ }
+ }
+
+ GET_RGB_VALUES(r, g, b, src, histeq->rgba_map);
+ if (((m = FFMAX3(r, g, b)) * lut) / luma > 255) {
+ r = (r * 255) / m;
+ g = (g * 255) / m;
+ b = (b * 255) / m;
+ } else {
+ r = (r * lut) / luma;
+ g = (g * lut) / luma;
+ b = (b * lut) / luma;
+ }
+ dst[x + histeq->rgba_map[R]] = r;
+ dst[x + histeq->rgba_map[G]] = g;
+ dst[x + histeq->rgba_map[B]] = b;
+ oluma = av_clip_uint8((55 * r + 182 * g + 19 * b) >> 8);
+ histeq->out_histogram[oluma]++;
+ }
+ }
+ src += inpic->linesize[0];
+ dst += outpic->linesize[0];
+ }
+#ifdef DEBUG
+ for (x = 0; x < 256; x++)
+ av_dlog(ctx, "out[%d]: %u\n", x, histeq->out_histogram[x]);
+#endif
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad histeq_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad histeq_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_histeq = {
+ .name = "histeq",
+ .description = NULL_IF_CONFIG_SMALL("Apply global color histogram equalization."),
+ .priv_size = sizeof(HisteqContext),
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = histeq_inputs,
+ .outputs = histeq_outputs,
+ .priv_class = &histeq_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_histogram.c b/libavfilter/vf_histogram.c
new file mode 100644
index 0000000..34656b5
--- /dev/null
+++ b/libavfilter/vf_histogram.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2012-2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum HistogramMode {
+ MODE_LEVELS,
+ MODE_WAVEFORM,
+ MODE_COLOR,
+ MODE_COLOR2,
+ MODE_NB
+};
+
+typedef struct HistogramContext {
+ const AVClass *class; ///< AVClass context for log and options purpose
+ enum HistogramMode mode;
+ unsigned histogram[256];
+ int ncomp;
+ const uint8_t *bg_color;
+ const uint8_t *fg_color;
+ int level_height;
+ int scale_height;
+ int step;
+ int waveform_mode;
+ int waveform_mirror;
+ int display_mode;
+ int levels_mode;
+ const AVPixFmtDescriptor *desc;
+} HistogramContext;
+
+#define OFFSET(x) offsetof(HistogramContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption histogram_options[] = {
+ { "mode", "set histogram mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_LEVELS}, 0, MODE_NB-1, FLAGS, "mode"},
+ { "levels", "standard histogram", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LEVELS}, 0, 0, FLAGS, "mode" },
+ { "waveform", "per row/column luminance graph", 0, AV_OPT_TYPE_CONST, {.i64=MODE_WAVEFORM}, 0, 0, FLAGS, "mode" },
+ { "color", "chroma values in vectorscope", 0, AV_OPT_TYPE_CONST, {.i64=MODE_COLOR}, 0, 0, FLAGS, "mode" },
+ { "color2", "chroma values in vectorscope", 0, AV_OPT_TYPE_CONST, {.i64=MODE_COLOR2}, 0, 0, FLAGS, "mode" },
+ { "level_height", "set level height", OFFSET(level_height), AV_OPT_TYPE_INT, {.i64=200}, 50, 2048, FLAGS},
+ { "scale_height", "set scale height", OFFSET(scale_height), AV_OPT_TYPE_INT, {.i64=12}, 0, 40, FLAGS},
+ { "step", "set waveform step value", OFFSET(step), AV_OPT_TYPE_INT, {.i64=10}, 1, 255, FLAGS},
+ { "waveform_mode", "set waveform mode", OFFSET(waveform_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "waveform_mode"},
+ { "row", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "waveform_mode" },
+ { "column", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "waveform_mode" },
+ { "waveform_mirror", "set waveform mirroring", OFFSET(waveform_mirror), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "waveform_mirror"},
+ { "display_mode", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "display_mode"},
+ { "parade", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "display_mode" },
+ { "overlay", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "display_mode" },
+ { "levels_mode", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"},
+ { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "levels_mode" },
+ { "logarithmic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "levels_mode" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(histogram);
+
+static const enum AVPixelFormat color_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat levels_pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat waveform_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ HistogramContext *h = ctx->priv;
+ const enum AVPixelFormat *pix_fmts;
+
+ switch (h->mode) {
+ case MODE_WAVEFORM:
+ pix_fmts = waveform_pix_fmts;
+ break;
+ case MODE_LEVELS:
+ pix_fmts = levels_pix_fmts;
+ break;
+ case MODE_COLOR:
+ case MODE_COLOR2:
+ pix_fmts = color_pix_fmts;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static const uint8_t black_yuva_color[4] = { 0, 127, 127, 255 };
+static const uint8_t black_gbrp_color[4] = { 0, 0, 0, 255 };
+static const uint8_t white_yuva_color[4] = { 255, 127, 127, 255 };
+static const uint8_t white_gbrp_color[4] = { 255, 255, 255, 255 };
+
+static int config_input(AVFilterLink *inlink)
+{
+ HistogramContext *h = inlink->dst->priv;
+
+ h->desc = av_pix_fmt_desc_get(inlink->format);
+ h->ncomp = h->desc->nb_components;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRP:
+ h->bg_color = black_gbrp_color;
+ h->fg_color = white_gbrp_color;
+ break;
+ default:
+ h->bg_color = black_yuva_color;
+ h->fg_color = white_yuva_color;
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ HistogramContext *h = ctx->priv;
+
+ switch (h->mode) {
+ case MODE_LEVELS:
+ outlink->w = 256;
+ outlink->h = (h->level_height + h->scale_height) * FFMAX(h->ncomp * h->display_mode, 1);
+ break;
+ case MODE_WAVEFORM:
+ if (h->waveform_mode)
+ outlink->h = 256 * FFMAX(h->ncomp * h->display_mode, 1);
+ else
+ outlink->w = 256 * FFMAX(h->ncomp * h->display_mode, 1);
+ break;
+ case MODE_COLOR:
+ case MODE_COLOR2:
+ outlink->h = outlink->w = 256;
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ outlink->sample_aspect_ratio = (AVRational){1,1};
+
+ return 0;
+}
+
+static void gen_waveform(HistogramContext *h, AVFrame *inpicref, AVFrame *outpicref,
+ int component, int intensity, int offset, int col_mode)
+{
+ const int plane = h->desc->comp[component].plane;
+ const int mirror = h->waveform_mirror;
+ const int is_chroma = (component == 1 || component == 2);
+ const int shift_w = (is_chroma ? h->desc->log2_chroma_w : 0);
+ const int shift_h = (is_chroma ? h->desc->log2_chroma_h : 0);
+ const int src_linesize = inpicref->linesize[plane];
+ const int dst_linesize = outpicref->linesize[plane];
+ const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
+ uint8_t *src_data = inpicref->data[plane];
+ uint8_t *dst_data = outpicref->data[plane] + (col_mode ? (offset >> shift_h) * dst_linesize : offset >> shift_w);
+ uint8_t * const dst_bottom_line = dst_data + dst_linesize * ((256 >> shift_h) - 1);
+ uint8_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
+ const uint8_t max = 255 - intensity;
+ const int src_h = FF_CEIL_RSHIFT(inpicref->height, shift_h);
+ const int src_w = FF_CEIL_RSHIFT(inpicref->width, shift_w);
+ uint8_t *dst, *p;
+ int y;
+
+ if (!col_mode && mirror)
+ dst_data += 256 >> shift_w;
+ for (y = 0; y < src_h; y++) {
+ const uint8_t *src_data_end = src_data + src_w;
+ dst = dst_line;
+ for (p = src_data; p < src_data_end; p++) {
+ uint8_t *target;
+ if (col_mode) {
+ target = dst++ + dst_signed_linesize * (*p >> shift_h);
+ } else {
+ if (mirror)
+ target = dst_data - (*p >> shift_w);
+ else
+ target = dst_data + (*p >> shift_w);
+ }
+ if (*target <= max)
+ *target += intensity;
+ else
+ *target = 255;
+ }
+ src_data += src_linesize;
+ dst_data += dst_linesize;
+ }
+}
+
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ HistogramContext *h = inlink->dst->priv;
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ const uint8_t *src;
+ uint8_t *dst;
+ int i, j, k, l;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ out->pts = in->pts;
+
+ for (k = 0; k < h->ncomp; k++) {
+ const int is_chroma = (k == 1 || k == 2);
+ const int dst_h = FF_CEIL_RSHIFT(outlink->h, (is_chroma ? h->desc->log2_chroma_h : 0));
+ const int dst_w = FF_CEIL_RSHIFT(outlink->w, (is_chroma ? h->desc->log2_chroma_w : 0));
+ for (i = 0; i < dst_h ; i++)
+ memset(out->data[h->desc->comp[k].plane] +
+ i * out->linesize[h->desc->comp[k].plane],
+ h->bg_color[k], dst_w);
+ }
+
+ switch (h->mode) {
+ case MODE_LEVELS:
+ for (k = 0; k < h->ncomp; k++) {
+ const int p = h->desc->comp[k].plane;
+ const int start = k * (h->level_height + h->scale_height) * h->display_mode;
+ double max_hval_log;
+ unsigned max_hval = 0;
+
+ for (i = 0; i < in->height; i++) {
+ src = in->data[p] + i * in->linesize[p];
+ for (j = 0; j < in->width; j++)
+ h->histogram[src[j]]++;
+ }
+
+ for (i = 0; i < 256; i++)
+ max_hval = FFMAX(max_hval, h->histogram[i]);
+ max_hval_log = log2(max_hval + 1);
+
+ for (i = 0; i < outlink->w; i++) {
+ int col_height;
+
+ if (h->levels_mode)
+ col_height = round(h->level_height * (1. - (log2(h->histogram[i] + 1) / max_hval_log)));
+ else
+ col_height = h->level_height - (h->histogram[i] * (int64_t)h->level_height + max_hval - 1) / max_hval;
+
+ for (j = h->level_height - 1; j >= col_height; j--) {
+ if (h->display_mode) {
+ for (l = 0; l < h->ncomp; l++)
+ out->data[l][(j + start) * out->linesize[l] + i] = h->fg_color[l];
+ } else {
+ out->data[p][(j + start) * out->linesize[p] + i] = 255;
+ }
+ }
+ for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
+ out->data[p][(j + start) * out->linesize[p] + i] = i;
+ }
+
+ memset(h->histogram, 0, 256 * sizeof(unsigned));
+ }
+ break;
+ case MODE_WAVEFORM:
+ for (k = 0; k < h->ncomp; k++) {
+ const int offset = k * 256 * h->display_mode;
+ gen_waveform(h, in, out, k, h->step, offset, h->waveform_mode);
+ }
+ break;
+ case MODE_COLOR:
+ for (i = 0; i < inlink->h; i++) {
+ const int iw1 = i * in->linesize[1];
+ const int iw2 = i * in->linesize[2];
+ for (j = 0; j < inlink->w; j++) {
+ const int pos = in->data[1][iw1 + j] * out->linesize[0] + in->data[2][iw2 + j];
+ if (out->data[0][pos] < 255)
+ out->data[0][pos]++;
+ }
+ }
+ for (i = 0; i < 256; i++) {
+ dst = out->data[0] + i * out->linesize[0];
+ for (j = 0; j < 256; j++) {
+ if (!dst[j]) {
+ out->data[1][i * out->linesize[0] + j] = i;
+ out->data[2][i * out->linesize[0] + j] = j;
+ }
+ }
+ }
+ break;
+ case MODE_COLOR2:
+ for (i = 0; i < inlink->h; i++) {
+ const int iw1 = i * in->linesize[1];
+ const int iw2 = i * in->linesize[2];
+ for (j = 0; j < inlink->w; j++) {
+ const int u = in->data[1][iw1 + j];
+ const int v = in->data[2][iw2 + j];
+ const int pos = u * out->linesize[0] + v;
+ if (!out->data[0][pos])
+ out->data[0][pos] = FFABS(128 - u) + FFABS(128 - v);
+ out->data[1][pos] = u;
+ out->data[2][pos] = v;
+ }
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_histogram = {
+ .name = "histogram",
+ .description = NULL_IF_CONFIG_SMALL("Compute and draw a histogram."),
+ .priv_size = sizeof(HistogramContext),
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .priv_class = &histogram_class,
+};
diff --git a/libavfilter/vf_hqdn3d.c b/libavfilter/vf_hqdn3d.c
index be6b761..0448c0d 100644
--- a/libavfilter/vf_hqdn3d.c
+++ b/libavfilter/vf_hqdn3d.c
@@ -3,20 +3,20 @@
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2012 Loren Merritt
*
- * This file is part of Libav, ported from MPlayer.
+ * This file is part of FFmpeg, ported from MPlayer.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -133,7 +133,7 @@ static void denoise_depth(HQDN3DContext *s,
uint16_t *frame_ant = *frame_ant_ptr;
if (!frame_ant) {
uint8_t *frame_src = src;
- *frame_ant_ptr = frame_ant = av_malloc(w*h*sizeof(uint16_t));
+ *frame_ant_ptr = frame_ant = av_malloc_array(w, h*sizeof(uint16_t));
for (y = 0; y < h; y++, src += sstride, frame_ant += w)
for (x = 0; x < w; x++)
frame_ant[x] = LOAD(x);
@@ -230,15 +230,15 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUVJ440P,
- AV_NE( AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUV420P9LE ),
- AV_NE( AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUV422P9LE ),
- AV_NE( AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUV444P9LE ),
- AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
- AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
- AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
- AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
- AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
- AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_NONE
};
@@ -259,7 +259,7 @@ static int config_input(AVFilterLink *inlink)
s->vsub = desc->log2_chroma_h;
s->depth = desc->comp[0].depth_minus1+1;
- s->line = av_malloc(inlink->w * sizeof(*s->line));
+ s->line = av_malloc_array(inlink->w, sizeof(*s->line));
if (!s->line)
return AVERROR(ENOMEM);
@@ -277,12 +277,14 @@ static int config_input(AVFilterLink *inlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
- HQDN3DContext *s = inlink->dst->priv;
- AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFilterContext *ctx = inlink->dst;
+ HQDN3DContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+
AVFrame *out;
int direct, c;
- if (av_frame_is_writable(in)) {
+ if (av_frame_is_writable(in) && !ctx->is_disabled) {
direct = 1;
out = in;
} else {
@@ -294,17 +296,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);
- out->width = outlink->w;
- out->height = outlink->h;
}
for (c = 0; c < 3; c++) {
denoise(s, in->data[c], out->data[c],
s->line, &s->frame_prev[c],
- in->width >> (!!c * s->hsub),
- in->height >> (!!c * s->vsub),
+ FF_CEIL_RSHIFT(in->width, (!!c * s->hsub)),
+ FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
in->linesize[c], out->linesize[c],
- s->coefs[c?2:0], s->coefs[c?3:1]);
+ s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
+ s->coefs[c ? CHROMA_TMP : LUMA_TMP]);
+ }
+
+ if (ctx->is_disabled) {
+ av_frame_free(&out);
+ return ff_filter_frame(outlink, in);
}
if (!direct)
@@ -314,21 +320,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(HQDN3DContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption hqdn3d_options[] = {
{ "luma_spatial", "spatial luma strength", OFFSET(strength[LUMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "chroma_spatial", "spatial chroma strength", OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "luma_tmp", "temporal luma strength", OFFSET(strength[LUMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
{ "chroma_tmp", "temporal chroma strength", OFFSET(strength[CHROMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass hqdn3d_class = {
- .class_name = "hqdn3d",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(hqdn3d);
static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
{
@@ -340,6 +341,7 @@ static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
{ NULL }
};
+
static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
{
.name = "default",
@@ -351,14 +353,12 @@ static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
AVFilter ff_vf_hqdn3d = {
.name = "hqdn3d",
.description = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
-
.priv_size = sizeof(HQDN3DContext),
.priv_class = &hqdn3d_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_hqdn3d_inputs,
-
- .outputs = avfilter_vf_hqdn3d_outputs,
+ .inputs = avfilter_vf_hqdn3d_inputs,
+ .outputs = avfilter_vf_hqdn3d_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
diff --git a/libavfilter/vf_hqdn3d.h b/libavfilter/vf_hqdn3d.h
index a344518..be55400 100644
--- a/libavfilter/vf_hqdn3d.h
+++ b/libavfilter/vf_hqdn3d.h
@@ -1,18 +1,22 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2012 Loren Merritt
*
- * Libav is free software; you can redistribute it and/or modify
+ * This file is part of FFmpeg, ported from MPlayer.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
diff --git a/libavfilter/vf_hqx.c b/libavfilter/vf_hqx.c
new file mode 100644
index 0000000..4783381
--- /dev/null
+++ b/libavfilter/vf_hqx.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2014 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file
+ * hqx magnification filters (hq2x, hq3x, hq4x)
+ *
+ * Originally designed by Maxim Stephin.
+ *
+ * @see http://en.wikipedia.org/wiki/Hqx
+ * @see http://web.archive.org/web/20131114143602/http://www.hiend3d.com/hq3x.html
+ * @see http://blog.pkh.me/p/19-butchering-hqx-scaling-filters.html
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+typedef int (*hqxfunc_t)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+
+typedef struct {
+ const AVClass *class;
+ int n;
+ hqxfunc_t func;
+ uint32_t rgbtoyuv[1<<24];
+} HQXContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ const uint32_t *rgbtoyuv;
+} ThreadData;
+
+#define OFFSET(x) offsetof(HQXContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption hqx_options[] = {
+ { "n", "set scale factor", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 3}, 2, 4, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(hqx);
+
+static av_always_inline uint32_t rgb2yuv(const uint32_t *r2y, uint32_t c)
+{
+ return r2y[c & 0xffffff];
+}
+
+static av_always_inline int yuv_diff(uint32_t yuv1, uint32_t yuv2)
+{
+#define YMASK 0xff0000
+#define UMASK 0x00ff00
+#define VMASK 0x0000ff
+ return abs((yuv1 & YMASK) - (yuv2 & YMASK)) > (48 << 16) ||
+ abs((yuv1 & UMASK) - (yuv2 & UMASK)) > ( 7 << 8) ||
+ abs((yuv1 & VMASK) - (yuv2 & VMASK)) > ( 6 << 0);
+}
+
+/* (c1*w1 + c2*w2) >> s */
+static av_always_inline uint32_t interp_2px(uint32_t c1, int w1, uint32_t c2, int w2, int s)
+{
+ return (((((c1 & 0xff00ff00) >> 8) * w1 + ((c2 & 0xff00ff00) >> 8) * w2) << (8 - s)) & 0xff00ff00) |
+ (((((c1 & 0x00ff00ff) ) * w1 + ((c2 & 0x00ff00ff) ) * w2) >> s ) & 0x00ff00ff);
+}
+
+/* (c1*w1 + c2*w2 + c3*w3) >> s */
+static av_always_inline uint32_t interp_3px(uint32_t c1, int w1, uint32_t c2, int w2, uint32_t c3, int w3, int s)
+{
+ return (((((c1 & 0xff00ff00) >> 8) * w1 + ((c2 & 0xff00ff00) >> 8) * w2 + ((c3 & 0xff00ff00) >> 8) * w3) << (8 - s)) & 0xff00ff00) |
+ (((((c1 & 0x00ff00ff) ) * w1 + ((c2 & 0x00ff00ff) ) * w2 + ((c3 & 0x00ff00ff) ) * w3) >> s ) & 0x00ff00ff);
+}
+
+/* m is the mask of diff with the center pixel that matters in the pattern, and
+ * r is the expected result (bit set to 1 if there is difference with the
+ * center, 0 otherwise) */
+#define P(m, r) ((k_shuffled & (m)) == (r))
+
+/* adjust 012345678 to 01235678: the mask doesn't contain the (null) diff
+ * between the center/current pixel and itself */
+#define DROP4(z) ((z) > 4 ? (z)-1 : (z))
+
+/* shuffle the input mask: move bit n (4-adjusted) to position stored in p<n> */
+#define SHF(x, rot, n) (((x) >> ((rot) ? 7-DROP4(n) : DROP4(n)) & 1) << DROP4(p##n))
+
+/* used to check if there is YUV difference between 2 pixels */
+#define WDIFF(c1, c2) yuv_diff(rgb2yuv(r2y, c1), rgb2yuv(r2y, c2))
+
+/* bootstrap template for every interpolation code. It defines the shuffled
+ * masks and surrounding pixels. The rot flag is used to indicate if it's a
+ * rotation; its basic effect is to shuffle k using p8..p0 instead of p0..p8 */
+#define INTERP_BOOTSTRAP(rot) \
+ const int k_shuffled = SHF(k,rot,0) | SHF(k,rot,1) | SHF(k,rot,2) \
+ | SHF(k,rot,3) | 0 | SHF(k,rot,5) \
+ | SHF(k,rot,6) | SHF(k,rot,7) | SHF(k,rot,8); \
+ \
+ const uint32_t w0 = w[p0], w1 = w[p1], \
+ w3 = w[p3], w4 = w[p4], w5 = w[p5], \
+ w7 = w[p7]
+
+/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
+ * top-left pixel in the total of the 2x2 pixels to interpolates. The function
+ * is also used for the 3 other pixels */
+static av_always_inline uint32_t hq2x_interp_1x1(const uint32_t *r2y, int k,
+ const uint32_t *w,
+ int p0, int p1, int p2,
+ int p3, int p4, int p5,
+ int p6, int p7, int p8)
+{
+ INTERP_BOOTSTRAP(0);
+
+ if ((P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5))
+ return interp_2px(w4, 3, w3, 1, 2);
+ if ((P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3))
+ return interp_2px(w4, 3, w1, 1, 2);
+ if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
+ return w4;
+ if ((P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) || P(0xdf,0x5a) ||
+ P(0x9f,0x8a) || P(0xcf,0x8a) || P(0xef,0x4e) || P(0x3f,0x0e) ||
+ P(0xfb,0x5a) || P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
+ P(0xeb,0x8a)) && WDIFF(w3, w1))
+ return interp_2px(w4, 3, w0, 1, 2);
+ if (P(0x0b,0x08))
+ return interp_3px(w4, 2, w0, 1, w1, 1, 2);
+ if (P(0x0b,0x02))
+ return interp_3px(w4, 2, w0, 1, w3, 1, 2);
+ if (P(0x2f,0x2f))
+ return interp_3px(w4, 14, w3, 1, w1, 1, 4);
+ if (P(0xbf,0x37) || P(0xdb,0x13))
+ return interp_3px(w4, 5, w1, 2, w3, 1, 3);
+ if (P(0xdb,0x49) || P(0xef,0x6d))
+ return interp_3px(w4, 5, w3, 2, w1, 1, 3);
+ if (P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) || P(0x6b,0x43))
+ return interp_2px(w4, 3, w3, 1, 2);
+ if (P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) || P(0x3b,0x19))
+ return interp_2px(w4, 3, w1, 1, 2);
+ if (P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7e,0x0e))
+ return interp_3px(w4, 2, w3, 3, w1, 3, 3);
+ if (P(0xfb,0x6a) || P(0x6f,0x6e) || P(0x3f,0x3e) || P(0xfb,0xfa) ||
+ P(0xdf,0xde) || P(0xdf,0x1e))
+ return interp_2px(w4, 3, w0, 1, 2);
+ if (P(0x0a,0x00) || P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) ||
+ P(0xbe,0x0a) || P(0xee,0x0a) || P(0x7e,0x0a) || P(0xeb,0x4b) ||
+ P(0x3b,0x1b))
+ return interp_3px(w4, 2, w3, 1, w1, 1, 2);
+ return interp_3px(w4, 6, w3, 1, w1, 1, 3);
+}
+
+/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
+ * top-left and top-center pixel in the total of the 3x3 pixels to
+ * interpolates. The function is also used for the 3 other couples of pixels
+ * defining the outline. The center pixel is not defined through this function,
+ * since it's just the same as the original value. */
+static av_always_inline void hq3x_interp_2x1(uint32_t *dst, int dst_linesize,
+ const uint32_t *r2y, int k,
+ const uint32_t *w,
+ int pos00, int pos01,
+ int p0, int p1, int p2,
+ int p3, int p4, int p5,
+ int p6, int p7, int p8,
+ int rotate)
+{
+ INTERP_BOOTSTRAP(rotate);
+
+ uint32_t *dst00 = &dst[dst_linesize*(pos00>>1) + (pos00&1)];
+ uint32_t *dst01 = &dst[dst_linesize*(pos01>>1) + (pos01&1)];
+
+ if ((P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3))
+ *dst00 = interp_2px(w4, 3, w1, 1, 2);
+ else if ((P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5))
+ *dst00 = interp_2px(w4, 3, w3, 1, 2);
+ else if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
+ *dst00 = w4;
+ else if ((P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) || P(0xdf,0x5a) ||
+ P(0x9f,0x8a) || P(0xcf,0x8a) || P(0xef,0x4e) || P(0x3f,0x0e) ||
+ P(0xfb,0x5a) || P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
+ P(0xeb,0x8a)) && WDIFF(w3, w1))
+ *dst00 = interp_2px(w4, 3, w0, 1, 2);
+ else if (P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) || P(0x3b,0x19))
+ *dst00 = interp_2px(w4, 3, w1, 1, 2);
+ else if (P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) || P(0x6b,0x43))
+ *dst00 = interp_2px(w4, 3, w3, 1, 2);
+ else if (P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7e,0x0e))
+ *dst00 = interp_2px(w3, 1, w1, 1, 1);
+ else if (P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) || P(0xbe,0x0a) ||
+ P(0xee,0x0a) || P(0x7e,0x0a) || P(0xeb,0x4b) || P(0x3b,0x1b))
+ *dst00 = interp_3px(w4, 2, w3, 7, w1, 7, 4);
+ else if (P(0x0b,0x08) || P(0xf9,0x68) || P(0xf3,0x62) || P(0x6d,0x6c) ||
+ P(0x67,0x66) || P(0x3d,0x3c) || P(0x37,0x36) || P(0xf9,0xf8) ||
+ P(0xdd,0xdc) || P(0xf3,0xf2) || P(0xd7,0xd6) || P(0xdd,0x1c) ||
+ P(0xd7,0x16) || P(0x0b,0x02))
+ *dst00 = interp_2px(w4, 3, w0, 1, 2);
+ else
+ *dst00 = interp_3px(w4, 2, w3, 1, w1, 1, 2);
+
+ if ((P(0xfe,0xde) || P(0x9e,0x16) || P(0xda,0x12) || P(0x17,0x16) ||
+ P(0x5b,0x12) || P(0xbb,0x12)) && WDIFF(w1, w5))
+ *dst01 = w4;
+ else if ((P(0x0f,0x0b) || P(0x5e,0x0a) || P(0xfb,0x7b) || P(0x3b,0x0b) ||
+ P(0xbe,0x0a) || P(0x7a,0x0a)) && WDIFF(w3, w1))
+ *dst01 = w4;
+ else if (P(0xbf,0x8f) || P(0x7e,0x0e) || P(0xbf,0x37) || P(0xdb,0x13))
+ *dst01 = interp_2px(w1, 3, w4, 1, 2);
+ else if (P(0x02,0x00) || P(0x7c,0x28) || P(0xed,0xa9) || P(0xf5,0xb4) ||
+ P(0xd9,0x90))
+ *dst01 = interp_2px(w4, 3, w1, 1, 2);
+ else if (P(0x4f,0x4b) || P(0xfb,0x7b) || P(0xfe,0x7e) || P(0x9f,0x1b) ||
+ P(0x2f,0x0b) || P(0xbe,0x0a) || P(0x7e,0x0a) || P(0xfb,0x4b) ||
+ P(0xfb,0xdb) || P(0xfe,0xde) || P(0xfe,0x56) || P(0x57,0x56) ||
+ P(0x97,0x16) || P(0x3f,0x1e) || P(0xdb,0x12) || P(0xbb,0x12))
+ *dst01 = interp_2px(w4, 7, w1, 1, 3);
+ else
+ *dst01 = w4;
+}
+
+/* Assuming p0..p8 is mapped to pixels 0..8, this function interpolates the
+ * top-left block of 2x2 pixels in the total of the 4x4 pixels (or 4 blocks) to
+ * interpolates. The function is also used for the 3 other blocks of 2x2
+ * pixels. */
+static av_always_inline void hq4x_interp_2x2(uint32_t *dst, int dst_linesize,
+ const uint32_t *r2y, int k,
+ const uint32_t *w,
+ int pos00, int pos01,
+ int pos10, int pos11,
+ int p0, int p1, int p2,
+ int p3, int p4, int p5,
+ int p6, int p7, int p8)
+{
+ INTERP_BOOTSTRAP(0);
+
+ uint32_t *dst00 = &dst[dst_linesize*(pos00>>1) + (pos00&1)];
+ uint32_t *dst01 = &dst[dst_linesize*(pos01>>1) + (pos01&1)];
+ uint32_t *dst10 = &dst[dst_linesize*(pos10>>1) + (pos10&1)];
+ uint32_t *dst11 = &dst[dst_linesize*(pos11>>1) + (pos11&1)];
+
+ const int cond00 = (P(0xbf,0x37) || P(0xdb,0x13)) && WDIFF(w1, w5);
+ const int cond01 = (P(0xdb,0x49) || P(0xef,0x6d)) && WDIFF(w7, w3);
+ const int cond02 = (P(0x6f,0x2a) || P(0x5b,0x0a) || P(0xbf,0x3a) ||
+ P(0xdf,0x5a) || P(0x9f,0x8a) || P(0xcf,0x8a) ||
+ P(0xef,0x4e) || P(0x3f,0x0e) || P(0xfb,0x5a) ||
+ P(0xbb,0x8a) || P(0x7f,0x5a) || P(0xaf,0x8a) ||
+ P(0xeb,0x8a)) && WDIFF(w3, w1);
+ const int cond03 = P(0xdb,0x49) || P(0xef,0x6d);
+ const int cond04 = P(0xbf,0x37) || P(0xdb,0x13);
+ const int cond05 = P(0x1b,0x03) || P(0x4f,0x43) || P(0x8b,0x83) ||
+ P(0x6b,0x43);
+ const int cond06 = P(0x4b,0x09) || P(0x8b,0x89) || P(0x1f,0x19) ||
+ P(0x3b,0x19);
+ const int cond07 = P(0x0b,0x08) || P(0xf9,0x68) || P(0xf3,0x62) ||
+ P(0x6d,0x6c) || P(0x67,0x66) || P(0x3d,0x3c) ||
+ P(0x37,0x36) || P(0xf9,0xf8) || P(0xdd,0xdc) ||
+ P(0xf3,0xf2) || P(0xd7,0xd6) || P(0xdd,0x1c) ||
+ P(0xd7,0x16) || P(0x0b,0x02);
+ const int cond08 = (P(0x0f,0x0b) || P(0x2b,0x0b) || P(0xfe,0x4a) ||
+ P(0xfe,0x1a)) && WDIFF(w3, w1);
+ const int cond09 = P(0x2f,0x2f);
+ const int cond10 = P(0x0a,0x00);
+ const int cond11 = P(0x0b,0x09);
+ const int cond12 = P(0x7e,0x2a) || P(0xef,0xab);
+ const int cond13 = P(0xbf,0x8f) || P(0x7e,0x0e);
+ const int cond14 = P(0x4f,0x4b) || P(0x9f,0x1b) || P(0x2f,0x0b) ||
+ P(0xbe,0x0a) || P(0xee,0x0a) || P(0x7e,0x0a) ||
+ P(0xeb,0x4b) || P(0x3b,0x1b);
+ const int cond15 = P(0x0b,0x03);
+
+ if (cond00)
+ *dst00 = interp_2px(w4, 5, w3, 3, 3);
+ else if (cond01)
+ *dst00 = interp_2px(w4, 5, w1, 3, 3);
+ else if ((P(0x0b,0x0b) || P(0xfe,0x4a) || P(0xfe,0x1a)) && WDIFF(w3, w1))
+ *dst00 = w4;
+ else if (cond02)
+ *dst00 = interp_2px(w4, 5, w0, 3, 3);
+ else if (cond03)
+ *dst00 = interp_2px(w4, 3, w3, 1, 2);
+ else if (cond04)
+ *dst00 = interp_2px(w4, 3, w1, 1, 2);
+ else if (cond05)
+ *dst00 = interp_2px(w4, 5, w3, 3, 3);
+ else if (cond06)
+ *dst00 = interp_2px(w4, 5, w1, 3, 3);
+ else if (P(0x0f,0x0b) || P(0x5e,0x0a) || P(0x2b,0x0b) || P(0xbe,0x0a) ||
+ P(0x7a,0x0a) || P(0xee,0x0a))
+ *dst00 = interp_2px(w1, 1, w3, 1, 1);
+ else if (cond07)
+ *dst00 = interp_2px(w4, 5, w0, 3, 3);
+ else
+ *dst00 = interp_3px(w4, 2, w1, 1, w3, 1, 2);
+
+ if (cond00)
+ *dst01 = interp_2px(w4, 7, w3, 1, 3);
+ else if (cond08)
+ *dst01 = w4;
+ else if (cond02)
+ *dst01 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond09)
+ *dst01 = w4;
+ else if (cond10)
+ *dst01 = interp_3px(w4, 5, w1, 2, w3, 1, 3);
+ else if (P(0x0b,0x08))
+ *dst01 = interp_3px(w4, 5, w1, 2, w0, 1, 3);
+ else if (cond11)
+ *dst01 = interp_2px(w4, 5, w1, 3, 3);
+ else if (cond04)
+ *dst01 = interp_2px(w1, 3, w4, 1, 2);
+ else if (cond12)
+ *dst01 = interp_3px(w1, 2, w4, 1, w3, 1, 2);
+ else if (cond13)
+ *dst01 = interp_2px(w1, 5, w3, 3, 3);
+ else if (cond05)
+ *dst01 = interp_2px(w4, 7, w3, 1, 3);
+ else if (P(0xf3,0x62) || P(0x67,0x66) || P(0x37,0x36) || P(0xf3,0xf2) ||
+ P(0xd7,0xd6) || P(0xd7,0x16) || P(0x0b,0x02))
+ *dst01 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond14)
+ *dst01 = interp_2px(w1, 1, w4, 1, 1);
+ else
+ *dst01 = interp_2px(w4, 3, w1, 1, 2);
+
+ if (cond01)
+ *dst10 = interp_2px(w4, 7, w1, 1, 3);
+ else if (cond08)
+ *dst10 = w4;
+ else if (cond02)
+ *dst10 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond09)
+ *dst10 = w4;
+ else if (cond10)
+ *dst10 = interp_3px(w4, 5, w3, 2, w1, 1, 3);
+ else if (P(0x0b,0x02))
+ *dst10 = interp_3px(w4, 5, w3, 2, w0, 1, 3);
+ else if (cond15)
+ *dst10 = interp_2px(w4, 5, w3, 3, 3);
+ else if (cond03)
+ *dst10 = interp_2px(w3, 3, w4, 1, 2);
+ else if (cond13)
+ *dst10 = interp_3px(w3, 2, w4, 1, w1, 1, 2);
+ else if (cond12)
+ *dst10 = interp_2px(w3, 5, w1, 3, 3);
+ else if (cond06)
+ *dst10 = interp_2px(w4, 7, w1, 1, 3);
+ else if (P(0x0b,0x08) || P(0xf9,0x68) || P(0x6d,0x6c) || P(0x3d,0x3c) ||
+ P(0xf9,0xf8) || P(0xdd,0xdc) || P(0xdd,0x1c))
+ *dst10 = interp_2px(w4, 3, w0, 1, 2);
+ else if (cond14)
+ *dst10 = interp_2px(w3, 1, w4, 1, 1);
+ else
+ *dst10 = interp_2px(w4, 3, w3, 1, 2);
+
+ if ((P(0x7f,0x2b) || P(0xef,0xab) || P(0xbf,0x8f) || P(0x7f,0x0f)) &&
+ WDIFF(w3, w1))
+ *dst11 = w4;
+ else if (cond02)
+ *dst11 = interp_2px(w4, 7, w0, 1, 3);
+ else if (cond15)
+ *dst11 = interp_2px(w4, 7, w3, 1, 3);
+ else if (cond11)
+ *dst11 = interp_2px(w4, 7, w1, 1, 3);
+ else if (P(0x0a,0x00) || P(0x7e,0x2a) || P(0xef,0xab) || P(0xbf,0x8f) ||
+ P(0x7e,0x0e))
+ *dst11 = interp_3px(w4, 6, w3, 1, w1, 1, 3);
+ else if (cond07)
+ *dst11 = interp_2px(w4, 7, w0, 1, 3);
+ else
+ *dst11 = w4;
+}
+
+static av_always_inline void hqx_filter(const ThreadData *td, int jobnr, int nb_jobs, int n)
+{
+ int x, y;
+ AVFrame *in = td->in, *out = td->out;
+ const uint32_t *r2y = td->rgbtoyuv;
+ const int height = in->height;
+ const int width = in->width;
+ const int slice_start = (height * jobnr ) / nb_jobs;
+ const int slice_end = (height * (jobnr+1)) / nb_jobs;
+ const int dst_linesize = out->linesize[0];
+ const int src_linesize = in->linesize[0];
+ uint8_t *dst = out->data[0] + slice_start * dst_linesize * n;
+ const uint8_t *src = in->data[0] + slice_start * src_linesize;
+
+ const int dst32_linesize = dst_linesize >> 2;
+ const int src32_linesize = src_linesize >> 2;
+
+ for (y = slice_start; y < slice_end; y++) {
+ const uint32_t *src32 = (const uint32_t *)src;
+ uint32_t *dst32 = (uint32_t *)dst;
+ const int prevline = y > 0 ? -src32_linesize : 0;
+ const int nextline = y < height - 1 ? src32_linesize : 0;
+
+ for (x = 0; x < width; x++) {
+ const int prevcol = x > 0 ? -1 : 0;
+ const int nextcol = x < width -1 ? 1 : 0;
+ const uint32_t w[3*3] = {
+ src32[prevcol + prevline], src32[prevline], src32[prevline + nextcol],
+ src32[prevcol ], src32[ 0], src32[ nextcol],
+ src32[prevcol + nextline], src32[nextline], src32[nextline + nextcol]
+ };
+ const uint32_t yuv1 = rgb2yuv(r2y, w[4]);
+ const int pattern = (w[4] != w[0] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[0]))) : 0)
+ | (w[4] != w[1] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[1]))) : 0) << 1
+ | (w[4] != w[2] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[2]))) : 0) << 2
+ | (w[4] != w[3] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[3]))) : 0) << 3
+ | (w[4] != w[5] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[5]))) : 0) << 4
+ | (w[4] != w[6] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[6]))) : 0) << 5
+ | (w[4] != w[7] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[7]))) : 0) << 6
+ | (w[4] != w[8] ? (yuv_diff(yuv1, rgb2yuv(r2y, w[8]))) : 0) << 7;
+
+ if (n == 2) {
+ dst32[dst32_linesize*0 + 0] = hq2x_interp_1x1(r2y, pattern, w, 0,1,2,3,4,5,6,7,8); // 00
+ dst32[dst32_linesize*0 + 1] = hq2x_interp_1x1(r2y, pattern, w, 2,1,0,5,4,3,8,7,6); // 01 (vert mirrored)
+ dst32[dst32_linesize*1 + 0] = hq2x_interp_1x1(r2y, pattern, w, 6,7,8,3,4,5,0,1,2); // 10 (horiz mirrored)
+ dst32[dst32_linesize*1 + 1] = hq2x_interp_1x1(r2y, pattern, w, 8,7,6,5,4,3,2,1,0); // 11 (center mirrored)
+ } else if (n == 3) {
+ hq3x_interp_2x1(dst32, dst32_linesize, r2y, pattern, w, 0,1, 0,1,2,3,4,5,6,7,8, 0); // 00 01
+ hq3x_interp_2x1(dst32 + 1, dst32_linesize, r2y, pattern, w, 1,3, 2,5,8,1,4,7,0,3,6, 1); // 02 12 (rotated to the right)
+ hq3x_interp_2x1(dst32 + 1*dst32_linesize, dst32_linesize, r2y, pattern, w, 2,0, 6,3,0,7,4,1,8,5,2, 1); // 20 10 (rotated to the left)
+ hq3x_interp_2x1(dst32 + 1*dst32_linesize + 1, dst32_linesize, r2y, pattern, w, 3,2, 8,7,6,5,4,3,2,1,0, 0); // 22 21 (center mirrored)
+ dst32[dst32_linesize + 1] = w[4]; // 11
+ } else if (n == 4) {
+ hq4x_interp_2x2(dst32, dst32_linesize, r2y, pattern, w, 0,1,2,3, 0,1,2,3,4,5,6,7,8); // 00 01 10 11
+ hq4x_interp_2x2(dst32 + 2, dst32_linesize, r2y, pattern, w, 1,0,3,2, 2,1,0,5,4,3,8,7,6); // 02 03 12 13 (vert mirrored)
+ hq4x_interp_2x2(dst32 + 2*dst32_linesize, dst32_linesize, r2y, pattern, w, 2,3,0,1, 6,7,8,3,4,5,0,1,2); // 20 21 30 31 (horiz mirrored)
+ hq4x_interp_2x2(dst32 + 2*dst32_linesize + 2, dst32_linesize, r2y, pattern, w, 3,2,1,0, 8,7,6,5,4,3,2,1,0); // 22 23 32 33 (center mirrored)
+ } else {
+ av_assert0(0);
+ }
+
+ src32 += 1;
+ dst32 += n;
+ }
+
+ src += src_linesize;
+ dst += dst_linesize * n;
+ }
+}
+
+#define HQX_FUNC(size) \
+static int hq##size##x(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
+{ \
+ hqx_filter(arg, jobnr, nb_jobs, size); \
+ return 0; \
+}
+
+HQX_FUNC(2)
+HQX_FUNC(3)
+HQX_FUNC(4)
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ HQXContext *hqx = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->w = inlink->w * hqx->n;
+ outlink->h = inlink->h * hqx->n;
+ av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
+ av_get_pix_fmt_name(inlink->format),
+ inlink->w, inlink->h, outlink->w, outlink->h);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ HQXContext *hqx = ctx->priv;
+ ThreadData td;
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = outlink->w;
+ out->height = outlink->h;
+
+ td.in = in;
+ td.out = out;
+ td.rgbtoyuv = hqx->rgbtoyuv;
+ ctx->internal->execute(ctx, hqx->func, &td, NULL, FFMIN(inlink->h, ctx->graph->nb_threads));
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ HQXContext *hqx = ctx->priv;
+ static const hqxfunc_t hqxfuncs[] = {hq2x, hq3x, hq4x};
+
+ uint32_t c;
+ int bg, rg, g;
+
+ for (bg=-255; bg<256; bg++) {
+ for (rg=-255; rg<256; rg++) {
+ const uint32_t u = (uint32_t)((-169*rg + 500*bg)/1000) + 128;
+ const uint32_t v = (uint32_t)(( 500*rg - 81*bg)/1000) + 128;
+ int startg = FFMAX3(-bg, -rg, 0);
+ int endg = FFMIN3(255-bg, 255-rg, 255);
+ uint32_t y = (uint32_t)(( 299*rg + 1000*startg + 114*bg)/1000);
+ c = bg + (rg<<16) + 0x010101 * startg;
+ for (g = startg; g <= endg; g++) {
+ hqx->rgbtoyuv[c] = ((y++) << 16) + (u << 8) + v;
+ c+= 0x010101;
+ }
+ }
+ }
+
+ hqx->func = hqxfuncs[hqx->n - 2];
+ return 0;
+}
+
+static const AVFilterPad hqx_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad hqx_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_hqx = {
+ .name = "hqx",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input by 2, 3 or 4 using the hq*x magnification algorithm."),
+ .priv_size = sizeof(HQXContext),
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = hqx_inputs,
+ .outputs = hqx_outputs,
+ .priv_class = &hqx_class,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c
new file mode 100644
index 0000000..7843673
--- /dev/null
+++ b/libavfilter/vf_hue.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer
+ * Copyright (c) 2012 Jeremy Tran
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Apply a hue/saturation filter to the input video
+ * Ported from MPlayer libmpcodecs/vf_hue.c.
+ */
+
+#include <float.h>
+#include "libavutil/eval.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define SAT_MIN_VAL -10
+#define SAT_MAX_VAL 10
+
+static const char *const var_names[] = {
+ "n", // frame count
+ "pts", // presentation timestamp expressed in AV_TIME_BASE units
+ "r", // frame rate
+ "t", // timestamp expressed in seconds
+ "tb", // timebase
+ NULL
+};
+
+enum var_name {
+ VAR_N,
+ VAR_PTS,
+ VAR_R,
+ VAR_T,
+ VAR_TB,
+ VAR_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ float hue_deg; /* hue expressed in degrees */
+ float hue; /* hue expressed in radians */
+ char *hue_deg_expr;
+ char *hue_expr;
+ AVExpr *hue_deg_pexpr;
+ AVExpr *hue_pexpr;
+ float saturation;
+ char *saturation_expr;
+ AVExpr *saturation_pexpr;
+ float brightness;
+ char *brightness_expr;
+ AVExpr *brightness_pexpr;
+ int hsub;
+ int vsub;
+ int is_first;
+ int32_t hue_sin;
+ int32_t hue_cos;
+ double var_values[VAR_NB];
+ uint8_t lut_l[256];
+ uint8_t lut_u[256][256];
+ uint8_t lut_v[256][256];
+} HueContext;
+
+#define OFFSET(x) offsetof(HueContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption hue_options[] = {
+ { "h", "set the hue angle degrees expression", OFFSET(hue_deg_expr), AV_OPT_TYPE_STRING,
+ { .str = NULL }, .flags = FLAGS },
+ { "s", "set the saturation expression", OFFSET(saturation_expr), AV_OPT_TYPE_STRING,
+ { .str = "1" }, .flags = FLAGS },
+ { "H", "set the hue angle radians expression", OFFSET(hue_expr), AV_OPT_TYPE_STRING,
+ { .str = NULL }, .flags = FLAGS },
+ { "b", "set the brightness expression", OFFSET(brightness_expr), AV_OPT_TYPE_STRING,
+ { .str = "0" }, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(hue);
+
+static inline void compute_sin_and_cos(HueContext *hue)
+{
+ /*
+ * Scale the value to the norm of the resulting (U,V) vector, that is
+ * the saturation.
+ * This will be useful in the apply_lut function.
+ */
+ hue->hue_sin = rint(sin(hue->hue) * (1 << 16) * hue->saturation);
+ hue->hue_cos = rint(cos(hue->hue) * (1 << 16) * hue->saturation);
+}
+
+static inline void create_luma_lut(HueContext *h)
+{
+ const float b = h->brightness;
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ h->lut_l[i] = av_clip_uint8(i + b * 25.5);
+ }
+}
+
+static inline void create_chrominance_lut(HueContext *h, const int32_t c,
+ const int32_t s)
+{
+ int32_t i, j, u, v, new_u, new_v;
+
+ /*
+ * If we consider U and V as the components of a 2D vector then its angle
+ * is the hue and the norm is the saturation
+ */
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ /* Normalize the components from range [16;140] to [-112;112] */
+ u = i - 128;
+ v = j - 128;
+ /*
+ * Apply the rotation of the vector : (c * u) - (s * v)
+ * (s * u) + (c * v)
+ * De-normalize the components (without forgetting to scale 128
+ * by << 16)
+ * Finally scale back the result by >> 16
+ */
+ new_u = ((c * u) - (s * v) + (1 << 15) + (128 << 16)) >> 16;
+ new_v = ((s * u) + (c * v) + (1 << 15) + (128 << 16)) >> 16;
+
+ /* Prevent a potential overflow */
+ h->lut_u[i][j] = av_clip_uint8(new_u);
+ h->lut_v[i][j] = av_clip_uint8(new_v);
+ }
+ }
+}
+
+static int set_expr(AVExpr **pexpr_ptr, char **expr_ptr,
+ const char *expr, const char *option, void *log_ctx)
+{
+ int ret;
+ AVExpr *new_pexpr;
+ char *new_expr;
+
+ new_expr = av_strdup(expr);
+ if (!new_expr)
+ return AVERROR(ENOMEM);
+ ret = av_expr_parse(&new_pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for %s\n",
+ expr, option);
+ av_free(new_expr);
+ return ret;
+ }
+
+ if (*pexpr_ptr)
+ av_expr_free(*pexpr_ptr);
+ *pexpr_ptr = new_pexpr;
+ av_freep(expr_ptr);
+ *expr_ptr = new_expr;
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ HueContext *hue = ctx->priv;
+ int ret;
+
+ if (hue->hue_expr && hue->hue_deg_expr) {
+ av_log(ctx, AV_LOG_ERROR,
+ "H and h options are incompatible and cannot be specified "
+ "at the same time\n");
+ return AVERROR(EINVAL);
+ }
+
+#define SET_EXPR(expr, option) \
+ if (hue->expr##_expr) do { \
+ ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
+ hue->expr##_expr, option, ctx); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+ SET_EXPR(brightness, "b");
+ SET_EXPR(saturation, "s");
+ SET_EXPR(hue_deg, "h");
+ SET_EXPR(hue, "H");
+#undef SET_EXPR
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "H_expr:%s h_deg_expr:%s s_expr:%s b_expr:%s\n",
+ hue->hue_expr, hue->hue_deg_expr, hue->saturation_expr, hue->brightness_expr);
+ compute_sin_and_cos(hue);
+ hue->is_first = 1;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ HueContext *hue = ctx->priv;
+
+ av_expr_free(hue->brightness_pexpr);
+ av_expr_free(hue->hue_deg_pexpr);
+ av_expr_free(hue->hue_pexpr);
+ av_expr_free(hue->saturation_pexpr);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ HueContext *hue = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ hue->hsub = desc->log2_chroma_w;
+ hue->vsub = desc->log2_chroma_h;
+
+ hue->var_values[VAR_N] = 0;
+ hue->var_values[VAR_TB] = av_q2d(inlink->time_base);
+ hue->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
+ NAN : av_q2d(inlink->frame_rate);
+
+ return 0;
+}
+
+static void apply_luma_lut(HueContext *s,
+ uint8_t *ldst, const int dst_linesize,
+ uint8_t *lsrc, const int src_linesize,
+ int w, int h)
+{
+ int i;
+
+ while (h--) {
+ for (i = 0; i < w; i++)
+ ldst[i] = s->lut_l[lsrc[i]];
+
+ lsrc += src_linesize;
+ ldst += dst_linesize;
+ }
+}
+
+static void apply_lut(HueContext *s,
+ uint8_t *udst, uint8_t *vdst, const int dst_linesize,
+ uint8_t *usrc, uint8_t *vsrc, const int src_linesize,
+ int w, int h)
+{
+ int i;
+
+ while (h--) {
+ for (i = 0; i < w; i++) {
+ const int u = usrc[i];
+ const int v = vsrc[i];
+
+ udst[i] = s->lut_u[u][v];
+ vdst[i] = s->lut_v[u][v];
+ }
+
+ usrc += src_linesize;
+ vsrc += src_linesize;
+ udst += dst_linesize;
+ vdst += dst_linesize;
+ }
+}
+
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ HueContext *hue = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+ const int32_t old_hue_sin = hue->hue_sin, old_hue_cos = hue->hue_cos;
+ const float old_brightness = hue->brightness;
+ int direct = 0;
+
+ if (av_frame_is_writable(inpic)) {
+ direct = 1;
+ outpic = inpic;
+ } else {
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ }
+
+ hue->var_values[VAR_N] = inlink->frame_count;
+ hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base);
+ hue->var_values[VAR_PTS] = TS2D(inpic->pts);
+
+ if (hue->saturation_expr) {
+ hue->saturation = av_expr_eval(hue->saturation_pexpr, hue->var_values, NULL);
+
+ if (hue->saturation < SAT_MIN_VAL || hue->saturation > SAT_MAX_VAL) {
+ hue->saturation = av_clip(hue->saturation, SAT_MIN_VAL, SAT_MAX_VAL);
+ av_log(inlink->dst, AV_LOG_WARNING,
+ "Saturation value not in range [%d,%d]: clipping value to %0.1f\n",
+ SAT_MIN_VAL, SAT_MAX_VAL, hue->saturation);
+ }
+ }
+
+ if (hue->brightness_expr) {
+ hue->brightness = av_expr_eval(hue->brightness_pexpr, hue->var_values, NULL);
+
+ if (hue->brightness < -10 || hue->brightness > 10) {
+ hue->brightness = av_clipf(hue->brightness, -10, 10);
+ av_log(inlink->dst, AV_LOG_WARNING,
+ "Brightness value not in range [%d,%d]: clipping value to %0.1f\n",
+ -10, 10, hue->brightness);
+ }
+ }
+
+ if (hue->hue_deg_expr) {
+ hue->hue_deg = av_expr_eval(hue->hue_deg_pexpr, hue->var_values, NULL);
+ hue->hue = hue->hue_deg * M_PI / 180;
+ } else if (hue->hue_expr) {
+ hue->hue = av_expr_eval(hue->hue_pexpr, hue->var_values, NULL);
+ hue->hue_deg = hue->hue * 180 / M_PI;
+ }
+
+ av_log(inlink->dst, AV_LOG_DEBUG,
+ "H:%0.1f*PI h:%0.1f s:%0.1f b:%0.f t:%0.1f n:%d\n",
+ hue->hue/M_PI, hue->hue_deg, hue->saturation, hue->brightness,
+ hue->var_values[VAR_T], (int)hue->var_values[VAR_N]);
+
+ compute_sin_and_cos(hue);
+ if (hue->is_first || (old_hue_sin != hue->hue_sin || old_hue_cos != hue->hue_cos))
+ create_chrominance_lut(hue, hue->hue_cos, hue->hue_sin);
+
+ if (hue->is_first || (old_brightness != hue->brightness && hue->brightness))
+ create_luma_lut(hue);
+
+ if (!direct) {
+ if (!hue->brightness)
+ av_image_copy_plane(outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0],
+ inlink->w, inlink->h);
+ if (inpic->data[3])
+ av_image_copy_plane(outpic->data[3], outpic->linesize[3],
+ inpic->data[3], inpic->linesize[3],
+ inlink->w, inlink->h);
+ }
+
+ apply_lut(hue, outpic->data[1], outpic->data[2], outpic->linesize[1],
+ inpic->data[1], inpic->data[2], inpic->linesize[1],
+ FF_CEIL_RSHIFT(inlink->w, hue->hsub),
+ FF_CEIL_RSHIFT(inlink->h, hue->vsub));
+ if (hue->brightness)
+ apply_luma_lut(hue, outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0], inlink->w, inlink->h);
+
+ if (!direct)
+ av_frame_free(&inpic);
+
+ hue->is_first = 0;
+ return ff_filter_frame(outlink, outpic);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ HueContext *hue = ctx->priv;
+ int ret;
+
+#define SET_EXPR(expr, option) \
+ do { \
+ ret = set_expr(&hue->expr##_pexpr, &hue->expr##_expr, \
+ args, option, ctx); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+ if (!strcmp(cmd, "h")) {
+ SET_EXPR(hue_deg, "h");
+ av_freep(&hue->hue_expr);
+ } else if (!strcmp(cmd, "H")) {
+ SET_EXPR(hue, "H");
+ av_freep(&hue->hue_deg_expr);
+ } else if (!strcmp(cmd, "s")) {
+ SET_EXPR(saturation, "s");
+ } else if (!strcmp(cmd, "b")) {
+ SET_EXPR(brightness, "b");
+ } else
+ return AVERROR(ENOSYS);
+
+ return 0;
+}
+
+static const AVFilterPad hue_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad hue_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_hue = {
+ .name = "hue",
+ .description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."),
+ .priv_size = sizeof(HueContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .inputs = hue_inputs,
+ .outputs = hue_outputs,
+ .priv_class = &hue_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_idet.c b/libavfilter/vf_idet.c
new file mode 100644
index 0000000..ed21eea
--- /dev/null
+++ b/libavfilter/vf_idet.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h> /* FLT_MAX */
+
+#include "libavutil/cpu.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+#include "vf_idet.h"
+
+#define OFFSET(x) offsetof(IDETContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption idet_options[] = {
+ { "intl_thres", "set interlacing threshold", OFFSET(interlace_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 1.04}, -1, FLT_MAX, FLAGS },
+ { "prog_thres", "set progressive threshold", OFFSET(progressive_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 1.5}, -1, FLT_MAX, FLAGS },
+ { "rep_thres", "set repeat threshold", OFFSET(repeat_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 3.0}, -1, FLT_MAX, FLAGS },
+ { "half_life", "half life of cumulative statistics", OFFSET(half_life), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, -1, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(idet);
+
+static const char *type2str(Type type)
+{
+ switch(type) {
+ case TFF : return "tff";
+ case BFF : return "bff";
+ case PROGRESSIVE : return "progressive";
+ case UNDETERMINED : return "undetermined";
+ }
+ return NULL;
+}
+
+#define PRECISION 1048576
+
+static uint64_t uintpow(uint64_t b,unsigned int e)
+{
+ uint64_t r=1;
+ while(e--) r*=b;
+ return r;
+}
+
+static int av_dict_set_fxp(AVDictionary **pm, const char *key, uint64_t value, unsigned int digits,
+ int flags)
+{
+ char valuestr[44];
+ uint64_t print_precision = uintpow(10, digits);
+
+ value = av_rescale(value, print_precision, PRECISION);
+
+ snprintf(valuestr, sizeof(valuestr), "%"PRId64".%0*"PRId64,
+ value / print_precision, digits, value % print_precision);
+
+ return av_dict_set(pm, key, valuestr, flags);
+}
+
+static const char *rep2str(RepeatedField repeated_field)
+{
+ switch(repeated_field) {
+ case REPEAT_NONE : return "neither";
+ case REPEAT_TOP : return "top";
+ case REPEAT_BOTTOM : return "bottom";
+ }
+ return NULL;
+}
+
+int ff_idet_filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w)
+{
+ int x;
+ int ret=0;
+
+ for(x=0; x<w; x++){
+ int v = (*a++ + *c++) - 2 * *b++;
+ ret += FFABS(v);
+ }
+
+ return ret;
+}
+
+int ff_idet_filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w)
+{
+ int x;
+ int ret=0;
+
+ for(x=0; x<w; x++){
+ int v = (*a++ + *c++) - 2 * *b++;
+ ret += FFABS(v);
+ }
+
+ return ret;
+}
+
+static void filter(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+ int y, i;
+ int64_t alpha[2]={0};
+ int64_t delta=0;
+ int64_t gamma[2]={0};
+ Type type, best_type;
+ RepeatedField repeat;
+ int match = 0;
+ AVDictionary **metadata = avpriv_frame_get_metadatap(idet->cur);
+
+ for (i = 0; i < idet->csp->nb_components; i++) {
+ int w = idet->cur->width;
+ int h = idet->cur->height;
+ int refs = idet->cur->linesize[i];
+
+ if (i && i<3) {
+ w = FF_CEIL_RSHIFT(w, idet->csp->log2_chroma_w);
+ h = FF_CEIL_RSHIFT(h, idet->csp->log2_chroma_h);
+ }
+
+ for (y = 2; y < h - 2; y++) {
+ uint8_t *prev = &idet->prev->data[i][y*refs];
+ uint8_t *cur = &idet->cur ->data[i][y*refs];
+ uint8_t *next = &idet->next->data[i][y*refs];
+ alpha[ y &1] += idet->filter_line(cur-refs, prev, cur+refs, w);
+ alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w);
+ delta += idet->filter_line(cur-refs, cur, cur+refs, w);
+ gamma[(y^1)&1] += idet->filter_line(cur , prev, cur , w);
+ }
+ }
+
+ if (alpha[0] > idet->interlace_threshold * alpha[1]){
+ type = TFF;
+ }else if(alpha[1] > idet->interlace_threshold * alpha[0]){
+ type = BFF;
+ }else if(alpha[1] > idet->progressive_threshold * delta){
+ type = PROGRESSIVE;
+ }else{
+ type = UNDETERMINED;
+ }
+
+ if ( gamma[0] > idet->repeat_threshold * gamma[1] ){
+ repeat = REPEAT_TOP;
+ } else if ( gamma[1] > idet->repeat_threshold * gamma[0] ){
+ repeat = REPEAT_BOTTOM;
+ } else {
+ repeat = REPEAT_NONE;
+ }
+
+ memmove(idet->history+1, idet->history, HIST_SIZE-1);
+ idet->history[0] = type;
+ best_type = UNDETERMINED;
+ for(i=0; i<HIST_SIZE; i++){
+ if(idet->history[i] != UNDETERMINED){
+ if(best_type == UNDETERMINED)
+ best_type = idet->history[i];
+
+ if(idet->history[i] == best_type) {
+ match++;
+ }else{
+ match=0;
+ break;
+ }
+ }
+ }
+ if(idet->last_type == UNDETERMINED){
+ if(match ) idet->last_type = best_type;
+ }else{
+ if(match>2) idet->last_type = best_type;
+ }
+
+ if (idet->last_type == TFF){
+ idet->cur->top_field_first = 1;
+ idet->cur->interlaced_frame = 1;
+ }else if(idet->last_type == BFF){
+ idet->cur->top_field_first = 0;
+ idet->cur->interlaced_frame = 1;
+ }else if(idet->last_type == PROGRESSIVE){
+ idet->cur->interlaced_frame = 0;
+ }
+
+ for(i=0; i<3; i++)
+ idet->repeats[i] = av_rescale(idet->repeats [i], idet->decay_coefficient, PRECISION);
+
+ for(i=0; i<4; i++){
+ idet->prestat [i] = av_rescale(idet->prestat [i], idet->decay_coefficient, PRECISION);
+ idet->poststat[i] = av_rescale(idet->poststat[i], idet->decay_coefficient, PRECISION);
+ }
+
+ idet->total_repeats [ repeat] ++;
+ idet->repeats [ repeat] += PRECISION;
+
+ idet->total_prestat [ type] ++;
+ idet->prestat [ type] += PRECISION;
+
+ idet->total_poststat[idet->last_type] ++;
+ idet->poststat [idet->last_type] += PRECISION;
+
+ av_log(ctx, AV_LOG_DEBUG, "Repeated Field:%12s, Single frame:%12s, Multi frame:%12s\n",
+ rep2str(repeat), type2str(type), type2str(idet->last_type));
+
+ av_dict_set (metadata, "lavfi.idet.repeated.current_frame", rep2str(repeat), 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.repeated.neither", idet->repeats[REPEAT_NONE], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.repeated.top", idet->repeats[REPEAT_TOP], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.repeated.bottom", idet->repeats[REPEAT_BOTTOM], 2, 0);
+
+ av_dict_set (metadata, "lavfi.idet.single.current_frame", type2str(type), 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.tff", idet->prestat[TFF], 2 , 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.bff", idet->prestat[BFF], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.progressive", idet->prestat[PROGRESSIVE], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.single.undetermined", idet->prestat[UNDETERMINED], 2, 0);
+
+ av_dict_set (metadata, "lavfi.idet.multiple.current_frame", type2str(idet->last_type), 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.tff", idet->poststat[TFF], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.bff", idet->poststat[BFF], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.progressive", idet->poststat[PROGRESSIVE], 2, 0);
+ av_dict_set_fxp(metadata, "lavfi.idet.multiple.undetermined", idet->poststat[UNDETERMINED], 2, 0);
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *picref)
+{
+ AVFilterContext *ctx = link->dst;
+ IDETContext *idet = ctx->priv;
+
+ if (idet->prev)
+ av_frame_free(&idet->prev);
+ idet->prev = idet->cur;
+ idet->cur = idet->next;
+ idet->next = picref;
+
+ if (!idet->cur)
+ return 0;
+
+ if (!idet->prev)
+ idet->prev = av_frame_clone(idet->cur);
+
+ if (!idet->csp)
+ idet->csp = av_pix_fmt_desc_get(link->format);
+ if (idet->csp->comp[0].depth_minus1 / 8 == 1){
+ idet->filter_line = (ff_idet_filter_func)ff_idet_filter_line_c_16bit;
+ if (ARCH_X86)
+ ff_idet_init_x86(idet, 1);
+ }
+
+ filter(ctx);
+
+ return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ IDETContext *idet = ctx->priv;
+
+ do {
+ int ret;
+
+ if (idet->eof)
+ return AVERROR_EOF;
+
+ ret = ff_request_frame(link->src->inputs[0]);
+
+ if (ret == AVERROR_EOF && idet->cur) {
+ AVFrame *next = av_frame_clone(idet->next);
+
+ if (!next)
+ return AVERROR(ENOMEM);
+
+ filter_frame(link->src->inputs[0], next);
+ idet->eof = 1;
+ } else if (ret < 0) {
+ return ret;
+ }
+ } while (!idet->cur);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+
+ av_log(ctx, AV_LOG_INFO, "Repeated Fields: Neither:%6"PRId64" Top:%6"PRId64" Bottom:%6"PRId64"\n",
+ idet->total_repeats[REPEAT_NONE],
+ idet->total_repeats[REPEAT_TOP],
+ idet->total_repeats[REPEAT_BOTTOM]
+ );
+ av_log(ctx, AV_LOG_INFO, "Single frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n",
+ idet->total_prestat[TFF],
+ idet->total_prestat[BFF],
+ idet->total_prestat[PROGRESSIVE],
+ idet->total_prestat[UNDETERMINED]
+ );
+ av_log(ctx, AV_LOG_INFO, "Multi frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n",
+ idet->total_poststat[TFF],
+ idet->total_poststat[BFF],
+ idet->total_poststat[PROGRESSIVE],
+ idet->total_poststat[UNDETERMINED]
+ );
+
+ av_frame_free(&idet->prev);
+ av_frame_free(&idet->cur );
+ av_frame_free(&idet->next);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ IDETContext *idet = ctx->priv;
+
+ idet->eof = 0;
+ idet->last_type = UNDETERMINED;
+ memset(idet->history, UNDETERMINED, HIST_SIZE);
+
+ if( idet->half_life > 0 )
+ idet->decay_coefficient = (uint64_t) round( PRECISION * exp2(-1.0 / idet->half_life) );
+ else
+ idet->decay_coefficient = PRECISION;
+
+ idet->filter_line = ff_idet_filter_line_c;
+
+ if (ARCH_X86)
+ ff_idet_init_x86(idet, 0);
+
+ return 0;
+}
+
+static const AVFilterPad idet_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad idet_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_idet = {
+ .name = "idet",
+ .description = NULL_IF_CONFIG_SMALL("Interlace detect Filter."),
+ .priv_size = sizeof(IDETContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = idet_inputs,
+ .outputs = idet_outputs,
+ .priv_class = &idet_class,
+};
diff --git a/libavfilter/vf_idet.h b/libavfilter/vf_idet.h
new file mode 100644
index 0000000..af759b4
--- /dev/null
+++ b/libavfilter/vf_idet.h
@@ -0,0 +1,76 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_IDET_H
+#define AVFILTER_IDET_H
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+
+#define HIST_SIZE 4
+
+typedef int (*ff_idet_filter_func)(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w);
+
+typedef enum {
+ TFF,
+ BFF,
+ PROGRESSIVE,
+ UNDETERMINED,
+} Type;
+
+typedef enum {
+ REPEAT_NONE,
+ REPEAT_TOP,
+ REPEAT_BOTTOM,
+} RepeatedField;
+
+typedef struct {
+ const AVClass *class;
+ float interlace_threshold;
+ float progressive_threshold;
+ float repeat_threshold;
+ float half_life;
+ uint64_t decay_coefficient;
+
+ Type last_type;
+
+ uint64_t repeats[3];
+ uint64_t prestat[4];
+ uint64_t poststat[4];
+ uint64_t total_repeats[3];
+ uint64_t total_prestat[4];
+ uint64_t total_poststat[4];
+
+ uint8_t history[HIST_SIZE];
+
+ AVFrame *cur;
+ AVFrame *next;
+ AVFrame *prev;
+ ff_idet_filter_func filter_line;
+
+ const AVPixFmtDescriptor *csp;
+ int eof;
+} IDETContext;
+
+void ff_idet_init_x86(IDETContext *idet, int for_16b);
+
+/* main fall-back for left-over */
+int ff_idet_filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w);
+int ff_idet_filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w);
+
+#endif
diff --git a/libavfilter/vf_il.c b/libavfilter/vf_il.c
new file mode 100644
index 0000000..b19fea1
--- /dev/null
+++ b/libavfilter/vf_il.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * (de)interleave fields filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum FilterMode {
+ MODE_NONE,
+ MODE_INTERLEAVE,
+ MODE_DEINTERLEAVE
+};
+
+typedef struct {
+ const AVClass *class;
+ enum FilterMode luma_mode, chroma_mode, alpha_mode;
+ int luma_swap, chroma_swap, alpha_swap;
+ int nb_planes;
+ int linesize[4], chroma_height;
+ int has_alpha;
+} IlContext;
+
+#define OFFSET(x) offsetof(IlContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption il_options[] = {
+ {"luma_mode", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
+ {"l", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"},
+ {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "luma_mode"},
+ {"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "luma_mode"},
+ {"chroma_mode", "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
+ {"c", "select chroma mode", OFFSET(chroma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "chroma_mode"},
+ {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "chroma_mode"},
+ {"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "chroma_mode"},
+ {"alpha_mode", "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
+ {"a", "select alpha mode", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "alpha_mode"},
+ {"none", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NONE}, 0, 0, FLAGS, "alpha_mode"},
+ {"interleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"i", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"deinterleave", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"d", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_DEINTERLEAVE}, 0, 0, FLAGS, "alpha_mode"},
+ {"luma_swap", "swap luma fields", OFFSET(luma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"ls", "swap luma fields", OFFSET(luma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"chroma_swap", "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"cs", "swap chroma fields", OFFSET(chroma_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"alpha_swap", "swap alpha fields", OFFSET(alpha_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {"as", "swap alpha fields", OFFSET(alpha_swap), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(il);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL) && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ ff_add_format(&formats, fmt);
+ }
+
+ ff_set_common_formats(ctx, formats);
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ IlContext *il = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ il->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ il->has_alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
+ if ((ret = av_image_fill_linesizes(il->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ il->chroma_height = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+
+ return 0;
+}
+
+static void interleave(uint8_t *dst, uint8_t *src, int w, int h,
+ int dst_linesize, int src_linesize,
+ enum FilterMode mode, int swap)
+{
+ const int a = swap;
+ const int b = 1 - a;
+ const int m = h >> 1;
+ int y;
+
+ switch (mode) {
+ case MODE_DEINTERLEAVE:
+ for (y = 0; y < m; y++) {
+ memcpy(dst + dst_linesize * y , src + src_linesize * (y * 2 + a), w);
+ memcpy(dst + dst_linesize * (y + m), src + src_linesize * (y * 2 + b), w);
+ }
+ break;
+ case MODE_NONE:
+ for (y = 0; y < m; y++) {
+ memcpy(dst + dst_linesize * y * 2 , src + src_linesize * (y * 2 + a), w);
+ memcpy(dst + dst_linesize * (y * 2 + 1), src + src_linesize * (y * 2 + b), w);
+ }
+ break;
+ case MODE_INTERLEAVE:
+ for (y = 0; y < m; y++) {
+ memcpy(dst + dst_linesize * (y * 2 + a), src + src_linesize * y , w);
+ memcpy(dst + dst_linesize * (y * 2 + b), src + src_linesize * (y + m), w);
+ }
+ break;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ IlContext *il = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out;
+ int comp;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, inpicref);
+
+ interleave(out->data[0], inpicref->data[0],
+ il->linesize[0], inlink->h,
+ out->linesize[0], inpicref->linesize[0],
+ il->luma_mode, il->luma_swap);
+
+ for (comp = 1; comp < (il->nb_planes - il->has_alpha); comp++) {
+ interleave(out->data[comp], inpicref->data[comp],
+ il->linesize[comp], il->chroma_height,
+ out->linesize[comp], inpicref->linesize[comp],
+ il->chroma_mode, il->chroma_swap);
+ }
+
+ if (il->has_alpha) {
+ comp = il->nb_planes - 1;
+ interleave(out->data[comp], inpicref->data[comp],
+ il->linesize[comp], inlink->h,
+ out->linesize[comp], inpicref->linesize[comp],
+ il->alpha_mode, il->alpha_swap);
+ }
+
+ av_frame_free(&inpicref);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_il = {
+ .name = "il",
+ .description = NULL_IF_CONFIG_SMALL("Deinterleave or interleave fields."),
+ .priv_size = sizeof(IlContext),
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .priv_class = &il_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_interlace.c b/libavfilter/vf_interlace.c
index 3c36568..a63f915 100644
--- a/libavfilter/vf_interlace.c
+++ b/libavfilter/vf_interlace.c
@@ -1,18 +1,23 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com>
*
- * Libav is free software; you can redistribute it and/or modify
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
@@ -34,7 +39,7 @@
#define OFFSET(x) offsetof(InterlaceContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+static const AVOption interlace_options[] = {
{ "scan", "scanning mode", OFFSET(scan),
AV_OPT_TYPE_INT, {.i64 = MODE_TFF }, 0, 1, .flags = V, .unit = "scan" },
{ "tff", "top field first", 0,
@@ -46,12 +51,7 @@ static const AVOption options[] = {
{ NULL }
};
-static const AVClass class = {
- .class_name = "interlace filter",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(interlace);
static void lowpass_line_c(uint8_t *dstp, ptrdiff_t linesize,
const uint8_t *srcp,
@@ -86,8 +86,6 @@ static av_cold void uninit(AVFilterContext *ctx)
av_frame_free(&s->cur);
av_frame_free(&s->next);
-
- av_opt_free(s);
}
static int config_out_props(AVFilterLink *outlink)
@@ -109,8 +107,10 @@ static int config_out_props(AVFilterLink *outlink)
outlink->w = inlink->w;
outlink->h = inlink->h;
outlink->time_base = inlink->time_base;
+ outlink->frame_rate = inlink->frame_rate;
// half framerate
- outlink->time_base.num *= 2;
+ outlink->frame_rate.den *= 2;
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
if (s->lowpass) {
@@ -135,7 +135,7 @@ static void copy_picture_field(InterlaceContext *s,
int plane, j;
for (plane = 0; plane < desc->nb_components; plane++) {
- int lines = (plane == 1 || plane == 2) ? -(-inlink->h) >> vsub : inlink->h;
+ int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
ptrdiff_t linesize = av_image_get_linesize(inlink->format, inlink->w, plane);
uint8_t *dstp = dst_frame->data[plane];
const uint8_t *srcp = src_frame->data[plane];
@@ -193,7 +193,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
return AVERROR(ENOMEM);
out->pts /= 2; // adjust pts to new framerate
ret = ff_filter_frame(outlink, out);
- s->got_output = 1;
return ret;
}
@@ -205,7 +204,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_frame_copy_props(out, s->cur);
out->interlaced_frame = 1;
out->top_field_first = tff;
- out->pts /= 2; // adjust pts to new framerate
/* copy upper/lower field from cur */
copy_picture_field(s, s->cur, out, inlink, tff ? FIELD_UPPER : FIELD_LOWER, s->lowpass);
@@ -216,20 +214,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_frame_free(&s->next);
ret = ff_filter_frame(outlink, out);
- s->got_output = 1;
-
- return ret;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- InterlaceContext *s = ctx->priv;
- int ret = 0;
-
- s->got_output = 0;
- while (ret >= 0 && !s->got_output)
- ret = ff_request_frame(ctx->inputs[0]);
return ret;
}
@@ -245,10 +229,9 @@ static const AVFilterPad inputs[] = {
static const AVFilterPad outputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_out_props,
- .request_frame = request_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
},
{ NULL }
};
@@ -257,12 +240,9 @@ AVFilter ff_vf_interlace = {
.name = "interlace",
.description = NULL_IF_CONFIG_SMALL("Convert progressive video into interlaced."),
.uninit = uninit,
-
- .priv_class = &class,
+ .priv_class = &interlace_class,
.priv_size = sizeof(InterlaceContext),
.query_formats = query_formats,
-
.inputs = inputs,
.outputs = outputs,
};
-
diff --git a/libavfilter/vf_kerndeint.c b/libavfilter/vf_kerndeint.c
new file mode 100644
index 0000000..5130208
--- /dev/null
+++ b/libavfilter/vf_kerndeint.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2012 Jeremy Tran
+ * Copyright (c) 2004 Tobias Diedrich
+ * Copyright (c) 2003 Donald A. Graft
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Kernel Deinterlacer
+ * Ported from MPlayer libmpcodecs/vf_kerndeint.c.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ int frame; ///< frame count, starting from 0
+ int thresh, map, order, sharp, twoway;
+ int vsub;
+ int is_packed_rgb;
+ uint8_t *tmp_data [4]; ///< temporary plane data buffer
+ int tmp_linesize[4]; ///< temporary plane byte linesize
+ int tmp_bwidth [4]; ///< temporary plane byte width
+} KerndeintContext;
+
+#define OFFSET(x) offsetof(KerndeintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption kerndeint_options[] = {
+ { "thresh", "set the threshold", OFFSET(thresh), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
+ { "map", "set the map", OFFSET(map), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "order", "set the order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "sharp", "enable sharpening", OFFSET(sharp), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "twoway", "enable twoway", OFFSET(twoway), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(kerndeint);
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ KerndeintContext *kerndeint = ctx->priv;
+
+ av_free(kerndeint->tmp_data[0]);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUYV422,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_0RGB,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB0,
+ AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ KerndeintContext *kerndeint = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ kerndeint->is_packed_rgb = av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_RGB;
+ kerndeint->vsub = desc->log2_chroma_h;
+
+ ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_linesize,
+ inlink->w, inlink->h, inlink->format, 16);
+ if (ret < 0)
+ return ret;
+ memset(kerndeint->tmp_data[0], 0, ret);
+
+ if ((ret = av_image_fill_linesizes(kerndeint->tmp_bwidth, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ KerndeintContext *kerndeint = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+ const uint8_t *prvp; ///< Previous field's pixel line number n
+ const uint8_t *prvpp; ///< Previous field's pixel line number (n - 1)
+ const uint8_t *prvpn; ///< Previous field's pixel line number (n + 1)
+ const uint8_t *prvppp; ///< Previous field's pixel line number (n - 2)
+ const uint8_t *prvpnn; ///< Previous field's pixel line number (n + 2)
+ const uint8_t *prvp4p; ///< Previous field's pixel line number (n - 4)
+ const uint8_t *prvp4n; ///< Previous field's pixel line number (n + 4)
+
+ const uint8_t *srcp; ///< Current field's pixel line number n
+ const uint8_t *srcpp; ///< Current field's pixel line number (n - 1)
+ const uint8_t *srcpn; ///< Current field's pixel line number (n + 1)
+ const uint8_t *srcppp; ///< Current field's pixel line number (n - 2)
+ const uint8_t *srcpnn; ///< Current field's pixel line number (n + 2)
+ const uint8_t *srcp3p; ///< Current field's pixel line number (n - 3)
+ const uint8_t *srcp3n; ///< Current field's pixel line number (n + 3)
+ const uint8_t *srcp4p; ///< Current field's pixel line number (n - 4)
+ const uint8_t *srcp4n; ///< Current field's pixel line number (n + 4)
+
+ uint8_t *dstp, *dstp_saved;
+ const uint8_t *srcp_saved;
+
+ int src_linesize, psrc_linesize, dst_linesize, bwidth;
+ int x, y, plane, val, hi, lo, g, h, n = kerndeint->frame++;
+ double valf;
+
+ const int thresh = kerndeint->thresh;
+ const int order = kerndeint->order;
+ const int map = kerndeint->map;
+ const int sharp = kerndeint->sharp;
+ const int twoway = kerndeint->twoway;
+
+ const int is_packed_rgb = kerndeint->is_packed_rgb;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ outpic->interlaced_frame = 0;
+
+ for (plane = 0; plane < 4 && inpic->data[plane] && inpic->linesize[plane]; plane++) {
+ h = plane == 0 ? inlink->h : FF_CEIL_RSHIFT(inlink->h, kerndeint->vsub);
+ bwidth = kerndeint->tmp_bwidth[plane];
+
+ srcp_saved = inpic->data[plane];
+ src_linesize = inpic->linesize[plane];
+ psrc_linesize = kerndeint->tmp_linesize[plane];
+ dstp_saved = outpic->data[plane];
+ dst_linesize = outpic->linesize[plane];
+ srcp = srcp_saved + (1 - order) * src_linesize;
+ dstp = dstp_saved + (1 - order) * dst_linesize;
+
+ for (y = 0; y < h; y += 2) {
+ memcpy(dstp, srcp, bwidth);
+ srcp += 2 * src_linesize;
+ dstp += 2 * dst_linesize;
+ }
+
+ // Copy through the lines that will be missed below.
+ memcpy(dstp_saved + order * dst_linesize, srcp_saved + (1 - order) * src_linesize, bwidth);
+ memcpy(dstp_saved + (2 + order ) * dst_linesize, srcp_saved + (3 - order) * src_linesize, bwidth);
+ memcpy(dstp_saved + (h - 2 + order) * dst_linesize, srcp_saved + (h - 1 - order) * src_linesize, bwidth);
+ memcpy(dstp_saved + (h - 4 + order) * dst_linesize, srcp_saved + (h - 3 - order) * src_linesize, bwidth);
+
+ /* For the other field choose adaptively between using the previous field
+ or the interpolant from the current field. */
+ prvp = kerndeint->tmp_data[plane] + 5 * psrc_linesize - (1 - order) * psrc_linesize;
+ prvpp = prvp - psrc_linesize;
+ prvppp = prvp - 2 * psrc_linesize;
+ prvp4p = prvp - 4 * psrc_linesize;
+ prvpn = prvp + psrc_linesize;
+ prvpnn = prvp + 2 * psrc_linesize;
+ prvp4n = prvp + 4 * psrc_linesize;
+
+ srcp = srcp_saved + 5 * src_linesize - (1 - order) * src_linesize;
+ srcpp = srcp - src_linesize;
+ srcppp = srcp - 2 * src_linesize;
+ srcp3p = srcp - 3 * src_linesize;
+ srcp4p = srcp - 4 * src_linesize;
+
+ srcpn = srcp + src_linesize;
+ srcpnn = srcp + 2 * src_linesize;
+ srcp3n = srcp + 3 * src_linesize;
+ srcp4n = srcp + 4 * src_linesize;
+
+ dstp = dstp_saved + 5 * dst_linesize - (1 - order) * dst_linesize;
+
+ for (y = 5 - (1 - order); y <= h - 5 - (1 - order); y += 2) {
+ for (x = 0; x < bwidth; x++) {
+ if (thresh == 0 || n == 0 ||
+ (abs((int)prvp[x] - (int)srcp[x]) > thresh) ||
+ (abs((int)prvpp[x] - (int)srcpp[x]) > thresh) ||
+ (abs((int)prvpn[x] - (int)srcpn[x]) > thresh)) {
+ if (map) {
+ g = x & ~3;
+
+ if (is_packed_rgb) {
+ AV_WB32(dstp + g, 0xffffffff);
+ x = g + 3;
+ } else if (inlink->format == AV_PIX_FMT_YUYV422) {
+ // y <- 235, u <- 128, y <- 235, v <- 128
+ AV_WB32(dstp + g, 0xeb80eb80);
+ x = g + 3;
+ } else {
+ dstp[x] = plane == 0 ? 235 : 128;
+ }
+ } else {
+ if (is_packed_rgb) {
+ hi = 255;
+ lo = 0;
+ } else if (inlink->format == AV_PIX_FMT_YUYV422) {
+ hi = x & 1 ? 240 : 235;
+ lo = 16;
+ } else {
+ hi = plane == 0 ? 235 : 240;
+ lo = 16;
+ }
+
+ if (sharp) {
+ if (twoway) {
+ valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ + 0.170 * ((int)srcp[x] + (int)prvp[x])
+ - 0.116 * ((int)srcppp[x] + (int)srcpnn[x] + (int)prvppp[x] + (int)prvpnn[x])
+ - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+ + 0.031 * ((int)srcp4p[x] + (int)srcp4n[x] + (int)prvp4p[x] + (int)prvp4n[x]);
+ } else {
+ valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ + 0.170 * ((int)prvp[x])
+ - 0.116 * ((int)prvppp[x] + (int)prvpnn[x])
+ - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+ + 0.031 * ((int)prvp4p[x] + (int)prvp4p[x]);
+ }
+ dstp[x] = av_clip(valf, lo, hi);
+ } else {
+ if (twoway) {
+ val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)srcp[x] + (int)prvp[x])
+ - (int)(srcppp[x]) - (int)(srcpnn[x])
+ - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
+ } else {
+ val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)prvp[x])
+ - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
+ }
+ dstp[x] = av_clip(val, lo, hi);
+ }
+ }
+ } else {
+ dstp[x] = srcp[x];
+ }
+ }
+ prvp += 2 * psrc_linesize;
+ prvpp += 2 * psrc_linesize;
+ prvppp += 2 * psrc_linesize;
+ prvpn += 2 * psrc_linesize;
+ prvpnn += 2 * psrc_linesize;
+ prvp4p += 2 * psrc_linesize;
+ prvp4n += 2 * psrc_linesize;
+ srcp += 2 * src_linesize;
+ srcpp += 2 * src_linesize;
+ srcppp += 2 * src_linesize;
+ srcp3p += 2 * src_linesize;
+ srcp4p += 2 * src_linesize;
+ srcpn += 2 * src_linesize;
+ srcpnn += 2 * src_linesize;
+ srcp3n += 2 * src_linesize;
+ srcp4n += 2 * src_linesize;
+ dstp += 2 * dst_linesize;
+ }
+
+ srcp = inpic->data[plane];
+ dstp = kerndeint->tmp_data[plane];
+ av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h);
+ }
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad kerndeint_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad kerndeint_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+
+AVFilter ff_vf_kerndeint = {
+ .name = "kerndeint",
+ .description = NULL_IF_CONFIG_SMALL("Apply kernel deinterlacing to the input."),
+ .priv_size = sizeof(KerndeintContext),
+ .priv_class = &kerndeint_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = kerndeint_inputs,
+ .outputs = kerndeint_outputs,
+};
diff --git a/libavfilter/vf_lenscorrection.c b/libavfilter/vf_lenscorrection.c
new file mode 100644
index 0000000..9fb1424
--- /dev/null
+++ b/libavfilter/vf_lenscorrection.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2007 Richard Spindler (author of frei0r plugin from which this was derived)
+ * Copyright (C) 2014 Daniel Oberhoff
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Lenscorrection filter, algorithm from the frei0r plugin with the same name
+*/
+#include <stdlib.h>
+#include <math.h>
+
+#include "libavutil/opt.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct LenscorrectionCtx {
+ const AVClass *av_class;
+ unsigned int width;
+ unsigned int height;
+ int hsub, vsub;
+ int nb_planes;
+ double cx, cy, k1, k2;
+ int32_t *correction[4];
+} LenscorrectionCtx;
+
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption lenscorrection_options[] = {
+ { "cx", "set relative center x", offsetof(LenscorrectionCtx, cx), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
+ { "cy", "set relative center y", offsetof(LenscorrectionCtx, cy), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
+ { "k1", "set quadratic distortion factor", offsetof(LenscorrectionCtx, k1), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
+ { "k2", "set double quadratic distortion factor", offsetof(LenscorrectionCtx, k2), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(lenscorrection);
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int w, h;
+ int plane;
+ int xcenter, ycenter;
+ int32_t *correction;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
+{
+ ThreadData *td = (ThreadData*)arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+
+ const int w = td->w, h = td->h;
+ const int xcenter = td->xcenter;
+ const int ycenter = td->ycenter;
+ const int start = (h * job ) / nb_jobs;
+ const int end = (h * (job+1)) / nb_jobs;
+ const int plane = td->plane;
+ const int inlinesize = in->linesize[plane];
+ const int outlinesize = out->linesize[plane];
+ const uint8_t *indata = in->data[plane];
+ uint8_t *outrow = out->data[plane] + start * outlinesize;
+ int i;
+ for (i = start; i < end; i++, outrow += outlinesize) {
+ const int off_y = i - ycenter;
+ uint8_t *out = outrow;
+ int j;
+ for (j = 0; j < w; j++) {
+ const int off_x = j - xcenter;
+ const int64_t radius_mult = td->correction[j + i*w];
+ const int x = xcenter + ((radius_mult * off_x + (1<<23))>>24);
+ const int y = ycenter + ((radius_mult * off_y + (1<<23))>>24);
+ const char isvalid = x > 0 && x < w - 1 && y > 0 && y < h - 1;
+ *out++ = isvalid ? indata[y * inlinesize + x] : 0;
+ }
+ }
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LenscorrectionCtx *rect = ctx->priv;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(rect->correction); i++) {
+ av_freep(&rect->correction[i]);
+ }
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LenscorrectionCtx *rect = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
+ rect->hsub = pixdesc->log2_chroma_w;
+ rect->vsub = pixdesc->log2_chroma_h;
+ outlink->w = rect->width = inlink->w;
+ outlink->h = rect->height = inlink->h;
+ rect->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ LenscorrectionCtx *rect = (LenscorrectionCtx*)ctx->priv;
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ int plane;
+
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(out, in);
+
+ for (plane = 0; plane < rect->nb_planes; ++plane) {
+ int hsub = plane == 1 || plane == 2 ? rect->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? rect->vsub : 0;
+ int hdiv = 1 << hsub;
+ int vdiv = 1 << vsub;
+ int w = rect->width / hdiv;
+ int h = rect->height / vdiv;
+ int xcenter = rect->cx * w;
+ int ycenter = rect->cy * h;
+ int k1 = rect->k1 * (1<<24);
+ int k2 = rect->k2 * (1<<24);
+ ThreadData td = {
+ .in = in,
+ .out = out,
+ .w = w,
+ .h = h,
+ .xcenter = xcenter,
+ .ycenter = ycenter,
+ .plane = plane};
+
+ if (!rect->correction[plane]) {
+ int i,j;
+ const int64_t r2inv = (4LL<<60) / (w * w + h * h);
+
+ rect->correction[plane] = av_malloc_array(w, h * sizeof(**rect->correction));
+ if (!rect->correction[plane])
+ return AVERROR(ENOMEM);
+ for (j = 0; j < h; j++) {
+ const int off_y = j - ycenter;
+ const int off_y2 = off_y * off_y;
+ for (i = 0; i < w; i++) {
+ const int off_x = i - xcenter;
+ const int64_t r2 = ((off_x * off_x + off_y2) * r2inv + (1LL<<31)) >> 32;
+ const int64_t r4 = (r2 * r2 + (1<<27)) >> 28;
+ const int radius_mult = (r2 * k1 + r4 * k2 + (1LL<<27) + (1LL<<52))>>28;
+ rect->correction[plane][j * w + i] = radius_mult;
+ }
+ }
+ }
+
+ td.correction = rect->correction[plane];
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad lenscorrection_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad lenscorrection_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_lenscorrection = {
+ .name = "lenscorrection",
+ .description = NULL_IF_CONFIG_SMALL("Rectify the image by correcting for lens distortion."),
+ .priv_size = sizeof(LenscorrectionCtx),
+ .query_formats = query_formats,
+ .inputs = lenscorrection_inputs,
+ .outputs = lenscorrection_outputs,
+ .priv_class = &lenscorrection_class,
+ .uninit = uninit,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_libopencv.c b/libavfilter/vf_libopencv.c
index bd3d59b..f55f552 100644
--- a/libavfilter/vf_libopencv.c
+++ b/libavfilter/vf_libopencv.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2010 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -166,7 +166,7 @@ static int read_shape_from_file(int *cols, int *rows, int **values, const char *
*rows, *cols);
return AVERROR_INVALIDDATA;
}
- if (!(*values = av_mallocz(sizeof(int) * *rows * *cols)))
+ if (!(*values = av_mallocz_array(sizeof(int) * *rows, *cols)))
return AVERROR(ENOMEM);
/* fill *values */
@@ -259,17 +259,18 @@ static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
const char *buf = args;
int ret;
- dilate->nb_iterations = 1;
-
if (args)
kernel_str = av_get_token(&buf, "|");
- if ((ret = parse_iplconvkernel(&dilate->kernel,
- *kernel_str ? kernel_str : default_kernel_str,
- ctx)) < 0)
+ else
+ kernel_str = av_strdup(default_kernel_str);
+ if (!kernel_str)
+ return AVERROR(ENOMEM);
+ if ((ret = parse_iplconvkernel(&dilate->kernel, kernel_str, ctx)) < 0)
return ret;
av_free(kernel_str);
- sscanf(buf, "|%d", &dilate->nb_iterations);
+ if (!buf || sscanf(buf, "|%d", &dilate->nb_iterations) != 1)
+ dilate->nb_iterations = 1;
av_log(ctx, AV_LOG_VERBOSE, "iterations_nb:%d\n", dilate->nb_iterations);
if (dilate->nb_iterations <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid non-positive value '%d' for nb_iterations\n",
@@ -320,6 +321,10 @@ static av_cold int init(AVFilterContext *ctx)
OCVContext *s = ctx->priv;
int i;
+ if (!s->name) {
+ av_log(ctx, AV_LOG_ERROR, "No libopencv filter name specified\n");
+ return AVERROR(EINVAL);
+ }
for (i = 0; i < FF_ARRAY_ELEMS(ocv_filter_entries); i++) {
OCVFilterEntry *entry = &ocv_filter_entries[i];
if (!strcmp(s->name, entry->name)) {
@@ -372,24 +377,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(OCVContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption ocv_options[] = {
{ "filter_name", NULL, OFFSET(name), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { NULL },
+ { NULL }
};
-static const AVClass ocv_class = {
- .class_name = "ocv",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(ocv);
static const AVFilterPad avfilter_vf_ocv_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -404,17 +404,13 @@ static const AVFilterPad avfilter_vf_ocv_outputs[] = {
};
AVFilter ff_vf_ocv = {
- .name = "ocv",
- .description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
-
- .priv_size = sizeof(OCVContext),
- .priv_class = &ocv_class,
-
+ .name = "ocv",
+ .description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
+ .priv_size = sizeof(OCVContext),
+ .priv_class = &ocv_class,
.query_formats = query_formats,
- .init = init,
- .uninit = uninit,
-
- .inputs = avfilter_vf_ocv_inputs,
-
- .outputs = avfilter_vf_ocv_outputs,
+ .init = init,
+ .uninit = uninit,
+ .inputs = avfilter_vf_ocv_inputs,
+ .outputs = avfilter_vf_ocv_outputs,
};
diff --git a/libavfilter/vf_lut.c b/libavfilter/vf_lut.c
index 9299d40..0b7a2ca 100644
--- a/libavfilter/vf_lut.c
+++ b/libavfilter/vf_lut.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,18 +27,15 @@
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
-#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"w", ///< width of the input video
"h", ///< height of the input video
"val", ///< input value for the pixel
@@ -50,9 +47,6 @@ static const char *const var_names[] = {
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_W,
VAR_H,
VAR_VAL,
@@ -71,7 +65,6 @@ typedef struct LutContext {
int hsub, vsub;
double var_values[VAR_VARS_NB];
int is_rgb, is_yuv;
- int rgba_map[4];
int step;
int negate_alpha; /* only used by negate */
} LutContext;
@@ -85,9 +78,9 @@ typedef struct LutContext {
#define A 3
#define OFFSET(x) offsetof(LutContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption lut_options[] = {
+static const AVOption options[] = {
{ "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
@@ -99,23 +92,9 @@ static const AVOption lut_options[] = {
{ "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
{ "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
- { NULL },
+ { NULL }
};
-static av_cold int init(AVFilterContext *ctx)
-{
- LutContext *s = ctx->priv;
-
- s->var_values[VAR_PHI] = M_PHI;
- s->var_values[VAR_PI] = M_PI;
- s->var_values[VAR_E ] = M_E;
-
- s->is_rgb = !strcmp(ctx->filter->name, "lutrgb");
- s->is_yuv = !strcmp(ctx->filter->name, "lutyuv");
-
- return 0;
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
@@ -131,7 +110,7 @@ static av_cold void uninit(AVFilterContext *ctx)
#define YUV_FORMATS \
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, \
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, \
- AV_PIX_FMT_YUVA420P, \
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, \
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
AV_PIX_FMT_YUVJ440P
@@ -182,15 +161,32 @@ static double compute_gammaval(void *opaque, double gamma)
return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
}
+/**
+ * Compute Rec.709 gama correction of value val
+ */
+static double compute_gammaval709(void *opaque, double gamma)
+{
+ LutContext *s = opaque;
+ double val = s->var_values[VAR_CLIPVAL];
+ double minval = s->var_values[VAR_MINVAL];
+ double maxval = s->var_values[VAR_MAXVAL];
+ double level = (val - minval) / (maxval - minval);
+ level = level < 0.018 ? 4.5 * level
+ : 1.099 * pow(level, 1.0 / gamma) - 0.099;
+ return level * (maxval - minval) + minval;
+}
+
static double (* const funcs1[])(void *, double) = {
- clip,
- compute_gammaval,
+ (void *)clip,
+ (void *)compute_gammaval,
+ (void *)compute_gammaval709,
NULL
};
static const char * const funcs1_names[] = {
"clip",
"gammaval",
+ "gammaval709",
NULL
};
@@ -199,8 +195,9 @@ static int config_props(AVFilterLink *inlink)
AVFilterContext *ctx = inlink->dst;
LutContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ uint8_t rgba_map[4]; /* component index -> RGBA color index map */
int min[4], max[4];
- int val, comp, ret;
+ int val, color, ret;
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
@@ -216,6 +213,8 @@ static int config_props(AVFilterLink *inlink)
case AV_PIX_FMT_YUV440P:
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUVA420P:
+ case AV_PIX_FMT_YUVA422P:
+ case AV_PIX_FMT_YUVA444P:
min[Y] = min[U] = min[V] = 16;
max[Y] = 235;
max[U] = max[V] = 240;
@@ -231,51 +230,45 @@ static int config_props(AVFilterLink *inlink)
else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1;
if (s->is_rgb) {
- switch (inlink->format) {
- case AV_PIX_FMT_ARGB: s->rgba_map[A] = 0; s->rgba_map[R] = 1; s->rgba_map[G] = 2; s->rgba_map[B] = 3; break;
- case AV_PIX_FMT_ABGR: s->rgba_map[A] = 0; s->rgba_map[B] = 1; s->rgba_map[G] = 2; s->rgba_map[R] = 3; break;
- case AV_PIX_FMT_RGBA:
- case AV_PIX_FMT_RGB24: s->rgba_map[R] = 0; s->rgba_map[G] = 1; s->rgba_map[B] = 2; s->rgba_map[A] = 3; break;
- case AV_PIX_FMT_BGRA:
- case AV_PIX_FMT_BGR24: s->rgba_map[B] = 0; s->rgba_map[G] = 1; s->rgba_map[R] = 2; s->rgba_map[A] = 3; break;
- }
+ ff_fill_rgba_map(rgba_map, inlink->format);
s->step = av_get_bits_per_pixel(desc) >> 3;
}
- for (comp = 0; comp < desc->nb_components; comp++) {
+ for (color = 0; color < desc->nb_components; color++) {
double res;
+ int comp = s->is_rgb ? rgba_map[color] : color;
/* create the parsed expression */
- av_expr_free(s->comp_expr[comp]);
- s->comp_expr[comp] = NULL;
- ret = av_expr_parse(&s->comp_expr[comp], s->comp_expr_str[comp],
+ av_expr_free(s->comp_expr[color]);
+ s->comp_expr[color] = NULL;
+ ret = av_expr_parse(&s->comp_expr[color], s->comp_expr_str[color],
var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
- "Error when parsing the expression '%s' for the component %d.\n",
- s->comp_expr_str[comp], comp);
+ "Error when parsing the expression '%s' for the component %d and color %d.\n",
+ s->comp_expr_str[comp], comp, color);
return AVERROR(EINVAL);
}
- /* compute the s */
- s->var_values[VAR_MAXVAL] = max[comp];
- s->var_values[VAR_MINVAL] = min[comp];
+ /* compute the lut */
+ s->var_values[VAR_MAXVAL] = max[color];
+ s->var_values[VAR_MINVAL] = min[color];
for (val = 0; val < 256; val++) {
s->var_values[VAR_VAL] = val;
- s->var_values[VAR_CLIPVAL] = av_clip(val, min[comp], max[comp]);
+ s->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
s->var_values[VAR_NEGVAL] =
- av_clip(min[comp] + max[comp] - s->var_values[VAR_VAL],
- min[comp], max[comp]);
+ av_clip(min[color] + max[color] - s->var_values[VAR_VAL],
+ min[color], max[color]);
- res = av_expr_eval(s->comp_expr[comp], s->var_values, s);
+ res = av_expr_eval(s->comp_expr[color], s->var_values, s);
if (isnan(res)) {
av_log(ctx, AV_LOG_ERROR,
- "Error when evaluating the expression '%s' for the value %d for the component #%d.\n",
- s->comp_expr_str[comp], val, comp);
+ "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
+ s->comp_expr_str[color], val, comp);
return AVERROR(EINVAL);
}
- s->lut[comp][val] = av_clip((int)res, min[comp], max[comp]);
+ s->lut[comp][val] = av_clip((int)res, min[color], max[color]);
av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, s->lut[comp][val]);
}
}
@@ -290,14 +283,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
uint8_t *inrow, *outrow, *inrow0, *outrow0;
- int i, j, k, plane;
+ int i, j, plane, direct = 0;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
}
- av_frame_copy_props(out, in);
if (s->is_rgb) {
/* packed */
@@ -305,11 +303,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
outrow0 = out->data[0];
for (i = 0; i < in->height; i ++) {
+ int w = inlink->w;
+ const uint8_t (*tab)[256] = (const uint8_t (*)[256])s->lut;
inrow = inrow0;
outrow = outrow0;
- for (j = 0; j < inlink->w; j++) {
- for (k = 0; k < s->step; k++)
- outrow[k] = s->lut[s->rgba_map[k]][inrow[k]];
+ for (j = 0; j < w; j++) {
+ switch (s->step) {
+ case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
+ case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
+ case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
+ default: outrow[0] = tab[0][inrow[0]];
+ }
outrow += s->step;
inrow += s->step;
}
@@ -318,77 +322,116 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
} else {
/* planar */
- for (plane = 0; plane < 4 && in->data[plane]; plane++) {
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int h = FF_CEIL_RSHIFT(inlink->h, vsub);
+ int w = FF_CEIL_RSHIFT(inlink->w, hsub);
inrow = in ->data[plane];
outrow = out->data[plane];
- for (i = 0; i < in->height >> vsub; i ++) {
- for (j = 0; j < inlink->w>>hsub; j++)
- outrow[j] = s->lut[plane][inrow[j]];
+ for (i = 0; i < h; i++) {
+ const uint8_t *tab = s->lut[plane];
+ for (j = 0; j < w; j++)
+ outrow[j] = tab[inrow[j]];
inrow += in ->linesize[plane];
outrow += out->linesize[plane];
}
}
}
- av_frame_free(&in);
+ if (!direct)
+ av_frame_free(&in);
+
return ff_filter_frame(outlink, out);
}
static const AVFilterPad inputs[] = {
- { .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame,
- .config_props = config_props,
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
},
- { .name = NULL}
+ { NULL }
};
static const AVFilterPad outputs[] = {
- { .name = "default",
- .type = AVMEDIA_TYPE_VIDEO, },
- { .name = NULL}
+ { .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
};
-#define DEFINE_LUT_FILTER(name_, description_, init_, options) \
- static const AVClass name_ ## _class = { \
- .class_name = #name_, \
- .item_name = av_default_item_name, \
- .option = options, \
- .version = LIBAVUTIL_VERSION_INT, \
- }; \
+
+#define DEFINE_LUT_FILTER(name_, description_) \
AVFilter ff_vf_##name_ = { \
.name = #name_, \
.description = NULL_IF_CONFIG_SMALL(description_), \
.priv_size = sizeof(LutContext), \
.priv_class = &name_ ## _class, \
- \
- .init = init_, \
+ .init = name_##_init, \
.uninit = uninit, \
.query_formats = query_formats, \
- \
.inputs = inputs, \
.outputs = outputs, \
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, \
}
#if CONFIG_LUT_FILTER
-DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.", init, lut_options);
+
+#define lut_options options
+AVFILTER_DEFINE_CLASS(lut);
+
+static int lut_init(AVFilterContext *ctx)
+{
+ return 0;
+}
+
+DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.");
#endif
+
#if CONFIG_LUTYUV_FILTER
-DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.", init, lut_options);
+
+#define lutyuv_options options
+AVFILTER_DEFINE_CLASS(lutyuv);
+
+static av_cold int lutyuv_init(AVFilterContext *ctx)
+{
+ LutContext *s = ctx->priv;
+
+ s->is_yuv = 1;
+
+ return 0;
+}
+
+DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.");
#endif
+
#if CONFIG_LUTRGB_FILTER
-DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.", init, lut_options);
+
+#define lutrgb_options options
+AVFILTER_DEFINE_CLASS(lutrgb);
+
+static av_cold int lutrgb_init(AVFilterContext *ctx)
+{
+ LutContext *s = ctx->priv;
+
+ s->is_rgb = 1;
+
+ return 0;
+}
+
+DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.");
#endif
#if CONFIG_NEGATE_FILTER
static const AVOption negate_options[] = {
- { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, .flags = FLAGS },
- { NULL },
+ { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(negate);
+
static av_cold int negate_init(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
@@ -397,7 +440,7 @@ static av_cold int negate_init(AVFilterContext *ctx)
av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", s->negate_alpha);
for (i = 0; i < 4; i++) {
- s->comp_expr_str[i] = av_strdup((i == 3 && s->negate_alpha) ?
+ s->comp_expr_str[i] = av_strdup((i == 3 && !s->negate_alpha) ?
"val" : "negval");
if (!s->comp_expr_str[i]) {
uninit(ctx);
@@ -405,9 +448,9 @@ static av_cold int negate_init(AVFilterContext *ctx)
}
}
- return init(ctx);
+ return 0;
}
-DEFINE_LUT_FILTER(negate, "Negate input video.", negate_init, negate_options);
+DEFINE_LUT_FILTER(negate, "Negate input video.");
#endif
diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c
new file mode 100644
index 0000000..862ddde
--- /dev/null
+++ b/libavfilter/vf_lut3d.c
@@ -0,0 +1,815 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * 3D Lookup table filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/file.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "dualinput.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+enum interp_mode {
+ INTERPOLATE_NEAREST,
+ INTERPOLATE_TRILINEAR,
+ INTERPOLATE_TETRAHEDRAL,
+ NB_INTERP_MODE
+};
+
+struct rgbvec {
+ float r, g, b;
+};
+
+/* 3D LUT don't often go up to level 32, but it is common to have a Hald CLUT
+ * of 512x512 (64x64x64) */
+#define MAX_LEVEL 64
+
+typedef struct LUT3DContext {
+ const AVClass *class;
+ enum interp_mode interpolation;
+ char *file;
+ uint8_t rgba_map[4];
+ int step;
+ avfilter_action_func *interp;
+ struct rgbvec lut[MAX_LEVEL][MAX_LEVEL][MAX_LEVEL];
+ int lutsize;
+#if CONFIG_HALDCLUT_FILTER
+ uint8_t clut_rgba_map[4];
+ int clut_step;
+ int clut_is16bit;
+ int clut_width;
+ FFDualInputContext dinput;
+#endif
+} LUT3DContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+#define OFFSET(x) offsetof(LUT3DContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define COMMON_OPTIONS \
+ { "interp", "select interpolation mode", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=INTERPOLATE_TETRAHEDRAL}, 0, NB_INTERP_MODE-1, FLAGS, "interp_mode" }, \
+ { "nearest", "use values from the nearest defined points", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_NEAREST}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+ { "trilinear", "interpolate values using the 8 points defining a cube", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TRILINEAR}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+ { "tetrahedral", "interpolate values using a tetrahedron", 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATE_TETRAHEDRAL}, INT_MIN, INT_MAX, FLAGS, "interp_mode" }, \
+ { NULL }
+
+static inline float lerpf(float v0, float v1, float f)
+{
+ return v0 + (v1 - v0) * f;
+}
+
+static inline struct rgbvec lerp(const struct rgbvec *v0, const struct rgbvec *v1, float f)
+{
+ struct rgbvec v = {
+ lerpf(v0->r, v1->r, f), lerpf(v0->g, v1->g, f), lerpf(v0->b, v1->b, f)
+ };
+ return v;
+}
+
+#define NEAR(x) ((int)((x) + .5))
+#define PREV(x) ((int)(x))
+#define NEXT(x) (FFMIN((int)(x) + 1, lut3d->lutsize - 1))
+
+/**
+ * Get the nearest defined point
+ */
+static inline struct rgbvec interp_nearest(const LUT3DContext *lut3d,
+ const struct rgbvec *s)
+{
+ return lut3d->lut[NEAR(s->r)][NEAR(s->g)][NEAR(s->b)];
+}
+
+/**
+ * Interpolate using the 8 vertices of a cube
+ * @see https://en.wikipedia.org/wiki/Trilinear_interpolation
+ */
+static inline struct rgbvec interp_trilinear(const LUT3DContext *lut3d,
+ const struct rgbvec *s)
+{
+ const int prev[] = {PREV(s->r), PREV(s->g), PREV(s->b)};
+ const int next[] = {NEXT(s->r), NEXT(s->g), NEXT(s->b)};
+ const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
+ const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
+ const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
+ const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
+ const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
+ const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
+ const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
+ const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
+ const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
+ const struct rgbvec c00 = lerp(&c000, &c100, d.r);
+ const struct rgbvec c10 = lerp(&c010, &c110, d.r);
+ const struct rgbvec c01 = lerp(&c001, &c101, d.r);
+ const struct rgbvec c11 = lerp(&c011, &c111, d.r);
+ const struct rgbvec c0 = lerp(&c00, &c10, d.g);
+ const struct rgbvec c1 = lerp(&c01, &c11, d.g);
+ const struct rgbvec c = lerp(&c0, &c1, d.b);
+ return c;
+}
+
+/**
+ * Tetrahedral interpolation. Based on code found in Truelight Software Library paper.
+ * @see http://www.filmlight.ltd.uk/pdf/whitepapers/FL-TL-TN-0057-SoftwareLib.pdf
+ */
+static inline struct rgbvec interp_tetrahedral(const LUT3DContext *lut3d,
+ const struct rgbvec *s)
+{
+ const int prev[] = {PREV(s->r), PREV(s->g), PREV(s->b)};
+ const int next[] = {NEXT(s->r), NEXT(s->g), NEXT(s->b)};
+ const struct rgbvec d = {s->r - prev[0], s->g - prev[1], s->b - prev[2]};
+ const struct rgbvec c000 = lut3d->lut[prev[0]][prev[1]][prev[2]];
+ const struct rgbvec c111 = lut3d->lut[next[0]][next[1]][next[2]];
+ struct rgbvec c;
+ if (d.r > d.g) {
+ if (d.g > d.b) {
+ const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
+ const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
+ c.r = (1-d.r) * c000.r + (d.r-d.g) * c100.r + (d.g-d.b) * c110.r + (d.b) * c111.r;
+ c.g = (1-d.r) * c000.g + (d.r-d.g) * c100.g + (d.g-d.b) * c110.g + (d.b) * c111.g;
+ c.b = (1-d.r) * c000.b + (d.r-d.g) * c100.b + (d.g-d.b) * c110.b + (d.b) * c111.b;
+ } else if (d.r > d.b) {
+ const struct rgbvec c100 = lut3d->lut[next[0]][prev[1]][prev[2]];
+ const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
+ c.r = (1-d.r) * c000.r + (d.r-d.b) * c100.r + (d.b-d.g) * c101.r + (d.g) * c111.r;
+ c.g = (1-d.r) * c000.g + (d.r-d.b) * c100.g + (d.b-d.g) * c101.g + (d.g) * c111.g;
+ c.b = (1-d.r) * c000.b + (d.r-d.b) * c100.b + (d.b-d.g) * c101.b + (d.g) * c111.b;
+ } else {
+ const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
+ const struct rgbvec c101 = lut3d->lut[next[0]][prev[1]][next[2]];
+ c.r = (1-d.b) * c000.r + (d.b-d.r) * c001.r + (d.r-d.g) * c101.r + (d.g) * c111.r;
+ c.g = (1-d.b) * c000.g + (d.b-d.r) * c001.g + (d.r-d.g) * c101.g + (d.g) * c111.g;
+ c.b = (1-d.b) * c000.b + (d.b-d.r) * c001.b + (d.r-d.g) * c101.b + (d.g) * c111.b;
+ }
+ } else {
+ if (d.b > d.g) {
+ const struct rgbvec c001 = lut3d->lut[prev[0]][prev[1]][next[2]];
+ const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
+ c.r = (1-d.b) * c000.r + (d.b-d.g) * c001.r + (d.g-d.r) * c011.r + (d.r) * c111.r;
+ c.g = (1-d.b) * c000.g + (d.b-d.g) * c001.g + (d.g-d.r) * c011.g + (d.r) * c111.g;
+ c.b = (1-d.b) * c000.b + (d.b-d.g) * c001.b + (d.g-d.r) * c011.b + (d.r) * c111.b;
+ } else if (d.b > d.r) {
+ const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
+ const struct rgbvec c011 = lut3d->lut[prev[0]][next[1]][next[2]];
+ c.r = (1-d.g) * c000.r + (d.g-d.b) * c010.r + (d.b-d.r) * c011.r + (d.r) * c111.r;
+ c.g = (1-d.g) * c000.g + (d.g-d.b) * c010.g + (d.b-d.r) * c011.g + (d.r) * c111.g;
+ c.b = (1-d.g) * c000.b + (d.g-d.b) * c010.b + (d.b-d.r) * c011.b + (d.r) * c111.b;
+ } else {
+ const struct rgbvec c010 = lut3d->lut[prev[0]][next[1]][prev[2]];
+ const struct rgbvec c110 = lut3d->lut[next[0]][next[1]][prev[2]];
+ c.r = (1-d.g) * c000.r + (d.g-d.r) * c010.r + (d.r-d.b) * c110.r + (d.b) * c111.r;
+ c.g = (1-d.g) * c000.g + (d.g-d.r) * c010.g + (d.r-d.b) * c110.g + (d.b) * c111.g;
+ c.b = (1-d.g) * c000.b + (d.g-d.r) * c010.b + (d.r-d.b) * c110.b + (d.b) * c111.b;
+ }
+ }
+ return c;
+}
+
+#define DEFINE_INTERP_FUNC(name, nbits) \
+static int interp_##nbits##_##name(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
+{ \
+ int x, y; \
+ const LUT3DContext *lut3d = ctx->priv; \
+ const ThreadData *td = arg; \
+ const AVFrame *in = td->in; \
+ const AVFrame *out = td->out; \
+ const int direct = out == in; \
+ const int step = lut3d->step; \
+ const uint8_t r = lut3d->rgba_map[R]; \
+ const uint8_t g = lut3d->rgba_map[G]; \
+ const uint8_t b = lut3d->rgba_map[B]; \
+ const uint8_t a = lut3d->rgba_map[A]; \
+ const int slice_start = (in->height * jobnr ) / nb_jobs; \
+ const int slice_end = (in->height * (jobnr+1)) / nb_jobs; \
+ uint8_t *dstrow = out->data[0] + slice_start * out->linesize[0]; \
+ const uint8_t *srcrow = in ->data[0] + slice_start * in ->linesize[0]; \
+ const float scale = (1. / ((1<<nbits) - 1)) * (lut3d->lutsize - 1); \
+ \
+ for (y = slice_start; y < slice_end; y++) { \
+ uint##nbits##_t *dst = (uint##nbits##_t *)dstrow; \
+ const uint##nbits##_t *src = (const uint##nbits##_t *)srcrow; \
+ for (x = 0; x < in->width * step; x += step) { \
+ const struct rgbvec scaled_rgb = {src[x + r] * scale, \
+ src[x + g] * scale, \
+ src[x + b] * scale}; \
+ struct rgbvec vec = interp_##name(lut3d, &scaled_rgb); \
+ dst[x + r] = av_clip_uint##nbits(vec.r * (float)((1<<nbits) - 1)); \
+ dst[x + g] = av_clip_uint##nbits(vec.g * (float)((1<<nbits) - 1)); \
+ dst[x + b] = av_clip_uint##nbits(vec.b * (float)((1<<nbits) - 1)); \
+ if (!direct && step == 4) \
+ dst[x + a] = src[x + a]; \
+ } \
+ dstrow += out->linesize[0]; \
+ srcrow += in ->linesize[0]; \
+ } \
+ return 0; \
+}
+
+DEFINE_INTERP_FUNC(nearest, 8)
+DEFINE_INTERP_FUNC(trilinear, 8)
+DEFINE_INTERP_FUNC(tetrahedral, 8)
+
+DEFINE_INTERP_FUNC(nearest, 16)
+DEFINE_INTERP_FUNC(trilinear, 16)
+DEFINE_INTERP_FUNC(tetrahedral, 16)
+
+#define MAX_LINE_SIZE 512
+
+static int skip_line(const char *p)
+{
+ while (*p && av_isspace(*p))
+ p++;
+ return !*p || *p == '#';
+}
+
+#define NEXT_LINE(loop_cond) do { \
+ if (!fgets(line, sizeof(line), f)) { \
+ av_log(ctx, AV_LOG_ERROR, "Unexpected EOF\n"); \
+ return AVERROR_INVALIDDATA; \
+ } \
+} while (loop_cond)
+
+/* Basically r g and b float values on each line, with a facultative 3DLUTSIZE
+ * directive; seems to be generated by Davinci */
+static int parse_dat(AVFilterContext *ctx, FILE *f)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ char line[MAX_LINE_SIZE];
+ int i, j, k, size;
+
+ lut3d->lutsize = size = 33;
+
+ NEXT_LINE(skip_line(line));
+ if (!strncmp(line, "3DLUTSIZE ", 10)) {
+ size = strtol(line + 10, NULL, 0);
+ if (size < 2 || size > MAX_LEVEL) {
+ av_log(ctx, AV_LOG_ERROR, "Too large or invalid 3D LUT size\n");
+ return AVERROR(EINVAL);
+ }
+ lut3d->lutsize = size;
+ NEXT_LINE(skip_line(line));
+ }
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+ if (k != 0 || j != 0 || i != 0)
+ NEXT_LINE(skip_line(line));
+ if (sscanf(line, "%f %f %f", &vec->r, &vec->g, &vec->b) != 3)
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Iridas format */
+static int parse_cube(AVFilterContext *ctx, FILE *f)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ char line[MAX_LINE_SIZE];
+ float min[3] = {0.0, 0.0, 0.0};
+ float max[3] = {1.0, 1.0, 1.0};
+
+ while (fgets(line, sizeof(line), f)) {
+ if (!strncmp(line, "LUT_3D_SIZE ", 12)) {
+ int i, j, k;
+ const int size = strtol(line + 12, NULL, 0);
+
+ if (size < 2 || size > MAX_LEVEL) {
+ av_log(ctx, AV_LOG_ERROR, "Too large or invalid 3D LUT size\n");
+ return AVERROR(EINVAL);
+ }
+ lut3d->lutsize = size;
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[i][j][k];
+
+ do {
+ NEXT_LINE(0);
+ if (!strncmp(line, "DOMAIN_", 7)) {
+ float *vals = NULL;
+ if (!strncmp(line + 7, "MIN ", 4)) vals = min;
+ else if (!strncmp(line + 7, "MAX ", 4)) vals = max;
+ if (!vals)
+ return AVERROR_INVALIDDATA;
+ sscanf(line + 11, "%f %f %f", vals, vals + 1, vals + 2);
+ av_log(ctx, AV_LOG_DEBUG, "min: %f %f %f | max: %f %f %f\n",
+ min[0], min[1], min[2], max[0], max[1], max[2]);
+ continue;
+ }
+ } while (skip_line(line));
+ if (sscanf(line, "%f %f %f", &vec->r, &vec->g, &vec->b) != 3)
+ return AVERROR_INVALIDDATA;
+ vec->r *= max[0] - min[0];
+ vec->g *= max[1] - min[1];
+ vec->b *= max[2] - min[2];
+ }
+ }
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Assume 17x17x17 LUT with a 16-bit depth
+ * FIXME: it seems there are various 3dl formats */
+static int parse_3dl(AVFilterContext *ctx, FILE *f)
+{
+ char line[MAX_LINE_SIZE];
+ LUT3DContext *lut3d = ctx->priv;
+ int i, j, k;
+ const int size = 17;
+ const float scale = 16*16*16;
+
+ lut3d->lutsize = size;
+ NEXT_LINE(skip_line(line));
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ int r, g, b;
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+
+ NEXT_LINE(skip_line(line));
+ if (sscanf(line, "%d %d %d", &r, &g, &b) != 3)
+ return AVERROR_INVALIDDATA;
+ vec->r = r / scale;
+ vec->g = g / scale;
+ vec->b = b / scale;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Pandora format */
+static int parse_m3d(AVFilterContext *ctx, FILE *f)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ float scale;
+ int i, j, k, size, in = -1, out = -1;
+ char line[MAX_LINE_SIZE];
+ uint8_t rgb_map[3] = {0, 1, 2};
+
+ while (fgets(line, sizeof(line), f)) {
+ if (!strncmp(line, "in", 2)) in = strtol(line + 2, NULL, 0);
+ else if (!strncmp(line, "out", 3)) out = strtol(line + 3, NULL, 0);
+ else if (!strncmp(line, "values", 6)) {
+ const char *p = line + 6;
+#define SET_COLOR(id) do { \
+ while (av_isspace(*p)) \
+ p++; \
+ switch (*p) { \
+ case 'r': rgb_map[id] = 0; break; \
+ case 'g': rgb_map[id] = 1; break; \
+ case 'b': rgb_map[id] = 2; break; \
+ } \
+ while (*p && !av_isspace(*p)) \
+ p++; \
+} while (0)
+ SET_COLOR(0);
+ SET_COLOR(1);
+ SET_COLOR(2);
+ break;
+ }
+ }
+
+ if (in == -1 || out == -1) {
+ av_log(ctx, AV_LOG_ERROR, "in and out must be defined\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (in < 2 || out < 2 ||
+ in > MAX_LEVEL*MAX_LEVEL*MAX_LEVEL ||
+ out > MAX_LEVEL*MAX_LEVEL*MAX_LEVEL) {
+ av_log(ctx, AV_LOG_ERROR, "invalid in (%d) or out (%d)\n", in, out);
+ return AVERROR_INVALIDDATA;
+ }
+ for (size = 1; size*size*size < in; size++);
+ lut3d->lutsize = size;
+ scale = 1. / (out - 1);
+
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+ float val[3];
+
+ NEXT_LINE(0);
+ if (sscanf(line, "%f %f %f", val, val + 1, val + 2) != 3)
+ return AVERROR_INVALIDDATA;
+ vec->r = val[rgb_map[0]] * scale;
+ vec->g = val[rgb_map[1]] * scale;
+ vec->b = val[rgb_map[2]] * scale;
+ }
+ }
+ }
+ return 0;
+}
+
+static void set_identity_matrix(LUT3DContext *lut3d, int size)
+{
+ int i, j, k;
+ const float c = 1. / (size - 1);
+
+ lut3d->lutsize = size;
+ for (k = 0; k < size; k++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < size; i++) {
+ struct rgbvec *vec = &lut3d->lut[k][j][i];
+ vec->r = k * c;
+ vec->g = j * c;
+ vec->b = i * c;
+ }
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int is16bit = 0;
+ LUT3DContext *lut3d = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ is16bit = 1;
+ }
+
+ ff_fill_rgba_map(lut3d->rgba_map, inlink->format);
+ lut3d->step = av_get_padded_bits_per_pixel(desc) >> (3 + is16bit);
+
+#define SET_FUNC(name) do { \
+ if (is16bit) lut3d->interp = interp_16_##name; \
+ else lut3d->interp = interp_8_##name; \
+} while (0)
+
+ switch (lut3d->interpolation) {
+ case INTERPOLATE_NEAREST: SET_FUNC(nearest); break;
+ case INTERPOLATE_TRILINEAR: SET_FUNC(trilinear); break;
+ case INTERPOLATE_TETRAHEDRAL: SET_FUNC(tetrahedral); break;
+ default:
+ av_assert0(0);
+ }
+
+ return 0;
+}
+
+static AVFrame *apply_lut(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ LUT3DContext *lut3d = ctx->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out;
+ ThreadData td;
+
+ if (av_frame_is_writable(in)) {
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return NULL;
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ td.in = in;
+ td.out = out;
+ ctx->internal->execute(ctx, lut3d->interp, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
+
+ if (out != in)
+ av_frame_free(&in);
+
+ return out;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *out = apply_lut(inlink, in);
+ if (!out)
+ return AVERROR(ENOMEM);
+ return ff_filter_frame(outlink, out);
+}
+
+#if CONFIG_LUT3D_FILTER
+static const AVOption lut3d_options[] = {
+ { "file", "set 3D LUT file name", OFFSET(file), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ COMMON_OPTIONS
+};
+
+AVFILTER_DEFINE_CLASS(lut3d);
+
+static av_cold int lut3d_init(AVFilterContext *ctx)
+{
+ int ret;
+ FILE *f;
+ const char *ext;
+ LUT3DContext *lut3d = ctx->priv;
+
+ if (!lut3d->file) {
+ set_identity_matrix(lut3d, 32);
+ return 0;
+ }
+
+ f = fopen(lut3d->file, "r");
+ if (!f) {
+ ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "%s: %s\n", lut3d->file, av_err2str(ret));
+ return ret;
+ }
+
+ ext = strrchr(lut3d->file, '.');
+ if (!ext) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to guess the format from the extension\n");
+ ret = AVERROR_INVALIDDATA;
+ goto end;
+ }
+ ext++;
+
+ if (!av_strcasecmp(ext, "dat")) {
+ ret = parse_dat(ctx, f);
+ } else if (!av_strcasecmp(ext, "3dl")) {
+ ret = parse_3dl(ctx, f);
+ } else if (!av_strcasecmp(ext, "cube")) {
+ ret = parse_cube(ctx, f);
+ } else if (!av_strcasecmp(ext, "m3d")) {
+ ret = parse_m3d(ctx, f);
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Unrecognized '.%s' file type\n", ext);
+ ret = AVERROR(EINVAL);
+ }
+
+ if (!ret && !lut3d->lutsize) {
+ av_log(ctx, AV_LOG_ERROR, "3D LUT is empty\n");
+ ret = AVERROR_INVALIDDATA;
+ }
+
+end:
+ fclose(f);
+ return ret;
+}
+
+static const AVFilterPad lut3d_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad lut3d_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_lut3d = {
+ .name = "lut3d",
+ .description = NULL_IF_CONFIG_SMALL("Adjust colors using a 3D LUT."),
+ .priv_size = sizeof(LUT3DContext),
+ .init = lut3d_init,
+ .query_formats = query_formats,
+ .inputs = lut3d_inputs,
+ .outputs = lut3d_outputs,
+ .priv_class = &lut3d_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
+#endif
+
+#if CONFIG_HALDCLUT_FILTER
+
+static void update_clut(LUT3DContext *lut3d, const AVFrame *frame)
+{
+ const uint8_t *data = frame->data[0];
+ const int linesize = frame->linesize[0];
+ const int w = lut3d->clut_width;
+ const int step = lut3d->clut_step;
+ const uint8_t *rgba_map = lut3d->clut_rgba_map;
+ const int level = lut3d->lutsize;
+
+#define LOAD_CLUT(nbits) do { \
+ int i, j, k, x = 0, y = 0; \
+ \
+ for (k = 0; k < level; k++) { \
+ for (j = 0; j < level; j++) { \
+ for (i = 0; i < level; i++) { \
+ const uint##nbits##_t *src = (const uint##nbits##_t *) \
+ (data + y*linesize + x*step); \
+ struct rgbvec *vec = &lut3d->lut[i][j][k]; \
+ vec->r = src[rgba_map[0]] / (float)((1<<(nbits)) - 1); \
+ vec->g = src[rgba_map[1]] / (float)((1<<(nbits)) - 1); \
+ vec->b = src[rgba_map[2]] / (float)((1<<(nbits)) - 1); \
+ if (++x == w) { \
+ x = 0; \
+ y++; \
+ } \
+ } \
+ } \
+ } \
+} while (0)
+
+ if (!lut3d->clut_is16bit) LOAD_CLUT(8);
+ else LOAD_CLUT(16);
+}
+
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ LUT3DContext *lut3d = ctx->priv;
+ int ret;
+
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ if ((ret = ff_dualinput_init(ctx, &lut3d->dinput)) < 0)
+ return ret;
+ return 0;
+}
+
+static int filter_frame_hald(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ LUT3DContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ LUT3DContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static int config_clut(AVFilterLink *inlink)
+{
+ int size, level, w, h;
+ AVFilterContext *ctx = inlink->dst;
+ LUT3DContext *lut3d = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ lut3d->clut_is16bit = 0;
+ switch (inlink->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ lut3d->clut_is16bit = 1;
+ }
+
+ lut3d->clut_step = av_get_padded_bits_per_pixel(desc) >> 3;
+ ff_fill_rgba_map(lut3d->clut_rgba_map, inlink->format);
+
+ if (inlink->w > inlink->h)
+ av_log(ctx, AV_LOG_INFO, "Padding on the right (%dpx) of the "
+ "Hald CLUT will be ignored\n", inlink->w - inlink->h);
+ else if (inlink->w < inlink->h)
+ av_log(ctx, AV_LOG_INFO, "Padding at the bottom (%dpx) of the "
+ "Hald CLUT will be ignored\n", inlink->h - inlink->w);
+ lut3d->clut_width = w = h = FFMIN(inlink->w, inlink->h);
+
+ for (level = 1; level*level*level < w; level++);
+ size = level*level*level;
+ if (size != w) {
+ av_log(ctx, AV_LOG_WARNING, "The Hald CLUT width does not match the level\n");
+ return AVERROR_INVALIDDATA;
+ }
+ av_assert0(w == h && w == size);
+ level *= level;
+ if (level > MAX_LEVEL) {
+ const int max_clut_level = sqrt(MAX_LEVEL);
+ const int max_clut_size = max_clut_level*max_clut_level*max_clut_level;
+ av_log(ctx, AV_LOG_ERROR, "Too large Hald CLUT "
+ "(maximum level is %d, or %dx%d CLUT)\n",
+ max_clut_level, max_clut_size, max_clut_size);
+ return AVERROR(EINVAL);
+ }
+ lut3d->lutsize = level;
+
+ return 0;
+}
+
+static AVFrame *update_apply_clut(AVFilterContext *ctx, AVFrame *main,
+ const AVFrame *second)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ update_clut(ctx->priv, second);
+ return apply_lut(inlink, main);
+}
+
+static av_cold int haldclut_init(AVFilterContext *ctx)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ lut3d->dinput.process = update_apply_clut;
+ return 0;
+}
+
+static av_cold void haldclut_uninit(AVFilterContext *ctx)
+{
+ LUT3DContext *lut3d = ctx->priv;
+ ff_dualinput_uninit(&lut3d->dinput);
+}
+
+static const AVOption haldclut_options[] = {
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { "repeatlast", "continue applying the last clut after eos", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS },
+ COMMON_OPTIONS
+};
+
+AVFILTER_DEFINE_CLASS(haldclut);
+
+static const AVFilterPad haldclut_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame_hald,
+ .config_props = config_input,
+ },{
+ .name = "clut",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame_hald,
+ .config_props = config_clut,
+ },
+ { NULL }
+};
+
+static const AVFilterPad haldclut_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_haldclut = {
+ .name = "haldclut",
+ .description = NULL_IF_CONFIG_SMALL("Adjust colors using a Hald CLUT."),
+ .priv_size = sizeof(LUT3DContext),
+ .init = haldclut_init,
+ .uninit = haldclut_uninit,
+ .query_formats = query_formats,
+ .inputs = haldclut_inputs,
+ .outputs = haldclut_outputs,
+ .priv_class = &haldclut_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
+};
+#endif
diff --git a/libavfilter/vf_mcdeint.c b/libavfilter/vf_mcdeint.c
new file mode 100644
index 0000000..2aa2e27
--- /dev/null
+++ b/libavfilter/vf_mcdeint.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Motion Compensation Deinterlacer
+ * Ported from MPlayer libmpcodecs/vf_mcdeint.c.
+ *
+ * Known Issues:
+ *
+ * The motion estimation is somewhat at the mercy of the input, if the
+ * input frames are created purely based on spatial interpolation then
+ * for example a thin black line or another random and not
+ * interpolateable pattern will cause problems.
+ * Note: completely ignoring the "unavailable" lines during motion
+ * estimation did not look any better, so the most obvious solution
+ * would be to improve tfields or penalize problematic motion vectors.
+ *
+ * If non iterative ME is used then snow currently ignores the OBMC
+ * window and as a result sometimes creates artifacts.
+ *
+ * Only past frames are used, we should ideally use future frames too,
+ * something like filtering the whole movie in forward and then
+ * backward direction seems like a interesting idea but the current
+ * filter framework is FAR from supporting such things.
+ *
+ * Combining the motion compensated image with the input image also is
+ * not as trivial as it seems, simple blindly taking even lines from
+ * one and odd ones from the other does not work at all as ME/MC
+ * sometimes has nothing in the previous frames which matches the
+ * current. The current algorithm has been found by trial and error
+ * and almost certainly can be improved...
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavcodec/avcodec.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+enum MCDeintMode {
+ MODE_FAST = 0,
+ MODE_MEDIUM,
+ MODE_SLOW,
+ MODE_EXTRA_SLOW,
+ MODE_NB,
+};
+
+enum MCDeintParity {
+ PARITY_TFF = 0, ///< top field first
+ PARITY_BFF = 1, ///< bottom field first
+};
+
+typedef struct {
+ const AVClass *class;
+ enum MCDeintMode mode;
+ enum MCDeintParity parity;
+ int qp;
+ AVCodecContext *enc_ctx;
+} MCDeintContext;
+
+#define OFFSET(x) offsetof(MCDeintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption mcdeint_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_FAST}, 0, MODE_NB-1, FLAGS, .unit="mode" },
+ CONST("fast", NULL, MODE_FAST, "mode"),
+ CONST("medium", NULL, MODE_MEDIUM, "mode"),
+ CONST("slow", NULL, MODE_SLOW, "mode"),
+ CONST("extra_slow", NULL, MODE_EXTRA_SLOW, "mode"),
+
+ { "parity", "set the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=PARITY_BFF}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", PARITY_BFF, "parity"),
+
+ { "qp", "set qp", OFFSET(qp), AV_OPT_TYPE_INT, {.i64=1}, INT_MIN, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mcdeint);
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MCDeintContext *mcdeint = ctx->priv;
+ AVCodec *enc;
+ AVCodecContext *enc_ctx;
+ AVDictionary *opts = NULL;
+ int ret;
+
+ if (!(enc = avcodec_find_encoder(AV_CODEC_ID_SNOW))) {
+ av_log(ctx, AV_LOG_ERROR, "Snow encoder is not enabled in libavcodec\n");
+ return AVERROR(EINVAL);
+ }
+
+ mcdeint->enc_ctx = avcodec_alloc_context3(enc);
+ if (!mcdeint->enc_ctx)
+ return AVERROR(ENOMEM);
+ enc_ctx = mcdeint->enc_ctx;
+ enc_ctx->width = inlink->w;
+ enc_ctx->height = inlink->h;
+ enc_ctx->time_base = (AVRational){1,25}; // meaningless
+ enc_ctx->gop_size = 300;
+ enc_ctx->max_b_frames = 0;
+ enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
+ enc_ctx->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
+ enc_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ enc_ctx->global_quality = 1;
+ enc_ctx->me_cmp = enc_ctx->me_sub_cmp = FF_CMP_SAD;
+ enc_ctx->mb_cmp = FF_CMP_SSE;
+ av_dict_set(&opts, "memc_only", "1", 0);
+
+ switch (mcdeint->mode) {
+ case MODE_EXTRA_SLOW:
+ enc_ctx->refs = 3;
+ case MODE_SLOW:
+ enc_ctx->me_method = ME_ITER;
+ case MODE_MEDIUM:
+ enc_ctx->flags |= CODEC_FLAG_4MV;
+ enc_ctx->dia_size = 2;
+ case MODE_FAST:
+ enc_ctx->flags |= CODEC_FLAG_QPEL;
+ }
+
+ ret = avcodec_open2(enc_ctx, enc, &opts);
+ av_dict_free(&opts);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MCDeintContext *mcdeint = ctx->priv;
+
+ if (mcdeint->enc_ctx) {
+ avcodec_close(mcdeint->enc_ctx);
+ av_freep(&mcdeint->enc_ctx);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ MCDeintContext *mcdeint = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic, *frame_dec;
+ AVPacket pkt;
+ int x, y, i, ret, got_frame = 0;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ inpic->quality = mcdeint->qp * FF_QP2LAMBDA;
+
+ av_init_packet(&pkt);
+ pkt.data = NULL; // packet data will be allocated by the encoder
+ pkt.size = 0;
+
+ ret = avcodec_encode_video2(mcdeint->enc_ctx, &pkt, inpic, &got_frame);
+ if (ret < 0)
+ goto end;
+
+ frame_dec = mcdeint->enc_ctx->coded_frame;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = FF_CEIL_RSHIFT(inlink->w, is_chroma);
+ int h = FF_CEIL_RSHIFT(inlink->h, is_chroma);
+ int fils = frame_dec->linesize[i];
+ int srcs = inpic ->linesize[i];
+ int dsts = outpic ->linesize[i];
+
+ for (y = 0; y < h; y++) {
+ if ((y ^ mcdeint->parity) & 1) {
+ for (x = 0; x < w; x++) {
+ uint8_t *filp = &frame_dec->data[i][x + y*fils];
+ uint8_t *srcp = &inpic ->data[i][x + y*srcs];
+ uint8_t *dstp = &outpic ->data[i][x + y*dsts];
+
+ if (y > 0 && y < h-1){
+ int is_edge = x < 3 || x > w-4;
+ int diff0 = filp[-fils] - srcp[-srcs];
+ int diff1 = filp[+fils] - srcp[+srcs];
+ int temp = filp[0];
+
+#define DELTA(j) av_clip(j, -x, w-1-x)
+
+#define GET_SCORE_EDGE(j)\
+ FFABS(srcp[-srcs+DELTA(-1+(j))] - srcp[+srcs+DELTA(-1-(j))])+\
+ FFABS(srcp[-srcs+DELTA(j) ] - srcp[+srcs+DELTA( -(j))])+\
+ FFABS(srcp[-srcs+DELTA(1+(j)) ] - srcp[+srcs+DELTA( 1-(j))])
+
+#define GET_SCORE(j)\
+ FFABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])+\
+ FFABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])+\
+ FFABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)])
+
+#define CHECK_EDGE(j)\
+ { int score = GET_SCORE_EDGE(j);\
+ if (score < spatial_score){\
+ spatial_score = score;\
+ diff0 = filp[-fils+DELTA(j)] - srcp[-srcs+DELTA(j)];\
+ diff1 = filp[+fils+DELTA(-(j))] - srcp[+srcs+DELTA(-(j))];\
+
+#define CHECK(j)\
+ { int score = GET_SCORE(j);\
+ if (score < spatial_score){\
+ spatial_score= score;\
+ diff0 = filp[-fils+(j)] - srcp[-srcs+(j)];\
+ diff1 = filp[+fils-(j)] - srcp[+srcs-(j)];\
+
+ if (is_edge) {
+ int spatial_score = GET_SCORE_EDGE(0) - 1;
+ CHECK_EDGE(-1) CHECK_EDGE(-2) }} }}
+ CHECK_EDGE( 1) CHECK_EDGE( 2) }} }}
+ } else {
+ int spatial_score = GET_SCORE(0) - 1;
+ CHECK(-1) CHECK(-2) }} }}
+ CHECK( 1) CHECK( 2) }} }}
+ }
+
+
+ if (diff0 + diff1 > 0)
+ temp -= (diff0 + diff1 - FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ else
+ temp -= (diff0 + diff1 + FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ *filp = *dstp = temp > 255U ? ~(temp>>31) : temp;
+ } else {
+ *dstp = *filp;
+ }
+ }
+ }
+ }
+
+ for (y = 0; y < h; y++) {
+ if (!((y ^ mcdeint->parity) & 1)) {
+ for (x = 0; x < w; x++) {
+ frame_dec->data[i][x + y*fils] =
+ outpic ->data[i][x + y*dsts] = inpic->data[i][x + y*srcs];
+ }
+ }
+ }
+ }
+ mcdeint->parity ^= 1;
+
+end:
+ av_free_packet(&pkt);
+ av_frame_free(&inpic);
+ if (ret < 0) {
+ av_frame_free(&outpic);
+ return ret;
+ }
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad mcdeint_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mcdeint_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mcdeint = {
+ .name = "mcdeint",
+ .description = NULL_IF_CONFIG_SMALL("Apply motion compensating deinterlacing."),
+ .priv_size = sizeof(MCDeintContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = mcdeint_inputs,
+ .outputs = mcdeint_outputs,
+ .priv_class = &mcdeint_class,
+};
diff --git a/libavfilter/vf_mergeplanes.c b/libavfilter/vf_mergeplanes.c
new file mode 100644
index 0000000..c76e82a
--- /dev/null
+++ b/libavfilter/vf_mergeplanes.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "framesync.h"
+
+typedef struct InputParam {
+ int depth[4];
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+} InputParam;
+
+typedef struct MergePlanesContext {
+ const AVClass *class;
+ int64_t mapping;
+ const enum AVPixelFormat out_fmt;
+ int nb_inputs;
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+ int map[4][2];
+ const AVPixFmtDescriptor *outdesc;
+
+ FFFrameSync fs;
+ FFFrameSyncIn fsin[3]; /* must be immediately after fs */
+} MergePlanesContext;
+
+#define OFFSET(x) offsetof(MergePlanesContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption mergeplanes_options[] = {
+ { "mapping", "set input to output plane mapping", OFFSET(mapping), AV_OPT_TYPE_INT, {.i64=0}, 0, 0x33333333, FLAGS },
+ { "format", "set output pixel format", OFFSET(out_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64=AV_PIX_FMT_YUVA444P}, 0, INT_MAX, .flags=FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mergeplanes);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ MergePlanesContext *s = inlink->dst->priv;
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ int64_t m = s->mapping;
+ int i, ret;
+
+ s->outdesc = av_pix_fmt_desc_get(s->out_fmt);
+ if (!(s->outdesc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
+ s->outdesc->nb_components < 2) {
+ av_log(ctx, AV_LOG_ERROR, "Only planar formats with more than one component are supported.\n");
+ return AVERROR(EINVAL);
+ }
+ s->nb_planes = av_pix_fmt_count_planes(s->out_fmt);
+
+ for (i = s->nb_planes - 1; i >= 0; i--) {
+ s->map[i][0] = m & 0xf;
+ m >>= 4;
+ s->map[i][1] = m & 0xf;
+ m >>= 4;
+
+ if (s->map[i][0] > 3 || s->map[i][1] > 3) {
+ av_log(ctx, AV_LOG_ERROR, "Mapping with out of range input and/or plane number.\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->nb_inputs = FFMAX(s->nb_inputs, s->map[i][1] + 1);
+ }
+
+ av_assert0(s->nb_inputs && s->nb_inputs <= 4);
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ AVFilterPad pad = { 0 };
+
+ pad.type = AVMEDIA_TYPE_VIDEO;
+ pad.name = av_asprintf("in%d", i);
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+ pad.filter_frame = filter_frame;
+
+ if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0){
+ av_freep(&pad.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ AVFilterFormats *formats = NULL;
+ int i;
+
+ s->outdesc = av_pix_fmt_desc_get(s->out_fmt);
+ for (i = 0; av_pix_fmt_desc_get(i); i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (desc->comp[0].depth_minus1 == s->outdesc->comp[0].depth_minus1 &&
+ av_pix_fmt_count_planes(i) == desc->nb_components)
+ ff_add_format(&formats, i);
+ }
+
+ for (i = 0; i < s->nb_inputs; i++)
+ ff_formats_ref(formats, &ctx->inputs[i]->out_formats);
+
+ formats = NULL;
+ ff_add_format(&formats, s->out_fmt);
+ ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
+
+ return 0;
+}
+
+static int process_frame(FFFrameSync *fs)
+{
+ AVFilterContext *ctx = fs->parent;
+ AVFilterLink *outlink = ctx->outputs[0];
+ MergePlanesContext *s = fs->opaque;
+ AVFrame *in[4] = { NULL };
+ AVFrame *out;
+ int i, ret;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
+ return ret;
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
+
+ for (i = 0; i < s->nb_planes; i++) {
+ const int input = s->map[i][1];
+ const int plane = s->map[i][0];
+
+ av_image_copy_plane(out->data[i], out->linesize[i],
+ in[input]->data[plane], in[input]->linesize[plane],
+ s->planewidth[i], s->planeheight[i]);
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MergePlanesContext *s = ctx->priv;
+ InputParam inputsp[4];
+ FFFrameSyncIn *in;
+ int i;
+
+ ff_framesync_init(&s->fs, ctx, s->nb_inputs);
+ in = s->fs.in;
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+
+ outlink->w = ctx->inputs[0]->w;
+ outlink->h = ctx->inputs[0]->h;
+ outlink->time_base = ctx->inputs[0]->time_base;
+ outlink->frame_rate = ctx->inputs[0]->frame_rate;
+ outlink->sample_aspect_ratio = ctx->inputs[0]->sample_aspect_ratio;
+
+ s->planewidth[1] =
+ s->planewidth[2] = FF_CEIL_RSHIFT(outlink->w, s->outdesc->log2_chroma_w);
+ s->planewidth[0] =
+ s->planewidth[3] = outlink->w;
+ s->planeheight[1] =
+ s->planeheight[2] = FF_CEIL_RSHIFT(outlink->h, s->outdesc->log2_chroma_h);
+ s->planeheight[0] =
+ s->planeheight[3] = outlink->h;
+
+ for (i = 0; i < s->nb_inputs; i++) {
+ InputParam *inputp = &inputsp[i];
+ AVFilterLink *inlink = ctx->inputs[i];
+ const AVPixFmtDescriptor *indesc = av_pix_fmt_desc_get(inlink->format);
+ int j;
+
+ if (outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
+ outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
+ av_log(ctx, AV_LOG_ERROR, "input #%d link %s SAR %d:%d "
+ "does not match output link %s SAR %d:%d\n",
+ i, ctx->input_pads[i].name,
+ inlink->sample_aspect_ratio.num,
+ inlink->sample_aspect_ratio.den,
+ ctx->output_pads[0].name,
+ outlink->sample_aspect_ratio.num,
+ outlink->sample_aspect_ratio.den);
+ return AVERROR(EINVAL);
+ }
+
+ inputp->planewidth[1] =
+ inputp->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, indesc->log2_chroma_w);
+ inputp->planewidth[0] =
+ inputp->planewidth[3] = inlink->w;
+ inputp->planeheight[1] =
+ inputp->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, indesc->log2_chroma_h);
+ inputp->planeheight[0] =
+ inputp->planeheight[3] = inlink->h;
+ inputp->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ for (j = 0; j < inputp->nb_planes; j++)
+ inputp->depth[j] = indesc->comp[j].depth_minus1 + 1;
+
+ in[i].time_base = inlink->time_base;
+ in[i].sync = 1;
+ in[i].before = EXT_STOP;
+ in[i].after = EXT_STOP;
+ }
+
+ for (i = 0; i < s->nb_planes; i++) {
+ const int input = s->map[i][1];
+ const int plane = s->map[i][0];
+ InputParam *inputp = &inputsp[input];
+
+ if (plane + 1 > inputp->nb_planes) {
+ av_log(ctx, AV_LOG_ERROR, "input %d does not have %d plane\n",
+ input, plane);
+ goto fail;
+ }
+ if (s->outdesc->comp[i].depth_minus1 + 1 != inputp->depth[plane]) {
+ av_log(ctx, AV_LOG_ERROR, "output plane %d depth %d does not "
+ "match input %d plane %d depth %d\n",
+ i, s->outdesc->comp[i].depth_minus1 + 1,
+ input, plane, inputp->depth[plane]);
+ goto fail;
+ }
+ if (s->planewidth[i] != inputp->planewidth[plane]) {
+ av_log(ctx, AV_LOG_ERROR, "output plane %d width %d does not "
+ "match input %d plane %d width %d\n",
+ i, s->planewidth[i],
+ input, plane, inputp->planewidth[plane]);
+ goto fail;
+ }
+ if (s->planeheight[i] != inputp->planeheight[plane]) {
+ av_log(ctx, AV_LOG_ERROR, "output plane %d height %d does not "
+ "match input %d plane %d height %d\n",
+ i, s->planeheight[i],
+ input, plane, inputp->planeheight[plane]);
+ goto fail;
+ }
+ }
+
+ return ff_framesync_configure(&s->fs);
+fail:
+ return AVERROR(EINVAL);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MergePlanesContext *s = outlink->src->priv;
+ return ff_framesync_request_frame(&s->fs, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MergePlanesContext *s = ctx->priv;
+ int i;
+
+ ff_framesync_uninit(&s->fs);
+
+ for (i = 0; i < ctx->nb_inputs; i++)
+ av_freep(&ctx->input_pads[i].name);
+}
+
+static const AVFilterPad mergeplanes_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mergeplanes = {
+ .name = "mergeplanes",
+ .description = NULL_IF_CONFIG_SMALL("Merge planes."),
+ .priv_size = sizeof(MergePlanesContext),
+ .priv_class = &mergeplanes_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mergeplanes_outputs,
+ .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
+};
diff --git a/libavfilter/vf_mp.c b/libavfilter/vf_mp.c
new file mode 100644
index 0000000..9264e9b
--- /dev/null
+++ b/libavfilter/vf_mp.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright (c) 2011 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Parts of this file have been stolen from mplayer
+ */
+
+/**
+ * @file
+ */
+
+#include "avfilter.h"
+#include "video.h"
+#include "formats.h"
+#include "internal.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+
+#include "libmpcodecs/vf.h"
+#include "libmpcodecs/img_format.h"
+#include "libmpcodecs/cpudetect.h"
+#include "libmpcodecs/av_helpers.h"
+#include "libmpcodecs/libvo/fastmemcpy.h"
+
+#include "libswscale/swscale.h"
+
+
+//FIXME maybe link the orig in
+//XXX: identical pix_fmt must be following with each others
+static const struct {
+ int fmt;
+ enum AVPixelFormat pix_fmt;
+} conversion_map[] = {
+ {IMGFMT_ARGB, AV_PIX_FMT_ARGB},
+ {IMGFMT_BGRA, AV_PIX_FMT_BGRA},
+ {IMGFMT_BGR24, AV_PIX_FMT_BGR24},
+ {IMGFMT_BGR16BE, AV_PIX_FMT_RGB565BE},
+ {IMGFMT_BGR16LE, AV_PIX_FMT_RGB565LE},
+ {IMGFMT_BGR15BE, AV_PIX_FMT_RGB555BE},
+ {IMGFMT_BGR15LE, AV_PIX_FMT_RGB555LE},
+ {IMGFMT_BGR12BE, AV_PIX_FMT_RGB444BE},
+ {IMGFMT_BGR12LE, AV_PIX_FMT_RGB444LE},
+ {IMGFMT_BGR8, AV_PIX_FMT_RGB8},
+ {IMGFMT_BGR4, AV_PIX_FMT_RGB4},
+ {IMGFMT_BGR1, AV_PIX_FMT_MONOBLACK},
+ {IMGFMT_RGB1, AV_PIX_FMT_MONOBLACK},
+ {IMGFMT_RG4B, AV_PIX_FMT_BGR4_BYTE},
+ {IMGFMT_BG4B, AV_PIX_FMT_RGB4_BYTE},
+ {IMGFMT_RGB48LE, AV_PIX_FMT_RGB48LE},
+ {IMGFMT_RGB48BE, AV_PIX_FMT_RGB48BE},
+ {IMGFMT_ABGR, AV_PIX_FMT_ABGR},
+ {IMGFMT_RGBA, AV_PIX_FMT_RGBA},
+ {IMGFMT_RGB24, AV_PIX_FMT_RGB24},
+ {IMGFMT_RGB16BE, AV_PIX_FMT_BGR565BE},
+ {IMGFMT_RGB16LE, AV_PIX_FMT_BGR565LE},
+ {IMGFMT_RGB15BE, AV_PIX_FMT_BGR555BE},
+ {IMGFMT_RGB15LE, AV_PIX_FMT_BGR555LE},
+ {IMGFMT_RGB12BE, AV_PIX_FMT_BGR444BE},
+ {IMGFMT_RGB12LE, AV_PIX_FMT_BGR444LE},
+ {IMGFMT_RGB8, AV_PIX_FMT_BGR8},
+ {IMGFMT_RGB4, AV_PIX_FMT_BGR4},
+ {IMGFMT_BGR8, AV_PIX_FMT_PAL8},
+ {IMGFMT_YUY2, AV_PIX_FMT_YUYV422},
+ {IMGFMT_UYVY, AV_PIX_FMT_UYVY422},
+ {IMGFMT_NV12, AV_PIX_FMT_NV12},
+ {IMGFMT_NV21, AV_PIX_FMT_NV21},
+ {IMGFMT_Y800, AV_PIX_FMT_GRAY8},
+ {IMGFMT_Y8, AV_PIX_FMT_GRAY8},
+ {IMGFMT_YVU9, AV_PIX_FMT_YUV410P},
+ {IMGFMT_IF09, AV_PIX_FMT_YUV410P},
+ {IMGFMT_YV12, AV_PIX_FMT_YUV420P},
+ {IMGFMT_I420, AV_PIX_FMT_YUV420P},
+ {IMGFMT_IYUV, AV_PIX_FMT_YUV420P},
+ {IMGFMT_411P, AV_PIX_FMT_YUV411P},
+ {IMGFMT_422P, AV_PIX_FMT_YUV422P},
+ {IMGFMT_444P, AV_PIX_FMT_YUV444P},
+ {IMGFMT_440P, AV_PIX_FMT_YUV440P},
+
+ {IMGFMT_420A, AV_PIX_FMT_YUVA420P},
+
+ {IMGFMT_420P16_LE, AV_PIX_FMT_YUV420P16LE},
+ {IMGFMT_420P16_BE, AV_PIX_FMT_YUV420P16BE},
+ {IMGFMT_422P16_LE, AV_PIX_FMT_YUV422P16LE},
+ {IMGFMT_422P16_BE, AV_PIX_FMT_YUV422P16BE},
+ {IMGFMT_444P16_LE, AV_PIX_FMT_YUV444P16LE},
+ {IMGFMT_444P16_BE, AV_PIX_FMT_YUV444P16BE},
+
+ // YUVJ are YUV formats that use the full Y range and not just
+ // 16 - 235 (see colorspaces.txt).
+ // Currently they are all treated the same way.
+ {IMGFMT_YV12, AV_PIX_FMT_YUVJ420P},
+ {IMGFMT_422P, AV_PIX_FMT_YUVJ422P},
+ {IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
+ {IMGFMT_440P, AV_PIX_FMT_YUVJ440P},
+
+#if FF_API_XVMC
+ {IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC},
+ {IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT},
+#endif /* FF_API_XVMC */
+
+ {IMGFMT_VDPAU_MPEG1, AV_PIX_FMT_VDPAU_MPEG1},
+ {IMGFMT_VDPAU_MPEG2, AV_PIX_FMT_VDPAU_MPEG2},
+ {IMGFMT_VDPAU_H264, AV_PIX_FMT_VDPAU_H264},
+ {IMGFMT_VDPAU_WMV3, AV_PIX_FMT_VDPAU_WMV3},
+ {IMGFMT_VDPAU_VC1, AV_PIX_FMT_VDPAU_VC1},
+ {IMGFMT_VDPAU_MPEG4, AV_PIX_FMT_VDPAU_MPEG4},
+ {0, AV_PIX_FMT_NONE}
+};
+
+extern const vf_info_t ff_vf_info_eq2;
+extern const vf_info_t ff_vf_info_eq;
+extern const vf_info_t ff_vf_info_fspp;
+extern const vf_info_t ff_vf_info_ilpack;
+extern const vf_info_t ff_vf_info_pp7;
+extern const vf_info_t ff_vf_info_softpulldown;
+extern const vf_info_t ff_vf_info_uspp;
+
+
+static const vf_info_t* const filters[]={
+ &ff_vf_info_eq2,
+ &ff_vf_info_eq,
+ &ff_vf_info_fspp,
+ &ff_vf_info_ilpack,
+ &ff_vf_info_pp7,
+ &ff_vf_info_softpulldown,
+ &ff_vf_info_uspp,
+
+ NULL
+};
+
+/*
+Unsupported filters
+1bpp
+ass
+bmovl
+crop
+dvbscale
+flip
+expand
+format
+halfpack
+lavc
+lavcdeint
+noformat
+pp
+scale
+tfields
+vo
+yadif
+zrmjpeg
+*/
+
+CpuCaps ff_gCpuCaps; //FIXME initialize this so optims work
+
+enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){
+ int i;
+ for(i=0; conversion_map[i].fmt && mp != conversion_map[i].fmt; i++)
+ ;
+ return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE;
+}
+
+typedef struct {
+ const AVClass *class;
+ vf_instance_t vf;
+ vf_instance_t next_vf;
+ AVFilterContext *avfctx;
+ int frame_returned;
+ char *filter;
+ enum AVPixelFormat in_pix_fmt;
+} MPContext;
+
+#define OFFSET(x) offsetof(MPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption mp_options[] = {
+ { "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mp);
+
+void ff_mp_msg(int mod, int lev, const char *format, ... ){
+ va_list va;
+ va_start(va, format);
+ //FIXME convert lev/mod
+ av_vlog(NULL, AV_LOG_DEBUG, format, va);
+ va_end(va);
+}
+
+int ff_mp_msg_test(int mod, int lev){
+ return 123;
+}
+
+void ff_init_avcodec(void)
+{
+ //we maybe should init but its kinda 1. unneeded 2. a bit impolite from here
+}
+
+//Exact copy of vf.c
+void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){
+ dst->pict_type= src->pict_type;
+ dst->fields = src->fields;
+ dst->qscale_type= src->qscale_type;
+ if(dst->width == src->width && dst->height == src->height){
+ dst->qstride= src->qstride;
+ dst->qscale= src->qscale;
+ }
+}
+
+//Exact copy of vf.c
+void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){
+ if (vf->next->draw_slice) {
+ vf->next->draw_slice(vf->next,src,stride,w,h,x,y);
+ return;
+ }
+ if (!vf->dmpi) {
+ ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name);
+ return;
+ }
+ if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) {
+ memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x,
+ src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]);
+ return;
+ }
+ memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0],
+ w, h, vf->dmpi->stride[0], stride[0]);
+ memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift),
+ src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]);
+ memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift),
+ src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]);
+}
+
+//Exact copy of vf.c
+void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){
+ int y;
+ if(mpi->flags&MP_IMGFLAG_PLANAR){
+ y0&=~1;h+=h&1;
+ if(x0==0 && w==mpi->width){
+ // full width clear:
+ memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h);
+ memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift));
+ memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift));
+ } else
+ for(y=y0;y<y0+h;y+=2){
+ memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w);
+ memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w);
+ memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
+ memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
+ }
+ return;
+ }
+ // packed:
+ for(y=y0;y<y0+h;y++){
+ unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0;
+ if(mpi->flags&MP_IMGFLAG_YUV){
+ unsigned int* p=(unsigned int*) dst;
+ int size=(mpi->bpp>>3)*w/4;
+ int i;
+#if HAVE_BIGENDIAN
+#define CLEAR_PACKEDYUV_PATTERN 0x00800080
+#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
+#else
+#define CLEAR_PACKEDYUV_PATTERN 0x80008000
+#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
+#endif
+ if(mpi->flags&MP_IMGFLAG_SWAPPED){
+ for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
+ for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
+ } else {
+ for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
+ for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
+ }
+ } else
+ memset(dst,0,(mpi->bpp>>3)*w);
+ }
+}
+
+int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
+ return 1;
+}
+
+//used by delogo
+unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){
+ return preferred;
+}
+
+mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
+ mp_image_t* mpi=NULL;
+ int w2;
+ int number = mp_imgtype >> 16;
+
+ av_assert0(vf->next == NULL); // all existing filters call this just on next
+
+ //vf_dint needs these as it calls ff_vf_get_image() before configuring the output
+ if(vf->w==0 && w>0) vf->w=w;
+ if(vf->h==0 && h>0) vf->h=h;
+
+ av_assert0(w == -1 || w >= vf->w);
+ av_assert0(h == -1 || h >= vf->h);
+ av_assert0(vf->w > 0);
+ av_assert0(vf->h > 0);
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);
+
+ if (w == -1) w = vf->w;
+ if (h == -1) h = vf->h;
+
+ w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;
+
+ // Note: we should call libvo first to check if it supports direct rendering
+ // and if not, then fallback to software buffers:
+ switch(mp_imgtype & 0xff){
+ case MP_IMGTYPE_EXPORT:
+ if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h);
+ mpi=vf->imgctx.export_images[0];
+ break;
+ case MP_IMGTYPE_STATIC:
+ if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h);
+ mpi=vf->imgctx.static_images[0];
+ break;
+ case MP_IMGTYPE_TEMP:
+ if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
+ mpi=vf->imgctx.temp_images[0];
+ break;
+ case MP_IMGTYPE_IPB:
+ if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
+ if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
+ mpi=vf->imgctx.temp_images[0];
+ break;
+ }
+ case MP_IMGTYPE_IP:
+ if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h);
+ mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
+ vf->imgctx.static_idx^=1;
+ break;
+ case MP_IMGTYPE_NUMBERED:
+ if (number == -1) {
+ int i;
+ for (i = 0; i < NUM_NUMBERED_MPI; i++)
+ if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
+ break;
+ number = i;
+ }
+ if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
+ if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h);
+ mpi = vf->imgctx.numbered_images[number];
+ mpi->number = number;
+ break;
+ }
+ if(mpi){
+ mpi->type=mp_imgtype;
+ mpi->w=vf->w; mpi->h=vf->h;
+ // keep buffer allocation status & color flags only:
+// mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
+ mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
+ // accept restrictions, draw_slice and palette flags only:
+ mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
+ if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
+ if(mpi->width!=w2 || mpi->height!=h){
+// printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h);
+ if(mpi->flags&MP_IMGFLAG_ALLOCATED){
+ if(mpi->width<w2 || mpi->height<h){
+ // need to re-allocate buffer memory:
+ av_free(mpi->planes[0]);
+ mpi->flags&=~MP_IMGFLAG_ALLOCATED;
+ ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
+ }
+// } else {
+ } {
+ mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
+ mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
+ }
+ }
+ if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt);
+ if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){
+
+ av_assert0(!vf->get_image);
+ // check libvo first!
+ if(vf->get_image) vf->get_image(vf,mpi);
+
+ if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
+ // non-direct and not yet allocated image. allocate it!
+ if (!mpi->bpp) { // no way we can allocate this
+ ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
+ "ff_vf_get_image: Tried to allocate a format that can not be allocated!\n");
+ return NULL;
+ }
+
+ // check if codec prefer aligned stride:
+ if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
+ int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
+ mpi->flags&MP_IMGFLAG_YUV) ?
+ (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
+ w2=((w+align)&(~align));
+ if(mpi->width!=w2){
+#if 0
+ // we have to change width... check if we CAN co it:
+ int flags=vf->query_format(vf,outfmt); // should not fail
+ if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? ff_vf_get_image{vf->query_format(outfmt)} failed!\n");
+// printf("query -> 0x%X \n",flags);
+ if(flags&VFCAP_ACCEPT_STRIDE){
+#endif
+ mpi->width=w2;
+ mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
+// }
+ }
+ }
+
+ ff_mp_image_alloc_planes(mpi);
+// printf("clearing img!\n");
+ ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
+ }
+ }
+ av_assert0(!vf->start_slice);
+ if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
+ if(vf->start_slice) vf->start_slice(vf,mpi);
+ if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
+ ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
+ "NULL"/*vf->info->name*/,
+ (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
+ ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
+ (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
+ mpi->width,mpi->height,mpi->bpp,
+ (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
+ (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
+ mpi->bpp*mpi->width*mpi->height/8);
+ ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
+ mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
+ mpi->stride[0], mpi->stride[1], mpi->stride[2],
+ mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
+ mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
+ }
+
+ mpi->qscale = NULL;
+ mpi->usage_count++;
+ }
+// printf("\rVF_MPI: %p %p %p %d %d %d \n",
+// mpi->planes[0],mpi->planes[1],mpi->planes[2],
+// mpi->stride[0],mpi->stride[1],mpi->stride[2]);
+ return mpi;
+}
+
+int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
+ AVFilterLink *outlink = m->avfctx->outputs[0];
+ AVFrame *picref = av_frame_alloc();
+ int i;
+
+ av_assert0(vf->next);
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n");
+
+ if (!picref)
+ goto fail;
+
+ picref->width = mpi->w;
+ picref->height = mpi->h;
+
+ for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
+ picref->format = conversion_map[i].pix_fmt;
+
+ for(i=0; conversion_map[i].fmt && m->in_pix_fmt != conversion_map[i].pix_fmt; i++);
+ if (mpi->imgfmt == conversion_map[i].fmt)
+ picref->format = conversion_map[i].pix_fmt;
+
+ memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride)));
+
+ for(i=0; i<4 && mpi->stride[i]; i++){
+ picref->data[i] = mpi->planes[i];
+ }
+
+ if(pts != MP_NOPTS_VALUE)
+ picref->pts= pts * av_q2d(outlink->time_base);
+
+ if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy
+ AVFrame *tofree = picref;
+ picref = av_frame_clone(picref);
+ av_frame_free(&tofree);
+ }
+
+ ff_filter_frame(outlink, picref);
+ m->frame_returned++;
+
+ return 1;
+fail:
+ av_frame_free(&picref);
+ return 0;
+}
+
+int ff_vf_next_config(struct vf_instance *vf,
+ int width, int height, int d_width, int d_height,
+ unsigned int voflags, unsigned int outfmt){
+
+ av_assert0(width>0 && height>0);
+ vf->next->w = width; vf->next->h = height;
+
+ return 1;
+#if 0
+ int flags=vf->next->query_format(vf->next,outfmt);
+ if(!flags){
+ // hmm. colorspace mismatch!!!
+ //this is fatal for us ATM
+ return 0;
+ }
+ ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X req=0x%X \n",flags,vf->default_reqs);
+ miss=vf->default_reqs - (flags&vf->default_reqs);
+ if(miss&VFCAP_ACCEPT_STRIDE){
+ // vf requires stride support but vf->next doesn't support it!
+ // let's insert the 'expand' filter, it does the job for us:
+ vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
+ if(!vf2) return 0; // shouldn't happen!
+ vf->next=vf2;
+ }
+ vf->next->w = width; vf->next->h = height;
+ return 1;
+#endif
+}
+
+int ff_vf_next_control(struct vf_instance *vf, int request, void* data){
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
+ av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request);
+ return 0;
+}
+
+static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
+ MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
+ int i;
+ av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt);
+
+ for(i=0; conversion_map[i].fmt; i++){
+ if(fmt==conversion_map[i].fmt)
+ return 1; //we suport all
+ }
+ return 0;
+}
+
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MPContext *m = ctx->priv;
+ int cpu_flags = av_get_cpu_flags();
+ char name[256];
+ const char *args;
+ int i;
+
+ ff_gCpuCaps.hasMMX = cpu_flags & AV_CPU_FLAG_MMX;
+ ff_gCpuCaps.hasMMX2 = cpu_flags & AV_CPU_FLAG_MMX2;
+ ff_gCpuCaps.hasSSE = cpu_flags & AV_CPU_FLAG_SSE;
+ ff_gCpuCaps.hasSSE2 = cpu_flags & AV_CPU_FLAG_SSE2;
+ ff_gCpuCaps.hasSSE3 = cpu_flags & AV_CPU_FLAG_SSE3;
+ ff_gCpuCaps.hasSSSE3 = cpu_flags & AV_CPU_FLAG_SSSE3;
+ ff_gCpuCaps.hasSSE4 = cpu_flags & AV_CPU_FLAG_SSE4;
+ ff_gCpuCaps.hasSSE42 = cpu_flags & AV_CPU_FLAG_SSE42;
+ ff_gCpuCaps.hasAVX = cpu_flags & AV_CPU_FLAG_AVX;
+ ff_gCpuCaps.has3DNow = cpu_flags & AV_CPU_FLAG_3DNOW;
+ ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;
+
+ m->avfctx= ctx;
+
+ args = m->filter;
+ if(!args || 1!=sscanf(args, "%255[^:=]", name)){
+ av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
+ return AVERROR(EINVAL);
+ }
+ args += strlen(name);
+ if (args[0] == '=')
+ args++;
+
+ for(i=0; ;i++){
+ if(!filters[i] || !strcmp(name, filters[i]->name))
+ break;
+ }
+
+ if(!filters[i]){
+ av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
+ return AVERROR(EINVAL);
+ }
+
+ av_log(ctx, AV_LOG_WARNING,
+ "'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
+ "once it has been ported to a native libavfilter.\n", name);
+
+ memset(&m->vf,0,sizeof(m->vf));
+ m->vf.info= filters[i];
+
+ m->vf.next = &m->next_vf;
+ m->vf.put_image = ff_vf_next_put_image;
+ m->vf.config = ff_vf_next_config;
+ m->vf.query_format= vf_default_query_format;
+ m->vf.control = ff_vf_next_control;
+ m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
+ m->vf.default_reqs=0;
+ if(m->vf.info->opts)
+ av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
+#if 0
+ if(vf->info->opts) { // vf_vo get some special argument
+ const m_struct_t* st = vf->info->opts;
+ void* vf_priv = m_struct_alloc(st);
+ int n;
+ for(n = 0 ; args && args[2*n] ; n++)
+ m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
+ vf->priv = vf_priv;
+ args = NULL;
+ } else // Otherwise we should have the '_oldargs_'
+ if(args && !strcmp(args[0],"_oldargs_"))
+ args = (char**)args[1];
+ else
+ args = NULL;
+#endif
+ if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){
+ av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
+ return -1;
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MPContext *m = ctx->priv;
+ vf_instance_t *vf = &m->vf;
+
+ while(vf){
+ vf_instance_t *next = vf->next;
+ if(vf->uninit)
+ vf->uninit(vf);
+ ff_free_mp_image(vf->imgctx.static_images[0]);
+ ff_free_mp_image(vf->imgctx.static_images[1]);
+ ff_free_mp_image(vf->imgctx.temp_images[0]);
+ ff_free_mp_image(vf->imgctx.export_images[0]);
+ vf = next;
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *avfmts=NULL;
+ MPContext *m = ctx->priv;
+ enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE;
+ int i;
+
+ for(i=0; conversion_map[i].fmt; i++){
+ av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt);
+ if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){
+ av_log(ctx, AV_LOG_DEBUG, "supported,adding\n");
+ if (conversion_map[i].pix_fmt != lastpixfmt) {
+ ff_add_format(&avfmts, conversion_map[i].pix_fmt);
+ lastpixfmt = conversion_map[i].pix_fmt;
+ }
+ }
+ }
+
+ if (!avfmts)
+ return -1;
+
+ //We assume all allowed input formats are also allowed output formats
+ ff_set_common_formats(ctx, avfmts);
+ return 0;
+}
+
+static int config_inprops(AVFilterLink *inlink)
+{
+ MPContext *m = inlink->dst->priv;
+ int i;
+ for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
+
+ av_assert0(conversion_map[i].fmt && inlink->w && inlink->h);
+
+ m->vf.fmt.have_configured = 1;
+ m->vf.fmt.orig_height = inlink->h;
+ m->vf.fmt.orig_width = inlink->w;
+ m->vf.fmt.orig_fmt = conversion_map[i].fmt;
+
+ if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0)
+ return -1;
+
+ return 0;
+}
+
+static int config_outprops(AVFilterLink *outlink)
+{
+ MPContext *m = outlink->src->priv;
+
+ outlink->w = m->next_vf.w;
+ outlink->h = m->next_vf.h;
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MPContext *m = outlink->src->priv;
+ int ret;
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n");
+
+ for(m->frame_returned=0; !m->frame_returned;){
+ ret=ff_request_frame(outlink->src->inputs[0]);
+ if(ret<0)
+ break;
+ }
+
+ av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret);
+ return ret;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ MPContext *m = inlink->dst->priv;
+ int i;
+ double pts= MP_NOPTS_VALUE;
+ mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height);
+
+ if(inpic->pts != AV_NOPTS_VALUE)
+ pts= inpic->pts / av_q2d(inlink->time_base);
+
+ for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
+ ff_mp_image_setfmt(mpi,conversion_map[i].fmt);
+ m->in_pix_fmt = inlink->format;
+
+ memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes)));
+ memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));
+
+ if (inpic->interlaced_frame)
+ mpi->fields |= MP_IMGFIELD_INTERLACED;
+ if (inpic->top_field_first)
+ mpi->fields |= MP_IMGFIELD_TOP_FIRST;
+ if (inpic->repeat_pict)
+ mpi->fields |= MP_IMGFIELD_REPEAT_FIRST;
+
+ // mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
+ mpi->flags |= MP_IMGFLAG_READABLE;
+ if(!av_frame_is_writable(inpic))
+ mpi->flags |= MP_IMGFLAG_PRESERVE;
+ if(m->vf.put_image(&m->vf, mpi, pts) == 0){
+ av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n");
+ }else{
+ av_frame_free(&inpic);
+ }
+ ff_free_mp_image(mpi);
+ return 0;
+}
+
+static const AVFilterPad mp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_inprops,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_outprops,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mp = {
+ .name = "mp",
+ .description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(MPContext),
+ .query_formats = query_formats,
+ .inputs = mp_inputs,
+ .outputs = mp_outputs,
+ .priv_class = &mp_class,
+};
diff --git a/libavfilter/vf_mpdecimate.c b/libavfilter/vf_mpdecimate.c
new file mode 100644
index 0000000..3ed9602
--- /dev/null
+++ b/libavfilter/vf_mpdecimate.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2003 Rich Felker
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file mpdecimate filter, ported from libmpcodecs/vf_decimate.c by
+ * Rich Felker.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixelutils.h"
+#include "libavutil/timestamp.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int lo, hi; ///< lower and higher threshold number of differences
+ ///< values for 8x8 blocks
+
+ float frac; ///< threshold of changed pixels over the total fraction
+
+ int max_drop_count; ///< if positive: maximum number of sequential frames to drop
+ ///< if negative: minimum number of frames between two drops
+
+ int drop_count; ///< if positive: number of frames sequentially dropped
+ ///< if negative: number of sequential frames which were not dropped
+
+ int hsub, vsub; ///< chroma subsampling values
+ AVFrame *ref; ///< reference picture
+ av_pixelutils_sad_fn sad; ///< sum of absolute difference function
+} DecimateContext;
+
+#define OFFSET(x) offsetof(DecimateContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption mpdecimate_options[] = {
+ { "max", "set the maximum number of consecutive dropped frames (positive), or the minimum interval between dropped frames (negative)",
+ OFFSET(max_drop_count), AV_OPT_TYPE_INT, {.i64=0}, INT_MIN, INT_MAX, FLAGS },
+ { "hi", "set high dropping threshold", OFFSET(hi), AV_OPT_TYPE_INT, {.i64=64*12}, INT_MIN, INT_MAX, FLAGS },
+ { "lo", "set low dropping threshold", OFFSET(lo), AV_OPT_TYPE_INT, {.i64=64*5}, INT_MIN, INT_MAX, FLAGS },
+ { "frac", "set fraction dropping threshold", OFFSET(frac), AV_OPT_TYPE_FLOAT, {.dbl=0.33}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mpdecimate);
+
+/**
+ * Return 1 if the two planes are different, 0 otherwise.
+ */
+static int diff_planes(AVFilterContext *ctx,
+ uint8_t *cur, int cur_linesize,
+ uint8_t *ref, int ref_linesize,
+ int w, int h)
+{
+ DecimateContext *decimate = ctx->priv;
+
+ int x, y;
+ int d, c = 0;
+ int t = (w/16)*(h/16)*decimate->frac;
+
+ /* compute difference for blocks of 8x8 bytes */
+ for (y = 0; y < h-7; y += 4) {
+ for (x = 8; x < w-7; x += 4) {
+ d = decimate->sad(cur + y*cur_linesize + x, cur_linesize,
+ ref + y*ref_linesize + x, ref_linesize);
+ if (d > decimate->hi)
+ return 1;
+ if (d > decimate->lo) {
+ c++;
+ if (c > t)
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Tell if the frame should be decimated, for example if it is no much
+ * different with respect to the reference frame ref.
+ */
+static int decimate_frame(AVFilterContext *ctx,
+ AVFrame *cur, AVFrame *ref)
+{
+ DecimateContext *decimate = ctx->priv;
+ int plane;
+
+ if (decimate->max_drop_count > 0 &&
+ decimate->drop_count >= decimate->max_drop_count)
+ return 0;
+ if (decimate->max_drop_count < 0 &&
+ (decimate->drop_count-1) > decimate->max_drop_count)
+ return 0;
+
+ for (plane = 0; ref->data[plane] && ref->linesize[plane]; plane++) {
+ int vsub = plane == 1 || plane == 2 ? decimate->vsub : 0;
+ int hsub = plane == 1 || plane == 2 ? decimate->hsub : 0;
+ if (diff_planes(ctx,
+ cur->data[plane], cur->linesize[plane],
+ ref->data[plane], ref->linesize[plane],
+ FF_CEIL_RSHIFT(ref->width, hsub),
+ FF_CEIL_RSHIFT(ref->height, vsub)))
+ return 0;
+ }
+
+ return 1;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ DecimateContext *decimate = ctx->priv;
+
+ decimate->sad = av_pixelutils_get_sad_fn(3, 3, 0, decimate); // 8x8, not aligned on blocksize
+ if (!decimate->sad)
+ return AVERROR(EINVAL);
+
+ av_log(ctx, AV_LOG_VERBOSE, "max_drop_count:%d hi:%d lo:%d frac:%f\n",
+ decimate->max_drop_count, decimate->hi, decimate->lo, decimate->frac);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ DecimateContext *decimate = ctx->priv;
+ av_frame_free(&decimate->ref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ DecimateContext *decimate = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ decimate->hsub = pix_desc->log2_chroma_w;
+ decimate->vsub = pix_desc->log2_chroma_h;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *cur)
+{
+ DecimateContext *decimate = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int ret;
+
+ if (decimate->ref && decimate_frame(inlink->dst, cur, decimate->ref)) {
+ decimate->drop_count = FFMAX(1, decimate->drop_count+1);
+ } else {
+ av_frame_free(&decimate->ref);
+ decimate->ref = cur;
+ decimate->drop_count = FFMIN(-1, decimate->drop_count-1);
+
+ if (ret = ff_filter_frame(outlink, av_frame_clone(cur)) < 0)
+ return ret;
+ }
+
+ av_log(inlink->dst, AV_LOG_DEBUG,
+ "%s pts:%s pts_time:%s drop_count:%d\n",
+ decimate->drop_count > 0 ? "drop" : "keep",
+ av_ts2str(cur->pts), av_ts2timestr(cur->pts, &inlink->time_base),
+ decimate->drop_count);
+
+ if (decimate->drop_count > 0)
+ av_frame_free(&cur);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ DecimateContext *decimate = outlink->src->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int ret;
+
+ do {
+ ret = ff_request_frame(inlink);
+ } while (decimate->drop_count > 0 && ret >= 0);
+
+ return ret;
+}
+
+static const AVFilterPad mpdecimate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mpdecimate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mpdecimate = {
+ .name = "mpdecimate",
+ .description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(DecimateContext),
+ .priv_class = &mpdecimate_class,
+ .query_formats = query_formats,
+ .inputs = mpdecimate_inputs,
+ .outputs = mpdecimate_outputs,
+};
diff --git a/libavfilter/vf_noise.c b/libavfilter/vf_noise.c
new file mode 100644
index 0000000..4acad8a
--- /dev/null
+++ b/libavfilter/vf_noise.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * noise generator
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/lfg.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "vf_noise.h"
+#include "video.h"
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+#define OFFSET(x) offsetof(NoiseContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+#define NOISE_PARAMS(name, x, param) \
+ {#name"_seed", "set component #"#x" noise seed", OFFSET(param.seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, FLAGS}, \
+ {#name"_strength", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \
+ {#name"s", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \
+ {#name"_flags", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \
+ {#name"f", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \
+ {"a", "averaged noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_AVERAGED}, 0, 0, FLAGS, #name"_flags"}, \
+ {"p", "(semi)regular pattern", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_PATTERN}, 0, 0, FLAGS, #name"_flags"}, \
+ {"t", "temporal noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_TEMPORAL}, 0, 0, FLAGS, #name"_flags"}, \
+ {"u", "uniform noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_UNIFORM}, 0, 0, FLAGS, #name"_flags"},
+
+static const AVOption noise_options[] = {
+ NOISE_PARAMS(all, 0, all)
+ NOISE_PARAMS(c0, 0, param[0])
+ NOISE_PARAMS(c1, 1, param[1])
+ NOISE_PARAMS(c2, 2, param[2])
+ NOISE_PARAMS(c3, 3, param[3])
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(noise);
+
+static const int8_t patt[4] = { -1, 0, 1, 0 };
+
+#define RAND_N(range) ((int) ((double) range * av_lfg_get(lfg) / (UINT_MAX + 1.0)))
+static av_cold int init_noise(NoiseContext *n, int comp)
+{
+ int8_t *noise = av_malloc(MAX_NOISE * sizeof(int8_t));
+ FilterParams *fp = &n->param[comp];
+ AVLFG *lfg = &n->param[comp].lfg;
+ int strength = fp->strength;
+ int flags = fp->flags;
+ int i, j;
+
+ if (!noise)
+ return AVERROR(ENOMEM);
+
+ av_lfg_init(&fp->lfg, fp->seed + comp*31415U);
+
+ for (i = 0, j = 0; i < MAX_NOISE; i++, j++) {
+ if (flags & NOISE_UNIFORM) {
+ if (flags & NOISE_AVERAGED) {
+ if (flags & NOISE_PATTERN) {
+ noise[i] = (RAND_N(strength) - strength / 2) / 6
+ + patt[j % 4] * strength * 0.25 / 3;
+ } else {
+ noise[i] = (RAND_N(strength) - strength / 2) / 3;
+ }
+ } else {
+ if (flags & NOISE_PATTERN) {
+ noise[i] = (RAND_N(strength) - strength / 2) / 2
+ + patt[j % 4] * strength * 0.25;
+ } else {
+ noise[i] = RAND_N(strength) - strength / 2;
+ }
+ }
+ } else {
+ double x1, x2, w, y1;
+ do {
+ x1 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
+ x2 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0;
+ w = x1 * x1 + x2 * x2;
+ } while (w >= 1.0);
+
+ w = sqrt((-2.0 * log(w)) / w);
+ y1 = x1 * w;
+ y1 *= strength / sqrt(3.0);
+ if (flags & NOISE_PATTERN) {
+ y1 /= 2;
+ y1 += patt[j % 4] * strength * 0.35;
+ }
+ y1 = av_clipf(y1, -128, 127);
+ if (flags & NOISE_AVERAGED)
+ y1 /= 3.0;
+ noise[i] = (int)y1;
+ }
+ if (RAND_N(6) == 0)
+ j--;
+ }
+
+ for (i = 0; i < MAX_RES; i++)
+ for (j = 0; j < 3; j++)
+ fp->prev_shift[i][j] = noise + (av_lfg_get(lfg) & (MAX_SHIFT - 1));
+
+ fp->noise = noise;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (desc->flags & AV_PIX_FMT_FLAG_PLANAR && !((desc->comp[0].depth_minus1 + 1) & 7))
+ ff_add_format(&formats, fmt);
+ }
+
+ ff_set_common_formats(ctx, formats);
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ NoiseContext *n = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ n->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ if ((ret = av_image_fill_linesizes(n->bytewidth, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ n->height[1] = n->height[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ n->height[0] = n->height[3] = inlink->h;
+
+ return 0;
+}
+
+void ff_line_noise_c(uint8_t *dst, const uint8_t *src, const int8_t *noise,
+ int len, int shift)
+{
+ int i;
+
+ noise += shift;
+ for (i = 0; i < len; i++) {
+ int v = src[i] + noise[i];
+
+ dst[i] = av_clip_uint8(v);
+ }
+}
+
+void ff_line_noise_avg_c(uint8_t *dst, const uint8_t *src,
+ int len, const int8_t * const *shift)
+{
+ int i;
+ const int8_t *src2 = (const int8_t*)src;
+
+ for (i = 0; i < len; i++) {
+ const int n = shift[0][i] + shift[1][i] + shift[2][i];
+ dst[i] = src2[i] + ((n * src2[i]) >> 7);
+ }
+}
+
+static void noise(uint8_t *dst, const uint8_t *src,
+ int dst_linesize, int src_linesize,
+ int width, int start, int end, NoiseContext *n, int comp)
+{
+ FilterParams *p = &n->param[comp];
+ int8_t *noise = p->noise;
+ const int flags = p->flags;
+ int y;
+
+ if (!noise) {
+ if (dst != src)
+ av_image_copy_plane(dst, dst_linesize, src, src_linesize, width, end - start);
+ return;
+ }
+
+ for (y = start; y < end; y++) {
+ const int ix = y & (MAX_RES - 1);
+ int x;
+ for (x=0; x < width; x+= MAX_RES) {
+ int w = FFMIN(width - x, MAX_RES);
+ int shift = p->rand_shift[ix];
+
+ if (flags & NOISE_AVERAGED) {
+ n->line_noise_avg(dst + x, src + x, w, (const int8_t**)p->prev_shift[ix]);
+ p->prev_shift[ix][shift & 3] = noise + shift;
+ } else {
+ n->line_noise(dst + x, src + x, noise, w, shift);
+ }
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ }
+}
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
+{
+ NoiseContext *s = ctx->priv;
+ ThreadData *td = arg;
+ int plane;
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const int height = s->height[plane];
+ const int start = (height * jobnr ) / nb_jobs;
+ const int end = (height * (jobnr+1)) / nb_jobs;
+ noise(td->out->data[plane] + start * td->out->linesize[plane],
+ td->in->data[plane] + start * td->in->linesize[plane],
+ td->out->linesize[plane], td->in->linesize[plane],
+ s->bytewidth[plane], start, end, s, plane);
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ NoiseContext *n = ctx->priv;
+ ThreadData td;
+ AVFrame *out;
+ int comp, i;
+
+ if (av_frame_is_writable(inpicref)) {
+ out = inpicref;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, inpicref);
+ }
+
+ for (comp = 0; comp < 4; comp++) {
+ FilterParams *fp = &n->param[comp];
+
+ if ((!fp->rand_shift_init || (fp->flags & NOISE_TEMPORAL)) && fp->strength) {
+
+ for (i = 0; i < MAX_RES; i++) {
+ fp->rand_shift[i] = av_lfg_get(&fp->lfg) & (MAX_SHIFT - 1);
+ }
+ fp->rand_shift_init = 1;
+ }
+ }
+
+ td.in = inpicref; td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(n->height[0], ctx->graph->nb_threads));
+ emms_c();
+
+ if (inpicref != out)
+ av_frame_free(&inpicref);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ NoiseContext *n = ctx->priv;
+ int ret, i;
+
+ for (i = 0; i < 4; i++) {
+ if (n->all.seed >= 0)
+ n->param[i].seed = n->all.seed;
+ else
+ n->param[i].seed = 123457;
+ if (n->all.strength)
+ n->param[i].strength = n->all.strength;
+ if (n->all.flags)
+ n->param[i].flags = n->all.flags;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (n->param[i].strength && ((ret = init_noise(n, i)) < 0))
+ return ret;
+ }
+
+ n->line_noise = ff_line_noise_c;
+ n->line_noise_avg = ff_line_noise_avg_c;
+
+ if (ARCH_X86)
+ ff_noise_init_x86(n);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ NoiseContext *n = ctx->priv;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ av_freep(&n->param[i].noise);
+}
+
+static const AVFilterPad noise_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad noise_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_noise = {
+ .name = "noise",
+ .description = NULL_IF_CONFIG_SMALL("Add noise."),
+ .priv_size = sizeof(NoiseContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = noise_inputs,
+ .outputs = noise_outputs,
+ .priv_class = &noise_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_noise.h b/libavfilter/vf_noise.h
new file mode 100644
index 0000000..2207ed9
--- /dev/null
+++ b/libavfilter/vf_noise.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_NOISE_H
+#define AVFILTER_NOISE_H
+
+#include "libavutil/lfg.h"
+#include "avfilter.h"
+
+#define MAX_NOISE 5120
+#define MAX_SHIFT 1024
+#define MAX_RES (MAX_NOISE-MAX_SHIFT)
+
+#define NOISE_UNIFORM 1
+#define NOISE_TEMPORAL 2
+#define NOISE_AVERAGED 8
+#define NOISE_PATTERN 16
+
+typedef struct {
+ int strength;
+ unsigned flags;
+ AVLFG lfg;
+ int seed;
+ int8_t *noise;
+ int8_t *prev_shift[MAX_RES][3];
+ int rand_shift[MAX_RES];
+ int rand_shift_init;
+} FilterParams;
+
+typedef struct {
+ const AVClass *class;
+ int nb_planes;
+ int bytewidth[4];
+ int height[4];
+ FilterParams all;
+ FilterParams param[4];
+ void (*line_noise)(uint8_t *dst, const uint8_t *src, const int8_t *noise, int len, int shift);
+ void (*line_noise_avg)(uint8_t *dst, const uint8_t *src, int len, const int8_t * const *shift);
+} NoiseContext;
+
+void ff_line_noise_c(uint8_t *dst, const uint8_t *src, const int8_t *noise, int len, int shift);
+void ff_line_noise_avg_c(uint8_t *dst, const uint8_t *src, int len, const int8_t * const *shift);
+
+void ff_noise_init_x86(NoiseContext *n);
+
+#endif /* AVFILTER_NOISE_H */
diff --git a/libavfilter/vf_null.c b/libavfilter/vf_null.c
index f872587..2355615 100644
--- a/libavfilter/vf_null.c
+++ b/libavfilter/vf_null.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,9 +28,8 @@
static const AVFilterPad avfilter_vf_null_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
@@ -44,12 +43,8 @@ static const AVFilterPad avfilter_vf_null_outputs[] = {
};
AVFilter ff_vf_null = {
- .name = "null",
+ .name = "null",
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
-
- .priv_size = 0,
-
- .inputs = avfilter_vf_null_inputs,
-
- .outputs = avfilter_vf_null_outputs,
+ .inputs = avfilter_vf_null_inputs,
+ .outputs = avfilter_vf_null_outputs,
};
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 11e0a6f..80d99bd 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -3,20 +3,20 @@
* Copyright (c) 2010 Baptiste Coudurier
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,33 +30,43 @@
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
-#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
+#include "libavutil/timestamp.h"
#include "internal.h"
+#include "dualinput.h"
+#include "drawutils.h"
#include "video.h"
static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
"main_w", "W", ///< width of the main video
"main_h", "H", ///< height of the main video
"overlay_w", "w", ///< width of the overlay video
"overlay_h", "h", ///< height of the overlay video
+ "hsub",
+ "vsub",
+ "x",
+ "y",
+ "n", ///< number of frame
+ "pos", ///< position in the file
+ "t", ///< timestamp expressed in seconds
NULL
};
enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
VAR_MAIN_W, VAR_MW,
VAR_MAIN_H, VAR_MH,
VAR_OVERLAY_W, VAR_OW,
VAR_OVERLAY_H, VAR_OH,
+ VAR_HSUB,
+ VAR_VSUB,
+ VAR_X,
+ VAR_Y,
+ VAR_N,
+ VAR_POS,
+ VAR_T,
VAR_VARS_NB
};
@@ -66,60 +76,211 @@ enum EOFAction {
EOF_ACTION_PASS
};
-static const char *eof_action_str[] = {
+static const char * const eof_action_str[] = {
"repeat", "endall", "pass"
};
#define MAIN 0
#define OVERLAY 1
+#define R 0
+#define G 1
+#define B 2
+#define A 3
+
+#define Y 0
+#define U 1
+#define V 2
+
typedef struct OverlayContext {
const AVClass *class;
int x, y; ///< position of overlayed picture
- int max_plane_step[4]; ///< steps per pixel for each plane
+ int allow_packed_rgb;
+ uint8_t main_is_packed_rgb;
+ uint8_t main_rgba_map[4];
+ uint8_t main_has_alpha;
+ uint8_t overlay_is_packed_rgb;
+ uint8_t overlay_rgba_map[4];
+ uint8_t overlay_has_alpha;
+ enum OverlayFormat { OVERLAY_FORMAT_YUV420, OVERLAY_FORMAT_YUV422, OVERLAY_FORMAT_YUV444, OVERLAY_FORMAT_RGB, OVERLAY_FORMAT_NB} format;
+ enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
+
+ FFDualInputContext dinput;
+
+ int main_pix_step[4]; ///< steps per pixel for each plane of the main output
+ int overlay_pix_step[4]; ///< steps per pixel for each plane of the overlay
int hsub, vsub; ///< chroma subsampling values
+ double var_values[VAR_VARS_NB];
char *x_expr, *y_expr;
enum EOFAction eof_action; ///< action to take on EOF from source
- AVFrame *main;
- AVFrame *over_prev, *over_next;
+ AVExpr *x_pexpr, *y_pexpr;
} OverlayContext;
static av_cold void uninit(AVFilterContext *ctx)
{
OverlayContext *s = ctx->priv;
- av_frame_free(&s->main);
- av_frame_free(&s->over_prev);
- av_frame_free(&s->over_next);
+ ff_dualinput_uninit(&s->dinput);
+ av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
+ av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
+}
+
+static inline int normalize_xy(double d, int chroma_sub)
+{
+ if (isnan(d))
+ return INT_MAX;
+ return (int)d & ~((1 << chroma_sub) - 1);
+}
+
+static void eval_expr(AVFilterContext *ctx)
+{
+ OverlayContext *s = ctx->priv;
+
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+ s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
+ s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
+ s->x = normalize_xy(s->var_values[VAR_X], s->hsub);
+ s->y = normalize_xy(s->var_values[VAR_Y], s->vsub);
+}
+
+static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
+{
+ int ret;
+ AVExpr *old = NULL;
+
+ if (*pexpr)
+ old = *pexpr;
+ ret = av_expr_parse(pexpr, expr, var_names,
+ NULL, NULL, NULL, NULL, 0, log_ctx);
+ if (ret < 0) {
+ av_log(log_ctx, AV_LOG_ERROR,
+ "Error when evaluating the expression '%s' for %s\n",
+ expr, option);
+ *pexpr = old;
+ return ret;
+ }
+
+ av_expr_free(old);
+ return 0;
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ OverlayContext *s = ctx->priv;
+ int ret;
+
+ if (!strcmp(cmd, "x"))
+ ret = set_expr(&s->x_pexpr, args, cmd, ctx);
+ else if (!strcmp(cmd, "y"))
+ ret = set_expr(&s->y_pexpr, args, cmd, ctx);
+ else
+ ret = AVERROR(ENOSYS);
+
+ if (ret < 0)
+ return ret;
+
+ if (s->eval_mode == EVAL_MODE_INIT) {
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
+ return ret;
}
static int query_formats(AVFilterContext *ctx)
{
- const enum AVPixelFormat inout_pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
- const enum AVPixelFormat blend_pix_fmts[] = { AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE };
- AVFilterFormats *inout_formats = ff_make_format_list(inout_pix_fmts);
- AVFilterFormats *blend_formats = ff_make_format_list(blend_pix_fmts);
+ OverlayContext *s = ctx->priv;
- ff_formats_ref(inout_formats, &ctx->inputs [MAIN ]->out_formats);
- ff_formats_ref(blend_formats, &ctx->inputs [OVERLAY]->out_formats);
- ff_formats_ref(inout_formats, &ctx->outputs[MAIN ]->in_formats );
+ /* overlay formats contains alpha, for avoiding conversion with alpha information loss */
+ static const enum AVPixelFormat main_pix_fmts_yuv420[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_yuv420[] = {
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat main_pix_fmts_yuv422[] = {
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_yuv422[] = {
+ AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat main_pix_fmts_yuv444[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_yuv444[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
+ };
+
+ static const enum AVPixelFormat main_pix_fmts_rgb[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ static const enum AVPixelFormat overlay_pix_fmts_rgb[] = {
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *main_formats;
+ AVFilterFormats *overlay_formats;
+
+ switch (s->format) {
+ case OVERLAY_FORMAT_YUV420:
+ main_formats = ff_make_format_list(main_pix_fmts_yuv420);
+ overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420);
+ break;
+ case OVERLAY_FORMAT_YUV422:
+ main_formats = ff_make_format_list(main_pix_fmts_yuv422);
+ overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv422);
+ break;
+ case OVERLAY_FORMAT_YUV444:
+ main_formats = ff_make_format_list(main_pix_fmts_yuv444);
+ overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv444);
+ break;
+ case OVERLAY_FORMAT_RGB:
+ main_formats = ff_make_format_list(main_pix_fmts_rgb);
+ overlay_formats = ff_make_format_list(overlay_pix_fmts_rgb);
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ ff_formats_ref(main_formats, &ctx->inputs [MAIN ]->out_formats);
+ ff_formats_ref(overlay_formats, &ctx->inputs [OVERLAY]->out_formats);
+ ff_formats_ref(main_formats, &ctx->outputs[MAIN ]->in_formats );
return 0;
}
+static const enum AVPixelFormat alpha_pix_fmts[] = {
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE
+};
+
static int config_input_main(AVFilterLink *inlink)
{
OverlayContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
- av_image_fill_max_pixsteps(s->max_plane_step, NULL, pix_desc);
+ av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
+
s->hsub = pix_desc->log2_chroma_w;
s->vsub = pix_desc->log2_chroma_h;
+ s->main_is_packed_rgb =
+ ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
+ s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
return 0;
}
@@ -127,66 +288,58 @@ static int config_input_overlay(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
OverlayContext *s = inlink->dst->priv;
- char *expr;
- double var_values[VAR_VARS_NB], res;
int ret;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+ av_image_fill_max_pixsteps(s->overlay_pix_step, NULL, pix_desc);
/* Finish the configuration by evaluating the expressions
now when both inputs are configured. */
- var_values[VAR_E ] = M_E;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_PI ] = M_PI;
-
- var_values[VAR_MAIN_W ] = var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
- var_values[VAR_MAIN_H ] = var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
- var_values[VAR_OVERLAY_W] = var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
- var_values[VAR_OVERLAY_H] = var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
-
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- s->x = res;
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)))
- goto fail;
- s->y = res;
- /* x may depend on y */
- if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values,
- NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
- goto fail;
- s->x = res;
+ s->var_values[VAR_MAIN_W ] = s->var_values[VAR_MW] = ctx->inputs[MAIN ]->w;
+ s->var_values[VAR_MAIN_H ] = s->var_values[VAR_MH] = ctx->inputs[MAIN ]->h;
+ s->var_values[VAR_OVERLAY_W] = s->var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
+ s->var_values[VAR_OVERLAY_H] = s->var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;
+ s->var_values[VAR_HSUB] = 1<<pix_desc->log2_chroma_w;
+ s->var_values[VAR_VSUB] = 1<<pix_desc->log2_chroma_h;
+ s->var_values[VAR_X] = NAN;
+ s->var_values[VAR_Y] = NAN;
+ s->var_values[VAR_N] = 0;
+ s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_POS] = NAN;
+
+ if ((ret = set_expr(&s->x_pexpr, s->x_expr, "x", ctx)) < 0 ||
+ (ret = set_expr(&s->y_pexpr, s->y_expr, "y", ctx)) < 0)
+ return ret;
+
+ s->overlay_is_packed_rgb =
+ ff_fill_rgba_map(s->overlay_rgba_map, inlink->format) >= 0;
+ s->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
+
+ if (s->eval_mode == EVAL_MODE_INIT) {
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_VERBOSE, "x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
av_log(ctx, AV_LOG_VERBOSE,
- "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s eof_action:%s\n",
+ "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s eof_action:%s\n",
ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
- s->x, s->y,
ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format),
eof_action_str[s->eof_action]);
-
- if (s->x < 0 || s->y < 0 ||
- s->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
- s->y + var_values[VAR_OVERLAY_H] > var_values[VAR_MAIN_H]) {
- av_log(ctx, AV_LOG_ERROR,
- "Overlay area (%d,%d)<->(%d,%d) not within the main area (0,0)<->(%d,%d) or zero-sized\n",
- s->x, s->y,
- (int)(s->x + var_values[VAR_OVERLAY_W]),
- (int)(s->y + var_values[VAR_OVERLAY_H]),
- (int)var_values[VAR_MAIN_W], (int)var_values[VAR_MAIN_H]);
- return AVERROR(EINVAL);
- }
return 0;
-
-fail:
- av_log(NULL, AV_LOG_ERROR,
- "Error when evaluating the expression '%s'\n", expr);
- return ret;
}
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
+ OverlayContext *s = ctx->priv;
+ int ret;
+
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
outlink->w = ctx->inputs[MAIN]->w;
outlink->h = ctx->inputs[MAIN]->h;
@@ -195,71 +348,196 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
-static void blend_frame(AVFilterContext *ctx,
- AVFrame *dst, AVFrame *src,
+// divide by 255 and round to nearest
+// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
+#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
+
+// calculate the unpremultiplied alpha, applying the general equation:
+// alpha = alpha_overlay / ( (alpha_main + alpha_overlay) - (alpha_main * alpha_overlay) )
+// (((x) << 16) - ((x) << 9) + (x)) is a faster version of: 255 * 255 * x
+// ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)) is a faster version of: 255 * (x + y)
+#define UNPREMULTIPLY_ALPHA(x, y) ((((x) << 16) - ((x) << 9) + (x)) / ((((x) + (y)) << 8) - ((x) + (y)) - (y) * (x)))
+
+/**
+ * Blend image in src to destination buffer dst at position (x, y).
+ */
+static void blend_image(AVFilterContext *ctx,
+ AVFrame *dst, const AVFrame *src,
int x, int y)
{
OverlayContext *s = ctx->priv;
- int i, j, k;
- int width, height;
- int overlay_end_y = y + src->height;
- int end_y, start_y;
-
- width = FFMIN(dst->width - x, src->width);
- end_y = FFMIN(dst->height, overlay_end_y);
- start_y = FFMAX(y, 0);
- height = end_y - start_y;
-
- if (dst->format == AV_PIX_FMT_BGR24 || dst->format == AV_PIX_FMT_RGB24) {
- uint8_t *dp = dst->data[0] + x * 3 + start_y * dst->linesize[0];
- uint8_t *sp = src->data[0];
- int b = dst->format == AV_PIX_FMT_BGR24 ? 2 : 0;
- int r = dst->format == AV_PIX_FMT_BGR24 ? 0 : 2;
- if (y < 0)
- sp += -y * src->linesize[0];
- for (i = 0; i < height; i++) {
- uint8_t *d = dp, *s = sp;
- for (j = 0; j < width; j++) {
- d[r] = (d[r] * (0xff - s[3]) + s[0] * s[3] + 128) >> 8;
- d[1] = (d[1] * (0xff - s[3]) + s[1] * s[3] + 128) >> 8;
- d[b] = (d[b] * (0xff - s[3]) + s[2] * s[3] + 128) >> 8;
- d += 3;
- s += 4;
+ int i, imax, j, jmax, k, kmax;
+ const int src_w = src->width;
+ const int src_h = src->height;
+ const int dst_w = dst->width;
+ const int dst_h = dst->height;
+
+ if (x >= dst_w || x+src_w < 0 ||
+ y >= dst_h || y+src_h < 0)
+ return; /* no intersection */
+
+ if (s->main_is_packed_rgb) {
+ uint8_t alpha; ///< the amount of overlay to blend on to main
+ const int dr = s->main_rgba_map[R];
+ const int dg = s->main_rgba_map[G];
+ const int db = s->main_rgba_map[B];
+ const int da = s->main_rgba_map[A];
+ const int dstep = s->main_pix_step[0];
+ const int sr = s->overlay_rgba_map[R];
+ const int sg = s->overlay_rgba_map[G];
+ const int sb = s->overlay_rgba_map[B];
+ const int sa = s->overlay_rgba_map[A];
+ const int sstep = s->overlay_pix_step[0];
+ const int main_has_alpha = s->main_has_alpha;
+ uint8_t *s, *sp, *d, *dp;
+
+ i = FFMAX(-y, 0);
+ sp = src->data[0] + i * src->linesize[0];
+ dp = dst->data[0] + (y+i) * dst->linesize[0];
+
+ for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
+ j = FFMAX(-x, 0);
+ s = sp + j * sstep;
+ d = dp + (x+j) * dstep;
+
+ for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
+ alpha = s[sa];
+
+ // if the main channel has an alpha channel, alpha has to be calculated
+ // to create an un-premultiplied (straight) alpha value
+ if (main_has_alpha && alpha != 0 && alpha != 255) {
+ uint8_t alpha_d = d[da];
+ alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
+ }
+
+ switch (alpha) {
+ case 0:
+ break;
+ case 255:
+ d[dr] = s[sr];
+ d[dg] = s[sg];
+ d[db] = s[sb];
+ break;
+ default:
+ // main_value = main_value * (1 - alpha) + overlay_value * alpha
+ // since alpha is in the range 0-255, the result must divided by 255
+ d[dr] = FAST_DIV255(d[dr] * (255 - alpha) + s[sr] * alpha);
+ d[dg] = FAST_DIV255(d[dg] * (255 - alpha) + s[sg] * alpha);
+ d[db] = FAST_DIV255(d[db] * (255 - alpha) + s[sb] * alpha);
+ }
+ if (main_has_alpha) {
+ switch (alpha) {
+ case 0:
+ break;
+ case 255:
+ d[da] = s[sa];
+ break;
+ default:
+ // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
+ d[da] += FAST_DIV255((255 - d[da]) * s[sa]);
+ }
+ }
+ d += dstep;
+ s += sstep;
}
dp += dst->linesize[0];
sp += src->linesize[0];
}
} else {
+ const int main_has_alpha = s->main_has_alpha;
+ if (main_has_alpha) {
+ uint8_t alpha; ///< the amount of overlay to blend on to main
+ uint8_t *s, *sa, *d, *da;
+
+ i = FFMAX(-y, 0);
+ sa = src->data[3] + i * src->linesize[3];
+ da = dst->data[3] + (y+i) * dst->linesize[3];
+
+ for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
+ j = FFMAX(-x, 0);
+ s = sa + j;
+ d = da + x+j;
+
+ for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
+ alpha = *s;
+ if (alpha != 0 && alpha != 255) {
+ uint8_t alpha_d = *d;
+ alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
+ }
+ switch (alpha) {
+ case 0:
+ break;
+ case 255:
+ *d = *s;
+ break;
+ default:
+ // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
+ *d += FAST_DIV255((255 - *d) * *s);
+ }
+ d += 1;
+ s += 1;
+ }
+ da += dst->linesize[3];
+ sa += src->linesize[3];
+ }
+ }
for (i = 0; i < 3; i++) {
int hsub = i ? s->hsub : 0;
int vsub = i ? s->vsub : 0;
- uint8_t *dp = dst->data[i] + (x >> hsub) +
- (start_y >> vsub) * dst->linesize[i];
- uint8_t *sp = src->data[i];
- uint8_t *ap = src->data[3];
- int wp = FFALIGN(width, 1<<hsub) >> hsub;
- int hp = FFALIGN(height, 1<<vsub) >> vsub;
- if (y < 0) {
- sp += ((-y) >> vsub) * src->linesize[i];
- ap += -y * src->linesize[3];
- }
- for (j = 0; j < hp; j++) {
- uint8_t *d = dp, *s = sp, *a = ap;
- for (k = 0; k < wp; k++) {
- // average alpha for color components, improve quality
+ int src_wp = FF_CEIL_RSHIFT(src_w, hsub);
+ int src_hp = FF_CEIL_RSHIFT(src_h, vsub);
+ int dst_wp = FF_CEIL_RSHIFT(dst_w, hsub);
+ int dst_hp = FF_CEIL_RSHIFT(dst_h, vsub);
+ int yp = y>>vsub;
+ int xp = x>>hsub;
+ uint8_t *s, *sp, *d, *dp, *a, *ap;
+
+ j = FFMAX(-yp, 0);
+ sp = src->data[i] + j * src->linesize[i];
+ dp = dst->data[i] + (yp+j) * dst->linesize[i];
+ ap = src->data[3] + (j<<vsub) * src->linesize[3];
+
+ for (jmax = FFMIN(-yp + dst_hp, src_hp); j < jmax; j++) {
+ k = FFMAX(-xp, 0);
+ d = dp + xp+k;
+ s = sp + k;
+ a = ap + (k<<hsub);
+
+ for (kmax = FFMIN(-xp + dst_wp, src_wp); k < kmax; k++) {
int alpha_v, alpha_h, alpha;
- if (hsub && vsub && j+1 < hp && k+1 < wp) {
+
+ // average alpha for color components, improve quality
+ if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
alpha = (a[0] + a[src->linesize[3]] +
a[1] + a[src->linesize[3]+1]) >> 2;
} else if (hsub || vsub) {
- alpha_h = hsub && k+1 < wp ?
+ alpha_h = hsub && k+1 < src_wp ?
(a[0] + a[1]) >> 1 : a[0];
- alpha_v = vsub && j+1 < hp ?
+ alpha_v = vsub && j+1 < src_hp ?
(a[0] + a[src->linesize[3]]) >> 1 : a[0];
alpha = (alpha_v + alpha_h) >> 1;
} else
alpha = a[0];
- *d = (*d * (0xff - alpha) + *s++ * alpha + 128) >> 8;
+ // if the main channel has an alpha channel, alpha has to be calculated
+ // to create an un-premultiplied (straight) alpha value
+ if (main_has_alpha && alpha != 0 && alpha != 255) {
+ // average alpha for color components, improve quality
+ uint8_t alpha_d;
+ if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
+ alpha_d = (d[0] + d[src->linesize[3]] +
+ d[1] + d[src->linesize[3]+1]) >> 2;
+ } else if (hsub || vsub) {
+ alpha_h = hsub && k+1 < src_wp ?
+ (d[0] + d[1]) >> 1 : d[0];
+ alpha_v = vsub && j+1 < src_hp ?
+ (d[0] + d[src->linesize[3]]) >> 1 : d[0];
+ alpha_d = (alpha_v + alpha_h) >> 1;
+ } else
+ alpha_d = d[0];
+ alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
+ }
+ *d = FAST_DIV255(*d * (255 - alpha) + *s * alpha);
+ s++;
d++;
a += 1 << hsub;
}
@@ -271,136 +549,107 @@ static void blend_frame(AVFilterContext *ctx,
}
}
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
+static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
+ const AVFrame *second)
{
- OverlayContext *s = inlink->dst->priv;
+ OverlayContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
- av_assert0(!s->main);
- s->main = frame;
+ if (s->eval_mode == EVAL_MODE_FRAME) {
+ int64_t pos = av_frame_get_pkt_pos(mainpic);
- return 0;
-}
+ s->var_values[VAR_N] = inlink->frame_count;
+ s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
+ NAN : mainpic->pts * av_q2d(inlink->time_base);
+ s->var_values[VAR_POS] = pos == -1 ? NAN : pos;
-static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
-{
- OverlayContext *s = inlink->dst->priv;
-
- av_assert0(!s->over_next);
- s->over_next = frame;
+ eval_expr(ctx);
+ av_log(ctx, AV_LOG_DEBUG, "n:%f t:%f pos:%f x:%f xi:%d y:%f yi:%d\n",
+ s->var_values[VAR_N], s->var_values[VAR_T], s->var_values[VAR_POS],
+ s->var_values[VAR_X], s->x,
+ s->var_values[VAR_Y], s->y);
+ }
- return 0;
+ blend_image(ctx, mainpic, second, s->x, s->y);
+ return mainpic;
}
-static int output_frame(AVFilterContext *ctx)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
- OverlayContext *s = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- int ret = ff_filter_frame(outlink, s->main);
- s->main = NULL;
-
- return ret;
+ OverlayContext *s = inlink->dst->priv;
+ av_log(inlink->dst, AV_LOG_DEBUG, "Incoming frame (time:%s) from link #%d\n", av_ts2timestr(inpicref->pts, &inlink->time_base), FF_INLINK_IDX(inlink));
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
-static int handle_overlay_eof(AVFilterContext *ctx)
+static int request_frame(AVFilterLink *outlink)
{
- OverlayContext *s = ctx->priv;
- /* Repeat previous frame on secondary input */
- if (s->over_prev && s->eof_action == EOF_ACTION_REPEAT)
- blend_frame(ctx, s->main, s->over_prev, s->x, s->y);
- /* End both streams */
- else if (s->eof_action == EOF_ACTION_ENDALL)
- return AVERROR_EOF;
- return output_frame(ctx);
+ OverlayContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
}
-static int request_frame(AVFilterLink *outlink)
+static av_cold int init(AVFilterContext *ctx)
{
- AVFilterContext *ctx = outlink->src;
- OverlayContext *s = ctx->priv;
- AVRational tb_main = ctx->inputs[MAIN]->time_base;
- AVRational tb_over = ctx->inputs[OVERLAY]->time_base;
- int ret = 0;
-
- /* get a frame on the main input */
- if (!s->main) {
- ret = ff_request_frame(ctx->inputs[MAIN]);
- if (ret < 0)
- return ret;
- }
+ OverlayContext *s = ctx->priv;
- /* get a new frame on the overlay input, on EOF check setting 'eof_action' */
- if (!s->over_next) {
- ret = ff_request_frame(ctx->inputs[OVERLAY]);
- if (ret == AVERROR_EOF)
- return handle_overlay_eof(ctx);
- else if (ret < 0)
- return ret;
+ if (s->allow_packed_rgb) {
+ av_log(ctx, AV_LOG_WARNING,
+ "The rgb option is deprecated and is overriding the format option, use format instead\n");
+ s->format = OVERLAY_FORMAT_RGB;
}
-
- while (s->main->pts != AV_NOPTS_VALUE &&
- s->over_next->pts != AV_NOPTS_VALUE &&
- av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
- av_frame_free(&s->over_prev);
- FFSWAP(AVFrame*, s->over_prev, s->over_next);
-
- ret = ff_request_frame(ctx->inputs[OVERLAY]);
- if (ret == AVERROR_EOF)
- return handle_overlay_eof(ctx);
- else if (ret < 0)
- return ret;
+ if (!s->dinput.repeatlast || s->eof_action == EOF_ACTION_PASS) {
+ s->dinput.repeatlast = 0;
+ s->eof_action = EOF_ACTION_PASS;
}
-
- if (s->main->pts == AV_NOPTS_VALUE ||
- s->over_next->pts == AV_NOPTS_VALUE ||
- !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
- blend_frame(ctx, s->main, s->over_next, s->x, s->y);
- av_frame_free(&s->over_prev);
- FFSWAP(AVFrame*, s->over_prev, s->over_next);
- } else if (s->over_prev) {
- blend_frame(ctx, s->main, s->over_prev, s->x, s->y);
+ if (s->dinput.shortest || s->eof_action == EOF_ACTION_ENDALL) {
+ s->dinput.shortest = 1;
+ s->eof_action = EOF_ACTION_ENDALL;
}
- return output_frame(ctx);
+ s->dinput.process = do_blend;
+ return 0;
}
#define OFFSET(x) offsetof(OverlayContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "x", "Horizontal position of the left edge of the overlaid video on the "
- "main video.", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "y", "Vertical position of the top edge of the overlaid video on the "
- "main video.", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption overlay_options[] = {
+ { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "eof_action", "Action to take when encountering EOF from secondary input ",
OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
{ "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
{ "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
{ "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" },
- { NULL },
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { "rgb", "force packed RGB in input and output (deprecated)", OFFSET(allow_packed_rgb), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
+ { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
+ { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
+ { "yuv422", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422}, .flags = FLAGS, .unit = "format" },
+ { "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
+ { "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
+ { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { NULL }
};
-static const AVClass overlay_class = {
- .class_name = "overlay",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(overlay);
static const AVFilterPad avfilter_vf_overlay_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_main,
- .filter_frame = filter_frame_main,
+ .filter_frame = filter_frame,
.needs_writable = 1,
- .needs_fifo = 1,
},
{
.name = "overlay",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_overlay,
- .filter_frame = filter_frame_overlay,
- .needs_fifo = 1,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -416,16 +665,15 @@ static const AVFilterPad avfilter_vf_overlay_outputs[] = {
};
AVFilter ff_vf_overlay = {
- .name = "overlay",
- .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
-
- .uninit = uninit,
-
- .priv_size = sizeof(OverlayContext),
- .priv_class = &overlay_class,
-
+ .name = "overlay",
+ .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
+ .init = init,
+ .uninit = uninit,
+ .priv_size = sizeof(OverlayContext),
+ .priv_class = &overlay_class,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_overlay_inputs,
- .outputs = avfilter_vf_overlay_outputs,
+ .process_command = process_command,
+ .inputs = avfilter_vf_overlay_inputs,
+ .outputs = avfilter_vf_overlay_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
diff --git a/libavfilter/vf_owdenoise.c b/libavfilter/vf_owdenoise.c
new file mode 100644
index 0000000..5b47f76
--- /dev/null
+++ b/libavfilter/vf_owdenoise.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @todo try to change to int
+ * @todo try lifting based implementation
+ * @todo optimize optimize optimize
+ * @todo hard thresholding
+ * @todo use QP to decide filter strength
+ * @todo wavelet normalization / least squares optimal signal vs. noise thresholds
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ double luma_strength;
+ double chroma_strength;
+ int depth;
+ float *plane[16+1][4];
+ int linesize;
+ int hsub, vsub;
+} OWDenoiseContext;
+
+#define OFFSET(x) offsetof(OWDenoiseContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption owdenoise_options[] = {
+ { "depth", "set depth", OFFSET(depth), AV_OPT_TYPE_INT, {.i64 = 8}, 8, 16, FLAGS },
+ { "luma_strength", "set luma strength", OFFSET(luma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { "ls", "set luma strength", OFFSET(luma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { "chroma_strength", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { "cs", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(owdenoise);
+
+DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
+ { 0, 48, 12, 60, 3, 51, 15, 63 },
+ { 32, 16, 44, 28, 35, 19, 47, 31 },
+ { 8, 56, 4, 52, 11, 59, 7, 55 },
+ { 40, 24, 36, 20, 43, 27, 39, 23 },
+ { 2, 50, 14, 62, 1, 49, 13, 61 },
+ { 34, 18, 46, 30, 33, 17, 45, 29 },
+ { 10, 58, 6, 54, 9, 57, 5, 53 },
+ { 42, 26, 38, 22, 41, 25, 37, 21 },
+};
+
+static const double coeff[2][5] = {
+ {
+ 0.6029490182363579 * M_SQRT2,
+ 0.2668641184428723 * M_SQRT2,
+ -0.07822326652898785 * M_SQRT2,
+ -0.01686411844287495 * M_SQRT2,
+ 0.02674875741080976 * M_SQRT2,
+ },{
+ 1.115087052456994 / M_SQRT2,
+ -0.5912717631142470 / M_SQRT2,
+ -0.05754352622849957 / M_SQRT2,
+ 0.09127176311424948 / M_SQRT2,
+ }
+};
+
+static const double icoeff[2][5] = {
+ {
+ 1.115087052456994 / M_SQRT2,
+ 0.5912717631142470 / M_SQRT2,
+ -0.05754352622849957 / M_SQRT2,
+ -0.09127176311424948 / M_SQRT2,
+ },{
+ 0.6029490182363579 * M_SQRT2,
+ -0.2668641184428723 * M_SQRT2,
+ -0.07822326652898785 * M_SQRT2,
+ 0.01686411844287495 * M_SQRT2,
+ 0.02674875741080976 * M_SQRT2,
+ }
+};
+
+static inline int mirror(int x, int w)
+{
+ while ((unsigned)x > (unsigned)w) {
+ x = -x;
+ if (x < 0)
+ x += 2 * w;
+ }
+ return x;
+}
+
+static inline void decompose(float *dst_l, float *dst_h, const float *src,
+ int linesize, int w)
+{
+ int x, i;
+ for (x = 0; x < w; x++) {
+ double sum_l = src[x * linesize] * coeff[0][0];
+ double sum_h = src[x * linesize] * coeff[1][0];
+ for (i = 1; i <= 4; i++) {
+ const double s = src[mirror(x - i, w - 1) * linesize]
+ + src[mirror(x + i, w - 1) * linesize];
+
+ sum_l += coeff[0][i] * s;
+ sum_h += coeff[1][i] * s;
+ }
+ dst_l[x * linesize] = sum_l;
+ dst_h[x * linesize] = sum_h;
+ }
+}
+
+static inline void compose(float *dst, const float *src_l, const float *src_h,
+ int linesize, int w)
+{
+ int x, i;
+ for (x = 0; x < w; x++) {
+ double sum_l = src_l[x * linesize] * icoeff[0][0];
+ double sum_h = src_h[x * linesize] * icoeff[1][0];
+ for (i = 1; i <= 4; i++) {
+ const int x0 = mirror(x - i, w - 1) * linesize;
+ const int x1 = mirror(x + i, w - 1) * linesize;
+
+ sum_l += icoeff[0][i] * (src_l[x0] + src_l[x1]);
+ sum_h += icoeff[1][i] * (src_h[x0] + src_h[x1]);
+ }
+ dst[x * linesize] = (sum_l + sum_h) * 0.5;
+ }
+}
+
+static inline void decompose2D(float *dst_l, float *dst_h, const float *src,
+ int xlinesize, int ylinesize,
+ int step, int w, int h)
+{
+ int y, x;
+ for (y = 0; y < h; y++)
+ for (x = 0; x < step; x++)
+ decompose(dst_l + ylinesize*y + xlinesize*x,
+ dst_h + ylinesize*y + xlinesize*x,
+ src + ylinesize*y + xlinesize*x,
+ step * xlinesize, (w - x + step - 1) / step);
+}
+
+static inline void compose2D(float *dst, const float *src_l, const float *src_h,
+ int xlinesize, int ylinesize,
+ int step, int w, int h)
+{
+ int y, x;
+ for (y = 0; y < h; y++)
+ for (x = 0; x < step; x++)
+ compose(dst + ylinesize*y + xlinesize*x,
+ src_l + ylinesize*y + xlinesize*x,
+ src_h + ylinesize*y + xlinesize*x,
+ step * xlinesize, (w - x + step - 1) / step);
+}
+
+static void decompose2D2(float *dst[4], float *src, float *temp[2],
+ int linesize, int step, int w, int h)
+{
+ decompose2D(temp[0], temp[1], src, 1, linesize, step, w, h);
+ decompose2D( dst[0], dst[1], temp[0], linesize, 1, step, h, w);
+ decompose2D( dst[2], dst[3], temp[1], linesize, 1, step, h, w);
+}
+
+static void compose2D2(float *dst, float *src[4], float *temp[2],
+ int linesize, int step, int w, int h)
+{
+ compose2D(temp[0], src[0], src[1], linesize, 1, step, h, w);
+ compose2D(temp[1], src[2], src[3], linesize, 1, step, h, w);
+ compose2D(dst, temp[0], temp[1], 1, linesize, step, w, h);
+}
+
+static void filter(OWDenoiseContext *s,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int width, int height, double strength)
+{
+ int x, y, i, j, depth = s->depth;
+
+ while (1<<depth > width || 1<<depth > height)
+ depth--;
+
+ for (y = 0; y < height; y++)
+ for(x = 0; x < width; x++)
+ s->plane[0][0][y*s->linesize + x] = src[y*src_linesize + x];
+
+ for (i = 0; i < depth; i++)
+ decompose2D2(s->plane[i + 1], s->plane[i][0], s->plane[0] + 1, s->linesize, 1<<i, width, height);
+
+ for (i = 0; i < depth; i++) {
+ for (j = 1; j < 4; j++) {
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ double v = s->plane[i + 1][j][y*s->linesize + x];
+ if (v > strength) v -= strength;
+ else if (v < -strength) v += strength;
+ else v = 0;
+ s->plane[i + 1][j][x + y*s->linesize] = v;
+ }
+ }
+ }
+ }
+ for (i = depth-1; i >= 0; i--)
+ compose2D2(s->plane[i][0], s->plane[i + 1], s->plane[0] + 1, s->linesize, 1<<i, width, height);
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ i = s->plane[0][0][y*s->linesize + x] + dither[x&7][y&7]*(1.0/64) + 1.0/128; // yes the rounding is insane but optimal :)
+ if ((unsigned)i > 255U) i = ~(i >> 31);
+ dst[y*dst_linesize + x] = i;
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ int direct = 0;
+ AVFilterContext *ctx = inlink->dst;
+ OWDenoiseContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ const int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub);
+ const int ch = FF_CEIL_RSHIFT(inlink->h, s->vsub);
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ filter(s, out->data[0], out->linesize[0], in->data[0], in->linesize[0], inlink->w, inlink->h, s->luma_strength);
+ filter(s, out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, s->chroma_strength);
+ filter(s, out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, s->chroma_strength);
+
+ if (!direct) {
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ int i, j;
+ OWDenoiseContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int h = FFALIGN(inlink->h, 16);
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
+ s->linesize = FFALIGN(inlink->w, 16);
+ for (j = 0; j < 4; j++) {
+ for (i = 0; i <= s->depth; i++) {
+ s->plane[i][j] = av_malloc_array(s->linesize, h * sizeof(s->plane[0][0][0]));
+ if (!s->plane[i][j])
+ return AVERROR(ENOMEM);
+ }
+ }
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i, j;
+ OWDenoiseContext *s = ctx->priv;
+
+ for (j = 0; j < 4; j++)
+ for (i = 0; i <= s->depth; i++)
+ av_freep(&s->plane[i][j]);
+}
+
+static const AVFilterPad owdenoise_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad owdenoise_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_owdenoise = {
+ .name = "owdenoise",
+ .description = NULL_IF_CONFIG_SMALL("Denoise using wavelets."),
+ .priv_size = sizeof(OWDenoiseContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = owdenoise_inputs,
+ .outputs = owdenoise_outputs,
+ .priv_class = &owdenoise_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_pad.c b/libavfilter/vf_pad.c
index 634af4c..2d9b9d0 100644
--- a/libavfilter/vf_pad.c
+++ b/libavfilter/vf_pad.c
@@ -2,20 +2,20 @@
* Copyright (c) 2008 vmrsss
* Copyright (c) 2009 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -42,9 +42,6 @@
#include "drawutils.h"
static const char *const var_names[] = {
- "PI",
- "PHI",
- "E",
"in_w", "iw",
"in_h", "ih",
"out_w", "ow",
@@ -52,15 +49,14 @@ static const char *const var_names[] = {
"x",
"y",
"a",
+ "sar",
+ "dar",
"hsub",
"vsub",
NULL
};
enum var_name {
- VAR_PI,
- VAR_PHI,
- VAR_E,
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
@@ -68,6 +64,8 @@ enum var_name {
VAR_X,
VAR_Y,
VAR_A,
+ VAR_SAR,
+ VAR_DAR,
VAR_HSUB,
VAR_VSUB,
VARS_NB
@@ -75,22 +73,7 @@ enum var_name {
static int query_formats(AVFilterContext *ctx)
{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
-
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
-
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0;
}
@@ -104,58 +87,32 @@ typedef struct PadContext {
char *h_expr; ///< height expression string
char *x_expr; ///< width expression string
char *y_expr; ///< height expression string
- char *color_str;
-
- uint8_t color[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area
- uint8_t *line[4];
- int line_step[4];
- int hsub, vsub; ///< chroma subsampling values
+ uint8_t rgba_color[4]; ///< color for the padding area
+ FFDrawContext draw;
+ FFDrawColor color;
} PadContext;
-static av_cold int init(AVFilterContext *ctx)
-{
- PadContext *s = ctx->priv;
-
- if (av_parse_color(s->color, s->color_str, -1, ctx) < 0)
- return AVERROR(EINVAL);
-
- return 0;
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- PadContext *s = ctx->priv;
- int i;
-
- for (i = 0; i < 4; i++) {
- av_freep(&s->line[i]);
- s->line_step[i] = 0;
- }
-}
-
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
PadContext *s = ctx->priv;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
- uint8_t rgba_color[4];
- int ret, is_packed_rgba;
+ int ret;
double var_values[VARS_NB], res;
char *expr;
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ ff_draw_init(&s->draw, inlink->format, 0);
+ ff_draw_color(&s->draw, &s->color, s->rgba_color);
- var_values[VAR_PI] = M_PI;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_E] = M_E;
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
var_values[VAR_A] = (double) inlink->w / inlink->h;
- var_values[VAR_HSUB] = 1<<s->hsub;
- var_values[VAR_VSUB] = 1<<s->vsub;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
+ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = 1 << s->draw.hsub_max;
+ var_values[VAR_VSUB] = 1 << s->draw.vsub_max;
/* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = s->w_expr),
@@ -202,22 +159,16 @@ static int config_input(AVFilterLink *inlink)
if (!s->h)
s->h = inlink->h;
- s->w &= ~((1 << s->hsub) - 1);
- s->h &= ~((1 << s->vsub) - 1);
- s->x &= ~((1 << s->hsub) - 1);
- s->y &= ~((1 << s->vsub) - 1);
+ s->w = ff_draw_round_to_sub(&s->draw, 0, -1, s->w);
+ s->h = ff_draw_round_to_sub(&s->draw, 1, -1, s->h);
+ s->x = ff_draw_round_to_sub(&s->draw, 0, -1, s->x);
+ s->y = ff_draw_round_to_sub(&s->draw, 1, -1, s->y);
+ s->in_w = ff_draw_round_to_sub(&s->draw, 0, -1, inlink->w);
+ s->in_h = ff_draw_round_to_sub(&s->draw, 1, -1, inlink->h);
- s->in_w = inlink->w & ~((1 << s->hsub) - 1);
- s->in_h = inlink->h & ~((1 << s->vsub) - 1);
-
- memcpy(rgba_color, s->color, sizeof(rgba_color));
- ff_fill_line_with_color(s->line, s->line_step, s->w, s->color,
- inlink->format, rgba_color, &is_packed_rgba, NULL);
-
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X[%s]\n",
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X\n",
inlink->w, inlink->h, s->w, s->h, s->x, s->y,
- s->color[0], s->color[1], s->color[2], s->color[3],
- is_packed_rgba ? "rgba" : "yuva");
+ s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], s->rgba_color[3]);
if (s->x < 0 || s->y < 0 ||
s->w <= 0 || s->h <= 0 ||
@@ -262,12 +213,11 @@ static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
frame->width = w;
frame->height = h;
- for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
- int hsub = (plane == 1 || plane == 2) ? s->hsub : 0;
- int vsub = (plane == 1 || plane == 2) ? s->vsub : 0;
-
- frame->data[plane] += (s->x >> hsub) * s->line_step[plane] +
- (s->y >> vsub) * frame->linesize[plane];
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
+ int hsub = s->draw.hsub[plane];
+ int vsub = s->draw.vsub[plane];
+ frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +
+ (s->y >> vsub) * frame->linesize[plane];
}
return frame;
@@ -288,38 +238,37 @@ static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
/* for each plane in this buffer, check that it can be padded without
* going over buffer bounds or other planes */
for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) {
- int hsub = (planes[i] == 1 || planes[i] == 2) ? s->hsub : 0;
- int vsub = (planes[i] == 1 || planes[i] == 2) ? s->vsub : 0;
+ int hsub = s->draw.hsub[planes[i]];
+ int vsub = s->draw.vsub[planes[i]];
uint8_t *start = frame->data[planes[i]];
- uint8_t *end = start + (frame->height >> hsub) *
+ uint8_t *end = start + (frame->height >> vsub) *
frame->linesize[planes[i]];
/* amount of free space needed before the start and after the end
* of the plane */
- ptrdiff_t req_start = (s->x >> hsub) * s->line_step[planes[i]] +
+ ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] +
(s->y >> vsub) * frame->linesize[planes[i]];
ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) *
- s->line_step[planes[i]] +
- (s->y >> vsub) * frame->linesize[planes[i]];
+ s->draw.pixelstep[planes[i]] +
+ ((s->h - s->y - frame->height) >> vsub) * frame->linesize[planes[i]];
- if (frame->linesize[planes[i]] < (s->w >> hsub) * s->line_step[planes[i]])
+ if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]])
return 1;
if (start - buf->data < req_start ||
(buf->data + buf->size) - end < req_end)
return 1;
-#define SIGN(x) ((x) > 0 ? 1 : -1)
for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
- int hsub1 = (planes[j] == 1 || planes[j] == 2) ? s->hsub : 0;
+ int vsub1 = s->draw.vsub[planes[j]];
uint8_t *start1 = frame->data[planes[j]];
- uint8_t *end1 = start1 + (frame->height >> hsub1) *
+ uint8_t *end1 = start1 + (frame->height >> vsub1) *
frame->linesize[planes[j]];
if (i == j)
continue;
- if (SIGN(start - end1) != SIGN(start - end1 - req_start) ||
- SIGN(end - start1) != SIGN(end - start1 + req_end))
+ if (FFSIGN(start - end1) != FFSIGN(start - end1 - req_start) ||
+ FFSIGN(end - start1) != FFSIGN(end - start1 + req_end))
return 1;
}
}
@@ -334,7 +283,7 @@ static int frame_needs_copy(PadContext *s, AVFrame *frame)
if (!av_frame_is_writable(frame))
return 1;
- for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++)
+ for (i = 0; i < 4 && frame->buf[i]; i++)
if (buffer_needs_copy(s, frame, frame->buf[i]))
return 1;
return 0;
@@ -361,41 +310,40 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
int i;
out = in;
- for (i = 0; i < FF_ARRAY_ELEMS(out->data) && out->data[i]; i++) {
- int hsub = (i == 1 || i == 2) ? s->hsub : 0;
- int vsub = (i == 1 || i == 2) ? s->vsub : 0;
- out->data[i] -= (s->x >> hsub) * s->line_step[i] +
+ for (i = 0; i < 4 && out->data[i] && out->linesize[i]; i++) {
+ int hsub = s->draw.hsub[i];
+ int vsub = s->draw.vsub[i];
+ out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +
(s->y >> vsub) * out->linesize[i];
}
}
/* top bar */
if (s->y) {
- ff_draw_rectangle(out->data, out->linesize,
- s->line, s->line_step, s->hsub, s->vsub,
+ ff_fill_rectangle(&s->draw, &s->color,
+ out->data, out->linesize,
0, 0, s->w, s->y);
}
/* bottom bar */
if (s->h > s->y + s->in_h) {
- ff_draw_rectangle(out->data, out->linesize,
- s->line, s->line_step, s->hsub, s->vsub,
+ ff_fill_rectangle(&s->draw, &s->color,
+ out->data, out->linesize,
0, s->y + s->in_h, s->w, s->h - s->y - s->in_h);
}
/* left border */
- ff_draw_rectangle(out->data, out->linesize, s->line, s->line_step,
- s->hsub, s->vsub, 0, s->y, s->x, in->height);
+ ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
+ 0, s->y, s->x, in->height);
if (needs_copy) {
- ff_copy_rectangle(out->data, out->linesize, in->data, in->linesize,
- s->line_step, s->hsub, s->vsub,
- s->x, s->y, 0, in->width, in->height);
+ ff_copy_rectangle2(&s->draw,
+ out->data, out->linesize, in->data, in->linesize,
+ s->x, s->y, 0, 0, in->width, in->height);
}
/* right border */
- ff_draw_rectangle(out->data, out->linesize,
- s->line, s->line_step, s->hsub, s->vsub,
+ ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
s->x + s->in_w, s->y, s->w - s->x - s->in_w,
in->height);
@@ -408,24 +356,20 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
#define OFFSET(x) offsetof(PadContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
- { "height", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
- { "x", "Horizontal position of the left edge of the input video in the "
- "output video", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "y", "Vertical position of the top edge of the input video in the "
- "output video", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
- { "color", "Color of the padded area", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { NULL },
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption pad_options[] = {
+ { "width", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "w", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "height", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "h", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "color", "set the color of the padded area border", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
+ { NULL }
};
-static const AVClass pad_class = {
- .class_name = "pad",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(pad);
static const AVFilterPad avfilter_vf_pad_inputs[] = {
{
@@ -449,15 +393,10 @@ static const AVFilterPad avfilter_vf_pad_outputs[] = {
AVFilter ff_vf_pad = {
.name = "pad",
- .description = NULL_IF_CONFIG_SMALL("Pad input image to width:height[:x:y[:color]] (default x and y: 0, default color: black)."),
-
+ .description = NULL_IF_CONFIG_SMALL("Pad the input video."),
.priv_size = sizeof(PadContext),
.priv_class = &pad_class,
- .init = init,
- .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_pad_inputs,
-
- .outputs = avfilter_vf_pad_outputs,
+ .inputs = avfilter_vf_pad_inputs,
+ .outputs = avfilter_vf_pad_outputs,
};
diff --git a/libavfilter/vf_perspective.c b/libavfilter/vf_perspective.c
new file mode 100644
index 0000000..bf06d02
--- /dev/null
+++ b/libavfilter/vf_perspective.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/eval.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define SUB_PIXEL_BITS 8
+#define SUB_PIXELS (1 << SUB_PIXEL_BITS)
+#define COEFF_BITS 11
+
+#define LINEAR 0
+#define CUBIC 1
+
+typedef struct PerspectiveContext {
+ const AVClass *class;
+ char *expr_str[4][2];
+ double ref[4][2];
+ int32_t (*pv)[2];
+ int32_t coeff[SUB_PIXELS][4];
+ int interpolation;
+ int linesize[4];
+ int height[4];
+ int hsub, vsub;
+ int nb_planes;
+ int sense;
+
+ int (*perspective)(AVFilterContext *ctx,
+ void *arg, int job, int nb_jobs);
+} PerspectiveContext;
+
+#define OFFSET(x) offsetof(PerspectiveContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+enum PERSPECTIVESense {
+ PERSPECTIVE_SENSE_SOURCE = 0, ///< coordinates give locations in source of corners of destination.
+ PERSPECTIVE_SENSE_DESTINATION = 1, ///< coordinates give locations in destination of corners of source.
+};
+
+static const AVOption perspective_options[] = {
+ { "x0", "set top left x coordinate", OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "y0", "set top left y coordinate", OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "x1", "set top right x coordinate", OFFSET(expr_str[1][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
+ { "y1", "set top right y coordinate", OFFSET(expr_str[1][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "x2", "set bottom left x coordinate", OFFSET(expr_str[2][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
+ { "y2", "set bottom left y coordinate", OFFSET(expr_str[2][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
+ { "x3", "set bottom right x coordinate", OFFSET(expr_str[3][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
+ { "y3", "set bottom right y coordinate", OFFSET(expr_str[3][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
+ { "interpolation", "set interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, 1, FLAGS, "interpolation" },
+ { "linear", "", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "interpolation" },
+ { "cubic", "", 0, AV_OPT_TYPE_CONST, {.i64=CUBIC}, 0, 0, FLAGS, "interpolation" },
+ { "sense", "specify the sense of the coordinates", OFFSET(sense), AV_OPT_TYPE_INT, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 1, FLAGS, "sense"},
+ { "source", "specify locations in source to send to corners in destination",
+ 0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 0, FLAGS, "sense"},
+ { "destination", "specify locations in destination to send corners of source",
+ 0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_DESTINATION}, 0, 0, FLAGS, "sense"},
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(perspective);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static inline double get_coeff(double d)
+{
+ double coeff, A = -0.60;
+
+ d = fabs(d);
+
+ if (d < 1.0)
+ coeff = (1.0 - (A + 3.0) * d * d + (A + 2.0) * d * d * d);
+ else if (d < 2.0)
+ coeff = (-4.0 * A + 8.0 * A * d - 5.0 * A * d * d + A * d * d * d);
+ else
+ coeff = 0.0;
+
+ return coeff;
+}
+
+static const char *const var_names[] = { "W", "H", NULL };
+enum { VAR_W, VAR_H, VAR_VARS_NB };
+
+static int config_input(AVFilterLink *inlink)
+{
+ double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
+ double t0, t1, t2, t3;
+ AVFilterContext *ctx = inlink->dst;
+ PerspectiveContext *s = ctx->priv;
+ double (*ref)[2] = s->ref;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h };
+ int h = inlink->h;
+ int w = inlink->w;
+ int x, y, i, j, ret;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 2; j++) {
+ if (!s->expr_str[i][j])
+ return AVERROR(EINVAL);
+ ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
+ var_names, &values[0],
+ NULL, NULL, NULL, NULL,
+ 0, 0, ctx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->height[1] = s->height[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->height[0] = s->height[3] = inlink->h;
+
+ s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
+ if (!s->pv)
+ return AVERROR(ENOMEM);
+
+ switch (s->sense) {
+ case PERSPECTIVE_SENSE_SOURCE:
+ x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
+ (ref[2][1] - ref[3][1]) -
+ ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
+ (ref[2][0] - ref[3][0])) * h;
+ x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
+ (ref[1][0] - ref[3][0]) -
+ ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
+ (ref[1][1] - ref[3][1])) * w;
+ q = ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
+ ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);
+
+ x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
+ x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
+ x2 = q * ref[0][0] * w * h;
+ x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
+ x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
+ x5 = q * ref[0][1] * w * h;
+ x8 = q * w * h;
+ break;
+ case PERSPECTIVE_SENSE_DESTINATION:
+ t0 = ref[0][0] * (ref[3][1] - ref[1][1]) +
+ ref[1][0] * (ref[0][1] - ref[3][1]) +
+ ref[3][0] * (ref[1][1] - ref[0][1]);
+ t1 = ref[1][0] * (ref[2][1] - ref[3][1]) +
+ ref[2][0] * (ref[3][1] - ref[1][1]) +
+ ref[3][0] * (ref[1][1] - ref[2][1]);
+ t2 = ref[0][0] * (ref[3][1] - ref[2][1]) +
+ ref[2][0] * (ref[0][1] - ref[3][1]) +
+ ref[3][0] * (ref[2][1] - ref[0][1]);
+ t3 = ref[0][0] * (ref[1][1] - ref[2][1]) +
+ ref[1][0] * (ref[2][1] - ref[0][1]) +
+ ref[2][0] * (ref[0][1] - ref[1][1]);
+
+ x0 = t0 * t1 * w * (ref[2][1] - ref[0][1]);
+ x1 = t0 * t1 * w * (ref[0][0] - ref[2][0]);
+ x2 = t0 * t1 * w * (ref[0][1] * ref[2][0] - ref[0][0] * ref[2][1]);
+ x3 = t1 * t2 * h * (ref[1][1] - ref[0][1]);
+ x4 = t1 * t2 * h * (ref[0][0] - ref[1][0]);
+ x5 = t1 * t2 * h * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]);
+ x6 = t1 * t2 * (ref[1][1] - ref[0][1]) +
+ t0 * t3 * (ref[2][1] - ref[3][1]);
+ x7 = t1 * t2 * (ref[0][0] - ref[1][0]) +
+ t0 * t3 * (ref[3][0] - ref[2][0]);
+ x8 = t1 * t2 * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]) +
+ t0 * t3 * (ref[2][0] * ref[3][1] - ref[2][1] * ref[3][0]);
+ break;
+ }
+
+ for (y = 0; y < h; y++){
+ for (x = 0; x < w; x++){
+ int u, v;
+
+ u = (int)floor(SUB_PIXELS * (x0 * x + x1 * y + x2) /
+ (x6 * x + x7 * y + x8) + 0.5);
+ v = (int)floor(SUB_PIXELS * (x3 * x + x4 * y + x5) /
+ (x6 * x + x7 * y + x8) + 0.5);
+
+ s->pv[x + y * w][0] = u;
+ s->pv[x + y * w][1] = v;
+ }
+ }
+
+ for (i = 0; i < SUB_PIXELS; i++){
+ double d = i / (double)SUB_PIXELS;
+ double temp[4];
+ double sum = 0;
+
+ for (j = 0; j < 4; j++)
+ temp[j] = get_coeff(j - d - 1);
+
+ for (j = 0; j < 4; j++)
+ sum += temp[j];
+
+ for (j = 0; j < 4; j++)
+ s->coeff[i][j] = (int)floor((1 << COEFF_BITS) * temp[j] / sum + 0.5);
+ }
+
+ return 0;
+}
+
+typedef struct ThreadData {
+ uint8_t *dst;
+ int dst_linesize;
+ uint8_t *src;
+ int src_linesize;
+ int w, h;
+ int hsub, vsub;
+} ThreadData;
+
+static int resample_cubic(AVFilterContext *ctx, void *arg,
+ int job, int nb_jobs)
+{
+ PerspectiveContext *s = ctx->priv;
+ ThreadData *td = arg;
+ uint8_t *dst = td->dst;
+ int dst_linesize = td->dst_linesize;
+ uint8_t *src = td->src;
+ int src_linesize = td->src_linesize;
+ int w = td->w;
+ int h = td->h;
+ int hsub = td->hsub;
+ int vsub = td->vsub;
+ int start = (h * job) / nb_jobs;
+ int end = (h * (job+1)) / nb_jobs;
+ const int linesize = s->linesize[0];
+ int x, y;
+
+ for (y = start; y < end; y++) {
+ int sy = y << vsub;
+ for (x = 0; x < w; x++) {
+ int u, v, subU, subV, sum, sx;
+
+ sx = x << hsub;
+ u = s->pv[sx + sy * linesize][0] >> hsub;
+ v = s->pv[sx + sy * linesize][1] >> vsub;
+ subU = u & (SUB_PIXELS - 1);
+ subV = v & (SUB_PIXELS - 1);
+ u >>= SUB_PIXEL_BITS;
+ v >>= SUB_PIXEL_BITS;
+
+ if (u > 0 && v > 0 && u < w - 2 && v < h - 2){
+ const int index = u + v*src_linesize;
+ const int a = s->coeff[subU][0];
+ const int b = s->coeff[subU][1];
+ const int c = s->coeff[subU][2];
+ const int d = s->coeff[subU][3];
+
+ sum = s->coeff[subV][0] * (a * src[index - 1 - src_linesize] + b * src[index - 0 - src_linesize] +
+ c * src[index + 1 - src_linesize] + d * src[index + 2 - src_linesize]) +
+ s->coeff[subV][1] * (a * src[index - 1 ] + b * src[index - 0 ] +
+ c * src[index + 1 ] + d * src[index + 2 ]) +
+ s->coeff[subV][2] * (a * src[index - 1 + src_linesize] + b * src[index - 0 + src_linesize] +
+ c * src[index + 1 + src_linesize] + d * src[index + 2 + src_linesize]) +
+ s->coeff[subV][3] * (a * src[index - 1 + 2 * src_linesize] + b * src[index - 0 + 2 * src_linesize] +
+ c * src[index + 1 + 2 * src_linesize] + d * src[index + 2 + 2 * src_linesize]);
+ } else {
+ int dx, dy;
+
+ sum = 0;
+
+ for (dy = 0; dy < 4; dy++) {
+ int iy = v + dy - 1;
+
+ if (iy < 0)
+ iy = 0;
+ else if (iy >= h)
+ iy = h-1;
+ for (dx = 0; dx < 4; dx++) {
+ int ix = u + dx - 1;
+
+ if (ix < 0)
+ ix = 0;
+ else if (ix >= w)
+ ix = w - 1;
+
+ sum += s->coeff[subU][dx] * s->coeff[subV][dy] * src[ ix + iy * src_linesize];
+ }
+ }
+ }
+
+ sum = (sum + (1<<(COEFF_BITS * 2 - 1))) >> (COEFF_BITS * 2);
+ sum = av_clip(sum, 0, 255);
+ dst[x + y * dst_linesize] = sum;
+ }
+ }
+ return 0;
+}
+
+static int resample_linear(AVFilterContext *ctx, void *arg,
+ int job, int nb_jobs)
+{
+ PerspectiveContext *s = ctx->priv;
+ ThreadData *td = arg;
+ uint8_t *dst = td->dst;
+ int dst_linesize = td->dst_linesize;
+ uint8_t *src = td->src;
+ int src_linesize = td->src_linesize;
+ int w = td->w;
+ int h = td->h;
+ int hsub = td->hsub;
+ int vsub = td->vsub;
+ int start = (h * job) / nb_jobs;
+ int end = (h * (job+1)) / nb_jobs;
+ const int linesize = s->linesize[0];
+ int x, y;
+
+ for (y = start; y < end; y++){
+ int sy = y << vsub;
+ for (x = 0; x < w; x++){
+ int u, v, subU, subV, sum, sx, index, subUI, subVI;
+
+ sx = x << hsub;
+ u = s->pv[sx + sy * linesize][0] >> hsub;
+ v = s->pv[sx + sy * linesize][1] >> vsub;
+ subU = u & (SUB_PIXELS - 1);
+ subV = v & (SUB_PIXELS - 1);
+ u >>= SUB_PIXEL_BITS;
+ v >>= SUB_PIXEL_BITS;
+
+ index = u + v * src_linesize;
+ subUI = SUB_PIXELS - subU;
+ subVI = SUB_PIXELS - subV;
+
+ if ((unsigned)u < (unsigned)(w - 1)){
+ if((unsigned)v < (unsigned)(h - 1)){
+ sum = subVI * (subUI * src[index] + subU * src[index + 1]) +
+ subV * (subUI * src[index + src_linesize] + subU * src[index + src_linesize + 1]);
+ sum = (sum + (1 << (SUB_PIXEL_BITS * 2 - 1)))>> (SUB_PIXEL_BITS * 2);
+ } else {
+ if (v < 0)
+ v = 0;
+ else
+ v = h - 1;
+ index = u + v * src_linesize;
+ sum = subUI * src[index] + subU * src[index + 1];
+ sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
+ }
+ } else {
+ if (u < 0)
+ u = 0;
+ else
+ u = w - 1;
+ if ((unsigned)v < (unsigned)(h - 1)){
+ index = u + v * src_linesize;
+ sum = subVI * src[index] + subV * src[index + src_linesize];
+ sum = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
+ } else {
+ if (v < 0)
+ v = 0;
+ else
+ v = h - 1;
+ index = u + v * src_linesize;
+ sum = src[index];
+ }
+ }
+
+ sum = av_clip(sum, 0, 255);
+ dst[x + y * dst_linesize] = sum;
+ }
+ }
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PerspectiveContext *s = ctx->priv;
+
+ switch (s->interpolation) {
+ case LINEAR: s->perspective = resample_linear; break;
+ case CUBIC: s->perspective = resample_cubic; break;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ PerspectiveContext *s = ctx->priv;
+ AVFrame *out;
+ int plane;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, frame);
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
+ ThreadData td = {.dst = out->data[plane],
+ .dst_linesize = out->linesize[plane],
+ .src = frame->data[plane],
+ .src_linesize = frame->linesize[plane],
+ .w = s->linesize[plane],
+ .h = s->height[plane],
+ .hsub = hsub,
+ .vsub = vsub };
+ ctx->internal->execute(ctx, s->perspective, &td, NULL, FFMIN(td.h, ctx->graph->nb_threads));
+ }
+
+ av_frame_free(&frame);
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PerspectiveContext *s = ctx->priv;
+
+ av_freep(&s->pv);
+}
+
+static const AVFilterPad perspective_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad perspective_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_perspective = {
+ .name = "perspective",
+ .description = NULL_IF_CONFIG_SMALL("Correct the perspective of video."),
+ .priv_size = sizeof(PerspectiveContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = perspective_inputs,
+ .outputs = perspective_outputs,
+ .priv_class = &perspective_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_phase.c b/libavfilter/vf_phase.c
new file mode 100644
index 0000000..82dc603
--- /dev/null
+++ b/libavfilter/vf_phase.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2004 Ville Saari
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum PhaseMode {
+ PROGRESSIVE,
+ TOP_FIRST,
+ BOTTOM_FIRST,
+ TOP_FIRST_ANALYZE,
+ BOTTOM_FIRST_ANALYZE,
+ ANALYZE,
+ FULL_ANALYZE,
+ AUTO,
+ AUTO_ANALYZE
+};
+
+typedef struct PhaseContext {
+ const AVClass *class;
+ enum PhaseMode mode;
+ AVFrame *frame; /* previous frame */
+ int nb_planes;
+ int planeheight[4];
+ int linesize[4];
+} PhaseContext;
+
+#define OFFSET(x) offsetof(PhaseContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
+
+static const AVOption phase_options[] = {
+ { "mode", "set phase mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=AUTO_ANALYZE}, PROGRESSIVE, AUTO_ANALYZE, FLAGS, "mode" },
+ CONST("p", "progressive", PROGRESSIVE, "mode"),
+ CONST("t", "top first", TOP_FIRST, "mode"),
+ CONST("b", "bottom first", BOTTOM_FIRST, "mode"),
+ CONST("T", "top first analyze", TOP_FIRST_ANALYZE, "mode"),
+ CONST("B", "bottom first analyze", BOTTOM_FIRST_ANALYZE, "mode"),
+ CONST("u", "analyze", ANALYZE, "mode"),
+ CONST("U", "full analyze", FULL_ANALYZE, "mode"),
+ CONST("a", "auto", AUTO, "mode"),
+ CONST("A", "auto analyze", AUTO_ANALYZE, "mode"),
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(phase);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ PhaseContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+/*
+ * This macro interpolates the value of both fields at a point halfway
+ * between lines and takes the squared difference. In field resolution
+ * the point is a quarter pixel below a line in one field and a quarter
+ * pixel above a line in other.
+ *
+ * (The result is actually multiplied by 25)
+ */
+#define DIFF(a, as, b, bs) (t = ((*a - b[bs]) << 2) + a[as << 1] - b[-bs], t * t)
+
+/*
+ * Find which field combination has the smallest average squared difference
+ * between the fields.
+ */
+static enum PhaseMode analyze_plane(void *ctx, enum PhaseMode mode, AVFrame *old, AVFrame *new)
+{
+ double bdiff, tdiff, pdiff, scale;
+ const int ns = new->linesize[0];
+ const int os = old->linesize[0];
+ const uint8_t *nptr = new->data[0];
+ const uint8_t *optr = old->data[0];
+ const int h = new->height;
+ const int w = new->width;
+ int bdif, tdif, pdif;
+
+ if (mode == AUTO) {
+ mode = new->interlaced_frame ? new->top_field_first ?
+ TOP_FIRST : BOTTOM_FIRST : PROGRESSIVE;
+ } else if (mode == AUTO_ANALYZE) {
+ mode = new->interlaced_frame ? new->top_field_first ?
+ TOP_FIRST_ANALYZE : BOTTOM_FIRST_ANALYZE : FULL_ANALYZE;
+ }
+
+ if (mode <= BOTTOM_FIRST) {
+ bdiff = pdiff = tdiff = 65536.0;
+ } else {
+ int top = 0, t;
+ const uint8_t *rend, *end = nptr + (h - 2) * ns;
+
+ bdiff = pdiff = tdiff = 0.0;
+
+ nptr += ns;
+ optr += os;
+ while (nptr < end) {
+ pdif = tdif = bdif = 0;
+
+ switch (mode) {
+ case TOP_FIRST_ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ tdif += DIFF(nptr, ns, optr, os);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ tdif += DIFF(optr, os, nptr, ns);
+ }
+ }
+ break;
+ case BOTTOM_FIRST_ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ bdif += DIFF(optr, os, nptr, ns);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ bdif += DIFF(nptr, ns, optr, os);
+ }
+ }
+ break;
+ case ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ tdif += DIFF(nptr, ns, optr, os);
+ bdif += DIFF(optr, os, nptr, ns);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ bdif += DIFF(nptr, ns, optr, os);
+ tdif += DIFF(optr, os, nptr, ns);
+ }
+ }
+ break;
+ case FULL_ANALYZE:
+ if (top) {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ tdif += DIFF(nptr, ns, optr, os);
+ bdif += DIFF(optr, os, nptr, ns);
+ }
+ } else {
+ for (rend = nptr + w; nptr < rend; nptr++, optr++) {
+ pdif += DIFF(nptr, ns, nptr, ns);
+ bdif += DIFF(nptr, ns, optr, os);
+ tdif += DIFF(optr, os, nptr, ns);
+ }
+ }
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ pdiff += (double)pdif;
+ tdiff += (double)tdif;
+ bdiff += (double)bdif;
+ nptr += ns - w;
+ optr += os - w;
+ top ^= 1;
+ }
+
+ scale = 1.0 / (w * (h - 3)) / 25.0;
+ pdiff *= scale;
+ tdiff *= scale;
+ bdiff *= scale;
+
+ if (mode == TOP_FIRST_ANALYZE) {
+ bdiff = 65536.0;
+ } else if (mode == BOTTOM_FIRST_ANALYZE) {
+ tdiff = 65536.0;
+ } else if (mode == ANALYZE) {
+ pdiff = 65536.0;
+ }
+
+ if (bdiff < pdiff && bdiff < tdiff) {
+ mode = BOTTOM_FIRST;
+ } else if (tdiff < pdiff && tdiff < bdiff) {
+ mode = TOP_FIRST;
+ } else {
+ mode = PROGRESSIVE;
+ }
+ }
+
+ av_log(ctx, AV_LOG_DEBUG, "mode=%c tdiff=%f bdiff=%f pdiff=%f\n",
+ mode == BOTTOM_FIRST ? 'b' : mode == TOP_FIRST ? 't' : 'p',
+ tdiff, bdiff, pdiff);
+ return mode;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ PhaseContext *s = ctx->priv;
+ enum PhaseMode mode;
+ int plane, top, y;
+ AVFrame *out;
+
+ if (ctx->is_disabled) {
+ av_frame_free(&s->frame);
+ /* we keep a reference to the previous frame so the filter can start
+ * being useful as soon as it's not disabled, avoiding the 1-frame
+ * delay. */
+ s->frame = av_frame_clone(in);
+ return ff_filter_frame(outlink, in);
+ }
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (!s->frame) {
+ s->frame = in;
+ mode = PROGRESSIVE;
+ } else {
+ mode = analyze_plane(ctx, s->mode, s->frame, in);
+ }
+
+ for (plane = 0; plane < s->nb_planes; plane++) {
+ const uint8_t *buf = s->frame->data[plane];
+ const uint8_t *from = in->data[plane];
+ uint8_t *to = out->data[plane];
+
+ for (y = 0, top = 1; y < s->planeheight[plane]; y++, top ^= 1) {
+ memcpy(to, mode == (top ? BOTTOM_FIRST : TOP_FIRST) ? buf : from, s->linesize[plane]);
+
+ buf += s->frame->linesize[plane];
+ from += in->linesize[plane];
+ to += out->linesize[plane];
+ }
+ }
+
+ if (in != s->frame)
+ av_frame_free(&s->frame);
+ s->frame = in;
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PhaseContext *s = ctx->priv;
+
+ av_frame_free(&s->frame);
+}
+
+static const AVFilterPad phase_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad phase_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_phase = {
+ .name = "phase",
+ .description = NULL_IF_CONFIG_SMALL("Phase shift fields."),
+ .priv_size = sizeof(PhaseContext),
+ .priv_class = &phase_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = phase_inputs,
+ .outputs = phase_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_pixdesctest.c b/libavfilter/vf_pixdesctest.c
index 0c5b7a1..790dd0d 100644
--- a/libavfilter/vf_pixdesctest.c
+++ b/libavfilter/vf_pixdesctest.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2009 Stefano Sabatini
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -47,7 +47,7 @@ static int config_props(AVFilterLink *inlink)
priv->pix_desc = av_pix_fmt_desc_get(inlink->format);
av_freep(&priv->line);
- if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w)))
+ if (!(priv->line = av_malloc_array(sizeof(*priv->line), inlink->w)))
return AVERROR(ENOMEM);
return 0;
@@ -59,6 +59,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int i, c, w = inlink->w, h = inlink->h;
+ const int cw = FF_CEIL_RSHIFT(w, priv->pix_desc->log2_chroma_w);
+ const int ch = FF_CEIL_RSHIFT(h, priv->pix_desc->log2_chroma_h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -69,27 +71,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
for (i = 0; i < 4; i++) {
- int h = outlink->h;
- h = i == 1 || i == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
+ const int h1 = i == 1 || i == 2 ? ch : h;
if (out->data[i]) {
uint8_t *data = out->data[i] +
- (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h-1));
- memset(data, 0, FFABS(out->linesize[i]) * h);
+ (out->linesize[i] > 0 ? 0 : out->linesize[i] * (h1-1));
+ memset(data, 0, FFABS(out->linesize[i]) * h1);
}
}
/* copy palette */
if (priv->pix_desc->flags & AV_PIX_FMT_FLAG_PAL ||
priv->pix_desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
- memcpy(out->data[1], in->data[1], 256*4);
+ memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
for (c = 0; c < priv->pix_desc->nb_components; c++) {
- int w1 = c == 1 || c == 2 ? w>>priv->pix_desc->log2_chroma_w : w;
- int h1 = c == 1 || c == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
+ const int w1 = c == 1 || c == 2 ? cw : w;
+ const int h1 = c == 1 || c == 2 ? ch : h;
for (i = 0; i < h1; i++) {
av_read_image_line(priv->line,
- in->data,
+ (void*)in->data,
in->linesize,
priv->pix_desc,
0, i, c, w1, 0);
@@ -127,11 +128,8 @@ static const AVFilterPad avfilter_vf_pixdesctest_outputs[] = {
AVFilter ff_vf_pixdesctest = {
.name = "pixdesctest",
.description = NULL_IF_CONFIG_SMALL("Test pixel format definitions."),
-
- .priv_size = sizeof(PixdescTestContext),
- .uninit = uninit,
-
- .inputs = avfilter_vf_pixdesctest_inputs,
-
- .outputs = avfilter_vf_pixdesctest_outputs,
+ .priv_size = sizeof(PixdescTestContext),
+ .uninit = uninit,
+ .inputs = avfilter_vf_pixdesctest_inputs,
+ .outputs = avfilter_vf_pixdesctest_outputs,
};
diff --git a/libavfilter/vf_pp.c b/libavfilter/vf_pp.c
new file mode 100644
index 0000000..e33ac48
--- /dev/null
+++ b/libavfilter/vf_pp.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2002 A'rpi
+ * Copyright (C) 2012 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * libpostproc filter, ported from MPlayer.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+#include "libpostproc/postprocess.h"
+
+typedef struct {
+ const AVClass *class;
+ char *subfilters;
+ int mode_id;
+ pp_mode *modes[PP_QUALITY_MAX + 1];
+ void *pp_ctx;
+} PPFilterContext;
+
+#define OFFSET(x) offsetof(PPFilterContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption pp_options[] = {
+ { "subfilters", "set postprocess subfilters", OFFSET(subfilters), AV_OPT_TYPE_STRING, {.str="de"}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pp);
+
+static av_cold int pp_init(AVFilterContext *ctx)
+{
+ int i;
+ PPFilterContext *pp = ctx->priv;
+
+ for (i = 0; i <= PP_QUALITY_MAX; i++) {
+ pp->modes[i] = pp_get_mode_by_name_and_quality(pp->subfilters, i);
+ if (!pp->modes[i])
+ return AVERROR_EXTERNAL;
+ }
+ pp->mode_id = PP_QUALITY_MAX;
+ return 0;
+}
+
+static int pp_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ PPFilterContext *pp = ctx->priv;
+
+ if (!strcmp(cmd, "quality")) {
+ pp->mode_id = av_clip(strtol(args, NULL, 10), 0, PP_QUALITY_MAX);
+ return 0;
+ }
+ return AVERROR(ENOSYS);
+}
+
+static int pp_query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int pp_config_props(AVFilterLink *inlink)
+{
+ int flags = PP_CPU_CAPS_AUTO;
+ PPFilterContext *pp = inlink->dst->priv;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_GRAY8:
+ case AV_PIX_FMT_YUVJ420P:
+ case AV_PIX_FMT_YUV420P: flags |= PP_FORMAT_420; break;
+ case AV_PIX_FMT_YUVJ422P:
+ case AV_PIX_FMT_YUV422P: flags |= PP_FORMAT_422; break;
+ case AV_PIX_FMT_YUV411P: flags |= PP_FORMAT_411; break;
+ case AV_PIX_FMT_GBRP:
+ case AV_PIX_FMT_YUVJ444P:
+ case AV_PIX_FMT_YUV444P: flags |= PP_FORMAT_444; break;
+ case AV_PIX_FMT_YUVJ440P:
+ case AV_PIX_FMT_YUV440P: flags |= PP_FORMAT_440; break;
+ default: av_assert0(0);
+ }
+
+ pp->pp_ctx = pp_get_context(inlink->w, inlink->h, flags);
+ if (!pp->pp_ctx)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static int pp_filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PPFilterContext *pp = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ const int aligned_w = FFALIGN(outlink->w, 8);
+ const int aligned_h = FFALIGN(outlink->h, 8);
+ AVFrame *outbuf;
+ int qstride, qp_type;
+ int8_t *qp_table ;
+
+ outbuf = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!outbuf) {
+ av_frame_free(&inbuf);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outbuf, inbuf);
+ outbuf->width = inbuf->width;
+ outbuf->height = inbuf->height;
+ qp_table = av_frame_get_qp_table(inbuf, &qstride, &qp_type);
+
+ pp_postprocess((const uint8_t **)inbuf->data, inbuf->linesize,
+ outbuf->data, outbuf->linesize,
+ aligned_w, outlink->h,
+ qp_table,
+ qstride,
+ pp->modes[pp->mode_id],
+ pp->pp_ctx,
+ outbuf->pict_type | (qp_type ? PP_PICT_TYPE_QP2 : 0));
+
+ av_frame_free(&inbuf);
+ return ff_filter_frame(outlink, outbuf);
+}
+
+static av_cold void pp_uninit(AVFilterContext *ctx)
+{
+ int i;
+ PPFilterContext *pp = ctx->priv;
+
+ for (i = 0; i <= PP_QUALITY_MAX; i++)
+ pp_free_mode(pp->modes[i]);
+ if (pp->pp_ctx)
+ pp_free_context(pp->pp_ctx);
+}
+
+static const AVFilterPad pp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = pp_config_props,
+ .filter_frame = pp_filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad pp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_pp = {
+ .name = "pp",
+ .description = NULL_IF_CONFIG_SMALL("Filter video using libpostproc."),
+ .priv_size = sizeof(PPFilterContext),
+ .init = pp_init,
+ .uninit = pp_uninit,
+ .query_formats = pp_query_formats,
+ .inputs = pp_inputs,
+ .outputs = pp_outputs,
+ .process_command = pp_process_command,
+ .priv_class = &pp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_psnr.c b/libavfilter/vf_psnr.c
new file mode 100644
index 0000000..082612a
--- /dev/null
+++ b/libavfilter/vf_psnr.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2011 Roger Pau Monné <roger.pau@entel.upc.edu>
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Caculate the PSNR between two input videos.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "dualinput.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct PSNRContext {
+ const AVClass *class;
+ FFDualInputContext dinput;
+ double mse, min_mse, max_mse;
+ uint64_t nb_frames;
+ FILE *stats_file;
+ char *stats_file_str;
+ int max[4], average_max;
+ int is_rgb;
+ uint8_t rgba_map[4];
+ char comps[4];
+ int nb_components;
+ int planewidth[4];
+ int planeheight[4];
+
+ void (*compute_mse)(struct PSNRContext *s,
+ const uint8_t *m[4], const int ml[4],
+ const uint8_t *r[4], const int rl[4],
+ int w, int h, double mse[4]);
+} PSNRContext;
+
+#define OFFSET(x) offsetof(PSNRContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption psnr_options[] = {
+ {"stats_file", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ {"f", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(psnr);
+
+static inline unsigned pow2(unsigned base)
+{
+ return base*base;
+}
+
+static inline double get_psnr(double mse, uint64_t nb_frames, int max)
+{
+ return 10.0 * log(pow2(max) / (mse / nb_frames)) / log(10.0);
+}
+
+static inline
+void compute_images_mse(PSNRContext *s,
+ const uint8_t *main_data[4], const int main_linesizes[4],
+ const uint8_t *ref_data[4], const int ref_linesizes[4],
+ int w, int h, double mse[4])
+{
+ int i, c, j;
+
+ for (c = 0; c < s->nb_components; c++) {
+ const int outw = s->planewidth[c];
+ const int outh = s->planeheight[c];
+ const uint8_t *main_line = main_data[c];
+ const uint8_t *ref_line = ref_data[c];
+ const int ref_linesize = ref_linesizes[c];
+ const int main_linesize = main_linesizes[c];
+ uint64_t m = 0;
+
+ for (i = 0; i < outh; i++) {
+ int m2 = 0;
+ for (j = 0; j < outw; j++)
+ m2 += pow2(main_line[j] - ref_line[j]);
+ m += m2;
+ ref_line += ref_linesize;
+ main_line += main_linesize;
+ }
+ mse[c] = m / (double)(outw * outh);
+ }
+}
+
+static inline
+void compute_images_mse_16bit(PSNRContext *s,
+ const uint8_t *main_data[4], const int main_linesizes[4],
+ const uint8_t *ref_data[4], const int ref_linesizes[4],
+ int w, int h, double mse[4])
+{
+ int i, c, j;
+
+ for (c = 0; c < s->nb_components; c++) {
+ const int outw = s->planewidth[c];
+ const int outh = s->planeheight[c];
+ const uint16_t *main_line = (uint16_t *)main_data[c];
+ const uint16_t *ref_line = (uint16_t *)ref_data[c];
+ const int ref_linesize = ref_linesizes[c] / 2;
+ const int main_linesize = main_linesizes[c] / 2;
+ uint64_t m = 0;
+
+ for (i = 0; i < outh; i++) {
+ for (j = 0; j < outw; j++)
+ m += pow2(main_line[j] - ref_line[j]);
+ ref_line += ref_linesize;
+ main_line += main_linesize;
+ }
+ mse[c] = m / (double)(outw * outh);
+ }
+}
+
+static void set_meta(AVDictionary **metadata, const char *key, char comp, float d)
+{
+ char value[128];
+ snprintf(value, sizeof(value), "%0.2f", d);
+ if (comp) {
+ char key2[128];
+ snprintf(key2, sizeof(key2), "%s%c", key, comp);
+ av_dict_set(metadata, key2, value, 0);
+ } else {
+ av_dict_set(metadata, key, value, 0);
+ }
+}
+
+static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
+ const AVFrame *ref)
+{
+ PSNRContext *s = ctx->priv;
+ double comp_mse[4], mse = 0;
+ int j, c;
+ AVDictionary **metadata = avpriv_frame_get_metadatap(main);
+
+ s->compute_mse(s, (const uint8_t **)main->data, main->linesize,
+ (const uint8_t **)ref->data, ref->linesize,
+ main->width, main->height, comp_mse);
+
+ for (j = 0; j < s->nb_components; j++)
+ mse += comp_mse[j];
+ mse /= s->nb_components;
+
+ s->min_mse = FFMIN(s->min_mse, mse);
+ s->max_mse = FFMAX(s->max_mse, mse);
+
+ s->mse += mse;
+ s->nb_frames++;
+
+ for (j = 0; j < s->nb_components; j++) {
+ c = s->is_rgb ? s->rgba_map[j] : j;
+ set_meta(metadata, "lavfi.psnr.mse.", s->comps[j], comp_mse[c]);
+ set_meta(metadata, "lavfi.psnr.mse_avg", 0, mse);
+ set_meta(metadata, "lavfi.psnr.psnr.", s->comps[j], get_psnr(comp_mse[c], 1, s->max[c]));
+ set_meta(metadata, "lavfi.psnr.psnr_avg", 0, get_psnr(mse, 1, s->average_max));
+ }
+
+ if (s->stats_file) {
+ fprintf(s->stats_file, "n:%"PRId64" mse_avg:%0.2f ", s->nb_frames, mse);
+ for (j = 0; j < s->nb_components; j++) {
+ c = s->is_rgb ? s->rgba_map[j] : j;
+ fprintf(s->stats_file, "mse_%c:%0.2f ", s->comps[j], comp_mse[c]);
+ }
+ for (j = 0; j < s->nb_components; j++) {
+ c = s->is_rgb ? s->rgba_map[j] : j;
+ fprintf(s->stats_file, "psnr_%c:%0.2f ", s->comps[j],
+ get_psnr(comp_mse[c], 1, s->max[c]));
+ }
+ fprintf(s->stats_file, "\n");
+ }
+
+ return main;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ PSNRContext *s = ctx->priv;
+
+ s->min_mse = +INFINITY;
+ s->max_mse = -INFINITY;
+
+ if (s->stats_file_str) {
+ s->stats_file = fopen(s->stats_file_str, "w");
+ if (!s->stats_file) {
+ int err = AVERROR(errno);
+ char buf[128];
+ av_strerror(err, buf, sizeof(buf));
+ av_log(ctx, AV_LOG_ERROR, "Could not open stats file %s: %s\n",
+ s->stats_file_str, buf);
+ return err;
+ }
+ }
+
+ s->dinput.process = do_psnr;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
+#define PF_ALPHA(suf) AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
+#define PF(suf) PF_NOALPHA(suf), PF_ALPHA(suf)
+ PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP16,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input_ref(AVFilterLink *inlink)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ AVFilterContext *ctx = inlink->dst;
+ PSNRContext *s = ctx->priv;
+ int j;
+
+ s->nb_components = desc->nb_components;
+ if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
+ ctx->inputs[0]->h != ctx->inputs[1]->h) {
+ av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
+ return AVERROR(EINVAL);
+ }
+ if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
+ av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
+ return AVERROR(EINVAL);
+ }
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_GRAY8:
+ case AV_PIX_FMT_GRAY16:
+ case AV_PIX_FMT_GBRP:
+ case AV_PIX_FMT_GBRP9:
+ case AV_PIX_FMT_GBRP10:
+ case AV_PIX_FMT_GBRP12:
+ case AV_PIX_FMT_GBRP14:
+ case AV_PIX_FMT_GBRP16:
+ case AV_PIX_FMT_GBRAP:
+ case AV_PIX_FMT_GBRAP16:
+ case AV_PIX_FMT_YUVJ411P:
+ case AV_PIX_FMT_YUVJ420P:
+ case AV_PIX_FMT_YUVJ422P:
+ case AV_PIX_FMT_YUVJ440P:
+ case AV_PIX_FMT_YUVJ444P:
+ s->max[0] = (1 << (desc->comp[0].depth_minus1 + 1)) - 1;
+ s->max[1] = (1 << (desc->comp[1].depth_minus1 + 1)) - 1;
+ s->max[2] = (1 << (desc->comp[2].depth_minus1 + 1)) - 1;
+ s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
+ break;
+ default:
+ s->max[0] = 235 * (1 << (desc->comp[0].depth_minus1 - 7));
+ s->max[1] = 240 * (1 << (desc->comp[1].depth_minus1 - 7));
+ s->max[2] = 240 * (1 << (desc->comp[2].depth_minus1 - 7));
+ s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
+ }
+
+ s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
+ s->comps[0] = s->is_rgb ? 'r' : 'y' ;
+ s->comps[1] = s->is_rgb ? 'g' : 'u' ;
+ s->comps[2] = s->is_rgb ? 'b' : 'v' ;
+ s->comps[3] = 'a';
+
+ for (j = 0; j < s->nb_components; j++)
+ s->average_max += s->max[j];
+ s->average_max /= s->nb_components;
+
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ s->compute_mse = desc->comp[0].depth_minus1 > 7 ? compute_images_mse_16bit : compute_images_mse;
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ PSNRContext *s = ctx->priv;
+ AVFilterLink *mainlink = ctx->inputs[0];
+ int ret;
+
+ outlink->w = mainlink->w;
+ outlink->h = mainlink->h;
+ outlink->time_base = mainlink->time_base;
+ outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
+ outlink->frame_rate = mainlink->frame_rate;
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ PSNRContext *s = inlink->dst->priv;
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ PSNRContext *s = outlink->src->priv;
+ return ff_dualinput_request_frame(&s->dinput, outlink);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PSNRContext *s = ctx->priv;
+
+ if (s->nb_frames > 0) {
+ av_log(ctx, AV_LOG_INFO, "PSNR average:%0.2f min:%0.2f max:%0.2f\n",
+ get_psnr(s->mse, s->nb_frames, s->average_max),
+ get_psnr(s->max_mse, 1, s->average_max),
+ get_psnr(s->min_mse, 1, s->average_max));
+ }
+
+ ff_dualinput_uninit(&s->dinput);
+
+ if (s->stats_file)
+ fclose(s->stats_file);
+}
+
+static const AVFilterPad psnr_inputs[] = {
+ {
+ .name = "main",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },{
+ .name = "reference",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input_ref,
+ },
+ { NULL }
+};
+
+static const AVFilterPad psnr_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_psnr = {
+ .name = "psnr",
+ .description = NULL_IF_CONFIG_SMALL("Calculate the PSNR between two video streams."),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(PSNRContext),
+ .priv_class = &psnr_class,
+ .inputs = psnr_inputs,
+ .outputs = psnr_outputs,
+};
diff --git a/libavfilter/vf_pullup.c b/libavfilter/vf_pullup.c
new file mode 100644
index 0000000..c38d0bb
--- /dev/null
+++ b/libavfilter/vf_pullup.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2003 Rich Felker
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "vf_pullup.h"
+
+#define F_HAVE_BREAKS 1
+#define F_HAVE_AFFINITY 2
+
+#define BREAK_LEFT 1
+#define BREAK_RIGHT 2
+
+#define OFFSET(x) offsetof(PullupContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption pullup_options[] = {
+ { "jl", "set left junk size", OFFSET(junk_left), AV_OPT_TYPE_INT, {.i64=1}, 0, INT_MAX, FLAGS },
+ { "jr", "set right junk size", OFFSET(junk_right), AV_OPT_TYPE_INT, {.i64=1}, 0, INT_MAX, FLAGS },
+ { "jt", "set top junk size", OFFSET(junk_top), AV_OPT_TYPE_INT, {.i64=4}, 1, INT_MAX, FLAGS },
+ { "jb", "set bottom junk size", OFFSET(junk_bottom), AV_OPT_TYPE_INT, {.i64=4}, 1, INT_MAX, FLAGS },
+ { "sb", "set strict breaks", OFFSET(strict_breaks), AV_OPT_TYPE_INT, {.i64=0},-1, 1, FLAGS },
+ { "mp", "set metric plane", OFFSET(metric_plane), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "mp" },
+ { "y", "luma", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mp" },
+ { "u", "chroma blue", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mp" },
+ { "v", "chroma red", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mp" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(pullup);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+#define ABS(a) (((a) ^ ((a) >> 31)) - ((a) >> 31))
+
+static int diff_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
+{
+ int i, j, diff = 0;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ diff += ABS(a[j] - b[j]);
+ a += s;
+ b += s;
+ }
+
+ return diff;
+}
+
+static int comb_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
+{
+ int i, j, comb = 0;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ comb += ABS((a[j] << 1) - b[j - s] - b[j ]) +
+ ABS((b[j] << 1) - a[j ] - a[j + s]);
+ a += s;
+ b += s;
+ }
+
+ return comb;
+}
+
+static int var_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
+{
+ int i, j, var = 0;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 8; j++)
+ var += ABS(a[j] - a[j + s]);
+ a += s;
+ }
+
+ return 4 * var; /* match comb scaling */
+}
+
+static int alloc_metrics(PullupContext *s, PullupField *f)
+{
+ f->diffs = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->diffs));
+ f->combs = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->combs));
+ f->vars = av_calloc(FFALIGN(s->metric_length, 16), sizeof(*f->vars));
+
+ if (!f->diffs || !f->combs || !f->vars) {
+ av_freep(&f->diffs);
+ av_freep(&f->combs);
+ av_freep(&f->vars);
+ return AVERROR(ENOMEM);
+ }
+ return 0;
+}
+
+static void free_field_queue(PullupField *head)
+{
+ PullupField *f = head;
+ do {
+ PullupField *next;
+ if (!f)
+ break;
+ av_free(f->diffs);
+ av_free(f->combs);
+ av_free(f->vars);
+ next = f->next;
+ memset(f, 0, sizeof(*f)); // clear all pointers to avoid stale ones
+ av_free(f);
+ f = next;
+ } while (f != head);
+}
+
+static PullupField *make_field_queue(PullupContext *s, int len)
+{
+ PullupField *head, *f;
+
+ f = head = av_mallocz(sizeof(*head));
+ if (!f)
+ return NULL;
+
+ if (alloc_metrics(s, f) < 0) {
+ av_free(f);
+ return NULL;
+ }
+
+ for (; len > 0; len--) {
+ f->next = av_mallocz(sizeof(*f->next));
+ if (!f->next) {
+ free_field_queue(head);
+ return NULL;
+ }
+
+ f->next->prev = f;
+ f = f->next;
+ if (alloc_metrics(s, f) < 0) {
+ free_field_queue(head);
+ return NULL;
+ }
+ }
+
+ f->next = head;
+ head->prev = f;
+
+ return head;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ PullupContext *s = ctx->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int mp = s->metric_plane;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ if (mp + 1 > s->nb_planes) {
+ av_log(ctx, AV_LOG_ERROR, "input format does not have such plane\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+
+ s->metric_w = (s->planewidth[mp] - ((s->junk_left + s->junk_right) << 3)) >> 3;
+ s->metric_h = (s->planeheight[mp] - ((s->junk_top + s->junk_bottom) << 1)) >> 3;
+ s->metric_offset = (s->junk_left << 3) + (s->junk_top << 1) * s->planewidth[mp];
+ s->metric_length = s->metric_w * s->metric_h;
+
+ av_log(ctx, AV_LOG_DEBUG, "w: %d h: %d\n", s->metric_w, s->metric_h);
+ av_log(ctx, AV_LOG_DEBUG, "offset: %d length: %d\n", s->metric_offset, s->metric_length);
+
+ s->head = make_field_queue(s, 8);
+ if (!s->head)
+ return AVERROR(ENOMEM);
+
+ s->diff = diff_c;
+ s->comb = comb_c;
+ s->var = var_c;
+
+ if (ARCH_X86)
+ ff_pullup_init_x86(s);
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ return 0;
+}
+
+static PullupBuffer *pullup_lock_buffer(PullupBuffer *b, int parity)
+{
+ if (!b)
+ return NULL;
+
+ if ((parity + 1) & 1)
+ b->lock[0]++;
+ if ((parity + 1) & 2)
+ b->lock[1]++;
+
+ return b;
+}
+
+static void pullup_release_buffer(PullupBuffer *b, int parity)
+{
+ if (!b)
+ return;
+
+ if ((parity + 1) & 1)
+ b->lock[0]--;
+ if ((parity + 1) & 2)
+ b->lock[1]--;
+}
+
+static int alloc_buffer(PullupContext *s, PullupBuffer *b)
+{
+ int i;
+
+ if (b->planes[0])
+ return 0;
+ for (i = 0; i < s->nb_planes; i++) {
+ b->planes[i] = av_malloc(s->planeheight[i] * s->planewidth[i]);
+ }
+ if (s->nb_planes == 1)
+ b->planes[1] = av_malloc(4*256);
+
+ return 0;
+}
+
+static PullupBuffer *pullup_get_buffer(PullupContext *s, int parity)
+{
+ int i;
+
+ /* Try first to get the sister buffer for the previous field */
+ if (parity < 2 && s->last && parity != s->last->parity
+ && !s->last->buffer->lock[parity]) {
+ alloc_buffer(s, s->last->buffer);
+ return pullup_lock_buffer(s->last->buffer, parity);
+ }
+
+ /* Prefer a buffer with both fields open */
+ for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
+ if (s->buffers[i].lock[0])
+ continue;
+ if (s->buffers[i].lock[1])
+ continue;
+ alloc_buffer(s, &s->buffers[i]);
+ return pullup_lock_buffer(&s->buffers[i], parity);
+ }
+
+ if (parity == 2)
+ return 0;
+
+ /* Search for any half-free buffer */
+ for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
+ if (((parity + 1) & 1) && s->buffers[i].lock[0])
+ continue;
+ if (((parity + 1) & 2) && s->buffers[i].lock[1])
+ continue;
+ alloc_buffer(s, &s->buffers[i]);
+ return pullup_lock_buffer(&s->buffers[i], parity);
+ }
+
+ return NULL;
+}
+
+static int queue_length(PullupField *begin, PullupField *end)
+{
+ PullupField *f;
+ int count = 1;
+
+ if (!begin || !end)
+ return 0;
+
+ for (f = begin; f != end; f = f->next)
+ count++;
+
+ return count;
+}
+
+static int find_first_break(PullupField *f, int max)
+{
+ int i;
+
+ for (i = 0; i < max; i++) {
+ if (f->breaks & BREAK_RIGHT || f->next->breaks & BREAK_LEFT)
+ return i + 1;
+ f = f->next;
+ }
+
+ return 0;
+}
+
+static void compute_breaks(PullupContext *s, PullupField *f0)
+{
+ PullupField *f1 = f0->next;
+ PullupField *f2 = f1->next;
+ PullupField *f3 = f2->next;
+ int i, l, max_l = 0, max_r = 0;
+
+ if (f0->flags & F_HAVE_BREAKS)
+ return;
+
+ f0->flags |= F_HAVE_BREAKS;
+
+ /* Special case when fields are 100% identical */
+ if (f0->buffer == f2->buffer && f1->buffer != f3->buffer) {
+ f2->breaks |= BREAK_RIGHT;
+ return;
+ }
+
+ if (f0->buffer != f2->buffer && f1->buffer == f3->buffer) {
+ f1->breaks |= BREAK_LEFT;
+ return;
+ }
+
+ for (i = 0; i < s->metric_length; i++) {
+ l = f2->diffs[i] - f3->diffs[i];
+
+ if ( l > max_l)
+ max_l = l;
+ if (-l > max_r)
+ max_r = -l;
+ }
+
+ /* Don't get tripped up when differences are mostly quant error */
+ if (max_l + max_r < 128)
+ return;
+ if (max_l > 4 * max_r)
+ f1->breaks |= BREAK_LEFT;
+ if (max_r > 4 * max_l)
+ f2->breaks |= BREAK_RIGHT;
+}
+
+static void compute_affinity(PullupContext *s, PullupField *f)
+{
+ int i, max_l = 0, max_r = 0, l;
+
+ if (f->flags & F_HAVE_AFFINITY)
+ return;
+
+ f->flags |= F_HAVE_AFFINITY;
+
+ if (f->buffer == f->next->next->buffer) {
+ f->affinity = 1;
+ f->next->affinity = 0;
+ f->next->next->affinity = -1;
+ f->next->flags |= F_HAVE_AFFINITY;
+ f->next->next->flags |= F_HAVE_AFFINITY;
+ return;
+ }
+
+ for (i = 0; i < s->metric_length; i++) {
+ int v = f->vars[i];
+ int lv = f->prev->vars[i];
+ int rv = f->next->vars[i];
+ int lc = f-> combs[i] - 2*(v < lv ? v : lv);
+ int rc = f->next->combs[i] - 2*(v < rv ? v : rv);
+
+ lc = FFMAX(lc, 0);
+ rc = FFMAX(rc, 0);
+ l = lc - rc;
+
+ if ( l > max_l)
+ max_l = l;
+ if (-l > max_r)
+ max_r = -l;
+ }
+
+ if (max_l + max_r < 64)
+ return;
+
+ if (max_r > 6 * max_l)
+ f->affinity = -1;
+ else if (max_l > 6 * max_r)
+ f->affinity = 1;
+}
+
+static int decide_frame_length(PullupContext *s)
+{
+ PullupField *f0 = s->first;
+ PullupField *f1 = f0->next;
+ PullupField *f2 = f1->next;
+ PullupField *f;
+ int i, l, n;
+
+ if (queue_length(s->first, s->last) < 4)
+ return 0;
+
+ f = s->first;
+ n = queue_length(f, s->last);
+ for (i = 0; i < n - 1; i++) {
+ if (i < n - 3)
+ compute_breaks(s, f);
+
+ compute_affinity(s, f);
+
+ f = f->next;
+ }
+
+ if (f0->affinity == -1)
+ return 1;
+
+ l = find_first_break(f0, 3);
+
+ if (l == 1 && s->strict_breaks < 0)
+ l = 0;
+
+ switch (l) {
+ case 1:
+ return 1 + (s->strict_breaks < 1 && f0->affinity == 1 && f1->affinity == -1);
+ case 2:
+ /* FIXME: strictly speaking, f0->prev is no longer valid... :) */
+ if (s->strict_pairs
+ && (f0->prev->breaks & BREAK_RIGHT) && (f2->breaks & BREAK_LEFT)
+ && (f0->affinity != 1 || f1->affinity != -1) )
+ return 1;
+ return 1 + (f1->affinity != 1);
+ case 3:
+ return 2 + (f2->affinity != 1);
+ default:
+ /* 9 possibilities covered before switch */
+ if (f1->affinity == 1)
+ return 1; /* covers 6 */
+ else if (f1->affinity == -1)
+ return 2; /* covers 6 */
+ else if (f2->affinity == -1) { /* covers 2 */
+ return (f0->affinity == 1) ? 3 : 1;
+ } else {
+ return 2; /* the remaining 6 */
+ }
+ }
+}
+
+static PullupFrame *pullup_get_frame(PullupContext *s)
+{
+ PullupFrame *fr = &s->frame;
+ int i, n = decide_frame_length(s);
+ int aff = s->first->next->affinity;
+
+ av_assert1(n < FF_ARRAY_ELEMS(fr->ifields));
+ if (!n || fr->lock)
+ return NULL;
+
+ fr->lock++;
+ fr->length = n;
+ fr->parity = s->first->parity;
+ fr->buffer = 0;
+
+ for (i = 0; i < n; i++) {
+ /* We cheat and steal the buffer without release+relock */
+ fr->ifields[i] = s->first->buffer;
+ s->first->buffer = 0;
+ s->first = s->first->next;
+ }
+
+ if (n == 1) {
+ fr->ofields[fr->parity ] = fr->ifields[0];
+ fr->ofields[fr->parity ^ 1] = 0;
+ } else if (n == 2) {
+ fr->ofields[fr->parity ] = fr->ifields[0];
+ fr->ofields[fr->parity ^ 1] = fr->ifields[1];
+ } else if (n == 3) {
+ if (!aff)
+ aff = (fr->ifields[0] == fr->ifields[1]) ? -1 : 1;
+ fr->ofields[fr->parity ] = fr->ifields[1 + aff];
+ fr->ofields[fr->parity ^ 1] = fr->ifields[1 ];
+ }
+
+ pullup_lock_buffer(fr->ofields[0], 0);
+ pullup_lock_buffer(fr->ofields[1], 1);
+
+ if (fr->ofields[0] == fr->ofields[1]) {
+ fr->buffer = fr->ofields[0];
+ pullup_lock_buffer(fr->buffer, 2);
+ return fr;
+ }
+
+ return fr;
+}
+
+static void pullup_release_frame(PullupFrame *f)
+{
+ int i;
+
+ for (i = 0; i < f->length; i++)
+ pullup_release_buffer(f->ifields[i], f->parity ^ (i & 1));
+
+ pullup_release_buffer(f->ofields[0], 0);
+ pullup_release_buffer(f->ofields[1], 1);
+
+ if (f->buffer)
+ pullup_release_buffer(f->buffer, 2);
+ f->lock--;
+}
+
+static void compute_metric(PullupContext *s, int *dest,
+ PullupField *fa, int pa, PullupField *fb, int pb,
+ int (*func)(const uint8_t *, const uint8_t *, ptrdiff_t))
+{
+ int mp = s->metric_plane;
+ int xstep = 8;
+ int ystep = s->planewidth[mp] << 3;
+ int stride = s->planewidth[mp] << 1; /* field stride */
+ int w = s->metric_w * xstep;
+ uint8_t *a, *b;
+ int x, y;
+
+ if (!fa->buffer || !fb->buffer)
+ return;
+
+ /* Shortcut for duplicate fields (e.g. from RFF flag) */
+ if (fa->buffer == fb->buffer && pa == pb) {
+ memset(dest, 0, s->metric_length * sizeof(*dest));
+ return;
+ }
+
+ a = fa->buffer->planes[mp] + pa * s->planewidth[mp] + s->metric_offset;
+ b = fb->buffer->planes[mp] + pb * s->planewidth[mp] + s->metric_offset;
+
+ for (y = 0; y < s->metric_h; y++) {
+ for (x = 0; x < w; x += xstep)
+ *dest++ = func(a + x, b + x, stride);
+ a += ystep; b += ystep;
+ }
+}
+
+static int check_field_queue(PullupContext *s)
+{
+ int ret;
+
+ if (s->head->next == s->first) {
+ PullupField *f = av_mallocz(sizeof(*f));
+
+ if (!f)
+ return AVERROR(ENOMEM);
+
+ if ((ret = alloc_metrics(s, f)) < 0) {
+ av_free(f);
+ return ret;
+ }
+
+ f->prev = s->head;
+ f->next = s->first;
+ s->head->next = f;
+ s->first->prev = f;
+ }
+
+ return 0;
+}
+
+static void pullup_submit_field(PullupContext *s, PullupBuffer *b, int parity)
+{
+ PullupField *f;
+
+ /* Grow the circular list if needed */
+ if (check_field_queue(s) < 0)
+ return;
+
+ /* Cannot have two fields of same parity in a row; drop the new one */
+ if (s->last && s->last->parity == parity)
+ return;
+
+ f = s->head;
+ f->parity = parity;
+ f->buffer = pullup_lock_buffer(b, parity);
+ f->flags = 0;
+ f->breaks = 0;
+ f->affinity = 0;
+
+ compute_metric(s, f->diffs, f, parity, f->prev->prev, parity, s->diff);
+ compute_metric(s, f->combs, parity ? f->prev : f, 0, parity ? f : f->prev, 1, s->comb);
+ compute_metric(s, f->vars, f, parity, f, -1, s->var);
+ emms_c();
+
+ /* Advance the circular list */
+ if (!s->first)
+ s->first = s->head;
+
+ s->last = s->head;
+ s->head = s->head->next;
+}
+
+static void copy_field(PullupContext *s,
+ PullupBuffer *dst, PullupBuffer *src, int parity)
+{
+ uint8_t *dd, *ss;
+ int i;
+
+ for (i = 0; i < s->nb_planes; i++) {
+ ss = src->planes[i] + parity * s->planewidth[i];
+ dd = dst->planes[i] + parity * s->planewidth[i];
+
+ av_image_copy_plane(dd, s->planewidth[i] << 1,
+ ss, s->planewidth[i] << 1,
+ s->planewidth[i], s->planeheight[i] >> 1);
+ }
+}
+
+static void pullup_pack_frame(PullupContext *s, PullupFrame *fr)
+{
+ int i;
+
+ if (fr->buffer)
+ return;
+
+ if (fr->length < 2)
+ return; /* FIXME: deal with this */
+
+ for (i = 0; i < 2; i++) {
+ if (fr->ofields[i]->lock[i^1])
+ continue;
+
+ fr->buffer = fr->ofields[i];
+ pullup_lock_buffer(fr->buffer, 2);
+ copy_field(s, fr->buffer, fr->ofields[i^1], i^1);
+ return;
+ }
+
+ fr->buffer = pullup_get_buffer(s, 2);
+
+ copy_field(s, fr->buffer, fr->ofields[0], 0);
+ copy_field(s, fr->buffer, fr->ofields[1], 1);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ PullupContext *s = ctx->priv;
+ PullupBuffer *b;
+ PullupFrame *f;
+ AVFrame *out;
+ int p, ret = 0;
+
+ b = pullup_get_buffer(s, 2);
+ if (!b) {
+ av_log(ctx, AV_LOG_WARNING, "Could not get buffer!\n");
+ f = pullup_get_frame(s);
+ pullup_release_frame(f);
+ goto end;
+ }
+
+ av_image_copy(b->planes, s->planewidth,
+ (const uint8_t**)in->data, in->linesize,
+ inlink->format, inlink->w, inlink->h);
+
+ p = in->interlaced_frame ? !in->top_field_first : 0;
+ pullup_submit_field(s, b, p );
+ pullup_submit_field(s, b, p^1);
+
+ if (in->repeat_pict)
+ pullup_submit_field(s, b, p);
+
+ pullup_release_buffer(b, 2);
+
+ f = pullup_get_frame(s);
+ if (!f)
+ goto end;
+
+ if (f->length < 2) {
+ pullup_release_frame(f);
+ f = pullup_get_frame(s);
+ if (!f)
+ goto end;
+ if (f->length < 2) {
+ pullup_release_frame(f);
+ if (!in->repeat_pict)
+ goto end;
+ f = pullup_get_frame(s);
+ if (!f)
+ goto end;
+ if (f->length < 2) {
+ pullup_release_frame(f);
+ goto end;
+ }
+ }
+ }
+
+ /* If the frame isn't already exportable... */
+ if (!f->buffer)
+ pullup_pack_frame(s, f);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ av_frame_copy_props(out, in);
+
+ av_image_copy(out->data, out->linesize,
+ (const uint8_t**)f->buffer->planes, s->planewidth,
+ inlink->format, inlink->w, inlink->h);
+
+ ret = ff_filter_frame(outlink, out);
+ pullup_release_frame(f);
+end:
+ av_frame_free(&in);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ PullupContext *s = ctx->priv;
+ int i;
+
+ free_field_queue(s->head);
+ s->last = NULL;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
+ av_freep(&s->buffers[i].planes[0]);
+ av_freep(&s->buffers[i].planes[1]);
+ av_freep(&s->buffers[i].planes[2]);
+ }
+}
+
+static const AVFilterPad pullup_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad pullup_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_pullup = {
+ .name = "pullup",
+ .description = NULL_IF_CONFIG_SMALL("Pullup from field sequence to frames."),
+ .priv_size = sizeof(PullupContext),
+ .priv_class = &pullup_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = pullup_inputs,
+ .outputs = pullup_outputs,
+};
diff --git a/libavfilter/vf_pullup.h b/libavfilter/vf_pullup.h
new file mode 100644
index 0000000..8f59335
--- /dev/null
+++ b/libavfilter/vf_pullup.h
@@ -0,0 +1,71 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_PULLUP_H
+#define AVFILTER_PULLUP_H
+
+#include "avfilter.h"
+
+typedef struct PullupBuffer {
+ int lock[2];
+ uint8_t *planes[4];
+} PullupBuffer;
+
+typedef struct PullupField {
+ int parity;
+ PullupBuffer *buffer;
+ unsigned flags;
+ int breaks;
+ int affinity;
+ int *diffs;
+ int *combs;
+ int *vars;
+ struct PullupField *prev, *next;
+} PullupField;
+
+typedef struct PullupFrame {
+ int lock;
+ int length;
+ int parity;
+ PullupBuffer *ifields[4], *ofields[2];
+ PullupBuffer *buffer;
+} PullupFrame;
+
+typedef struct PullupContext {
+ const AVClass *class;
+ int junk_left, junk_right, junk_top, junk_bottom;
+ int metric_plane;
+ int strict_breaks;
+ int strict_pairs;
+ int metric_w, metric_h, metric_length;
+ int metric_offset;
+ int nb_planes;
+ int planewidth[4];
+ int planeheight[4];
+ PullupField *first, *last, *head;
+ PullupBuffer buffers[10];
+ PullupFrame frame;
+
+ int (*diff)(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+ int (*comb)(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+ int (*var )(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+} PullupContext;
+
+void ff_pullup_init_x86(PullupContext *s);
+
+#endif /* AVFILTER_PULLUP_H */
diff --git a/libavfilter/vf_removelogo.c b/libavfilter/vf_removelogo.c
new file mode 100644
index 0000000..555517f
--- /dev/null
+++ b/libavfilter/vf_removelogo.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2005 Robert Edele <yartrebo@earthlink.net>
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Advanced blur-based logo removing filter
+ *
+ * This filter loads an image mask file showing where a logo is and
+ * uses a blur transform to remove the logo.
+ *
+ * Based on the libmpcodecs remove-logo filter by Robert Edele.
+ */
+
+/**
+ * This code implements a filter to remove annoying TV logos and other annoying
+ * images placed onto a video stream. It works by filling in the pixels that
+ * comprise the logo with neighboring pixels. The transform is very loosely
+ * based on a gaussian blur, but it is different enough to merit its own
+ * paragraph later on. It is a major improvement on the old delogo filter as it
+ * both uses a better blurring algorithm and uses a bitmap to use an arbitrary
+ * and generally much tighter fitting shape than a rectangle.
+ *
+ * The logo removal algorithm has two key points. The first is that it
+ * distinguishes between pixels in the logo and those not in the logo by using
+ * the passed-in bitmap. Pixels not in the logo are copied over directly without
+ * being modified and they also serve as source pixels for the logo
+ * fill-in. Pixels inside the logo have the mask applied.
+ *
+ * At init-time the bitmap is reprocessed internally, and the distance to the
+ * nearest edge of the logo (Manhattan distance), along with a little extra to
+ * remove rough edges, is stored in each pixel. This is done using an in-place
+ * erosion algorithm, and incrementing each pixel that survives any given
+ * erosion. Once every pixel is eroded, the maximum value is recorded, and a
+ * set of masks from size 0 to this size are generaged. The masks are circular
+ * binary masks, where each pixel within a radius N (where N is the size of the
+ * mask) is a 1, and all other pixels are a 0. Although a gaussian mask would be
+ * more mathematically accurate, a binary mask works better in practice because
+ * we generally do not use the central pixels in the mask (because they are in
+ * the logo region), and thus a gaussian mask will cause too little blur and
+ * thus a very unstable image.
+ *
+ * The mask is applied in a special way. Namely, only pixels in the mask that
+ * line up to pixels outside the logo are used. The dynamic mask size means that
+ * the mask is just big enough so that the edges touch pixels outside the logo,
+ * so the blurring is kept to a minimum and at least the first boundary
+ * condition is met (that the image function itself is continuous), even if the
+ * second boundary condition (that the derivative of the image function is
+ * continuous) is not met. A masking algorithm that does preserve the second
+ * boundary coundition (perhaps something based on a highly-modified bi-cubic
+ * algorithm) should offer even better results on paper, but the noise in a
+ * typical TV signal should make anything based on derivatives hopelessly noisy.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "bbox.h"
+#include "lavfutils.h"
+#include "lswsutils.h"
+
+typedef struct {
+ const AVClass *class;
+ char *filename;
+ /* Stores our collection of masks. The first is for an array of
+ the second for the y axis, and the third for the x axis. */
+ int ***mask;
+ int max_mask_size;
+ int mask_w, mask_h;
+
+ uint8_t *full_mask_data;
+ FFBoundingBox full_mask_bbox;
+ uint8_t *half_mask_data;
+ FFBoundingBox half_mask_bbox;
+} RemovelogoContext;
+
+#define OFFSET(x) offsetof(RemovelogoContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption removelogo_options[] = {
+ { "filename", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { "f", "set bitmap filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(removelogo);
+
+/**
+ * Choose a slightly larger mask size to improve performance.
+ *
+ * This function maps the absolute minimum mask size needed to the
+ * mask size we'll actually use. f(x) = x (the smallest that will
+ * work) will produce the sharpest results, but will be quite
+ * jittery. f(x) = 1.25x (what I'm using) is a good tradeoff in my
+ * opinion. This will calculate only at init-time, so you can put a
+ * long expression here without effecting performance.
+ */
+#define apply_mask_fudge_factor(x) (((x) >> 2) + x)
+
+/**
+ * Pre-process an image to give distance information.
+ *
+ * This function takes a bitmap image and converts it in place into a
+ * distance image. A distance image is zero for pixels outside of the
+ * logo and is the Manhattan distance (|dx| + |dy|) from the logo edge
+ * for pixels inside of the logo. This will overestimate the distance,
+ * but that is safe, and is far easier to implement than a proper
+ * pythagorean distance since I'm using a modified erosion algorithm
+ * to compute the distances.
+ *
+ * @param mask image which will be converted from a greyscale image
+ * into a distance image.
+ */
+static void convert_mask_to_strength_mask(uint8_t *data, int linesize,
+ int w, int h, int min_val,
+ int *max_mask_size)
+{
+ int x, y;
+
+ /* How many times we've gone through the loop. Used in the
+ in-place erosion algorithm and to get us max_mask_size later on. */
+ int current_pass = 0;
+
+ /* set all non-zero values to 1 */
+ for (y = 0; y < h; y++)
+ for (x = 0; x < w; x++)
+ data[y*linesize + x] = data[y*linesize + x] > min_val;
+
+ /* For each pass, if a pixel is itself the same value as the
+ current pass, and its four neighbors are too, then it is
+ incremented. If no pixels are incremented by the end of the
+ pass, then we go again. Edge pixels are counted as always
+ excluded (this should be true anyway for any sane mask, but if
+ it isn't this will ensure that we eventually exit). */
+ while (1) {
+ /* If this doesn't get set by the end of this pass, then we're done. */
+ int has_anything_changed = 0;
+ uint8_t *current_pixel0 = data + 1 + linesize, *current_pixel;
+ current_pass++;
+
+ for (y = 1; y < h-1; y++) {
+ current_pixel = current_pixel0;
+ for (x = 1; x < w-1; x++) {
+ /* Apply the in-place erosion transform. It is based
+ on the following two premises:
+ 1 - Any pixel that fails 1 erosion will fail all
+ future erosions.
+
+ 2 - Only pixels having survived all erosions up to
+ the present will be >= to current_pass.
+ It doesn't matter if it survived the current pass,
+ failed it, or hasn't been tested yet. By using >=
+ instead of ==, we allow the algorithm to work in
+ place. */
+ if ( *current_pixel >= current_pass &&
+ *(current_pixel + 1) >= current_pass &&
+ *(current_pixel - 1) >= current_pass &&
+ *(current_pixel + linesize) >= current_pass &&
+ *(current_pixel - linesize) >= current_pass) {
+ /* Increment the value since it still has not been
+ * eroded, as evidenced by the if statement that
+ * just evaluated to true. */
+ (*current_pixel)++;
+ has_anything_changed = 1;
+ }
+ current_pixel++;
+ }
+ current_pixel0 += linesize;
+ }
+ if (!has_anything_changed)
+ break;
+ }
+
+ /* Apply the fudge factor, which will increase the size of the
+ * mask a little to reduce jitter at the cost of more blur. */
+ for (y = 1; y < h - 1; y++)
+ for (x = 1; x < w - 1; x++)
+ data[(y * linesize) + x] = apply_mask_fudge_factor(data[(y * linesize) + x]);
+
+ /* As a side-effect, we now know the maximum mask size, which
+ * we'll use to generate our masks. */
+ /* Apply the fudge factor to this number too, since we must ensure
+ * that enough masks are generated. */
+ *max_mask_size = apply_mask_fudge_factor(current_pass + 1);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int load_mask(uint8_t **mask, int *w, int *h,
+ const char *filename, void *log_ctx)
+{
+ int ret;
+ enum AVPixelFormat pix_fmt;
+ uint8_t *src_data[4], *gray_data[4];
+ int src_linesize[4], gray_linesize[4];
+
+ /* load image from file */
+ if ((ret = ff_load_image(src_data, src_linesize, w, h, &pix_fmt, filename, log_ctx)) < 0)
+ return ret;
+
+ /* convert the image to GRAY8 */
+ if ((ret = ff_scale_image(gray_data, gray_linesize, *w, *h, AV_PIX_FMT_GRAY8,
+ src_data, src_linesize, *w, *h, pix_fmt,
+ log_ctx)) < 0)
+ goto end;
+
+ /* copy mask to a newly allocated array */
+ *mask = av_malloc(*w * *h);
+ if (!*mask)
+ ret = AVERROR(ENOMEM);
+ av_image_copy_plane(*mask, *w, gray_data[0], gray_linesize[0], *w, *h);
+
+end:
+ av_freep(&src_data[0]);
+ av_freep(&gray_data[0]);
+ return ret;
+}
+
+/**
+ * Generate a scaled down image with half width, height, and intensity.
+ *
+ * This function not only scales down an image, but halves the value
+ * in each pixel too. The purpose of this is to produce a chroma
+ * filter image out of a luma filter image. The pixel values store the
+ * distance to the edge of the logo and halving the dimensions halves
+ * the distance. This function rounds up, because a downwards rounding
+ * error could cause the filter to fail, but an upwards rounding error
+ * will only cause a minor amount of excess blur in the chroma planes.
+ */
+static void generate_half_size_image(const uint8_t *src_data, int src_linesize,
+ uint8_t *dst_data, int dst_linesize,
+ int src_w, int src_h,
+ int *max_mask_size)
+{
+ int x, y;
+
+ /* Copy over the image data, using the average of 4 pixels for to
+ * calculate each downsampled pixel. */
+ for (y = 0; y < src_h/2; y++) {
+ for (x = 0; x < src_w/2; x++) {
+ /* Set the pixel if there exists a non-zero value in the
+ * source pixels, else clear it. */
+ dst_data[(y * dst_linesize) + x] =
+ src_data[((y << 1) * src_linesize) + (x << 1)] ||
+ src_data[((y << 1) * src_linesize) + (x << 1) + 1] ||
+ src_data[(((y << 1) + 1) * src_linesize) + (x << 1)] ||
+ src_data[(((y << 1) + 1) * src_linesize) + (x << 1) + 1];
+ dst_data[(y * dst_linesize) + x] = FFMIN(1, dst_data[(y * dst_linesize) + x]);
+ }
+ }
+
+ convert_mask_to_strength_mask(dst_data, dst_linesize,
+ src_w/2, src_h/2, 0, max_mask_size);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ RemovelogoContext *s = ctx->priv;
+ int ***mask;
+ int ret = 0;
+ int a, b, c, w, h;
+ int full_max_mask_size, half_max_mask_size;
+
+ if (!s->filename) {
+ av_log(ctx, AV_LOG_ERROR, "The bitmap file name is mandatory\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* Load our mask image. */
+ if ((ret = load_mask(&s->full_mask_data, &w, &h, s->filename, ctx)) < 0)
+ return ret;
+ s->mask_w = w;
+ s->mask_h = h;
+
+ convert_mask_to_strength_mask(s->full_mask_data, w, w, h,
+ 16, &full_max_mask_size);
+
+ /* Create the scaled down mask image for the chroma planes. */
+ if (!(s->half_mask_data = av_mallocz(w/2 * h/2)))
+ return AVERROR(ENOMEM);
+ generate_half_size_image(s->full_mask_data, w,
+ s->half_mask_data, w/2,
+ w, h, &half_max_mask_size);
+
+ s->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
+
+ /* Create a circular mask for each size up to max_mask_size. When
+ the filter is applied, the mask size is determined on a pixel
+ by pixel basis, with pixels nearer the edge of the logo getting
+ smaller mask sizes. */
+ mask = (int ***)av_malloc_array(s->max_mask_size + 1, sizeof(int **));
+ if (!mask)
+ return AVERROR(ENOMEM);
+
+ for (a = 0; a <= s->max_mask_size; a++) {
+ mask[a] = (int **)av_malloc_array((a * 2) + 1, sizeof(int *));
+ if (!mask[a]) {
+ av_free(mask);
+ return AVERROR(ENOMEM);
+ }
+ for (b = -a; b <= a; b++) {
+ mask[a][b + a] = (int *)av_malloc_array((a * 2) + 1, sizeof(int));
+ if (!mask[a][b + a]) {
+ av_free(mask);
+ return AVERROR(ENOMEM);
+ }
+ for (c = -a; c <= a; c++) {
+ if ((b * b) + (c * c) <= (a * a)) /* Circular 0/1 mask. */
+ mask[a][b + a][c + a] = 1;
+ else
+ mask[a][b + a][c + a] = 0;
+ }
+ }
+ }
+ s->mask = mask;
+
+ /* Calculate our bounding rectangles, which determine in what
+ * region the logo resides for faster processing. */
+ ff_calculate_bounding_box(&s->full_mask_bbox, s->full_mask_data, w, w, h, 0);
+ ff_calculate_bounding_box(&s->half_mask_bbox, s->half_mask_data, w/2, w/2, h/2, 0);
+
+#define SHOW_LOGO_INFO(mask_type) \
+ av_log(ctx, AV_LOG_VERBOSE, #mask_type " x1:%d x2:%d y1:%d y2:%d max_mask_size:%d\n", \
+ s->mask_type##_mask_bbox.x1, s->mask_type##_mask_bbox.x2, \
+ s->mask_type##_mask_bbox.y1, s->mask_type##_mask_bbox.y2, \
+ mask_type##_max_mask_size);
+ SHOW_LOGO_INFO(full);
+ SHOW_LOGO_INFO(half);
+
+ return 0;
+}
+
+static int config_props_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ RemovelogoContext *s = ctx->priv;
+
+ if (inlink->w != s->mask_w || inlink->h != s->mask_h) {
+ av_log(ctx, AV_LOG_INFO,
+ "Mask image size %dx%d does not match with the input video size %dx%d\n",
+ s->mask_w, s->mask_h, inlink->w, inlink->h);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+/**
+ * Blur image.
+ *
+ * It takes a pixel that is inside the mask and blurs it. It does so
+ * by finding the average of all the pixels within the mask and
+ * outside of the mask.
+ *
+ * @param mask_data the mask plane to use for averaging
+ * @param image_data the image plane to blur
+ * @param w width of the image
+ * @param h height of the image
+ * @param x x-coordinate of the pixel to blur
+ * @param y y-coordinate of the pixel to blur
+ */
+static unsigned int blur_pixel(int ***mask,
+ const uint8_t *mask_data, int mask_linesize,
+ uint8_t *image_data, int image_linesize,
+ int w, int h, int x, int y)
+{
+ /* Mask size tells how large a circle to use. The radius is about
+ * (slightly larger than) mask size. */
+ int mask_size;
+ int start_posx, start_posy, end_posx, end_posy;
+ int i, j;
+ unsigned int accumulator = 0, divisor = 0;
+ /* What pixel we are reading out of the circular blur mask. */
+ const uint8_t *image_read_position;
+ /* What pixel we are reading out of the filter image. */
+ const uint8_t *mask_read_position;
+
+ /* Prepare our bounding rectangle and clip it if need be. */
+ mask_size = mask_data[y * mask_linesize + x];
+ start_posx = FFMAX(0, x - mask_size);
+ start_posy = FFMAX(0, y - mask_size);
+ end_posx = FFMIN(w - 1, x + mask_size);
+ end_posy = FFMIN(h - 1, y + mask_size);
+
+ image_read_position = image_data + image_linesize * start_posy + start_posx;
+ mask_read_position = mask_data + mask_linesize * start_posy + start_posx;
+
+ for (j = start_posy; j <= end_posy; j++) {
+ for (i = start_posx; i <= end_posx; i++) {
+ /* Check if this pixel is in the mask or not. Only use the
+ * pixel if it is not. */
+ if (!(*mask_read_position) && mask[mask_size][i - start_posx][j - start_posy]) {
+ accumulator += *image_read_position;
+ divisor++;
+ }
+
+ image_read_position++;
+ mask_read_position++;
+ }
+
+ image_read_position += (image_linesize - ((end_posx + 1) - start_posx));
+ mask_read_position += (mask_linesize - ((end_posx + 1) - start_posx));
+ }
+
+ /* If divisor is 0, it means that not a single pixel is outside of
+ the logo, so we have no data. Else we need to normalise the
+ data using the divisor. */
+ return divisor == 0 ? 255:
+ (accumulator + (divisor / 2)) / divisor; /* divide, taking into account average rounding error */
+}
+
+/**
+ * Blur image plane using a mask.
+ *
+ * @param source The image to have it's logo removed.
+ * @param destination Where the output image will be stored.
+ * @param source_stride How far apart (in memory) two consecutive lines are.
+ * @param destination Same as source_stride, but for the destination image.
+ * @param width Width of the image. This is the same for source and destination.
+ * @param height Height of the image. This is the same for source and destination.
+ * @param is_image_direct If the image is direct, then source and destination are
+ * the same and we can save a lot of time by not copying pixels that
+ * haven't changed.
+ * @param filter The image that stores the distance to the edge of the logo for
+ * each pixel.
+ * @param logo_start_x smallest x-coordinate that contains at least 1 logo pixel.
+ * @param logo_start_y smallest y-coordinate that contains at least 1 logo pixel.
+ * @param logo_end_x largest x-coordinate that contains at least 1 logo pixel.
+ * @param logo_end_y largest y-coordinate that contains at least 1 logo pixel.
+ *
+ * This function processes an entire plane. Pixels outside of the logo are copied
+ * to the output without change, and pixels inside the logo have the de-blurring
+ * function applied.
+ */
+static void blur_image(int ***mask,
+ const uint8_t *src_data, int src_linesize,
+ uint8_t *dst_data, int dst_linesize,
+ const uint8_t *mask_data, int mask_linesize,
+ int w, int h, int direct,
+ FFBoundingBox *bbox)
+{
+ int x, y;
+ uint8_t *dst_line;
+ const uint8_t *src_line;
+
+ if (!direct)
+ av_image_copy_plane(dst_data, dst_linesize, src_data, src_linesize, w, h);
+
+ for (y = bbox->y1; y <= bbox->y2; y++) {
+ src_line = src_data + src_linesize * y;
+ dst_line = dst_data + dst_linesize * y;
+
+ for (x = bbox->x1; x <= bbox->x2; x++) {
+ if (mask_data[y * mask_linesize + x]) {
+ /* Only process if we are in the mask. */
+ dst_line[x] = blur_pixel(mask,
+ mask_data, mask_linesize,
+ dst_data, dst_linesize,
+ w, h, x, y);
+ } else {
+ /* Else just copy the data. */
+ if (!direct)
+ dst_line[x] = src_line[x];
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ RemovelogoContext *s = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpicref;
+ int direct = 0;
+
+ if (av_frame_is_writable(inpicref)) {
+ direct = 1;
+ outpicref = inpicref;
+ } else {
+ outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpicref, inpicref);
+ }
+
+ blur_image(s->mask,
+ inpicref ->data[0], inpicref ->linesize[0],
+ outpicref->data[0], outpicref->linesize[0],
+ s->full_mask_data, inlink->w,
+ inlink->w, inlink->h, direct, &s->full_mask_bbox);
+ blur_image(s->mask,
+ inpicref ->data[1], inpicref ->linesize[1],
+ outpicref->data[1], outpicref->linesize[1],
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
+ blur_image(s->mask,
+ inpicref ->data[2], inpicref ->linesize[2],
+ outpicref->data[2], outpicref->linesize[2],
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
+
+ if (!direct)
+ av_frame_free(&inpicref);
+
+ return ff_filter_frame(outlink, outpicref);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ RemovelogoContext *s = ctx->priv;
+ int a, b;
+
+ av_freep(&s->full_mask_data);
+ av_freep(&s->half_mask_data);
+
+ if (s->mask) {
+ /* Loop through each mask. */
+ for (a = 0; a <= s->max_mask_size; a++) {
+ /* Loop through each scanline in a mask. */
+ for (b = -a; b <= a; b++) {
+ av_freep(&s->mask[a][b + a]); /* Free a scanline. */
+ }
+ av_freep(&s->mask[a]);
+ }
+ /* Free the array of pointers pointing to the masks. */
+ av_freep(&s->mask);
+ }
+}
+
+static const AVFilterPad removelogo_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad removelogo_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_removelogo = {
+ .name = "removelogo",
+ .description = NULL_IF_CONFIG_SMALL("Remove a TV logo based on a mask image."),
+ .priv_size = sizeof(RemovelogoContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = removelogo_inputs,
+ .outputs = removelogo_outputs,
+ .priv_class = &removelogo_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_rotate.c b/libavfilter/vf_rotate.c
new file mode 100644
index 0000000..8dec742
--- /dev/null
+++ b/libavfilter/vf_rotate.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ * Copyright (c) 2008 Vitor Sessak
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rotation filter, partially based on the tests/rotozoom.c program
+*/
+
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "drawutils.h"
+#include "internal.h"
+#include "video.h"
+
+#include <float.h>
+
+static const char * const var_names[] = {
+ "in_w" , "iw", ///< width of the input video
+ "in_h" , "ih", ///< height of the input video
+ "out_w", "ow", ///< width of the input video
+ "out_h", "oh", ///< height of the input video
+ "hsub", "vsub",
+ "n", ///< number of frame
+ "t", ///< timestamp expressed in seconds
+ NULL
+};
+
+enum var_name {
+ VAR_IN_W , VAR_IW,
+ VAR_IN_H , VAR_IH,
+ VAR_OUT_W, VAR_OW,
+ VAR_OUT_H, VAR_OH,
+ VAR_HSUB, VAR_VSUB,
+ VAR_N,
+ VAR_T,
+ VAR_VARS_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ double angle;
+ char *angle_expr_str; ///< expression for the angle
+ AVExpr *angle_expr; ///< parsed expression for the angle
+ char *outw_expr_str, *outh_expr_str;
+ int outh, outw;
+ uint8_t fillcolor[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area
+ char *fillcolor_str;
+ int fillcolor_enable;
+ int hsub, vsub;
+ int nb_planes;
+ int use_bilinear;
+ float sinx, cosx;
+ double var_values[VAR_VARS_NB];
+ FFDrawContext draw;
+ FFDrawColor color;
+} RotContext;
+
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int inw, inh;
+ int outw, outh;
+ int plane;
+ int xi, yi;
+ int xprime, yprime;
+ int c, s;
+} ThreadData;
+
+#define OFFSET(x) offsetof(RotContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption rotate_options[] = {
+ { "angle", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "a", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "out_w", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "ow", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "out_h", "set output height expression", OFFSET(outh_expr_str), AV_OPT_TYPE_STRING, {.str="ih"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "oh", "set output height expression", OFFSET(outh_expr_str), AV_OPT_TYPE_STRING, {.str="ih"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "fillcolor", "set background fill color", OFFSET(fillcolor_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "c", "set background fill color", OFFSET(fillcolor_str), AV_OPT_TYPE_STRING, {.str="black"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS },
+ { "bilinear", "use bilinear interpolation", OFFSET(use_bilinear), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags=FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(rotate);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ RotContext *rot = ctx->priv;
+
+ if (!strcmp(rot->fillcolor_str, "none"))
+ rot->fillcolor_enable = 0;
+ else if (av_parse_color(rot->fillcolor, rot->fillcolor_str, -1, ctx) >= 0)
+ rot->fillcolor_enable = 1;
+ else
+ return AVERROR(EINVAL);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ RotContext *rot = ctx->priv;
+
+ av_expr_free(rot->angle_expr);
+ rot->angle_expr = NULL;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_RGB0,
+ AV_PIX_FMT_0BGR, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static double get_rotated_w(void *opaque, double angle)
+{
+ RotContext *rot = opaque;
+ double inw = rot->var_values[VAR_IN_W];
+ double inh = rot->var_values[VAR_IN_H];
+ float sinx = sin(angle);
+ float cosx = cos(angle);
+
+ return FFMAX(0, inh * sinx) + FFMAX(0, -inw * cosx) +
+ FFMAX(0, inw * cosx) + FFMAX(0, -inh * sinx);
+}
+
+static double get_rotated_h(void *opaque, double angle)
+{
+ RotContext *rot = opaque;
+ double inw = rot->var_values[VAR_IN_W];
+ double inh = rot->var_values[VAR_IN_H];
+ float sinx = sin(angle);
+ float cosx = cos(angle);
+
+ return FFMAX(0, -inh * cosx) + FFMAX(0, -inw * sinx) +
+ FFMAX(0, inh * cosx) + FFMAX(0, inw * sinx);
+}
+
+static double (* const func1[])(void *, double) = {
+ get_rotated_w,
+ get_rotated_h,
+ NULL
+};
+
+static const char * const func1_names[] = {
+ "rotw",
+ "roth",
+ NULL
+};
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ RotContext *rot = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+ double res;
+ char *expr;
+
+ ff_draw_init(&rot->draw, inlink->format, 0);
+ ff_draw_color(&rot->draw, &rot->color, rot->fillcolor);
+
+ rot->hsub = pixdesc->log2_chroma_w;
+ rot->vsub = pixdesc->log2_chroma_h;
+
+ rot->var_values[VAR_IN_W] = rot->var_values[VAR_IW] = inlink->w;
+ rot->var_values[VAR_IN_H] = rot->var_values[VAR_IH] = inlink->h;
+ rot->var_values[VAR_HSUB] = 1<<rot->hsub;
+ rot->var_values[VAR_VSUB] = 1<<rot->vsub;
+ rot->var_values[VAR_N] = NAN;
+ rot->var_values[VAR_T] = NAN;
+ rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = NAN;
+ rot->var_values[VAR_OUT_H] = rot->var_values[VAR_OH] = NAN;
+
+ av_expr_free(rot->angle_expr);
+ rot->angle_expr = NULL;
+ if ((ret = av_expr_parse(&rot->angle_expr, expr = rot->angle_expr_str, var_names,
+ func1_names, func1, NULL, NULL, 0, ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Error occurred parsing angle expression '%s'\n", rot->angle_expr_str);
+ return ret;
+ }
+
+#define SET_SIZE_EXPR(name, opt_name) do { \
+ ret = av_expr_parse_and_eval(&res, expr = rot->name##_expr_str, \
+ var_names, rot->var_values, \
+ func1_names, func1, NULL, NULL, rot, 0, ctx); \
+ if (ret < 0 || isnan(res) || isinf(res) || res <= 0) { \
+ av_log(ctx, AV_LOG_ERROR, \
+ "Error parsing or evaluating expression for option %s: " \
+ "invalid expression '%s' or non-positive or indefinite value %f\n", \
+ opt_name, expr, res); \
+ return ret; \
+ } \
+} while (0)
+
+ /* evaluate width and height */
+ av_expr_parse_and_eval(&res, expr = rot->outw_expr_str, var_names, rot->var_values,
+ func1_names, func1, NULL, NULL, rot, 0, ctx);
+ rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = res;
+ rot->outw = res + 0.5;
+ SET_SIZE_EXPR(outh, "out_w");
+ rot->var_values[VAR_OUT_H] = rot->var_values[VAR_OH] = res;
+ rot->outh = res + 0.5;
+
+ /* evaluate the width again, as it may depend on the evaluated output height */
+ SET_SIZE_EXPR(outw, "out_h");
+ rot->var_values[VAR_OUT_W] = rot->var_values[VAR_OW] = res;
+ rot->outw = res + 0.5;
+
+ /* compute number of planes */
+ rot->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ outlink->w = rot->outw;
+ outlink->h = rot->outh;
+ return 0;
+}
+
+#define FIXP (1<<16)
+#define FIXP2 (1<<20)
+#define INT_PI 3294199 //(M_PI * FIXP2)
+
+/**
+ * Compute the sin of a using integer values.
+ * Input is scaled by FIXP2 and output values are scaled by FIXP.
+ */
+static int64_t int_sin(int64_t a)
+{
+ int64_t a2, res = 0;
+ int i;
+ if (a < 0) a = INT_PI-a; // 0..inf
+ a %= 2 * INT_PI; // 0..2PI
+
+ if (a >= INT_PI*3/2) a -= 2*INT_PI; // -PI/2 .. 3PI/2
+ if (a >= INT_PI/2 ) a = INT_PI - a; // -PI/2 .. PI/2
+
+ /* compute sin using Taylor series approximated to the fifth term */
+ a2 = (a*a)/(FIXP2);
+ for (i = 2; i < 11; i += 2) {
+ res += a;
+ a = -a*a2 / (FIXP2*i*(i+1));
+ }
+ return (res + 8)>>4;
+}
+
+/**
+ * Interpolate the color in src at position x and y using bilinear
+ * interpolation.
+ */
+static uint8_t *interpolate_bilinear(uint8_t *dst_color,
+ const uint8_t *src, int src_linesize, int src_linestep,
+ int x, int y, int max_x, int max_y)
+{
+ int int_x = av_clip(x>>16, 0, max_x);
+ int int_y = av_clip(y>>16, 0, max_y);
+ int frac_x = x&0xFFFF;
+ int frac_y = y&0xFFFF;
+ int i;
+ int int_x1 = FFMIN(int_x+1, max_x);
+ int int_y1 = FFMIN(int_y+1, max_y);
+
+ for (i = 0; i < src_linestep; i++) {
+ int s00 = src[src_linestep * int_x + i + src_linesize * int_y ];
+ int s01 = src[src_linestep * int_x1 + i + src_linesize * int_y ];
+ int s10 = src[src_linestep * int_x + i + src_linesize * int_y1];
+ int s11 = src[src_linestep * int_x1 + i + src_linesize * int_y1];
+ int s0 = (((1<<16) - frac_x)*s00 + frac_x*s01);
+ int s1 = (((1<<16) - frac_x)*s10 + frac_x*s11);
+
+ dst_color[i] = ((int64_t)((1<<16) - frac_y)*s0 + (int64_t)frac_y*s1) >> 32;
+ }
+
+ return dst_color;
+}
+
+static av_always_inline void copy_elem(uint8_t *pout, const uint8_t *pin, int elem_size)
+{
+ int v;
+ switch (elem_size) {
+ case 1:
+ *pout = *pin;
+ break;
+ case 2:
+ *((uint16_t *)pout) = *((uint16_t *)pin);
+ break;
+ case 3:
+ v = AV_RB24(pin);
+ AV_WB24(pout, v);
+ break;
+ case 4:
+ *((uint32_t *)pout) = *((uint32_t *)pin);
+ break;
+ default:
+ memcpy(pout, pin, elem_size);
+ break;
+ }
+}
+
+static av_always_inline void simple_rotate_internal(uint8_t *dst, const uint8_t *src, int src_linesize, int angle, int elem_size, int len)
+{
+ int i;
+ switch(angle) {
+ case 0:
+ memcpy(dst, src, elem_size * len);
+ break;
+ case 1:
+ for (i = 0; i<len; i++)
+ copy_elem(dst + i*elem_size, src + (len-i-1)*src_linesize, elem_size);
+ break;
+ case 2:
+ for (i = 0; i<len; i++)
+ copy_elem(dst + i*elem_size, src + (len-i-1)*elem_size, elem_size);
+ break;
+ case 3:
+ for (i = 0; i<len; i++)
+ copy_elem(dst + i*elem_size, src + i*src_linesize, elem_size);
+ break;
+ }
+}
+
+static av_always_inline void simple_rotate(uint8_t *dst, const uint8_t *src, int src_linesize, int angle, int elem_size, int len)
+{
+ switch(elem_size) {
+ case 1 : simple_rotate_internal(dst, src, src_linesize, angle, 1, len); break;
+ case 2 : simple_rotate_internal(dst, src, src_linesize, angle, 2, len); break;
+ case 3 : simple_rotate_internal(dst, src, src_linesize, angle, 3, len); break;
+ case 4 : simple_rotate_internal(dst, src, src_linesize, angle, 4, len); break;
+ default: simple_rotate_internal(dst, src, src_linesize, angle, elem_size, len); break;
+ }
+}
+
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
+{
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ RotContext *rot = ctx->priv;
+ const int outw = td->outw, outh = td->outh;
+ const int inw = td->inw, inh = td->inh;
+ const int plane = td->plane;
+ const int xi = td->xi, yi = td->yi;
+ const int c = td->c, s = td->s;
+ const int start = (outh * job ) / nb_jobs;
+ const int end = (outh * (job+1)) / nb_jobs;
+ int xprime = td->xprime + start * s;
+ int yprime = td->yprime + start * c;
+ int i, j, x, y;
+
+ for (j = start; j < end; j++) {
+ x = xprime + xi + FIXP*(inw-1)/2;
+ y = yprime + yi + FIXP*(inh-1)/2;
+
+ if (fabs(rot->angle - 0) < FLT_EPSILON && outw == inw && outh == inh) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + j * in->linesize[plane],
+ in->linesize[plane], 0, rot->draw.pixelstep[plane], outw);
+ } else if (fabs(rot->angle - M_PI/2) < FLT_EPSILON && outw == inh && outh == inw) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + j * rot->draw.pixelstep[plane],
+ in->linesize[plane], 1, rot->draw.pixelstep[plane], outw);
+ } else if (fabs(rot->angle - M_PI) < FLT_EPSILON && outw == inw && outh == inh) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + (outh-j-1) * in->linesize[plane],
+ in->linesize[plane], 2, rot->draw.pixelstep[plane], outw);
+ } else if (fabs(rot->angle - 3*M_PI/2) < FLT_EPSILON && outw == inh && outh == inw) {
+ simple_rotate(out->data[plane] + j * out->linesize[plane],
+ in->data[plane] + (outh-j-1) * rot->draw.pixelstep[plane],
+ in->linesize[plane], 3, rot->draw.pixelstep[plane], outw);
+ } else {
+
+ for (i = 0; i < outw; i++) {
+ int32_t v;
+ int x1, y1;
+ uint8_t *pin, *pout;
+ x1 = x>>16;
+ y1 = y>>16;
+
+ /* the out-of-range values avoid border artifacts */
+ if (x1 >= -1 && x1 <= inw && y1 >= -1 && y1 <= inh) {
+ uint8_t inp_inv[4]; /* interpolated input value */
+ pout = out->data[plane] + j * out->linesize[plane] + i * rot->draw.pixelstep[plane];
+ if (rot->use_bilinear) {
+ pin = interpolate_bilinear(inp_inv,
+ in->data[plane], in->linesize[plane], rot->draw.pixelstep[plane],
+ x, y, inw-1, inh-1);
+ } else {
+ int x2 = av_clip(x1, 0, inw-1);
+ int y2 = av_clip(y1, 0, inh-1);
+ pin = in->data[plane] + y2 * in->linesize[plane] + x2 * rot->draw.pixelstep[plane];
+ }
+ switch (rot->draw.pixelstep[plane]) {
+ case 1:
+ *pout = *pin;
+ break;
+ case 2:
+ *((uint16_t *)pout) = *((uint16_t *)pin);
+ break;
+ case 3:
+ v = AV_RB24(pin);
+ AV_WB24(pout, v);
+ break;
+ case 4:
+ *((uint32_t *)pout) = *((uint32_t *)pin);
+ break;
+ default:
+ memcpy(pout, pin, rot->draw.pixelstep[plane]);
+ break;
+ }
+ }
+ x += c;
+ y -= s;
+ }
+ }
+ xprime += s;
+ yprime += c;
+ }
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+ RotContext *rot = ctx->priv;
+ int angle_int, s, c, plane;
+ double res;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ rot->var_values[VAR_N] = inlink->frame_count;
+ rot->var_values[VAR_T] = TS2T(in->pts, inlink->time_base);
+ rot->angle = res = av_expr_eval(rot->angle_expr, rot->var_values, rot);
+
+ av_log(ctx, AV_LOG_DEBUG, "n:%f time:%f angle:%f/PI\n",
+ rot->var_values[VAR_N], rot->var_values[VAR_T], rot->angle/M_PI);
+
+ angle_int = res * FIXP * 16;
+ s = int_sin(angle_int);
+ c = int_sin(angle_int + INT_PI/2);
+
+ /* fill background */
+ if (rot->fillcolor_enable)
+ ff_fill_rectangle(&rot->draw, &rot->color, out->data, out->linesize,
+ 0, 0, outlink->w, outlink->h);
+
+ for (plane = 0; plane < rot->nb_planes; plane++) {
+ int hsub = plane == 1 || plane == 2 ? rot->hsub : 0;
+ int vsub = plane == 1 || plane == 2 ? rot->vsub : 0;
+ const int outw = FF_CEIL_RSHIFT(outlink->w, hsub);
+ const int outh = FF_CEIL_RSHIFT(outlink->h, vsub);
+ ThreadData td = { .in = in, .out = out,
+ .inw = FF_CEIL_RSHIFT(inlink->w, hsub),
+ .inh = FF_CEIL_RSHIFT(inlink->h, vsub),
+ .outh = outh, .outw = outw,
+ .xi = -(outw-1) * c / 2, .yi = (outw-1) * s / 2,
+ .xprime = -(outh-1) * s / 2,
+ .yprime = -(outh-1) * c / 2,
+ .plane = plane, .c = c, .s = s };
+
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
+ }
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ RotContext *rot = ctx->priv;
+ int ret;
+
+ if (!strcmp(cmd, "angle") || !strcmp(cmd, "a")) {
+ AVExpr *old = rot->angle_expr;
+ ret = av_expr_parse(&rot->angle_expr, args, var_names,
+ NULL, NULL, NULL, NULL, 0, ctx);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Error when parsing the expression '%s' for angle command\n", args);
+ rot->angle_expr = old;
+ return ret;
+ }
+ av_expr_free(old);
+ } else
+ ret = AVERROR(ENOSYS);
+
+ return ret;
+}
+
+static const AVFilterPad rotate_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad rotate_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_rotate = {
+ .name = "rotate",
+ .description = NULL_IF_CONFIG_SMALL("Rotate the input image."),
+ .priv_size = sizeof(RotContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .process_command = process_command,
+ .inputs = rotate_inputs,
+ .outputs = rotate_outputs,
+ .priv_class = &rotate_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
+};
diff --git a/libavfilter/vf_sab.c b/libavfilter/vf_sab.c
new file mode 100644
index 0000000..aa38b53
--- /dev/null
+++ b/libavfilter/vf_sab.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Shape Adaptive Blur filter, ported from MPlayer libmpcodecs/vf_sab.c
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libswscale/swscale.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct {
+ float radius;
+ float pre_filter_radius;
+ float strength;
+ float quality;
+ struct SwsContext *pre_filter_context;
+ uint8_t *pre_filter_buf;
+ int pre_filter_linesize;
+ int dist_width;
+ int dist_linesize;
+ int *dist_coeff;
+#define COLOR_DIFF_COEFF_SIZE 512
+ int color_diff_coeff[COLOR_DIFF_COEFF_SIZE];
+} FilterParam;
+
+typedef struct {
+ const AVClass *class;
+ FilterParam luma;
+ FilterParam chroma;
+ int hsub;
+ int vsub;
+ unsigned int sws_flags;
+} SabContext;
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+#define RADIUS_MIN 0.1
+#define RADIUS_MAX 4.0
+
+#define PRE_FILTER_RADIUS_MIN 0.1
+#define PRE_FILTER_RADIUS_MAX 2.0
+
+#define STRENGTH_MIN 0.1
+#define STRENGTH_MAX 100.0
+
+#define OFFSET(x) offsetof(SabContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption sab_options[] = {
+ { "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "lr" , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "luma_pre_filter_radius", "set luma pre-filter radius", OFFSET(luma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, PRE_FILTER_RADIUS_MIN, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "lpfr", "set luma pre-filter radius", OFFSET(luma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, PRE_FILTER_RADIUS_MIN, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+ { "ls", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+
+ { "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "cr", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "chroma_pre_filter_radius", "set chroma pre-filter radius", OFFSET(chroma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=PRE_FILTER_RADIUS_MIN-1},
+ PRE_FILTER_RADIUS_MIN-1, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "cpfr", "set chroma pre-filter radius", OFFSET(chroma.pre_filter_radius), AV_OPT_TYPE_FLOAT, {.dbl=PRE_FILTER_RADIUS_MIN-1},
+ PRE_FILTER_RADIUS_MIN-1, PRE_FILTER_RADIUS_MAX, .flags=FLAGS },
+ { "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+ { "cs", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(sab);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SabContext *sab = ctx->priv;
+
+ /* make chroma default to luma values, if not explicitly set */
+ if (sab->chroma.radius < RADIUS_MIN)
+ sab->chroma.radius = sab->luma.radius;
+ if (sab->chroma.pre_filter_radius < PRE_FILTER_RADIUS_MIN)
+ sab->chroma.pre_filter_radius = sab->luma.pre_filter_radius;
+ if (sab->chroma.strength < STRENGTH_MIN)
+ sab->chroma.strength = sab->luma.strength;
+
+ sab->luma.quality = sab->chroma.quality = 3.0;
+ sab->sws_flags = SWS_POINT;
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "luma_radius:%f luma_pre_filter_radius::%f luma_strength:%f "
+ "chroma_radius:%f chroma_pre_filter_radius:%f chroma_strength:%f\n",
+ sab->luma .radius, sab->luma .pre_filter_radius, sab->luma .strength,
+ sab->chroma.radius, sab->chroma.pre_filter_radius, sab->chroma.strength);
+ return 0;
+}
+
+static void close_filter_param(FilterParam *f)
+{
+ if (f->pre_filter_context) {
+ sws_freeContext(f->pre_filter_context);
+ f->pre_filter_context = NULL;
+ }
+ av_freep(&f->pre_filter_buf);
+ av_freep(&f->dist_coeff);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SabContext *sab = ctx->priv;
+
+ close_filter_param(&sab->luma);
+ close_filter_param(&sab->chroma);
+}
+
+static int open_filter_param(FilterParam *f, int width, int height, unsigned int sws_flags)
+{
+ SwsVector *vec;
+ SwsFilter sws_f;
+ int i, x, y;
+ int linesize = FFALIGN(width, 8);
+
+ f->pre_filter_buf = av_malloc(linesize * height);
+ if (!f->pre_filter_buf)
+ return AVERROR(ENOMEM);
+
+ f->pre_filter_linesize = linesize;
+ vec = sws_getGaussianVec(f->pre_filter_radius, f->quality);
+ sws_f.lumH = sws_f.lumV = vec;
+ sws_f.chrH = sws_f.chrV = NULL;
+ f->pre_filter_context = sws_getContext(width, height, AV_PIX_FMT_GRAY8,
+ width, height, AV_PIX_FMT_GRAY8,
+ sws_flags, &sws_f, NULL, NULL);
+ sws_freeVec(vec);
+
+ vec = sws_getGaussianVec(f->strength, 5.0);
+ for (i = 0; i < COLOR_DIFF_COEFF_SIZE; i++) {
+ double d;
+ int index = i-COLOR_DIFF_COEFF_SIZE/2 + vec->length/2;
+
+ if (index < 0 || index >= vec->length) d = 0.0;
+ else d = vec->coeff[index];
+
+ f->color_diff_coeff[i] = (int)(d/vec->coeff[vec->length/2]*(1<<12) + 0.5);
+ }
+ sws_freeVec(vec);
+
+ vec = sws_getGaussianVec(f->radius, f->quality);
+ f->dist_width = vec->length;
+ f->dist_linesize = FFALIGN(vec->length, 8);
+ f->dist_coeff = av_malloc_array(f->dist_width, f->dist_linesize * sizeof(*f->dist_coeff));
+ if (!f->dist_coeff) {
+ sws_freeVec(vec);
+ return AVERROR(ENOMEM);
+ }
+
+ for (y = 0; y < vec->length; y++) {
+ for (x = 0; x < vec->length; x++) {
+ double d = vec->coeff[x] * vec->coeff[y];
+ f->dist_coeff[x + y*f->dist_linesize] = (int)(d*(1<<10) + 0.5);
+ }
+ }
+ sws_freeVec(vec);
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ SabContext *sab = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ sab->hsub = desc->log2_chroma_w;
+ sab->vsub = desc->log2_chroma_h;
+
+ close_filter_param(&sab->luma);
+ ret = open_filter_param(&sab->luma, inlink->w, inlink->h, sab->sws_flags);
+ if (ret < 0)
+ return ret;
+
+ close_filter_param(&sab->chroma);
+ ret = open_filter_param(&sab->chroma,
+ FF_CEIL_RSHIFT(inlink->w, sab->hsub),
+ FF_CEIL_RSHIFT(inlink->h, sab->vsub), sab->sws_flags);
+ return ret;
+}
+
+#define NB_PLANES 4
+
+static void blur(uint8_t *dst, const int dst_linesize,
+ const uint8_t *src, const int src_linesize,
+ const int w, const int h, FilterParam *fp)
+{
+ int x, y;
+ FilterParam f = *fp;
+ const int radius = f.dist_width/2;
+
+ const uint8_t * const src2[NB_PLANES] = { src };
+ int src2_linesize[NB_PLANES] = { src_linesize };
+ uint8_t *dst2[NB_PLANES] = { f.pre_filter_buf };
+ int dst2_linesize[NB_PLANES] = { f.pre_filter_linesize };
+
+ sws_scale(f.pre_filter_context, src2, src2_linesize, 0, h, dst2, dst2_linesize);
+
+#define UPDATE_FACTOR do { \
+ int factor; \
+ factor = f.color_diff_coeff[COLOR_DIFF_COEFF_SIZE/2 + pre_val - \
+ f.pre_filter_buf[ix + iy*f.pre_filter_linesize]] * f.dist_coeff[dx + dy*f.dist_linesize]; \
+ sum += src[ix + iy*src_linesize] * factor; \
+ div += factor; \
+ } while (0)
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ int sum = 0;
+ int div = 0;
+ int dy;
+ const int pre_val = f.pre_filter_buf[x + y*f.pre_filter_linesize];
+ if (x >= radius && x < w - radius) {
+ for (dy = 0; dy < radius*2 + 1; dy++) {
+ int dx;
+ int iy = y+dy - radius;
+ if (iy < 0) iy = -iy;
+ else if (iy >= h) iy = h+h-iy-1;
+
+ for (dx = 0; dx < radius*2 + 1; dx++) {
+ const int ix = x+dx - radius;
+ UPDATE_FACTOR;
+ }
+ }
+ } else {
+ for (dy = 0; dy < radius*2+1; dy++) {
+ int dx;
+ int iy = y+dy - radius;
+ if (iy < 0) iy = -iy;
+ else if (iy >= h) iy = h+h-iy-1;
+
+ for (dx = 0; dx < radius*2 + 1; dx++) {
+ int ix = x+dx - radius;
+ if (ix < 0) ix = -ix;
+ else if (ix >= w) ix = w+w-ix-1;
+ UPDATE_FACTOR;
+ }
+ }
+ }
+ dst[x + y*dst_linesize] = (sum + div/2) / div;
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ SabContext *sab = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+
+ blur(outpic->data[0], outpic->linesize[0], inpic->data[0], inpic->linesize[0],
+ inlink->w, inlink->h, &sab->luma);
+ if (inpic->data[2]) {
+ int cw = FF_CEIL_RSHIFT(inlink->w, sab->hsub);
+ int ch = FF_CEIL_RSHIFT(inlink->h, sab->vsub);
+ blur(outpic->data[1], outpic->linesize[1], inpic->data[1], inpic->linesize[1], cw, ch, &sab->chroma);
+ blur(outpic->data[2], outpic->linesize[2], inpic->data[2], inpic->linesize[2], cw, ch, &sab->chroma);
+ }
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad sab_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad sab_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_sab = {
+ .name = "sab",
+ .description = NULL_IF_CONFIG_SMALL("Apply shape adaptive blur."),
+ .priv_size = sizeof(SabContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = sab_inputs,
+ .outputs = sab_outputs,
+ .priv_class = &sab_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index 73ea9d2..64b88c2 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,73 +35,128 @@
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
#include "libswscale/swscale.h"
static const char *const var_names[] = {
- "PI",
- "PHI",
- "E",
"in_w", "iw",
"in_h", "ih",
"out_w", "ow",
"out_h", "oh",
- "a", "dar",
+ "a",
"sar",
+ "dar",
"hsub",
"vsub",
+ "ohsub",
+ "ovsub",
NULL
};
enum var_name {
- VAR_PI,
- VAR_PHI,
- VAR_E,
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
- VAR_A, VAR_DAR,
+ VAR_A,
VAR_SAR,
+ VAR_DAR,
VAR_HSUB,
VAR_VSUB,
+ VAR_OHSUB,
+ VAR_OVSUB,
VARS_NB
};
typedef struct ScaleContext {
const AVClass *class;
struct SwsContext *sws; ///< software scaler context
+ struct SwsContext *isws[2]; ///< software scaler context for interlaced material
+ AVDictionary *opts;
/**
* New dimensions. Special values are:
* 0 = original width/height
* -1 = keep original aspect
+ * -N = try to keep aspect but make sure it is divisible by N
*/
int w, h;
+ char *size_str;
unsigned int flags; ///sws flags
int hsub, vsub; ///< chroma subsampling
int slice_y; ///< top of current output slice
int input_is_pal; ///< set to 1 if the input format is paletted
+ int output_is_pal; ///< set to 1 if the output format is paletted
+ int interlaced;
char *w_expr; ///< width expression string
char *h_expr; ///< height expression string
char *flags_str;
+
+ char *in_color_matrix;
+ char *out_color_matrix;
+
+ int in_range;
+ int out_range;
+
+ int out_h_chr_pos;
+ int out_v_chr_pos;
+ int in_h_chr_pos;
+ int in_v_chr_pos;
+
+ int force_original_aspect_ratio;
} ScaleContext;
-static av_cold int init(AVFilterContext *ctx)
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
ScaleContext *scale = ctx->priv;
+ int ret;
+
+ if (scale->size_str && (scale->w_expr || scale->h_expr)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Size and width/height expressions cannot be set at the same time.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (scale->w_expr && !scale->h_expr)
+ FFSWAP(char *, scale->w_expr, scale->size_str);
+
+ if (scale->size_str) {
+ char buf[32];
+ if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid size '%s'\n", scale->size_str);
+ return ret;
+ }
+ snprintf(buf, sizeof(buf)-1, "%d", scale->w);
+ av_opt_set(scale, "w", buf, 0);
+ snprintf(buf, sizeof(buf)-1, "%d", scale->h);
+ av_opt_set(scale, "h", buf, 0);
+ }
+ if (!scale->w_expr)
+ av_opt_set(scale, "w", "iw", 0);
+ if (!scale->h_expr)
+ av_opt_set(scale, "h", "ih", 0);
+
+ av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
+ scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
+
+ scale->flags = 0;
if (scale->flags_str) {
const AVClass *class = sws_get_class();
const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ);
int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
-
if (ret < 0)
return ret;
}
+ scale->opts = *opts;
+ *opts = NULL;
return 0;
}
@@ -110,7 +165,10 @@ static av_cold void uninit(AVFilterContext *ctx)
{
ScaleContext *scale = ctx->priv;
sws_freeContext(scale->sws);
+ sws_freeContext(scale->isws[0]);
+ sws_freeContext(scale->isws[1]);
scale->sws = NULL;
+ av_dict_free(&scale->opts);
}
static int query_formats(AVFilterContext *ctx)
@@ -138,7 +196,7 @@ static int query_formats(AVFilterContext *ctx)
formats = NULL;
while ((desc = av_pix_fmt_desc_next(desc))) {
pix_fmt = av_pix_fmt_desc_get_id(desc);
- if ((sws_isSupportedOutput(pix_fmt) ||
+ if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
sws_isSupportedEndiannessConversion(pix_fmt))
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats);
@@ -151,20 +209,42 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
+static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
+{
+ if (!s)
+ s = "bt601";
+
+ if (s && strstr(s, "bt709")) {
+ colorspace = AVCOL_SPC_BT709;
+ } else if (s && strstr(s, "fcc")) {
+ colorspace = AVCOL_SPC_FCC;
+ } else if (s && strstr(s, "smpte240m")) {
+ colorspace = AVCOL_SPC_SMPTE240M;
+ } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
+ colorspace = AVCOL_SPC_BT470BG;
+ }
+
+ if (colorspace < 1 || colorspace > 7) {
+ colorspace = AVCOL_SPC_BT470BG;
+ }
+
+ return sws_getCoefficients(colorspace);
+}
+
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
+ enum AVPixelFormat outfmt = outlink->format;
ScaleContext *scale = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
int64_t w, h;
double var_values[VARS_NB], res;
char *expr;
int ret;
+ int factor_w, factor_h;
- var_values[VAR_PI] = M_PI;
- var_values[VAR_PHI] = M_PHI;
- var_values[VAR_E] = M_E;
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
@@ -175,6 +255,8 @@ static int config_props(AVFilterLink *outlink)
var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
+ var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
+ var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
/* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = scale->w_expr),
@@ -196,22 +278,47 @@ static int config_props(AVFilterLink *outlink)
w = scale->w;
h = scale->h;
- /* sanity check params */
- if (w < -1 || h < -1) {
- av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
- return AVERROR(EINVAL);
+ /* Check if it is requested that the result has to be divisible by a some
+ * factor (w or h = -n with n being the factor). */
+ factor_w = 1;
+ factor_h = 1;
+ if (w < -1) {
+ factor_w = -w;
+ }
+ if (h < -1) {
+ factor_h = -h;
}
- if (w == -1 && h == -1)
+
+ if (w < 0 && h < 0)
scale->w = scale->h = 0;
if (!(w = scale->w))
w = inlink->w;
if (!(h = scale->h))
h = inlink->h;
- if (w == -1)
- w = av_rescale(h, inlink->w, inlink->h);
- if (h == -1)
- h = av_rescale(w, inlink->h, inlink->w);
+
+ /* Make sure that the result is divisible by the factor we determined
+ * earlier. If no factor was set, it is nothing will happen as the default
+ * factor is 1 */
+ if (w < 0)
+ w = av_rescale(h, inlink->w, inlink->h * factor_w) * factor_w;
+ if (h < 0)
+ h = av_rescale(w, inlink->h, inlink->w * factor_h) * factor_h;
+
+ /* Note that force_original_aspect_ratio may overwrite the previous set
+ * dimensions so that it is not divisible by the set factors anymore. */
+ if (scale->force_original_aspect_ratio) {
+ int tmp_w = av_rescale(h, inlink->w, inlink->h);
+ int tmp_h = av_rescale(w, inlink->h, inlink->w);
+
+ if (scale->force_original_aspect_ratio == 1) {
+ w = FFMIN(tmp_w, w);
+ h = FFMIN(tmp_h, h);
+ } else {
+ w = FFMAX(tmp_w, w);
+ h = FFMAX(tmp_h, h);
+ }
+ }
if (w > INT_MAX || h > INT_MAX ||
(h * inlink->w) > INT_MAX ||
@@ -222,49 +329,132 @@ static int config_props(AVFilterLink *outlink)
outlink->h = h;
/* TODO: make algorithm configurable */
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s flags:0x%0x\n",
- inlink ->w, inlink ->h, av_get_pix_fmt_name(inlink->format),
- outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
- scale->flags);
scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
+ if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
+ scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
+ av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
if (scale->sws)
sws_freeContext(scale->sws);
+ if (scale->isws[0])
+ sws_freeContext(scale->isws[0]);
+ if (scale->isws[1])
+ sws_freeContext(scale->isws[1]);
+ scale->isws[0] = scale->isws[1] = scale->sws = NULL;
if (inlink->w == outlink->w && inlink->h == outlink->h &&
inlink->format == outlink->format)
- scale->sws = NULL;
+ ;
else {
- scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format,
- outlink->w, outlink->h, outlink->format,
- scale->flags, NULL, NULL, NULL);
- if (!scale->sws)
- return AVERROR(EINVAL);
- }
+ struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ struct SwsContext **s = swscs[i];
+ *s = sws_alloc_context();
+ if (!*s)
+ return AVERROR(ENOMEM);
+
+ if (scale->opts) {
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
+ return ret;
+ }
+ }
+
+ av_opt_set_int(*s, "srcw", inlink ->w, 0);
+ av_opt_set_int(*s, "srch", inlink ->h >> !!i, 0);
+ av_opt_set_int(*s, "src_format", inlink->format, 0);
+ av_opt_set_int(*s, "dstw", outlink->w, 0);
+ av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
+ av_opt_set_int(*s, "dst_format", outfmt, 0);
+ av_opt_set_int(*s, "sws_flags", scale->flags, 0);
+
+ av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
+ av_opt_set_int(*s, "src_v_chr_pos", scale->in_v_chr_pos, 0);
+ av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
+ av_opt_set_int(*s, "dst_v_chr_pos", scale->out_v_chr_pos, 0);
+ if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
+ return ret;
+ if (!scale->interlaced)
+ break;
+ }
+ }
- if (inlink->sample_aspect_ratio.num)
- outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
- outlink->w*inlink->h},
- inlink->sample_aspect_ratio);
- else
+ if (inlink->sample_aspect_ratio.num){
+ outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
+ } else
outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
+ inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
+ inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
+ outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
+ outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
+ scale->flags);
return 0;
fail:
av_log(NULL, AV_LOG_ERROR,
- "Error when evaluating the expression '%s'\n", expr);
+ "Error when evaluating the expression '%s'.\n"
+ "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
+ expr, scale->w_expr, scale->h_expr);
return ret;
}
+static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
+{
+ ScaleContext *scale = link->dst->priv;
+ const uint8_t *in[4];
+ uint8_t *out[4];
+ int in_stride[4],out_stride[4];
+ int i;
+
+ for(i=0; i<4; i++){
+ int vsub= ((i+1)&2) ? scale->vsub : 0;
+ in_stride[i] = cur_pic->linesize[i] * mul;
+ out_stride[i] = out_buf->linesize[i] * mul;
+ in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
+ out[i] = out_buf->data[i] + field * out_buf->linesize[i];
+ }
+ if(scale->input_is_pal)
+ in[1] = cur_pic->data[1];
+ if(scale->output_is_pal)
+ out[1] = out_buf->data[1];
+
+ return sws_scale(sws, in, in_stride, y/mul, h,
+ out,out_stride);
+}
+
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
ScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ char buf[32];
+ int in_range;
+
+ if( in->width != link->w
+ || in->height != link->h
+ || in->format != link->format) {
+ int ret;
+ snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
+ av_opt_set(scale, "w", buf, 0);
+ snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
+ av_opt_set(scale, "h", buf, 0);
+
+ link->dst->inputs[0]->format = in->format;
+ link->dst->inputs[0]->w = in->width;
+ link->dst->inputs[0]->h = in->height;
+
+ if ((ret = config_props(outlink)) < 0)
+ return ret;
+ }
if (!scale->sws)
return ff_filter_frame(outlink, in);
@@ -282,38 +472,115 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
out->width = outlink->w;
out->height = outlink->h;
+ if(scale->output_is_pal)
+ avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
+
+ in_range = av_frame_get_color_range(in);
+
+ if ( scale->in_color_matrix
+ || scale->out_color_matrix
+ || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
+ || in_range != AVCOL_RANGE_UNSPECIFIED
+ || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
+ int in_full, out_full, brightness, contrast, saturation;
+ const int *inv_table, *table;
+
+ sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
+ (int **)&table, &out_full,
+ &brightness, &contrast, &saturation);
+
+ if (scale->in_color_matrix)
+ inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
+ if (scale->out_color_matrix)
+ table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
+
+ if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
+ in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
+ else if (in_range != AVCOL_RANGE_UNSPECIFIED)
+ in_full = (in_range == AVCOL_RANGE_JPEG);
+ if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
+ out_full = (scale->out_range == AVCOL_RANGE_JPEG);
+
+ sws_setColorspaceDetails(scale->sws, inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ if (scale->isws[0])
+ sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ if (scale->isws[1])
+ sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
+ table, out_full,
+ brightness, contrast, saturation);
+ }
+
av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
(int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
(int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
INT_MAX);
- sws_scale(scale->sws, in->data, in->linesize, 0, in->height,
- out->data, out->linesize);
+ if(scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)){
+ scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
+ scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1);
+ }else{
+ scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
+ }
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
+static const AVClass *child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : sws_get_class();
+}
+
#define OFFSET(x) offsetof(ScaleContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
- { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption scale_options[] = {
+ { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
+ { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
- { NULL },
+ { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 1, FLAGS },
+ { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
+ { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
+ { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
+ { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
+ { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
+ { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
+ { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
+ { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
+ { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
+ { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
+ { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
+ { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
+ { NULL }
};
static const AVClass scale_class = {
- .class_name = "scale",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ .class_name = "scale",
+ .item_name = av_default_item_name,
+ .option = scale_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ .child_class_next = child_class_next,
};
static const AVFilterPad avfilter_vf_scale_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
@@ -329,17 +596,13 @@ static const AVFilterPad avfilter_vf_scale_outputs[] = {
};
AVFilter ff_vf_scale = {
- .name = "scale",
- .description = NULL_IF_CONFIG_SMALL("Scale the input video to width:height size and/or convert the image format."),
-
- .init = init,
- .uninit = uninit,
-
+ .name = "scale",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
+ .init_dict = init_dict,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .priv_size = sizeof(ScaleContext),
- .priv_class = &scale_class,
-
- .inputs = avfilter_vf_scale_inputs,
- .outputs = avfilter_vf_scale_outputs,
+ .priv_size = sizeof(ScaleContext),
+ .priv_class = &scale_class,
+ .inputs = avfilter_vf_scale_inputs,
+ .outputs = avfilter_vf_scale_outputs,
};
diff --git a/libavfilter/vf_select.c b/libavfilter/vf_select.c
deleted file mode 100644
index 8d0e6c3..0000000
--- a/libavfilter/vf_select.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright (c) 2011 Stefano Sabatini
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * filter for selecting which frame passes in the filterchain
- */
-
-#include "libavutil/eval.h"
-#include "libavutil/fifo.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "avfilter.h"
-#include "internal.h"
-#include "video.h"
-
-static const char *const var_names[] = {
- "E", ///< Euler number
- "PHI", ///< golden ratio
- "PI", ///< greek pi
-
- "TB", ///< timebase
-
- "pts", ///< original pts in the file of the frame
- "start_pts", ///< first PTS in the stream, expressed in TB units
- "prev_pts", ///< previous frame PTS
- "prev_selected_pts", ///< previous selected frame PTS
-
- "t", ///< first PTS in seconds
- "start_t", ///< first PTS in the stream, expressed in seconds
- "prev_t", ///< previous frame time
- "prev_selected_t", ///< previously selected time
-
- "pict_type", ///< the type of picture in the movie
- "I",
- "P",
- "B",
- "S",
- "SI",
- "SP",
- "BI",
-
- "interlace_type", ///< the frame interlace type
- "PROGRESSIVE",
- "TOPFIRST",
- "BOTTOMFIRST",
-
- "n", ///< frame number (starting from zero)
- "selected_n", ///< selected frame number (starting from zero)
- "prev_selected_n", ///< number of the last selected frame
-
- "key", ///< tell if the frame is a key frame
- "pos", ///< original position in the file of the frame
-
- NULL
-};
-
-enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
-
- VAR_TB,
-
- VAR_PTS,
- VAR_START_PTS,
- VAR_PREV_PTS,
- VAR_PREV_SELECTED_PTS,
-
- VAR_T,
- VAR_START_T,
- VAR_PREV_T,
- VAR_PREV_SELECTED_T,
-
- VAR_PICT_TYPE,
- VAR_PICT_TYPE_I,
- VAR_PICT_TYPE_P,
- VAR_PICT_TYPE_B,
- VAR_PICT_TYPE_S,
- VAR_PICT_TYPE_SI,
- VAR_PICT_TYPE_SP,
- VAR_PICT_TYPE_BI,
-
- VAR_INTERLACE_TYPE,
- VAR_INTERLACE_TYPE_P,
- VAR_INTERLACE_TYPE_T,
- VAR_INTERLACE_TYPE_B,
-
- VAR_N,
- VAR_SELECTED_N,
- VAR_PREV_SELECTED_N,
-
- VAR_KEY,
-
- VAR_VARS_NB
-};
-
-#define FIFO_SIZE 8
-
-typedef struct SelectContext {
- const AVClass *class;
- char *expr_str;
- AVExpr *expr;
- double var_values[VAR_VARS_NB];
- double select;
- int cache_frames;
- AVFifoBuffer *pending_frames; ///< FIFO buffer of video frames
-} SelectContext;
-
-static av_cold int init(AVFilterContext *ctx)
-{
- SelectContext *select = ctx->priv;
- int ret;
-
- if ((ret = av_expr_parse(&select->expr, select->expr_str,
- var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
- select->expr_str);
- return ret;
- }
-
- select->pending_frames = av_fifo_alloc(FIFO_SIZE*sizeof(AVFrame*));
- if (!select->pending_frames) {
- av_log(ctx, AV_LOG_ERROR, "Failed to allocate pending frames buffer.\n");
- return AVERROR(ENOMEM);
- }
- return 0;
-}
-
-#define INTERLACE_TYPE_P 0
-#define INTERLACE_TYPE_T 1
-#define INTERLACE_TYPE_B 2
-
-static int config_input(AVFilterLink *inlink)
-{
- SelectContext *select = inlink->dst->priv;
-
- select->var_values[VAR_E] = M_E;
- select->var_values[VAR_PHI] = M_PHI;
- select->var_values[VAR_PI] = M_PI;
-
- select->var_values[VAR_N] = 0.0;
- select->var_values[VAR_SELECTED_N] = 0.0;
-
- select->var_values[VAR_TB] = av_q2d(inlink->time_base);
-
- select->var_values[VAR_PREV_PTS] = NAN;
- select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
- select->var_values[VAR_PREV_SELECTED_T] = NAN;
- select->var_values[VAR_START_PTS] = NAN;
- select->var_values[VAR_START_T] = NAN;
-
- select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
- select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
- select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
- select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
- select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
-
- select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
- select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
- select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;;
-
- return 0;
-}
-
-#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
-#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
-
-static int select_frame(AVFilterContext *ctx, AVFrame *frame)
-{
- SelectContext *select = ctx->priv;
- AVFilterLink *inlink = ctx->inputs[0];
- double res;
-
- if (isnan(select->var_values[VAR_START_PTS]))
- select->var_values[VAR_START_PTS] = TS2D(frame->pts);
- if (isnan(select->var_values[VAR_START_T]))
- select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
-
- select->var_values[VAR_PTS] = TS2D(frame->pts);
- select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
- select->var_values[VAR_PREV_PTS] = TS2D(frame->pts);
-
- select->var_values[VAR_INTERLACE_TYPE] =
- !frame->interlaced_frame ? INTERLACE_TYPE_P :
- frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
- select->var_values[VAR_PICT_TYPE] = frame->pict_type;
-
- res = av_expr_eval(select->expr, select->var_values, NULL);
-
- select->var_values[VAR_N] += 1.0;
-
- if (res) {
- select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
- select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
- select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
- select->var_values[VAR_SELECTED_N] += 1.0;
- }
- return res;
-}
-
-static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
-{
- SelectContext *select = inlink->dst->priv;
-
- select->select = select_frame(inlink->dst, frame);
- if (select->select) {
- /* frame was requested through poll_frame */
- if (select->cache_frames) {
- if (!av_fifo_space(select->pending_frames)) {
- av_log(inlink->dst, AV_LOG_ERROR,
- "Buffering limit reached, cannot cache more frames\n");
- av_frame_free(&frame);
- } else
- av_fifo_generic_write(select->pending_frames, &frame,
- sizeof(frame), NULL);
- return 0;
- }
- return ff_filter_frame(inlink->dst->outputs[0], frame);
- }
-
- av_frame_free(&frame);
- return 0;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- SelectContext *select = ctx->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
- select->select = 0;
-
- if (av_fifo_size(select->pending_frames)) {
- AVFrame *frame;
-
- av_fifo_generic_read(select->pending_frames, &frame, sizeof(frame), NULL);
- return ff_filter_frame(outlink, frame);
- }
-
- while (!select->select) {
- int ret = ff_request_frame(inlink);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int poll_frame(AVFilterLink *outlink)
-{
- SelectContext *select = outlink->src->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
- int count, ret;
-
- if (!av_fifo_size(select->pending_frames)) {
- if ((count = ff_poll_frame(inlink)) <= 0)
- return count;
- /* request frame from input, and apply select condition to it */
- select->cache_frames = 1;
- while (count-- && av_fifo_space(select->pending_frames)) {
- ret = ff_request_frame(inlink);
- if (ret < 0)
- break;
- }
- select->cache_frames = 0;
- }
-
- return av_fifo_size(select->pending_frames)/sizeof(AVFrame*);
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- SelectContext *select = ctx->priv;
- AVFrame *frame;
-
- av_expr_free(select->expr);
- select->expr = NULL;
-
- while (select->pending_frames &&
- av_fifo_generic_read(select->pending_frames, &frame, sizeof(frame), NULL) == sizeof(frame))
- av_frame_free(&frame);
- av_fifo_free(select->pending_frames);
- select->pending_frames = NULL;
-}
-
-#define OFFSET(x) offsetof(SelectContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "expr", "An expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags = FLAGS },
- { NULL },
-};
-
-static const AVClass select_class = {
- .class_name = "select",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVFilterPad avfilter_vf_select_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .config_props = config_input,
- .filter_frame = filter_frame,
- },
- { NULL }
-};
-
-static const AVFilterPad avfilter_vf_select_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .poll_frame = poll_frame,
- .request_frame = request_frame,
- },
- { NULL }
-};
-
-AVFilter ff_vf_select = {
- .name = "select",
- .description = NULL_IF_CONFIG_SMALL("Select frames to pass in output."),
- .init = init,
- .uninit = uninit,
-
- .priv_size = sizeof(SelectContext),
- .priv_class = &select_class,
-
- .inputs = avfilter_vf_select_inputs,
- .outputs = avfilter_vf_select_outputs,
-};
diff --git a/libavfilter/vf_separatefields.c b/libavfilter/vf_separatefields.c
new file mode 100644
index 0000000..42ce682
--- /dev/null
+++ b/libavfilter/vf_separatefields.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ int nb_planes;
+ AVFrame *second;
+} SeparateFieldsContext;
+
+static int config_props_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SeparateFieldsContext *sf = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ sf->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ if (inlink->h & 1) {
+ av_log(ctx, AV_LOG_ERROR, "height must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ outlink->time_base.num = inlink->time_base.num;
+ outlink->time_base.den = inlink->time_base.den * 2;
+ outlink->frame_rate.num = inlink->frame_rate.num * 2;
+ outlink->frame_rate.den = inlink->frame_rate.den;
+ outlink->w = inlink->w;
+ outlink->h = inlink->h / 2;
+
+ return 0;
+}
+
+static void extract_field(AVFrame *frame, int nb_planes, int type)
+{
+ int i;
+
+ for (i = 0; i < nb_planes; i++) {
+ if (type)
+ frame->data[i] = frame->data[i] + frame->linesize[i];
+ frame->linesize[i] *= 2;
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SeparateFieldsContext *sf = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int ret;
+
+ inpicref->height = outlink->h;
+ inpicref->interlaced_frame = 0;
+
+ if (!sf->second) {
+ goto clone;
+ } else {
+ AVFrame *second = sf->second;
+
+ extract_field(second, sf->nb_planes, second->top_field_first);
+
+ if (second->pts != AV_NOPTS_VALUE &&
+ inpicref->pts != AV_NOPTS_VALUE)
+ second->pts += inpicref->pts;
+ else
+ second->pts = AV_NOPTS_VALUE;
+
+ ret = ff_filter_frame(outlink, second);
+ if (ret < 0)
+ return ret;
+clone:
+ sf->second = av_frame_clone(inpicref);
+ if (!sf->second)
+ return AVERROR(ENOMEM);
+ }
+
+ extract_field(inpicref, sf->nb_planes, !inpicref->top_field_first);
+
+ if (inpicref->pts != AV_NOPTS_VALUE)
+ inpicref->pts *= 2;
+
+ return ff_filter_frame(outlink, inpicref);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SeparateFieldsContext *sf = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && sf->second) {
+ sf->second->pts *= 2;
+ extract_field(sf->second, sf->nb_planes, sf->second->top_field_first);
+ ret = ff_filter_frame(outlink, sf->second);
+ sf->second = 0;
+ }
+
+ return ret;
+}
+
+static const AVFilterPad separatefields_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad separatefields_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_separatefields = {
+ .name = "separatefields",
+ .description = NULL_IF_CONFIG_SMALL("Split input video frames into fields."),
+ .priv_size = sizeof(SeparateFieldsContext),
+ .inputs = separatefields_inputs,
+ .outputs = separatefields_outputs,
+};
diff --git a/libavfilter/vf_setfield.c b/libavfilter/vf_setfield.c
new file mode 100644
index 0000000..eb4df74
--- /dev/null
+++ b/libavfilter/vf_setfield.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * set field order
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "video.h"
+
+enum SetFieldMode {
+ MODE_AUTO = -1,
+ MODE_BFF,
+ MODE_TFF,
+ MODE_PROG,
+};
+
+typedef struct {
+ const AVClass *class;
+ enum SetFieldMode mode;
+} SetFieldContext;
+
+#define OFFSET(x) offsetof(SetFieldContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption setfield_options[] = {
+ {"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_AUTO}, -1, MODE_PROG, FLAGS, "mode"},
+ {"auto", "keep the same input field", 0, AV_OPT_TYPE_CONST, {.i64=MODE_AUTO}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"bff", "mark as bottom-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BFF}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"tff", "mark as top-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_TFF}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"prog", "mark as progressive", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PROG}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(setfield);
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ SetFieldContext *setfield = inlink->dst->priv;
+
+ if (setfield->mode == MODE_PROG) {
+ picref->interlaced_frame = 0;
+ } else if (setfield->mode != MODE_AUTO) {
+ picref->interlaced_frame = 1;
+ picref->top_field_first = setfield->mode;
+ }
+ return ff_filter_frame(inlink->dst->outputs[0], picref);
+}
+
+static const AVFilterPad setfield_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad setfield_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_setfield = {
+ .name = "setfield",
+ .description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."),
+ .priv_size = sizeof(SetFieldContext),
+ .priv_class = &setfield_class,
+ .inputs = setfield_inputs,
+ .outputs = setfield_outputs,
+};
diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c
index ede1765..aa3bc83 100644
--- a/libavfilter/vf_showinfo.c
+++ b/libavfilter/vf_showinfo.c
@@ -1,19 +1,19 @@
/*
* Copyright (c) 2011 Stefano Sabatini
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,15 +30,12 @@
#include "libavutil/internal.h"
#include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h"
+#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
-typedef struct ShowInfoContext {
- unsigned int frame;
-} ShowInfoContext;
-
static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd)
{
AVStereo3D *stereo;
@@ -69,34 +66,49 @@ static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd)
av_log(ctx, AV_LOG_INFO, " (inverted)");
}
+static void update_sample_stats(const uint8_t *src, int len, int64_t *sum, int64_t *sum2)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ *sum += src[i];
+ *sum2 += src[i] * src[i];
+ }
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
- ShowInfoContext *showinfo = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
uint32_t plane_checksum[4] = {0}, checksum = 0;
+ int64_t sum[4] = {0}, sum2[4] = {0};
+ int32_t pixelcount[4] = {0};
int i, plane, vsub = desc->log2_chroma_h;
- for (plane = 0; frame->data[plane] && plane < 4; plane++) {
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
uint8_t *data = frame->data[plane];
- int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;
+ int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
int linesize = av_image_get_linesize(frame->format, frame->width, plane);
+
if (linesize < 0)
return linesize;
for (i = 0; i < h; i++) {
plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
checksum = av_adler32_update(checksum, data, linesize);
+
+ update_sample_stats(data, linesize, sum+plane, sum2+plane);
+ pixelcount[plane] += linesize;
data += frame->linesize[plane];
}
}
av_log(ctx, AV_LOG_INFO,
- "n:%d pts:%"PRId64" pts_time:%f "
+ "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
- "checksum:%"PRIu32" plane_checksum:[%"PRIu32" %"PRIu32" %"PRIu32" %"PRIu32"]\n",
- showinfo->frame,
- frame->pts, frame->pts * av_q2d(inlink->time_base),
+ "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
+ inlink->frame_count,
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
desc->name,
frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
frame->width, frame->height,
@@ -104,7 +116,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
frame->top_field_first ? 'T' : 'B', /* Top / Bottom */
frame->key_frame,
av_get_picture_type_char(frame->pict_type),
- checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);
+ checksum, plane_checksum[0]);
+
+ for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
+ av_log(ctx, AV_LOG_INFO, " %08"PRIX32, plane_checksum[plane]);
+ av_log(ctx, AV_LOG_INFO, "] mean:[");
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
+ av_log(ctx, AV_LOG_INFO, "%"PRId64" ", (sum[plane] + pixelcount[plane]/2) / pixelcount[plane]);
+ av_log(ctx, AV_LOG_INFO, "\b] stdev:[");
+ for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
+ av_log(ctx, AV_LOG_INFO, "%3.1f ",
+ sqrt((sum2[plane] - sum[plane]*(double)sum[plane]/pixelcount[plane])/pixelcount[plane]));
+ av_log(ctx, AV_LOG_INFO, "\b]\n");
for (i = 0; i < frame->nb_side_data; i++) {
AVFrameSideData *sd = frame->side_data[i];
@@ -136,16 +159,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
av_log(ctx, AV_LOG_INFO, "\n");
}
- showinfo->frame++;
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
static const AVFilterPad avfilter_vf_showinfo_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = ff_null_get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -161,10 +182,6 @@ static const AVFilterPad avfilter_vf_showinfo_outputs[] = {
AVFilter ff_vf_showinfo = {
.name = "showinfo",
.description = NULL_IF_CONFIG_SMALL("Show textual information for each video frame."),
-
- .priv_size = sizeof(ShowInfoContext),
-
- .inputs = avfilter_vf_showinfo_inputs,
-
- .outputs = avfilter_vf_showinfo_outputs,
+ .inputs = avfilter_vf_showinfo_inputs,
+ .outputs = avfilter_vf_showinfo_outputs,
};
diff --git a/libavfilter/vf_shuffleplanes.c b/libavfilter/vf_shuffleplanes.c
index 1bc77b0..80085cd 100644
--- a/libavfilter/vf_shuffleplanes.c
+++ b/libavfilter/vf_shuffleplanes.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -125,7 +125,7 @@ fail:
}
#define OFFSET(x) offsetof(ShufflePlanesContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption shuffleplanes_options[] = {
{ "map0", "Index of the input plane to be used as the first output plane ", OFFSET(map[0]), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 4, FLAGS },
{ "map1", "Index of the input plane to be used as the second output plane ", OFFSET(map[1]), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 4, FLAGS },
diff --git a/libavfilter/vf_signalstats.c b/libavfilter/vf_signalstats.c
new file mode 100644
index 0000000..47545aa
--- /dev/null
+++ b/libavfilter/vf_signalstats.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2010 Mark Heath mjpeg0 @ silicontrip dot org
+ * Copyright (c) 2014 Clément Bœsch
+ * Copyright (c) 2014 Dave Rice @dericed
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+enum FilterMode {
+ FILTER_NONE = -1,
+ FILTER_TOUT,
+ FILTER_VREP,
+ FILTER_BRNG,
+ FILT_NUMB
+};
+
+typedef struct {
+ const AVClass *class;
+ int chromah; // height of chroma plane
+ int chromaw; // width of chroma plane
+ int hsub; // horizontal subsampling
+ int vsub; // vertical subsampling
+ int fs; // pixel count per frame
+ int cfs; // pixel count per frame of chroma planes
+ enum FilterMode outfilter;
+ int filters;
+ AVFrame *frame_prev;
+ char *vrep_line;
+ uint8_t rgba_color[4];
+ int yuv_color[3];
+} SignalstatsContext;
+
+#define OFFSET(x) offsetof(SignalstatsContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption signalstats_options[] = {
+ {"stat", "set statistics filters", OFFSET(filters), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "filters"},
+ {"tout", "analyze pixels for temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_TOUT}, 0, 0, FLAGS, "filters"},
+ {"vrep", "analyze video lines for vertical line repitition", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_VREP}, 0, 0, FLAGS, "filters"},
+ {"brng", "analyze for pixels outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_BRNG}, 0, 0, FLAGS, "filters"},
+ {"out", "set video filter", OFFSET(outfilter), AV_OPT_TYPE_INT, {.i64=FILTER_NONE}, -1, FILT_NUMB-1, FLAGS, "out"},
+ {"tout", "highlight pixels that depict temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_TOUT}, 0, 0, FLAGS, "out"},
+ {"vrep", "highlight video lines that depict vertical line repitition", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_VREP}, 0, 0, FLAGS, "out"},
+ {"brng", "highlight pixels that are outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_BRNG}, 0, 0, FLAGS, "out"},
+ {"c", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
+ {"color", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(signalstats);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ uint8_t r, g, b;
+ SignalstatsContext *s = ctx->priv;
+
+ if (s->outfilter != FILTER_NONE)
+ s->filters |= 1 << s->outfilter;
+
+ r = s->rgba_color[0];
+ g = s->rgba_color[1];
+ b = s->rgba_color[2];
+ s->yuv_color[0] = (( 66*r + 129*g + 25*b + (1<<7)) >> 8) + 16;
+ s->yuv_color[1] = ((-38*r + -74*g + 112*b + (1<<7)) >> 8) + 128;
+ s->yuv_color[2] = ((112*r + -94*g + -18*b + (1<<7)) >> 8) + 128;
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SignalstatsContext *s = ctx->priv;
+ av_frame_free(&s->frame_prev);
+ av_freep(&s->vrep_line);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // TODO: add more
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ SignalstatsContext *s = ctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
+ outlink->w = inlink->w;
+ outlink->h = inlink->h;
+
+ s->chromaw = FF_CEIL_RSHIFT(inlink->w, s->hsub);
+ s->chromah = FF_CEIL_RSHIFT(inlink->h, s->vsub);
+
+ s->fs = inlink->w * inlink->h;
+ s->cfs = s->chromaw * s->chromah;
+
+ if (s->filters & 1<<FILTER_VREP) {
+ s->vrep_line = av_malloc(inlink->h * sizeof(*s->vrep_line));
+ if (!s->vrep_line)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static void burn_frame(SignalstatsContext *s, AVFrame *f, int x, int y)
+{
+ const int chromax = x >> s->hsub;
+ const int chromay = y >> s->vsub;
+ f->data[0][y * f->linesize[0] + x] = s->yuv_color[0];
+ f->data[1][chromay * f->linesize[1] + chromax] = s->yuv_color[1];
+ f->data[2][chromay * f->linesize[2] + chromax] = s->yuv_color[2];
+}
+
+static int filter_brng(SignalstatsContext *s, const AVFrame *in, AVFrame *out, int y, int w, int h)
+{
+ int x, score = 0;
+ const int yc = y >> s->vsub;
+ const uint8_t *pluma = &in->data[0][y * in->linesize[0]];
+ const uint8_t *pchromau = &in->data[1][yc * in->linesize[1]];
+ const uint8_t *pchromav = &in->data[2][yc * in->linesize[2]];
+
+ for (x = 0; x < w; x++) {
+ const int xc = x >> s->hsub;
+ const int luma = pluma[x];
+ const int chromau = pchromau[xc];
+ const int chromav = pchromav[xc];
+ const int filt = luma < 16 || luma > 235 ||
+ chromau < 16 || chromau > 240 ||
+ chromav < 16 || chromav > 240;
+ score += filt;
+ if (out && filt)
+ burn_frame(s, out, x, y);
+ }
+ return score;
+}
+
+static int filter_tout_outlier(uint8_t x, uint8_t y, uint8_t z)
+{
+ return ((abs(x - y) + abs (z - y)) / 2) - abs(z - x) > 4; // make 4 configurable?
+}
+
+static int filter_tout(SignalstatsContext *s, const AVFrame *in, AVFrame *out, int y, int w, int h)
+{
+ const uint8_t *p = in->data[0];
+ int lw = in->linesize[0];
+ int x, score = 0, filt;
+
+ if (y - 1 < 0 || y + 1 >= h)
+ return 0;
+
+ // detect two pixels above and below (to eliminate interlace artefacts)
+ // should check that video format is infact interlaced.
+
+#define FILTER(i, j) \
+filter_tout_outlier(p[(y-j) * lw + x + i], \
+ p[ y * lw + x + i], \
+ p[(y+j) * lw + x + i])
+
+#define FILTER3(j) (FILTER(-1, j) && FILTER(0, j) && FILTER(1, j))
+
+ if (y - 2 >= 0 && y + 2 < h) {
+ for (x = 1; x < w - 1; x++) {
+ filt = FILTER3(2) && FILTER3(1);
+ score += filt;
+ if (filt && out)
+ burn_frame(s, out, x, y);
+ }
+ } else {
+ for (x = 1; x < w - 1; x++) {
+ filt = FILTER3(1);
+ score += filt;
+ if (filt && out)
+ burn_frame(s, out, x, y);
+ }
+ }
+ return score;
+}
+
+#define VREP_START 4
+
+static void filter_init_vrep(SignalstatsContext *s, const AVFrame *p, int w, int h)
+{
+ int i, y;
+ int lw = p->linesize[0];
+
+ for (y = VREP_START; y < h; y++) {
+ int totdiff = 0;
+ int y2lw = (y - VREP_START) * lw;
+ int ylw = y * lw;
+
+ for (i = 0; i < w; i++)
+ totdiff += abs(p->data[0][y2lw + i] - p->data[0][ylw + i]);
+
+ /* this value should be definable */
+ s->vrep_line[y] = totdiff < w;
+ }
+}
+
+static int filter_vrep(SignalstatsContext *s, const AVFrame *in, AVFrame *out, int y, int w, int h)
+{
+ int x, score = 0;
+
+ if (y < VREP_START)
+ return 0;
+
+ for (x = 0; x < w; x++) {
+ if (s->vrep_line[y]) {
+ score++;
+ if (out)
+ burn_frame(s, out, x, y);
+ }
+ }
+ return score;
+}
+
+static const struct {
+ const char *name;
+ void (*init)(SignalstatsContext *s, const AVFrame *p, int w, int h);
+ int (*process)(SignalstatsContext *s, const AVFrame *in, AVFrame *out, int y, int w, int h);
+} filters_def[] = {
+ {"TOUT", NULL, filter_tout},
+ {"VREP", filter_init_vrep, filter_vrep},
+ {"BRNG", NULL, filter_brng},
+ {NULL}
+};
+
+#define DEPTH 256
+
+static int filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ SignalstatsContext *s = link->dst->priv;
+ AVFilterLink *outlink = link->dst->outputs[0];
+ AVFrame *out = in;
+ int i, j;
+ int w = 0, cw = 0, // in
+ pw = 0, cpw = 0; // prev
+ int yuv, yuvu, yuvv;
+ int fil;
+ char metabuf[128];
+ unsigned int histy[DEPTH] = {0},
+ histu[DEPTH] = {0},
+ histv[DEPTH] = {0},
+ histhue[360] = {0},
+ histsat[DEPTH] = {0}; // limited to 8 bit data.
+ int miny = -1, minu = -1, minv = -1;
+ int maxy = -1, maxu = -1, maxv = -1;
+ int lowy = -1, lowu = -1, lowv = -1;
+ int highy = -1, highu = -1, highv = -1;
+ int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
+ int lowp, highp, clowp, chighp;
+ int accy, accu, accv;
+ int accsat, acchue = 0;
+ int medhue, maxhue;
+ int toty = 0, totu = 0, totv = 0, totsat=0;
+ int tothue = 0;
+ int dify = 0, difu = 0, difv = 0;
+
+ int filtot[FILT_NUMB] = {0};
+ AVFrame *prev;
+
+ if (!s->frame_prev)
+ s->frame_prev = av_frame_clone(in);
+
+ prev = s->frame_prev;
+
+ if (s->outfilter != FILTER_NONE)
+ out = av_frame_clone(in);
+
+ for (fil = 0; fil < FILT_NUMB; fil ++)
+ if ((s->filters & 1<<fil) && filters_def[fil].init)
+ filters_def[fil].init(s, in, link->w, link->h);
+
+ // Calculate luma histogram and difference with previous frame or field.
+ for (j = 0; j < link->h; j++) {
+ for (i = 0; i < link->w; i++) {
+ yuv = in->data[0][w + i];
+ histy[yuv]++;
+ dify += abs(in->data[0][w + i] - prev->data[0][pw + i]);
+ }
+ w += in->linesize[0];
+ pw += prev->linesize[0];
+ }
+
+ // Calculate chroma histogram and difference with previous frame or field.
+ for (j = 0; j < s->chromah; j++) {
+ for (i = 0; i < s->chromaw; i++) {
+ int sat, hue;
+
+ yuvu = in->data[1][cw+i];
+ yuvv = in->data[2][cw+i];
+ histu[yuvu]++;
+ difu += abs(in->data[1][cw+i] - prev->data[1][cpw+i]);
+ histv[yuvv]++;
+ difv += abs(in->data[2][cw+i] - prev->data[2][cpw+i]);
+
+ // int or round?
+ sat = hypot(yuvu - 128, yuvv - 128);
+ histsat[sat]++;
+ hue = floor((180 / M_PI) * atan2f(yuvu-128, yuvv-128) + 180);
+ histhue[hue]++;
+ }
+ cw += in->linesize[1];
+ cpw += prev->linesize[1];
+ }
+
+ for (j = 0; j < link->h; j++) {
+ for (fil = 0; fil < FILT_NUMB; fil ++) {
+ if (s->filters & 1<<fil) {
+ AVFrame *dbg = out != in && s->outfilter == fil ? out : NULL;
+ filtot[fil] += filters_def[fil].process(s, in, dbg, j, link->w, link->h);
+ }
+ }
+ }
+
+ // find low / high based on histogram percentile
+ // these only need to be calculated once.
+
+ lowp = lrint(s->fs * 10 / 100.);
+ highp = lrint(s->fs * 90 / 100.);
+ clowp = lrint(s->cfs * 10 / 100.);
+ chighp = lrint(s->cfs * 90 / 100.);
+
+ accy = accu = accv = accsat = 0;
+ for (fil = 0; fil < DEPTH; fil++) {
+ if (miny < 0 && histy[fil]) miny = fil;
+ if (minu < 0 && histu[fil]) minu = fil;
+ if (minv < 0 && histv[fil]) minv = fil;
+ if (minsat < 0 && histsat[fil]) minsat = fil;
+
+ if (histy[fil]) maxy = fil;
+ if (histu[fil]) maxu = fil;
+ if (histv[fil]) maxv = fil;
+ if (histsat[fil]) maxsat = fil;
+
+ toty += histy[fil] * fil;
+ totu += histu[fil] * fil;
+ totv += histv[fil] * fil;
+ totsat += histsat[fil] * fil;
+
+ accy += histy[fil];
+ accu += histu[fil];
+ accv += histv[fil];
+ accsat += histsat[fil];
+
+ if (lowy == -1 && accy >= lowp) lowy = fil;
+ if (lowu == -1 && accu >= clowp) lowu = fil;
+ if (lowv == -1 && accv >= clowp) lowv = fil;
+ if (lowsat == -1 && accsat >= clowp) lowsat = fil;
+
+ if (highy == -1 && accy >= highp) highy = fil;
+ if (highu == -1 && accu >= chighp) highu = fil;
+ if (highv == -1 && accv >= chighp) highv = fil;
+ if (highsat == -1 && accsat >= chighp) highsat = fil;
+ }
+
+ maxhue = histhue[0];
+ medhue = -1;
+ for (fil = 0; fil < 360; fil++) {
+ tothue += histhue[fil] * fil;
+ acchue += histhue[fil];
+
+ if (medhue == -1 && acchue > s->cfs / 2)
+ medhue = fil;
+ if (histhue[fil] > maxhue) {
+ maxhue = histhue[fil];
+ }
+ }
+
+ av_frame_free(&s->frame_prev);
+ s->frame_prev = av_frame_clone(in);
+
+#define SET_META(key, fmt, val) do { \
+ snprintf(metabuf, sizeof(metabuf), fmt, val); \
+ av_dict_set(&out->metadata, "lavfi.signalstats." key, metabuf, 0); \
+} while (0)
+
+ SET_META("YMIN", "%d", miny);
+ SET_META("YLOW", "%d", lowy);
+ SET_META("YAVG", "%g", 1.0 * toty / s->fs);
+ SET_META("YHIGH", "%d", highy);
+ SET_META("YMAX", "%d", maxy);
+
+ SET_META("UMIN", "%d", minu);
+ SET_META("ULOW", "%d", lowu);
+ SET_META("UAVG", "%g", 1.0 * totu / s->cfs);
+ SET_META("UHIGH", "%d", highu);
+ SET_META("UMAX", "%d", maxu);
+
+ SET_META("VMIN", "%d", minv);
+ SET_META("VLOW", "%d", lowv);
+ SET_META("VAVG", "%g", 1.0 * totv / s->cfs);
+ SET_META("VHIGH", "%d", highv);
+ SET_META("VMAX", "%d", maxv);
+
+ SET_META("SATMIN", "%d", minsat);
+ SET_META("SATLOW", "%d", lowsat);
+ SET_META("SATAVG", "%g", 1.0 * totsat / s->cfs);
+ SET_META("SATHIGH", "%d", highsat);
+ SET_META("SATMAX", "%d", maxsat);
+
+ SET_META("HUEMED", "%d", medhue);
+ SET_META("HUEAVG", "%g", 1.0 * tothue / s->cfs);
+
+ SET_META("YDIF", "%g", 1.0 * dify / s->fs);
+ SET_META("UDIF", "%g", 1.0 * difu / s->cfs);
+ SET_META("VDIF", "%g", 1.0 * difv / s->cfs);
+
+ for (fil = 0; fil < FILT_NUMB; fil ++) {
+ if (s->filters & 1<<fil) {
+ char metaname[128];
+ snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
+ snprintf(metaname, sizeof(metaname), "lavfi.signalstats.%s", filters_def[fil].name);
+ av_dict_set(&out->metadata, metaname, metabuf, 0);
+ }
+ }
+
+ if (in != out)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad signalstats_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad signalstats_outputs[] = {
+ {
+ .name = "default",
+ .config_props = config_props,
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_signalstats = {
+ .name = "signalstats",
+ .description = "Generate statistics from video analysis.",
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .priv_size = sizeof(SignalstatsContext),
+ .inputs = signalstats_inputs,
+ .outputs = signalstats_outputs,
+ .priv_class = &signalstats_class,
+};
diff --git a/libavfilter/vf_smartblur.c b/libavfilter/vf_smartblur.c
new file mode 100644
index 0000000..b09ec90
--- /dev/null
+++ b/libavfilter/vf_smartblur.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2012 Jeremy Tran
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Apply a smartblur filter to the input video
+ * Ported from MPlayer libmpcodecs/vf_smartblur.c by Michael Niedermayer.
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libswscale/swscale.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define RADIUS_MIN 0.1
+#define RADIUS_MAX 5.0
+
+#define STRENGTH_MIN -1.0
+#define STRENGTH_MAX 1.0
+
+#define THRESHOLD_MIN -30
+#define THRESHOLD_MAX 30
+
+typedef struct {
+ float radius;
+ float strength;
+ int threshold;
+ float quality;
+ struct SwsContext *filter_context;
+} FilterParam;
+
+typedef struct {
+ const AVClass *class;
+ FilterParam luma;
+ FilterParam chroma;
+ int hsub;
+ int vsub;
+ unsigned int sws_flags;
+} SmartblurContext;
+
+#define OFFSET(x) offsetof(SmartblurContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption smartblur_options[] = {
+ { "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "lr" , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
+ { "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+ { "ls", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
+ { "luma_threshold", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
+ { "lt", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
+
+ { "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "cr", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
+ { "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+ { "cs", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
+ { "chroma_threshold", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
+ { "ct", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(smartblur);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ SmartblurContext *sblur = ctx->priv;
+
+ /* make chroma default to luma values, if not explicitly set */
+ if (sblur->chroma.radius < RADIUS_MIN)
+ sblur->chroma.radius = sblur->luma.radius;
+ if (sblur->chroma.strength < STRENGTH_MIN)
+ sblur->chroma.strength = sblur->luma.strength;
+ if (sblur->chroma.threshold < THRESHOLD_MIN)
+ sblur->chroma.threshold = sblur->luma.threshold;
+
+ sblur->luma.quality = sblur->chroma.quality = 3.0;
+ sblur->sws_flags = SWS_BICUBIC;
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "luma_radius:%f luma_strength:%f luma_threshold:%d "
+ "chroma_radius:%f chroma_strength:%f chroma_threshold:%d\n",
+ sblur->luma.radius, sblur->luma.strength, sblur->luma.threshold,
+ sblur->chroma.radius, sblur->chroma.strength, sblur->chroma.threshold);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SmartblurContext *sblur = ctx->priv;
+
+ sws_freeContext(sblur->luma.filter_context);
+ sws_freeContext(sblur->chroma.filter_context);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
+{
+ SwsVector *vec;
+ SwsFilter sws_filter;
+
+ vec = sws_getGaussianVec(f->radius, f->quality);
+
+ if (!vec)
+ return AVERROR(EINVAL);
+
+ sws_scaleVec(vec, f->strength);
+ vec->coeff[vec->length / 2] += 1.0 - f->strength;
+ sws_filter.lumH = sws_filter.lumV = vec;
+ sws_filter.chrH = sws_filter.chrV = NULL;
+ f->filter_context = sws_getCachedContext(NULL,
+ width, height, AV_PIX_FMT_GRAY8,
+ width, height, AV_PIX_FMT_GRAY8,
+ flags, &sws_filter, NULL, NULL);
+
+ sws_freeVec(vec);
+
+ if (!f->filter_context)
+ return AVERROR(EINVAL);
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ SmartblurContext *sblur = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ sblur->hsub = desc->log2_chroma_w;
+ sblur->vsub = desc->log2_chroma_h;
+
+ alloc_sws_context(&sblur->luma, inlink->w, inlink->h, sblur->sws_flags);
+ alloc_sws_context(&sblur->chroma,
+ FF_CEIL_RSHIFT(inlink->w, sblur->hsub),
+ FF_CEIL_RSHIFT(inlink->h, sblur->vsub),
+ sblur->sws_flags);
+
+ return 0;
+}
+
+static void blur(uint8_t *dst, const int dst_linesize,
+ const uint8_t *src, const int src_linesize,
+ const int w, const int h, const int threshold,
+ struct SwsContext *filter_context)
+{
+ int x, y;
+ int orig, filtered;
+ int diff;
+ /* Declare arrays of 4 to get aligned data */
+ const uint8_t* const src_array[4] = {src};
+ uint8_t *dst_array[4] = {dst};
+ int src_linesize_array[4] = {src_linesize};
+ int dst_linesize_array[4] = {dst_linesize};
+
+ sws_scale(filter_context, src_array, src_linesize_array,
+ 0, h, dst_array, dst_linesize_array);
+
+ if (threshold > 0) {
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x) {
+ orig = src[x + y * src_linesize];
+ filtered = dst[x + y * dst_linesize];
+ diff = orig - filtered;
+
+ if (diff > 0) {
+ if (diff > 2 * threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (diff > threshold)
+ /* add 'diff' and subtract 'threshold' from 'filtered' */
+ dst[x + y * dst_linesize] = orig - threshold;
+ } else {
+ if (-diff > 2 * threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (-diff > threshold)
+ /* add 'diff' and 'threshold' to 'filtered' */
+ dst[x + y * dst_linesize] = orig + threshold;
+ }
+ }
+ }
+ } else if (threshold < 0) {
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x) {
+ orig = src[x + y * src_linesize];
+ filtered = dst[x + y * dst_linesize];
+ diff = orig - filtered;
+
+ if (diff > 0) {
+ if (diff <= -threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (diff <= -2 * threshold)
+ /* subtract 'diff' and 'threshold' from 'orig' */
+ dst[x + y * dst_linesize] = filtered - threshold;
+ } else {
+ if (diff >= threshold)
+ dst[x + y * dst_linesize] = orig;
+ else if (diff >= 2 * threshold)
+ /* add 'threshold' and subtract 'diff' from 'orig' */
+ dst[x + y * dst_linesize] = filtered + threshold;
+ }
+ }
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ SmartblurContext *sblur = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic;
+ int cw = FF_CEIL_RSHIFT(inlink->w, sblur->hsub);
+ int ch = FF_CEIL_RSHIFT(inlink->h, sblur->vsub);
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+
+ blur(outpic->data[0], outpic->linesize[0],
+ inpic->data[0], inpic->linesize[0],
+ inlink->w, inlink->h, sblur->luma.threshold,
+ sblur->luma.filter_context);
+
+ if (inpic->data[2]) {
+ blur(outpic->data[1], outpic->linesize[1],
+ inpic->data[1], inpic->linesize[1],
+ cw, ch, sblur->chroma.threshold,
+ sblur->chroma.filter_context);
+ blur(outpic->data[2], outpic->linesize[2],
+ inpic->data[2], inpic->linesize[2],
+ cw, ch, sblur->chroma.threshold,
+ sblur->chroma.filter_context);
+ }
+
+ av_frame_free(&inpic);
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad smartblur_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad smartblur_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_smartblur = {
+ .name = "smartblur",
+ .description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
+ .priv_size = sizeof(SmartblurContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = smartblur_inputs,
+ .outputs = smartblur_outputs,
+ .priv_class = &smartblur_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_spp.c b/libavfilter/vf_spp.c
new file mode 100644
index 0000000..989e283
--- /dev/null
+++ b/libavfilter/vf_spp.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Clément Bœsch <u pkh me>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Simple post processing filter
+ *
+ * This implementation is based on an algorithm described in
+ * "Aria Nosratinia Embedded Post-Processing for
+ * Enhancement of Compressed Images (1999)"
+ *
+ * Originally written by Michael Niedermayer for the MPlayer project, and
+ * ported by Clément Bœsch for FFmpeg.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "vf_spp.h"
+
+enum mode {
+ MODE_HARD,
+ MODE_SOFT,
+ NB_MODES
+};
+
+static const AVClass *child_class_next(const AVClass *prev)
+{
+ return prev ? NULL : avcodec_dct_get_class();
+}
+
+static void *child_next(void *obj, void *prev)
+{
+ SPPContext *s = obj;
+ return prev ? NULL : s->dct;
+}
+
+#define OFFSET(x) offsetof(SPPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption spp_options[] = {
+ { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, FLAGS },
+ { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
+ { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_HARD}, 0, NB_MODES - 1, FLAGS, "mode" },
+ { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+static const AVClass spp_class = {
+ .class_name = "spp",
+ .item_name = av_default_item_name,
+ .option = spp_options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ .child_class_next = child_class_next,
+ .child_next = child_next,
+};
+
+// XXX: share between filters?
+DECLARE_ALIGNED(8, static const uint8_t, ldither)[8][8] = {
+ { 0, 48, 12, 60, 3, 51, 15, 63 },
+ { 32, 16, 44, 28, 35, 19, 47, 31 },
+ { 8, 56, 4, 52, 11, 59, 7, 55 },
+ { 40, 24, 36, 20, 43, 27, 39, 23 },
+ { 2, 50, 14, 62, 1, 49, 13, 61 },
+ { 34, 18, 46, 30, 33, 17, 45, 29 },
+ { 10, 58, 6, 54, 9, 57, 5, 53 },
+ { 42, 26, 38, 22, 41, 25, 37, 21 },
+};
+
+static const uint8_t offset[127][2] = {
+ {0,0},
+ {0,0}, {4,4}, // quality = 1
+ {0,0}, {2,2}, {6,4}, {4,6}, // quality = 2
+ {0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7}, // quality = 3
+
+ {0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3}, // quality = 4
+ {0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},
+
+ {0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7}, // quality = 5
+ {2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
+ {4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
+ {6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},
+
+ {0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2}, // quality = 6
+ {0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
+ {1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
+ {1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
+ {0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
+ {0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
+ {1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
+ {1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
+};
+
+static void hardthresh_c(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int i;
+ int bias = 0; // FIXME
+
+ unsigned threshold1 = qp * ((1<<4) - bias) - 1;
+ unsigned threshold2 = threshold1 << 1;
+
+ memset(dst, 0, 64 * sizeof(dst[0]));
+ dst[0] = (src[0] + 4) >> 3;
+
+ for (i = 1; i < 64; i++) {
+ int level = src[i];
+ if (((unsigned)(level + threshold1)) > threshold2) {
+ const int j = permutation[i];
+ dst[j] = (level + 4) >> 3;
+ }
+ }
+}
+
+static void softthresh_c(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int i;
+ int bias = 0; //FIXME
+
+ unsigned threshold1 = qp * ((1<<4) - bias) - 1;
+ unsigned threshold2 = threshold1 << 1;
+
+ memset(dst, 0, 64 * sizeof(dst[0]));
+ dst[0] = (src[0] + 4) >> 3;
+
+ for (i = 1; i < 64; i++) {
+ int level = src[i];
+ if (((unsigned)(level + threshold1)) > threshold2) {
+ const int j = permutation[i];
+ if (level > 0) dst[j] = (level - threshold1 + 4) >> 3;
+ else dst[j] = (level + threshold1 + 4) >> 3;
+ }
+ }
+}
+
+static void store_slice_c(uint8_t *dst, const int16_t *src,
+ int dst_linesize, int src_linesize,
+ int width, int height, int log2_scale,
+ const uint8_t dither[8][8])
+{
+ int y, x;
+
+#define STORE(pos) do { \
+ temp = ((src[x + y*src_linesize + pos] << log2_scale) + d[pos]) >> 6; \
+ if (temp & 0x100) \
+ temp = ~(temp >> 31); \
+ dst[x + y*dst_linesize + pos] = temp; \
+} while (0)
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *d = dither[y];
+ for (x = 0; x < width; x += 8) {
+ int temp;
+ STORE(0);
+ STORE(1);
+ STORE(2);
+ STORE(3);
+ STORE(4);
+ STORE(5);
+ STORE(6);
+ STORE(7);
+ }
+ }
+}
+
+static inline void add_block(int16_t *dst, int linesize, const int16_t block[64])
+{
+ int y;
+
+ for (y = 0; y < 8; y++) {
+ *(uint32_t *)&dst[0 + y*linesize] += *(uint32_t *)&block[0 + y*8];
+ *(uint32_t *)&dst[2 + y*linesize] += *(uint32_t *)&block[2 + y*8];
+ *(uint32_t *)&dst[4 + y*linesize] += *(uint32_t *)&block[4 + y*8];
+ *(uint32_t *)&dst[6 + y*linesize] += *(uint32_t *)&block[6 + y*8];
+ }
+}
+
+// XXX: export the function?
+static inline int norm_qscale(int qscale, int type)
+{
+ switch (type) {
+ case FF_QSCALE_TYPE_MPEG1: return qscale;
+ case FF_QSCALE_TYPE_MPEG2: return qscale >> 1;
+ case FF_QSCALE_TYPE_H264: return qscale >> 2;
+ case FF_QSCALE_TYPE_VP56: return (63 - qscale + 2) >> 2;
+ }
+ return qscale;
+}
+
+static void filter(SPPContext *p, uint8_t *dst, uint8_t *src,
+ int dst_linesize, int src_linesize, int width, int height,
+ const uint8_t *qp_table, int qp_stride, int is_luma)
+{
+ int x, y, i;
+ const int count = 1 << p->log2_count;
+ const int linesize = is_luma ? p->temp_linesize : FFALIGN(width+16, 16);
+ DECLARE_ALIGNED(16, uint64_t, block_align)[32];
+ int16_t *block = (int16_t *)block_align;
+ int16_t *block2 = (int16_t *)(block_align + 16);
+
+ for (y = 0; y < height; y++) {
+ int index = 8 + 8*linesize + y*linesize;
+ memcpy(p->src + index, src + y*src_linesize, width);
+ for (x = 0; x < 8; x++) {
+ p->src[index - x - 1] = p->src[index + x ];
+ p->src[index + width + x ] = p->src[index + width - x - 1];
+ }
+ }
+ for (y = 0; y < 8; y++) {
+ memcpy(p->src + ( 7-y)*linesize, p->src + ( y+8)*linesize, linesize);
+ memcpy(p->src + (height+8+y)*linesize, p->src + (height-y+7)*linesize, linesize);
+ }
+
+ for (y = 0; y < height + 8; y += 8) {
+ memset(p->temp + (8 + y) * linesize, 0, 8 * linesize * sizeof(*p->temp));
+ for (x = 0; x < width + 8; x += 8) {
+ int qp;
+
+ if (p->qp) {
+ qp = p->qp;
+ } else{
+ const int qps = 3 + is_luma;
+ qp = qp_table[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
+ qp = FFMAX(1, norm_qscale(qp, p->qscale_type));
+ }
+ for (i = 0; i < count; i++) {
+ const int x1 = x + offset[i + count - 1][0];
+ const int y1 = y + offset[i + count - 1][1];
+ const int index = x1 + y1*linesize;
+ p->dct->get_pixels(block, p->src + index, linesize);
+ p->dct->fdct(block);
+ p->requantize(block2, block, qp, p->dct->idct_permutation);
+ p->dct->idct(block2);
+ add_block(p->temp + index, linesize, block2);
+ }
+ }
+ if (y)
+ p->store_slice(dst + (y - 8) * dst_linesize, p->temp + 8 + y*linesize,
+ dst_linesize, linesize, width,
+ FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
+ ldither);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ SPPContext *spp = inlink->dst->priv;
+ const int h = FFALIGN(inlink->h + 16, 16);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ spp->hsub = desc->log2_chroma_w;
+ spp->vsub = desc->log2_chroma_h;
+ spp->temp_linesize = FFALIGN(inlink->w + 16, 16);
+ spp->temp = av_malloc_array(spp->temp_linesize, h * sizeof(*spp->temp));
+ spp->src = av_malloc_array(spp->temp_linesize, h * sizeof(*spp->src));
+ if (!spp->use_bframe_qp) {
+ /* we are assuming here the qp blocks will not be smaller that 16x16 */
+ spp->non_b_qp_alloc_size = FF_CEIL_RSHIFT(inlink->w, 4) * FF_CEIL_RSHIFT(inlink->h, 4);
+ spp->non_b_qp_table = av_calloc(spp->non_b_qp_alloc_size, sizeof(*spp->non_b_qp_table));
+ if (!spp->non_b_qp_table)
+ return AVERROR(ENOMEM);
+ }
+ if (!spp->temp || !spp->src)
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ SPPContext *spp = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+ int qp_stride = 0;
+ const int8_t *qp_table = NULL;
+
+ /* if we are not in a constant user quantizer mode and we don't want to use
+ * the quantizers from the B-frames (B-frames often have a higher QP), we
+ * need to save the qp table from the last non B-frame; this is what the
+ * following code block does */
+ if (!spp->qp) {
+ qp_table = av_frame_get_qp_table(in, &qp_stride, &spp->qscale_type);
+
+ if (qp_table && !spp->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
+ int w, h;
+
+ /* if the qp stride is not set, it means the QP are only defined on
+ * a line basis */
+ if (!qp_stride) {
+ w = FF_CEIL_RSHIFT(inlink->w, 4);
+ h = 1;
+ } else {
+ w = FF_CEIL_RSHIFT(qp_stride, 4);
+ h = FF_CEIL_RSHIFT(inlink->h, 4);
+ }
+ av_assert0(w * h <= spp->non_b_qp_alloc_size);
+ memcpy(spp->non_b_qp_table, qp_table, w * h);
+ }
+ }
+
+ if (spp->log2_count && !ctx->is_disabled) {
+ if (!spp->use_bframe_qp && spp->non_b_qp_table)
+ qp_table = spp->non_b_qp_table;
+
+ if (qp_table || spp->qp) {
+ const int cw = FF_CEIL_RSHIFT(inlink->w, spp->hsub);
+ const int ch = FF_CEIL_RSHIFT(inlink->h, spp->vsub);
+
+ /* get a new frame if in-place is not possible or if the dimensions
+ * are not multiple of 8 */
+ if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
+ const int aligned_w = FFALIGN(inlink->w, 8);
+ const int aligned_h = FFALIGN(inlink->h, 8);
+
+ out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = in->width;
+ out->height = in->height;
+ }
+
+ filter(spp, out->data[0], in->data[0], out->linesize[0], in->linesize[0], inlink->w, inlink->h, qp_table, qp_stride, 1);
+ filter(spp, out->data[1], in->data[1], out->linesize[1], in->linesize[1], cw, ch, qp_table, qp_stride, 0);
+ filter(spp, out->data[2], in->data[2], out->linesize[2], in->linesize[2], cw, ch, qp_table, qp_stride, 0);
+ emms_c();
+ }
+ }
+
+ if (in != out) {
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ SPPContext *spp = ctx->priv;
+
+ if (!strcmp(cmd, "level")) {
+ if (!strcmp(args, "max"))
+ spp->log2_count = MAX_LEVEL;
+ else
+ spp->log2_count = av_clip(strtol(args, NULL, 10), 0, MAX_LEVEL);
+ return 0;
+ }
+ return AVERROR(ENOSYS);
+}
+
+static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
+{
+ SPPContext *spp = ctx->priv;
+ int ret;
+
+ spp->avctx = avcodec_alloc_context3(NULL);
+ spp->dct = avcodec_dct_alloc();
+ if (!spp->avctx || !spp->dct)
+ return AVERROR(ENOMEM);
+
+ if (opts) {
+ AVDictionaryEntry *e = NULL;
+
+ while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ if ((ret = av_opt_set(spp->dct, e->key, e->value, 0)) < 0)
+ return ret;
+ }
+ av_dict_free(opts);
+ }
+
+ avcodec_dct_init(spp->dct);
+ spp->store_slice = store_slice_c;
+ switch (spp->mode) {
+ case MODE_HARD: spp->requantize = hardthresh_c; break;
+ case MODE_SOFT: spp->requantize = softthresh_c; break;
+ }
+ if (ARCH_X86)
+ ff_spp_init_x86(spp);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ SPPContext *spp = ctx->priv;
+
+ av_freep(&spp->temp);
+ av_freep(&spp->src);
+ if (spp->avctx) {
+ avcodec_close(spp->avctx);
+ av_freep(&spp->avctx);
+ }
+ av_freep(&spp->dct);
+ av_freep(&spp->non_b_qp_table);
+}
+
+static const AVFilterPad spp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad spp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_spp = {
+ .name = "spp",
+ .description = NULL_IF_CONFIG_SMALL("Apply a simple post processing filter."),
+ .priv_size = sizeof(SPPContext),
+ .init_dict = init_dict,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = spp_inputs,
+ .outputs = spp_outputs,
+ .process_command = process_command,
+ .priv_class = &spp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_spp.h b/libavfilter/vf_spp.h
new file mode 100644
index 0000000..7e7e227
--- /dev/null
+++ b/libavfilter/vf_spp.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AVFILTER_SPP_H
+#define AVFILTER_SPP_H
+
+#include "libavcodec/avcodec.h"
+#include "libavcodec/avdct.h"
+#include "avfilter.h"
+
+#define MAX_LEVEL 6 /* quality levels */
+
+typedef struct {
+ const AVClass *av_class;
+
+ int log2_count;
+ int qp;
+ int mode;
+ int qscale_type;
+ int temp_linesize;
+ uint8_t *src;
+ int16_t *temp;
+ AVCodecContext *avctx;
+ AVDCT *dct;
+ int8_t *non_b_qp_table;
+ int non_b_qp_alloc_size;
+ int use_bframe_qp;
+ int hsub, vsub;
+
+ void (*store_slice)(uint8_t *dst, const int16_t *src,
+ int dst_stride, int src_stride,
+ int width, int height, int log2_scale,
+ const uint8_t dither[8][8]);
+
+ void (*requantize)(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation);
+} SPPContext;
+
+void ff_spp_init_x86(SPPContext *s);
+
+#endif /* AVFILTER_SPP_H */
diff --git a/libavfilter/vf_stereo3d.c b/libavfilter/vf_stereo3d.c
new file mode 100644
index 0000000..2140120
--- /dev/null
+++ b/libavfilter/vf_stereo3d.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2010 Gordon Schmidt <gordon.schmidt <at> s2000.tu-chemnitz.de>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+enum StereoCode {
+ ANAGLYPH_RC_GRAY, // anaglyph red/cyan gray
+ ANAGLYPH_RC_HALF, // anaglyph red/cyan half colored
+ ANAGLYPH_RC_COLOR, // anaglyph red/cyan colored
+ ANAGLYPH_RC_DUBOIS, // anaglyph red/cyan dubois
+ ANAGLYPH_GM_GRAY, // anaglyph green/magenta gray
+ ANAGLYPH_GM_HALF, // anaglyph green/magenta half colored
+ ANAGLYPH_GM_COLOR, // anaglyph green/magenta colored
+ ANAGLYPH_GM_DUBOIS, // anaglyph green/magenta dubois
+ ANAGLYPH_YB_GRAY, // anaglyph yellow/blue gray
+ ANAGLYPH_YB_HALF, // anaglyph yellow/blue half colored
+ ANAGLYPH_YB_COLOR, // anaglyph yellow/blue colored
+ ANAGLYPH_YB_DUBOIS, // anaglyph yellow/blue dubois
+ ANAGLYPH_RB_GRAY, // anaglyph red/blue gray
+ ANAGLYPH_RG_GRAY, // anaglyph red/green gray
+ MONO_L, // mono output for debugging (left eye only)
+ MONO_R, // mono output for debugging (right eye only)
+ INTERLEAVE_ROWS_LR, // row-interleave (left eye has top row)
+ INTERLEAVE_ROWS_RL, // row-interleave (right eye has top row)
+ SIDE_BY_SIDE_LR, // side by side parallel (left eye left, right eye right)
+ SIDE_BY_SIDE_RL, // side by side crosseye (right eye left, left eye right)
+ SIDE_BY_SIDE_2_LR, // side by side parallel with half width resolution
+ SIDE_BY_SIDE_2_RL, // side by side crosseye with half width resolution
+ ABOVE_BELOW_LR, // above-below (left eye above, right eye below)
+ ABOVE_BELOW_RL, // above-below (right eye above, left eye below)
+ ABOVE_BELOW_2_LR, // above-below with half height resolution
+ ABOVE_BELOW_2_RL, // above-below with half height resolution
+ ALTERNATING_LR, // alternating frames (left eye first, right eye second)
+ ALTERNATING_RL, // alternating frames (right eye first, left eye second)
+ STEREO_CODE_COUNT // TODO: needs autodetection
+};
+
+typedef struct StereoComponent {
+ enum StereoCode format;
+ int width, height;
+ int off_left, off_right;
+ int off_lstep, off_rstep;
+ int row_left, row_right;
+} StereoComponent;
+
+static const int ana_coeff[][3][6] = {
+ [ANAGLYPH_RB_GRAY] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471}},
+ [ANAGLYPH_RG_GRAY] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471},
+ { 0, 0, 0, 0, 0, 0}},
+ [ANAGLYPH_RC_GRAY] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471},
+ { 0, 0, 0, 19595, 38470, 7471}},
+ [ANAGLYPH_RC_HALF] =
+ {{19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_RC_COLOR] =
+ {{65536, 0, 0, 0, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_RC_DUBOIS] =
+ {{29891, 32800, 11559, -2849, -5763, -102},
+ {-2627, -2479, -1033, 24804, 48080, -1209},
+ { -997, -1350, -358, -4729, -7403, 80373}},
+ [ANAGLYPH_GM_GRAY] =
+ {{ 0, 0, 0, 19595, 38470, 7471},
+ {19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 19595, 38470, 7471}},
+ [ANAGLYPH_GM_HALF] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ {19595, 38470, 7471, 0, 0, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_GM_COLOR] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ { 0, 65536, 0, 0, 0, 0},
+ { 0, 0, 0, 0, 0, 65536}},
+ [ANAGLYPH_GM_DUBOIS] =
+ {{-4063,-10354, -2556, 34669, 46203, 1573},
+ {18612, 43778, 9372, -1049, -983, -4260},
+ { -983, -1769, 1376, 590, 4915, 61407}},
+ [ANAGLYPH_YB_GRAY] =
+ {{ 0, 0, 0, 19595, 38470, 7471},
+ { 0, 0, 0, 19595, 38470, 7471},
+ {19595, 38470, 7471, 0, 0, 0}},
+ [ANAGLYPH_YB_HALF] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ {19595, 38470, 7471, 0, 0, 0}},
+ [ANAGLYPH_YB_COLOR] =
+ {{ 0, 0, 0, 65536, 0, 0},
+ { 0, 0, 0, 0, 65536, 0},
+ { 0, 0, 65536, 0, 0, 0}},
+ [ANAGLYPH_YB_DUBOIS] =
+ {{65535,-12650,18451, -987, -7590, -1049},
+ {-1604, 56032, 4196, 370, 3826, -1049},
+ {-2345,-10676, 1358, 5801, 11416, 56217}},
+};
+
+typedef struct Stereo3DContext {
+ const AVClass *class;
+ StereoComponent in, out;
+ int width, height;
+ int row_step;
+ const int *ana_matrix[3];
+ int nb_planes;
+ int linesize[4];
+ int pheight[4];
+ int hsub, vsub;
+ int pixstep[4];
+ AVFrame *prev;
+ double ts_unit;
+} Stereo3DContext;
+
+#define OFFSET(x) offsetof(Stereo3DContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption stereo3d_options[] = {
+ { "in", "set input format", OFFSET(in.format), AV_OPT_TYPE_INT, {.i64=SIDE_BY_SIDE_LR}, SIDE_BY_SIDE_LR, STEREO_CODE_COUNT-1, FLAGS, "in"},
+ { "ab2l", "above below half height left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_LR}, 0, 0, FLAGS, "in" },
+ { "ab2r", "above below half height right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_RL}, 0, 0, FLAGS, "in" },
+ { "abl", "above below left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_LR}, 0, 0, FLAGS, "in" },
+ { "abr", "above below right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_RL}, 0, 0, FLAGS, "in" },
+ { "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "in" },
+ { "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "in" },
+ { "sbs2l", "side by side half width left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_LR}, 0, 0, FLAGS, "in" },
+ { "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "in" },
+ { "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "in" },
+ { "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "in" },
+ { "out", "set output format", OFFSET(out.format), AV_OPT_TYPE_INT, {.i64=ANAGLYPH_RC_DUBOIS}, 0, STEREO_CODE_COUNT-1, FLAGS, "out"},
+ { "ab2l", "above below half height left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_LR}, 0, 0, FLAGS, "out" },
+ { "ab2r", "above below half height right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_2_RL}, 0, 0, FLAGS, "out" },
+ { "abl", "above below left first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_LR}, 0, 0, FLAGS, "out" },
+ { "abr", "above below right first", 0, AV_OPT_TYPE_CONST, {.i64=ABOVE_BELOW_RL}, 0, 0, FLAGS, "out" },
+ { "agmc", "anaglyph green magenta color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_COLOR}, 0, 0, FLAGS, "out" },
+ { "agmd", "anaglyph green magenta dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_DUBOIS}, 0, 0, FLAGS, "out" },
+ { "agmg", "anaglyph green magenta gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_GRAY}, 0, 0, FLAGS, "out" },
+ { "agmh", "anaglyph green magenta half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_GM_HALF}, 0, 0, FLAGS, "out" },
+ { "al", "alternating frames left first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_LR}, 0, 0, FLAGS, "out" },
+ { "ar", "alternating frames right first", 0, AV_OPT_TYPE_CONST, {.i64=ALTERNATING_RL}, 0, 0, FLAGS, "out" },
+ { "arbg", "anaglyph red blue gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RB_GRAY}, 0, 0, FLAGS, "out" },
+ { "arcc", "anaglyph red cyan color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_COLOR}, 0, 0, FLAGS, "out" },
+ { "arcd", "anaglyph red cyan dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_DUBOIS}, 0, 0, FLAGS, "out" },
+ { "arcg", "anaglyph red cyan gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_GRAY}, 0, 0, FLAGS, "out" },
+ { "arch", "anaglyph red cyan half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RC_HALF}, 0, 0, FLAGS, "out" },
+ { "argg", "anaglyph red green gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_RG_GRAY}, 0, 0, FLAGS, "out" },
+ { "aybc", "anaglyph yellow blue color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_COLOR}, 0, 0, FLAGS, "out" },
+ { "aybd", "anaglyph yellow blue dubois", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_DUBOIS}, 0, 0, FLAGS, "out" },
+ { "aybg", "anaglyph yellow blue gray", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_GRAY}, 0, 0, FLAGS, "out" },
+ { "aybh", "anaglyph yellow blue half color", 0, AV_OPT_TYPE_CONST, {.i64=ANAGLYPH_YB_HALF}, 0, 0, FLAGS, "out" },
+ { "irl", "interleave rows left first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_LR}, 0, 0, FLAGS, "out" },
+ { "irr", "interleave rows right first", 0, AV_OPT_TYPE_CONST, {.i64=INTERLEAVE_ROWS_RL}, 0, 0, FLAGS, "out" },
+ { "ml", "mono left", 0, AV_OPT_TYPE_CONST, {.i64=MONO_L}, 0, 0, FLAGS, "out" },
+ { "mr", "mono right", 0, AV_OPT_TYPE_CONST, {.i64=MONO_R}, 0, 0, FLAGS, "out" },
+ { "sbs2l", "side by side half width left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_LR}, 0, 0, FLAGS, "out" },
+ { "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "out" },
+ { "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "out" },
+ { "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "out" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(stereo3d);
+
+static const enum AVPixelFormat anaglyph_pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat other_pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGB48BE, AV_PIX_FMT_BGR48BE,
+ AV_PIX_FMT_RGB48LE, AV_PIX_FMT_BGR48LE,
+ AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_BGRA64BE,
+ AV_PIX_FMT_RGBA64LE, AV_PIX_FMT_BGRA64LE,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9BE, AV_PIX_FMT_GBRP9LE,
+ AV_PIX_FMT_GBRP10BE, AV_PIX_FMT_GBRP10LE,
+ AV_PIX_FMT_GBRP12BE, AV_PIX_FMT_GBRP12LE,
+ AV_PIX_FMT_GBRP14BE, AV_PIX_FMT_GBRP14LE,
+ AV_PIX_FMT_GBRP16BE, AV_PIX_FMT_GBRP16LE,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUV420P9LE, AV_PIX_FMT_YUVA420P9LE,
+ AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUVA420P9BE,
+ AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUVA422P9LE,
+ AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUVA422P9BE,
+ AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUVA444P9LE,
+ AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUVA444P9BE,
+ AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUVA420P10LE,
+ AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUVA420P10BE,
+ AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUVA422P10LE,
+ AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUVA422P10BE,
+ AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUVA444P10LE,
+ AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUVA444P10BE,
+ AV_PIX_FMT_YUV420P12BE, AV_PIX_FMT_YUV420P12LE,
+ AV_PIX_FMT_YUV422P12BE, AV_PIX_FMT_YUV422P12LE,
+ AV_PIX_FMT_YUV444P12BE, AV_PIX_FMT_YUV444P12LE,
+ AV_PIX_FMT_YUV420P14BE, AV_PIX_FMT_YUV420P14LE,
+ AV_PIX_FMT_YUV422P14BE, AV_PIX_FMT_YUV422P14LE,
+ AV_PIX_FMT_YUV444P14BE, AV_PIX_FMT_YUV444P14LE,
+ AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUVA420P16LE,
+ AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUVA420P16BE,
+ AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUVA422P16LE,
+ AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUVA422P16BE,
+ AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUVA444P16LE,
+ AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUVA444P16BE,
+ AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ Stereo3DContext *s = ctx->priv;
+ const enum AVPixelFormat *pix_fmts;
+
+ switch (s->out.format) {
+ case ANAGLYPH_GM_COLOR:
+ case ANAGLYPH_GM_DUBOIS:
+ case ANAGLYPH_GM_GRAY:
+ case ANAGLYPH_GM_HALF:
+ case ANAGLYPH_RB_GRAY:
+ case ANAGLYPH_RC_COLOR:
+ case ANAGLYPH_RC_DUBOIS:
+ case ANAGLYPH_RC_GRAY:
+ case ANAGLYPH_RC_HALF:
+ case ANAGLYPH_RG_GRAY:
+ case ANAGLYPH_YB_COLOR:
+ case ANAGLYPH_YB_DUBOIS:
+ case ANAGLYPH_YB_GRAY:
+ case ANAGLYPH_YB_HALF:
+ pix_fmts = anaglyph_pix_fmts;
+ break;
+ default:
+ pix_fmts = other_pix_fmts;
+ }
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = ctx->inputs[0];
+ Stereo3DContext *s = ctx->priv;
+ AVRational aspect = inlink->sample_aspect_ratio;
+ AVRational fps = inlink->frame_rate;
+ AVRational tb = inlink->time_base;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ int ret;
+
+ switch (s->in.format) {
+ case SIDE_BY_SIDE_2_LR:
+ case SIDE_BY_SIDE_LR:
+ case SIDE_BY_SIDE_2_RL:
+ case SIDE_BY_SIDE_RL:
+ if (inlink->w & 1) {
+ av_log(ctx, AV_LOG_ERROR, "width must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ case ABOVE_BELOW_2_LR:
+ case ABOVE_BELOW_LR:
+ case ABOVE_BELOW_2_RL:
+ case ABOVE_BELOW_RL:
+ if (s->out.format == INTERLEAVE_ROWS_LR ||
+ s->out.format == INTERLEAVE_ROWS_RL) {
+ if (inlink->h & 3) {
+ av_log(ctx, AV_LOG_ERROR, "height must be multiple of 4\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ if (inlink->h & 1) {
+ av_log(ctx, AV_LOG_ERROR, "height must be even\n");
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ s->in.width =
+ s->width = inlink->w;
+ s->in.height =
+ s->height = inlink->h;
+ s->row_step = 1;
+ s->in.off_lstep =
+ s->in.off_rstep =
+ s->in.off_left =
+ s->in.off_right =
+ s->in.row_left =
+ s->in.row_right = 0;
+
+ switch (s->in.format) {
+ case SIDE_BY_SIDE_2_LR:
+ aspect.num *= 2;
+ case SIDE_BY_SIDE_LR:
+ s->width = inlink->w / 2;
+ s->in.off_right = s->width;
+ break;
+ case SIDE_BY_SIDE_2_RL:
+ aspect.num *= 2;
+ case SIDE_BY_SIDE_RL:
+ s->width = inlink->w / 2;
+ s->in.off_left = s->width;
+ break;
+ case ABOVE_BELOW_2_LR:
+ aspect.den *= 2;
+ case ABOVE_BELOW_LR:
+ s->in.row_right =
+ s->height = inlink->h / 2;
+ break;
+ case ABOVE_BELOW_2_RL:
+ aspect.den *= 2;
+ case ABOVE_BELOW_RL:
+ s->in.row_left =
+ s->height = inlink->h / 2;
+ break;
+ case ALTERNATING_RL:
+ case ALTERNATING_LR:
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ fps.den *= 2;
+ tb.num *= 2;
+ break;
+ default:
+ av_log(ctx, AV_LOG_ERROR, "input format %d is not supported\n", s->in.format);
+ return AVERROR(EINVAL);
+ }
+
+ s->out.width = s->width;
+ s->out.height = s->height;
+ s->out.off_lstep =
+ s->out.off_rstep =
+ s->out.off_left =
+ s->out.off_right =
+ s->out.row_left =
+ s->out.row_right = 0;
+
+ switch (s->out.format) {
+ case ANAGLYPH_RB_GRAY:
+ case ANAGLYPH_RG_GRAY:
+ case ANAGLYPH_RC_GRAY:
+ case ANAGLYPH_RC_HALF:
+ case ANAGLYPH_RC_COLOR:
+ case ANAGLYPH_RC_DUBOIS:
+ case ANAGLYPH_GM_GRAY:
+ case ANAGLYPH_GM_HALF:
+ case ANAGLYPH_GM_COLOR:
+ case ANAGLYPH_GM_DUBOIS:
+ case ANAGLYPH_YB_GRAY:
+ case ANAGLYPH_YB_HALF:
+ case ANAGLYPH_YB_COLOR:
+ case ANAGLYPH_YB_DUBOIS: {
+ uint8_t rgba_map[4];
+
+ ff_fill_rgba_map(rgba_map, outlink->format);
+ s->ana_matrix[rgba_map[0]] = &ana_coeff[s->out.format][0][0];
+ s->ana_matrix[rgba_map[1]] = &ana_coeff[s->out.format][1][0];
+ s->ana_matrix[rgba_map[2]] = &ana_coeff[s->out.format][2][0];
+ break;
+ }
+ case SIDE_BY_SIDE_2_LR:
+ aspect.den *= 2;
+ case SIDE_BY_SIDE_LR:
+ s->out.width = s->width * 2;
+ s->out.off_right = s->width;
+ break;
+ case SIDE_BY_SIDE_2_RL:
+ aspect.den *= 2;
+ case SIDE_BY_SIDE_RL:
+ s->out.width = s->width * 2;
+ s->out.off_left = s->width;
+ break;
+ case ABOVE_BELOW_2_LR:
+ aspect.num *= 2;
+ case ABOVE_BELOW_LR:
+ s->out.height = s->height * 2;
+ s->out.row_right = s->height;
+ break;
+ case ABOVE_BELOW_2_RL:
+ aspect.num *= 2;
+ case ABOVE_BELOW_RL:
+ s->out.height = s->height * 2;
+ s->out.row_left = s->height;
+ break;
+ case INTERLEAVE_ROWS_LR:
+ s->row_step = 2;
+ s->height = s->height / 2;
+ s->out.off_rstep =
+ s->in.off_rstep = 1;
+ break;
+ case INTERLEAVE_ROWS_RL:
+ s->row_step = 2;
+ s->height = s->height / 2;
+ s->out.off_lstep =
+ s->in.off_lstep = 1;
+ break;
+ case MONO_R:
+ s->in.off_left = s->in.off_right;
+ s->in.row_left = s->in.row_right;
+ case MONO_L:
+ break;
+ case ALTERNATING_RL:
+ case ALTERNATING_LR:
+ fps.num *= 2;
+ tb.den *= 2;
+ break;
+ default:
+ av_log(ctx, AV_LOG_ERROR, "output format %d is not supported\n", s->out.format);
+ return AVERROR(EINVAL);
+ }
+
+ outlink->w = s->out.width;
+ outlink->h = s->out.height;
+ outlink->frame_rate = fps;
+ outlink->time_base = tb;
+ outlink->sample_aspect_ratio = aspect;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, outlink->format, s->width)) < 0)
+ return ret;
+ s->nb_planes = av_pix_fmt_count_planes(outlink->format);
+ av_image_fill_max_pixsteps(s->pixstep, NULL, desc);
+ s->ts_unit = av_q2d(av_inv_q(av_mul_q(outlink->frame_rate, outlink->time_base)));
+ s->pheight[1] = s->pheight[2] = FF_CEIL_RSHIFT(s->height, desc->log2_chroma_h);
+ s->pheight[0] = s->pheight[3] = s->height;
+ s->hsub = desc->log2_chroma_w;
+ s->vsub = desc->log2_chroma_h;
+
+ return 0;
+}
+
+static inline uint8_t ana_convert(const int *coeff, const uint8_t *left, const uint8_t *right)
+{
+ int sum;
+
+ sum = coeff[0] * left[0] + coeff[3] * right[0]; //red in
+ sum += coeff[1] * left[1] + coeff[4] * right[1]; //green in
+ sum += coeff[2] * left[2] + coeff[5] * right[2]; //blue in
+
+ return av_clip_uint8(sum >> 16);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ Stereo3DContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *oleft, *oright, *ileft, *iright;
+ int out_off_left[4], out_off_right[4];
+ int in_off_left[4], in_off_right[4];
+ int i;
+
+ switch (s->in.format) {
+ case ALTERNATING_LR:
+ case ALTERNATING_RL:
+ if (!s->prev) {
+ s->prev = inpicref;
+ return 0;
+ }
+ ileft = s->prev;
+ iright = inpicref;
+ if (s->in.format == ALTERNATING_RL)
+ FFSWAP(AVFrame *, ileft, iright);
+ break;
+ default:
+ ileft = iright = inpicref;
+ };
+
+ out = oleft = oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, inpicref);
+
+ if (s->out.format == ALTERNATING_LR ||
+ s->out.format == ALTERNATING_RL) {
+ oright = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!oright) {
+ av_frame_free(&oleft);
+ av_frame_free(&s->prev);
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(oright, inpicref);
+ }
+
+ for (i = 0; i < 4; i++) {
+ int hsub = i == 1 || i == 2 ? s->hsub : 0;
+ int vsub = i == 1 || i == 2 ? s->vsub : 0;
+ in_off_left[i] = (FF_CEIL_RSHIFT(s->in.row_left, vsub) + s->in.off_lstep) * ileft->linesize[i] + FF_CEIL_RSHIFT(s->in.off_left * s->pixstep[i], hsub);
+ in_off_right[i] = (FF_CEIL_RSHIFT(s->in.row_right, vsub) + s->in.off_rstep) * iright->linesize[i] + FF_CEIL_RSHIFT(s->in.off_right * s->pixstep[i], hsub);
+ out_off_left[i] = (FF_CEIL_RSHIFT(s->out.row_left, vsub) + s->out.off_lstep) * oleft->linesize[i] + FF_CEIL_RSHIFT(s->out.off_left * s->pixstep[i], hsub);
+ out_off_right[i] = (FF_CEIL_RSHIFT(s->out.row_right, vsub) + s->out.off_rstep) * oright->linesize[i] + FF_CEIL_RSHIFT(s->out.off_right * s->pixstep[i], hsub);
+ }
+
+ switch (s->out.format) {
+ case ALTERNATING_LR:
+ case ALTERNATING_RL:
+ case SIDE_BY_SIDE_LR:
+ case SIDE_BY_SIDE_RL:
+ case SIDE_BY_SIDE_2_LR:
+ case SIDE_BY_SIDE_2_RL:
+ case ABOVE_BELOW_LR:
+ case ABOVE_BELOW_RL:
+ case ABOVE_BELOW_2_LR:
+ case ABOVE_BELOW_2_RL:
+ case INTERLEAVE_ROWS_LR:
+ case INTERLEAVE_ROWS_RL:
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(oleft->data[i] + out_off_left[i],
+ oleft->linesize[i] * s->row_step,
+ ileft->data[i] + in_off_left[i],
+ ileft->linesize[i] * s->row_step,
+ s->linesize[i], s->pheight[i]);
+ av_image_copy_plane(oright->data[i] + out_off_right[i],
+ oright->linesize[i] * s->row_step,
+ iright->data[i] + in_off_right[i],
+ iright->linesize[i] * s->row_step,
+ s->linesize[i], s->pheight[i]);
+ }
+ break;
+ case MONO_L:
+ iright = ileft;
+ case MONO_R:
+ for (i = 0; i < s->nb_planes; i++) {
+ av_image_copy_plane(out->data[i], out->linesize[i],
+ iright->data[i] + in_off_left[i],
+ iright->linesize[i],
+ s->linesize[i], s->pheight[i]);
+ }
+ break;
+ case ANAGLYPH_RB_GRAY:
+ case ANAGLYPH_RG_GRAY:
+ case ANAGLYPH_RC_GRAY:
+ case ANAGLYPH_RC_HALF:
+ case ANAGLYPH_RC_COLOR:
+ case ANAGLYPH_RC_DUBOIS:
+ case ANAGLYPH_GM_GRAY:
+ case ANAGLYPH_GM_HALF:
+ case ANAGLYPH_GM_COLOR:
+ case ANAGLYPH_GM_DUBOIS:
+ case ANAGLYPH_YB_GRAY:
+ case ANAGLYPH_YB_HALF:
+ case ANAGLYPH_YB_COLOR:
+ case ANAGLYPH_YB_DUBOIS: {
+ int x, y, il, ir, o;
+ const uint8_t *lsrc = ileft->data[0];
+ const uint8_t *rsrc = iright->data[0];
+ uint8_t *dst = out->data[0];
+ int out_width = s->out.width;
+ const int **ana_matrix = s->ana_matrix;
+
+ for (y = 0; y < s->out.height; y++) {
+ o = out->linesize[0] * y;
+ il = in_off_left[0] + y * ileft->linesize[0];
+ ir = in_off_right[0] + y * iright->linesize[0];
+ for (x = 0; x < out_width; x++, il += 3, ir += 3, o+= 3) {
+ dst[o ] = ana_convert(ana_matrix[0], lsrc + il, rsrc + ir);
+ dst[o + 1] = ana_convert(ana_matrix[1], lsrc + il, rsrc + ir);
+ dst[o + 2] = ana_convert(ana_matrix[2], lsrc + il, rsrc + ir);
+ }
+ }
+ break;
+ }
+ default:
+ av_assert0(0);
+ }
+
+ av_frame_free(&inpicref);
+ av_frame_free(&s->prev);
+ if (oright != oleft) {
+ if (s->out.format == ALTERNATING_LR)
+ FFSWAP(AVFrame *, oleft, oright);
+ oright->pts = outlink->frame_count * s->ts_unit;
+ ff_filter_frame(outlink, oright);
+ out = oleft;
+ oleft->pts = outlink->frame_count * s->ts_unit;
+ } else if (s->in.format == ALTERNATING_LR ||
+ s->in.format == ALTERNATING_RL) {
+ out->pts = outlink->frame_count * s->ts_unit;
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ Stereo3DContext *s = ctx->priv;
+
+ av_frame_free(&s->prev);
+}
+
+static const AVFilterPad stereo3d_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad stereo3d_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_stereo3d = {
+ .name = "stereo3d",
+ .description = NULL_IF_CONFIG_SMALL("Convert video stereoscopic 3D view."),
+ .priv_size = sizeof(Stereo3DContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = stereo3d_inputs,
+ .outputs = stereo3d_outputs,
+ .priv_class = &stereo3d_class,
+};
diff --git a/libavfilter/vf_subtitles.c b/libavfilter/vf_subtitles.c
new file mode 100644
index 0000000..be4c6a5
--- /dev/null
+++ b/libavfilter/vf_subtitles.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2011 Baptiste Coudurier
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2012 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Libass subtitles burning filter.
+ *
+ * @see{http://www.matroska.org/technical/specs/subtitles/ssa.html}
+ */
+
+#include <ass/ass.h>
+
+#include "config.h"
+#if CONFIG_SUBTITLES_FILTER
+# include "libavcodec/avcodec.h"
+# include "libavformat/avformat.h"
+#endif
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "drawutils.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ ASS_Library *library;
+ ASS_Renderer *renderer;
+ ASS_Track *track;
+ char *filename;
+ char *charenc;
+ int stream_index;
+ uint8_t rgba_map[4];
+ int pix_step[4]; ///< steps per pixel for each plane of the main output
+ int original_w, original_h;
+ int shaping;
+ FFDrawContext draw;
+} AssContext;
+
+#define OFFSET(x) offsetof(AssContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+#define COMMON_OPTIONS \
+ {"filename", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
+ {"f", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
+ {"original_size", "set the size of the original video (used to scale fonts)", OFFSET(original_w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS }, \
+
+/* libass supports a log level ranging from 0 to 7 */
+static const int ass_libavfilter_log_level_map[] = {
+ [0] = AV_LOG_FATAL, /* MSGL_FATAL */
+ [1] = AV_LOG_ERROR, /* MSGL_ERR */
+ [2] = AV_LOG_WARNING, /* MSGL_WARN */
+ [3] = AV_LOG_WARNING, /* <undefined> */
+ [4] = AV_LOG_INFO, /* MSGL_INFO */
+ [5] = AV_LOG_INFO, /* <undefined> */
+ [6] = AV_LOG_VERBOSE, /* MSGL_V */
+ [7] = AV_LOG_DEBUG, /* MSGL_DBG2 */
+};
+
+static void ass_log(int ass_level, const char *fmt, va_list args, void *ctx)
+{
+ const int ass_level_clip = av_clip(ass_level, 0,
+ FF_ARRAY_ELEMS(ass_libavfilter_log_level_map) - 1);
+ const int level = ass_libavfilter_log_level_map[ass_level_clip];
+
+ av_vlog(ctx, level, fmt, args);
+ av_log(ctx, level, "\n");
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ AssContext *ass = ctx->priv;
+
+ if (!ass->filename) {
+ av_log(ctx, AV_LOG_ERROR, "No filename provided!\n");
+ return AVERROR(EINVAL);
+ }
+
+ ass->library = ass_library_init();
+ if (!ass->library) {
+ av_log(ctx, AV_LOG_ERROR, "Could not initialize libass.\n");
+ return AVERROR(EINVAL);
+ }
+ ass_set_message_cb(ass->library, ass_log, ctx);
+
+ ass->renderer = ass_renderer_init(ass->library);
+ if (!ass->renderer) {
+ av_log(ctx, AV_LOG_ERROR, "Could not initialize libass renderer.\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AssContext *ass = ctx->priv;
+
+ if (ass->track)
+ ass_free_track(ass->track);
+ if (ass->renderer)
+ ass_renderer_done(ass->renderer);
+ if (ass->library)
+ ass_library_done(ass->library);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AssContext *ass = inlink->dst->priv;
+
+ ff_draw_init(&ass->draw, inlink->format, 0);
+
+ ass_set_frame_size (ass->renderer, inlink->w, inlink->h);
+ if (ass->original_w && ass->original_h)
+ ass_set_aspect_ratio(ass->renderer, (double)inlink->w / inlink->h,
+ (double)ass->original_w / ass->original_h);
+ if (ass->shaping != -1)
+ ass_set_shaper(ass->renderer, ass->shaping);
+
+ return 0;
+}
+
+/* libass stores an RGBA color in the format RRGGBBTT, where TT is the transparency level */
+#define AR(c) ( (c)>>24)
+#define AG(c) (((c)>>16)&0xFF)
+#define AB(c) (((c)>>8) &0xFF)
+#define AA(c) ((0xFF-c) &0xFF)
+
+static void overlay_ass_image(AssContext *ass, AVFrame *picref,
+ const ASS_Image *image)
+{
+ for (; image; image = image->next) {
+ uint8_t rgba_color[] = {AR(image->color), AG(image->color), AB(image->color), AA(image->color)};
+ FFDrawColor color;
+ ff_draw_color(&ass->draw, &color, rgba_color);
+ ff_blend_mask(&ass->draw, &color,
+ picref->data, picref->linesize,
+ picref->width, picref->height,
+ image->bitmap, image->stride, image->w, image->h,
+ 3, 0, image->dst_x, image->dst_y);
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AssContext *ass = ctx->priv;
+ int detect_change = 0;
+ double time_ms = picref->pts * av_q2d(inlink->time_base) * 1000;
+ ASS_Image *image = ass_render_frame(ass->renderer, ass->track,
+ time_ms, &detect_change);
+
+ if (detect_change)
+ av_log(ctx, AV_LOG_DEBUG, "Change happened at time ms:%f\n", time_ms);
+
+ overlay_ass_image(ass, picref, image);
+
+ return ff_filter_frame(outlink, picref);
+}
+
+static const AVFilterPad ass_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ .needs_writable = 1,
+ },
+ { NULL }
+};
+
+static const AVFilterPad ass_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+#if CONFIG_ASS_FILTER
+
+static const AVOption ass_options[] = {
+ COMMON_OPTIONS
+ {"shaping", "set shaping engine", OFFSET(shaping), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, "shaping_mode"},
+ {"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
+ {"simple", "simple shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_SIMPLE}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
+ {"complex", "complex shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_COMPLEX}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(ass);
+
+static av_cold int init_ass(AVFilterContext *ctx)
+{
+ AssContext *ass = ctx->priv;
+ int ret = init(ctx);
+
+ if (ret < 0)
+ return ret;
+
+ /* Initialize fonts */
+ ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);
+
+ ass->track = ass_read_file(ass->library, ass->filename, NULL);
+ if (!ass->track) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Could not create a libass track when reading file '%s'\n",
+ ass->filename);
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+AVFilter ff_vf_ass = {
+ .name = "ass",
+ .description = NULL_IF_CONFIG_SMALL("Render ASS subtitles onto input video using the libass library."),
+ .priv_size = sizeof(AssContext),
+ .init = init_ass,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = ass_inputs,
+ .outputs = ass_outputs,
+ .priv_class = &ass_class,
+};
+#endif
+
+#if CONFIG_SUBTITLES_FILTER
+
+static const AVOption subtitles_options[] = {
+ COMMON_OPTIONS
+ {"charenc", "set input character encoding", OFFSET(charenc), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, FLAGS},
+ {"stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS},
+ {"si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS},
+ {NULL},
+};
+
+static const char * const font_mimetypes[] = {
+ "application/x-truetype-font",
+ "application/vnd.ms-opentype",
+ "application/x-font-ttf",
+ NULL
+};
+
+static int attachment_is_font(AVStream * st)
+{
+ const AVDictionaryEntry *tag = NULL;
+ int n;
+
+ tag = av_dict_get(st->metadata, "mimetype", NULL, AV_DICT_MATCH_CASE);
+
+ if (tag) {
+ for (n = 0; font_mimetypes[n]; n++) {
+ if (av_strcasecmp(font_mimetypes[n], tag->value) == 0)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+AVFILTER_DEFINE_CLASS(subtitles);
+
+static av_cold int init_subtitles(AVFilterContext *ctx)
+{
+ int j, ret, sid;
+ int k = 0;
+ AVDictionary *codec_opts = NULL;
+ AVFormatContext *fmt = NULL;
+ AVCodecContext *dec_ctx = NULL;
+ AVCodec *dec = NULL;
+ const AVCodecDescriptor *dec_desc;
+ AVStream *st;
+ AVPacket pkt;
+ AssContext *ass = ctx->priv;
+
+ /* Init libass */
+ ret = init(ctx);
+ if (ret < 0)
+ return ret;
+ ass->track = ass_new_track(ass->library);
+ if (!ass->track) {
+ av_log(ctx, AV_LOG_ERROR, "Could not create a libass track\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* Open subtitles file */
+ ret = avformat_open_input(&fmt, ass->filename, NULL, NULL);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to open %s\n", ass->filename);
+ goto end;
+ }
+ ret = avformat_find_stream_info(fmt, NULL);
+ if (ret < 0)
+ goto end;
+
+ /* Locate subtitles stream */
+ if (ass->stream_index < 0)
+ ret = av_find_best_stream(fmt, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0);
+ else {
+ ret = -1;
+ if (ass->stream_index < fmt->nb_streams) {
+ for (j = 0; j < fmt->nb_streams; j++) {
+ if (fmt->streams[j]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+ if (ass->stream_index == k) {
+ ret = j;
+ break;
+ }
+ k++;
+ }
+ }
+ }
+ }
+
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to locate subtitle stream in %s\n",
+ ass->filename);
+ goto end;
+ }
+ sid = ret;
+ st = fmt->streams[sid];
+
+ /* Load attached fonts */
+ for (j = 0; j < fmt->nb_streams; j++) {
+ AVStream *st = fmt->streams[j];
+ if (st->codec->codec_type == AVMEDIA_TYPE_ATTACHMENT &&
+ attachment_is_font(st)) {
+ const AVDictionaryEntry *tag = NULL;
+ tag = av_dict_get(st->metadata, "filename", NULL,
+ AV_DICT_MATCH_CASE);
+
+ if (tag) {
+ av_log(ctx, AV_LOG_DEBUG, "Loading attached font: %s\n",
+ tag->value);
+ ass_add_font(ass->library, tag->value,
+ st->codec->extradata,
+ st->codec->extradata_size);
+ } else {
+ av_log(ctx, AV_LOG_WARNING,
+ "Font attachment has no filename, ignored.\n");
+ }
+ }
+ }
+
+ /* Initialize fonts */
+ ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);
+
+ /* Open decoder */
+ dec_ctx = st->codec;
+ dec = avcodec_find_decoder(dec_ctx->codec_id);
+ if (!dec) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to find subtitle codec %s\n",
+ avcodec_get_name(dec_ctx->codec_id));
+ return AVERROR(EINVAL);
+ }
+ dec_desc = avcodec_descriptor_get(dec_ctx->codec_id);
+ if (dec_desc && !(dec_desc->props & AV_CODEC_PROP_TEXT_SUB)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Only text based subtitles are currently supported\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ if (ass->charenc)
+ av_dict_set(&codec_opts, "sub_charenc", ass->charenc, 0);
+ ret = avcodec_open2(dec_ctx, dec, &codec_opts);
+ if (ret < 0)
+ goto end;
+
+ /* Decode subtitles and push them into the renderer (libass) */
+ if (dec_ctx->subtitle_header)
+ ass_process_codec_private(ass->track,
+ dec_ctx->subtitle_header,
+ dec_ctx->subtitle_header_size);
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+ while (av_read_frame(fmt, &pkt) >= 0) {
+ int i, got_subtitle;
+ AVSubtitle sub = {0};
+
+ if (pkt.stream_index == sid) {
+ ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_subtitle, &pkt);
+ if (ret < 0) {
+ av_log(ctx, AV_LOG_WARNING, "Error decoding: %s (ignored)\n",
+ av_err2str(ret));
+ } else if (got_subtitle) {
+ for (i = 0; i < sub.num_rects; i++) {
+ char *ass_line = sub.rects[i]->ass;
+ if (!ass_line)
+ break;
+ ass_process_data(ass->track, ass_line, strlen(ass_line));
+ }
+ }
+ }
+ av_free_packet(&pkt);
+ avsubtitle_free(&sub);
+ }
+
+end:
+ av_dict_free(&codec_opts);
+ if (dec_ctx)
+ avcodec_close(dec_ctx);
+ if (fmt)
+ avformat_close_input(&fmt);
+ return ret;
+}
+
+AVFilter ff_vf_subtitles = {
+ .name = "subtitles",
+ .description = NULL_IF_CONFIG_SMALL("Render text subtitles onto input video using the libass library."),
+ .priv_size = sizeof(AssContext),
+ .init = init_subtitles,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = ass_inputs,
+ .outputs = ass_outputs,
+ .priv_class = &subtitles_class,
+};
+#endif
diff --git a/libavfilter/vf_super2xsai.c b/libavfilter/vf_super2xsai.c
new file mode 100644
index 0000000..686dac1
--- /dev/null
+++ b/libavfilter/vf_super2xsai.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2010 Niel van der Westhuizen <nielkie@gmail.com>
+ * Copyright (c) 2002 A'rpi
+ * Copyright (c) 1997-2001 ZSNES Team ( zsknight@zsnes.com / _demo_@zsnes.com )
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Super 2xSaI video filter
+ * Ported from MPlayer libmpcodecs/vf_2xsai.c.
+ */
+
+#include "libavutil/pixdesc.h"
+#include "libavutil/intreadwrite.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ /* masks used for two pixels interpolation */
+ uint32_t hi_pixel_mask;
+ uint32_t lo_pixel_mask;
+
+ /* masks used for four pixels interpolation */
+ uint32_t q_hi_pixel_mask;
+ uint32_t q_lo_pixel_mask;
+
+ int bpp; ///< bytes per pixel, pixel stride for each (packed) pixel
+ int is_be;
+} Super2xSaIContext;
+
+#define GET_RESULT(A, B, C, D) ((A != C || A != D) - (B != C || B != D))
+
+#define INTERPOLATE(A, B) (((A & hi_pixel_mask) >> 1) + ((B & hi_pixel_mask) >> 1) + (A & B & lo_pixel_mask))
+
+#define Q_INTERPOLATE(A, B, C, D) ((A & q_hi_pixel_mask) >> 2) + ((B & q_hi_pixel_mask) >> 2) + ((C & q_hi_pixel_mask) >> 2) + ((D & q_hi_pixel_mask) >> 2) \
+ + ((((A & q_lo_pixel_mask) + (B & q_lo_pixel_mask) + (C & q_lo_pixel_mask) + (D & q_lo_pixel_mask)) >> 2) & q_lo_pixel_mask)
+
+static void super2xsai(AVFilterContext *ctx,
+ uint8_t *src, int src_linesize,
+ uint8_t *dst, int dst_linesize,
+ int width, int height)
+{
+ Super2xSaIContext *sai = ctx->priv;
+ unsigned int x, y;
+ uint32_t color[4][4];
+ unsigned char *src_line[4];
+ const int bpp = sai->bpp;
+ const uint32_t hi_pixel_mask = sai->hi_pixel_mask;
+ const uint32_t lo_pixel_mask = sai->lo_pixel_mask;
+ const uint32_t q_hi_pixel_mask = sai->q_hi_pixel_mask;
+ const uint32_t q_lo_pixel_mask = sai->q_lo_pixel_mask;
+
+ /* Point to the first 4 lines, first line is duplicated */
+ src_line[0] = src;
+ src_line[1] = src;
+ src_line[2] = src + src_linesize*FFMIN(1, height-1);
+ src_line[3] = src + src_linesize*FFMIN(2, height-1);
+
+#define READ_COLOR4(dst, src_line, off) dst = *((const uint32_t *)src_line + off)
+#define READ_COLOR3(dst, src_line, off) dst = AV_RL24 (src_line + 3*off)
+#define READ_COLOR2(dst, src_line, off) dst = sai->is_be ? AV_RB16(src_line + 2 * off) : AV_RL16(src_line + 2 * off)
+
+ for (y = 0; y < height; y++) {
+ uint8_t *dst_line[2];
+
+ dst_line[0] = dst + dst_linesize*2*y;
+ dst_line[1] = dst + dst_linesize*(2*y+1);
+
+ switch (bpp) {
+ case 4:
+ READ_COLOR4(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR4(color[0][2], src_line[0], 1); READ_COLOR4(color[0][3], src_line[0], 2);
+ READ_COLOR4(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR4(color[1][2], src_line[1], 1); READ_COLOR4(color[1][3], src_line[1], 2);
+ READ_COLOR4(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR4(color[2][2], src_line[2], 1); READ_COLOR4(color[2][3], src_line[2], 2);
+ READ_COLOR4(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR4(color[3][2], src_line[3], 1); READ_COLOR4(color[3][3], src_line[3], 2);
+ break;
+ case 3:
+ READ_COLOR3(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR3(color[0][2], src_line[0], 1); READ_COLOR3(color[0][3], src_line[0], 2);
+ READ_COLOR3(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR3(color[1][2], src_line[1], 1); READ_COLOR3(color[1][3], src_line[1], 2);
+ READ_COLOR3(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR3(color[2][2], src_line[2], 1); READ_COLOR3(color[2][3], src_line[2], 2);
+ READ_COLOR3(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR3(color[3][2], src_line[3], 1); READ_COLOR3(color[3][3], src_line[3], 2);
+ break;
+ default:
+ READ_COLOR2(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR2(color[0][2], src_line[0], 1); READ_COLOR2(color[0][3], src_line[0], 2);
+ READ_COLOR2(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR2(color[1][2], src_line[1], 1); READ_COLOR2(color[1][3], src_line[1], 2);
+ READ_COLOR2(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR2(color[2][2], src_line[2], 1); READ_COLOR2(color[2][3], src_line[2], 2);
+ READ_COLOR2(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR2(color[3][2], src_line[3], 1); READ_COLOR2(color[3][3], src_line[3], 2);
+ }
+
+ for (x = 0; x < width; x++) {
+ uint32_t product1a, product1b, product2a, product2b;
+
+//--------------------------------------- B0 B1 B2 B3 0 1 2 3
+// 4 5* 6 S2 -> 4 5* 6 7
+// 1 2 3 S1 8 9 10 11
+// A0 A1 A2 A3 12 13 14 15
+//--------------------------------------
+ if (color[2][1] == color[1][2] && color[1][1] != color[2][2]) {
+ product2b = color[2][1];
+ product1b = product2b;
+ } else if (color[1][1] == color[2][2] && color[2][1] != color[1][2]) {
+ product2b = color[1][1];
+ product1b = product2b;
+ } else if (color[1][1] == color[2][2] && color[2][1] == color[1][2]) {
+ int r = 0;
+
+ r += GET_RESULT(color[1][2], color[1][1], color[1][0], color[3][1]);
+ r += GET_RESULT(color[1][2], color[1][1], color[2][0], color[0][1]);
+ r += GET_RESULT(color[1][2], color[1][1], color[3][2], color[2][3]);
+ r += GET_RESULT(color[1][2], color[1][1], color[0][2], color[1][3]);
+
+ if (r > 0)
+ product1b = color[1][2];
+ else if (r < 0)
+ product1b = color[1][1];
+ else
+ product1b = INTERPOLATE(color[1][1], color[1][2]);
+
+ product2b = product1b;
+ } else {
+ if (color[1][2] == color[2][2] && color[2][2] == color[3][1] && color[2][1] != color[3][2] && color[2][2] != color[3][0])
+ product2b = Q_INTERPOLATE(color[2][2], color[2][2], color[2][2], color[2][1]);
+ else if (color[1][1] == color[2][1] && color[2][1] == color[3][2] && color[3][1] != color[2][2] && color[2][1] != color[3][3])
+ product2b = Q_INTERPOLATE(color[2][1], color[2][1], color[2][1], color[2][2]);
+ else
+ product2b = INTERPOLATE(color[2][1], color[2][2]);
+
+ if (color[1][2] == color[2][2] && color[1][2] == color[0][1] && color[1][1] != color[0][2] && color[1][2] != color[0][0])
+ product1b = Q_INTERPOLATE(color[1][2], color[1][2], color[1][2], color[1][1]);
+ else if (color[1][1] == color[2][1] && color[1][1] == color[0][2] && color[0][1] != color[1][2] && color[1][1] != color[0][3])
+ product1b = Q_INTERPOLATE(color[1][2], color[1][1], color[1][1], color[1][1]);
+ else
+ product1b = INTERPOLATE(color[1][1], color[1][2]);
+ }
+
+ if (color[1][1] == color[2][2] && color[2][1] != color[1][2] && color[1][0] == color[1][1] && color[1][1] != color[3][2])
+ product2a = INTERPOLATE(color[2][1], color[1][1]);
+ else if (color[1][1] == color[2][0] && color[1][2] == color[1][1] && color[1][0] != color[2][1] && color[1][1] != color[3][0])
+ product2a = INTERPOLATE(color[2][1], color[1][1]);
+ else
+ product2a = color[2][1];
+
+ if (color[2][1] == color[1][2] && color[1][1] != color[2][2] && color[2][0] == color[2][1] && color[2][1] != color[0][2])
+ product1a = INTERPOLATE(color[2][1], color[1][1]);
+ else if (color[1][0] == color[2][1] && color[2][2] == color[2][1] && color[2][0] != color[1][1] && color[2][1] != color[0][0])
+ product1a = INTERPOLATE(color[2][1], color[1][1]);
+ else
+ product1a = color[1][1];
+
+ /* Set the calculated pixels */
+ switch (bpp) {
+ case 4:
+ AV_WN32A(dst_line[0] + x * 8, product1a);
+ AV_WN32A(dst_line[0] + x * 8 + 4, product1b);
+ AV_WN32A(dst_line[1] + x * 8, product2a);
+ AV_WN32A(dst_line[1] + x * 8 + 4, product2b);
+ break;
+ case 3:
+ AV_WL24(dst_line[0] + x * 6, product1a);
+ AV_WL24(dst_line[0] + x * 6 + 3, product1b);
+ AV_WL24(dst_line[1] + x * 6, product2a);
+ AV_WL24(dst_line[1] + x * 6 + 3, product2b);
+ break;
+ default: // bpp = 2
+ if (sai->is_be) {
+ AV_WB32(dst_line[0] + x * 4, product1a | (product1b << 16));
+ AV_WB32(dst_line[1] + x * 4, product2a | (product2b << 16));
+ } else {
+ AV_WL32(dst_line[0] + x * 4, product1a | (product1b << 16));
+ AV_WL32(dst_line[1] + x * 4, product2a | (product2b << 16));
+ }
+ }
+
+ /* Move color matrix forward */
+ color[0][0] = color[0][1]; color[0][1] = color[0][2]; color[0][2] = color[0][3];
+ color[1][0] = color[1][1]; color[1][1] = color[1][2]; color[1][2] = color[1][3];
+ color[2][0] = color[2][1]; color[2][1] = color[2][2]; color[2][2] = color[2][3];
+ color[3][0] = color[3][1]; color[3][1] = color[3][2]; color[3][2] = color[3][3];
+
+ if (x < width - 3) {
+ x += 3;
+ switch (bpp) {
+ case 4:
+ READ_COLOR4(color[0][3], src_line[0], x);
+ READ_COLOR4(color[1][3], src_line[1], x);
+ READ_COLOR4(color[2][3], src_line[2], x);
+ READ_COLOR4(color[3][3], src_line[3], x);
+ break;
+ case 3:
+ READ_COLOR3(color[0][3], src_line[0], x);
+ READ_COLOR3(color[1][3], src_line[1], x);
+ READ_COLOR3(color[2][3], src_line[2], x);
+ READ_COLOR3(color[3][3], src_line[3], x);
+ break;
+ default: /* case 2 */
+ READ_COLOR2(color[0][3], src_line[0], x);
+ READ_COLOR2(color[1][3], src_line[1], x);
+ READ_COLOR2(color[2][3], src_line[2], x);
+ READ_COLOR2(color[3][3], src_line[3], x);
+ }
+ x -= 3;
+ }
+ }
+
+ /* We're done with one line, so we shift the source lines up */
+ src_line[0] = src_line[1];
+ src_line[1] = src_line[2];
+ src_line[2] = src_line[3];
+
+ /* Read next line */
+ src_line[3] = src_line[2];
+ if (y < height - 3)
+ src_line[3] += src_linesize;
+ } // y loop
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGB565BE, AV_PIX_FMT_BGR565BE, AV_PIX_FMT_RGB555BE, AV_PIX_FMT_BGR555BE,
+ AV_PIX_FMT_RGB565LE, AV_PIX_FMT_BGR565LE, AV_PIX_FMT_RGB555LE, AV_PIX_FMT_BGR555LE,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ Super2xSaIContext *sai = inlink->dst->priv;
+
+ sai->hi_pixel_mask = 0xFEFEFEFE;
+ sai->lo_pixel_mask = 0x01010101;
+ sai->q_hi_pixel_mask = 0xFCFCFCFC;
+ sai->q_lo_pixel_mask = 0x03030303;
+ sai->bpp = 4;
+
+ switch (inlink->format) {
+ case AV_PIX_FMT_RGB24:
+ case AV_PIX_FMT_BGR24:
+ sai->bpp = 3;
+ break;
+
+ case AV_PIX_FMT_RGB565BE:
+ case AV_PIX_FMT_BGR565BE:
+ sai->is_be = 1;
+ case AV_PIX_FMT_RGB565LE:
+ case AV_PIX_FMT_BGR565LE:
+ sai->hi_pixel_mask = 0xF7DEF7DE;
+ sai->lo_pixel_mask = 0x08210821;
+ sai->q_hi_pixel_mask = 0xE79CE79C;
+ sai->q_lo_pixel_mask = 0x18631863;
+ sai->bpp = 2;
+ break;
+
+ case AV_PIX_FMT_BGR555BE:
+ case AV_PIX_FMT_RGB555BE:
+ sai->is_be = 1;
+ case AV_PIX_FMT_BGR555LE:
+ case AV_PIX_FMT_RGB555LE:
+ sai->hi_pixel_mask = 0x7BDE7BDE;
+ sai->lo_pixel_mask = 0x04210421;
+ sai->q_hi_pixel_mask = 0x739C739C;
+ sai->q_lo_pixel_mask = 0x0C630C63;
+ sai->bpp = 2;
+ break;
+ }
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ outlink->w = inlink->w*2;
+ outlink->h = inlink->h*2;
+
+ av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
+ av_get_pix_fmt_name(inlink->format),
+ inlink->w, inlink->h, outlink->w, outlink->h);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpicref) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpicref, inpicref);
+ outpicref->width = outlink->w;
+ outpicref->height = outlink->h;
+
+ super2xsai(inlink->dst, inpicref->data[0], inpicref->linesize[0],
+ outpicref->data[0], outpicref->linesize[0],
+ inlink->w, inlink->h);
+
+ av_frame_free(&inpicref);
+ return ff_filter_frame(outlink, outpicref);
+}
+
+static const AVFilterPad super2xsai_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad super2xsai_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_super2xsai = {
+ .name = "super2xsai",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
+ .priv_size = sizeof(Super2xSaIContext),
+ .query_formats = query_formats,
+ .inputs = super2xsai_inputs,
+ .outputs = super2xsai_outputs,
+};
diff --git a/libavfilter/vf_swapuv.c b/libavfilter/vf_swapuv.c
new file mode 100644
index 0000000..175e39c
--- /dev/null
+++ b/libavfilter/vf_swapuv.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * swap UV filter
+ */
+
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+static void do_swap(AVFrame *frame)
+{
+ FFSWAP(uint8_t*, frame->data[1], frame->data[2]);
+ FFSWAP(int, frame->linesize[1], frame->linesize[2]);
+ FFSWAP(uint64_t, frame->error[1], frame->error[2]);
+ FFSWAP(AVBufferRef*, frame->buf[1], frame->buf[2]);
+}
+
+static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
+{
+ AVFrame *picref = ff_default_get_video_buffer(link, w, h);
+ do_swap(picref);
+ return picref;
+}
+
+static int filter_frame(AVFilterLink *link, AVFrame *inpicref)
+{
+ do_swap(inpicref);
+ return ff_filter_frame(link->dst->outputs[0], inpicref);
+}
+
+static int is_planar_yuv(const AVPixFmtDescriptor *desc)
+{
+ int i;
+
+ if (desc->flags & ~(AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA) ||
+ desc->nb_components < 3 ||
+ (desc->comp[1].depth_minus1 != desc->comp[2].depth_minus1))
+ return 0;
+ for (i = 0; i < desc->nb_components; i++) {
+ if (desc->comp[i].offset_plus1 != 1 ||
+ desc->comp[i].shift != 0 ||
+ desc->comp[i].plane != i)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (is_planar_yuv(desc))
+ ff_add_format(&formats, fmt);
+ }
+
+ ff_set_common_formats(ctx, formats);
+ return 0;
+}
+
+static const AVFilterPad swapuv_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = get_video_buffer,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad swapuv_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_swapuv = {
+ .name = "swapuv",
+ .description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
+ .query_formats = query_formats,
+ .inputs = swapuv_inputs,
+ .outputs = swapuv_outputs,
+};
diff --git a/libavfilter/vf_telecine.c b/libavfilter/vf_telecine.c
new file mode 100644
index 0000000..aea63ab
--- /dev/null
+++ b/libavfilter/vf_telecine.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2012 Rudolf Polzer
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file telecine filter, heavily based from mpv-player:TOOLS/vf_dlopen/telecine.c by
+ * Rudolf Polzer.
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int first_field;
+ char *pattern;
+ unsigned int pattern_pos;
+
+ AVRational pts;
+ double ts_unit;
+ int out_cnt;
+ int occupied;
+
+ int nb_planes;
+ int planeheight[4];
+ int stride[4];
+
+ AVFrame *frame[5];
+ AVFrame *temp;
+} TelecineContext;
+
+#define OFFSET(x) offsetof(TelecineContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption telecine_options[] = {
+ {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
+ {"top", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ {"t", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
+ {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ {"b", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
+ {"pattern", "pattern that describe for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(telecine);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ TelecineContext *tc = ctx->priv;
+ const char *p;
+ int max = 0;
+
+ if (!strlen(tc->pattern)) {
+ av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (p = tc->pattern; *p; p++) {
+ if (!av_isdigit(*p)) {
+ av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ max = FFMAX(*p - '0', max);
+ tc->pts.num += 2;
+ tc->pts.den += *p - '0';
+ }
+
+ tc->out_cnt = (max + 1) / 2;
+ av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
+ tc->pattern, tc->out_cnt, tc->pts.num, tc->pts.den);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM))
+ ff_add_format(&pix_fmts, fmt);
+ }
+
+ ff_set_common_formats(ctx, pix_fmts);
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ TelecineContext *tc = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int i, ret;
+
+ tc->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ if (!tc->temp)
+ return AVERROR(ENOMEM);
+ for (i = 0; i < tc->out_cnt; i++) {
+ tc->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ if (!tc->frame[i])
+ return AVERROR(ENOMEM);
+ }
+
+ if ((ret = av_image_fill_linesizes(tc->stride, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ tc->planeheight[1] = tc->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ tc->planeheight[0] = tc->planeheight[3] = inlink->h;
+
+ tc->nb_planes = av_pix_fmt_count_planes(inlink->format);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TelecineContext *tc = ctx->priv;
+ const AVFilterLink *inlink = ctx->inputs[0];
+ AVRational fps = inlink->frame_rate;
+
+ if (!fps.num || !fps.den) {
+ av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
+ "current rate of %d/%d is invalid\n", fps.num, fps.den);
+ return AVERROR(EINVAL);
+ }
+ fps = av_mul_q(fps, av_inv_q(tc->pts));
+ av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
+ inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ outlink->frame_rate = fps;
+ outlink->time_base = av_mul_q(inlink->time_base, tc->pts);
+ av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
+ inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);
+
+ tc->ts_unit = av_q2d(av_inv_q(av_mul_q(fps, outlink->time_base)));
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ TelecineContext *tc = ctx->priv;
+ int i, len, ret = 0, nout = 0;
+
+ len = tc->pattern[tc->pattern_pos] - '0';
+
+ tc->pattern_pos++;
+ if (!tc->pattern[tc->pattern_pos])
+ tc->pattern_pos = 0;
+
+ if (!len) { // do not output any field from this frame
+ av_frame_free(&inpicref);
+ return 0;
+ }
+
+ if (tc->occupied) {
+ for (i = 0; i < tc->nb_planes; i++) {
+ // fill in the EARLIER field from the buffered pic
+ av_image_copy_plane(tc->frame[nout]->data[i] + tc->frame[nout]->linesize[i] * tc->first_field,
+ tc->frame[nout]->linesize[i] * 2,
+ tc->temp->data[i] + tc->temp->linesize[i] * tc->first_field,
+ tc->temp->linesize[i] * 2,
+ tc->stride[i],
+ (tc->planeheight[i] - tc->first_field + 1) / 2);
+ // fill in the LATER field from the new pic
+ av_image_copy_plane(tc->frame[nout]->data[i] + tc->frame[nout]->linesize[i] * !tc->first_field,
+ tc->frame[nout]->linesize[i] * 2,
+ inpicref->data[i] + inpicref->linesize[i] * !tc->first_field,
+ inpicref->linesize[i] * 2,
+ tc->stride[i],
+ (tc->planeheight[i] - !tc->first_field + 1) / 2);
+ }
+ nout++;
+ len--;
+ tc->occupied = 0;
+ }
+
+ while (len >= 2) {
+ // output THIS image as-is
+ for (i = 0; i < tc->nb_planes; i++)
+ av_image_copy_plane(tc->frame[nout]->data[i], tc->frame[nout]->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ tc->stride[i],
+ tc->planeheight[i]);
+ nout++;
+ len -= 2;
+ }
+
+ if (len >= 1) {
+ // copy THIS image to the buffer, we need it later
+ for (i = 0; i < tc->nb_planes; i++)
+ av_image_copy_plane(tc->temp->data[i], tc->temp->linesize[i],
+ inpicref->data[i], inpicref->linesize[i],
+ tc->stride[i],
+ tc->planeheight[i]);
+ tc->occupied = 1;
+ }
+
+ for (i = 0; i < nout; i++) {
+ AVFrame *frame = av_frame_clone(tc->frame[i]);
+
+ if (!frame) {
+ av_frame_free(&inpicref);
+ return AVERROR(ENOMEM);
+ }
+
+ frame->pts = outlink->frame_count * tc->ts_unit;
+ ret = ff_filter_frame(outlink, frame);
+ }
+ av_frame_free(&inpicref);
+
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TelecineContext *tc = ctx->priv;
+ int i;
+
+ av_frame_free(&tc->temp);
+ for (i = 0; i < tc->out_cnt; i++)
+ av_frame_free(&tc->frame[i]);
+}
+
+static const AVFilterPad telecine_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad telecine_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_telecine = {
+ .name = "telecine",
+ .description = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
+ .priv_size = sizeof(TelecineContext),
+ .priv_class = &telecine_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = telecine_inputs,
+ .outputs = telecine_outputs,
+};
diff --git a/libavfilter/vf_thumbnail.c b/libavfilter/vf_thumbnail.c
new file mode 100644
index 0000000..1883154
--- /dev/null
+++ b/libavfilter/vf_thumbnail.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2011 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Potential thumbnail lookup filter to reduce the risk of an inappropriate
+ * selection (such as a black frame) we could get with an absolute seek.
+ *
+ * Simplified version of algorithm by Vadim Zaliva <lord@crocodile.org>.
+ * @see http://notbrainsurgery.livejournal.com/29773.html
+ */
+
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#define HIST_SIZE (3*256)
+
+struct thumb_frame {
+ AVFrame *buf; ///< cached frame
+ int histogram[HIST_SIZE]; ///< RGB color distribution histogram of the frame
+};
+
+typedef struct {
+ const AVClass *class;
+ int n; ///< current frame
+ int n_frames; ///< number of frames for analysis
+ struct thumb_frame *frames; ///< the n_frames frames
+ AVRational tb; ///< copy of the input timebase to ease access
+} ThumbContext;
+
+#define OFFSET(x) offsetof(ThumbContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption thumbnail_options[] = {
+ { "n", "set the frames batch size", OFFSET(n_frames), AV_OPT_TYPE_INT, {.i64=100}, 2, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(thumbnail);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ThumbContext *thumb = ctx->priv;
+
+ thumb->frames = av_calloc(thumb->n_frames, sizeof(*thumb->frames));
+ if (!thumb->frames) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Allocation failure, try to lower the number of frames\n");
+ return AVERROR(ENOMEM);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "batch size: %d frames\n", thumb->n_frames);
+ return 0;
+}
+
+/**
+ * @brief Compute Sum-square deviation to estimate "closeness".
+ * @param hist color distribution histogram
+ * @param median average color distribution histogram
+ * @return sum of squared errors
+ */
+static double frame_sum_square_err(const int *hist, const double *median)
+{
+ int i;
+ double err, sum_sq_err = 0;
+
+ for (i = 0; i < HIST_SIZE; i++) {
+ err = median[i] - (double)hist[i];
+ sum_sq_err += err*err;
+ }
+ return sum_sq_err;
+}
+
+static AVFrame *get_best_frame(AVFilterContext *ctx)
+{
+ AVFrame *picref;
+ ThumbContext *thumb = ctx->priv;
+ int i, j, best_frame_idx = 0;
+ int nb_frames = thumb->n;
+ double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;
+
+ // average histogram of the N frames
+ for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
+ for (i = 0; i < nb_frames; i++)
+ avg_hist[j] += (double)thumb->frames[i].histogram[j];
+ avg_hist[j] /= nb_frames;
+ }
+
+ // find the frame closer to the average using the sum of squared errors
+ for (i = 0; i < nb_frames; i++) {
+ sq_err = frame_sum_square_err(thumb->frames[i].histogram, avg_hist);
+ if (i == 0 || sq_err < min_sq_err)
+ best_frame_idx = i, min_sq_err = sq_err;
+ }
+
+ // free and reset everything (except the best frame buffer)
+ for (i = 0; i < nb_frames; i++) {
+ memset(thumb->frames[i].histogram, 0, sizeof(thumb->frames[i].histogram));
+ if (i != best_frame_idx)
+ av_frame_free(&thumb->frames[i].buf);
+ }
+ thumb->n = 0;
+
+ // raise the chosen one
+ picref = thumb->frames[best_frame_idx].buf;
+ av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected "
+ "from a set of %d images\n", best_frame_idx,
+ picref->pts * av_q2d(thumb->tb), nb_frames);
+ thumb->frames[best_frame_idx].buf = NULL;
+
+ return picref;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ int i, j;
+ AVFilterContext *ctx = inlink->dst;
+ ThumbContext *thumb = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ int *hist = thumb->frames[thumb->n].histogram;
+ const uint8_t *p = frame->data[0];
+
+ // keep a reference of each frame
+ thumb->frames[thumb->n].buf = frame;
+
+ // update current frame RGB histogram
+ for (j = 0; j < inlink->h; j++) {
+ for (i = 0; i < inlink->w; i++) {
+ hist[0*256 + p[i*3 ]]++;
+ hist[1*256 + p[i*3 + 1]]++;
+ hist[2*256 + p[i*3 + 2]]++;
+ }
+ p += frame->linesize[0];
+ }
+
+ // no selection until the buffer of N frames is filled up
+ thumb->n++;
+ if (thumb->n < thumb->n_frames)
+ return 0;
+
+ return ff_filter_frame(outlink, get_best_frame(ctx));
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ int i;
+ ThumbContext *thumb = ctx->priv;
+ for (i = 0; i < thumb->n_frames && thumb->frames[i].buf; i++)
+ av_frame_free(&thumb->frames[i].buf);
+ av_freep(&thumb->frames);
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ AVFilterContext *ctx = link->src;
+ ThumbContext *thumb = ctx->priv;
+
+ /* loop until a frame thumbnail is available (when a frame is queued,
+ * thumb->n is reset to zero) */
+ do {
+ int ret = ff_request_frame(ctx->inputs[0]);
+ if (ret == AVERROR_EOF && thumb->n) {
+ ret = ff_filter_frame(link, get_best_frame(ctx));
+ if (ret < 0)
+ return ret;
+ ret = AVERROR_EOF;
+ }
+ if (ret < 0)
+ return ret;
+ } while (thumb->n);
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ ThumbContext *thumb = ctx->priv;
+
+ thumb->tb = inlink->time_base;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static const AVFilterPad thumbnail_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad thumbnail_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_thumbnail = {
+ .name = "thumbnail",
+ .description = NULL_IF_CONFIG_SMALL("Select the most representative frame in a given sequence of consecutive frames."),
+ .priv_size = sizeof(ThumbContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = thumbnail_inputs,
+ .outputs = thumbnail_outputs,
+ .priv_class = &thumbnail_class,
+};
diff --git a/libavfilter/vf_tile.c b/libavfilter/vf_tile.c
new file mode 100644
index 0000000..459ae46
--- /dev/null
+++ b/libavfilter/vf_tile.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * tile video filter
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "drawutils.h"
+#include "formats.h"
+#include "video.h"
+#include "internal.h"
+
+typedef struct {
+ const AVClass *class;
+ unsigned w, h;
+ unsigned margin;
+ unsigned padding;
+ unsigned current;
+ unsigned nb_frames;
+ FFDrawContext draw;
+ FFDrawColor blank;
+ AVFrame *out_ref;
+ uint8_t rgba_color[4];
+} TileContext;
+
+#define REASONABLE_SIZE 1024
+
+#define OFFSET(x) offsetof(TileContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption tile_options[] = {
+ { "layout", "set grid size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE,
+ {.str = "6x5"}, 0, 0, FLAGS },
+ { "nb_frames", "set maximum number of frame to render", OFFSET(nb_frames),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
+ { "margin", "set outer border margin in pixels", OFFSET(margin),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
+ { "padding", "set inner border thickness in pixels", OFFSET(padding),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
+ { "color", "set the color of the unused area", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(tile);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ TileContext *tile = ctx->priv;
+
+ if (tile->w > REASONABLE_SIZE || tile->h > REASONABLE_SIZE) {
+ av_log(ctx, AV_LOG_ERROR, "Tile size %ux%u is insane.\n",
+ tile->w, tile->h);
+ return AVERROR(EINVAL);
+ }
+
+ if (tile->nb_frames == 0) {
+ tile->nb_frames = tile->w * tile->h;
+ } else if (tile->nb_frames > tile->w * tile->h) {
+ av_log(ctx, AV_LOG_ERROR, "nb_frames must be less than or equal to %dx%d=%d\n",
+ tile->w, tile->h, tile->w * tile->h);
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const unsigned total_margin_w = (tile->w - 1) * tile->padding + 2*tile->margin;
+ const unsigned total_margin_h = (tile->h - 1) * tile->padding + 2*tile->margin;
+
+ if (inlink->w > (INT_MAX - total_margin_w) / tile->w) {
+ av_log(ctx, AV_LOG_ERROR, "Total width %ux%u is too much.\n",
+ tile->w, inlink->w);
+ return AVERROR(EINVAL);
+ }
+ if (inlink->h > (INT_MAX - total_margin_h) / tile->h) {
+ av_log(ctx, AV_LOG_ERROR, "Total height %ux%u is too much.\n",
+ tile->h, inlink->h);
+ return AVERROR(EINVAL);
+ }
+ outlink->w = tile->w * inlink->w + total_margin_w;
+ outlink->h = tile->h * inlink->h + total_margin_h;
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ outlink->frame_rate = av_mul_q(inlink->frame_rate,
+ av_make_q(1, tile->nb_frames));
+ ff_draw_init(&tile->draw, inlink->format, 0);
+ ff_draw_color(&tile->draw, &tile->blank, tile->rgba_color);
+
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
+ return 0;
+}
+
+static void get_current_tile_pos(AVFilterContext *ctx, unsigned *x, unsigned *y)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const unsigned tx = tile->current % tile->w;
+ const unsigned ty = tile->current / tile->w;
+
+ *x = tile->margin + (inlink->w + tile->padding) * tx;
+ *y = tile->margin + (inlink->h + tile->padding) * ty;
+}
+
+static void draw_blank_frame(AVFilterContext *ctx, AVFrame *out_buf)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ unsigned x0, y0;
+
+ get_current_tile_pos(ctx, &x0, &y0);
+ ff_fill_rectangle(&tile->draw, &tile->blank,
+ out_buf->data, out_buf->linesize,
+ x0, y0, inlink->w, inlink->h);
+ tile->current++;
+}
+static int end_last_frame(AVFilterContext *ctx)
+{
+ TileContext *tile = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out_buf = tile->out_ref;
+ int ret;
+
+ while (tile->current < tile->nb_frames)
+ draw_blank_frame(ctx, out_buf);
+ ret = ff_filter_frame(outlink, out_buf);
+ tile->current = 0;
+ return ret;
+}
+
+/* Note: direct rendering is not possible since there is no guarantee that
+ * buffers are fed to filter_frame in the order they were obtained from
+ * get_buffer (think B-frames). */
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ unsigned x0, y0;
+
+ if (!tile->current) {
+ tile->out_ref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!tile->out_ref) {
+ av_frame_free(&picref);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(tile->out_ref, picref);
+ tile->out_ref->width = outlink->w;
+ tile->out_ref->height = outlink->h;
+
+ /* fill surface once for margin/padding */
+ if (tile->margin || tile->padding)
+ ff_fill_rectangle(&tile->draw, &tile->blank,
+ tile->out_ref->data,
+ tile->out_ref->linesize,
+ 0, 0, outlink->w, outlink->h);
+ }
+
+ get_current_tile_pos(ctx, &x0, &y0);
+ ff_copy_rectangle2(&tile->draw,
+ tile->out_ref->data, tile->out_ref->linesize,
+ picref->data, picref->linesize,
+ x0, y0, 0, 0, inlink->w, inlink->h);
+
+ av_frame_free(&picref);
+ if (++tile->current == tile->nb_frames)
+ return end_last_frame(ctx);
+
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TileContext *tile = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ int r;
+
+ r = ff_request_frame(inlink);
+ if (r == AVERROR_EOF && tile->current)
+ r = end_last_frame(ctx);
+ return r;
+}
+
+static const AVFilterPad tile_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad tile_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_props,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_tile = {
+ .name = "tile",
+ .description = NULL_IF_CONFIG_SMALL("Tile several successive frames together."),
+ .init = init,
+ .query_formats = query_formats,
+ .priv_size = sizeof(TileContext),
+ .inputs = tile_inputs,
+ .outputs = tile_outputs,
+ .priv_class = &tile_class,
+};
diff --git a/libavfilter/vf_tinterlace.c b/libavfilter/vf_tinterlace.c
new file mode 100644
index 0000000..7397beb
--- /dev/null
+++ b/libavfilter/vf_tinterlace.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2010 Baptiste Coudurier
+ * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * temporal field interlace filter, ported from MPlayer/libmpcodecs
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "internal.h"
+
+enum TInterlaceMode {
+ MODE_MERGE = 0,
+ MODE_DROP_EVEN,
+ MODE_DROP_ODD,
+ MODE_PAD,
+ MODE_INTERLEAVE_TOP,
+ MODE_INTERLEAVE_BOTTOM,
+ MODE_INTERLACEX2,
+ MODE_NB,
+};
+
+typedef struct {
+ const AVClass *class;
+ enum TInterlaceMode mode; ///< interlace mode selected
+ int flags; ///< flags affecting interlacing algorithm
+ int frame; ///< number of the output frame
+ int vsub; ///< chroma vertical subsampling
+ AVFrame *cur;
+ AVFrame *next;
+ uint8_t *black_data[4]; ///< buffer used to fill padded lines
+ int black_linesize[4];
+} TInterlaceContext;
+
+#define OFFSET(x) offsetof(TInterlaceContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define TINTERLACE_FLAG_VLPF 01
+
+static const AVOption tinterlace_options[] = {
+ {"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_MERGE}, 0, MODE_NB-1, FLAGS, "mode"},
+ {"merge", "merge fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGE}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"drop_even", "drop even fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_EVEN}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"drop_odd", "drop odd fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_ODD}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"pad", "pad alternate lines with black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PAD}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"interleave_top", "interleave top and bottom fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_TOP}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"interleave_bottom", "interleave bottom and top fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ {"interlacex2", "interlace fields from two consecutive frames", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLACEX2}, INT_MIN, INT_MAX, FLAGS, "mode"},
+
+ {"flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX, 0, "flags" },
+ {"low_pass_filter", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
+ {"vlpf", "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
+
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(tinterlace);
+
+#define FULL_SCALE_YUVJ_FORMATS \
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P
+
+static const enum AVPixelFormat full_scale_yuvj_pix_fmts[] = {
+ FULL_SCALE_YUVJ_FORMATS, AV_PIX_FMT_NONE
+};
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TInterlaceContext *tinterlace = ctx->priv;
+
+ av_frame_free(&tinterlace->cur );
+ av_frame_free(&tinterlace->next);
+ av_freep(&tinterlace->black_data[0]);
+}
+
+static int config_out_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
+ TInterlaceContext *tinterlace = ctx->priv;
+
+ tinterlace->vsub = desc->log2_chroma_h;
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+ outlink->w = inlink->w;
+ outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
+ inlink->h*2 : inlink->h;
+
+ if (tinterlace->mode == MODE_PAD) {
+ uint8_t black[4] = { 16, 128, 128, 16 };
+ int i, ret;
+ if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
+ black[0] = black[3] = 0;
+ ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
+ outlink->w, outlink->h, outlink->format, 1);
+ if (ret < 0)
+ return ret;
+
+ /* fill black picture with black */
+ for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
+ int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
+ memset(tinterlace->black_data[i], black[i],
+ tinterlace->black_linesize[i] * h);
+ }
+ }
+ if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
+ && !(tinterlace->mode == MODE_INTERLEAVE_TOP
+ || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
+ av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
+ tinterlace->mode);
+ tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
+ }
+ if (tinterlace->mode == MODE_INTERLACEX2) {
+ outlink->time_base.num = inlink->time_base.num;
+ outlink->time_base.den = inlink->time_base.den * 2;
+ outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
+ } else if (tinterlace->mode != MODE_PAD) {
+ outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
+ tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
+ inlink->h, outlink->h);
+
+ return 0;
+}
+
+#define FIELD_UPPER 0
+#define FIELD_LOWER 1
+#define FIELD_UPPER_AND_LOWER 2
+
+/**
+ * Copy picture field from src to dst.
+ *
+ * @param src_field copy from upper, lower field or both
+ * @param interleave leave a padding line between each copied line
+ * @param dst_field copy to upper or lower field,
+ * only meaningful when interleave is selected
+ * @param flags context flags
+ */
+static inline
+void copy_picture_field(uint8_t *dst[4], int dst_linesize[4],
+ const uint8_t *src[4], int src_linesize[4],
+ enum AVPixelFormat format, int w, int src_h,
+ int src_field, int interleave, int dst_field,
+ int flags)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
+ int plane, vsub = desc->log2_chroma_h;
+ int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
+ int h, i;
+
+ for (plane = 0; plane < desc->nb_components; plane++) {
+ int lines = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(src_h, vsub) : src_h;
+ int linesize = av_image_get_linesize(format, w, plane);
+ uint8_t *dstp = dst[plane];
+ const uint8_t *srcp = src[plane];
+
+ if (linesize < 0)
+ return;
+
+ lines = (lines + (src_field == FIELD_UPPER)) / k;
+ if (src_field == FIELD_LOWER)
+ srcp += src_linesize[plane];
+ if (interleave && dst_field == FIELD_LOWER)
+ dstp += dst_linesize[plane];
+ if (flags & TINTERLACE_FLAG_VLPF) {
+ // Low-pass filtering is required when creating an interlaced destination from
+ // a progressive source which contains high-frequency vertical detail.
+ // Filtering will reduce interlace 'twitter' and Moire patterning.
+ int srcp_linesize = src_linesize[plane] * k;
+ int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
+ for (h = lines; h > 0; h--) {
+ const uint8_t *srcp_above = srcp - src_linesize[plane];
+ const uint8_t *srcp_below = srcp + src_linesize[plane];
+ if (h == lines) srcp_above = srcp; // there is no line above
+ if (h == 1) srcp_below = srcp; // there is no line below
+ for (i = 0; i < linesize; i++) {
+ // this calculation is an integer representation of
+ // '0.5 * current + 0.25 * above + 0.25 * below'
+ // '1 +' is for rounding. */
+ dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
+ }
+ dstp += dstp_linesize;
+ srcp += srcp_linesize;
+ }
+ } else {
+ av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
+ srcp, src_linesize[plane]*k, linesize, lines);
+ }
+ }
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ TInterlaceContext *tinterlace = ctx->priv;
+ AVFrame *cur, *next, *out;
+ int field, tff, ret;
+
+ av_frame_free(&tinterlace->cur);
+ tinterlace->cur = tinterlace->next;
+ tinterlace->next = picref;
+
+ cur = tinterlace->cur;
+ next = tinterlace->next;
+ /* we need at least two frames */
+ if (!tinterlace->cur)
+ return 0;
+
+ switch (tinterlace->mode) {
+ case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
+ * the lower field, generating a double-height video at half framerate */
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, cur);
+ out->height = outlink->h;
+ out->interlaced_frame = 1;
+ out->top_field_first = 1;
+
+ /* write odd frame lines into the upper field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, FIELD_UPPER, tinterlace->flags);
+ /* write even frame lines into the lower field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER, tinterlace->flags);
+ av_frame_free(&tinterlace->next);
+ break;
+
+ case MODE_DROP_ODD: /* only output even frames, odd frames are dropped; height unchanged, half framerate */
+ case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */
+ out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_free(&tinterlace->next);
+ break;
+
+ case MODE_PAD: /* expand each frame to double height, but pad alternate
+ * lines with black; framerate unchanged */
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, cur);
+ out->height = outlink->h;
+
+ field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
+ /* copy upper and lower fields */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
+ /* pad with black the other field */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
+ inlink->format, inlink->w, inlink->h,
+ FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
+ break;
+
+ /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
+ * halving the frame rate and preserving image height */
+ case MODE_INTERLEAVE_TOP: /* top field first */
+ case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
+ tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, cur);
+ out->interlaced_frame = 1;
+ out->top_field_first = tff;
+
+ /* copy upper/lower field from cur */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
+ tinterlace->flags);
+ /* copy lower/upper field from next */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
+ tinterlace->flags);
+ av_frame_free(&tinterlace->next);
+ break;
+ case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
+ /* output current frame first */
+ out = av_frame_clone(cur);
+ if (!out)
+ return AVERROR(ENOMEM);
+ out->interlaced_frame = 1;
+ if (cur->pts != AV_NOPTS_VALUE)
+ out->pts = cur->pts*2;
+
+ if ((ret = ff_filter_frame(outlink, out)) < 0)
+ return ret;
+
+ /* output mix of current and next frame */
+ tff = next->top_field_first;
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, next);
+ out->interlaced_frame = 1;
+
+ if (next->pts != AV_NOPTS_VALUE && cur->pts != AV_NOPTS_VALUE)
+ out->pts = cur->pts + next->pts;
+ else
+ out->pts = AV_NOPTS_VALUE;
+ /* write current frame second field lines into the second field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)cur->data, cur->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
+ tinterlace->flags);
+ /* write next frame first field lines into the first field of the new frame */
+ copy_picture_field(out->data, out->linesize,
+ (const uint8_t **)next->data, next->linesize,
+ inlink->format, inlink->w, inlink->h,
+ tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
+ tinterlace->flags);
+ break;
+ default:
+ av_assert0(0);
+ }
+
+ ret = ff_filter_frame(outlink, out);
+ tinterlace->frame++;
+
+ return ret;
+}
+
+static const AVFilterPad tinterlace_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad tinterlace_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_out_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_tinterlace = {
+ .name = "tinterlace",
+ .description = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."),
+ .priv_size = sizeof(TInterlaceContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = tinterlace_inputs,
+ .outputs = tinterlace_outputs,
+ .priv_class = &tinterlace_class,
+};
diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c
index 07602b9..d9b165c 100644
--- a/libavfilter/vf_transpose.c
+++ b/libavfilter/vf_transpose.c
@@ -2,20 +2,20 @@
* Copyright (c) 2010 Stefano Sabatini
* Copyright (c) 2008 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,6 +38,12 @@
#include "internal.h"
#include "video.h"
+typedef enum {
+ TRANSPOSE_PT_TYPE_NONE,
+ TRANSPOSE_PT_TYPE_LANDSCAPE,
+ TRANSPOSE_PT_TYPE_PORTRAIT,
+} PassthroughType;
+
enum TransposeDir {
TRANSPOSE_CCLOCK_FLIP,
TRANSPOSE_CLOCK,
@@ -50,36 +56,26 @@ typedef struct TransContext {
int hsub, vsub;
int pixsteps[4];
+ PassthroughType passthrough; ///< landscape passthrough mode enabled
enum TransposeDir dir;
} TransContext;
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
- AV_PIX_FMT_RGB565BE, AV_PIX_FMT_RGB565LE,
- AV_PIX_FMT_RGB555BE, AV_PIX_FMT_RGB555LE,
- AV_PIX_FMT_BGR565BE, AV_PIX_FMT_BGR565LE,
- AV_PIX_FMT_BGR555BE, AV_PIX_FMT_BGR555LE,
- AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE,
- AV_PIX_FMT_YUV420P16LE, AV_PIX_FMT_YUV420P16BE,
- AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV422P16BE,
- AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV444P16BE,
- AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
- AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8,
- AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
- AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ AVFilterFormats *pix_fmts = NULL;
+ int fmt;
+
+ for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
+ desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
+ desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
+ desc->log2_chroma_w != desc->log2_chroma_h))
+ ff_add_format(&pix_fmts, fmt);
+ }
+
+
+ ff_set_common_formats(ctx, pix_fmts);
return 0;
}
@@ -91,6 +87,23 @@ static int config_props_output(AVFilterLink *outlink)
const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
const AVPixFmtDescriptor *desc_in = av_pix_fmt_desc_get(inlink->format);
+ if (trans->dir&4) {
+ av_log(ctx, AV_LOG_WARNING,
+ "dir values greater than 3 are deprecated, use the passthrough option instead\n");
+ trans->dir &= 3;
+ trans->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE;
+ }
+
+ if ((inlink->w >= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
+ (inlink->w <= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
+ inlink->w, inlink->h, inlink->w, inlink->h);
+ return 0;
+ } else {
+ trans->passthrough = TRANSPOSE_PT_TYPE_NONE;
+ }
+
trans->hsub = desc_in->log2_chroma_w;
trans->vsub = desc_in->log2_chroma_h;
@@ -113,41 +126,43 @@ static int config_props_output(AVFilterLink *outlink)
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
- AVFilterLink *outlink = inlink->dst->outputs[0];
TransContext *trans = inlink->dst->priv;
- AVFrame *out;
- int plane;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
- }
+ return trans->passthrough ?
+ ff_null_get_video_buffer (inlink, w, h) :
+ ff_default_get_video_buffer(inlink, w, h);
+}
- out->pts = in->pts;
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
- if (in->sample_aspect_ratio.num == 0) {
- out->sample_aspect_ratio = in->sample_aspect_ratio;
- } else {
- out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
- out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
- }
+static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr,
+ int nb_jobs)
+{
+ TransContext *trans = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *out = td->out;
+ AVFrame *in = td->in;
+ int plane;
for (plane = 0; out->data[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
int pixstep = trans->pixsteps[plane];
- int inh = in->height >> vsub;
- int outw = out->width >> hsub;
- int outh = out->height >> vsub;
+ int inh = in->height >> vsub;
+ int outw = FF_CEIL_RSHIFT(out->width, hsub);
+ int outh = FF_CEIL_RSHIFT(out->height, vsub);
+ int start = (outh * jobnr ) / nb_jobs;
+ int end = (outh * (jobnr+1)) / nb_jobs;
uint8_t *dst, *src;
int dstlinesize, srclinesize;
int x, y;
- dst = out->data[plane];
dstlinesize = out->linesize[plane];
+ dst = out->data[plane] + start * dstlinesize;
src = in->data[plane];
srclinesize = in->linesize[plane];
@@ -157,64 +172,115 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
if (trans->dir & 2) {
- dst += out->linesize[plane] * (outh - 1);
+ dst = out->data[plane] + dstlinesize * (outh - start - 1);
dstlinesize *= -1;
}
- for (y = 0; y < outh; y++) {
- switch (pixstep) {
- case 1:
+ switch (pixstep) {
+ case 1:
+ for (y = start; y < end; y++, dst += dstlinesize)
for (x = 0; x < outw; x++)
dst[x] = src[x * srclinesize + y];
- break;
- case 2:
+ break;
+ case 2:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint16_t *)(dst + 2 * x)) =
*((uint16_t *)(src + x * srclinesize + y * 2));
- break;
- case 3:
+ }
+ break;
+ case 3:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++) {
int32_t v = AV_RB24(src + x * srclinesize + y * 3);
AV_WB24(dst + 3 * x, v);
}
- break;
- case 4:
+ }
+ break;
+ case 4:
+ for (y = start; y < end; y++, dst += dstlinesize) {
for (x = 0; x < outw; x++)
*((uint32_t *)(dst + 4 * x)) =
*((uint32_t *)(src + x * srclinesize + y * 4));
- break;
}
- dst += dstlinesize;
+ break;
+ case 6:
+ for (y = start; y < end; y++, dst += dstlinesize) {
+ for (x = 0; x < outw; x++) {
+ int64_t v = AV_RB48(src + x * srclinesize + y*6);
+ AV_WB48(dst + 6*x, v);
+ }
+ }
+ break;
+ case 8:
+ for (y = start; y < end; y++, dst += dstlinesize) {
+ for (x = 0; x < outw; x++)
+ *((uint64_t *)(dst + 8*x)) = *((uint64_t *)(src + x * srclinesize + y*8));
+ }
+ break;
}
}
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TransContext *trans = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td;
+ AVFrame *out;
+
+ if (trans->passthrough)
+ return ff_filter_frame(outlink, in);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ if (in->sample_aspect_ratio.num == 0) {
+ out->sample_aspect_ratio = in->sample_aspect_ratio;
+ } else {
+ out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
+ out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
+ }
+
+ td.in = in, td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(TransContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "dir", "Transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP },
- TRANSPOSE_CCLOCK_FLIP, TRANSPOSE_CLOCK_FLIP, FLAGS, "dir" },
- { "cclock_flip", "counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .unit = "dir" },
- { "clock", "clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .unit = "dir" },
- { "cclock", "counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .unit = "dir" },
- { "clock_flip", "clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .unit = "dir" },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption transpose_options[] = {
+ { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 7, FLAGS, "dir" },
+ { "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .unit = "dir" },
+ { "clock", "rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .unit = "dir" },
+ { "cclock", "rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .unit = "dir" },
+ { "clock_flip", "rotate clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .unit = "dir" },
-static const AVClass transpose_class = {
- .class_name = "transpose",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ { "passthrough", "do not apply transposition if the input matches the specified geometry",
+ OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" },
+ { "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_NONE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+ { "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+ { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
+
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(transpose);
+
static const AVFilterPad avfilter_vf_transpose_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
+ .get_video_buffer = get_video_buffer,
.filter_frame = filter_frame,
},
{ NULL }
@@ -237,4 +303,5 @@ AVFilter ff_vf_transpose = {
.query_formats = query_formats,
.inputs = avfilter_vf_transpose_inputs,
.outputs = avfilter_vf_transpose_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/libavfilter/vf_unsharp.c b/libavfilter/vf_unsharp.c
index d0d59e2..37053d9 100644
--- a/libavfilter/vf_unsharp.c
+++ b/libavfilter/vf_unsharp.c
@@ -3,26 +3,26 @@
* Port copyright (c) 2010 Daniel G. Taylor <dan@programmer-art.org>
* Relicensed to the LGPL with permission from Remi Guyomarch.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
- * blur / sharpen filter, ported to Libav from MPlayer
+ * blur / sharpen filter, ported to FFmpeg from MPlayer
* libmpcodecs/unsharp.c.
*
* This code is based on:
@@ -41,79 +41,57 @@
#include "internal.h"
#include "video.h"
#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
-
-#define MIN_SIZE 3
-#define MAX_SIZE 13
-
-/* right-shift and round-up */
-#define SHIFTUP(x,shift) (-((-(x))>>(shift)))
-
-typedef struct FilterParam {
- int msize_x; ///< matrix width
- int msize_y; ///< matrix height
- int amount; ///< effect amount
- int steps_x; ///< horizontal step count
- int steps_y; ///< vertical step count
- int scalebits; ///< bits to shift pixel
- int32_t halfscale; ///< amount to add to pixel
- uint32_t *sc[(MAX_SIZE * MAX_SIZE) - 1]; ///< finite state machine storage
-} FilterParam;
-
-typedef struct UnsharpContext {
- const AVClass *class;
- int lmsize_x, lmsize_y, cmsize_x, cmsize_y;
- float lamount, camount;
- FilterParam luma; ///< luma parameters (width, height, amount)
- FilterParam chroma; ///< chroma parameters (width, height, amount)
- int hsub, vsub;
-} UnsharpContext;
+#include "unsharp.h"
+#include "unsharp_opencl.h"
static void apply_unsharp( uint8_t *dst, int dst_stride,
const uint8_t *src, int src_stride,
- int width, int height, FilterParam *fp)
+ int width, int height, UnsharpFilterParam *fp)
{
uint32_t **sc = fp->sc;
- uint32_t sr[(MAX_SIZE * MAX_SIZE) - 1], tmp1, tmp2;
+ uint32_t sr[MAX_MATRIX_SIZE - 1], tmp1, tmp2;
int32_t res;
int x, y, z;
- const uint8_t *src2;
-
- if (!fp->amount) {
- if (dst_stride == src_stride)
- memcpy(dst, src, src_stride * height);
- else
- for (y = 0; y < height; y++, dst += dst_stride, src += src_stride)
- memcpy(dst, src, width);
+ const uint8_t *src2 = NULL; //silence a warning
+ const int amount = fp->amount;
+ const int steps_x = fp->steps_x;
+ const int steps_y = fp->steps_y;
+ const int scalebits = fp->scalebits;
+ const int32_t halfscale = fp->halfscale;
+
+ if (!amount) {
+ av_image_copy_plane(dst, dst_stride, src, src_stride, width, height);
return;
}
- for (y = 0; y < 2 * fp->steps_y; y++)
- memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * fp->steps_x));
+ for (y = 0; y < 2 * steps_y; y++)
+ memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * steps_x));
- for (y = -fp->steps_y; y < height + fp->steps_y; y++) {
+ for (y = -steps_y; y < height + steps_y; y++) {
if (y < height)
src2 = src;
- memset(sr, 0, sizeof(sr[0]) * (2 * fp->steps_x - 1));
- for (x = -fp->steps_x; x < width + fp->steps_x; x++) {
+ memset(sr, 0, sizeof(sr[0]) * (2 * steps_x - 1));
+ for (x = -steps_x; x < width + steps_x; x++) {
tmp1 = x <= 0 ? src2[0] : x >= width ? src2[width-1] : src2[x];
- for (z = 0; z < fp->steps_x * 2; z += 2) {
+ for (z = 0; z < steps_x * 2; z += 2) {
tmp2 = sr[z + 0] + tmp1; sr[z + 0] = tmp1;
tmp1 = sr[z + 1] + tmp2; sr[z + 1] = tmp2;
}
- for (z = 0; z < fp->steps_y * 2; z += 2) {
- tmp2 = sc[z + 0][x + fp->steps_x] + tmp1; sc[z + 0][x + fp->steps_x] = tmp1;
- tmp1 = sc[z + 1][x + fp->steps_x] + tmp2; sc[z + 1][x + fp->steps_x] = tmp2;
+ for (z = 0; z < steps_y * 2; z += 2) {
+ tmp2 = sc[z + 0][x + steps_x] + tmp1; sc[z + 0][x + steps_x] = tmp1;
+ tmp1 = sc[z + 1][x + steps_x] + tmp2; sc[z + 1][x + steps_x] = tmp2;
}
- if (x >= fp->steps_x && y >= fp->steps_y) {
- const uint8_t *srx = src - fp->steps_y * src_stride + x - fp->steps_x;
- uint8_t *dsx = dst - fp->steps_y * dst_stride + x - fp->steps_x;
+ if (x >= steps_x && y >= steps_y) {
+ const uint8_t *srx = src - steps_y * src_stride + x - steps_x;
+ uint8_t *dsx = dst - steps_y * dst_stride + x - steps_x;
- res = (int32_t)*srx + ((((int32_t) * srx - (int32_t)((tmp1 + fp->halfscale) >> fp->scalebits)) * fp->amount) >> 16);
+ res = (int32_t)*srx + ((((int32_t) * srx - (int32_t)((tmp1 + halfscale) >> scalebits)) * amount) >> 16);
*dsx = av_clip_uint8(res);
}
}
@@ -124,7 +102,25 @@ static void apply_unsharp( uint8_t *dst, int dst_stride,
}
}
-static void set_filter_param(FilterParam *fp, int msize_x, int msize_y, float amount)
+static int apply_unsharp_c(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
+{
+ AVFilterLink *inlink = ctx->inputs[0];
+ UnsharpContext *unsharp = ctx->priv;
+ int i, plane_w[3], plane_h[3];
+ UnsharpFilterParam *fp[3];
+ plane_w[0] = inlink->w;
+ plane_w[1] = plane_w[2] = FF_CEIL_RSHIFT(inlink->w, unsharp->hsub);
+ plane_h[0] = inlink->h;
+ plane_h[1] = plane_h[2] = FF_CEIL_RSHIFT(inlink->h, unsharp->vsub);
+ fp[0] = &unsharp->luma;
+ fp[1] = fp[2] = &unsharp->chroma;
+ for (i = 0; i < 3; i++) {
+ apply_unsharp(out->data[i], out->linesize[i], in->data[i], in->linesize[i], plane_w[i], plane_h[i], fp[i]);
+ }
+ return 0;
+}
+
+static void set_filter_param(UnsharpFilterParam *fp, int msize_x, int msize_y, float amount)
{
fp->msize_x = msize_x;
fp->msize_y = msize_y;
@@ -138,17 +134,30 @@ static void set_filter_param(FilterParam *fp, int msize_x, int msize_y, float am
static av_cold int init(AVFilterContext *ctx)
{
+ int ret = 0;
UnsharpContext *unsharp = ctx->priv;
+
set_filter_param(&unsharp->luma, unsharp->lmsize_x, unsharp->lmsize_y, unsharp->lamount);
set_filter_param(&unsharp->chroma, unsharp->cmsize_x, unsharp->cmsize_y, unsharp->camount);
+ unsharp->apply_unsharp = apply_unsharp_c;
+ if (!CONFIG_OPENCL && unsharp->opencl) {
+ av_log(ctx, AV_LOG_ERROR, "OpenCL support was not enabled in this build, cannot be selected\n");
+ return AVERROR(EINVAL);
+ }
+ if (CONFIG_OPENCL && unsharp->opencl) {
+ unsharp->apply_unsharp = ff_opencl_apply_unsharp;
+ ret = ff_opencl_unsharp_init(ctx);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
- enum AVPixelFormat pix_fmts[] = {
+ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
@@ -159,35 +168,49 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static void init_filter_param(AVFilterContext *ctx, FilterParam *fp, const char *effect_type, int width)
+static int init_filter_param(AVFilterContext *ctx, UnsharpFilterParam *fp, const char *effect_type, int width)
{
int z;
- const char *effect;
+ const char *effect = fp->amount == 0 ? "none" : fp->amount < 0 ? "blur" : "sharpen";
- effect = fp->amount == 0 ? "none" : fp->amount < 0 ? "blur" : "sharpen";
+ if (!(fp->msize_x & fp->msize_y & 1)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Invalid even size for %s matrix size %dx%d\n",
+ effect_type, fp->msize_x, fp->msize_y);
+ return AVERROR(EINVAL);
+ }
av_log(ctx, AV_LOG_VERBOSE, "effect:%s type:%s msize_x:%d msize_y:%d amount:%0.2f\n",
effect, effect_type, fp->msize_x, fp->msize_y, fp->amount / 65535.0);
for (z = 0; z < 2 * fp->steps_y; z++)
- fp->sc[z] = av_malloc(sizeof(*(fp->sc[z])) * (width + 2 * fp->steps_x));
+ if (!(fp->sc[z] = av_malloc_array(width + 2 * fp->steps_x,
+ sizeof(*(fp->sc[z])))))
+ return AVERROR(ENOMEM);
+
+ return 0;
}
static int config_props(AVFilterLink *link)
{
UnsharpContext *unsharp = link->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+ int ret;
unsharp->hsub = desc->log2_chroma_w;
unsharp->vsub = desc->log2_chroma_h;
- init_filter_param(link->dst, &unsharp->luma, "luma", link->w);
- init_filter_param(link->dst, &unsharp->chroma, "chroma", SHIFTUP(link->w, unsharp->hsub));
+ ret = init_filter_param(link->dst, &unsharp->luma, "luma", link->w);
+ if (ret < 0)
+ return ret;
+ ret = init_filter_param(link->dst, &unsharp->chroma, "chroma", FF_CEIL_RSHIFT(link->w, unsharp->hsub));
+ if (ret < 0)
+ return ret;
return 0;
}
-static void free_filter_param(FilterParam *fp)
+static void free_filter_param(UnsharpFilterParam *fp)
{
int z;
@@ -199,6 +222,10 @@ static av_cold void uninit(AVFilterContext *ctx)
{
UnsharpContext *unsharp = ctx->priv;
+ if (CONFIG_OPENCL && unsharp->opencl) {
+ ff_opencl_unsharp_uninit(ctx);
+ }
+
free_filter_param(&unsharp->luma);
free_filter_param(&unsharp->chroma);
}
@@ -208,8 +235,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
UnsharpContext *unsharp = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0];
AVFrame *out;
- int cw = SHIFTUP(link->w, unsharp->hsub);
- int ch = SHIFTUP(link->h, unsharp->vsub);
+ int ret = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -217,33 +243,43 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
+ if (CONFIG_OPENCL && unsharp->opencl) {
+ ret = ff_opencl_unsharp_process_inout_buf(link->dst, in, out);
+ if (ret < 0)
+ goto end;
+ }
- apply_unsharp(out->data[0], out->linesize[0], in->data[0], in->linesize[0], link->w, link->h, &unsharp->luma);
- apply_unsharp(out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, &unsharp->chroma);
- apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma);
-
+ ret = unsharp->apply_unsharp(link->dst, in, out);
+end:
av_frame_free(&in);
+
+ if (ret < 0)
+ return ret;
return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(UnsharpContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "luma_msize_x", "luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "luma_msize_y", "luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "luma_amount", "luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
- { "chroma_msize_x", "chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "chroma_msize_y", "chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
- { "chroma_amount", "chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
- { NULL },
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+#define MIN_SIZE 3
+#define MAX_SIZE 63
+static const AVOption unsharp_options[] = {
+ { "luma_msize_x", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "lx", "set luma matrix horizontal size", OFFSET(lmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "luma_msize_y", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "ly", "set luma matrix vertical size", OFFSET(lmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "luma_amount", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
+ { "la", "set luma effect strength", OFFSET(lamount), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, -2, 5, FLAGS },
+ { "chroma_msize_x", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "cx", "set chroma matrix horizontal size", OFFSET(cmsize_x), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "chroma_msize_y", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "cy", "set chroma matrix vertical size", OFFSET(cmsize_y), AV_OPT_TYPE_INT, { .i64 = 5 }, MIN_SIZE, MAX_SIZE, FLAGS },
+ { "chroma_amount", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
+ { "ca", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
+ { "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
+ { NULL }
};
-static const AVClass unsharp_class = {
- .class_name = "unsharp",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(unsharp);
static const AVFilterPad avfilter_vf_unsharp_inputs[] = {
{
@@ -264,17 +300,14 @@ static const AVFilterPad avfilter_vf_unsharp_outputs[] = {
};
AVFilter ff_vf_unsharp = {
- .name = "unsharp",
- .description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
-
- .priv_size = sizeof(UnsharpContext),
- .priv_class = &unsharp_class,
-
- .init = init,
- .uninit = uninit,
+ .name = "unsharp",
+ .description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
+ .priv_size = sizeof(UnsharpContext),
+ .priv_class = &unsharp_class,
+ .init = init,
+ .uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_unsharp_inputs,
-
- .outputs = avfilter_vf_unsharp_outputs,
+ .inputs = avfilter_vf_unsharp_inputs,
+ .outputs = avfilter_vf_unsharp_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
diff --git a/libavfilter/vf_vflip.c b/libavfilter/vf_vflip.c
index fa54985..4a4ae0e 100644
--- a/libavfilter/vf_vflip.c
+++ b/libavfilter/vf_vflip.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Bobby Bingham
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -55,9 +55,10 @@ static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
+ int height = FF_CEIL_RSHIFT(h, vsub);
if (frame->data[i]) {
- frame->data[i] += ((h >> vsub) - 1) * frame->linesize[i];
+ frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
@@ -72,9 +73,10 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
for (i = 0; i < 4; i ++) {
int vsub = i == 1 || i == 2 ? flip->vsub : 0;
+ int height = FF_CEIL_RSHIFT(link->h, vsub);
if (frame->data[i]) {
- frame->data[i] += ((link->h >> vsub)-1) * frame->linesize[i];
+ frame->data[i] += (height - 1) * frame->linesize[i];
frame->linesize[i] = -frame->linesize[i];
}
}
@@ -101,11 +103,9 @@ static const AVFilterPad avfilter_vf_vflip_outputs[] = {
};
AVFilter ff_vf_vflip = {
- .name = "vflip",
+ .name = "vflip",
.description = NULL_IF_CONFIG_SMALL("Flip the input video vertically."),
-
- .priv_size = sizeof(FlipContext),
-
- .inputs = avfilter_vf_vflip_inputs,
- .outputs = avfilter_vf_vflip_outputs,
+ .priv_size = sizeof(FlipContext),
+ .inputs = avfilter_vf_vflip_inputs,
+ .outputs = avfilter_vf_vflip_outputs,
};
diff --git a/libavfilter/vf_vidstabdetect.c b/libavfilter/vf_vidstabdetect.c
new file mode 100644
index 0000000..1df6f74
--- /dev/null
+++ b/libavfilter/vf_vidstabdetect.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define DEFAULT_RESULT_NAME "transforms.trf"
+
+#include <vid.stab/libvidstab.h>
+
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#include "vidstabutils.h"
+
+typedef struct {
+ const AVClass *class;
+
+ VSMotionDetect md;
+ VSMotionDetectConfig conf;
+
+ char *result;
+ FILE *f;
+} StabData;
+
+
+#define OFFSET(x) offsetof(StabData, x)
+#define OFFSETC(x) (offsetof(StabData, conf)+offsetof(VSMotionDetectConfig, x))
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption vidstabdetect_options[] = {
+ {"result", "path to the file used to write the transforms", OFFSET(result), AV_OPT_TYPE_STRING, {.str = DEFAULT_RESULT_NAME}, .flags = FLAGS},
+ {"shakiness", "how shaky is the video and how quick is the camera?"
+ " 1: little (fast) 10: very strong/quick (slow)", OFFSETC(shakiness), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 10, FLAGS},
+ {"accuracy", "(>=shakiness) 1: low 15: high (slow)", OFFSETC(accuracy), AV_OPT_TYPE_INT, {.i64 = 15}, 1, 15, FLAGS},
+ {"stepsize", "region around minimum is scanned with 1 pixel resolution", OFFSETC(stepSize), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 32, FLAGS},
+ {"mincontrast", "below this contrast a field is discarded (0-1)", OFFSETC(contrastThreshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0.0, 1.0, FLAGS},
+ {"show", "0: draw nothing; 1,2: show fields and transforms", OFFSETC(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS},
+ {"tripod", "virtual tripod mode (if >0): motion is compared to a reference"
+ " reference frame (frame # is the value)", OFFSETC(virtualTripod), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(vidstabdetect);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ StabData *sd = ctx->priv;
+ ff_vs_init();
+ sd->class = &vidstabdetect_class;
+ av_log(ctx, AV_LOG_VERBOSE, "vidstabdetect filter: init %s\n", LIBVIDSTAB_VERSION);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ StabData *sd = ctx->priv;
+ VSMotionDetect *md = &(sd->md);
+
+ if (sd->f) {
+ fclose(sd->f);
+ sd->f = NULL;
+ }
+
+ vsMotionDetectionCleanup(md);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // If you add something here also add it in vidstabutils.c
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ StabData *sd = ctx->priv;
+
+ VSMotionDetect* md = &(sd->md);
+ VSFrameInfo fi;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ vsFrameInfoInit(&fi, inlink->w, inlink->h,
+ ff_av2vs_pixfmt(ctx, inlink->format));
+ if (fi.bytesPerPixel != av_get_bits_per_pixel(desc)/8) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: wrong bits/per/pixel, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+ if (fi.log2ChromaW != desc->log2_chroma_w) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_w, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+
+ if (fi.log2ChromaH != desc->log2_chroma_h) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_h, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+
+ // set values that are not initialized by the options
+ sd->conf.algo = 1;
+ sd->conf.modName = "vidstabdetect";
+ if (vsMotionDetectInit(md, &sd->conf, &fi) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "initialization of Motion Detection failed, please report a BUG");
+ return AVERROR(EINVAL);
+ }
+
+ vsMotionDetectGetConfig(&sd->conf, md);
+ av_log(ctx, AV_LOG_INFO, "Video stabilization settings (pass 1/2):\n");
+ av_log(ctx, AV_LOG_INFO, " shakiness = %d\n", sd->conf.shakiness);
+ av_log(ctx, AV_LOG_INFO, " accuracy = %d\n", sd->conf.accuracy);
+ av_log(ctx, AV_LOG_INFO, " stepsize = %d\n", sd->conf.stepSize);
+ av_log(ctx, AV_LOG_INFO, " mincontrast = %f\n", sd->conf.contrastThreshold);
+ av_log(ctx, AV_LOG_INFO, " tripod = %d\n", sd->conf.virtualTripod);
+ av_log(ctx, AV_LOG_INFO, " show = %d\n", sd->conf.show);
+ av_log(ctx, AV_LOG_INFO, " result = %s\n", sd->result);
+
+ sd->f = fopen(sd->result, "w");
+ if (sd->f == NULL) {
+ av_log(ctx, AV_LOG_ERROR, "cannot open transform file %s\n", sd->result);
+ return AVERROR(EINVAL);
+ } else {
+ if (vsPrepareFile(md, sd->f) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "cannot write to transform file %s\n", sd->result);
+ return AVERROR(EINVAL);
+ }
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ StabData *sd = ctx->priv;
+ VSMotionDetect *md = &(sd->md);
+ LocalMotions localmotions;
+
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ VSFrame frame;
+ int plane;
+
+ if (sd->conf.show > 0 && !av_frame_is_writable(in))
+ av_frame_make_writable(in);
+
+ for (plane = 0; plane < md->fi.planes; plane++) {
+ frame.data[plane] = in->data[plane];
+ frame.linesize[plane] = in->linesize[plane];
+ }
+ if (vsMotionDetection(md, &localmotions, &frame) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "motion detection failed");
+ return AVERROR(AVERROR_EXTERNAL);
+ } else {
+ if (vsWriteToFile(md, sd->f, &localmotions) != VS_OK) {
+ int ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "cannot write to transform file");
+ return ret;
+ }
+ vs_vector_del(&localmotions);
+ }
+
+ return ff_filter_frame(outlink, in);
+}
+
+static const AVFilterPad avfilter_vf_vidstabdetect_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_vidstabdetect_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vidstabdetect = {
+ .name = "vidstabdetect",
+ .description = NULL_IF_CONFIG_SMALL("Extract relative transformations, "
+ "pass 1 of 2 for stabilization "
+ "(see vidstabtransform for pass 2)."),
+ .priv_size = sizeof(StabData),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_vf_vidstabdetect_inputs,
+ .outputs = avfilter_vf_vidstabdetect_outputs,
+ .priv_class = &vidstabdetect_class,
+};
diff --git a/libavfilter/vf_vidstabtransform.c b/libavfilter/vf_vidstabtransform.c
new file mode 100644
index 0000000..9db6a46
--- /dev/null
+++ b/libavfilter/vf_vidstabtransform.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define DEFAULT_INPUT_NAME "transforms.trf"
+
+#include <vid.stab/libvidstab.h>
+
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "avfilter.h"
+#include "internal.h"
+
+#include "vidstabutils.h"
+
+typedef struct {
+ const AVClass *class;
+
+ VSTransformData td;
+ VSTransformConfig conf;
+
+ VSTransformations trans; // transformations
+ char *input; // name of transform file
+ int tripod;
+ int debug;
+} TransformContext;
+
+#define OFFSET(x) offsetof(TransformContext, x)
+#define OFFSETC(x) (offsetof(TransformContext, conf)+offsetof(VSTransformConfig, x))
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption vidstabtransform_options[] = {
+ {"input", "set path to the file storing the transforms", OFFSET(input),
+ AV_OPT_TYPE_STRING, {.str = DEFAULT_INPUT_NAME}, .flags = FLAGS },
+ {"smoothing", "set number of frames*2 + 1 used for lowpass filtering", OFFSETC(smoothing),
+ AV_OPT_TYPE_INT, {.i64 = 15}, 0, 1000, FLAGS},
+
+ {"optalgo", "set camera path optimization algo", OFFSETC(camPathAlgo),
+ AV_OPT_TYPE_INT, {.i64 = VSOptimalL1}, VSOptimalL1, VSAvg, FLAGS, "optalgo"},
+ { "opt", "global optimization", 0, // from version 1.0 on
+ AV_OPT_TYPE_CONST, {.i64 = VSOptimalL1 }, 0, 0, FLAGS, "optalgo"},
+ { "gauss", "gaussian kernel", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSGaussian }, 0, 0, FLAGS, "optalgo"},
+ { "avg", "simple averaging on motion", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSAvg }, 0, 0, FLAGS, "optalgo"},
+
+ {"maxshift", "set maximal number of pixels to translate image", OFFSETC(maxShift),
+ AV_OPT_TYPE_INT, {.i64 = -1}, -1, 500, FLAGS},
+ {"maxangle", "set maximal angle in rad to rotate image", OFFSETC(maxAngle),
+ AV_OPT_TYPE_DOUBLE, {.dbl = -1.0}, -1.0, 3.14, FLAGS},
+
+ {"crop", "set cropping mode", OFFSETC(crop),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "crop"},
+ { "keep", "keep border", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSKeepBorder }, 0, 0, FLAGS, "crop"},
+ { "black", "black border", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VSCropBorder }, 0, 0, FLAGS, "crop"},
+
+ {"invert", "invert transforms", OFFSETC(invert),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
+ {"relative", "consider transforms as relative", OFFSETC(relative),
+ AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS},
+ {"zoom", "set percentage to zoom (>0: zoom in, <0: zoom out", OFFSETC(zoom),
+ AV_OPT_TYPE_DOUBLE, {.dbl = 0}, -100, 100, FLAGS},
+ {"optzoom", "set optimal zoom (0: nothing, 1: optimal static zoom, 2: optimal dynamic zoom)", OFFSETC(optZoom),
+ AV_OPT_TYPE_INT, {.i64 = 1}, 0, 2, FLAGS},
+ {"zoomspeed", "for adative zoom: percent to zoom maximally each frame", OFFSETC(zoomSpeed),
+ AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0, 5, FLAGS},
+
+ {"interpol", "set type of interpolation", OFFSETC(interpolType),
+ AV_OPT_TYPE_INT, {.i64 = 2}, 0, 3, FLAGS, "interpol"},
+ { "no", "no interpolation", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_Zero }, 0, 0, FLAGS, "interpol"},
+ { "linear", "linear (horizontal)", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_Linear }, 0, 0, FLAGS, "interpol"},
+ { "bilinear","bi-linear", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_BiLinear},0, 0, FLAGS, "interpol"},
+ { "bicubic", "bi-cubic", 0,
+ AV_OPT_TYPE_CONST, {.i64 = VS_BiCubic },0, 0, FLAGS, "interpol"},
+
+ {"tripod", "enable virtual tripod mode (same as relative=0:smoothing=0)", OFFSET(tripod),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
+ {"debug", "enable debug mode and writer global motions information to file", OFFSET(debug),
+ AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
+ {NULL}
+};
+
+AVFILTER_DEFINE_CLASS(vidstabtransform);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ TransformContext *tc = ctx->priv;
+ ff_vs_init();
+ tc->class = &vidstabtransform_class;
+ av_log(ctx, AV_LOG_VERBOSE, "vidstabtransform filter: init %s\n", LIBVIDSTAB_VERSION);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TransformContext *tc = ctx->priv;
+
+ vsTransformDataCleanup(&tc->td);
+ vsTransformationsCleanup(&tc->trans);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ // If you add something here also add it in vidstabutils.c
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+
+static int config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TransformContext *tc = ctx->priv;
+ FILE *f;
+
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+
+ VSTransformData *td = &(tc->td);
+
+ VSFrameInfo fi_src;
+ VSFrameInfo fi_dest;
+
+ if (!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
+ ff_av2vs_pixfmt(ctx, inlink->format)) ||
+ !vsFrameInfoInit(&fi_dest, inlink->w, inlink->h,
+ ff_av2vs_pixfmt(ctx, inlink->format))) {
+ av_log(ctx, AV_LOG_ERROR, "unknown pixel format: %i (%s)",
+ inlink->format, desc->name);
+ return AVERROR(EINVAL);
+ }
+
+ if (fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
+ fi_src.log2ChromaW != desc->log2_chroma_w ||
+ fi_src.log2ChromaH != desc->log2_chroma_h) {
+ av_log(ctx, AV_LOG_ERROR, "pixel-format error: bpp %i<>%i ",
+ fi_src.bytesPerPixel, av_get_bits_per_pixel(desc)/8);
+ av_log(ctx, AV_LOG_ERROR, "chroma_subsampl: w: %i<>%i h: %i<>%i\n",
+ fi_src.log2ChromaW, desc->log2_chroma_w,
+ fi_src.log2ChromaH, desc->log2_chroma_h);
+ return AVERROR(EINVAL);
+ }
+
+ // set values that are not initializes by the options
+ tc->conf.modName = "vidstabtransform";
+ tc->conf.verbose = 1 + tc->debug;
+ if (tc->tripod) {
+ av_log(ctx, AV_LOG_INFO, "Virtual tripod mode: relative=0, smoothing=0\n");
+ tc->conf.relative = 0;
+ tc->conf.smoothing = 0;
+ }
+ tc->conf.simpleMotionCalculation = 0;
+ tc->conf.storeTransforms = tc->debug;
+ tc->conf.smoothZoom = 0;
+
+ if (vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "initialization of vid.stab transform failed, please report a BUG\n");
+ return AVERROR(EINVAL);
+ }
+
+ vsTransformGetConfig(&tc->conf, td);
+ av_log(ctx, AV_LOG_INFO, "Video transformation/stabilization settings (pass 2/2):\n");
+ av_log(ctx, AV_LOG_INFO, " input = %s\n", tc->input);
+ av_log(ctx, AV_LOG_INFO, " smoothing = %d\n", tc->conf.smoothing);
+ av_log(ctx, AV_LOG_INFO, " optalgo = %s\n",
+ tc->conf.camPathAlgo == VSOptimalL1 ? "opt" :
+ (tc->conf.camPathAlgo == VSGaussian ? "gauss" : "avg"));
+ av_log(ctx, AV_LOG_INFO, " maxshift = %d\n", tc->conf.maxShift);
+ av_log(ctx, AV_LOG_INFO, " maxangle = %f\n", tc->conf.maxAngle);
+ av_log(ctx, AV_LOG_INFO, " crop = %s\n", tc->conf.crop ? "Black" : "Keep");
+ av_log(ctx, AV_LOG_INFO, " relative = %s\n", tc->conf.relative ? "True": "False");
+ av_log(ctx, AV_LOG_INFO, " invert = %s\n", tc->conf.invert ? "True" : "False");
+ av_log(ctx, AV_LOG_INFO, " zoom = %f\n", tc->conf.zoom);
+ av_log(ctx, AV_LOG_INFO, " optzoom = %s\n",
+ tc->conf.optZoom == 1 ? "Static (1)" : (tc->conf.optZoom == 2 ? "Dynamic (2)" : "Off (0)"));
+ if (tc->conf.optZoom == 2)
+ av_log(ctx, AV_LOG_INFO, " zoomspeed = %g\n", tc->conf.zoomSpeed);
+ av_log(ctx, AV_LOG_INFO, " interpol = %s\n", getInterpolationTypeName(tc->conf.interpolType));
+
+ f = fopen(tc->input, "r");
+ if (!f) {
+ int ret = AVERROR(errno);
+ av_log(ctx, AV_LOG_ERROR, "cannot open input file %s\n", tc->input);
+ return ret;
+ } else {
+ VSManyLocalMotions mlms;
+ if (vsReadLocalMotionsFile(f, &mlms) == VS_OK) {
+ // calculate the actual transforms from the local motions
+ if (vsLocalmotions2Transforms(td, &mlms, &tc->trans) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "calculating transformations failed\n");
+ return AVERROR(EINVAL);
+ }
+ } else { // try to read old format
+ if (!vsReadOldTransforms(td, f, &tc->trans)) { /* read input file */
+ av_log(ctx, AV_LOG_ERROR, "error parsing input file %s\n", tc->input);
+ return AVERROR(EINVAL);
+ }
+ }
+ }
+ fclose(f);
+
+ if (vsPreprocessTransforms(td, &tc->trans) != VS_OK) {
+ av_log(ctx, AV_LOG_ERROR, "error while preprocessing transforms\n");
+ return AVERROR(EINVAL);
+ }
+
+ // TODO: add sharpening, so far the user needs to call the unsharp filter manually
+ return 0;
+}
+
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ TransformContext *tc = ctx->priv;
+ VSTransformData* td = &(tc->td);
+
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ int direct = 0;
+ AVFrame *out;
+ VSFrame inframe;
+ int plane;
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ for (plane = 0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++) {
+ inframe.data[plane] = in->data[plane];
+ inframe.linesize[plane] = in->linesize[plane];
+ }
+ if (direct) {
+ vsTransformPrepare(td, &inframe, &inframe);
+ } else { // separate frames
+ VSFrame outframe;
+ for (plane = 0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++) {
+ outframe.data[plane] = out->data[plane];
+ outframe.linesize[plane] = out->linesize[plane];
+ }
+ vsTransformPrepare(td, &inframe, &outframe);
+ }
+
+ vsDoTransform(td, vsGetNextTransform(td, &tc->trans));
+
+ vsTransformFinish(td);
+
+ if (!direct)
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad avfilter_vf_vidstabtransform_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad avfilter_vf_vidstabtransform_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vidstabtransform = {
+ .name = "vidstabtransform",
+ .description = NULL_IF_CONFIG_SMALL("Transform the frames, "
+ "pass 2 of 2 for stabilization "
+ "(see vidstabdetect for pass 1)."),
+ .priv_size = sizeof(TransformContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = avfilter_vf_vidstabtransform_inputs,
+ .outputs = avfilter_vf_vidstabtransform_outputs,
+ .priv_class = &vidstabtransform_class,
+};
diff --git a/libavfilter/vf_vignette.c b/libavfilter/vf_vignette.c
new file mode 100644
index 0000000..806bd72
--- /dev/null
+++ b/libavfilter/vf_vignette.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2013 Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <float.h> /* DBL_MAX */
+
+#include "libavutil/opt.h"
+#include "libavutil/eval.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+static const char *const var_names[] = {
+ "w", // stream width
+ "h", // stream height
+ "n", // frame count
+ "pts", // presentation timestamp expressed in AV_TIME_BASE units
+ "r", // frame rate
+ "t", // timestamp expressed in seconds
+ "tb", // timebase
+ NULL
+};
+
+enum var_name {
+ VAR_W,
+ VAR_H,
+ VAR_N,
+ VAR_PTS,
+ VAR_R,
+ VAR_T,
+ VAR_TB,
+ VAR_NB
+};
+
+typedef struct {
+ const AVClass *class;
+ const AVPixFmtDescriptor *desc;
+ int backward;
+ enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
+#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
+ DEF_EXPR_FIELDS(angle);
+ DEF_EXPR_FIELDS(x0);
+ DEF_EXPR_FIELDS(y0);
+ double var_values[VAR_NB];
+ float *fmap;
+ int fmap_linesize;
+ double dmax;
+ float xscale, yscale;
+ uint32_t dither;
+ int do_dither;
+ AVRational aspect;
+ AVRational scale;
+} VignetteContext;
+
+#define OFFSET(x) offsetof(VignetteContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption vignette_options[] = {
+ { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
+ { "a", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
+ { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
+ { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
+ { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
+ { "forward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+ { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+ { "frame", "eval expressions for each frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
+ { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
+ { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(vignette);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ VignetteContext *s = ctx->priv;
+
+#define PARSE_EXPR(name) do { \
+ int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names, \
+ NULL, NULL, NULL, NULL, 0, ctx); \
+ if (ret < 0) { \
+ av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '" \
+ AV_STRINGIFY(name) "'\n"); \
+ return ret; \
+ } \
+} while (0)
+
+ PARSE_EXPR(angle);
+ PARSE_EXPR(x0);
+ PARSE_EXPR(y0);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ VignetteContext *s = ctx->priv;
+ av_freep(&s->fmap);
+ av_expr_free(s->angle_pexpr);
+ av_expr_free(s->x0_pexpr);
+ av_expr_free(s->y0_pexpr);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static double get_natural_factor(const VignetteContext *s, int x, int y)
+{
+ const int xx = (x - s->x0) * s->xscale;
+ const int yy = (y - s->y0) * s->yscale;
+ const double dnorm = hypot(xx, yy) / s->dmax;
+ if (dnorm > 1) {
+ return 0;
+ } else {
+ const double c = cos(s->angle * dnorm);
+ return (c*c)*(c*c); // do not remove braces, it helps compilers
+ }
+}
+
+#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
+#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
+
+static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
+{
+ int x, y;
+ float *dst = s->fmap;
+ int dst_linesize = s->fmap_linesize;
+
+ if (frame) {
+ s->var_values[VAR_N] = inlink->frame_count;
+ s->var_values[VAR_T] = TS2T(frame->pts, inlink->time_base);
+ s->var_values[VAR_PTS] = TS2D(frame->pts);
+ } else {
+ s->var_values[VAR_N] = 0;
+ s->var_values[VAR_T] = NAN;
+ s->var_values[VAR_PTS] = NAN;
+ }
+
+ s->angle = av_clipf(av_expr_eval(s->angle_pexpr, s->var_values, NULL), 0, M_PI_2);
+ s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
+ s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);
+
+ if (s->backward) {
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w; x++)
+ dst[x] = 1. / get_natural_factor(s, x, y);
+ dst += dst_linesize;
+ }
+ } else {
+ for (y = 0; y < inlink->h; y++) {
+ for (x = 0; x < inlink->w; x++)
+ dst[x] = get_natural_factor(s, x, y);
+ dst += dst_linesize;
+ }
+ }
+}
+
+static inline double get_dither_value(VignetteContext *s)
+{
+ double dv = 0;
+ if (s->do_dither) {
+ dv = s->dither / (double)(1LL<<32);
+ s->dither = s->dither * 1664525 + 1013904223;
+ }
+ return dv;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ unsigned x, y, direct = 0;
+ AVFilterContext *ctx = inlink->dst;
+ VignetteContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out;
+
+ if (av_frame_is_writable(in)) {
+ direct = 1;
+ out = in;
+ } else {
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ }
+
+ if (s->eval_mode == EVAL_MODE_FRAME)
+ update_context(s, inlink, in);
+
+ if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
+ uint8_t *dst = out->data[0];
+ const uint8_t *src = in ->data[0];
+ const float *fmap = s->fmap;
+ const int dst_linesize = out->linesize[0];
+ const int src_linesize = in ->linesize[0];
+ const int fmap_linesize = s->fmap_linesize;
+
+ for (y = 0; y < inlink->h; y++) {
+ uint8_t *dstp = dst;
+ const uint8_t *srcp = src;
+
+ for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
+ const float f = fmap[x];
+
+ dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
+ dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
+ dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ fmap += fmap_linesize;
+ }
+ } else {
+ int plane;
+
+ for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
+ uint8_t *dst = out->data[plane];
+ const uint8_t *src = in ->data[plane];
+ const float *fmap = s->fmap;
+ const int dst_linesize = out->linesize[plane];
+ const int src_linesize = in ->linesize[plane];
+ const int fmap_linesize = s->fmap_linesize;
+ const int chroma = plane == 1 || plane == 2;
+ const int hsub = chroma ? s->desc->log2_chroma_w : 0;
+ const int vsub = chroma ? s->desc->log2_chroma_h : 0;
+ const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
+ const int h = FF_CEIL_RSHIFT(inlink->h, vsub);
+
+ for (y = 0; y < h; y++) {
+ uint8_t *dstp = dst;
+ const uint8_t *srcp = src;
+
+ for (x = 0; x < w; x++) {
+ const double dv = get_dither_value(s);
+ if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
+ else *dstp++ = av_clip_uint8(fmap[x ] * *srcp++ + dv);
+ }
+ dst += dst_linesize;
+ src += src_linesize;
+ fmap += fmap_linesize << vsub;
+ }
+ }
+ }
+
+ if (!direct)
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ VignetteContext *s = inlink->dst->priv;
+ AVRational sar = inlink->sample_aspect_ratio;
+
+ s->desc = av_pix_fmt_desc_get(inlink->format);
+ s->var_values[VAR_W] = inlink->w;
+ s->var_values[VAR_H] = inlink->h;
+ s->var_values[VAR_TB] = av_q2d(inlink->time_base);
+ s->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
+ NAN : av_q2d(inlink->frame_rate);
+
+ if (!sar.num || !sar.den)
+ sar.num = sar.den = 1;
+ if (sar.num > sar.den) {
+ s->xscale = av_q2d(av_div_q(sar, s->aspect));
+ s->yscale = 1;
+ } else {
+ s->yscale = av_q2d(av_div_q(s->aspect, sar));
+ s->xscale = 1;
+ }
+ s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
+ av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
+ s->xscale, s->yscale, s->dmax);
+
+ s->fmap_linesize = FFALIGN(inlink->w, 32);
+ s->fmap = av_malloc_array(s->fmap_linesize, inlink->h * sizeof(*s->fmap));
+ if (!s->fmap)
+ return AVERROR(ENOMEM);
+
+ if (s->eval_mode == EVAL_MODE_INIT)
+ update_context(s, inlink, NULL);
+
+ return 0;
+}
+
+static const AVFilterPad vignette_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad vignette_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_vignette = {
+ .name = "vignette",
+ .description = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
+ .priv_size = sizeof(VignetteContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = vignette_inputs,
+ .outputs = vignette_outputs,
+ .priv_class = &vignette_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/vf_w3fdif.c b/libavfilter/vf_w3fdif.c
new file mode 100644
index 0000000..3de7394
--- /dev/null
+++ b/libavfilter/vf_w3fdif.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
+ * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
+ * Based on the process described by Martin Weston for BBC R&D
+ * Author of FFmpeg filter: Mark Himsley for BBC Broadcast Systems Development
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct W3FDIFContext {
+ const AVClass *class;
+ int filter; ///< 0 is simple, 1 is more complex
+ int deint; ///< which frames to deinterlace
+ int linesize[4]; ///< bytes of pixel data per line for each plane
+ int planeheight[4]; ///< height of each plane
+ int field; ///< which field are we on, 0 or 1
+ int eof;
+ int nb_planes;
+ AVFrame *prev, *cur, *next; ///< previous, current, next frames
+ int32_t *work_line; ///< line we are calculating
+} W3FDIFContext;
+
+#define OFFSET(x) offsetof(W3FDIFContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
+
+static const AVOption w3fdif_options[] = {
+ { "filter", "specify the filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "filter" },
+ CONST("simple", NULL, 0, "filter"),
+ CONST("complex", NULL, 1, "filter"),
+ { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
+ CONST("all", "deinterlace all frames", 0, "deint"),
+ CONST("interlaced", "only deinterlace frames marked as interlaced", 1, "deint"),
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(w3fdif);
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+ W3FDIFContext *s = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int ret;
+
+ if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+ return ret;
+
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+
+ s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+ s->work_line = av_calloc(s->linesize[0], sizeof(*s->work_line));
+ if (!s->work_line)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+
+ outlink->time_base.num = inlink->time_base.num;
+ outlink->time_base.den = inlink->time_base.den * 2;
+ outlink->frame_rate.num = inlink->frame_rate.num * 2;
+ outlink->frame_rate.den = inlink->frame_rate.den;
+ outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
+
+ return 0;
+}
+
+/*
+ * Filter coefficients from PH-2071, scaled by 256 * 256.
+ * Each set of coefficients has a set for low-frequencies and high-frequencies.
+ * n_coef_lf[] and n_coef_hf[] are the number of coefs for simple and more-complex.
+ * It is important for later that n_coef_lf[] is even and n_coef_hf[] is odd.
+ * coef_lf[][] and coef_hf[][] are the coefficients for low-frequencies
+ * and high-frequencies for simple and more-complex mode.
+ */
+static const int8_t n_coef_lf[2] = { 2, 4 };
+static const int32_t coef_lf[2][4] = {{ 32768, 32768, 0, 0},
+ { -1704, 34472, 34472, -1704}};
+static const int8_t n_coef_hf[2] = { 3, 5 };
+static const int32_t coef_hf[2][5] = {{ -4096, 8192, -4096, 0, 0},
+ { 2032, -7602, 11140, -7602, 2032}};
+
+static void deinterlace_plane(AVFilterContext *ctx, AVFrame *out,
+ const AVFrame *cur, const AVFrame *adj,
+ const int filter, const int plane)
+{
+ W3FDIFContext *s = ctx->priv;
+ uint8_t *in_line, *in_lines_cur[5], *in_lines_adj[5];
+ uint8_t *out_line, *out_pixel;
+ int32_t *work_line, *work_pixel;
+ uint8_t *cur_data = cur->data[plane];
+ uint8_t *adj_data = adj->data[plane];
+ uint8_t *dst_data = out->data[plane];
+ const int linesize = s->linesize[plane];
+ const int height = s->planeheight[plane];
+ const int cur_line_stride = cur->linesize[plane];
+ const int adj_line_stride = adj->linesize[plane];
+ const int dst_line_stride = out->linesize[plane];
+ int i, j, y_in, y_out;
+
+ /* copy unchanged the lines of the field */
+ y_out = s->field == cur->top_field_first;
+
+ in_line = cur_data + (y_out * cur_line_stride);
+ out_line = dst_data + (y_out * dst_line_stride);
+
+ while (y_out < height) {
+ memcpy(out_line, in_line, linesize);
+ y_out += 2;
+ in_line += cur_line_stride * 2;
+ out_line += dst_line_stride * 2;
+ }
+
+ /* interpolate other lines of the field */
+ y_out = s->field != cur->top_field_first;
+
+ out_line = dst_data + (y_out * dst_line_stride);
+
+ while (y_out < height) {
+ /* clear workspace */
+ memset(s->work_line, 0, sizeof(*s->work_line) * linesize);
+
+ /* get low vertical frequencies from current field */
+ for (j = 0; j < n_coef_lf[filter]; j++) {
+ y_in = (y_out + 1) + (j * 2) - n_coef_lf[filter];
+
+ while (y_in < 0)
+ y_in += 2;
+ while (y_in >= height)
+ y_in -= 2;
+
+ in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
+ }
+
+ work_line = s->work_line;
+ switch (n_coef_lf[filter]) {
+ case 2:
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef_lf[filter][0];
+ *work_line++ += *in_lines_cur[1]++ * coef_lf[filter][1];
+ }
+ break;
+ case 4:
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef_lf[filter][0];
+ *work_line += *in_lines_cur[1]++ * coef_lf[filter][1];
+ *work_line += *in_lines_cur[2]++ * coef_lf[filter][2];
+ *work_line++ += *in_lines_cur[3]++ * coef_lf[filter][3];
+ }
+ }
+
+ /* get high vertical frequencies from adjacent fields */
+ for (j = 0; j < n_coef_hf[filter]; j++) {
+ y_in = (y_out + 1) + (j * 2) - n_coef_hf[filter];
+
+ while (y_in < 0)
+ y_in += 2;
+ while (y_in >= height)
+ y_in -= 2;
+
+ in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
+ in_lines_adj[j] = adj_data + (y_in * adj_line_stride);
+ }
+
+ work_line = s->work_line;
+ switch (n_coef_hf[filter]) {
+ case 3:
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef_hf[filter][0];
+ *work_line += *in_lines_adj[0]++ * coef_hf[filter][0];
+ *work_line += *in_lines_cur[1]++ * coef_hf[filter][1];
+ *work_line += *in_lines_adj[1]++ * coef_hf[filter][1];
+ *work_line += *in_lines_cur[2]++ * coef_hf[filter][2];
+ *work_line++ += *in_lines_adj[2]++ * coef_hf[filter][2];
+ }
+ break;
+ case 5:
+ for (i = 0; i < linesize; i++) {
+ *work_line += *in_lines_cur[0]++ * coef_hf[filter][0];
+ *work_line += *in_lines_adj[0]++ * coef_hf[filter][0];
+ *work_line += *in_lines_cur[1]++ * coef_hf[filter][1];
+ *work_line += *in_lines_adj[1]++ * coef_hf[filter][1];
+ *work_line += *in_lines_cur[2]++ * coef_hf[filter][2];
+ *work_line += *in_lines_adj[2]++ * coef_hf[filter][2];
+ *work_line += *in_lines_cur[3]++ * coef_hf[filter][3];
+ *work_line += *in_lines_adj[3]++ * coef_hf[filter][3];
+ *work_line += *in_lines_cur[4]++ * coef_hf[filter][4];
+ *work_line++ += *in_lines_adj[4]++ * coef_hf[filter][4];
+ }
+ }
+
+ /* save scaled result to the output frame, scaling down by 256 * 256 */
+ work_pixel = s->work_line;
+ out_pixel = out_line;
+
+ for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
+ *out_pixel = av_clip(*work_pixel, 0, 255 * 256 * 256) >> 16;
+
+ /* move on to next line */
+ y_out += 2;
+ out_line += dst_line_stride * 2;
+ }
+}
+
+static int filter(AVFilterContext *ctx, int is_second)
+{
+ W3FDIFContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out, *adj;
+ int plane;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out, s->cur);
+ out->interlaced_frame = 0;
+
+ if (!is_second) {
+ if (out->pts != AV_NOPTS_VALUE)
+ out->pts *= 2;
+ } else {
+ int64_t cur_pts = s->cur->pts;
+ int64_t next_pts = s->next->pts;
+
+ if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
+ out->pts = cur_pts + next_pts;
+ } else {
+ out->pts = AV_NOPTS_VALUE;
+ }
+ }
+
+ adj = s->field ? s->next : s->prev;
+ for (plane = 0; plane < s->nb_planes; plane++)
+ deinterlace_plane(ctx, out, s->cur, adj, s->filter, plane);
+
+ s->field = !s->field;
+
+ return ff_filter_frame(outlink, out);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ W3FDIFContext *s = ctx->priv;
+ int ret;
+
+ av_frame_free(&s->prev);
+ s->prev = s->cur;
+ s->cur = s->next;
+ s->next = frame;
+
+ if (!s->cur) {
+ s->cur = av_frame_clone(s->next);
+ if (!s->cur)
+ return AVERROR(ENOMEM);
+ }
+
+ if ((s->deint && !s->cur->interlaced_frame) || ctx->is_disabled) {
+ AVFrame *out = av_frame_clone(s->cur);
+ if (!out)
+ return AVERROR(ENOMEM);
+
+ av_frame_free(&s->prev);
+ if (out->pts != AV_NOPTS_VALUE)
+ out->pts *= 2;
+ return ff_filter_frame(ctx->outputs[0], out);
+ }
+
+ if (!s->prev)
+ return 0;
+
+ ret = filter(ctx, 0);
+ if (ret < 0)
+ return ret;
+
+ return filter(ctx, 1);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ W3FDIFContext *s = ctx->priv;
+
+ do {
+ int ret;
+
+ if (s->eof)
+ return AVERROR_EOF;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && s->cur) {
+ AVFrame *next = av_frame_clone(s->next);
+ if (!next)
+ return AVERROR(ENOMEM);
+ next->pts = s->next->pts * 2 - s->cur->pts;
+ filter_frame(ctx->inputs[0], next);
+ s->eof = 1;
+ } else if (ret < 0) {
+ return ret;
+ }
+ } while (!s->cur);
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ W3FDIFContext *s = ctx->priv;
+
+ av_frame_free(&s->prev);
+ av_frame_free(&s->cur );
+ av_frame_free(&s->next);
+ av_freep(&s->work_line);
+}
+
+static const AVFilterPad w3fdif_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad w3fdif_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ .request_frame = request_frame,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_w3fdif = {
+ .name = "w3fdif",
+ .description = NULL_IF_CONFIG_SMALL("Apply Martin Weston three field deinterlace."),
+ .priv_size = sizeof(W3FDIFContext),
+ .priv_class = &w3fdif_class,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = w3fdif_inputs,
+ .outputs = w3fdif_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
diff --git a/libavfilter/vf_xbr.c b/libavfilter/vf_xbr.c
new file mode 100644
index 0000000..90a4c6b
--- /dev/null
+++ b/libavfilter/vf_xbr.c
@@ -0,0 +1,759 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * Copyright (C) 2011, 2012 Hyllian/Jararaca - sergiogdb@gmail.com
+ *
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * XBR Filter is used for depixelization of image.
+ * This is based on Hyllian's xBR shader.
+ *
+ * @see http://www.libretro.com/forums/viewtopic.php?f=6&t=134
+ * @see https://github.com/yoyofr/iFBA/blob/master/fba_src/src/intf/video/scalers/xbr.cpp
+ *
+ * @todo add threading and FATE test
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+#define RGB_MASK 0x00FFFFFF
+#define LB_MASK 0x00FEFEFE
+#define RED_BLUE_MASK 0x00FF00FF
+#define GREEN_MASK 0x0000FF00
+
+typedef struct {
+ const AVClass *class;
+ int n;
+ uint32_t rgbtoyuv[1<<24];
+} XBRContext;
+
+#define OFFSET(x) offsetof(XBRContext, x)
+static const AVOption xbr_options[] = {
+ { "n", "set scale factor", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 3}, 2, 4, },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(xbr);
+
+static uint32_t df(uint32_t x, uint32_t y, const uint32_t *r2y)
+{
+#define YMASK 0xff0000
+#define UMASK 0x00ff00
+#define VMASK 0x0000ff
+
+ uint32_t yuv1 = r2y[x & 0xffffff];
+ uint32_t yuv2 = r2y[y & 0xffffff];
+
+ return (abs((yuv1 & YMASK) - (yuv2 & YMASK)) >> 16) +
+ (abs((yuv1 & UMASK) - (yuv2 & UMASK)) >> 8) +
+ abs((yuv1 & VMASK) - (yuv2 & VMASK));
+}
+
+#define ALPHA_BLEND_128_W(dst, src) dst = ((src & LB_MASK) >> 1) + ((dst & LB_MASK) >> 1)
+
+#define ALPHA_BLEND_32_W(dst, src) \
+ dst = ((RED_BLUE_MASK & ((dst & RED_BLUE_MASK) + ((((src & RED_BLUE_MASK) - \
+ (dst & RED_BLUE_MASK))) >>3))) | (GREEN_MASK & ((dst & GREEN_MASK) + \
+ ((((src & GREEN_MASK) - (dst & GREEN_MASK))) >>3))))
+
+#define ALPHA_BLEND_64_W(dst, src) \
+ dst = ((RED_BLUE_MASK & ((dst & RED_BLUE_MASK) + ((((src & RED_BLUE_MASK) - \
+ (dst & RED_BLUE_MASK))) >>2))) | (GREEN_MASK & ((dst & GREEN_MASK) + \
+ ((((src & GREEN_MASK) - (dst & GREEN_MASK))) >>2))))
+
+#define ALPHA_BLEND_192_W(dst, src) \
+ dst = ((RED_BLUE_MASK & ((dst & RED_BLUE_MASK) + ((((src & RED_BLUE_MASK) - \
+ (dst & RED_BLUE_MASK)) * 3) >>2))) | (GREEN_MASK & ((dst & GREEN_MASK) + \
+ ((((src & GREEN_MASK) - (dst & GREEN_MASK)) * 3) >>2))))
+
+#define ALPHA_BLEND_224_W(dst, src) \
+ dst = ((RED_BLUE_MASK & ((dst & RED_BLUE_MASK) + ((((src & RED_BLUE_MASK) - \
+ (dst & RED_BLUE_MASK)) * 7) >>3))) | (GREEN_MASK & ((dst & GREEN_MASK) + \
+ ((((src & GREEN_MASK) - (dst & GREEN_MASK)) * 7) >>3))))
+
+#define LEFT_UP_2_2X(N3, N2, N1, PIXEL)\
+ ALPHA_BLEND_224_W(E[N3], PIXEL); \
+ ALPHA_BLEND_64_W( E[N2], PIXEL); \
+ E[N1] = E[N2]; \
+
+#define LEFT_2_2X(N3, N2, PIXEL)\
+ ALPHA_BLEND_192_W(E[N3], PIXEL); \
+ ALPHA_BLEND_64_W( E[N2], PIXEL); \
+
+#define UP_2_2X(N3, N1, PIXEL)\
+ ALPHA_BLEND_192_W(E[N3], PIXEL); \
+ ALPHA_BLEND_64_W( E[N1], PIXEL); \
+
+#define DIA_2X(N3, PIXEL)\
+ ALPHA_BLEND_128_W(E[N3], PIXEL); \
+
+#define eq(A, B, r2y)\
+ (df(A, B, r2y) < 155)\
+
+#define FILTRO(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, N0, N1, N2, N3,r2y) \
+ ex = (PE!=PH && PE!=PF); \
+ if ( ex )\
+ {\
+ e = (df(PE,PC,r2y)+df(PE,PG,r2y)+df(PI,H5,r2y)+df(PI,F4,r2y))+(df(PH,PF,r2y)<<2); \
+ i = (df(PH,PD,r2y)+df(PH,I5,r2y)+df(PF,I4,r2y)+df(PF,PB,r2y))+(df(PE,PI,r2y)<<2); \
+ if ((e<i) && ( !eq(PF,PB,r2y) && !eq(PH,PD,r2y) || eq(PE,PI,r2y) && (!eq(PF,I4,r2y) && !eq(PH,I5,r2y)) || eq(PE,PG,r2y) || eq(PE,PC,r2y)) )\
+ {\
+ ke=df(PF,PG,r2y); ki=df(PH,PC,r2y); \
+ ex2 = (PE!=PC && PB!=PC); ex3 = (PE!=PG && PD!=PG); px = (df(PE,PF,r2y) <= df(PE,PH,r2y)) ? PF : PH; \
+ if ( ((ke<<1)<=ki) && ex3 && (ke>=(ki<<1)) && ex2 ) \
+ {\
+ LEFT_UP_2_2X(N3, N2, N1, px)\
+ }\
+ else if ( ((ke<<1)<=ki) && ex3 ) \
+ {\
+ LEFT_2_2X(N3, N2, px);\
+ }\
+ else if ( (ke>=(ki<<1)) && ex2 ) \
+ {\
+ UP_2_2X(N3, N1, px);\
+ }\
+ else \
+ {\
+ DIA_2X(N3, px);\
+ }\
+ }\
+ else if (e<=i)\
+ {\
+ ALPHA_BLEND_128_W( E[N3], ((df(PE,PF,r2y) <= df(PE,PH,r2y)) ? PF : PH)); \
+ }\
+ }\
+
+static void xbr2x(AVFrame * input, AVFrame * output, const uint32_t * r2y)
+{
+ unsigned int e, i,px;
+ unsigned int ex, ex2, ex3;
+ unsigned int ke, ki;
+ int x,y;
+
+ int next_line = output->linesize[0]>>2;
+
+ for (y = 0; y < input->height; y++) {
+
+ uint32_t pprev;
+ uint32_t pprev2;
+
+ uint32_t * E = (uint32_t *)(output->data[0] + y * output->linesize[0] * 2);
+
+ /* middle. Offset of -8 is given */
+ uint32_t * sa2 = (uint32_t *)(input->data[0] + y * input->linesize[0] - 8);
+ /* up one */
+ uint32_t * sa1 = sa2 - (input->linesize[0]>>2);
+ /* up two */
+ uint32_t * sa0 = sa1 - (input->linesize[0]>>2);
+ /* down one */
+ uint32_t * sa3 = sa2 + (input->linesize[0]>>2);
+ /* down two */
+ uint32_t * sa4 = sa3 + (input->linesize[0]>>2);
+
+ if (y <= 1) {
+ sa0 = sa1;
+ if (y == 0) {
+ sa0 = sa1 = sa2;
+ }
+ }
+
+ if (y >= input->height - 2) {
+ sa4 = sa3;
+ if (y == input->height - 1) {
+ sa4 = sa3 = sa2;
+ }
+ }
+
+ pprev = pprev2 = 2;
+
+ for (x = 0; x < input->width; x++) {
+ uint32_t B1 = sa0[2];
+ uint32_t PB = sa1[2];
+ uint32_t PE = sa2[2];
+ uint32_t PH = sa3[2];
+ uint32_t H5 = sa4[2];
+
+ uint32_t A1 = sa0[pprev];
+ uint32_t PA = sa1[pprev];
+ uint32_t PD = sa2[pprev];
+ uint32_t PG = sa3[pprev];
+ uint32_t G5 = sa4[pprev];
+
+ uint32_t A0 = sa1[pprev2];
+ uint32_t D0 = sa2[pprev2];
+ uint32_t G0 = sa3[pprev2];
+
+ uint32_t C1 = 0;
+ uint32_t PC = 0;
+ uint32_t PF = 0;
+ uint32_t PI = 0;
+ uint32_t I5 = 0;
+
+ uint32_t C4 = 0;
+ uint32_t F4 = 0;
+ uint32_t I4 = 0;
+
+ if (x >= input->width - 2) {
+ if (x == input->width - 1) {
+ C1 = sa0[2];
+ PC = sa1[2];
+ PF = sa2[2];
+ PI = sa3[2];
+ I5 = sa4[2];
+
+ C4 = sa1[2];
+ F4 = sa2[2];
+ I4 = sa3[2];
+ } else {
+ C1 = sa0[3];
+ PC = sa1[3];
+ PF = sa2[3];
+ PI = sa3[3];
+ I5 = sa4[3];
+
+ C4 = sa1[3];
+ F4 = sa2[3];
+ I4 = sa3[3];
+ }
+ } else {
+ C1 = sa0[3];
+ PC = sa1[3];
+ PF = sa2[3];
+ PI = sa3[3];
+ I5 = sa4[3];
+
+ C4 = sa1[4];
+ F4 = sa2[4];
+ I4 = sa3[4];
+ }
+
+ E[0] = E[1] = E[next_line] = E[next_line + 1] = PE; // 0, 1, 2, 3
+
+ FILTRO(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, 0, 1, next_line, next_line+1,r2y);
+ FILTRO(PE, PC, PF, PB, PI, PA, PH, PD, PG, I4, A1, I5, H5, A0, D0, B1, C1, F4, C4, G5, G0, next_line, 0, next_line+1, 1,r2y);
+ FILTRO(PE, PA, PB, PD, PC, PG, PF, PH, PI, C1, G0, C4, F4, G5, H5, D0, A0, B1, A1, I4, I5, next_line+1, next_line, 1, 0,r2y);
+ FILTRO(PE, PG, PD, PH, PA, PI, PB, PF, PC, A0, I5, A1, B1, I4, F4, H5, G5, D0, G0, C1, C4, 1, next_line+1, 0, next_line,r2y);
+
+ sa0 += 1;
+ sa1 += 1;
+ sa2 += 1;
+ sa3 += 1;
+ sa4 += 1;
+
+ E += 2;
+
+ if (pprev2){
+ pprev2--;
+ pprev = 1;
+ }
+ }
+ }
+}
+#undef FILTRO
+
+#define LEFT_UP_2_3X(N7, N5, N6, N2, N8, PIXEL)\
+ ALPHA_BLEND_192_W(E[N7], PIXEL); \
+ ALPHA_BLEND_64_W( E[N6], PIXEL); \
+ E[N5] = E[N7]; \
+ E[N2] = E[N6]; \
+ E[N8] = PIXEL;\
+
+#define LEFT_2_3X(N7, N5, N6, N8, PIXEL)\
+ ALPHA_BLEND_192_W(E[N7], PIXEL); \
+ ALPHA_BLEND_64_W( E[N5], PIXEL); \
+ ALPHA_BLEND_64_W( E[N6], PIXEL); \
+ E[N8] = PIXEL;\
+
+#define UP_2_3X(N5, N7, N2, N8, PIXEL)\
+ ALPHA_BLEND_192_W(E[N5], PIXEL); \
+ ALPHA_BLEND_64_W( E[N7], PIXEL); \
+ ALPHA_BLEND_64_W( E[N2], PIXEL); \
+ E[N8] = PIXEL;\
+
+#define DIA_3X(N8, N5, N7, PIXEL)\
+ ALPHA_BLEND_224_W(E[N8], PIXEL); \
+ ALPHA_BLEND_32_W(E[N5], PIXEL); \
+ ALPHA_BLEND_32_W(E[N7], PIXEL); \
+
+#define FILTRO(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, N0, N1, N2, N3, N4, N5, N6, N7, N8,r2y) \
+ ex = (PE!=PH && PE!=PF); \
+ if ( ex )\
+ {\
+ e = (df(PE,PC,r2y)+df(PE,PG,r2y)+df(PI,H5,r2y)+df(PI,F4,r2y))+(df(PH,PF,r2y)<<2); \
+ i = (df(PH,PD,r2y)+df(PH,I5,r2y)+df(PF,I4,r2y)+df(PF,PB,r2y))+(df(PE,PI,r2y)<<2); \
+ if ((e<i) && ( !eq(PF,PB,r2y) && !eq(PF,PC,r2y) || !eq(PH,PD,r2y) && !eq(PH,PG,r2y) || eq(PE,PI,r2y) && (!eq(PF,F4,r2y) && !eq(PF,I4,r2y) || !eq(PH,H5,r2y) && !eq(PH,I5,r2y)) || eq(PE,PG,r2y) || eq(PE,PC,r2y)) )\
+ {\
+ ke=df(PF,PG,r2y); ki=df(PH,PC,r2y); \
+ ex2 = (PE!=PC && PB!=PC); ex3 = (PE!=PG && PD!=PG); px = (df(PE,PF,r2y) <= df(PE,PH,r2y)) ? PF : PH; \
+ if ( ((ke<<1)<=ki) && ex3 && (ke>=(ki<<1)) && ex2 ) \
+ {\
+ LEFT_UP_2_3X(N7, N5, N6, N2, N8, px)\
+ }\
+ else if ( ((ke<<1)<=ki) && ex3 ) \
+ {\
+ LEFT_2_3X(N7, N5, N6, N8, px);\
+ }\
+ else if ( (ke>=(ki<<1)) && ex2 ) \
+ {\
+ UP_2_3X(N5, N7, N2, N8, px);\
+ }\
+ else \
+ {\
+ DIA_3X(N8, N5, N7, px);\
+ }\
+ }\
+ else if (e<=i)\
+ {\
+ ALPHA_BLEND_128_W( E[N8], ((df(PE,PF,r2y) <= df(PE,PH,r2y)) ? PF : PH)); \
+ }\
+ }\
+
+static void xbr3x(AVFrame *input, AVFrame *output, const uint32_t *r2y)
+{
+ const int nl = output->linesize[0]>>2;
+ const int nl1 = nl + nl;
+
+ unsigned int e, i,px;
+ unsigned int ex, ex2, ex3;
+ unsigned int ke, ki;
+
+ uint32_t pprev;
+ uint32_t pprev2;
+
+ int x,y;
+
+ for (y = 0; y < input->height; y++) {
+
+ uint32_t * E = (uint32_t *)(output->data[0] + y * output->linesize[0] * 3);
+
+ /* middle. Offset of -8 is given */
+ uint32_t * sa2 = (uint32_t *)(input->data[0] + y * input->linesize[0] - 8);
+ /* up one */
+ uint32_t * sa1 = sa2 - (input->linesize[0]>>2);
+ /* up two */
+ uint32_t * sa0 = sa1 - (input->linesize[0]>>2);
+ /* down one */
+ uint32_t * sa3 = sa2 + (input->linesize[0]>>2);
+ /* down two */
+ uint32_t * sa4 = sa3 + (input->linesize[0]>>2);
+
+ if (y <= 1){
+ sa0 = sa1;
+ if (y == 0){
+ sa0 = sa1 = sa2;
+ }
+ }
+
+ if (y >= input->height - 2){
+ sa4 = sa3;
+ if (y == input->height - 1){
+ sa4 = sa3 = sa2;
+ }
+ }
+
+ pprev = pprev2 = 2;
+
+ for (x = 0; x < input->width; x++){
+ uint32_t B1 = sa0[2];
+ uint32_t PB = sa1[2];
+ uint32_t PE = sa2[2];
+ uint32_t PH = sa3[2];
+ uint32_t H5 = sa4[2];
+
+ uint32_t A1 = sa0[pprev];
+ uint32_t PA = sa1[pprev];
+ uint32_t PD = sa2[pprev];
+ uint32_t PG = sa3[pprev];
+ uint32_t G5 = sa4[pprev];
+
+ uint32_t A0 = sa1[pprev2];
+ uint32_t D0 = sa2[pprev2];
+ uint32_t G0 = sa3[pprev2];
+
+ uint32_t C1 = 0;
+ uint32_t PC = 0;
+ uint32_t PF = 0;
+ uint32_t PI = 0;
+ uint32_t I5 = 0;
+
+ uint32_t C4 = 0;
+ uint32_t F4 = 0;
+ uint32_t I4 = 0;
+
+ if (x >= input->width - 2){
+ if (x == input->width - 1){
+ C1 = sa0[2];
+ PC = sa1[2];
+ PF = sa2[2];
+ PI = sa3[2];
+ I5 = sa4[2];
+
+ C4 = sa1[2];
+ F4 = sa2[2];
+ I4 = sa3[2];
+ } else {
+ C1 = sa0[3];
+ PC = sa1[3];
+ PF = sa2[3];
+ PI = sa3[3];
+ I5 = sa4[3];
+
+ C4 = sa1[3];
+ F4 = sa2[3];
+ I4 = sa3[3];
+ }
+ } else {
+ C1 = sa0[3];
+ PC = sa1[3];
+ PF = sa2[3];
+ PI = sa3[3];
+ I5 = sa4[3];
+
+ C4 = sa1[4];
+ F4 = sa2[4];
+ I4 = sa3[4];
+ }
+
+ E[0] = E[1] = E[2] = PE;
+ E[nl] = E[nl+1] = E[nl+2] = PE; // 3, 4, 5
+ E[nl1] = E[nl1+1] = E[nl1+2] = PE; // 6, 7, 8
+
+ FILTRO(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, 0, 1, 2, nl, nl+1, nl+2, nl1, nl1+1, nl1+2,r2y);
+ FILTRO(PE, PC, PF, PB, PI, PA, PH, PD, PG, I4, A1, I5, H5, A0, D0, B1, C1, F4, C4, G5, G0, nl1, nl, 0, nl1+1, nl+1, 1, nl1+2, nl+2, 2,r2y);
+ FILTRO(PE, PA, PB, PD, PC, PG, PF, PH, PI, C1, G0, C4, F4, G5, H5, D0, A0, B1, A1, I4, I5, nl1+2, nl1+1, nl1, nl+2, nl+1, nl, 2, 1, 0,r2y);
+ FILTRO(PE, PG, PD, PH, PA, PI, PB, PF, PC, A0, I5, A1, B1, I4, F4, H5, G5, D0, G0, C1, C4, 2, nl+2, nl1+2, 1, nl+1, nl1+1, 0, nl, nl1,r2y);
+
+ sa0 += 1;
+ sa1 += 1;
+ sa2 += 1;
+ sa3 += 1;
+ sa4 += 1;
+
+ E += 3;
+
+ if (pprev2){
+ pprev2--;
+ pprev = 1;
+ }
+ }
+ }
+}
+#undef FILTRO
+
+#define LEFT_UP_2(N15, N14, N11, N13, N12, N10, N7, N3, PIXEL)\
+ ALPHA_BLEND_192_W(E[N13], PIXEL); \
+ ALPHA_BLEND_64_W( E[N12], PIXEL); \
+ E[N15] = E[N14] = E[N11] = PIXEL; \
+ E[N10] = E[N3] = E[N12]; \
+ E[N7] = E[N13]; \
+
+#define LEFT_2(N15, N14, N11, N13, N12, N10, PIXEL)\
+ ALPHA_BLEND_192_W(E[N11], PIXEL); \
+ ALPHA_BLEND_192_W(E[N13], PIXEL); \
+ ALPHA_BLEND_64_W( E[N10], PIXEL); \
+ ALPHA_BLEND_64_W( E[N12], PIXEL); \
+ E[N14] = PIXEL; \
+ E[N15] = PIXEL; \
+
+#define UP_2(N15, N14, N11, N3, N7, N10, PIXEL)\
+ ALPHA_BLEND_192_W(E[N14], PIXEL); \
+ ALPHA_BLEND_192_W(E[N7 ], PIXEL); \
+ ALPHA_BLEND_64_W( E[N10], PIXEL); \
+ ALPHA_BLEND_64_W( E[N3 ], PIXEL); \
+ E[N11] = PIXEL; \
+ E[N15] = PIXEL; \
+
+#define DIA(N15, N14, N11, PIXEL)\
+ ALPHA_BLEND_128_W(E[N11], PIXEL); \
+ ALPHA_BLEND_128_W(E[N14], PIXEL); \
+ E[N15] = PIXEL; \
+
+#define FILTRO(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, N15, N14, N11, N3, N7, N10, N13, N12, N9, N6, N2, N1, N5, N8, N4, N0,r2y) \
+ ex = (PE!=PH && PE!=PF); \
+ if ( ex )\
+ {\
+ e = (df(PE,PC,r2y)+df(PE,PG,r2y)+df(PI,H5,r2y)+df(PI,F4,r2y))+(df(PH,PF,r2y)<<2); \
+ i = (df(PH,PD,r2y)+df(PH,I5,r2y)+df(PF,I4,r2y)+df(PF,PB,r2y))+(df(PE,PI,r2y)<<2); \
+ if ((e<i) && ( !eq(PF,PB,r2y) && !eq(PH,PD,r2y) || eq(PE,PI,r2y) && (!eq(PF,I4,r2y) && !eq(PH,I5,r2y)) || eq(PE,PG,r2y) || eq(PE,PC,r2y)) )\
+ {\
+ ke=df(PF,PG,r2y); ki=df(PH,PC,r2y); \
+ ex2 = (PE!=PC && PB!=PC); ex3 = (PE!=PG && PD!=PG); px = (df(PE,PF,r2y) <= df(PE,PH,r2y)) ? PF : PH; \
+ if ( ((ke<<1)<=ki) && ex3 && (ke>=(ki<<1)) && ex2 ) \
+ {\
+ LEFT_UP_2(N15, N14, N11, N13, N12, N10, N7, N3, px)\
+ }\
+ else if ( ((ke<<1)<=ki) && ex3 ) \
+ {\
+ LEFT_2(N15, N14, N11, N13, N12, N10, px)\
+ }\
+ else if ( (ke>=(ki<<1)) && ex2 ) \
+ {\
+ UP_2(N15, N14, N11, N3, N7, N10, px)\
+ }\
+ else \
+ {\
+ DIA(N15, N14, N11, px)\
+ }\
+ }\
+ else if (e<=i)\
+ {\
+ ALPHA_BLEND_128_W( E[N15], ((df(PE,PF,r2y) <= df(PE,PH,r2y)) ? PF : PH)); \
+ }\
+ }\
+
+static void xbr4x(AVFrame *input, AVFrame *output, const uint32_t *r2y)
+{
+
+ const int nl = output->linesize[0]>>2;
+ const int nl1 = nl + nl;
+ const int nl2 = nl1 + nl;
+
+ unsigned int e, i, px;
+ unsigned int ex, ex2, ex3;
+ unsigned int ke, ki;
+
+ uint32_t pprev;
+ uint32_t pprev2;
+
+ int x, y;
+
+ for (y = 0; y < input->height; y++) {
+
+ uint32_t * E = (uint32_t *)(output->data[0] + y * output->linesize[0] * 4);
+
+ /* middle. Offset of -8 is given */
+ uint32_t * sa2 = (uint32_t *)(input->data[0] + y * input->linesize[0] - 8);
+ /* up one */
+ uint32_t * sa1 = sa2 - (input->linesize[0]>>2);
+ /* up two */
+ uint32_t * sa0 = sa1 - (input->linesize[0]>>2);
+ /* down one */
+ uint32_t * sa3 = sa2 + (input->linesize[0]>>2);
+ /* down two */
+ uint32_t * sa4 = sa3 + (input->linesize[0]>>2);
+
+ if (y <= 1) {
+ sa0 = sa1;
+ if (y == 0) {
+ sa0 = sa1 = sa2;
+ }
+ }
+
+ if (y >= input->height - 2) {
+ sa4 = sa3;
+ if (y == input->height - 1) {
+ sa4 = sa3 = sa2;
+ }
+ }
+
+ pprev = pprev2 = 2;
+
+ for (x = 0; x < input->width; x++) {
+ uint32_t B1 = sa0[2];
+ uint32_t PB = sa1[2];
+ uint32_t PE = sa2[2];
+ uint32_t PH = sa3[2];
+ uint32_t H5 = sa4[2];
+
+ uint32_t A1 = sa0[pprev];
+ uint32_t PA = sa1[pprev];
+ uint32_t PD = sa2[pprev];
+ uint32_t PG = sa3[pprev];
+ uint32_t G5 = sa4[pprev];
+
+ uint32_t A0 = sa1[pprev2];
+ uint32_t D0 = sa2[pprev2];
+ uint32_t G0 = sa3[pprev2];
+
+ uint32_t C1 = 0;
+ uint32_t PC = 0;
+ uint32_t PF = 0;
+ uint32_t PI = 0;
+ uint32_t I5 = 0;
+
+ uint32_t C4 = 0;
+ uint32_t F4 = 0;
+ uint32_t I4 = 0;
+
+ if (x >= input->width - 2) {
+ if (x == input->width - 1) {
+ C1 = sa0[2];
+ PC = sa1[2];
+ PF = sa2[2];
+ PI = sa3[2];
+ I5 = sa4[2];
+
+ C4 = sa1[2];
+ F4 = sa2[2];
+ I4 = sa3[2];
+ } else {
+ C1 = sa0[3];
+ PC = sa1[3];
+ PF = sa2[3];
+ PI = sa3[3];
+ I5 = sa4[3];
+
+ C4 = sa1[3];
+ F4 = sa2[3];
+ I4 = sa3[3];
+ }
+ } else {
+ C1 = sa0[3];
+ PC = sa1[3];
+ PF = sa2[3];
+ PI = sa3[3];
+ I5 = sa4[3];
+
+ C4 = sa1[4];
+ F4 = sa2[4];
+ I4 = sa3[4];
+ }
+
+ E[0] = E[1] = E[2] = E[3] = PE;
+ E[nl] = E[nl+1] = E[nl+2] = E[nl+3] = PE; // 4, 5, 6, 7
+ E[nl1] = E[nl1+1] = E[nl1+2] = E[nl1+3] = PE; // 8, 9, 10, 11
+ E[nl2] = E[nl2+1] = E[nl2+2] = E[nl2+3] = PE; // 12, 13, 14, 15
+
+ FILTRO(PE, PI, PH, PF, PG, PC, PD, PB, PA, G5, C4, G0, D0, C1, B1, F4, I4, H5, I5, A0, A1, nl2+3, nl2+2, nl1+3, 3, nl+3, nl1+2, nl2+1, nl2, nl1+1, nl+2, 2, 1, nl+1, nl1, nl, 0,r2y);
+ FILTRO(PE, PC, PF, PB, PI, PA, PH, PD, PG, I4, A1, I5, H5, A0, D0, B1, C1, F4, C4, G5, G0, 3, nl+3, 2, 0, 1, nl+2, nl1+3, nl2+3, nl1+2, nl+1, nl, nl1, nl1+1,nl2+2,nl2+1,nl2,r2y);
+ FILTRO(PE, PA, PB, PD, PC, PG, PF, PH, PI, C1, G0, C4, F4, G5, H5, D0, A0, B1, A1, I4, I5, 0, 1, nl, nl2, nl1, nl+1, 2, 3, nl+2, nl1+1, nl2+1,nl2+2,nl1+2, nl+3,nl1+3,nl2+3,r2y);
+ FILTRO(PE, PG, PD, PH, PA, PI, PB, PF, PC, A0, I5, A1, B1, I4, F4, H5, G5, D0, G0, C1, C4, nl2, nl1, nl2+1, nl2+3, nl2+2, nl1+1, nl, 0, nl+1, nl1+2, nl1+3, nl+3, nl+2, 1, 2, 3,r2y);
+
+ sa0 += 1;
+ sa1 += 1;
+ sa2 += 1;
+ sa3 += 1;
+ sa4 += 1;
+
+ E += 4;
+
+ if (pprev2){
+ pprev2--;
+ pprev = 1;
+ }
+ }
+ }
+}
+#undef FILTRO
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ XBRContext *xbr = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+
+ outlink->w = inlink->w * xbr->n;
+ outlink->h = inlink->h * xbr->n;
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_0RGB32, AV_PIX_FMT_NONE,
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ XBRContext *xbr = ctx->priv;
+ const uint32_t *r2y = xbr->rgbtoyuv;
+
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+
+ av_frame_copy_props(out, in);
+ if (xbr->n == 4)
+ xbr4x(in, out, r2y);
+ else if (xbr->n == 3)
+ xbr3x(in, out, r2y);
+ else
+ xbr2x(in, out, r2y);
+
+ out->width = outlink->w;
+ out->height = outlink->h;
+
+ av_frame_free(&in);
+ return ff_filter_frame(outlink, out);
+}
+
+static int init(AVFilterContext *ctx)
+{
+ XBRContext *xbr = ctx->priv;
+ uint32_t c;
+ int bg, rg, g;
+
+ for (bg = -255; bg < 256; bg++) {
+ for (rg = -255; rg < 256; rg++) {
+ const uint32_t u = (uint32_t)((-169*rg + 500*bg)/1000) + 128;
+ const uint32_t v = (uint32_t)(( 500*rg - 81*bg)/1000) + 128;
+ int startg = FFMAX3(-bg, -rg, 0);
+ int endg = FFMIN3(255-bg, 255-rg, 255);
+ uint32_t y = (uint32_t)(( 299*rg + 1000*startg + 114*bg)/1000);
+ c = bg + (rg<<16) + 0x010101 * startg;
+ for (g = startg; g <= endg; g++) {
+ xbr->rgbtoyuv[c] = ((y++) << 16) + (u << 8) + v;
+ c+= 0x010101;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const AVFilterPad xbr_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad xbr_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_xbr = {
+ .name = "xbr",
+ .description = NULL_IF_CONFIG_SMALL("Scale the input using xBR algorithm."),
+ .inputs = xbr_inputs,
+ .outputs = xbr_outputs,
+ .query_formats = query_formats,
+ .priv_size = sizeof(XBRContext),
+ .priv_class = &xbr_class,
+ .init = init,
+};
diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c
index 53c567c..70670c3 100644
--- a/libavfilter/vf_yadif.c
+++ b/libavfilter/vf_yadif.c
@@ -1,37 +1,34 @@
/*
- * Copyright (C) 2006-2010 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
* 2010 James Darnley <james.darnley@gmail.com>
*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"
-#undef NDEBUG
-#include <assert.h>
-
typedef struct ThreadData {
AVFrame *frame;
int plane;
@@ -69,7 +66,7 @@ typedef struct ThreadData {
CHECK( 1) CHECK( 2) }} }} \
}\
\
- if (mode < 2) { \
+ if (!(mode&2)) { \
int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
@@ -112,6 +109,7 @@ static void filter_line_c(void *dst1,
FILTER(0, w, 1)
}
+#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
int w, int prefs, int mrefs, int parity, int mode)
{
@@ -127,13 +125,14 @@ static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
* for is_not_edge should let the compiler ignore the whole branch. */
FILTER(0, 3, 0)
- dst = (uint8_t*)dst1 + w - 3;
- prev = (uint8_t*)prev1 + w - 3;
- cur = (uint8_t*)cur1 + w - 3;
- next = (uint8_t*)next1 + w - 3;
+ dst = (uint8_t*)dst1 + w - (MAX_ALIGN-1);
+ prev = (uint8_t*)prev1 + w - (MAX_ALIGN-1);
+ cur = (uint8_t*)cur1 + w - (MAX_ALIGN-1);
+ next = (uint8_t*)next1 + w - (MAX_ALIGN-1);
prev2 = (uint8_t*)(parity ? prev : cur);
next2 = (uint8_t*)(parity ? cur : next);
+ FILTER(w - (MAX_ALIGN-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
@@ -171,13 +170,14 @@ static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
FILTER(0, 3, 0)
- dst = (uint16_t*)dst1 + w - 3;
- prev = (uint16_t*)prev1 + w - 3;
- cur = (uint16_t*)cur1 + w - 3;
- next = (uint16_t*)next1 + w - 3;
+ dst = (uint16_t*)dst1 + w - (MAX_ALIGN/2-1);
+ prev = (uint16_t*)prev1 + w - (MAX_ALIGN/2-1);
+ cur = (uint16_t*)cur1 + w - (MAX_ALIGN/2-1);
+ next = (uint16_t*)next1 + w - (MAX_ALIGN/2-1);
prev2 = (uint16_t*)(parity ? prev : cur);
next2 = (uint16_t*)(parity ? cur : next);
+ FILTER(w - (MAX_ALIGN/2-1), w - 3, 1)
FILTER(w - 3, w, 0)
}
@@ -188,9 +188,8 @@ static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
int refs = s->cur->linesize[td->plane];
int df = (s->csp->comp[td->plane].depth_minus1 + 8) / 8;
int pix_3 = 3 * df;
- int slice_h = td->h / nb_jobs;
- int slice_start = jobnr * slice_h;
- int slice_end = (jobnr == nb_jobs - 1) ? td->h : (jobnr + 1) * slice_h;
+ int slice_start = (td->h * jobnr ) / nb_jobs;
+ int slice_end = (td->h * (jobnr+1)) / nb_jobs;
int y;
/* filtering reads 3 pixels to the left/right; to avoid invalid reads,
@@ -204,7 +203,7 @@ static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
uint8_t *dst = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
int mode = y == 1 || y + 2 == td->h ? 2 : s->mode;
s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
- next + pix_3, td->w - 6,
+ next + pix_3, td->w - (3 + MAX_ALIGN/df-1),
y + 1 < td->h ? refs : -refs,
y ? -refs : refs,
td->parity ^ td->tff, mode);
@@ -232,8 +231,8 @@ static void filter(AVFilterContext *ctx, AVFrame *dstpic,
int h = dstpic->height;
if (i == 1 || i == 2) {
- w >>= yadif->csp->log2_chroma_w;
- h >>= yadif->csp->log2_chroma_h;
+ w = FF_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
+ h = FF_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
}
@@ -247,24 +246,6 @@ static void filter(AVFilterContext *ctx, AVFrame *dstpic,
emms_c();
}
-static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
-{
- AVFrame *frame;
- int width = FFALIGN(w, 32);
- int height = FFALIGN(h + 2, 32);
- int i;
-
- frame = ff_default_get_video_buffer(link, width, height);
-
- frame->width = w;
- frame->height = h;
-
- for (i = 0; i < 3; i++)
- frame->data[i] += frame->linesize[i];
-
- return frame;
-}
-
static int return_frame(AVFilterContext *ctx, int is_second)
{
YADIFContext *yadif = ctx->priv;
@@ -305,11 +286,36 @@ static int return_frame(AVFilterContext *ctx, int is_second)
return ret;
}
+static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
+{
+ int i;
+ for (i = 0; i < yadif->csp->nb_components; i++)
+ if (a->linesize[i] != b->linesize[i])
+ return 1;
+ return 0;
+}
+
+static void fixstride(AVFilterLink *link, AVFrame *f)
+{
+ AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
+ if(!dst)
+ return;
+ av_frame_copy_props(dst, f);
+ av_image_copy(dst->data, dst->linesize,
+ (const uint8_t **)f->data, f->linesize,
+ dst->format, dst->width, dst->height);
+ av_frame_unref(f);
+ av_frame_move_ref(f, dst);
+ av_frame_free(&dst);
+}
+
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
YADIFContext *yadif = ctx->priv;
+ av_assert0(frame);
+
if (yadif->frame_pending)
return_frame(ctx, 1);
@@ -319,10 +325,24 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
yadif->cur = yadif->next;
yadif->next = frame;
- if (!yadif->cur)
- return 0;
+ if (!yadif->cur &&
+ !(yadif->cur = av_frame_clone(yadif->next)))
+ return AVERROR(ENOMEM);
- if (yadif->auto_enable && !yadif->cur->interlaced_frame) {
+ if (checkstride(yadif, yadif->next, yadif->cur)) {
+ av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
+ fixstride(link, yadif->next);
+ }
+ if (checkstride(yadif, yadif->next, yadif->cur))
+ fixstride(link, yadif->cur);
+ if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
+ fixstride(link, yadif->prev);
+ if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
+ return -1;
+ }
+
+ if ((yadif->deint && !yadif->cur->interlaced_frame) || ctx->is_disabled) {
yadif->out = av_frame_clone(yadif->cur);
if (!yadif->out)
return AVERROR(ENOMEM);
@@ -333,9 +353,8 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
return ff_filter_frame(ctx->outputs[0], yadif->out);
}
- if (!yadif->prev &&
- !(yadif->prev = av_frame_clone(yadif->cur)))
- return AVERROR(ENOMEM);
+ if (!yadif->prev)
+ return 0;
yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
if (!yadif->out)
@@ -368,7 +387,7 @@ static int request_frame(AVFilterLink *link)
ret = ff_request_frame(link->src->inputs[0]);
- if (ret == AVERROR_EOF && yadif->next) {
+ if (ret == AVERROR_EOF && yadif->cur) {
AVFrame *next = av_frame_clone(yadif->next);
if (!next)
@@ -381,46 +400,18 @@ static int request_frame(AVFilterLink *link)
} else if (ret < 0) {
return ret;
}
- } while (!yadif->cur);
+ } while (!yadif->prev);
return 0;
}
-static int poll_frame(AVFilterLink *link)
-{
- YADIFContext *yadif = link->src->priv;
- int ret, val;
-
- if (yadif->frame_pending)
- return 1;
-
- val = ff_poll_frame(link->src->inputs[0]);
- if (val <= 0)
- return val;
-
- //FIXME change API to not requre this red tape
- if (val == 1 && !yadif->next) {
- if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
- return ret;
- val = ff_poll_frame(link->src->inputs[0]);
- if (val <= 0)
- return val;
- }
- assert(yadif->next || !val);
-
- if (yadif->auto_enable && yadif->next && !yadif->next->interlaced_frame)
- return val;
-
- return val * ((yadif->mode&1)+1);
-}
-
static av_cold void uninit(AVFilterContext *ctx)
{
YADIFContext *yadif = ctx->priv;
- if (yadif->prev) av_frame_free(&yadif->prev);
- if (yadif->cur ) av_frame_free(&yadif->cur );
- if (yadif->next) av_frame_free(&yadif->next);
+ av_frame_free(&yadif->prev);
+ av_frame_free(&yadif->cur );
+ av_frame_free(&yadif->next);
}
static int query_formats(AVFilterContext *ctx)
@@ -435,16 +426,29 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P,
- AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
+ AV_PIX_FMT_GRAY16,
AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVJ440P,
- AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
- AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
- AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
- AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
- AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
- AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
+ AV_PIX_FMT_YUV420P9,
+ AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV422P10,
+ AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV422P12,
+ AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14,
+ AV_PIX_FMT_YUV422P14,
+ AV_PIX_FMT_YUV444P14,
+ AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRAP,
AV_PIX_FMT_NONE
};
@@ -455,6 +459,7 @@ static int query_formats(AVFilterContext *ctx)
static int config_props(AVFilterLink *link)
{
+ AVFilterContext *ctx = link->src;
YADIFContext *s = link->src->priv;
link->time_base.num = link->src->inputs[0]->time_base.num;
@@ -462,6 +467,14 @@ static int config_props(AVFilterLink *link)
link->w = link->src->inputs[0]->w;
link->h = link->src->inputs[0]->h;
+ if(s->mode&1)
+ link->frame_rate = av_mul_q(link->src->inputs[0]->frame_rate, (AVRational){2,1});
+
+ if (link->w < 3 || link->h < 3) {
+ av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
+ return AVERROR(EINVAL);
+ }
+
s->csp = av_pix_fmt_desc_get(link->format);
if (s->csp->comp[0].depth_minus1 / 8 == 1) {
s->filter_line = filter_line_c_16bit;
@@ -469,39 +482,46 @@ static int config_props(AVFilterLink *link)
} else {
s->filter_line = filter_line_c;
s->filter_edges = filter_edges;
-
- if (ARCH_X86)
- ff_yadif_init_x86(s);
}
+ if (ARCH_X86)
+ ff_yadif_init_x86(s);
+
return 0;
}
+
#define OFFSET(x) offsetof(YADIFContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "mode", NULL, OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 3, FLAGS },
- { "parity", NULL, OFFSET(parity), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, "parity" },
- { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, .unit = "parity" },
- { "tff", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .unit = "parity" },
- { "bff", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .unit = "parity" },
- { "auto", NULL, OFFSET(auto_enable), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
- { NULL },
-};
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption yadif_options[] = {
+ { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
+ CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"),
+ CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"),
+ CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
+ CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),
-static const AVClass yadif_class = {
- .class_name = "yadif",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+ { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"),
+ CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"),
+
+ { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
+ CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"),
+ CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),
+
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(yadif);
+
static const AVFilterPad avfilter_vf_yadif_inputs[] = {
{
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .get_video_buffer = get_video_buffer,
- .filter_frame = filter_frame,
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
},
{ NULL }
};
@@ -510,7 +530,6 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .poll_frame = poll_frame,
.request_frame = request_frame,
.config_props = config_props,
},
@@ -519,16 +538,12 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = {
AVFilter ff_vf_yadif = {
.name = "yadif",
- .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),
-
+ .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
.priv_size = sizeof(YADIFContext),
.priv_class = &yadif_class,
.uninit = uninit,
.query_formats = query_formats,
-
- .inputs = avfilter_vf_yadif_inputs,
-
- .outputs = avfilter_vf_yadif_outputs,
-
- .flags = AVFILTER_FLAG_SLICE_THREADS,
+ .inputs = avfilter_vf_yadif_inputs,
+ .outputs = avfilter_vf_yadif_outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
diff --git a/libavfilter/vf_zoompan.c b/libavfilter/vf_zoompan.c
new file mode 100644
index 0000000..8f179e6
--- /dev/null
+++ b/libavfilter/vf_zoompan.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/eval.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "libswscale/swscale.h"
+
+static const char *const var_names[] = {
+ "in_w", "iw",
+ "in_h", "ih",
+ "out_w", "ow",
+ "out_h", "oh",
+ "in",
+ "on",
+ "duration",
+ "pduration",
+ "time",
+ "frame",
+ "zoom",
+ "pzoom",
+ "x", "px",
+ "y", "py",
+ "a",
+ "sar",
+ "dar",
+ "hsub",
+ "vsub",
+ NULL
+};
+
+enum var_name {
+ VAR_IN_W, VAR_IW,
+ VAR_IN_H, VAR_IH,
+ VAR_OUT_W, VAR_OW,
+ VAR_OUT_H, VAR_OH,
+ VAR_IN,
+ VAR_ON,
+ VAR_DURATION,
+ VAR_PDURATION,
+ VAR_TIME,
+ VAR_FRAME,
+ VAR_ZOOM,
+ VAR_PZOOM,
+ VAR_X, VAR_PX,
+ VAR_Y, VAR_PY,
+ VAR_A,
+ VAR_SAR,
+ VAR_DAR,
+ VAR_HSUB,
+ VAR_VSUB,
+ VARS_NB
+};
+
+typedef struct ZPcontext {
+ const AVClass *class;
+ char *zoom_expr_str;
+ char *x_expr_str;
+ char *y_expr_str;
+ char *duration_expr_str;
+ int w, h;
+ double x, y;
+ double prev_zoom;
+ int prev_nb_frames;
+ struct SwsContext *sws;
+ int64_t frame_count;
+} ZPContext;
+
+#define OFFSET(x) offsetof(ZPContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption zoompan_options[] = {
+ { "zoom", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS },
+ { "z", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS },
+ { "x", "set the x expression", OFFSET(x_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS },
+ { "y", "set the y expression", OFFSET(y_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS },
+ { "d", "set the duration expression", OFFSET(duration_expr_str), AV_OPT_TYPE_STRING, {.str="90"}, .flags = FLAGS },
+ { "s", "set the output image size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, .flags = FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(zoompan);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ ZPContext *s = ctx->priv;
+
+ s->prev_zoom = 1;
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ ZPContext *s = ctx->priv;
+
+ outlink->w = s->w;
+ outlink->h = s->h;
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ZPContext *s = ctx->priv;
+ double var_values[VARS_NB], nb_frames, zoom, dx, dy;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
+ AVFrame *out;
+ int i, k, x, y, w, h, ret = 0;
+
+ var_values[VAR_IN_W] = var_values[VAR_IW] = in->width;
+ var_values[VAR_IN_H] = var_values[VAR_IH] = in->height;
+ var_values[VAR_OUT_W] = var_values[VAR_OW] = s->w;
+ var_values[VAR_OUT_H] = var_values[VAR_OH] = s->h;
+ var_values[VAR_IN] = inlink->frame_count + 1;
+ var_values[VAR_ON] = outlink->frame_count + 1;
+ var_values[VAR_PX] = s->x;
+ var_values[VAR_PY] = s->y;
+ var_values[VAR_X] = 0;
+ var_values[VAR_Y] = 0;
+ var_values[VAR_PZOOM] = s->prev_zoom;
+ var_values[VAR_ZOOM] = 1;
+ var_values[VAR_PDURATION] = s->prev_nb_frames;
+ var_values[VAR_A] = (double) in->width / in->height;
+ var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
+ (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
+ var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
+ var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
+ var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
+
+ if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail;
+
+ var_values[VAR_DURATION] = nb_frames;
+ for (i = 0; i < nb_frames; i++) {
+ int px[4];
+ int py[4];
+ uint8_t *input[4];
+ int64_t pts = av_rescale_q(in->pts, inlink->time_base,
+ outlink->time_base) + s->frame_count;
+
+ var_values[VAR_TIME] = pts * av_q2d(outlink->time_base);
+ var_values[VAR_FRAME] = i;
+ var_values[VAR_ON] = outlink->frame_count + 1;
+ if ((ret = av_expr_parse_and_eval(&zoom, s->zoom_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail;
+
+ zoom = av_clipd(zoom, 1, 10);
+ var_values[VAR_ZOOM] = zoom;
+ w = in->width * (1.0 / zoom);
+ h = in->height * (1.0 / zoom);
+
+ if ((ret = av_expr_parse_and_eval(&dx, s->x_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail;
+ x = dx = av_clipd(dx, 0, FFMAX(in->width - w, 0));
+ var_values[VAR_X] = dx;
+ x &= ~((1 << desc->log2_chroma_w) - 1);
+
+ if ((ret = av_expr_parse_and_eval(&dy, s->y_expr_str,
+ var_names, var_values,
+ NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+ goto fail;
+ y = dy = av_clipd(dy, 0, FFMAX(in->height - h, 0));
+ var_values[VAR_Y] = dy;
+ y &= ~((1 << desc->log2_chroma_h) - 1);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ px[1] = px[2] = FF_CEIL_RSHIFT(x, desc->log2_chroma_w);
+ px[0] = px[3] = x;
+
+ py[1] = py[2] = FF_CEIL_RSHIFT(y, desc->log2_chroma_h);
+ py[0] = py[3] = y;
+
+ s->sws = sws_alloc_context();
+ if (!s->sws) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (k = 0; in->data[k]; k++)
+ input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];
+
+ av_opt_set_int(s->sws, "srcw", w, 0);
+ av_opt_set_int(s->sws, "srch", h, 0);
+ av_opt_set_int(s->sws, "src_format", in->format, 0);
+ av_opt_set_int(s->sws, "dstw", outlink->w, 0);
+ av_opt_set_int(s->sws, "dsth", outlink->h, 0);
+ av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
+ av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);
+
+ if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
+ goto fail;
+
+ sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize);
+
+ out->pts = pts;
+ s->frame_count++;
+
+ ret = ff_filter_frame(outlink, out);
+ if (ret < 0)
+ break;
+
+ sws_freeContext(s->sws);
+ s->sws = NULL;
+ }
+
+ s->x = dx;
+ s->y = dy;
+ s->prev_zoom = zoom;
+ s->prev_nb_frames = nb_frames;
+
+fail:
+ sws_freeContext(s->sws);
+ s->sws = NULL;
+ av_frame_free(&in);
+ return ret;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ411P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ ZPContext *s = ctx->priv;
+
+ sws_freeContext(s->sws);
+ s->sws = NULL;
+}
+
+static const AVFilterPad inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_zoompan = {
+ .name = "zoompan",
+ .description = NULL_IF_CONFIG_SMALL("Apply Zoom & Pan effect."),
+ .priv_size = sizeof(ZPContext),
+ .priv_class = &zoompan_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = inputs,
+ .outputs = outputs,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+};
diff --git a/libavfilter/video.c b/libavfilter/video.c
index 9f1103e..6a55483 100644
--- a/libavfilter/video.c
+++ b/libavfilter/video.c
@@ -1,24 +1,29 @@
/*
- * This file is part of Libav.
+ * Copyright 2007 Bobby Bingham
+ * Copyright Stefano Sabatini <stefasab gmail com>
+ * Copyright Vitor Sessak <vitor1001 gmail com>
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <string.h>
#include <stdio.h>
+#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
@@ -56,7 +61,7 @@ AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h)
#if FF_API_AVFILTERBUFFER
AVFilterBufferRef *
-avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int perms,
+avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
int w, int h, enum AVPixelFormat format)
{
AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer));
@@ -105,7 +110,7 @@ AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h)
{
AVFrame *ret = NULL;
- FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0);
+ FF_TPRINTF_START(NULL, get_video_buffer); ff_tlog_link(NULL, link, 0);
if (link->dstpad->get_video_buffer)
ret = link->dstpad->get_video_buffer(link, w, h);
diff --git a/libavfilter/video.h b/libavfilter/video.h
index f7e8e34..56c58d6 100644
--- a/libavfilter/video.h
+++ b/libavfilter/video.h
@@ -1,18 +1,20 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2007 Bobby Bingham
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/vidstabutils.c b/libavfilter/vidstabutils.c
new file mode 100644
index 0000000..13544cf
--- /dev/null
+++ b/libavfilter/vidstabutils.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vidstabutils.h"
+
+/** convert AV's pixelformat to vid.stab pixelformat */
+VSPixelFormat ff_av2vs_pixfmt(AVFilterContext *ctx, enum AVPixelFormat pf)
+{
+ switch (pf) {
+ case AV_PIX_FMT_YUV420P: return PF_YUV420P;
+ case AV_PIX_FMT_YUV422P: return PF_YUV422P;
+ case AV_PIX_FMT_YUV444P: return PF_YUV444P;
+ case AV_PIX_FMT_YUV410P: return PF_YUV410P;
+ case AV_PIX_FMT_YUV411P: return PF_YUV411P;
+ case AV_PIX_FMT_YUV440P: return PF_YUV440P;
+ case AV_PIX_FMT_YUVA420P: return PF_YUVA420P;
+ case AV_PIX_FMT_GRAY8: return PF_GRAY8;
+ case AV_PIX_FMT_RGB24: return PF_RGB24;
+ case AV_PIX_FMT_BGR24: return PF_BGR24;
+ case AV_PIX_FMT_RGBA: return PF_RGBA;
+ default:
+ av_log(ctx, AV_LOG_ERROR, "cannot deal with pixel format %i\n", pf);
+ return PF_NONE;
+ }
+}
+
+/** struct to hold a valid context for logging from within vid.stab lib */
+typedef struct {
+ const AVClass *class;
+} VS2AVLogCtx;
+
+/** wrapper to log vs_log into av_log */
+static int vs2av_log(int type, const char *tag, const char *format, ...)
+{
+ va_list ap;
+ VS2AVLogCtx ctx;
+ AVClass class = {
+ .class_name = tag,
+ .item_name = av_default_item_name,
+ .option = 0,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_FILTER,
+ };
+ ctx.class = &class;
+ va_start(ap, format);
+ av_vlog(&ctx, type, format, ap);
+ va_end(ap);
+ return VS_OK;
+}
+
+/** sets the memory allocation function and logging constants to av versions */
+void ff_vs_init(void)
+{
+ vs_malloc = av_malloc;
+ vs_zalloc = av_mallocz;
+ vs_realloc = av_realloc;
+ vs_free = av_free;
+
+ VS_ERROR_TYPE = AV_LOG_ERROR;
+ VS_WARN_TYPE = AV_LOG_WARNING;
+ VS_INFO_TYPE = AV_LOG_INFO;
+ VS_MSG_TYPE = AV_LOG_VERBOSE;
+
+ vs_log = vs2av_log;
+
+ VS_ERROR = 0;
+ VS_OK = 1;
+}
diff --git a/libavfilter/vidstabutils.h b/libavfilter/vidstabutils.h
new file mode 100644
index 0000000..c6d6ced
--- /dev/null
+++ b/libavfilter/vidstabutils.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013 Georg Martius <georg dot martius at web dot de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_VIDSTABUTILS_H
+#define AVFILTER_VIDSTABUTILS_H
+
+#include <vid.stab/libvidstab.h>
+
+#include "avfilter.h"
+
+/* Conversion routines between libav* and vid.stab */
+
+/**
+ * Converts an AVPixelFormat to a VSPixelFormat.
+ *
+ * @param[in] ctx AVFilterContext used for logging
+ * @param[in] pf AVPixelFormat
+ * @return a corresponding VSPixelFormat
+ */
+VSPixelFormat ff_av2vs_pixfmt(AVFilterContext *ctx, enum AVPixelFormat pf);
+
+/**
+ * Initialize libvidstab
+ *
+ * Sets the memory allocation functions and logging constants to corresponding
+ * av* versions.
+ */
+void ff_vs_init(void);
+
+#endif
diff --git a/libavfilter/vsink_nullsink.c b/libavfilter/vsink_nullsink.c
index 14b6b12..281721b 100644
--- a/libavfilter/vsink_nullsink.c
+++ b/libavfilter/vsink_nullsink.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/vsrc_cellauto.c b/libavfilter/vsrc_cellauto.c
new file mode 100644
index 0000000..5e2df2d
--- /dev/null
+++ b/libavfilter/vsrc_cellauto.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) Stefano Sabatini 2011
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * cellular automaton video source, based on Stephen Wolfram "experimentus crucis"
+ */
+
+/* #define DEBUG */
+
+#include "libavutil/file.h"
+#include "libavutil/lfg.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/random_seed.h"
+#include "libavutil/avstring.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ char *filename;
+ char *rule_str;
+ uint8_t *file_buf;
+ size_t file_bufsize;
+ uint8_t *buf;
+ int buf_prev_row_idx, buf_row_idx;
+ uint8_t rule;
+ uint64_t pts;
+ AVRational frame_rate;
+ double random_fill_ratio;
+ uint32_t random_seed;
+ int stitch, scroll, start_full;
+ int64_t generation; ///< the generation number, starting from 0
+ AVLFG lfg;
+ char *pattern;
+} CellAutoContext;
+
+#define OFFSET(x) offsetof(CellAutoContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+
+static const AVOption cellauto_options[] = {
+ { "filename", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "f", "read initial pattern from file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "pattern", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "p", "set initial pattern", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "rule", "set rule", OFFSET(rule), AV_OPT_TYPE_INT, {.i64 = 110}, 0, 255, FLAGS },
+ { "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS },
+ { "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl = 1/M_PHI}, 0, 1, FLAGS },
+ { "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64 = -1}, -1, UINT32_MAX, FLAGS },
+ { "scroll", "scroll pattern downward", OFFSET(scroll), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
+ { "start_full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
+ { "full", "start filling the whole video", OFFSET(start_full), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
+ { "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(cellauto);
+
+#ifdef DEBUG
+static void show_cellauto_row(AVFilterContext *ctx)
+{
+ CellAutoContext *cellauto = ctx->priv;
+ int i;
+ uint8_t *row = cellauto->buf + cellauto->w * cellauto->buf_row_idx;
+ char *line = av_malloc(cellauto->w + 1);
+ if (!line)
+ return;
+
+ for (i = 0; i < cellauto->w; i++)
+ line[i] = row[i] ? '@' : ' ';
+ line[i] = 0;
+ av_log(ctx, AV_LOG_DEBUG, "generation:%"PRId64" row:%s|\n", cellauto->generation, line);
+ av_free(line);
+}
+#endif
+
+static int init_pattern_from_string(AVFilterContext *ctx)
+{
+ CellAutoContext *cellauto = ctx->priv;
+ char *p;
+ int i, w = 0;
+
+ w = strlen(cellauto->pattern);
+ av_log(ctx, AV_LOG_DEBUG, "w:%d\n", w);
+
+ if (cellauto->w) {
+ if (w > cellauto->w) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The specified width is %d which cannot contain the provided string width of %d\n",
+ cellauto->w, w);
+ return AVERROR(EINVAL);
+ }
+ } else {
+ /* width was not specified, set it to width of the provided row */
+ cellauto->w = w;
+ cellauto->h = (double)cellauto->w * M_PHI;
+ }
+
+ cellauto->buf = av_mallocz_array(sizeof(uint8_t) * cellauto->w, cellauto->h);
+ if (!cellauto->buf)
+ return AVERROR(ENOMEM);
+
+ /* fill buf */
+ p = cellauto->pattern;
+ for (i = (cellauto->w - w)/2;; i++) {
+ av_log(ctx, AV_LOG_DEBUG, "%d %c\n", i, *p == '\n' ? 'N' : *p);
+ if (*p == '\n' || !*p)
+ break;
+ else
+ cellauto->buf[i] = !!av_isgraph(*(p++));
+ }
+
+ return 0;
+}
+
+static int init_pattern_from_file(AVFilterContext *ctx)
+{
+ CellAutoContext *cellauto = ctx->priv;
+ int ret;
+
+ ret = av_file_map(cellauto->filename,
+ &cellauto->file_buf, &cellauto->file_bufsize, 0, ctx);
+ if (ret < 0)
+ return ret;
+
+ /* create a string based on the read file */
+ cellauto->pattern = av_malloc(cellauto->file_bufsize + 1);
+ if (!cellauto->pattern)
+ return AVERROR(ENOMEM);
+ memcpy(cellauto->pattern, cellauto->file_buf, cellauto->file_bufsize);
+ cellauto->pattern[cellauto->file_bufsize] = 0;
+
+ return init_pattern_from_string(ctx);
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ CellAutoContext *cellauto = ctx->priv;
+ int ret;
+
+ if (!cellauto->w && !cellauto->filename && !cellauto->pattern)
+ av_opt_set(cellauto, "size", "320x518", 0);
+
+ if (cellauto->filename && cellauto->pattern) {
+ av_log(ctx, AV_LOG_ERROR, "Only one of the filename or pattern options can be used\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (cellauto->filename) {
+ if ((ret = init_pattern_from_file(ctx)) < 0)
+ return ret;
+ } else if (cellauto->pattern) {
+ if ((ret = init_pattern_from_string(ctx)) < 0)
+ return ret;
+ } else {
+ /* fill the first row randomly */
+ int i;
+
+ cellauto->buf = av_mallocz_array(sizeof(uint8_t) * cellauto->w, cellauto->h);
+ if (!cellauto->buf)
+ return AVERROR(ENOMEM);
+ if (cellauto->random_seed == -1)
+ cellauto->random_seed = av_get_random_seed();
+
+ av_lfg_init(&cellauto->lfg, cellauto->random_seed);
+
+ for (i = 0; i < cellauto->w; i++) {
+ double r = (double)av_lfg_get(&cellauto->lfg) / UINT32_MAX;
+ if (r <= cellauto->random_fill_ratio)
+ cellauto->buf[i] = 1;
+ }
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "s:%dx%d r:%d/%d rule:%d stitch:%d scroll:%d full:%d seed:%u\n",
+ cellauto->w, cellauto->h, cellauto->frame_rate.num, cellauto->frame_rate.den,
+ cellauto->rule, cellauto->stitch, cellauto->scroll, cellauto->start_full,
+ cellauto->random_seed);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ CellAutoContext *cellauto = ctx->priv;
+
+ av_file_unmap(cellauto->file_buf, cellauto->file_bufsize);
+ av_freep(&cellauto->buf);
+ av_freep(&cellauto->pattern);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ CellAutoContext *cellauto = outlink->src->priv;
+
+ outlink->w = cellauto->w;
+ outlink->h = cellauto->h;
+ outlink->time_base = av_inv_q(cellauto->frame_rate);
+
+ return 0;
+}
+
+static void evolve(AVFilterContext *ctx)
+{
+ CellAutoContext *cellauto = ctx->priv;
+ int i, v, pos[3];
+ uint8_t *row, *prev_row = cellauto->buf + cellauto->buf_row_idx * cellauto->w;
+ enum { NW, N, NE };
+
+ cellauto->buf_prev_row_idx = cellauto->buf_row_idx;
+ cellauto->buf_row_idx = cellauto->buf_row_idx == cellauto->h-1 ? 0 : cellauto->buf_row_idx+1;
+ row = cellauto->buf + cellauto->w * cellauto->buf_row_idx;
+
+ for (i = 0; i < cellauto->w; i++) {
+ if (cellauto->stitch) {
+ pos[NW] = i-1 < 0 ? cellauto->w-1 : i-1;
+ pos[N] = i;
+ pos[NE] = i+1 == cellauto->w ? 0 : i+1;
+ v = prev_row[pos[NW]]<<2 | prev_row[pos[N]]<<1 | prev_row[pos[NE]];
+ } else {
+ v = 0;
+ v|= i-1 >= 0 ? prev_row[i-1]<<2 : 0;
+ v|= prev_row[i ]<<1 ;
+ v|= i+1 < cellauto->w ? prev_row[i+1] : 0;
+ }
+ row[i] = !!(cellauto->rule & (1<<v));
+ av_dlog(ctx, "i:%d context:%c%c%c -> cell:%d\n", i,
+ v&4?'@':' ', v&2?'@':' ', v&1?'@':' ', row[i]);
+ }
+
+ cellauto->generation++;
+}
+
+static void fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ CellAutoContext *cellauto = ctx->priv;
+ int i, j, k, row_idx = 0;
+ uint8_t *p0 = picref->data[0];
+
+ if (cellauto->scroll && cellauto->generation >= cellauto->h)
+ /* show on top the oldest row */
+ row_idx = (cellauto->buf_row_idx + 1) % cellauto->h;
+
+ /* fill the output picture with the whole buffer */
+ for (i = 0; i < cellauto->h; i++) {
+ uint8_t byte = 0;
+ uint8_t *row = cellauto->buf + row_idx*cellauto->w;
+ uint8_t *p = p0;
+ for (k = 0, j = 0; j < cellauto->w; j++) {
+ byte |= row[j]<<(7-k++);
+ if (k==8 || j == cellauto->w-1) {
+ k = 0;
+ *p++ = byte;
+ byte = 0;
+ }
+ }
+ row_idx = (row_idx + 1) % cellauto->h;
+ p0 += picref->linesize[0];
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ CellAutoContext *cellauto = outlink->src->priv;
+ AVFrame *picref = ff_get_video_buffer(outlink, cellauto->w, cellauto->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+ picref->sample_aspect_ratio = (AVRational) {1, 1};
+ if (cellauto->generation == 0 && cellauto->start_full) {
+ int i;
+ for (i = 0; i < cellauto->h-1; i++)
+ evolve(outlink->src);
+ }
+ fill_picture(outlink->src, picref);
+ evolve(outlink->src);
+
+ picref->pts = cellauto->pts++;
+
+#ifdef DEBUG
+ show_cellauto_row(outlink->src);
+#endif
+ return ff_filter_frame(outlink, picref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_NONE };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static const AVFilterPad cellauto_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_cellauto = {
+ .name = "cellauto",
+ .description = NULL_IF_CONFIG_SMALL("Create pattern generated by an elementary cellular automaton."),
+ .priv_size = sizeof(CellAutoContext),
+ .priv_class = &cellauto_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = cellauto_outputs,
+};
diff --git a/libavfilter/vsrc_color.c b/libavfilter/vsrc_color.c
deleted file mode 100644
index 3b506ec..0000000
--- a/libavfilter/vsrc_color.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (c) 2010 Stefano Sabatini
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * color source
- */
-
-#include <stdio.h>
-#include <string.h>
-
-#include "avfilter.h"
-#include "formats.h"
-#include "internal.h"
-#include "video.h"
-#include "libavutil/pixdesc.h"
-#include "libavutil/colorspace.h"
-#include "libavutil/imgutils.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/mem.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "drawutils.h"
-
-typedef struct ColorContext {
- const AVClass *class;
- int w, h;
- uint8_t color[4];
- AVRational time_base;
- uint8_t *line[4];
- int line_step[4];
- int hsub, vsub; ///< chroma subsampling values
- uint64_t pts;
- char *color_str;
- char *size_str;
- char *framerate_str;
-} ColorContext;
-
-static av_cold int color_init(AVFilterContext *ctx)
-{
- ColorContext *color = ctx->priv;
- AVRational frame_rate_q;
- int ret;
-
- if (av_parse_video_size(&color->w, &color->h, color->size_str) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame size: %s\n", color->size_str);
- return AVERROR(EINVAL);
- }
-
- if (av_parse_video_rate(&frame_rate_q, color->framerate_str) < 0 ||
- frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", color->framerate_str);
- return AVERROR(EINVAL);
- }
- color->time_base.num = frame_rate_q.den;
- color->time_base.den = frame_rate_q.num;
-
- if ((ret = av_parse_color(color->color, color->color_str, -1, ctx)) < 0)
- return ret;
-
- return 0;
-}
-
-static av_cold void color_uninit(AVFilterContext *ctx)
-{
- ColorContext *color = ctx->priv;
- int i;
-
- for (i = 0; i < 4; i++) {
- av_freep(&color->line[i]);
- color->line_step[i] = 0;
- }
-}
-
-static int query_formats(AVFilterContext *ctx)
-{
- static const enum AVPixelFormat pix_fmts[] = {
- AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA,
- AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
- AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
-
- AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
- AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
- AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
- AV_PIX_FMT_YUVA420P,
-
- AV_PIX_FMT_NONE
- };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
-}
-
-static int color_config_props(AVFilterLink *inlink)
-{
- AVFilterContext *ctx = inlink->src;
- ColorContext *color = ctx->priv;
- uint8_t rgba_color[4];
- int is_packed_rgba;
- const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
-
- color->hsub = pix_desc->log2_chroma_w;
- color->vsub = pix_desc->log2_chroma_h;
-
- color->w &= ~((1 << color->hsub) - 1);
- color->h &= ~((1 << color->vsub) - 1);
- if (av_image_check_size(color->w, color->h, 0, ctx) < 0)
- return AVERROR(EINVAL);
-
- memcpy(rgba_color, color->color, sizeof(rgba_color));
- ff_fill_line_with_color(color->line, color->line_step, color->w, color->color,
- inlink->format, rgba_color, &is_packed_rgba, NULL);
-
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d r:%d/%d color:0x%02x%02x%02x%02x[%s]\n",
- color->w, color->h, color->time_base.den, color->time_base.num,
- color->color[0], color->color[1], color->color[2], color->color[3],
- is_packed_rgba ? "rgba" : "yuva");
- inlink->w = color->w;
- inlink->h = color->h;
- inlink->time_base = color->time_base;
-
- return 0;
-}
-
-static int color_request_frame(AVFilterLink *link)
-{
- ColorContext *color = link->src->priv;
- AVFrame *frame = ff_get_video_buffer(link, color->w, color->h);
-
- if (!frame)
- return AVERROR(ENOMEM);
-
- frame->sample_aspect_ratio = (AVRational) {1, 1};
- frame->pts = color->pts++;
-
- ff_draw_rectangle(frame->data, frame->linesize,
- color->line, color->line_step, color->hsub, color->vsub,
- 0, 0, color->w, color->h);
- return ff_filter_frame(link, frame);
-}
-
-#define OFFSET(x) offsetof(ColorContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "color", "Output video color", OFFSET(color_str), AV_OPT_TYPE_STRING, { .str = "black" }, .flags = FLAGS },
- { "size", "Output video size (wxh or an abbreviation)", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = "320x240" }, .flags = FLAGS },
- { "framerate", "Output video framerate", OFFSET(framerate_str), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = FLAGS },
- { NULL },
-};
-
-static const AVClass color_class = {
- .class_name = "color",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVFilterPad avfilter_vsrc_color_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = color_request_frame,
- .config_props = color_config_props
- },
- { NULL }
-};
-
-AVFilter ff_vsrc_color = {
- .name = "color",
- .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input, syntax is: [color[:size[:rate]]]"),
-
- .priv_class = &color_class,
- .priv_size = sizeof(ColorContext),
- .init = color_init,
- .uninit = color_uninit,
-
- .query_formats = query_formats,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_color_outputs,
-};
diff --git a/libavfilter/vsrc_life.c b/libavfilter/vsrc_life.c
new file mode 100644
index 0000000..029e1bb
--- /dev/null
+++ b/libavfilter/vsrc_life.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) Stefano Sabatini 2010
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * life video source, based on John Conways' Life Game
+ */
+
+/* #define DEBUG */
+
+#include "libavutil/file.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/lfg.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/random_seed.h"
+#include "libavutil/avstring.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ char *filename;
+ char *rule_str;
+ uint8_t *file_buf;
+ size_t file_bufsize;
+
+ /**
+ * The two grid state buffers.
+ *
+ * A 0xFF (ALIVE_CELL) value means the cell is alive (or new born), while
+ * the decreasing values from 0xFE to 0 means the cell is dead; the range
+ * of values is used for the slow death effect, or mold (0xFE means dead,
+ * 0xFD means very dead, 0xFC means very very dead... and 0x00 means
+ * definitely dead/mold).
+ */
+ uint8_t *buf[2];
+
+ uint8_t buf_idx;
+ uint16_t stay_rule; ///< encode the behavior for filled cells
+ uint16_t born_rule; ///< encode the behavior for empty cells
+ uint64_t pts;
+ AVRational frame_rate;
+ double random_fill_ratio;
+ uint32_t random_seed;
+ int stitch;
+ int mold;
+ uint8_t life_color[4];
+ uint8_t death_color[4];
+ uint8_t mold_color[4];
+ AVLFG lfg;
+ void (*draw)(AVFilterContext*, AVFrame*);
+} LifeContext;
+
+#define ALIVE_CELL 0xFF
+#define OFFSET(x) offsetof(LifeContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption life_options[] = {
+ { "filename", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "f", "set source file", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS },
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "rule", "set rule", OFFSET(rule_str), AV_OPT_TYPE_STRING, {.str = "B3/S23"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "random_fill_ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
+ { "ratio", "set fill ratio for filling initial grid randomly", OFFSET(random_fill_ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1/M_PHI}, 0, 1, FLAGS },
+ { "random_seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
+ { "seed", "set the seed for filling the initial grid randomly", OFFSET(random_seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
+ { "stitch", "stitch boundaries", OFFSET(stitch), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
+ { "mold", "set mold speed for dead cells", OFFSET(mold), AV_OPT_TYPE_INT, {.i64=0}, 0, 0xFF, FLAGS },
+ { "life_color", "set life color", OFFSET( life_color), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "death_color", "set death color", OFFSET(death_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "mold_color", "set mold color", OFFSET( mold_color), AV_OPT_TYPE_COLOR, {.str="black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(life);
+
+static int parse_rule(uint16_t *born_rule, uint16_t *stay_rule,
+ const char *rule_str, void *log_ctx)
+{
+ char *tail;
+ const char *p = rule_str;
+ *born_rule = 0;
+ *stay_rule = 0;
+
+ if (strchr("bBsS", *p)) {
+ /* parse rule as a Born / Stay Alive code, see
+ * http://en.wikipedia.org/wiki/Conway%27s_Game_of_Life */
+ do {
+ uint16_t *rule = (*p == 'b' || *p == 'B') ? born_rule : stay_rule;
+ p++;
+ while (*p >= '0' && *p <= '8') {
+ *rule += 1<<(*p - '0');
+ p++;
+ }
+ if (*p != '/')
+ break;
+ p++;
+ } while (strchr("bBsS", *p));
+
+ if (*p)
+ goto error;
+ } else {
+ /* parse rule as a number, expressed in the form STAY|(BORN<<9),
+ * where STAY and BORN encode the corresponding 9-bits rule */
+ long int rule = strtol(rule_str, &tail, 10);
+ if (*tail)
+ goto error;
+ *born_rule = ((1<<9)-1) & rule;
+ *stay_rule = rule >> 9;
+ }
+
+ return 0;
+
+error:
+ av_log(log_ctx, AV_LOG_ERROR, "Invalid rule code '%s' provided\n", rule_str);
+ return AVERROR(EINVAL);
+}
+
+#ifdef DEBUG
+static void show_life_grid(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ int i, j;
+
+ char *line = av_malloc(life->w + 1);
+ if (!line)
+ return;
+ for (i = 0; i < life->h; i++) {
+ for (j = 0; j < life->w; j++)
+ line[j] = life->buf[life->buf_idx][i*life->w + j] == ALIVE_CELL ? '@' : ' ';
+ line[j] = 0;
+ av_log(ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line);
+ }
+ av_free(line);
+}
+#endif
+
+static int init_pattern_from_file(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ char *p;
+ int ret, i, i0, j, h = 0, w, max_w = 0;
+
+ if ((ret = av_file_map(life->filename, &life->file_buf, &life->file_bufsize,
+ 0, ctx)) < 0)
+ return ret;
+ av_freep(&life->filename);
+
+ /* prescan file to get the number of lines and the maximum width */
+ w = 0;
+ for (i = 0; i < life->file_bufsize; i++) {
+ if (life->file_buf[i] == '\n') {
+ h++; max_w = FFMAX(w, max_w); w = 0;
+ } else {
+ w++;
+ }
+ }
+ av_log(ctx, AV_LOG_DEBUG, "h:%d max_w:%d\n", h, max_w);
+
+ if (life->w) {
+ if (max_w > life->w || h > life->h) {
+ av_log(ctx, AV_LOG_ERROR,
+ "The specified size is %dx%d which cannot contain the provided file size of %dx%d\n",
+ life->w, life->h, max_w, h);
+ return AVERROR(EINVAL);
+ }
+ } else {
+ /* size was not specified, set it to size of the grid */
+ life->w = max_w;
+ life->h = h;
+ }
+
+ if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
+ !(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
+ av_free(life->buf[0]);
+ av_free(life->buf[1]);
+ return AVERROR(ENOMEM);
+ }
+
+ /* fill buf[0] */
+ p = life->file_buf;
+ for (i0 = 0, i = (life->h - h)/2; i0 < h; i0++, i++) {
+ for (j = (life->w - max_w)/2;; j++) {
+ av_log(ctx, AV_LOG_DEBUG, "%d:%d %c\n", i, j, *p == '\n' ? 'N' : *p);
+ if (*p == '\n') {
+ p++; break;
+ } else
+ life->buf[0][i*life->w + j] = av_isgraph(*(p++)) ? ALIVE_CELL : 0;
+ }
+ }
+ life->buf_idx = 0;
+
+ return 0;
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ int ret;
+
+ if (!life->w && !life->filename)
+ av_opt_set(life, "size", "320x240", 0);
+
+ if ((ret = parse_rule(&life->born_rule, &life->stay_rule, life->rule_str, ctx)) < 0)
+ return ret;
+
+ if (!life->mold && memcmp(life->mold_color, "\x00\x00\x00", 3))
+ av_log(ctx, AV_LOG_WARNING,
+ "Mold color is set while mold isn't, ignoring the color.\n");
+
+ if (!life->filename) {
+ /* fill the grid randomly */
+ int i;
+
+ if (!(life->buf[0] = av_calloc(life->h * life->w, sizeof(*life->buf[0]))) ||
+ !(life->buf[1] = av_calloc(life->h * life->w, sizeof(*life->buf[1])))) {
+ av_free(life->buf[0]);
+ av_free(life->buf[1]);
+ return AVERROR(ENOMEM);
+ }
+ if (life->random_seed == -1)
+ life->random_seed = av_get_random_seed();
+
+ av_lfg_init(&life->lfg, life->random_seed);
+
+ for (i = 0; i < life->w * life->h; i++) {
+ double r = (double)av_lfg_get(&life->lfg) / UINT32_MAX;
+ if (r <= life->random_fill_ratio)
+ life->buf[0][i] = ALIVE_CELL;
+ }
+ life->buf_idx = 0;
+ } else {
+ if ((ret = init_pattern_from_file(ctx)) < 0)
+ return ret;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE,
+ "s:%dx%d r:%d/%d rule:%s stay_rule:%d born_rule:%d stitch:%d seed:%u\n",
+ life->w, life->h, life->frame_rate.num, life->frame_rate.den,
+ life->rule_str, life->stay_rule, life->born_rule, life->stitch,
+ life->random_seed);
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+
+ av_file_unmap(life->file_buf, life->file_bufsize);
+ av_freep(&life->rule_str);
+ av_freep(&life->buf[0]);
+ av_freep(&life->buf[1]);
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ LifeContext *life = outlink->src->priv;
+
+ outlink->w = life->w;
+ outlink->h = life->h;
+ outlink->time_base = av_inv_q(life->frame_rate);
+
+ return 0;
+}
+
+static void evolve(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ int i, j;
+ uint8_t *oldbuf = life->buf[ life->buf_idx];
+ uint8_t *newbuf = life->buf[!life->buf_idx];
+
+ enum { NW, N, NE, W, E, SW, S, SE };
+
+ /* evolve the grid */
+ for (i = 0; i < life->h; i++) {
+ for (j = 0; j < life->w; j++) {
+ int pos[8][2], n, alive, cell;
+ if (life->stitch) {
+ pos[NW][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NW][1] = (j-1) < 0 ? life->w-1 : j-1;
+ pos[N ][0] = (i-1) < 0 ? life->h-1 : i-1; pos[N ][1] = j ;
+ pos[NE][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NE][1] = (j+1) == life->w ? 0 : j+1;
+ pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? life->w-1 : j-1;
+ pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? 0 : j+1;
+ pos[SW][0] = (i+1) == life->h ? 0 : i+1; pos[SW][1] = (j-1) < 0 ? life->w-1 : j-1;
+ pos[S ][0] = (i+1) == life->h ? 0 : i+1; pos[S ][1] = j ;
+ pos[SE][0] = (i+1) == life->h ? 0 : i+1; pos[SE][1] = (j+1) == life->w ? 0 : j+1;
+ } else {
+ pos[NW][0] = (i-1) < 0 ? -1 : i-1; pos[NW][1] = (j-1) < 0 ? -1 : j-1;
+ pos[N ][0] = (i-1) < 0 ? -1 : i-1; pos[N ][1] = j ;
+ pos[NE][0] = (i-1) < 0 ? -1 : i-1; pos[NE][1] = (j+1) == life->w ? -1 : j+1;
+ pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? -1 : j-1;
+ pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? -1 : j+1;
+ pos[SW][0] = (i+1) == life->h ? -1 : i+1; pos[SW][1] = (j-1) < 0 ? -1 : j-1;
+ pos[S ][0] = (i+1) == life->h ? -1 : i+1; pos[S ][1] = j ;
+ pos[SE][0] = (i+1) == life->h ? -1 : i+1; pos[SE][1] = (j+1) == life->w ? -1 : j+1;
+ }
+
+ /* compute the number of live neighbor cells */
+ n = (pos[NW][0] == -1 || pos[NW][1] == -1 ? 0 : oldbuf[pos[NW][0]*life->w + pos[NW][1]] == ALIVE_CELL) +
+ (pos[N ][0] == -1 || pos[N ][1] == -1 ? 0 : oldbuf[pos[N ][0]*life->w + pos[N ][1]] == ALIVE_CELL) +
+ (pos[NE][0] == -1 || pos[NE][1] == -1 ? 0 : oldbuf[pos[NE][0]*life->w + pos[NE][1]] == ALIVE_CELL) +
+ (pos[W ][0] == -1 || pos[W ][1] == -1 ? 0 : oldbuf[pos[W ][0]*life->w + pos[W ][1]] == ALIVE_CELL) +
+ (pos[E ][0] == -1 || pos[E ][1] == -1 ? 0 : oldbuf[pos[E ][0]*life->w + pos[E ][1]] == ALIVE_CELL) +
+ (pos[SW][0] == -1 || pos[SW][1] == -1 ? 0 : oldbuf[pos[SW][0]*life->w + pos[SW][1]] == ALIVE_CELL) +
+ (pos[S ][0] == -1 || pos[S ][1] == -1 ? 0 : oldbuf[pos[S ][0]*life->w + pos[S ][1]] == ALIVE_CELL) +
+ (pos[SE][0] == -1 || pos[SE][1] == -1 ? 0 : oldbuf[pos[SE][0]*life->w + pos[SE][1]] == ALIVE_CELL);
+ cell = oldbuf[i*life->w + j];
+ alive = 1<<n & (cell == ALIVE_CELL ? life->stay_rule : life->born_rule);
+ if (alive) *newbuf = ALIVE_CELL; // new cell is alive
+ else if (cell) *newbuf = cell - 1; // new cell is dead and in the process of mold
+ else *newbuf = 0; // new cell is definitely dead
+ av_dlog(ctx, "i:%d j:%d live_neighbors:%d cell:%d -> cell:%d\n", i, j, n, cell, *newbuf);
+ newbuf++;
+ }
+ }
+
+ life->buf_idx = !life->buf_idx;
+}
+
+static void fill_picture_monoblack(AVFilterContext *ctx, AVFrame *picref)
+{
+ LifeContext *life = ctx->priv;
+ uint8_t *buf = life->buf[life->buf_idx];
+ int i, j, k;
+
+ /* fill the output picture with the old grid buffer */
+ for (i = 0; i < life->h; i++) {
+ uint8_t byte = 0;
+ uint8_t *p = picref->data[0] + i * picref->linesize[0];
+ for (k = 0, j = 0; j < life->w; j++) {
+ byte |= (buf[i*life->w+j] == ALIVE_CELL)<<(7-k++);
+ if (k==8 || j == life->w-1) {
+ k = 0;
+ *p++ = byte;
+ byte = 0;
+ }
+ }
+ }
+}
+
+// divide by 255 and round to nearest
+// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
+#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
+
+static void fill_picture_rgb(AVFilterContext *ctx, AVFrame *picref)
+{
+ LifeContext *life = ctx->priv;
+ uint8_t *buf = life->buf[life->buf_idx];
+ int i, j;
+
+ /* fill the output picture with the old grid buffer */
+ for (i = 0; i < life->h; i++) {
+ uint8_t *p = picref->data[0] + i * picref->linesize[0];
+ for (j = 0; j < life->w; j++) {
+ uint8_t v = buf[i*life->w + j];
+ if (life->mold && v != ALIVE_CELL) {
+ const uint8_t *c1 = life-> mold_color;
+ const uint8_t *c2 = life->death_color;
+ int death_age = FFMIN((0xff - v) * life->mold, 0xff);
+ *p++ = FAST_DIV255((c2[0] << 8) + ((int)c1[0] - (int)c2[0]) * death_age);
+ *p++ = FAST_DIV255((c2[1] << 8) + ((int)c1[1] - (int)c2[1]) * death_age);
+ *p++ = FAST_DIV255((c2[2] << 8) + ((int)c1[2] - (int)c2[2]) * death_age);
+ } else {
+ const uint8_t *c = v == ALIVE_CELL ? life->life_color : life->death_color;
+ AV_WB24(p, c[0]<<16 | c[1]<<8 | c[2]);
+ p += 3;
+ }
+ }
+ }
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ LifeContext *life = outlink->src->priv;
+ AVFrame *picref = ff_get_video_buffer(outlink, life->w, life->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+ picref->sample_aspect_ratio = (AVRational) {1, 1};
+ picref->pts = life->pts++;
+
+ life->draw(outlink->src, picref);
+ evolve(outlink->src);
+#ifdef DEBUG
+ show_life_grid(outlink->src);
+#endif
+ return ff_filter_frame(outlink, picref);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ LifeContext *life = ctx->priv;
+ enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_NONE, AV_PIX_FMT_NONE };
+ if (life->mold || memcmp(life-> life_color, "\xff\xff\xff", 3)
+ || memcmp(life->death_color, "\x00\x00\x00", 3)) {
+ pix_fmts[0] = AV_PIX_FMT_RGB24;
+ life->draw = fill_picture_rgb;
+ } else {
+ pix_fmts[0] = AV_PIX_FMT_MONOBLACK;
+ life->draw = fill_picture_monoblack;
+ }
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static const AVFilterPad life_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL}
+};
+
+AVFilter ff_vsrc_life = {
+ .name = "life",
+ .description = NULL_IF_CONFIG_SMALL("Create life."),
+ .priv_size = sizeof(LifeContext),
+ .priv_class = &life_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = life_outputs,
+};
diff --git a/libavfilter/vsrc_mandelbrot.c b/libavfilter/vsrc_mandelbrot.c
new file mode 100644
index 0000000..255f2db
--- /dev/null
+++ b/libavfilter/vsrc_mandelbrot.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2011 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * The vsrc_color filter from Stefano Sabatini was used as template to create
+ * this
+ */
+
+/**
+ * @file
+ * Mandelbrot fraktal renderer
+ */
+
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+#include "internal.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include <float.h>
+#include <math.h>
+
+#define SQR(a) ((a)*(a))
+
+enum Outer{
+ ITERATION_COUNT,
+ NORMALIZED_ITERATION_COUNT,
+ WHITE,
+ OUTZ,
+};
+
+enum Inner{
+ BLACK,
+ PERIOD,
+ CONVTIME,
+ MINCOL,
+};
+
+typedef struct Point {
+ double p[2];
+ uint32_t val;
+} Point;
+
+typedef struct {
+ const AVClass *class;
+ int w, h;
+ AVRational frame_rate;
+ uint64_t pts;
+ int maxiter;
+ double start_x;
+ double start_y;
+ double start_scale;
+ double end_scale;
+ double end_pts;
+ double bailout;
+ enum Outer outer;
+ enum Inner inner;
+ int cache_allocated;
+ int cache_used;
+ Point *point_cache;
+ Point *next_cache;
+ double (*zyklus)[2];
+ uint32_t dither;
+
+ double morphxf;
+ double morphyf;
+ double morphamp;
+} MBContext;
+
+#define OFFSET(x) offsetof(MBContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+
+static const AVOption mandelbrot_options[] = {
+ {"size", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"s", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"rate", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"r", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ {"maxiter", "set max iterations number", OFFSET(maxiter), AV_OPT_TYPE_INT, {.i64=7189}, 1, INT_MAX, FLAGS },
+ {"start_x", "set the initial x position", OFFSET(start_x), AV_OPT_TYPE_DOUBLE, {.dbl=-0.743643887037158704752191506114774}, -100, 100, FLAGS },
+ {"start_y", "set the initial y position", OFFSET(start_y), AV_OPT_TYPE_DOUBLE, {.dbl=-0.131825904205311970493132056385139}, -100, 100, FLAGS },
+ {"start_scale", "set the initial scale value", OFFSET(start_scale), AV_OPT_TYPE_DOUBLE, {.dbl=3.0}, 0, FLT_MAX, FLAGS },
+ {"end_scale", "set the terminal scale value", OFFSET(end_scale), AV_OPT_TYPE_DOUBLE, {.dbl=0.3}, 0, FLT_MAX, FLAGS },
+ {"end_pts", "set the terminal pts value", OFFSET(end_pts), AV_OPT_TYPE_DOUBLE, {.dbl=400}, 0, INT64_MAX, FLAGS },
+ {"bailout", "set the bailout value", OFFSET(bailout), AV_OPT_TYPE_DOUBLE, {.dbl=10}, 0, FLT_MAX, FLAGS },
+ {"morphxf", "set morph x frequency", OFFSET(morphxf), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, -FLT_MAX, FLT_MAX, FLAGS },
+ {"morphyf", "set morph y frequency", OFFSET(morphyf), AV_OPT_TYPE_DOUBLE, {.dbl=0.0123}, -FLT_MAX, FLT_MAX, FLAGS },
+ {"morphamp", "set morph amplitude", OFFSET(morphamp), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -FLT_MAX, FLT_MAX, FLAGS },
+
+ {"outer", "set outer coloring mode", OFFSET(outer), AV_OPT_TYPE_INT, {.i64=NORMALIZED_ITERATION_COUNT}, 0, INT_MAX, FLAGS, "outer" },
+ {"iteration_count", "set iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"normalized_iteration_count", "set normalized iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=NORMALIZED_ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"white", "set white mode", 0, AV_OPT_TYPE_CONST, {.i64=WHITE}, INT_MIN, INT_MAX, FLAGS, "outer" },
+ {"outz", "set outz mode", 0, AV_OPT_TYPE_CONST, {.i64=OUTZ}, INT_MIN, INT_MAX, FLAGS, "outer" },
+
+ {"inner", "set inner coloring mode", OFFSET(inner), AV_OPT_TYPE_INT, {.i64=MINCOL}, 0, INT_MAX, FLAGS, "inner" },
+ {"black", "set black mode", 0, AV_OPT_TYPE_CONST, {.i64=BLACK}, INT_MIN, INT_MAX, FLAGS, "inner"},
+ {"period", "set period mode", 0, AV_OPT_TYPE_CONST, {.i64=PERIOD}, INT_MIN, INT_MAX, FLAGS, "inner"},
+ {"convergence", "show time until convergence", 0, AV_OPT_TYPE_CONST, {.i64=CONVTIME}, INT_MIN, INT_MAX, FLAGS, "inner"},
+ {"mincol", "color based on point closest to the origin of the iterations", 0, AV_OPT_TYPE_CONST, {.i64=MINCOL}, INT_MIN, INT_MAX, FLAGS, "inner"},
+
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(mandelbrot);
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MBContext *mb = ctx->priv;
+
+ mb->bailout *= mb->bailout;
+
+ mb->start_scale /=mb->h;
+ mb->end_scale /=mb->h;
+
+ mb->cache_allocated = mb->w * mb->h * 3;
+ mb->cache_used = 0;
+ mb->point_cache= av_malloc_array(mb->cache_allocated, sizeof(*mb->point_cache));
+ mb-> next_cache= av_malloc_array(mb->cache_allocated, sizeof(*mb-> next_cache));
+ mb-> zyklus = av_malloc_array(mb->maxiter + 16, sizeof(*mb->zyklus));
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MBContext *mb = ctx->priv;
+
+ av_freep(&mb->point_cache);
+ av_freep(&mb-> next_cache);
+ av_freep(&mb->zyklus);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_BGR32,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->src;
+ MBContext *mb = ctx->priv;
+
+ if (av_image_check_size(mb->w, mb->h, 0, ctx) < 0)
+ return AVERROR(EINVAL);
+
+ inlink->w = mb->w;
+ inlink->h = mb->h;
+ inlink->time_base = av_inv_q(mb->frame_rate);
+
+ return 0;
+}
+
+static void fill_from_cache(AVFilterContext *ctx, uint32_t *color, int *in_cidx, int *out_cidx, double py, double scale){
+ MBContext *mb = ctx->priv;
+ if(mb->morphamp)
+ return;
+ for(; *in_cidx < mb->cache_used; (*in_cidx)++){
+ Point *p= &mb->point_cache[*in_cidx];
+ int x;
+ if(p->p[1] > py)
+ break;
+ x= round((p->p[0] - mb->start_x) / scale + mb->w/2);
+ if(x<0 || x >= mb->w)
+ continue;
+ if(color) color[x] = p->val;
+ if(out_cidx && *out_cidx < mb->cache_allocated)
+ mb->next_cache[(*out_cidx)++]= *p;
+ }
+}
+
+static int interpol(MBContext *mb, uint32_t *color, int x, int y, int linesize)
+{
+ uint32_t a,b,c,d, i;
+ uint32_t ipol=0xFF000000;
+ int dist;
+
+ if(!x || !y || x+1==mb->w || y+1==mb->h)
+ return 0;
+
+ dist= FFMAX(FFABS(x-(mb->w>>1))*mb->h, FFABS(y-(mb->h>>1))*mb->w);
+
+ if(dist<(mb->w*mb->h>>3))
+ return 0;
+
+ a=color[(x+1) + (y+0)*linesize];
+ b=color[(x-1) + (y+1)*linesize];
+ c=color[(x+0) + (y+1)*linesize];
+ d=color[(x+1) + (y+1)*linesize];
+
+ if(a&&c){
+ b= color[(x-1) + (y+0)*linesize];
+ d= color[(x+0) + (y-1)*linesize];
+ }else if(b&&d){
+ a= color[(x+1) + (y-1)*linesize];
+ c= color[(x-1) + (y-1)*linesize];
+ }else if(c){
+ d= color[(x+0) + (y-1)*linesize];
+ a= color[(x-1) + (y+0)*linesize];
+ b= color[(x+1) + (y-1)*linesize];
+ }else if(d){
+ c= color[(x-1) + (y-1)*linesize];
+ a= color[(x-1) + (y+0)*linesize];
+ b= color[(x+1) + (y-1)*linesize];
+ }else
+ return 0;
+
+ for(i=0; i<3; i++){
+ int s= 8*i;
+ uint8_t ac= a>>s;
+ uint8_t bc= b>>s;
+ uint8_t cc= c>>s;
+ uint8_t dc= d>>s;
+ int ipolab= (ac + bc);
+ int ipolcd= (cc + dc);
+ if(FFABS(ipolab - ipolcd) > 5)
+ return 0;
+ if(FFABS(ac-bc)+FFABS(cc-dc) > 20)
+ return 0;
+ ipol |= ((ipolab + ipolcd + 2)/4)<<s;
+ }
+ color[x + y*linesize]= ipol;
+ return 1;
+}
+
+static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize, int64_t pts)
+{
+ MBContext *mb = ctx->priv;
+ int x,y,i, in_cidx=0, next_cidx=0, tmp_cidx;
+ double scale= mb->start_scale*pow(mb->end_scale/mb->start_scale, pts/mb->end_pts);
+ int use_zyklus=0;
+ fill_from_cache(ctx, NULL, &in_cidx, NULL, mb->start_y+scale*(-mb->h/2-0.5), scale);
+ tmp_cidx= in_cidx;
+ memset(color, 0, sizeof(*color)*mb->w);
+ for(y=0; y<mb->h; y++){
+ int y1= y+1;
+ const double ci=mb->start_y+scale*(y-mb->h/2);
+ fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci, scale);
+ if(y1<mb->h){
+ memset(color+linesize*y1, 0, sizeof(*color)*mb->w);
+ fill_from_cache(ctx, color+linesize*y1, &tmp_cidx, NULL, ci + 3*scale/2, scale);
+ }
+
+ for(x=0; x<mb->w; x++){
+ float av_uninit(epsilon);
+ const double cr=mb->start_x+scale*(x-mb->w/2);
+ double zr=cr;
+ double zi=ci;
+ uint32_t c=0;
+ double dv= mb->dither / (double)(1LL<<32);
+ mb->dither= mb->dither*1664525+1013904223;
+
+ if(color[x + y*linesize] & 0xFF000000)
+ continue;
+ if(!mb->morphamp){
+ if(interpol(mb, color, x, y, linesize)){
+ if(next_cidx < mb->cache_allocated){
+ mb->next_cache[next_cidx ].p[0]= cr;
+ mb->next_cache[next_cidx ].p[1]= ci;
+ mb->next_cache[next_cidx++].val = color[x + y*linesize];
+ }
+ continue;
+ }
+ }else{
+ zr += cos(pts * mb->morphxf) * mb->morphamp;
+ zi += sin(pts * mb->morphyf) * mb->morphamp;
+ }
+
+ use_zyklus= (x==0 || mb->inner!=BLACK ||color[x-1 + y*linesize] == 0xFF000000);
+ if(use_zyklus)
+ epsilon= scale*1*sqrt(SQR(x-mb->w/2) + SQR(y-mb->h/2))/mb->w;
+
+#define Z_Z2_C(outr,outi,inr,ini)\
+ outr= inr*inr - ini*ini + cr;\
+ outi= 2*inr*ini + ci;
+
+#define Z_Z2_C_ZYKLUS(outr,outi,inr,ini, Z)\
+ Z_Z2_C(outr,outi,inr,ini)\
+ if(use_zyklus){\
+ if(Z && fabs(mb->zyklus[i>>1][0]-outr)+fabs(mb->zyklus[i>>1][1]-outi) <= epsilon)\
+ break;\
+ }\
+ mb->zyklus[i][0]= outr;\
+ mb->zyklus[i][1]= outi;\
+
+
+
+ for(i=0; i<mb->maxiter-8; i++){
+ double t;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ i++;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ i++;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ i++;
+ Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
+ i++;
+ Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
+ if(zr*zr + zi*zi > mb->bailout){
+ i-= FFMIN(7, i);
+ for(; i<mb->maxiter; i++){
+ zr= mb->zyklus[i][0];
+ zi= mb->zyklus[i][1];
+ if(zr*zr + zi*zi > mb->bailout){
+ switch(mb->outer){
+ case ITERATION_COUNT:
+ zr = i;
+ c = lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256;
+ break;
+ case NORMALIZED_ITERATION_COUNT:
+ zr = i + log2(log(mb->bailout) / log(zr*zr + zi*zi));
+ c = lrintf((sin(zr)+1)*127) + lrintf((sin(zr/1.234)+1)*127)*256*256 + lrintf((sin(zr/100)+1)*127)*256;
+ break;
+ case WHITE:
+ c = 0xFFFFFF;
+ break;
+ case OUTZ:
+ zr /= mb->bailout;
+ zi /= mb->bailout;
+ c = (((int)(zr*128+128))&0xFF)*256 + (((int)(zi*128+128))&0xFF);
+ }
+ break;
+ }
+ }
+ break;
+ }
+ }
+ if(!c){
+ if(mb->inner==PERIOD){
+ int j;
+ for(j=i-1; j; j--)
+ if(SQR(mb->zyklus[j][0]-zr) + SQR(mb->zyklus[j][1]-zi) < epsilon*epsilon*10)
+ break;
+ if(j){
+ c= i-j;
+ c= ((c<<5)&0xE0) + ((c<<10)&0xE000) + ((c<<15)&0xE00000);
+ }
+ }else if(mb->inner==CONVTIME){
+ c= floor(i*255.0/mb->maxiter+dv)*0x010101;
+ } else if(mb->inner==MINCOL){
+ int j;
+ double closest=9999;
+ int closest_index=0;
+ for(j=i-1; j>=0; j--)
+ if(SQR(mb->zyklus[j][0]) + SQR(mb->zyklus[j][1]) < closest){
+ closest= SQR(mb->zyklus[j][0]) + SQR(mb->zyklus[j][1]);
+ closest_index= j;
+ }
+ closest = sqrt(closest);
+ c= lrintf((mb->zyklus[closest_index][0]/closest+1)*127+dv) + lrintf((mb->zyklus[closest_index][1]/closest+1)*127+dv)*256;
+ }
+ }
+ c |= 0xFF000000;
+ color[x + y*linesize]= c;
+ if(next_cidx < mb->cache_allocated){
+ mb->next_cache[next_cidx ].p[0]= cr;
+ mb->next_cache[next_cidx ].p[1]= ci;
+ mb->next_cache[next_cidx++].val = c;
+ }
+ }
+ fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci + scale/2, scale);
+ }
+ FFSWAP(void*, mb->next_cache, mb->point_cache);
+ mb->cache_used = next_cidx;
+ if(mb->cache_used == mb->cache_allocated)
+ av_log(ctx, AV_LOG_INFO, "Mandelbrot cache is too small!\n");
+}
+
+static int request_frame(AVFilterLink *link)
+{
+ MBContext *mb = link->src->priv;
+ AVFrame *picref = ff_get_video_buffer(link, mb->w, mb->h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+
+ picref->sample_aspect_ratio = (AVRational) {1, 1};
+ picref->pts = mb->pts++;
+
+ draw_mandelbrot(link->src, (uint32_t*)picref->data[0], picref->linesize[0]/4, picref->pts);
+ return ff_filter_frame(link, picref);
+}
+
+static const AVFilterPad mandelbrot_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_mandelbrot = {
+ .name = "mandelbrot",
+ .description = NULL_IF_CONFIG_SMALL("Render a Mandelbrot fractal."),
+ .priv_size = sizeof(MBContext),
+ .priv_class = &mandelbrot_class,
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mandelbrot_outputs,
+};
diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c
deleted file mode 100644
index 0e5df32..0000000
--- a/libavfilter/vsrc_movie.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright (c) 2010 Stefano Sabatini
- * Copyright (c) 2008 Victor Paesa
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * movie video source
- *
- * @todo use direct rendering (no allocation of a new frame)
- * @todo support a PTS correction mechanism
- * @todo support more than one output stream
- */
-
-#include <float.h>
-#include <stdint.h>
-
-#include "libavutil/attributes.h"
-#include "libavutil/avstring.h"
-#include "libavutil/opt.h"
-#include "libavutil/imgutils.h"
-#include "libavformat/avformat.h"
-#include "avfilter.h"
-#include "formats.h"
-#include "internal.h"
-#include "video.h"
-
-typedef struct MovieContext {
- const AVClass *class;
- int64_t seek_point; ///< seekpoint in microseconds
- double seek_point_d;
- char *format_name;
- char *file_name;
- int stream_index;
-
- AVFormatContext *format_ctx;
- AVCodecContext *codec_ctx;
- int is_done;
- AVFrame *frame; ///< video frame to store the decoded images in
-
- int w, h;
-} MovieContext;
-
-#define OFFSET(x) offsetof(MovieContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-
-static const AVOption movie_options[]= {
- { "filename", NULL, OFFSET(file_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "format_name", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "f", "set format name", OFFSET(format_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
- { "stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
- { "si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
- { "seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
- { "sp", "set seekpoint (seconds)", OFFSET(seek_point_d), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, (INT64_MAX-1) / 1000000, FLAGS },
- { NULL },
-};
-
-static const char *movie_get_name(void *ctx)
-{
- return "movie";
-}
-
-static const AVClass movie_class = {
- "MovieContext",
- movie_get_name,
- movie_options
-};
-
-static av_cold int movie_init(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
- AVInputFormat *iformat = NULL;
- AVCodec *codec;
- int ret;
- int64_t timestamp;
-
- av_register_all();
-
- // Try to find the movie format (container)
- iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL;
-
- movie->format_ctx = NULL;
- if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) {
- av_log(ctx, AV_LOG_ERROR,
- "Failed to avformat_open_input '%s'\n", movie->file_name);
- return ret;
- }
- if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0)
- av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n");
-
- // if seeking requested, we execute it
- if (movie->seek_point > 0) {
- timestamp = movie->seek_point;
- // add the stream start time, should it exist
- if (movie->format_ctx->start_time != AV_NOPTS_VALUE) {
- if (timestamp > INT64_MAX - movie->format_ctx->start_time) {
- av_log(ctx, AV_LOG_ERROR,
- "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n",
- movie->file_name, movie->format_ctx->start_time, movie->seek_point);
- return AVERROR(EINVAL);
- }
- timestamp += movie->format_ctx->start_time;
- }
- if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n",
- movie->file_name, timestamp);
- return ret;
- }
- }
-
- /* select the video stream */
- if ((ret = av_find_best_stream(movie->format_ctx, AVMEDIA_TYPE_VIDEO,
- movie->stream_index, -1, NULL, 0)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "No video stream with index '%d' found\n",
- movie->stream_index);
- return ret;
- }
- movie->stream_index = ret;
- movie->codec_ctx = movie->format_ctx->streams[movie->stream_index]->codec;
-
- /*
- * So now we've got a pointer to the so-called codec context for our video
- * stream, but we still have to find the actual codec and open it.
- */
- codec = avcodec_find_decoder(movie->codec_ctx->codec_id);
- if (!codec) {
- av_log(ctx, AV_LOG_ERROR, "Failed to find any codec\n");
- return AVERROR(EINVAL);
- }
-
- movie->codec_ctx->refcounted_frames = 1;
-
- if ((ret = avcodec_open2(movie->codec_ctx, codec, NULL)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Failed to open codec\n");
- return ret;
- }
-
- movie->w = movie->codec_ctx->width;
- movie->h = movie->codec_ctx->height;
-
- av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
- movie->seek_point, movie->format_name, movie->file_name,
- movie->stream_index);
-
- return 0;
-}
-
-static av_cold int init(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
-
- movie->seek_point = movie->seek_point_d * 1000000 + 0.5;
-
- return movie_init(ctx);
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
-
- if (movie->codec_ctx)
- avcodec_close(movie->codec_ctx);
- if (movie->format_ctx)
- avformat_close_input(&movie->format_ctx);
- av_frame_free(&movie->frame);
-}
-
-static int query_formats(AVFilterContext *ctx)
-{
- MovieContext *movie = ctx->priv;
- enum AVPixelFormat pix_fmts[] = { movie->codec_ctx->pix_fmt, AV_PIX_FMT_NONE };
-
- ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
- return 0;
-}
-
-static int config_output_props(AVFilterLink *outlink)
-{
- MovieContext *movie = outlink->src->priv;
-
- outlink->w = movie->w;
- outlink->h = movie->h;
- outlink->time_base = movie->format_ctx->streams[movie->stream_index]->time_base;
-
- return 0;
-}
-
-static int movie_get_frame(AVFilterLink *outlink)
-{
- MovieContext *movie = outlink->src->priv;
- AVPacket pkt;
- int ret, frame_decoded;
-
- if (movie->is_done == 1)
- return 0;
-
- movie->frame = av_frame_alloc();
- if (!movie->frame)
- return AVERROR(ENOMEM);
-
- while ((ret = av_read_frame(movie->format_ctx, &pkt)) >= 0) {
- // Is this a packet from the video stream?
- if (pkt.stream_index == movie->stream_index) {
- avcodec_decode_video2(movie->codec_ctx, movie->frame, &frame_decoded, &pkt);
-
- if (frame_decoded) {
- if (movie->frame->pkt_pts != AV_NOPTS_VALUE)
- movie->frame->pts = movie->frame->pkt_pts;
- av_dlog(outlink->src,
- "movie_get_frame(): file:'%s' pts:%"PRId64" time:%f aspect:%d/%d\n",
- movie->file_name, movie->frame->pts,
- (double)movie->frame->pts *
- av_q2d(movie->format_ctx->streams[movie->stream_index]->time_base),
- movie->frame->sample_aspect_ratio.num,
- movie->frame->sample_aspect_ratio.den);
- // We got it. Free the packet since we are returning
- av_free_packet(&pkt);
-
- return 0;
- }
- }
- // Free the packet that was allocated by av_read_frame
- av_free_packet(&pkt);
- }
-
- // On multi-frame source we should stop the mixing process when
- // the movie source does not have more frames
- if (ret == AVERROR_EOF)
- movie->is_done = 1;
- return ret;
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
- MovieContext *movie = outlink->src->priv;
- int ret;
-
- if (movie->is_done)
- return AVERROR_EOF;
- if ((ret = movie_get_frame(outlink)) < 0)
- return ret;
-
- ret = ff_filter_frame(outlink, movie->frame);
- movie->frame = NULL;
-
- return ret;
-}
-
-static const AVFilterPad avfilter_vsrc_movie_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .request_frame = request_frame,
- .config_props = config_output_props,
- },
- { NULL }
-};
-
-AVFilter ff_vsrc_movie = {
- .name = "movie",
- .description = NULL_IF_CONFIG_SMALL("Read from a movie source."),
- .priv_size = sizeof(MovieContext),
- .priv_class = &movie_class,
- .init = init,
- .uninit = uninit,
- .query_formats = query_formats,
-
- .inputs = NULL,
- .outputs = avfilter_vsrc_movie_outputs,
-};
diff --git a/libavfilter/vsrc_mptestsrc.c b/libavfilter/vsrc_mptestsrc.c
new file mode 100644
index 0000000..d045704
--- /dev/null
+++ b/libavfilter/vsrc_mptestsrc.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * MP test source, ported from MPlayer libmpcodecs/vf_test.c
+ */
+
+#include "libavutil/avstring.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+#define WIDTH 512
+#define HEIGHT 512
+
+enum test_type {
+ TEST_DC_LUMA,
+ TEST_DC_CHROMA,
+ TEST_FREQ_LUMA,
+ TEST_FREQ_CHROMA,
+ TEST_AMP_LUMA,
+ TEST_AMP_CHROMA,
+ TEST_CBP,
+ TEST_MV,
+ TEST_RING1,
+ TEST_RING2,
+ TEST_ALL,
+ TEST_NB
+};
+
+typedef struct MPTestContext {
+ const AVClass *class;
+ AVRational frame_rate;
+ int64_t pts, max_pts, duration;
+ int hsub, vsub;
+ enum test_type test;
+} MPTestContext;
+
+#define OFFSET(x) offsetof(MPTestContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption mptestsrc_options[]= {
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
+ { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+ { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
+
+ { "test", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
+ { "t", "set test to perform", OFFSET(test), AV_OPT_TYPE_INT, {.i64=TEST_ALL}, 0, INT_MAX, FLAGS, "test" },
+ { "dc_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "dc_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_DC_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "freq_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "freq_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_FREQ_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "amp_luma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_LUMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "amp_chroma", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_AMP_CHROMA}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "cbp", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_CBP}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "mv", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_MV}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "ring1", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING1}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "ring2", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_RING2}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { "all", "", 0, AV_OPT_TYPE_CONST, {.i64=TEST_ALL}, INT_MIN, INT_MAX, FLAGS, "test" },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mptestsrc);
+
+static double c[64];
+
+static void init_idct(void)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ double s = i == 0 ? sqrt(0.125) : 0.5;
+
+ for (j = 0; j < 8; j++)
+ c[i*8+j] = s*cos((M_PI/8.0)*i*(j+0.5));
+ }
+}
+
+static void idct(uint8_t *dst, int dst_linesize, int src[64])
+{
+ int i, j, k;
+ double tmp[64];
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++) {
+ double sum = 0.0;
+
+ for (k = 0; k < 8; k++)
+ sum += c[k*8+j] * src[8*i+k];
+
+ tmp[8*i+j] = sum;
+ }
+ }
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++) {
+ double sum = 0.0;
+
+ for (k = 0; k < 8; k++)
+ sum += c[k*8+i]*tmp[8*k+j];
+
+ dst[dst_linesize*i + j] = av_clip((int)floor(sum+0.5), 0, 255);
+ }
+ }
+}
+
+static void draw_dc(uint8_t *dst, int dst_linesize, int color, int w, int h)
+{
+ int x, y;
+
+ for (y = 0; y < h; y++)
+ for (x = 0; x < w; x++)
+ dst[x + y*dst_linesize] = color;
+}
+
+static void draw_basis(uint8_t *dst, int dst_linesize, int amp, int freq, int dc)
+{
+ int src[64];
+
+ memset(src, 0, 64*sizeof(int));
+ src[0] = dc;
+ if (amp)
+ src[freq] = amp;
+ idct(dst, dst_linesize, src);
+}
+
+static void draw_cbp(uint8_t *dst[3], int dst_linesize[3], int cbp, int amp, int dc)
+{
+ if (cbp&1) draw_basis(dst[0] , dst_linesize[0], amp, 1, dc);
+ if (cbp&2) draw_basis(dst[0]+8 , dst_linesize[0], amp, 1, dc);
+ if (cbp&4) draw_basis(dst[0]+ 8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
+ if (cbp&8) draw_basis(dst[0]+8+8*dst_linesize[0], dst_linesize[0], amp, 1, dc);
+ if (cbp&16) draw_basis(dst[1] , dst_linesize[1], amp, 1, dc);
+ if (cbp&32) draw_basis(dst[2] , dst_linesize[2], amp, 1, dc);
+}
+
+static void dc_test(uint8_t *dst, int dst_linesize, int w, int h, int off)
+{
+ const int step = FFMAX(256/(w*h/256), 1);
+ int x, y, color = off;
+
+ for (y = 0; y < h; y += 16) {
+ for (x = 0; x < w; x += 16) {
+ draw_dc(dst + x + y*dst_linesize, dst_linesize, color, 8, 8);
+ color += step;
+ }
+ }
+}
+
+static void freq_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y, freq = 0;
+
+ for (y = 0; y < 8*16; y += 16) {
+ for (x = 0; x < 8*16; x += 16) {
+ draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*(96+off), freq, 128*8);
+ freq++;
+ }
+ }
+}
+
+static void amp_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y, amp = off;
+
+ for (y = 0; y < 16*16; y += 16) {
+ for (x = 0; x < 16*16; x += 16) {
+ draw_basis(dst + x + y*dst_linesize, dst_linesize, 4*amp, 1, 128*8);
+ amp++;
+ }
+ }
+}
+
+static void cbp_test(uint8_t *dst[3], int dst_linesize[3], int off)
+{
+ int x, y, cbp = 0;
+
+ for (y = 0; y < 16*8; y += 16) {
+ for (x = 0; x < 16*8; x += 16) {
+ uint8_t *dst1[3];
+ dst1[0] = dst[0] + x*2 + y*2*dst_linesize[0];
+ dst1[1] = dst[1] + x + y* dst_linesize[1];
+ dst1[2] = dst[2] + x + y* dst_linesize[2];
+
+ draw_cbp(dst1, dst_linesize, cbp, (64+off)*4, 128*8);
+ cbp++;
+ }
+ }
+}
+
+static void mv_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y;
+
+ for (y = 0; y < 16*16; y++) {
+ if (y&16)
+ continue;
+ for (x = 0; x < 16*16; x++)
+ dst[x + y*dst_linesize] = x + off*8/(y/32+1);
+ }
+}
+
+static void ring1_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y, color = 0;
+
+ for (y = off; y < 16*16; y += 16) {
+ for (x = off; x < 16*16; x += 16) {
+ draw_dc(dst + x + y*dst_linesize, dst_linesize, ((x+y)&16) ? color : -color, 16, 16);
+ color++;
+ }
+ }
+}
+
+static void ring2_test(uint8_t *dst, int dst_linesize, int off)
+{
+ int x, y;
+
+ for (y = 0; y < 16*16; y++) {
+ for (x = 0; x < 16*16; x++) {
+ double d = sqrt((x-8*16)*(x-8*16) + (y-8*16)*(y-8*16));
+ double r = d/20 - (int)(d/20);
+ if (r < off/30.0) {
+ dst[x + y*dst_linesize] = 255;
+ dst[x + y*dst_linesize+256] = 0;
+ } else {
+ dst[x + y*dst_linesize] = x;
+ dst[x + y*dst_linesize+256] = x;
+ }
+ }
+ }
+}
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ MPTestContext *test = ctx->priv;
+
+ test->max_pts = test->duration >= 0 ?
+ av_rescale_q(test->duration, AV_TIME_BASE_Q, av_inv_q(test->frame_rate)) : -1;
+ test->pts = 0;
+
+ av_log(ctx, AV_LOG_VERBOSE, "rate:%d/%d duration:%f\n",
+ test->frame_rate.num, test->frame_rate.den,
+ test->duration < 0 ? -1 : test->max_pts * av_q2d(av_inv_q(test->frame_rate)));
+ init_idct();
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ MPTestContext *test = ctx->priv;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(outlink->format);
+
+ test->hsub = pix_desc->log2_chroma_w;
+ test->vsub = pix_desc->log2_chroma_h;
+
+ outlink->w = WIDTH;
+ outlink->h = HEIGHT;
+ outlink->time_base = av_inv_q(test->frame_rate);
+
+ return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ MPTestContext *test = outlink->src->priv;
+ AVFrame *picref;
+ int w = WIDTH, h = HEIGHT,
+ cw = FF_CEIL_RSHIFT(w, test->hsub), ch = FF_CEIL_RSHIFT(h, test->vsub);
+ unsigned int frame = outlink->frame_count;
+ enum test_type tt = test->test;
+ int i;
+
+ if (test->max_pts >= 0 && test->pts > test->max_pts)
+ return AVERROR_EOF;
+ picref = ff_get_video_buffer(outlink, w, h);
+ if (!picref)
+ return AVERROR(ENOMEM);
+ picref->pts = test->pts++;
+
+ // clean image
+ for (i = 0; i < h; i++)
+ memset(picref->data[0] + i*picref->linesize[0], 0, w);
+ for (i = 0; i < ch; i++) {
+ memset(picref->data[1] + i*picref->linesize[1], 128, cw);
+ memset(picref->data[2] + i*picref->linesize[2], 128, cw);
+ }
+
+ if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
+ tt = (frame/30)%(TEST_NB-1);
+
+ switch (tt) {
+ case TEST_DC_LUMA: dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30); break;
+ case TEST_DC_CHROMA: dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30); break;
+ case TEST_FREQ_LUMA: freq_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_FREQ_CHROMA: freq_test(picref->data[1], picref->linesize[1], frame%30); break;
+ case TEST_AMP_LUMA: amp_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_AMP_CHROMA: amp_test(picref->data[1], picref->linesize[1], frame%30); break;
+ case TEST_CBP: cbp_test(picref->data , picref->linesize , frame%30); break;
+ case TEST_MV: mv_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_RING1: ring1_test(picref->data[0], picref->linesize[0], frame%30); break;
+ case TEST_RING2: ring2_test(picref->data[0], picref->linesize[0], frame%30); break;
+ }
+
+ return ff_filter_frame(outlink, picref);
+}
+
+static const AVFilterPad mptestsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_mptestsrc = {
+ .name = "mptestsrc",
+ .description = NULL_IF_CONFIG_SMALL("Generate various test pattern."),
+ .priv_size = sizeof(MPTestContext),
+ .priv_class = &mptestsrc_class,
+ .init = init,
+ .query_formats = query_formats,
+ .inputs = NULL,
+ .outputs = mptestsrc_outputs,
+};
diff --git a/libavfilter/vsrc_nullsrc.c b/libavfilter/vsrc_nullsrc.c
deleted file mode 100644
index 63e90fd..0000000
--- a/libavfilter/vsrc_nullsrc.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * null video source
- */
-
-#include <stdio.h>
-
-#include "libavutil/avstring.h"
-#include "libavutil/eval.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "libavutil/parseutils.h"
-#include "avfilter.h"
-#include "formats.h"
-#include "internal.h"
-
-static const char *const var_names[] = {
- "E",
- "PHI",
- "PI",
- "AVTB", /* default timebase 1/AV_TIME_BASE */
- NULL
-};
-
-enum var_name {
- VAR_E,
- VAR_PHI,
- VAR_PI,
- VAR_AVTB,
- VAR_VARS_NB
-};
-
-typedef struct NullContext {
- const AVClass *class;
- int w, h;
- char *tb_expr;
- double var_values[VAR_VARS_NB];
-} NullContext;
-
-static int config_props(AVFilterLink *outlink)
-{
- AVFilterContext *ctx = outlink->src;
- NullContext *priv = ctx->priv;
- AVRational tb;
- int ret;
- double res;
-
- priv->var_values[VAR_E] = M_E;
- priv->var_values[VAR_PHI] = M_PHI;
- priv->var_values[VAR_PI] = M_PI;
- priv->var_values[VAR_AVTB] = av_q2d(AV_TIME_BASE_Q);
-
- if ((ret = av_expr_parse_and_eval(&res, priv->tb_expr, var_names, priv->var_values,
- NULL, NULL, NULL, NULL, NULL, 0, NULL)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid expression '%s' for timebase.\n", priv->tb_expr);
- return ret;
- }
- tb = av_d2q(res, INT_MAX);
- if (tb.num <= 0 || tb.den <= 0) {
- av_log(ctx, AV_LOG_ERROR,
- "Invalid non-positive value for the timebase %d/%d.\n",
- tb.num, tb.den);
- return AVERROR(EINVAL);
- }
-
- outlink->w = priv->w;
- outlink->h = priv->h;
- outlink->time_base = tb;
-
- av_log(outlink->src, AV_LOG_VERBOSE, "w:%d h:%d tb:%d/%d\n", priv->w, priv->h,
- tb.num, tb.den);
-
- return 0;
-}
-
-static int request_frame(AVFilterLink *link)
-{
- return -1;
-}
-
-#define OFFSET(x) offsetof(NullContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption options[] = {
- { "width", NULL, OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 352 }, 1, INT_MAX, FLAGS },
- { "height", NULL, OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 288 }, 1, INT_MAX, FLAGS },
- { "timebase", NULL, OFFSET(tb_expr), AV_OPT_TYPE_STRING, { .str = "AVTB" }, 0, 0, FLAGS },
- { NULL },
-};
-
-static const AVClass nullsrc_class = {
- .class_name = "nullsrc",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVFilterPad avfilter_vsrc_nullsrc_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props,
- .request_frame = request_frame,
- },
- { NULL }
-};
-
-AVFilter ff_vsrc_nullsrc = {
- .name = "nullsrc",
- .description = NULL_IF_CONFIG_SMALL("Null video source, never return images."),
-
- .priv_size = sizeof(NullContext),
- .priv_class = &nullsrc_class,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_nullsrc_outputs,
-};
diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c
index e41625e..8814440 100644
--- a/libavfilter/vsrc_testsrc.c
+++ b/libavfilter/vsrc_testsrc.c
@@ -1,21 +1,22 @@
/*
* Copyright (c) 2007 Nicolas George <nicolas.george@normalesup.org>
* Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2012 Paul B Mahol
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,87 +29,96 @@
*
* rgbtestsrc is ported from MPlayer libmpcodecs/vf_rgbtest.c by
* Michael Niedermayer.
+ *
+ * smptebars and smptehdbars are by Paul B Mahol.
*/
#include <float.h>
+#include "libavutil/avassert.h"
#include "libavutil/common.h"
-#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
+#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct TestSourceContext {
const AVClass *class;
- int h, w;
+ int w, h;
unsigned int nb_frame;
- AVRational time_base;
- int64_t pts, max_pts;
- char *size; ///< video frame size
- char *rate; ///< video frame rate
- char *duration; ///< total duration of the generated video
+ AVRational time_base, frame_rate;
+ int64_t pts;
+ int64_t duration; ///< duration expressed in microseconds
AVRational sar; ///< sample aspect ratio
+ int draw_once; ///< draw only the first frame, always put out the same picture
+ int draw_once_reset; ///< draw only the first frame or in case of reset
+ AVFrame *picref; ///< cached reference containing the painted picture
void (* fill_picture_fn)(AVFilterContext *ctx, AVFrame *frame);
+ /* only used by testsrc */
+ int nb_decimals;
+
+ /* only used by color */
+ FFDrawContext draw;
+ FFDrawColor color;
+ uint8_t color_rgba[4];
+
/* only used by rgbtest */
- int rgba_map[4];
+ uint8_t rgba_map[4];
+
+ /* only used by haldclut */
+ int level;
} TestSourceContext;
#define OFFSET(x) offsetof(TestSourceContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-static const AVOption testsrc_options[] = {
- { "size", "set video size", OFFSET(size), AV_OPT_TYPE_STRING, {.str = "320x240"}, .flags = FLAGS },
- { "s", "set video size", OFFSET(size), AV_OPT_TYPE_STRING, {.str = "320x240"}, .flags = FLAGS },
- { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, .flags = FLAGS },
- { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_STRING, {.str = "25"}, .flags = FLAGS },
- { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
- { "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX, FLAGS },
- { NULL },
-};
+#define SIZE_OPTIONS \
+ { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
+ { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS },\
-static av_cold int init_common(AVFilterContext *ctx)
-{
- TestSourceContext *test = ctx->priv;
- AVRational frame_rate_q;
- int64_t duration = -1;
- int ret = 0;
+#define COMMON_OPTIONS_NOSIZE \
+ { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },\
+ { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },\
+ { "duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
+ { "d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },\
+ { "sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl= 1}, 0, INT_MAX, FLAGS },
- if ((ret = av_parse_video_size(&test->w, &test->h, test->size)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'\n", test->size);
- return ret;
- }
+#define COMMON_OPTIONS SIZE_OPTIONS COMMON_OPTIONS_NOSIZE
- if ((ret = av_parse_video_rate(&frame_rate_q, test->rate)) < 0 ||
- frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", test->rate);
- return ret;
- }
+static const AVOption options[] = {
+ COMMON_OPTIONS
+ { NULL }
+};
- if ((test->duration) && (ret = av_parse_time(&duration, test->duration, 1)) < 0) {
- av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", test->duration);
- return ret;
- }
+static av_cold int init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
- test->time_base.num = frame_rate_q.den;
- test->time_base.den = frame_rate_q.num;
- test->max_pts = duration >= 0 ?
- av_rescale_q(duration, AV_TIME_BASE_Q, test->time_base) : -1;
+ test->time_base = av_inv_q(test->frame_rate);
test->nb_frame = 0;
test->pts = 0;
- av_log(ctx, AV_LOG_DEBUG, "size:%dx%d rate:%d/%d duration:%f sar:%d/%d\n",
- test->w, test->h, frame_rate_q.num, frame_rate_q.den,
- duration < 0 ? -1 : test->max_pts * av_q2d(test->time_base),
+ av_log(ctx, AV_LOG_VERBOSE, "size:%dx%d rate:%d/%d duration:%f sar:%d/%d\n",
+ test->w, test->h, test->frame_rate.num, test->frame_rate.den,
+ test->duration < 0 ? -1 : (double)test->duration/1000000,
test->sar.num, test->sar.den);
return 0;
}
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ av_frame_free(&test->picref);
+}
+
static int config_props(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
@@ -116,7 +126,8 @@ static int config_props(AVFilterLink *outlink)
outlink->w = test->w;
outlink->h = test->h;
outlink->sample_aspect_ratio = test->sar;
- outlink->time_base = test->time_base;
+ outlink->frame_rate = test->frame_rate;
+ outlink->time_base = test->time_base;
return 0;
}
@@ -126,36 +137,320 @@ static int request_frame(AVFilterLink *outlink)
TestSourceContext *test = outlink->src->priv;
AVFrame *frame;
- if (test->max_pts >= 0 && test->pts > test->max_pts)
+ if (test->duration >= 0 &&
+ av_rescale_q(test->pts, test->time_base, AV_TIME_BASE_Q) >= test->duration)
return AVERROR_EOF;
- frame = ff_get_video_buffer(outlink, test->w, test->h);
+
+ if (test->draw_once) {
+ if (test->draw_once_reset) {
+ av_frame_free(&test->picref);
+ test->draw_once_reset = 0;
+ }
+ if (!test->picref) {
+ test->picref =
+ ff_get_video_buffer(outlink, test->w, test->h);
+ if (!test->picref)
+ return AVERROR(ENOMEM);
+ test->fill_picture_fn(outlink->src, test->picref);
+ }
+ frame = av_frame_clone(test->picref);
+ } else
+ frame = ff_get_video_buffer(outlink, test->w, test->h);
+
if (!frame)
return AVERROR(ENOMEM);
-
- frame->pts = test->pts++;
+ frame->pts = test->pts;
frame->key_frame = 1;
frame->interlaced_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_I;
frame->sample_aspect_ratio = test->sar;
+ if (!test->draw_once)
+ test->fill_picture_fn(outlink->src, frame);
+
+ test->pts++;
test->nb_frame++;
- test->fill_picture_fn(outlink->src, frame);
return ff_filter_frame(outlink, frame);
}
-#if CONFIG_TESTSRC_FILTER
+#if CONFIG_COLOR_FILTER
+
+static const AVOption color_options[] = {
+ { "color", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "c", "set color", OFFSET(color_rgba), AV_OPT_TYPE_COLOR, {.str = "black"}, CHAR_MIN, CHAR_MAX, FLAGS },
+ COMMON_OPTIONS
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(color);
+
+static void color_fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ TestSourceContext *test = ctx->priv;
+ ff_fill_rectangle(&test->draw, &test->color,
+ picref->data, picref->linesize,
+ 0, 0, test->w, test->h);
+}
+
+static av_cold int color_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+ test->fill_picture_fn = color_fill_picture;
+ test->draw_once = 1;
+ return init(ctx);
+}
+
+static int color_query_formats(AVFilterContext *ctx)
+{
+ ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
+ return 0;
+}
+
+static int color_config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->src;
+ TestSourceContext *test = ctx->priv;
+ int ret;
+
+ ff_draw_init(&test->draw, inlink->format, 0);
+ ff_draw_color(&test->draw, &test->color, test->color_rgba);
+
+ test->w = ff_draw_round_to_sub(&test->draw, 0, -1, test->w);
+ test->h = ff_draw_round_to_sub(&test->draw, 1, -1, test->h);
+ if (av_image_check_size(test->w, test->h, 0, ctx) < 0)
+ return AVERROR(EINVAL);
+
+ if ((ret = config_props(inlink)) < 0)
+ return ret;
+
+ return 0;
+}
+
+static int color_process_command(AVFilterContext *ctx, const char *cmd, const char *args,
+ char *res, int res_len, int flags)
+{
+ TestSourceContext *test = ctx->priv;
+ int ret;
+
+ if (!strcmp(cmd, "color") || !strcmp(cmd, "c")) {
+ uint8_t color_rgba[4];
+
+ ret = av_parse_color(color_rgba, args, -1, ctx);
+ if (ret < 0)
+ return ret;
+
+ memcpy(test->color_rgba, color_rgba, sizeof(color_rgba));
+ ff_draw_color(&test->draw, &test->color, test->color_rgba);
+ test->draw_once_reset = 1;
+ return 0;
+ }
+
+ return AVERROR(ENOSYS);
+}
+
+static const AVFilterPad color_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = color_config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_color = {
+ .name = "color",
+ .description = NULL_IF_CONFIG_SMALL("Provide an uniformly colored input."),
+ .priv_class = &color_class,
+ .priv_size = sizeof(TestSourceContext),
+ .init = color_init,
+ .uninit = uninit,
+ .query_formats = color_query_formats,
+ .inputs = NULL,
+ .outputs = color_outputs,
+ .process_command = color_process_command,
+};
+
+#endif /* CONFIG_COLOR_FILTER */
+
+#if CONFIG_HALDCLUTSRC_FILTER
+
+static const AVOption haldclutsrc_options[] = {
+ { "level", "set level", OFFSET(level), AV_OPT_TYPE_INT, {.i64 = 6}, 2, 8, FLAGS },
+ COMMON_OPTIONS_NOSIZE
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(haldclutsrc);
+
+static void haldclutsrc_fill_picture(AVFilterContext *ctx, AVFrame *frame)
+{
+ int i, j, k, x = 0, y = 0, is16bit = 0, step;
+ uint32_t alpha = 0;
+ const TestSourceContext *hc = ctx->priv;
+ int level = hc->level;
+ float scale;
+ const int w = frame->width;
+ const int h = frame->height;
+ const uint8_t *data = frame->data[0];
+ const int linesize = frame->linesize[0];
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ uint8_t rgba_map[4];
+
+ av_assert0(w == h && w == level*level*level);
+
+ ff_fill_rgba_map(rgba_map, frame->format);
+
+ switch (frame->format) {
+ case AV_PIX_FMT_RGB48:
+ case AV_PIX_FMT_BGR48:
+ case AV_PIX_FMT_RGBA64:
+ case AV_PIX_FMT_BGRA64:
+ is16bit = 1;
+ alpha = 0xffff;
+ break;
+ case AV_PIX_FMT_RGBA:
+ case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_ARGB:
+ case AV_PIX_FMT_ABGR:
+ alpha = 0xff;
+ break;
+ }
+
+ step = av_get_padded_bits_per_pixel(desc) >> (3 + is16bit);
+ scale = ((float)(1 << (8*(is16bit+1))) - 1) / (level*level - 1);
+
+#define LOAD_CLUT(nbits) do { \
+ uint##nbits##_t *dst = ((uint##nbits##_t *)(data + y*linesize)) + x*step; \
+ dst[rgba_map[0]] = av_clip_uint##nbits(i * scale); \
+ dst[rgba_map[1]] = av_clip_uint##nbits(j * scale); \
+ dst[rgba_map[2]] = av_clip_uint##nbits(k * scale); \
+ if (step == 4) \
+ dst[rgba_map[3]] = alpha; \
+} while (0)
+
+ level *= level;
+ for (k = 0; k < level; k++) {
+ for (j = 0; j < level; j++) {
+ for (i = 0; i < level; i++) {
+ if (!is16bit)
+ LOAD_CLUT(8);
+ else
+ LOAD_CLUT(16);
+ if (++x == w) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+ }
+}
+
+static av_cold int haldclutsrc_init(AVFilterContext *ctx)
+{
+ TestSourceContext *hc = ctx->priv;
+ hc->fill_picture_fn = haldclutsrc_fill_picture;
+ hc->draw_once = 1;
+ return init(ctx);
+}
+
+static int haldclutsrc_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
+ AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR,
+ AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_RGB48, AV_PIX_FMT_BGR48,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
+ AV_PIX_FMT_NONE,
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static int haldclutsrc_config_props(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ TestSourceContext *hc = ctx->priv;
+
+ hc->w = hc->h = hc->level * hc->level * hc->level;
+ return config_props(outlink);
+}
+
+static const AVFilterPad haldclutsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = haldclutsrc_config_props,
+ },
+ { NULL }
+};
+
+AVFilter ff_vsrc_haldclutsrc = {
+ .name = "haldclutsrc",
+ .description = NULL_IF_CONFIG_SMALL("Provide an identity Hald CLUT."),
+ .priv_class = &haldclutsrc_class,
+ .priv_size = sizeof(TestSourceContext),
+ .init = haldclutsrc_init,
+ .uninit = uninit,
+ .query_formats = haldclutsrc_query_formats,
+ .inputs = NULL,
+ .outputs = haldclutsrc_outputs,
+};
+#endif /* CONFIG_HALDCLUTSRC_FILTER */
+
+#if CONFIG_NULLSRC_FILTER
+
+#define nullsrc_options options
+AVFILTER_DEFINE_CLASS(nullsrc);
+
+static void nullsrc_fill_picture(AVFilterContext *ctx, AVFrame *picref) { }
-static const char *testsrc_get_name(void *ctx)
+static av_cold int nullsrc_init(AVFilterContext *ctx)
{
- return "testsrc";
+ TestSourceContext *test = ctx->priv;
+
+ test->fill_picture_fn = nullsrc_fill_picture;
+ return init(ctx);
}
-static const AVClass testsrc_class = {
- .class_name = "TestSourceContext",
- .item_name = testsrc_get_name,
- .option = testsrc_options,
+static const AVFilterPad nullsrc_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL },
+};
+
+AVFilter ff_vsrc_nullsrc = {
+ .name = "nullsrc",
+ .description = NULL_IF_CONFIG_SMALL("Null video source, return unprocessed video frames."),
+ .init = nullsrc_init,
+ .uninit = uninit,
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &nullsrc_class,
+ .inputs = NULL,
+ .outputs = nullsrc_outputs,
+};
+
+#endif /* CONFIG_NULLSRC_FILTER */
+
+#if CONFIG_TESTSRC_FILTER
+
+static const AVOption testsrc_options[] = {
+ COMMON_OPTIONS
+ { "decimals", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
+ { "n", "set number of decimals to show", OFFSET(nb_decimals), AV_OPT_TYPE_INT, {.i64=0}, 0, 17, FLAGS },
+ { NULL }
};
+AVFILTER_DEFINE_CLASS(testsrc);
+
/**
* Fill a rectangle with value val.
*
@@ -168,8 +463,8 @@ static const AVClass testsrc_class = {
* @param w width of the rectangle to draw, expressed as a number of segment_width units
* @param h height of the rectangle to draw, expressed as a number of segment_width units
*/
-static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, unsigned segment_width,
- unsigned x, unsigned y, unsigned w, unsigned h)
+static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, int segment_width,
+ int x, int y, int w, int h)
{
int i;
int step = 3;
@@ -183,8 +478,8 @@ static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize, unsigne
}
}
-static void draw_digit(int digit, uint8_t *dst, unsigned dst_linesize,
- unsigned segment_width)
+static void draw_digit(int digit, uint8_t *dst, int dst_linesize,
+ int segment_width)
{
#define TOP_HBAR 1
#define MID_HBAR 2
@@ -278,7 +573,7 @@ static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
}
/* draw sliding color line */
- p = data + frame->linesize[0] * height * 3/4;
+ p0 = p = data + frame->linesize[0] * (height * 3/4);
grad = (256 * test->nb_frame * test->time_base.num / test->time_base.den) %
GRADIENT_SIZE;
rgrad = 0;
@@ -306,15 +601,25 @@ static void test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
if (grad >= GRADIENT_SIZE)
grad -= GRADIENT_SIZE;
}
+ p = p0;
for (y = height / 8; y > 0; y--) {
- memcpy(p, p - frame->linesize[0], 3 * width);
+ memcpy(p+frame->linesize[0], p, 3 * width);
p += frame->linesize[0];
}
/* draw digits */
seg_size = width / 80;
if (seg_size >= 1 && height >= 13 * seg_size) {
- second = test->nb_frame * test->time_base.num / test->time_base.den;
+ int64_t p10decimals = 1;
+ double time = av_q2d(test->time_base) * test->nb_frame *
+ pow(10, test->nb_decimals);
+ if (time >= INT_MAX)
+ return;
+
+ for (x = 0; x < test->nb_decimals; x++)
+ p10decimals *= 10;
+
+ second = av_rescale_rnd(test->nb_frame * test->time_base.num, p10decimals, test->time_base.den, AV_ROUND_ZERO);
x = width - (width - seg_size * 64) / 2;
y = (height - seg_size * 13) / 2;
p = data + (x*3 + y * frame->linesize[0]);
@@ -333,7 +638,7 @@ static av_cold int test_init(AVFilterContext *ctx)
TestSourceContext *test = ctx->priv;
test->fill_picture_fn = test_fill_picture;
- return init_common(ctx);
+ return init(ctx);
}
static int test_query_formats(AVFilterContext *ctx)
@@ -361,28 +666,18 @@ AVFilter ff_vsrc_testsrc = {
.priv_size = sizeof(TestSourceContext),
.priv_class = &testsrc_class,
.init = test_init,
-
+ .uninit = uninit,
.query_formats = test_query_formats,
-
- .inputs = NULL,
-
- .outputs = avfilter_vsrc_testsrc_outputs,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_testsrc_outputs,
};
#endif /* CONFIG_TESTSRC_FILTER */
#if CONFIG_RGBTESTSRC_FILTER
-static const char *rgbtestsrc_get_name(void *ctx)
-{
- return "rgbtestsrc";
-}
-
-static const AVClass rgbtestsrc_class = {
- .class_name = "RGBTestSourceContext",
- .item_name = rgbtestsrc_get_name,
- .option = testsrc_options,
-};
+#define rgbtestsrc_options options
+AVFILTER_DEFINE_CLASS(rgbtestsrc);
#define R 0
#define G 1
@@ -391,7 +686,7 @@ static const AVClass rgbtestsrc_class = {
static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize,
int x, int y, int r, int g, int b, enum AVPixelFormat fmt,
- int rgba_map[4])
+ uint8_t rgba_map[4])
{
int32_t v;
uint8_t *p;
@@ -413,7 +708,7 @@ static void rgbtest_put_pixel(uint8_t *dst, int dst_linesize,
case AV_PIX_FMT_BGRA:
case AV_PIX_FMT_ARGB:
case AV_PIX_FMT_ABGR:
- v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8));
+ v = (r << (rgba_map[R]*8)) + (g << (rgba_map[G]*8)) + (b << (rgba_map[B]*8)) + (255 << (rgba_map[A]*8));
p = dst + 4*x + y*dst_linesize;
AV_WL32(p, v);
break;
@@ -444,8 +739,9 @@ static av_cold int rgbtest_init(AVFilterContext *ctx)
{
TestSourceContext *test = ctx->priv;
+ test->draw_once = 1;
test->fill_picture_fn = rgbtest_fill_picture;
- return init_common(ctx);
+ return init(ctx);
}
static int rgbtest_query_formats(AVFilterContext *ctx)
@@ -466,15 +762,7 @@ static int rgbtest_config_props(AVFilterLink *outlink)
{
TestSourceContext *test = outlink->src->priv;
- switch (outlink->format) {
- case AV_PIX_FMT_ARGB: test->rgba_map[A] = 0; test->rgba_map[R] = 1; test->rgba_map[G] = 2; test->rgba_map[B] = 3; break;
- case AV_PIX_FMT_ABGR: test->rgba_map[A] = 0; test->rgba_map[B] = 1; test->rgba_map[G] = 2; test->rgba_map[R] = 3; break;
- case AV_PIX_FMT_RGBA:
- case AV_PIX_FMT_RGB24: test->rgba_map[R] = 0; test->rgba_map[G] = 1; test->rgba_map[B] = 2; test->rgba_map[A] = 3; break;
- case AV_PIX_FMT_BGRA:
- case AV_PIX_FMT_BGR24: test->rgba_map[B] = 0; test->rgba_map[G] = 1; test->rgba_map[R] = 2; test->rgba_map[A] = 3; break;
- }
-
+ ff_fill_rgba_map(test->rgba_map, outlink->format);
return config_props(outlink);
}
@@ -494,12 +782,290 @@ AVFilter ff_vsrc_rgbtestsrc = {
.priv_size = sizeof(TestSourceContext),
.priv_class = &rgbtestsrc_class,
.init = rgbtest_init,
-
+ .uninit = uninit,
.query_formats = rgbtest_query_formats,
+ .inputs = NULL,
+ .outputs = avfilter_vsrc_rgbtestsrc_outputs,
+};
- .inputs = NULL,
+#endif /* CONFIG_RGBTESTSRC_FILTER */
+
+#if CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER
- .outputs = avfilter_vsrc_rgbtestsrc_outputs,
+static const uint8_t rainbow[7][4] = {
+ { 180, 128, 128, 255 }, /* gray */
+ { 168, 44, 136, 255 }, /* yellow */
+ { 145, 147, 44, 255 }, /* cyan */
+ { 133, 63, 52, 255 }, /* green */
+ { 63, 193, 204, 255 }, /* magenta */
+ { 51, 109, 212, 255 }, /* red */
+ { 28, 212, 120, 255 }, /* blue */
};
-#endif /* CONFIG_RGBTESTSRC_FILTER */
+static const uint8_t wobnair[7][4] = {
+ { 32, 240, 118, 255 }, /* blue */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 54, 184, 198, 255 }, /* magenta */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 188, 154, 16, 255 }, /* cyan */
+ { 19, 128, 128, 255 }, /* 7.5% intensity black */
+ { 191, 128, 128, 255 }, /* gray */
+};
+
+static const uint8_t white[4] = { 235, 128, 128, 255 };
+static const uint8_t black[4] = { 19, 128, 128, 255 }; /* 7.5% intensity black */
+
+/* pluge pulses */
+static const uint8_t neg4ire[4] = { 9, 128, 128, 255 }; /* 3.5% intensity black */
+static const uint8_t pos4ire[4] = { 29, 128, 128, 255 }; /* 11.5% intensity black */
+
+/* fudged Q/-I */
+static const uint8_t i_pixel[4] = { 61, 153, 99, 255 };
+static const uint8_t q_pixel[4] = { 35, 174, 152, 255 };
+
+static const uint8_t gray40[4] = { 104, 128, 128, 255 };
+static const uint8_t gray15[4] = { 49, 128, 128, 255 };
+static const uint8_t cyan[4] = { 188, 154, 16, 255 };
+static const uint8_t yellow[4] = { 219, 16, 138, 255 };
+static const uint8_t blue[4] = { 32, 240, 118, 255 };
+static const uint8_t red[4] = { 63, 102, 240, 255 };
+static const uint8_t black0[4] = { 16, 128, 128, 255 };
+static const uint8_t black2[4] = { 20, 128, 128, 255 };
+static const uint8_t black4[4] = { 25, 128, 128, 255 };
+static const uint8_t neg2[4] = { 12, 128, 128, 255 };
+
+static void draw_bar(TestSourceContext *test, const uint8_t color[4],
+ unsigned x, unsigned y, unsigned w, unsigned h,
+ AVFrame *frame)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+ uint8_t *p, *p0;
+ int plane;
+
+ x = FFMIN(x, test->w - 1);
+ y = FFMIN(y, test->h - 1);
+ w = FFMIN(w, test->w - x);
+ h = FFMIN(h, test->h - y);
+
+ av_assert0(x + w <= test->w);
+ av_assert0(y + h <= test->h);
+
+ for (plane = 0; frame->data[plane]; plane++) {
+ const int c = color[plane];
+ const int linesize = frame->linesize[plane];
+ int i, px, py, pw, ph;
+
+ if (plane == 1 || plane == 2) {
+ px = x >> desc->log2_chroma_w;
+ pw = w >> desc->log2_chroma_w;
+ py = y >> desc->log2_chroma_h;
+ ph = h >> desc->log2_chroma_h;
+ } else {
+ px = x;
+ pw = w;
+ py = y;
+ ph = h;
+ }
+
+ p0 = p = frame->data[plane] + py * linesize + px;
+ memset(p, c, pw);
+ p += linesize;
+ for (i = 1; i < ph; i++, p += linesize)
+ memcpy(p, p0, pw);
+ }
+}
+
+static int smptebars_query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE,
+ };
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+ return 0;
+}
+
+static const AVFilterPad smptebars_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .request_frame = request_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+#if CONFIG_SMPTEBARS_FILTER
+
+#define smptebars_options options
+AVFILTER_DEFINE_CLASS(smptebars);
+
+static void smptebars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ TestSourceContext *test = ctx->priv;
+ int r_w, r_h, w_h, p_w, p_h, i, tmp, x = 0;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
+
+ av_frame_set_colorspace(picref, AVCOL_SPC_BT470BG);
+
+ r_w = FFALIGN((test->w + 6) / 7, 1 << pixdesc->log2_chroma_w);
+ r_h = FFALIGN(test->h * 2 / 3, 1 << pixdesc->log2_chroma_h);
+ w_h = FFALIGN(test->h * 3 / 4 - r_h, 1 << pixdesc->log2_chroma_h);
+ p_w = FFALIGN(r_w * 5 / 4, 1 << pixdesc->log2_chroma_w);
+ p_h = test->h - w_h - r_h;
+
+ for (i = 0; i < 7; i++) {
+ draw_bar(test, rainbow[i], x, 0, r_w, r_h, picref);
+ draw_bar(test, wobnair[i], x, r_h, r_w, w_h, picref);
+ x += r_w;
+ }
+ x = 0;
+ draw_bar(test, i_pixel, x, r_h + w_h, p_w, p_h, picref);
+ x += p_w;
+ draw_bar(test, white, x, r_h + w_h, p_w, p_h, picref);
+ x += p_w;
+ draw_bar(test, q_pixel, x, r_h + w_h, p_w, p_h, picref);
+ x += p_w;
+ tmp = FFALIGN(5 * r_w - x, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, neg4ire, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, black, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, pos4ire, x, r_h + w_h, tmp, p_h, picref);
+ x += tmp;
+ draw_bar(test, black, x, r_h + w_h, test->w - x, p_h, picref);
+}
+
+static av_cold int smptebars_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->fill_picture_fn = smptebars_fill_picture;
+ test->draw_once = 1;
+ return init(ctx);
+}
+
+AVFilter ff_vsrc_smptebars = {
+ .name = "smptebars",
+ .description = NULL_IF_CONFIG_SMALL("Generate SMPTE color bars."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &smptebars_class,
+ .init = smptebars_init,
+ .uninit = uninit,
+ .query_formats = smptebars_query_formats,
+ .inputs = NULL,
+ .outputs = smptebars_outputs,
+};
+
+#endif /* CONFIG_SMPTEBARS_FILTER */
+
+#if CONFIG_SMPTEHDBARS_FILTER
+
+#define smptehdbars_options options
+AVFILTER_DEFINE_CLASS(smptehdbars);
+
+static void smptehdbars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
+{
+ TestSourceContext *test = ctx->priv;
+ int d_w, r_w, r_h, l_w, i, tmp, x = 0, y = 0;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
+
+ av_frame_set_colorspace(picref, AVCOL_SPC_BT709);
+
+ d_w = FFALIGN(test->w / 8, 1 << pixdesc->log2_chroma_w);
+ r_h = FFALIGN(test->h * 7 / 12, 1 << pixdesc->log2_chroma_h);
+ draw_bar(test, gray40, x, 0, d_w, r_h, picref);
+ x += d_w;
+
+ r_w = FFALIGN((((test->w + 3) / 4) * 3) / 7, 1 << pixdesc->log2_chroma_w);
+ for (i = 0; i < 7; i++) {
+ draw_bar(test, rainbow[i], x, 0, r_w, r_h, picref);
+ x += r_w;
+ }
+ draw_bar(test, gray40, x, 0, test->w - x, r_h, picref);
+ y = r_h;
+ r_h = FFALIGN(test->h / 12, 1 << pixdesc->log2_chroma_h);
+ draw_bar(test, cyan, 0, y, d_w, r_h, picref);
+ x = d_w;
+ draw_bar(test, i_pixel, x, y, r_w, r_h, picref);
+ x += r_w;
+ tmp = r_w * 6;
+ draw_bar(test, rainbow[0], x, y, tmp, r_h, picref);
+ x += tmp;
+ l_w = x;
+ draw_bar(test, blue, x, y, test->w - x, r_h, picref);
+ y += r_h;
+ draw_bar(test, yellow, 0, y, d_w, r_h, picref);
+ x = d_w;
+ draw_bar(test, q_pixel, x, y, r_w, r_h, picref);
+ x += r_w;
+
+ for (i = 0; i < tmp; i += 1 << pixdesc->log2_chroma_w) {
+ uint8_t yramp[4] = {0};
+
+ yramp[0] = i * 255 / tmp;
+ yramp[1] = 128;
+ yramp[2] = 128;
+ yramp[3] = 255;
+
+ draw_bar(test, yramp, x, y, 1 << pixdesc->log2_chroma_w, r_h, picref);
+ x += 1 << pixdesc->log2_chroma_w;
+ }
+ draw_bar(test, red, x, y, test->w - x, r_h, picref);
+ y += r_h;
+ draw_bar(test, gray15, 0, y, d_w, test->h - y, picref);
+ x = d_w;
+ tmp = FFALIGN(r_w * 3 / 2, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w * 2, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, white, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w * 5 / 6, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ tmp = FFALIGN(r_w / 3, 1 << pixdesc->log2_chroma_w);
+ draw_bar(test, neg2, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black2, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black0, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ draw_bar(test, black4, x, y, tmp, test->h - y, picref);
+ x += tmp;
+ r_w = l_w - x;
+ draw_bar(test, black0, x, y, r_w, test->h - y, picref);
+ x += r_w;
+ draw_bar(test, gray15, x, y, test->w - x, test->h - y, picref);
+}
+
+static av_cold int smptehdbars_init(AVFilterContext *ctx)
+{
+ TestSourceContext *test = ctx->priv;
+
+ test->fill_picture_fn = smptehdbars_fill_picture;
+ test->draw_once = 1;
+ return init(ctx);
+}
+
+AVFilter ff_vsrc_smptehdbars = {
+ .name = "smptehdbars",
+ .description = NULL_IF_CONFIG_SMALL("Generate SMPTE HD color bars."),
+ .priv_size = sizeof(TestSourceContext),
+ .priv_class = &smptehdbars_class,
+ .init = smptehdbars_init,
+ .uninit = uninit,
+ .query_formats = smptebars_query_formats,
+ .inputs = NULL,
+ .outputs = smptebars_outputs,
+};
+
+#endif /* CONFIG_SMPTEHDBARS_FILTER */
+#endif /* CONFIG_SMPTEBARS_FILTER || CONFIG_SMPTEHDBARS_FILTER */
diff --git a/libavfilter/x86/Makefile b/libavfilter/x86/Makefile
index 13b5d31..ef2d0e9 100644
--- a/libavfilter/x86/Makefile
+++ b/libavfilter/x86/Makefile
@@ -1,11 +1,17 @@
OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun_init.o
OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d_init.o
+OBJS-$(CONFIG_IDET_FILTER) += x86/vf_idet_init.o
OBJS-$(CONFIG_INTERLACE_FILTER) += x86/vf_interlace_init.o
+OBJS-$(CONFIG_NOISE_FILTER) += x86/vf_noise.o
+OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup_init.o
+OBJS-$(CONFIG_SPP_FILTER) += x86/vf_spp.o
OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume_init.o
OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif_init.o
YASM-OBJS-$(CONFIG_GRADFUN_FILTER) += x86/vf_gradfun.o
YASM-OBJS-$(CONFIG_HQDN3D_FILTER) += x86/vf_hqdn3d.o
+YASM-OBJS-$(CONFIG_IDET_FILTER) += x86/vf_idet.o
YASM-OBJS-$(CONFIG_INTERLACE_FILTER) += x86/vf_interlace.o
+YASM-OBJS-$(CONFIG_PULLUP_FILTER) += x86/vf_pullup.o
YASM-OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume.o
-YASM-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif.o
+YASM-OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif.o x86/yadif-16.o x86/yadif-10.o
diff --git a/libavfilter/x86/af_volume.asm b/libavfilter/x86/af_volume.asm
index 4e5ad22..f4cbcbc 100644
--- a/libavfilter/x86/af_volume.asm
+++ b/libavfilter/x86/af_volume.asm
@@ -2,20 +2,20 @@
;* x86-optimized functions for volume filter
;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -99,9 +99,11 @@ cglobal scale_samples_s32, 4,4,4, dst, src, len, volume
INIT_XMM sse2
%define CVTDQ2PD cvtdq2pd
SCALE_SAMPLES_S32
+%if HAVE_AVX_EXTERNAL
%define CVTDQ2PD vcvtdq2pd
INIT_YMM avx
SCALE_SAMPLES_S32
+%endif
%undef CVTDQ2PD
; NOTE: This is not bit-identical with the C version because it clips to
diff --git a/libavfilter/x86/af_volume_init.c b/libavfilter/x86/af_volume_init.c
index c59e0ed..57c7eab 100644
--- a/libavfilter/x86/af_volume_init.c
+++ b/libavfilter/x86/af_volume_init.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavfilter/x86/vf_gradfun.asm b/libavfilter/x86/vf_gradfun.asm
index 00fcb16..3581f89 100644
--- a/libavfilter/x86/vf_gradfun.asm
+++ b/libavfilter/x86/vf_gradfun.asm
@@ -1,20 +1,20 @@
;******************************************************************************
;* x86-optimized functions for gradfun filter
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
diff --git a/libavfilter/x86/vf_gradfun_init.c b/libavfilter/x86/vf_gradfun_init.c
index 3f23bf6..c638a05 100644
--- a/libavfilter/x86/vf_gradfun_init.c
+++ b/libavfilter/x86/vf_gradfun_init.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2009 Loren Merritt <lorenm@u.washington.edu>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,29 +26,29 @@
#include "libavutil/x86/cpu.h"
#include "libavfilter/gradfun.h"
-void ff_gradfun_filter_line_mmxext(intptr_t x, uint8_t *dst, uint8_t *src,
- uint16_t *dc, int thresh,
+void ff_gradfun_filter_line_mmxext(intptr_t x, uint8_t *dst, const uint8_t *src,
+ const uint16_t *dc, int thresh,
const uint16_t *dithers);
-
-void ff_gradfun_filter_line_ssse3(intptr_t x, uint8_t *dst, uint8_t *src,
- uint16_t *dc, int thresh,
+void ff_gradfun_filter_line_ssse3(intptr_t x, uint8_t *dst, const uint8_t *src,
+ const uint16_t *dc, int thresh,
const uint16_t *dithers);
void ff_gradfun_blur_line_movdqa_sse2(intptr_t x, uint16_t *buf,
- uint16_t *buf1, uint16_t *dc,
- uint8_t *src1, uint8_t *src2);
+ const uint16_t *buf1, uint16_t *dc,
+ const uint8_t *src1, const uint8_t *src2);
void ff_gradfun_blur_line_movdqu_sse2(intptr_t x, uint16_t *buf,
- uint16_t *buf1, uint16_t *dc,
- uint8_t *src1, uint8_t *src2);
+ const uint16_t *buf1, uint16_t *dc,
+ const uint8_t *src1, const uint8_t *src2);
#if HAVE_YASM
-static void gradfun_filter_line(uint8_t *dst, uint8_t *src, uint16_t *dc,
- int width, int thresh, const uint16_t *dithers,
- int alignment)
+static void gradfun_filter_line_mmxext(uint8_t *dst, const uint8_t *src,
+ const uint16_t *dc,
+ int width, int thresh,
+ const uint16_t *dithers)
{
intptr_t x;
- if (width & alignment) {
- x = width & ~alignment;
+ if (width & 3) {
+ x = width & ~3;
ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2,
width - x, thresh, dithers);
width = x;
@@ -58,22 +58,25 @@ static void gradfun_filter_line(uint8_t *dst, uint8_t *src, uint16_t *dc,
thresh, dithers);
}
-static void gradfun_filter_line_mmxext(uint8_t *dst, uint8_t *src, uint16_t *dc,
- int width, int thresh,
- const uint16_t *dithers)
-{
- gradfun_filter_line(dst, src, dc, width, thresh, dithers, 3);
-}
-
-static void gradfun_filter_line_ssse3(uint8_t *dst, uint8_t *src, uint16_t *dc,
+static void gradfun_filter_line_ssse3(uint8_t *dst, const uint8_t *src, const uint16_t *dc,
int width, int thresh,
const uint16_t *dithers)
{
- gradfun_filter_line(dst, src, dc, width, thresh, dithers, 7);
+ intptr_t x;
+ if (width & 7) {
+ // could be 10% faster if I somehow eliminated this
+ x = width & ~7;
+ ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2,
+ width - x, thresh, dithers);
+ width = x;
+ }
+ x = -width;
+ ff_gradfun_filter_line_ssse3(x, dst + width, src + width, dc + width / 2,
+ thresh, dithers);
}
-static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, uint16_t *buf1,
- uint8_t *src, int src_linesize, int width)
+static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, const uint16_t *buf1,
+ const uint8_t *src, int src_linesize, int width)
{
intptr_t x = -2 * width;
if (((intptr_t) src | src_linesize) & 15)
diff --git a/libavfilter/x86/vf_hqdn3d.asm b/libavfilter/x86/vf_hqdn3d.asm
index 02632a1..961127e 100644
--- a/libavfilter/x86/vf_hqdn3d.asm
+++ b/libavfilter/x86/vf_hqdn3d.asm
@@ -1,20 +1,20 @@
;******************************************************************************
;* Copyright (c) 2012 Loren Merritt
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
diff --git a/libavfilter/x86/vf_hqdn3d_init.c b/libavfilter/x86/vf_hqdn3d_init.c
index 06f9e00..b63916b 100644
--- a/libavfilter/x86/vf_hqdn3d_init.c
+++ b/libavfilter/x86/vf_hqdn3d_init.c
@@ -1,18 +1,20 @@
/*
- * This file is part of Libav.
+ * Copyright (c) 2012 Loren Merritt
*
- * Libav is free software; you can redistribute it and/or modify
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
diff --git a/libavfilter/x86/vf_idet.asm b/libavfilter/x86/vf_idet.asm
new file mode 100644
index 0000000..007e63d
--- /dev/null
+++ b/libavfilter/x86/vf_idet.asm
@@ -0,0 +1,170 @@
+;*****************************************************************************
+;* x86-optimized functions for idet filter
+;*
+;* Copyright (C) 2014 Pascal Massimino (pascal.massimino@gmail.com)
+;* Copyright (c) 2014 Neil Birkbeck (birkbeck@google.com)
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_TEXT
+
+; Implementation that does 8-bytes at a time using single-word operations.
+%macro IDET_FILTER_LINE 1
+INIT_MMX %1
+cglobal idet_filter_line, 4, 5, 0, a, b, c, width, index
+ xor indexq, indexq
+%define m_zero m2
+%define m_sum m5
+ pxor m_sum, m_sum
+ pxor m_zero, m_zero
+
+.loop:
+ movu m0, [aq + indexq*1]
+ punpckhbw m1, m0, m_zero
+ punpcklbw m0, m_zero
+
+ movu m3, [cq + indexq*1]
+ punpckhbw m4, m3, m_zero
+ punpcklbw m3, m_zero
+
+ paddsw m1, m4
+ paddsw m0, m3
+
+ movu m3, [bq + indexq*1]
+ punpckhbw m4, m3, m_zero
+ punpcklbw m3, m_zero
+
+ paddw m4, m4
+ paddw m3, m3
+ psubsw m1, m4
+ psubsw m0, m3
+
+ ABS2 m1, m0, m4, m3
+
+ paddw m0, m1
+ punpckhwd m1, m0, m_zero
+ punpcklwd m0, m_zero
+
+ paddd m0, m1
+ paddd m_sum, m0
+
+ add indexq, 0x8
+ CMP widthd, indexd
+ jg .loop
+
+ HADDD m_sum, m0
+ movd eax, m_sum
+ RET
+%endmacro
+
+%if ARCH_X86_32
+IDET_FILTER_LINE mmxext
+IDET_FILTER_LINE mmx
+%endif
+
+;******************************************************************************
+; 16bit implementation that does 4/8-pixels at a time
+
+%macro PABS_DIFF_WD 3 ; a, b, junk , output=a
+ psubusw %3, %2, %1
+ psubusw %1, %2
+ por %1, %3
+
+ mova %2, %1
+ punpcklwd %1, m_zero
+ punpckhwd %2, m_zero
+ paddd %1, %2
+%endmacro
+
+%macro IDET_FILTER_LINE_16BIT 1 ; %1=increment (4 or 8 words)
+cglobal idet_filter_line_16bit, 4, 5, 8, a, b, c, width, index
+ xor indexq, indexq
+%define m_zero m1
+%define m_sum m0
+ pxor m_sum, m_sum
+ pxor m_zero, m_zero
+
+.loop_16bit:
+ movu m2, [bq + indexq * 2] ; B
+ movu m3, [aq + indexq * 2] ; A
+ mova m6, m2
+ psubusw m5, m2, m3 ; ba
+
+ movu m4, [cq + indexq * 2] ; C
+ add indexq, %1
+ psubusw m3, m2 ; ab
+ CMP indexd, widthd
+
+ psubusw m6, m4 ; bc
+ psubusw m4, m2 ; cb
+
+ PABS_DIFF_WD m3, m6, m7 ; |ab - bc|
+ PABS_DIFF_WD m5, m4, m7 ; |ba - cb|
+ paddd m_sum, m3
+ paddd m_sum, m5
+ jl .loop_16bit
+
+ HADDD m_sum, m2
+ movd eax, m_sum
+ RET
+%endmacro
+
+INIT_XMM sse2
+IDET_FILTER_LINE_16BIT 8
+%if ARCH_X86_32
+INIT_MMX mmx
+IDET_FILTER_LINE_16BIT 4
+%endif
+
+;******************************************************************************
+; SSE2 8-bit implementation that does 16-bytes at a time:
+
+INIT_XMM sse2
+cglobal idet_filter_line, 4, 6, 7, a, b, c, width, index, total
+ xor indexq, indexq
+ pxor m0, m0
+ pxor m1, m1
+
+.sse2_loop:
+ movu m2, [bq + indexq*1] ; B
+ movu m3, [aq + indexq*1] ; A
+ mova m6, m2
+ mova m4, m3
+ psubusb m5, m2, m3 ; ba
+
+ movu m3, [cq + indexq*1] ; C
+ add indexq, 0x10
+ psubusb m4, m2 ; ab
+ CMP indexd, widthd
+
+ psubusb m6, m3 ; bc
+ psubusb m3, m2 ; cb
+
+ psadbw m4, m6 ; |ab - bc|
+ paddq m0, m4
+ psadbw m5, m3 ; |ba - cb|
+ paddq m1, m5
+ jl .sse2_loop
+
+ paddq m0, m1
+ movhlps m1, m0
+ paddq m0, m1
+ movd eax, m0
+ RET
diff --git a/libavfilter/x86/vf_idet_init.c b/libavfilter/x86/vf_idet_init.c
new file mode 100644
index 0000000..1147ca8
--- /dev/null
+++ b/libavfilter/x86/vf_idet_init.c
@@ -0,0 +1,87 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/vf_idet.h"
+
+#if HAVE_YASM
+
+/* declares main callable idet_filter_line_{mmx,mmxext,sse2}() */
+#define FUNC_MAIN_DECL(KIND, SPAN) \
+int ff_idet_filter_line_##KIND(const uint8_t *a, const uint8_t *b, \
+ const uint8_t *c, int w); \
+static int idet_filter_line_##KIND(const uint8_t *a, const uint8_t *b, \
+ const uint8_t *c, int w) { \
+ int sum = 0; \
+ const int left_over = w & (SPAN - 1); \
+ w -= left_over; \
+ if (w > 0) \
+ sum += ff_idet_filter_line_##KIND(a, b, c, w); \
+ if (left_over > 0) \
+ sum += ff_idet_filter_line_c(a + w, b + w, c + w, left_over); \
+ return sum; \
+}
+
+
+#define FUNC_MAIN_DECL_16bit(KIND, SPAN) \
+int ff_idet_filter_line_16bit_##KIND(const uint16_t *a, const uint16_t *b, \
+ const uint16_t *c, int w); \
+static int idet_filter_line_16bit_##KIND(const uint16_t *a, const uint16_t *b, \
+ const uint16_t *c, int w) { \
+ int sum = 0; \
+ const int left_over = w & (SPAN - 1); \
+ w -= left_over; \
+ if (w > 0) \
+ sum += ff_idet_filter_line_16bit_##KIND(a, b, c, w); \
+ if (left_over > 0) \
+ sum += ff_idet_filter_line_c_16bit(a + w, b + w, c + w, left_over); \
+ return sum; \
+}
+
+FUNC_MAIN_DECL(sse2, 16)
+FUNC_MAIN_DECL_16bit(sse2, 8)
+#if ARCH_X86_32
+FUNC_MAIN_DECL(mmx, 8)
+FUNC_MAIN_DECL(mmxext, 8)
+FUNC_MAIN_DECL_16bit(mmx, 4)
+#endif
+
+#endif
+av_cold void ff_idet_init_x86(IDETContext *idet, int for_16b)
+{
+#if HAVE_YASM
+ const int cpu_flags = av_get_cpu_flags();
+
+#if ARCH_X86_32
+ if (EXTERNAL_MMX(cpu_flags)) {
+ idet->filter_line = for_16b ? (ff_idet_filter_func)idet_filter_line_16bit_mmx : idet_filter_line_mmx;
+ }
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ idet->filter_line = for_16b ? (ff_idet_filter_func)idet_filter_line_16bit_mmx : idet_filter_line_mmxext;
+ }
+#endif // ARCH_x86_32
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ idet->filter_line = for_16b ? (ff_idet_filter_func)idet_filter_line_16bit_sse2 : idet_filter_line_sse2;
+ }
+#endif // HAVE_YASM
+}
diff --git a/libavfilter/x86/vf_interlace.asm b/libavfilter/x86/vf_interlace.asm
index 8c2e9b0..55b430d 100644
--- a/libavfilter/x86/vf_interlace.asm
+++ b/libavfilter/x86/vf_interlace.asm
@@ -3,20 +3,20 @@
;*
;* Copyright (C) 2014 Kieran Kunhya <kierank@obe.tv>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or modify
+;* FFmpeg is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License along
-;* with Libav; if not, write to the Free Software Foundation, Inc.,
+;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
;******************************************************************************
diff --git a/libavfilter/x86/vf_interlace_init.c b/libavfilter/x86/vf_interlace_init.c
index 231ab85..68ee47d 100644
--- a/libavfilter/x86/vf_interlace_init.c
+++ b/libavfilter/x86/vf_interlace_init.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2014 Kieran Kunhya <kierank@obe.tv>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or modify
+ * FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
- * with Libav; if not, write to the Free Software Foundation, Inc.,
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
diff --git a/libavfilter/x86/vf_noise.c b/libavfilter/x86/vf_noise.c
new file mode 100644
index 0000000..0a86cb0
--- /dev/null
+++ b/libavfilter/x86/vf_noise.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/x86/cpu.h"
+#include "libavutil/x86/asm.h"
+#include "libavfilter/vf_noise.h"
+
+#if HAVE_INLINE_ASM
+static void line_noise_mmx(uint8_t *dst, const uint8_t *src,
+ const int8_t *noise, int len, int shift)
+{
+ x86_reg mmx_len= len & (~7);
+ noise += shift;
+
+ __asm__ volatile(
+ "mov %3, %%"REG_a" \n\t"
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "packsswb %%mm7, %%mm7 \n\t"
+ ".p2align 4 \n\t"
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "paddsb %%mm1, %%mm0 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"REG_a
+ );
+ if (mmx_len != len)
+ ff_line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
+}
+
+#if HAVE_6REGS
+static void line_noise_avg_mmx(uint8_t *dst, const uint8_t *src,
+ int len, const int8_t * const *shift)
+{
+ x86_reg mmx_len = len & (~7);
+
+ __asm__ volatile(
+ "mov %5, %%"REG_a" \n\t"
+ ".p2align 4 \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "paddb (%2, %%"REG_a"), %%mm1 \n\t"
+ "paddb (%3, %%"REG_a"), %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpckhbw %%mm2, %%mm2 \n\t"
+ "punpcklbw %%mm1, %%mm1 \n\t"
+ "punpckhbw %%mm3, %%mm3 \n\t"
+ "pmulhw %%mm0, %%mm1 \n\t"
+ "pmulhw %%mm2, %%mm3 \n\t"
+ "paddw %%mm1, %%mm1 \n\t"
+ "paddw %%mm3, %%mm3 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "paddw %%mm2, %%mm3 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "psrlw $8, %%mm3 \n\t"
+ "packuswb %%mm3, %%mm1 \n\t"
+ "movq %%mm1, (%4, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len),
+ "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"REG_a
+ );
+
+ if (mmx_len != len){
+ const int8_t *shift2[3] = { shift[0]+mmx_len, shift[1]+mmx_len, shift[2]+mmx_len };
+ ff_line_noise_avg_c(dst+mmx_len, src+mmx_len, len-mmx_len, shift2);
+ }
+}
+#endif /* HAVE_6REGS */
+
+static void line_noise_mmxext(uint8_t *dst, const uint8_t *src,
+ const int8_t *noise, int len, int shift)
+{
+ x86_reg mmx_len = len & (~7);
+ noise += shift;
+
+ __asm__ volatile(
+ "mov %3, %%"REG_a" \n\t"
+ "pcmpeqb %%mm7, %%mm7 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "packsswb %%mm7, %%mm7 \n\t"
+ ".p2align 4 \n\t"
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "paddsb %%mm1, %%mm0 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movntq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
+ : "%"REG_a
+ );
+ if (mmx_len != len)
+ ff_line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
+}
+#endif /* HAVE_INLINE_ASM */
+
+av_cold void ff_noise_init_x86(NoiseContext *n)
+{
+#if HAVE_INLINE_ASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (INLINE_MMX(cpu_flags)) {
+ n->line_noise = line_noise_mmx;
+#if HAVE_6REGS
+ n->line_noise_avg = line_noise_avg_mmx;
+#endif
+ }
+ if (INLINE_MMXEXT(cpu_flags)) {
+ n->line_noise = line_noise_mmxext;
+ }
+#endif
+}
diff --git a/libavfilter/x86/vf_pullup.asm b/libavfilter/x86/vf_pullup.asm
new file mode 100644
index 0000000..d3a1955
--- /dev/null
+++ b/libavfilter/x86/vf_pullup.asm
@@ -0,0 +1,178 @@
+;*****************************************************************************
+;* x86-optimized functions for pullup filter
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License along
+;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_TEXT
+
+INIT_MMX mmx
+cglobal pullup_filter_diff, 3, 5, 8, first, second, size
+ mov r3, 4
+ pxor m4, m4
+ pxor m7, m7
+
+.loop:
+ movq m0, [firstq]
+ movq m2, [firstq]
+ add firstq, sizeq
+ movq m1, [secondq]
+ add secondq, sizeq
+ psubusb m2, m1
+ psubusb m1, m0
+ movq m0, m2
+ movq m3, m1
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpckhbw m2, m7
+ punpckhbw m3, m7
+ paddw m4, m0
+ paddw m4, m1
+ paddw m4, m2
+ paddw m4, m3
+
+ dec r3
+ jnz .loop
+
+ movq m3, m4
+ punpcklwd m4, m7
+ punpckhwd m3, m7
+ paddd m3, m4
+ movd eax, m3
+ psrlq m3, 32
+ movd r4d, m3
+ add eax, r4d
+ RET
+
+INIT_MMX mmx
+cglobal pullup_filter_comb, 3, 5, 8, first, second, size
+ mov r3, 4
+ pxor m6, m6
+ pxor m7, m7
+ sub secondq, sizeq
+
+.loop:
+ movq m0, [firstq]
+ movq m1, [secondq]
+ punpcklbw m0, m7
+ movq m2, [secondq+sizeq]
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ movq m0, [firstq]
+ movq m1, [secondq]
+ punpckhbw m0, m7
+ movq m2, [secondq+sizeq]
+ punpckhbw m1, m7
+ punpckhbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ movq m0, [secondq+sizeq]
+ movq m1, [firstq]
+ punpcklbw m0, m7
+ movq m2, [firstq+sizeq]
+ punpcklbw m1, m7
+ punpcklbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ movq m0, [secondq+sizeq]
+ movq m1, [firstq]
+ punpckhbw m0, m7
+ movq m2, [firstq+sizeq]
+ punpckhbw m1, m7
+ punpckhbw m2, m7
+ paddw m0, m0
+ paddw m1, m2
+ movq m2, m0
+ psubusw m0, m1
+ psubusw m1, m2
+ paddw m6, m0
+ paddw m6, m1
+
+ add firstq, sizeq
+ add secondq, sizeq
+ dec r3
+ jnz .loop
+
+ movq m5, m6
+ punpcklwd m6, m7
+ punpckhwd m5, m7
+ paddd m5, m6
+ movd eax, m5
+ psrlq m5, 32
+ movd r4d, m5
+ add eax, r4d
+ RET
+
+INIT_MMX mmx
+cglobal pullup_filter_var, 3, 5, 8, first, second, size
+ mov r3, 3
+ pxor m4, m4
+ pxor m7, m7
+
+.loop:
+ movq m0, [firstq]
+ movq m2, [firstq]
+ movq m1, [firstq+sizeq]
+ add firstq, sizeq
+ psubusb m2, m1
+ psubusb m1, m0
+ movq m0, m2
+ movq m3, m1
+ punpcklbw m0, m7
+ punpcklbw m1, m7
+ punpckhbw m2, m7
+ punpckhbw m3, m7
+ paddw m4, m0
+ paddw m4, m1
+ paddw m4, m2
+ paddw m4, m3
+
+ dec r3
+ jnz .loop
+
+ movq m3, m4
+ punpcklwd m4, m7
+ punpckhwd m3, m7
+ paddd m3, m4
+ movd eax, m3
+ psrlq m3, 32
+ movd r4d, m3
+ add eax, r4d
+ shl eax, 2
+ RET
diff --git a/libavfilter/x86/vf_pullup_init.c b/libavfilter/x86/vf_pullup_init.c
new file mode 100644
index 0000000..5b36b68
--- /dev/null
+++ b/libavfilter/x86/vf_pullup_init.c
@@ -0,0 +1,41 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavutil/x86/cpu.h"
+#include "libavfilter/vf_pullup.h"
+
+int ff_pullup_filter_diff_mmx(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+int ff_pullup_filter_comb_mmx(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+int ff_pullup_filter_var_mmx (const uint8_t *a, const uint8_t *b, ptrdiff_t s);
+
+av_cold void ff_pullup_init_x86(PullupContext *s)
+{
+#if HAVE_YASM
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ s->diff = ff_pullup_filter_diff_mmx;
+ s->comb = ff_pullup_filter_comb_mmx;
+ s->var = ff_pullup_filter_var_mmx;
+ }
+#endif
+}
diff --git a/libavfilter/x86/vf_spp.c b/libavfilter/x86/vf_spp.c
new file mode 100644
index 0000000..eb46ddc
--- /dev/null
+++ b/libavfilter/x86/vf_spp.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/mem.h"
+#include "libavutil/x86/asm.h"
+#include "libavfilter/vf_spp.h"
+
+#if HAVE_MMX_INLINE
+static void hardthresh_mmx(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int bias = 0; //FIXME
+ unsigned int threshold1;
+
+ threshold1 = qp * ((1<<4) - bias) - 1;
+
+#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
+ "movq " #src0 ", %%mm0 \n" \
+ "movq " #src1 ", %%mm1 \n" \
+ "movq " #src2 ", %%mm2 \n" \
+ "movq " #src3 ", %%mm3 \n" \
+ "psubw %%mm4, %%mm0 \n" \
+ "psubw %%mm4, %%mm1 \n" \
+ "psubw %%mm4, %%mm2 \n" \
+ "psubw %%mm4, %%mm3 \n" \
+ "paddusw %%mm5, %%mm0 \n" \
+ "paddusw %%mm5, %%mm1 \n" \
+ "paddusw %%mm5, %%mm2 \n" \
+ "paddusw %%mm5, %%mm3 \n" \
+ "paddw %%mm6, %%mm0 \n" \
+ "paddw %%mm6, %%mm1 \n" \
+ "paddw %%mm6, %%mm2 \n" \
+ "paddw %%mm6, %%mm3 \n" \
+ "psubusw %%mm6, %%mm0 \n" \
+ "psubusw %%mm6, %%mm1 \n" \
+ "psubusw %%mm6, %%mm2 \n" \
+ "psubusw %%mm6, %%mm3 \n" \
+ "psraw $3, %%mm0 \n" \
+ "psraw $3, %%mm1 \n" \
+ "psraw $3, %%mm2 \n" \
+ "psraw $3, %%mm3 \n" \
+ \
+ "movq %%mm0, %%mm7 \n" \
+ "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
+ "movq %%mm1, %%mm2 \n" \
+ "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
+ "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
+ "movq %%mm0, %%mm3 \n" \
+ "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
+ "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
+ "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
+ \
+ "movq %%mm0, " #dst0 " \n" \
+ "movq %%mm7, " #dst1 " \n" \
+ "movq %%mm3, " #dst2 " \n" \
+ "movq %%mm1, " #dst3 " \n"
+
+ __asm__ volatile(
+ "movd %2, %%mm4 \n"
+ "movd %3, %%mm5 \n"
+ "movd %4, %%mm6 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ "packssdw %%mm6, %%mm6 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ "packssdw %%mm6, %%mm6 \n"
+ REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
+ REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
+ REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
+ REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
+ : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate then needed?
+ );
+ dst[0] = (src[0] + 4) >> 3;
+}
+
+static void softthresh_mmx(int16_t dst[64], const int16_t src[64],
+ int qp, const uint8_t *permutation)
+{
+ int bias = 0; //FIXME
+ unsigned int threshold1;
+
+ threshold1 = qp*((1<<4) - bias) - 1;
+
+#undef REQUANT_CORE
+#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
+ "movq " #src0 ", %%mm0 \n" \
+ "movq " #src1 ", %%mm1 \n" \
+ "pxor %%mm6, %%mm6 \n" \
+ "pxor %%mm7, %%mm7 \n" \
+ "pcmpgtw %%mm0, %%mm6 \n" \
+ "pcmpgtw %%mm1, %%mm7 \n" \
+ "pxor %%mm6, %%mm0 \n" \
+ "pxor %%mm7, %%mm1 \n" \
+ "psubusw %%mm4, %%mm0 \n" \
+ "psubusw %%mm4, %%mm1 \n" \
+ "pxor %%mm6, %%mm0 \n" \
+ "pxor %%mm7, %%mm1 \n" \
+ "movq " #src2 ", %%mm2 \n" \
+ "movq " #src3 ", %%mm3 \n" \
+ "pxor %%mm6, %%mm6 \n" \
+ "pxor %%mm7, %%mm7 \n" \
+ "pcmpgtw %%mm2, %%mm6 \n" \
+ "pcmpgtw %%mm3, %%mm7 \n" \
+ "pxor %%mm6, %%mm2 \n" \
+ "pxor %%mm7, %%mm3 \n" \
+ "psubusw %%mm4, %%mm2 \n" \
+ "psubusw %%mm4, %%mm3 \n" \
+ "pxor %%mm6, %%mm2 \n" \
+ "pxor %%mm7, %%mm3 \n" \
+ \
+ "paddsw %%mm5, %%mm0 \n" \
+ "paddsw %%mm5, %%mm1 \n" \
+ "paddsw %%mm5, %%mm2 \n" \
+ "paddsw %%mm5, %%mm3 \n" \
+ "psraw $3, %%mm0 \n" \
+ "psraw $3, %%mm1 \n" \
+ "psraw $3, %%mm2 \n" \
+ "psraw $3, %%mm3 \n" \
+ \
+ "movq %%mm0, %%mm7 \n" \
+ "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
+ "movq %%mm1, %%mm2 \n" \
+ "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
+ "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
+ "movq %%mm0, %%mm3 \n" \
+ "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
+ "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
+ "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
+ "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
+ \
+ "movq %%mm0, " #dst0 " \n" \
+ "movq %%mm7, " #dst1 " \n" \
+ "movq %%mm3, " #dst2 " \n" \
+ "movq %%mm1, " #dst3 " \n"
+
+ __asm__ volatile(
+ "movd %2, %%mm4 \n"
+ "movd %3, %%mm5 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ "packssdw %%mm4, %%mm4 \n"
+ "packssdw %%mm5, %%mm5 \n"
+ REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
+ REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
+ REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
+ REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
+ : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate then needed?
+ );
+
+ dst[0] = (src[0] + 4) >> 3;
+}
+
+static void store_slice_mmx(uint8_t *dst, const int16_t *src,
+ int dst_stride, int src_stride,
+ int width, int height, int log2_scale,
+ const uint8_t dither[8][8])
+{
+ int y;
+
+ for (y = 0; y < height; y++) {
+ uint8_t *dst1 = dst;
+ const int16_t *src1 = src;
+ __asm__ volatile(
+ "movq (%3), %%mm3 \n"
+ "movq (%3), %%mm4 \n"
+ "movd %4, %%mm2 \n"
+ "pxor %%mm0, %%mm0 \n"
+ "punpcklbw %%mm0, %%mm3 \n"
+ "punpckhbw %%mm0, %%mm4 \n"
+ "psraw %%mm2, %%mm3 \n"
+ "psraw %%mm2, %%mm4 \n"
+ "movd %5, %%mm2 \n"
+ "1: \n"
+ "movq (%0), %%mm0 \n"
+ "movq 8(%0), %%mm1 \n"
+ "paddw %%mm3, %%mm0 \n"
+ "paddw %%mm4, %%mm1 \n"
+ "psraw %%mm2, %%mm0 \n"
+ "psraw %%mm2, %%mm1 \n"
+ "packuswb %%mm1, %%mm0 \n"
+ "movq %%mm0, (%1) \n"
+ "add $16, %0 \n"
+ "add $8, %1 \n"
+ "cmp %2, %1 \n"
+ " jb 1b \n"
+ : "+r" (src1), "+r"(dst1)
+ : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(MAX_LEVEL - log2_scale)
+ );
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+#endif /* HAVE_MMX_INLINE */
+
+av_cold void ff_spp_init_x86(SPPContext *s)
+{
+#if HAVE_MMX_INLINE
+ int cpu_flags = av_get_cpu_flags();
+
+ if (cpu_flags & AV_CPU_FLAG_MMX) {
+ s->store_slice = store_slice_mmx;
+ switch (s->mode) {
+ case 0: s->requantize = hardthresh_mmx; break;
+ case 1: s->requantize = softthresh_mmx; break;
+ }
+ }
+#endif
+}
diff --git a/libavfilter/x86/vf_yadif.asm b/libavfilter/x86/vf_yadif.asm
index 3d8b2bc..a29620c 100644
--- a/libavfilter/x86/vf_yadif.asm
+++ b/libavfilter/x86/vf_yadif.asm
@@ -4,20 +4,20 @@
;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -39,11 +39,7 @@ SECTION .text
pavgb m5, m3
pand m4, [pb_1]
psubusb m5, m4
-%if mmsize == 16
- psrldq m5, 1
-%else
- psrlq m5, 8
-%endif
+ RSHIFT m5, 1
punpcklbw m5, m7
mova m4, m2
psubusb m2, m3
@@ -51,13 +47,8 @@ SECTION .text
pmaxub m2, m3
mova m3, m2
mova m4, m2
-%if mmsize == 16
- psrldq m3, 1
- psrldq m4, 2
-%else
- psrlq m3, 8
- psrlq m4, 16
-%endif
+ RSHIFT m3, 1
+ RSHIFT m4, 2
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
@@ -90,17 +81,17 @@ SECTION .text
%endmacro
%macro LOAD 2
- movh m%1, %2
- punpcklbw m%1, m7
+ movh %1, %2
+ punpcklbw %1, m7
%endmacro
%macro FILTER 3
.loop%1:
pxor m7, m7
- LOAD 0, [curq+t1]
- LOAD 1, [curq+t0]
- LOAD 2, [%2]
- LOAD 3, [%3]
+ LOAD m0, [curq+t1]
+ LOAD m1, [curq+t0]
+ LOAD m2, [%2]
+ LOAD m3, [%3]
mova m4, m3
paddw m3, m2
psraw m3, 1
@@ -109,8 +100,8 @@ SECTION .text
mova [rsp+32], m1
psubw m2, m4
ABS1 m2, m4
- LOAD 3, [prevq+t1]
- LOAD 4, [prevq+t0]
+ LOAD m3, [prevq+t1]
+ LOAD m4, [prevq+t0]
psubw m3, m0
psubw m4, m1
ABS1 m3, m5
@@ -119,8 +110,8 @@ SECTION .text
psrlw m2, 1
psrlw m3, 1
pmaxsw m2, m3
- LOAD 3, [nextq+t1]
- LOAD 4, [nextq+t0]
+ LOAD m3, [nextq+t1]
+ LOAD m4, [nextq+t0]
psubw m3, m0
psubw m4, m1
ABS1 m3, m5
@@ -166,10 +157,10 @@ SECTION .text
mova m6, [rsp+48]
cmp DWORD r8m, 2
jge .end%1
- LOAD 2, [%2+t1*2]
- LOAD 4, [%3+t1*2]
- LOAD 3, [%2+t0*2]
- LOAD 5, [%3+t0*2]
+ LOAD m2, [%2+t1*2]
+ LOAD m4, [%3+t1*2]
+ LOAD m3, [%2+t0*2]
+ LOAD m5, [%3+t0*2]
paddw m2, m4
paddw m3, m5
psrlw m2, 1
@@ -220,8 +211,6 @@ cglobal yadif_filter_line, 4, 6, 8, 80, dst, prev, cur, next, w, prefs, \
cglobal yadif_filter_line, 4, 7, 8, 80, dst, prev, cur, next, w, prefs, \
mrefs, parity, mode
%endif
- cmp DWORD wm, 0
- jle .ret
%if ARCH_X86_32
mov r4, r5mp
mov r5, r6mp
diff --git a/libavfilter/x86/vf_yadif_init.c b/libavfilter/x86/vf_yadif_init.c
index 510a023..1460a64 100644
--- a/libavfilter/x86/vf_yadif_init.c
+++ b/libavfilter/x86/vf_yadif_init.c
@@ -1,26 +1,25 @@
/*
* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
-#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
@@ -36,16 +35,63 @@ void ff_yadif_filter_line_ssse3(void *dst, void *prev, void *cur,
void *next, int w, int prefs,
int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_mmxext(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_sse2(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_ssse3(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_16bit_sse4(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+
+void ff_yadif_filter_line_10bit_mmxext(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_10bit_sse2(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+void ff_yadif_filter_line_10bit_ssse3(void *dst, void *prev, void *cur,
+ void *next, int w, int prefs,
+ int mrefs, int parity, int mode);
+
av_cold void ff_yadif_init_x86(YADIFContext *yadif)
{
int cpu_flags = av_get_cpu_flags();
+ int bit_depth = (!yadif->csp) ? 8
+ : yadif->csp->comp[0].depth_minus1 + 1;
+ if (bit_depth >= 15) {
+#if ARCH_X86_32
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_mmxext;
+#endif /* ARCH_X86_32 */
+ if (EXTERNAL_SSE2(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_ssse3;
+ if (EXTERNAL_SSE4(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_16bit_sse4;
+ } else if ( bit_depth >= 9 && bit_depth <= 14) {
+#if ARCH_X86_32
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_10bit_mmxext;
+#endif /* ARCH_X86_32 */
+ if (EXTERNAL_SSE2(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_10bit_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_10bit_ssse3;
+ } else {
#if ARCH_X86_32
- if (EXTERNAL_MMXEXT(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_mmxext;
+ if (EXTERNAL_MMXEXT(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_mmxext;
#endif /* ARCH_X86_32 */
- if (EXTERNAL_SSE2(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_sse2;
- if (EXTERNAL_SSSE3(cpu_flags))
- yadif->filter_line = ff_yadif_filter_line_ssse3;
+ if (EXTERNAL_SSE2(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_sse2;
+ if (EXTERNAL_SSSE3(cpu_flags))
+ yadif->filter_line = ff_yadif_filter_line_ssse3;
+ }
}
diff --git a/libavfilter/x86/yadif-10.asm b/libavfilter/x86/yadif-10.asm
new file mode 100644
index 0000000..8853e0d
--- /dev/null
+++ b/libavfilter/x86/yadif-10.asm
@@ -0,0 +1,255 @@
+;*****************************************************************************
+;* x86-optimized functions for yadif filter
+;*
+;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
+;* Copyright (c) 2011-2013 James Darnley <james.darnley@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_1: times 8 dw 1
+
+SECTION .text
+
+%macro PMAXUW 2
+%if cpuflag(sse4)
+ pmaxuw %1, %2
+%else
+ psubusw %1, %2
+ paddusw %1, %2
+%endif
+%endmacro
+
+%macro CHECK 2
+ movu m2, [curq+t1+%1*2]
+ movu m3, [curq+t0+%2*2]
+ mova m4, m2
+ mova m5, m2
+ pxor m4, m3
+ pavgw m5, m3
+ pand m4, [pw_1]
+ psubusw m5, m4
+ RSHIFT m5, 2
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ mova m4, m2
+ RSHIFT m3, 2
+ RSHIFT m4, 4
+ paddw m2, m3
+ paddw m2, m4
+%endmacro
+
+%macro CHECK1 0
+ mova m3, m0
+ pcmpgtw m3, m2
+ pminsw m0, m2
+ mova m6, m3
+ pand m5, m3
+ pandn m3, m1
+ por m3, m5
+ mova m1, m3
+%endmacro
+
+; %macro CHECK2 0
+; paddw m6, [pw_1]
+; psllw m6, 14
+; paddsw m2, m6
+; mova m3, m0
+; pcmpgtw m3, m2
+; pminsw m0, m2
+; pand m5, m3
+; pandn m3, m1
+; por m3, m5
+; mova m1, m3
+; %endmacro
+
+; This version of CHECK2 is required for 14-bit samples. The left-shift trick
+; in the old code is not large enough to correctly select pixels or scores.
+
+%macro CHECK2 0
+ mova m3, m0
+ pcmpgtw m0, m2
+ pand m0, m6
+ mova m6, m0
+ pand m5, m6
+ pand m2, m0
+ pandn m6, m1
+ pandn m0, m3
+ por m6, m5
+ por m0, m2
+ mova m1, m6
+%endmacro
+
+%macro LOAD 2
+ movu %1, %2
+%endmacro
+
+%macro FILTER 3
+.loop%1:
+ pxor m7, m7
+ LOAD m0, [curq+t1]
+ LOAD m1, [curq+t0]
+ LOAD m2, [%2]
+ LOAD m3, [%3]
+ mova m4, m3
+ paddw m3, m2
+ psraw m3, 1
+ mova [rsp+ 0], m0
+ mova [rsp+16], m3
+ mova [rsp+32], m1
+ psubw m2, m4
+ ABS1 m2, m4
+ LOAD m3, [prevq+t1]
+ LOAD m4, [prevq+t0]
+ psubw m3, m0
+ psubw m4, m1
+ ABS2 m3, m4, m5, m6
+ paddw m3, m4
+ psrlw m2, 1
+ psrlw m3, 1
+ pmaxsw m2, m3
+ LOAD m3, [nextq+t1]
+ LOAD m4, [nextq+t0]
+ psubw m3, m0
+ psubw m4, m1
+ ABS2 m3, m4, m5, m6
+ paddw m3, m4
+ psrlw m3, 1
+ pmaxsw m2, m3
+ mova [rsp+48], m2
+
+ paddw m1, m0
+ paddw m0, m0
+ psubw m0, m1
+ psrlw m1, 1
+ ABS1 m0, m2
+
+ movu m2, [curq+t1-1*2]
+ movu m3, [curq+t0-1*2]
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ RSHIFT m3, 4
+ paddw m0, m2
+ paddw m0, m3
+ psubw m0, [pw_1]
+
+ CHECK -2, 0
+ CHECK1
+ CHECK -3, 1
+ CHECK2
+ CHECK 0, -2
+ CHECK1
+ CHECK 1, -3
+ CHECK2
+
+ mova m6, [rsp+48]
+ cmp DWORD r8m, 2
+ jge .end%1
+ LOAD m2, [%2+t1*2]
+ LOAD m4, [%3+t1*2]
+ LOAD m3, [%2+t0*2]
+ LOAD m5, [%3+t0*2]
+ paddw m2, m4
+ paddw m3, m5
+ psrlw m2, 1
+ psrlw m3, 1
+ mova m4, [rsp+ 0]
+ mova m5, [rsp+16]
+ mova m7, [rsp+32]
+ psubw m2, m4
+ psubw m3, m7
+ mova m0, m5
+ psubw m5, m4
+ psubw m0, m7
+ mova m4, m2
+ pminsw m2, m3
+ pmaxsw m3, m4
+ pmaxsw m2, m5
+ pminsw m3, m5
+ pmaxsw m2, m0
+ pminsw m3, m0
+ pxor m4, m4
+ pmaxsw m6, m3
+ psubw m4, m2
+ pmaxsw m6, m4
+
+.end%1:
+ mova m2, [rsp+16]
+ mova m3, m2
+ psubw m2, m6
+ paddw m3, m6
+ pmaxsw m1, m2
+ pminsw m1, m3
+
+ movu [dstq], m1
+ add dstq, mmsize-4
+ add prevq, mmsize-4
+ add curq, mmsize-4
+ add nextq, mmsize-4
+ sub DWORD r4m, mmsize/2-2
+ jg .loop%1
+%endmacro
+
+%macro YADIF 0
+%if ARCH_X86_32
+cglobal yadif_filter_line_10bit, 4, 6, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%else
+cglobal yadif_filter_line_10bit, 4, 7, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%endif
+%if ARCH_X86_32
+ mov r4, r5mp
+ mov r5, r6mp
+ DECLARE_REG_TMP 4,5
+%else
+ movsxd r5, DWORD r5m
+ movsxd r6, DWORD r6m
+ DECLARE_REG_TMP 5,6
+%endif
+
+ cmp DWORD paritym, 0
+ je .parity0
+ FILTER 1, prevq, curq
+ jmp .ret
+
+.parity0:
+ FILTER 0, curq, nextq
+
+.ret:
+ RET
+%endmacro
+
+INIT_XMM ssse3
+YADIF
+INIT_XMM sse2
+YADIF
+%if ARCH_X86_32
+INIT_MMX mmxext
+YADIF
+%endif
diff --git a/libavfilter/x86/yadif-16.asm b/libavfilter/x86/yadif-16.asm
new file mode 100644
index 0000000..79d127d
--- /dev/null
+++ b/libavfilter/x86/yadif-16.asm
@@ -0,0 +1,317 @@
+;*****************************************************************************
+;* x86-optimized functions for yadif filter
+;*
+;* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+;* Copyright (c) 2013 Daniel Kang <daniel.d.kang@gmail.com>
+;* Copyright (c) 2011-2013 James Darnley <james.darnley@gmail.com>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA
+
+pw_1: times 8 dw 1
+pw_8000: times 8 dw 0x8000
+pd_1: times 4 dd 1
+pd_8000: times 4 dd 0x8000
+
+SECTION .text
+
+%macro PABS 2
+%if cpuflag(ssse3)
+ pabsd %1, %1
+%else
+ pxor %2, %2
+ pcmpgtd %2, %1
+ pxor %1, %2
+ psubd %1, %2
+%endif
+%endmacro
+
+%macro PACK 1
+%if cpuflag(sse4)
+ packusdw %1, %1
+%else
+ psubd %1, [pd_8000]
+ packssdw %1, %1
+ paddw %1, [pw_8000]
+%endif
+%endmacro
+
+%macro PMINSD 3
+%if cpuflag(sse4)
+ pminsd %1, %2
+%else
+ mova %3, %2
+ pcmpgtd %3, %1
+ pand %1, %3
+ pandn %3, %2
+ por %1, %3
+%endif
+%endmacro
+
+%macro PMAXSD 3
+%if cpuflag(sse4)
+ pmaxsd %1, %2
+%else
+ mova %3, %1
+ pcmpgtd %3, %2
+ pand %1, %3
+ pandn %3, %2
+ por %1, %3
+%endif
+%endmacro
+
+%macro PMAXUW 2
+%if cpuflag(sse4)
+ pmaxuw %1, %2
+%else
+ psubusw %1, %2
+ paddusw %1, %2
+%endif
+%endmacro
+
+%macro CHECK 2
+ movu m2, [curq+t1+%1*2]
+ movu m3, [curq+t0+%2*2]
+ mova m4, m2
+ mova m5, m2
+ pxor m4, m3
+ pavgw m5, m3
+ pand m4, [pw_1]
+ psubusw m5, m4
+ RSHIFT m5, 2
+ punpcklwd m5, m7
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ mova m4, m2
+ RSHIFT m3, 2
+ RSHIFT m4, 4
+ punpcklwd m2, m7
+ punpcklwd m3, m7
+ punpcklwd m4, m7
+ paddd m2, m3
+ paddd m2, m4
+%endmacro
+
+%macro CHECK1 0
+ mova m3, m0
+ pcmpgtd m3, m2
+ PMINSD m0, m2, m6
+ mova m6, m3
+ pand m5, m3
+ pandn m3, m1
+ por m3, m5
+ mova m1, m3
+%endmacro
+
+%macro CHECK2 0
+ paddd m6, [pd_1]
+ pslld m6, 30
+ paddd m2, m6
+ mova m3, m0
+ pcmpgtd m3, m2
+ PMINSD m0, m2, m4
+ pand m5, m3
+ pandn m3, m1
+ por m3, m5
+ mova m1, m3
+%endmacro
+
+; This version of CHECK2 has 3 fewer instructions on sets older than SSE4 but I
+; am not sure whether it is any faster. A rewrite or refactor of the filter
+; code should make it possible to eliminate the move instruction at the end. It
+; exists to satisfy the expectation that the "score" values are in m1.
+
+; %macro CHECK2 0
+; mova m3, m0
+; pcmpgtd m0, m2
+; pand m0, m6
+; mova m6, m0
+; pand m5, m6
+; pand m2, m0
+; pandn m6, m1
+; pandn m0, m3
+; por m6, m5
+; por m0, m2
+; mova m1, m6
+; %endmacro
+
+%macro LOAD 2
+ movh %1, %2
+ punpcklwd %1, m7
+%endmacro
+
+%macro FILTER 3
+.loop%1:
+ pxor m7, m7
+ LOAD m0, [curq+t1]
+ LOAD m1, [curq+t0]
+ LOAD m2, [%2]
+ LOAD m3, [%3]
+ mova m4, m3
+ paddd m3, m2
+ psrad m3, 1
+ mova [rsp+ 0], m0
+ mova [rsp+16], m3
+ mova [rsp+32], m1
+ psubd m2, m4
+ PABS m2, m4
+ LOAD m3, [prevq+t1]
+ LOAD m4, [prevq+t0]
+ psubd m3, m0
+ psubd m4, m1
+ PABS m3, m5
+ PABS m4, m5
+ paddd m3, m4
+ psrld m2, 1
+ psrld m3, 1
+ PMAXSD m2, m3, m6
+ LOAD m3, [nextq+t1]
+ LOAD m4, [nextq+t0]
+ psubd m3, m0
+ psubd m4, m1
+ PABS m3, m5
+ PABS m4, m5
+ paddd m3, m4
+ psrld m3, 1
+ PMAXSD m2, m3, m6
+ mova [rsp+48], m2
+
+ paddd m1, m0
+ paddd m0, m0
+ psubd m0, m1
+ psrld m1, 1
+ PABS m0, m2
+
+ movu m2, [curq+t1-1*2]
+ movu m3, [curq+t0-1*2]
+ mova m4, m2
+ psubusw m2, m3
+ psubusw m3, m4
+ PMAXUW m2, m3
+ mova m3, m2
+ RSHIFT m3, 4
+ punpcklwd m2, m7
+ punpcklwd m3, m7
+ paddd m0, m2
+ paddd m0, m3
+ psubd m0, [pd_1]
+
+ CHECK -2, 0
+ CHECK1
+ CHECK -3, 1
+ CHECK2
+ CHECK 0, -2
+ CHECK1
+ CHECK 1, -3
+ CHECK2
+
+ mova m6, [rsp+48]
+ cmp DWORD r8m, 2
+ jge .end%1
+ LOAD m2, [%2+t1*2]
+ LOAD m4, [%3+t1*2]
+ LOAD m3, [%2+t0*2]
+ LOAD m5, [%3+t0*2]
+ paddd m2, m4
+ paddd m3, m5
+ psrld m2, 1
+ psrld m3, 1
+ mova m4, [rsp+ 0]
+ mova m5, [rsp+16]
+ mova m7, [rsp+32]
+ psubd m2, m4
+ psubd m3, m7
+ mova m0, m5
+ psubd m5, m4
+ psubd m0, m7
+ mova m4, m2
+ PMINSD m2, m3, m7
+ PMAXSD m3, m4, m7
+ PMAXSD m2, m5, m7
+ PMINSD m3, m5, m7
+ PMAXSD m2, m0, m7
+ PMINSD m3, m0, m7
+ pxor m4, m4
+ PMAXSD m6, m3, m7
+ psubd m4, m2
+ PMAXSD m6, m4, m7
+
+.end%1:
+ mova m2, [rsp+16]
+ mova m3, m2
+ psubd m2, m6
+ paddd m3, m6
+ PMAXSD m1, m2, m7
+ PMINSD m1, m3, m7
+ PACK m1
+
+ movh [dstq], m1
+ add dstq, mmsize/2
+ add prevq, mmsize/2
+ add curq, mmsize/2
+ add nextq, mmsize/2
+ sub DWORD r4m, mmsize/4
+ jg .loop%1
+%endmacro
+
+%macro YADIF 0
+%if ARCH_X86_32
+cglobal yadif_filter_line_16bit, 4, 6, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%else
+cglobal yadif_filter_line_16bit, 4, 7, 8, 80, dst, prev, cur, next, w, \
+ prefs, mrefs, parity, mode
+%endif
+%if ARCH_X86_32
+ mov r4, r5mp
+ mov r5, r6mp
+ DECLARE_REG_TMP 4,5
+%else
+ movsxd r5, DWORD r5m
+ movsxd r6, DWORD r6m
+ DECLARE_REG_TMP 5,6
+%endif
+
+ cmp DWORD paritym, 0
+ je .parity0
+ FILTER 1, prevq, curq
+ jmp .ret
+
+.parity0:
+ FILTER 0, curq, nextq
+
+.ret:
+ RET
+%endmacro
+
+INIT_XMM sse4
+YADIF
+INIT_XMM ssse3
+YADIF
+INIT_XMM sse2
+YADIF
+%if ARCH_X86_32
+INIT_MMX mmxext
+YADIF
+%endif
diff --git a/libavfilter/yadif.h b/libavfilter/yadif.h
index 75e35c4..07f2cc9 100644
--- a/libavfilter/yadif.h
+++ b/libavfilter/yadif.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -22,31 +22,33 @@
#include "libavutil/pixdesc.h"
#include "avfilter.h"
+enum YADIFMode {
+ YADIF_MODE_SEND_FRAME = 0, ///< send 1 frame for each frame
+ YADIF_MODE_SEND_FIELD = 1, ///< send 1 frame for each field
+ YADIF_MODE_SEND_FRAME_NOSPATIAL = 2, ///< send 1 frame for each frame but skips spatial interlacing check
+ YADIF_MODE_SEND_FIELD_NOSPATIAL = 3, ///< send 1 frame for each field but skips spatial interlacing check
+};
+
+enum YADIFParity {
+ YADIF_PARITY_TFF = 0, ///< top field first
+ YADIF_PARITY_BFF = 1, ///< bottom field first
+ YADIF_PARITY_AUTO = -1, ///< auto detection
+};
+
+enum YADIFDeint {
+ YADIF_DEINT_ALL = 0, ///< deinterlace all frames
+ YADIF_DEINT_INTERLACED = 1, ///< only deinterlace frames marked as interlaced
+};
+
typedef struct YADIFContext {
const AVClass *class;
- /**
- * 0: send 1 frame for each frame
- * 1: send 1 frame for each field
- * 2: like 0 but skips spatial interlacing check
- * 3: like 1 but skips spatial interlacing check
- */
- int mode;
- /**
- * 0: top field first
- * 1: bottom field first
- * -1: auto-detection
- */
- int parity;
+ enum YADIFMode mode;
+ enum YADIFParity parity;
+ enum YADIFDeint deint;
int frame_pending;
- /**
- * 0: deinterlace all frames
- * 1: only deinterlace frames marked as interlaced
- */
- int auto_enable;
-
AVFrame *cur;
AVFrame *next;
AVFrame *prev;
@@ -63,6 +65,8 @@ typedef struct YADIFContext {
const AVPixFmtDescriptor *csp;
int eof;
+ uint8_t *temp_line;
+ int temp_line_size;
} YADIFContext;
void ff_yadif_init_x86(YADIFContext *yadif);
OpenPOWER on IntegriCloud