diff options
author    Clément Bœsch <u@pkh.me>              2014-12-24 15:03:26 +0100
committer Stefano Sabatini <stefasab@gmail.com> 2015-03-18 12:19:08 +0100
commit    304fdfe9f3a3879ce95d01c4dcb7e33bdb68f9ef (patch)
tree      61535b815be4fcc17ba622e9f4faab5a2577fdc1
parent    0f16dfda50cb6b7b13e2a3f02f0823f67eeca748 (diff)
download  ffmpeg-streaming-304fdfe9f3a3879ce95d01c4dcb7e33bdb68f9ef.zip
          ffmpeg-streaming-304fdfe9f3a3879ce95d01c4dcb7e33bdb68f9ef.tar.gz
lavfi: add showwavespic filter
This is a variant of showwaves. It is implemented as a different filter
so that the user is not allowed to use meaningless options which belong
to showwaves (such as rate).
Major edits done by Stefano Sabatini, from a patch by ubitux.
See thread:
From: Clément Bœsch <u@pkh.me>
To: ffmpeg-devel@ffmpeg.org
Date: Wed, 24 Dec 2014 15:03:26 +0100
Subject: [FFmpeg-devel] [PATCH] avfilter/showwaves: add single_pic option
 Changelog                   |   1 +
 doc/filters.texi            |  27 +++
 libavfilter/Makefile        |   1 +
 libavfilter/allfilters.c    |   1 +
 libavfilter/avf_showwaves.c | 274 +++++++++++++++++++++++++++++++++++++------
 libavfilter/version.h       |   2 +-
 6 files changed, 270 insertions(+), 36 deletions(-)
@@ -5,6 +5,7 @@ version <next>: - FFT video filter - TDSC decoder - DTS lossless extension (XLL) decoding (not lossless, disabled by default) +- showwavespic filter version 2.6: diff --git a/doc/filters.texi b/doc/filters.texi index dbcd391..3acd3e8 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -11741,6 +11741,33 @@ aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r= @end example @end itemize +@section showwavespic + +Convert input audio to a single video frame, representing the samples waves. + +The filter accepts the following options: + +@table @option +@item size, s +Specify the video size for the output. For the syntax of this option, check the +@ref{video size syntax,,"Video size" section in the ffmpeg-utils manual,ffmpeg-utils}. +Default value is @code{600x240}. + +@item split_channels +Set if channels should be drawn separately or overlap. Default value is 0. +@end table + +@subsection Examples + +@itemize +@item +Extract a channel split representation of the wave form of a whole audio track +in a 1024x800 picture using @command{ffmpeg}: +@example +ffmpeg -i audio.flac -lavfi showwavespic=split_channels=1:s=1024x800 waveform.png +@end example +@end itemize + @section split, asplit Split input into several identical outputs. 
diff --git a/libavfilter/Makefile b/libavfilter/Makefile index b184f07..2cde029 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -236,6 +236,7 @@ OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o OBJS-$(CONFIG_SHOWCQT_FILTER) += avf_showcqt.o OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o +OBJS-$(CONFIG_SHOWWAVESPIC_FILTER) += avf_showwaves.o # multimedia sources OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 043ac56..0288082 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -251,6 +251,7 @@ void avfilter_register_all(void) REGISTER_FILTER(SHOWCQT, showcqt, avf); REGISTER_FILTER(SHOWSPECTRUM, showspectrum, avf); REGISTER_FILTER(SHOWWAVES, showwaves, avf); + REGISTER_FILTER(SHOWWAVESPIC, showwavespic, avf); /* multimedia sources */ REGISTER_FILTER(AMOVIE, amovie, avsrc); diff --git a/libavfilter/avf_showwaves.c b/libavfilter/avf_showwaves.c index 9cddc51..57a6b2e 100644 --- a/libavfilter/avf_showwaves.c +++ b/libavfilter/avf_showwaves.c @@ -23,6 +23,7 @@ * audio to video multimedia filter */ +#include "libavutil/avassert.h" #include "libavutil/channel_layout.h" #include "libavutil/opt.h" #include "libavutil/parseutils.h" @@ -40,6 +41,11 @@ enum ShowWavesMode { MODE_NB, }; +struct frame_node { + AVFrame *frame; + struct frame_node *next; +}; + typedef struct { const AVClass *class; int w, h; @@ -54,6 +60,13 @@ typedef struct { int split_channels; void (*draw_sample)(uint8_t *buf, int height, int linesize, int16_t sample, int16_t *prev_y, int intensity); + + /* single picture */ + int single_pic; + struct frame_node *audio_frames; + struct frame_node *last_frame; + int64_t total_samples; + int64_t *sum; /* abs sum of the samples per channel */ } ShowWavesContext; #define OFFSET(x) offsetof(ShowWavesContext, x) @@ -82,6 +95,19 @@ static av_cold void uninit(AVFilterContext *ctx) 
av_frame_free(&showwaves->outpicref); av_freep(&showwaves->buf_idy); + + if (showwaves->single_pic) { + struct frame_node *node = showwaves->audio_frames; + while (node) { + struct frame_node *tmp = node; + + node = node->next; + av_frame_free(&tmp->frame); + av_freep(&tmp); + } + av_freep(&showwaves->sum); + showwaves->last_frame = NULL; + } } static int query_formats(AVFilterContext *ctx) @@ -162,6 +188,55 @@ inline static int push_frame(AVFilterLink *outlink) return ret; } +static int push_single_pic(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + ShowWavesContext *showwaves = ctx->priv; + int64_t n = 0, max_samples = showwaves->total_samples / outlink->w; + AVFrame *out = showwaves->outpicref; + struct frame_node *node; + const int nb_channels = inlink->channels; + const int x = 255 / (showwaves->split_channels ? 1 : nb_channels); + const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h; + const int linesize = out->linesize[0]; + int col = 0; + int64_t *sum = showwaves->sum; + + av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples); + + memset(sum, 0, nb_channels); + + for (node = showwaves->audio_frames; node; node = node->next) { + int i; + const AVFrame *frame = node->frame; + const int16_t *p = (const int16_t *)frame->data[0]; + + for (i = 0; i < frame->nb_samples; i++) { + int ch; + + for (ch = 0; ch < nb_channels; ch++) + sum[ch] += abs(p[ch + i*nb_channels]) << 1; + if (n++ == max_samples) { + for (ch = 0; ch < nb_channels; ch++) { + int16_t sample = sum[ch] / max_samples; + uint8_t *buf = out->data[0] + col; + if (showwaves->split_channels) + buf += ch*ch_height*linesize; + av_assert0(col < outlink->w); + showwaves->draw_sample(buf, ch_height, linesize, sample, &showwaves->buf_idy[ch], x); + sum[ch] = 0; + } + col++; + n = 0; + } + } + } + + return push_frame(outlink); +} + + static int request_frame(AVFilterLink 
*outlink) { ShowWavesContext *showwaves = outlink->src->priv; @@ -173,8 +248,13 @@ static int request_frame(AVFilterLink *outlink) ret = ff_request_frame(inlink); } while (!showwaves->req_fullfilled && ret >= 0); - if (ret == AVERROR_EOF && showwaves->outpicref) - push_frame(outlink); + if (ret == AVERROR_EOF && showwaves->outpicref) { + if (showwaves->single_pic) + push_single_pic(outlink); + else + push_frame(outlink); + } + return ret; } @@ -229,14 +309,56 @@ static void draw_sample_cline(uint8_t *buf, int height, int linesize, buf[k * linesize] += intensity; } -static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) +static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p, + const AVFilterLink *inlink, AVFilterLink *outlink, + const AVFrame *in) +{ + if (!showwaves->outpicref) { + int j; + AVFrame *out = showwaves->outpicref = + ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!out) + return AVERROR(ENOMEM); + out->width = outlink->w; + out->height = outlink->h; + out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels, + av_make_q(1, inlink->sample_rate), + outlink->time_base); + for (j = 0; j < outlink->h; j++) + memset(out->data[0] + j*out->linesize[0], 0, outlink->w); + } + return 0; +} + +static av_cold int init(AVFilterContext *ctx) +{ + ShowWavesContext *showwaves = ctx->priv; + + if (!strcmp(ctx->filter->name, "showwavespic")) { + showwaves->single_pic = 1; + showwaves->mode = MODE_CENTERED_LINE; + } + + switch (showwaves->mode) { + case MODE_POINT: showwaves->draw_sample = draw_sample_point; break; + case MODE_LINE: showwaves->draw_sample = draw_sample_line; break; + case MODE_P2P: showwaves->draw_sample = draw_sample_p2p; break; + case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break; + default: + return AVERROR_BUG; + } + return 0; +} + +#if CONFIG_SHOWWAVES_FILTER + +static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples) { AVFilterContext *ctx = 
inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; ShowWavesContext *showwaves = ctx->priv; const int nb_samples = insamples->nb_samples; AVFrame *outpicref = showwaves->outpicref; - int linesize = outpicref ? outpicref->linesize[0] : 0; int16_t *p = (int16_t *)insamples->data[0]; int nb_channels = inlink->channels; int i, j, ret = 0; @@ -246,23 +368,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) /* draw data in the buffer */ for (i = 0; i < nb_samples; i++) { - if (!showwaves->outpicref) { - showwaves->outpicref = outpicref = - ff_get_video_buffer(outlink, outlink->w, outlink->h); - if (!outpicref) - return AVERROR(ENOMEM); - outpicref->width = outlink->w; - outpicref->height = outlink->h; - outpicref->pts = insamples->pts + - av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels, - (AVRational){ 1, inlink->sample_rate }, - outlink->time_base); - linesize = outpicref->linesize[0]; - for (j = 0; j < outlink->h; j++) - memset(outpicref->data[0] + j * linesize, 0, outlink->w); - } + + ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples); + if (ret < 0) + goto end; + outpicref = showwaves->outpicref; + for (j = 0; j < nb_channels; j++) { uint8_t *buf = outpicref->data[0] + showwaves->buf_idx; + const int linesize = outpicref->linesize[0]; if (showwaves->split_channels) buf += j*ch_height*linesize; showwaves->draw_sample(buf, ch_height, linesize, *p++, @@ -280,30 +394,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) outpicref = showwaves->outpicref; } +end: av_frame_free(&insamples); return ret; } -static av_cold int init(AVFilterContext *ctx) -{ - ShowWavesContext *showwaves = ctx->priv; - - switch (showwaves->mode) { - case MODE_POINT: showwaves->draw_sample = draw_sample_point; break; - case MODE_LINE: showwaves->draw_sample = draw_sample_line; break; - case MODE_P2P: showwaves->draw_sample = draw_sample_p2p; break; - case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break; - 
default: - return AVERROR_BUG; - } - return 0; -} - static const AVFilterPad showwaves_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, - .filter_frame = filter_frame, + .filter_frame = showwaves_filter_frame, }, { NULL } }; @@ -329,3 +429,107 @@ AVFilter ff_avf_showwaves = { .outputs = showwaves_outputs, .priv_class = &showwaves_class, }; + +#endif // CONFIG_SHOWWAVES_FILTER + +#if CONFIG_SHOWWAVESPIC_FILTER + +#define OFFSET(x) offsetof(ShowWavesContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption showwavespic_options[] = { + { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, + { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, + { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(showwavespic); + +static int showwavespic_config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + ShowWavesContext *showwaves = ctx->priv; + + if (showwaves->single_pic) { + showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum)); + if (!showwaves->sum) + return AVERROR(ENOMEM); + } + + return 0; +} + +static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples) +{ + AVFilterContext *ctx = inlink->dst; + AVFilterLink *outlink = ctx->outputs[0]; + ShowWavesContext *showwaves = ctx->priv; + int16_t *p = (int16_t *)insamples->data[0]; + int ret = 0; + + if (showwaves->single_pic) { + struct frame_node *f; + + ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples); + if (ret < 0) + goto end; + + /* queue the audio frame */ + f = av_malloc(sizeof(*f)); + if (!f) { + ret = AVERROR(ENOMEM); + goto end; + } + f->frame = insamples; + f->next = NULL; + if (!showwaves->last_frame) { + showwaves->audio_frames = + showwaves->last_frame = f; + } else { + 
showwaves->last_frame->next = f; + showwaves->last_frame = f; + } + showwaves->total_samples += insamples->nb_samples; + + return 0; + } + +end: + av_frame_free(&insamples); + return ret; +} + +static const AVFilterPad showwavespic_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .config_props = showwavespic_config_input, + .filter_frame = showwavespic_filter_frame, + }, + { NULL } +}; + +static const AVFilterPad showwavespic_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + .request_frame = request_frame, + }, + { NULL } +}; + +AVFilter ff_avf_showwavespic = { + .name = "showwavespic", + .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."), + .init = init, + .uninit = uninit, + .query_formats = query_formats, + .priv_size = sizeof(ShowWavesContext), + .inputs = showwavespic_inputs, + .outputs = showwavespic_outputs, + .priv_class = &showwavespic_class, +}; + +#endif // CONFIG_SHOWWAVESPIC_FILTER diff --git a/libavfilter/version.h b/libavfilter/version.h index a964392..b349aaf 100644 --- a/libavfilter/version.h +++ b/libavfilter/version.h @@ -30,7 +30,7 @@ #include "libavutil/version.h" #define LIBAVFILTER_VERSION_MAJOR 5 -#define LIBAVFILTER_VERSION_MINOR 12 +#define LIBAVFILTER_VERSION_MINOR 13 #define LIBAVFILTER_VERSION_MICRO 100 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |