This patch adds the `compand` audio filter from the FFmpeg master branch
(currently at 7f0f47b3df), adapted to work with Libav.
The filter uses `av_strtok` to parse its input parameters; that function
appears to have been removed from Libav, so I inlined it into the filter.
The same goes for `av_clipd_c`, and for `av_samples_alloc_array_and_samples`,
except that the latter looks like a recent FFmpeg addition rather than a
removal.
---
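Not part of the patch, just a note for reviewers: the sketch below shows the
kind of space-separated parsing config_output() performs with the inlined
av_strtok, approximated here with ISO C strtok (which splits the same way for
a simple space-delimited list):

    /* Tokenize an "attacks"-style option string and convert each token to a
     * double, roughly as config_output() does per channel. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char attacks[] = "0.3 0.3";   /* one attack time per channel */
        char *tok;
        int i = 0;

        for (tok = strtok(attacks, " "); tok; tok = strtok(NULL, " "), i++) {
            double v = strtod(tok, NULL);
            if (v < 0)                /* the filter rejects negative times */
                return 1;
            printf("channel %d attack = %g s\n", i, v);
        }
        return 0;
    }
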
Changelog | 1 +
doc/filters.texi | 74 ++++++
libavfilter/Makefile | 1 +
libavfilter/af_compand.c | 595 +++++++++++++++++++++++++++++++++++++++++++++++
libavfilter/allfilters.c | 1 +
libavfilter/version.h | 2 +-
6 files changed, 673 insertions(+), 1 deletion(-)
create mode 100644 libavfilter/af_compand.c
diff --git a/Changelog b/Changelog
index edd4754..fabbeee 100644
--- a/Changelog
+++ b/Changelog
@@ -56,6 +56,7 @@ version 10:
- ATRAC3+ decoder
- framepack filter
- Mirillis FIC video decoder
+- compand audio filter
version 9:
diff --git a/doc/filters.texi b/doc/filters.texi
index 8c83b4e..8863384 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -467,6 +467,80 @@ To fix a 5.1 WAV improperly encoded in AAC's native channel order
avconv -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
@end example
+@section compand
+Compress or expand audio dynamic range.
+
+A description of the accepted options follows.
+
+@table @option
+
+@item attacks
+@item decays
+Set list of times in seconds for each channel over which the instantaneous level
+of the input signal is averaged to determine its volume. @var{attacks} refers to
+increase of volume and @var{decays} refers to decrease of volume. For most
+situations, the attack time (response to the audio getting louder) should be
+shorter than the decay time because the human ear is more sensitive to sudden
+loud audio than sudden soft audio. Typical value for attack is 0.3 seconds and
+for decay 0.8 seconds.
+
+@item points
+Set list of points for the transfer function, specified in dB relative to the
+maximum possible signal amplitude. Each list of key points must be defined
+using the following syntax: @code{x0/y0 x1/y1 x2/y2 ....}
+
+The input values must be in strictly increasing order but the transfer function
+does not have to be monotonically rising. The point @code{0/0} is assumed but
+may be overridden (by @code{0/out-dBn}). Typical values for the transfer
+function are @code{-70/-70 -60/-20}.
+
+@item soft-knee
+Set the amount by which the points where adjacent line segments of the transfer
+function meet are rounded off. Default is 0.01.
+
+@item gain
+Set additional gain in dB to be applied at all points on the transfer function,
+allowing easy adjustment of the overall gain. Default is 0.
+
+@item volume
+Set initial volume in dB to be assumed for each channel when filtering starts.
+This permits the user to supply a nominal level initially, so that, for
+example, a very large gain is not applied to initial signal levels before the
+companding has begun to operate. A typical value for audio which is initially
+quiet is -90 dB. Default is 0.
+
+@item delay
+Set delay in seconds. Default is 0. The input audio is analysed immediately,
+but audio is delayed before being fed to the volume adjuster. Specifying a
+delay approximately equal to the attack/decay times allows the filter to
+effectively operate in predictive rather than reactive mode.
+
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Make music with both quiet and loud passages suitable for listening in a noisy
+environment:
+@example
+compand=.3 .3:1 1:-90/-60 -60/-40 -40/-30 -20/-20:6:0:-90:0.2
+@end example
+
+@item
+Noise-gate for when the noise is at a lower level than the signal:
+@example
+compand=.1 .1:.2 .2:-900/-900 -50.1/-900 -50/-50:.01:0:-90:.1
+@end example
+
+@item
+Here is another noise-gate, this time for when the noise is at a higher level
+than the signal (making it, in some ways, similar to squelch):
+@example
+compand=.1 .1:.1 .1:-45.1/-45.1 -45/-900 0/-900:.01:45:-90:.1
+@end example
+@end itemize
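+
+For instance, the first of the examples above can be passed to avconv
+directly:
+@example
+avconv -i in.wav -filter 'compand=.3 .3:1 1:-90/-60 -60/-40 -40/-30 -20/-20:6:0:-90:0.2' out.wav
+@end example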
+
@section join
Join multiple input streams into one multi-channel stream.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 92c1561..2badb3e 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -37,6 +37,7 @@ OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o
+OBJS-$(CONFIG_COMPAND_FILTER) += af_compand.o
OBJS-$(CONFIG_JOIN_FILTER) += af_join.o
OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o
OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
diff --git a/libavfilter/af_compand.c b/libavfilter/af_compand.c
new file mode 100644
index 0000000..6308cda
--- /dev/null
+++ b/libavfilter/af_compand.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 1999 Chris Bagwell
+ * Copyright (c) 1999 Nick Bailey
+ * Copyright (c) 2007 Rob Sykes <[email protected]>
+ * Copyright (c) 2013 Paul B Mahol
+ * Copyright (c) 2014 Andrew Kelley
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/**
+ * @file
+ * audio compand filter
+ */
+
+#include "libavutil/mem.h"
+#include "libavutil/avassert.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/opt.h"
+#include "libavutil/common.h"
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct ChanParam {
+ double attack;
+ double decay;
+ double volume;
+} ChanParam;
+
+typedef struct CompandSegment {
+ double x, y;
+ double a, b;
+} CompandSegment;
+
+typedef struct CompandContext {
+ const AVClass *class;
+ char *attacks, *decays, *points;
+ CompandSegment *segments;
+ ChanParam *channels;
+ double in_min_lin;
+ double out_min_lin;
+ double curve_dB;
+ double gain_dB;
+ double initial_volume;
+ double delay;
+ uint8_t **delayptrs;
+ int delay_samples;
+ int delay_count;
+ int delay_index;
+ int64_t pts;
+
+ int (*compand)(AVFilterContext *ctx, AVFrame *frame);
+} CompandContext;
+
+#define OFFSET(x) offsetof(CompandContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+
+static const AVOption compand_options[] = {
+ { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
+ { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, 0.01, 900, A },
+ { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, A },
+ { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 0, A },
+ { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 20, A },
+ { NULL }
+};
+
+static const AVClass compand_class = {
+ .class_name = "compand filter",
+ .item_name = av_default_item_name,
+ .option = compand_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static av_cold int init(AVFilterContext *ctx)
+{
+ CompandContext *s = ctx->priv;
+
+ if (!s->attacks || !s->decays || !s->points) {
+ av_log(ctx, AV_LOG_ERROR, "Missing attacks and/or decays and/or points.\n");
+ return AVERROR(EINVAL);
+ }
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ CompandContext *s = ctx->priv;
+
+ av_freep(&s->channels);
+ av_freep(&s->segments);
+ if (s->delayptrs)
+ av_freep(&s->delayptrs[0]);
+ av_freep(&s->delayptrs);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterChannelLayouts *layouts;
+ AVFilterFormats *formats;
+ static const enum AVSampleFormat sample_fmts[] = {
+ AV_SAMPLE_FMT_DBLP,
+ AV_SAMPLE_FMT_NONE
+ };
+
+ layouts = ff_all_channel_layouts();
+ if (!layouts)
+ return AVERROR(ENOMEM);
+ ff_set_common_channel_layouts(ctx, layouts);
+
+ formats = ff_make_format_list(sample_fmts);
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_formats(ctx, formats);
+
+ formats = ff_all_samplerates();
+ if (!formats)
+ return AVERROR(ENOMEM);
+ ff_set_common_samplerates(ctx, formats);
+
+ return 0;
+}
+
+static void count_items(char *item_str, int *nb_items)
+{
+ char *p;
+
+ *nb_items = 1;
+ for (p = item_str; *p; p++) {
+ if (*p == ' ')
+ (*nb_items)++;
+ }
+
+}
+
+static void update_volume(ChanParam *cp, double in)
+{
+ double delta = in - cp->volume;
+
+ if (delta > 0.0)
+ cp->volume += delta * cp->attack;
+ else
+ cp->volume += delta * cp->decay;
+}
+
+static double get_volume(CompandContext *s, double in_lin)
+{
+ CompandSegment *cs;
+ double in_log, out_log;
+ int i;
+
+ if (in_lin < s->in_min_lin)
+ return s->out_min_lin;
+
+ in_log = log(in_lin);
+
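+ /* Locate the transfer-function segment for in_log, then evaluate its
+ * quadratic out = y + b*dx + a*dx^2 at dx = in_log - x (a and b are
+ * precomputed in config_output()). */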
+ for (i = 1;; i++)
+ if (in_log <= s->segments[i + 1].x)
+ break;
+
+ cs = &s->segments[i];
+ in_log -= cs->x;
+ out_log = cs->y + in_log * (cs->a * in_log + cs->b);
+
+ return exp(out_log);
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
+{
+ av_assert2(amin <= amax);
+ if (a < amin)
+ return amin;
+ else if (a > amax)
+ return amax;
+ else
+ return a;
+}
+
+static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
+{
+ CompandContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ const int nb_samples = frame->nb_samples;
+ AVFrame *out_frame;
+ int chan, i;
+
+ if (av_frame_is_writable(frame)) {
+ out_frame = frame;
+ } else {
+ out_frame = ff_get_audio_buffer(inlink, nb_samples);
+ if (!out_frame)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_frame, frame);
+ }
+
+ for (chan = 0; chan < channels; chan++) {
+ const double *src = (double *)frame->extended_data[chan];
+ double *dst = (double *)out_frame->extended_data[chan];
+ ChanParam *cp = &s->channels[chan];
+
+ for (i = 0; i < nb_samples; i++) {
+ update_volume(cp, fabs(src[i]));
+
+ dst[i] = av_clipd_c(src[i] * get_volume(s, cp->volume), -1, 1);
+ }
+ }
+
+ if (frame != out_frame)
+ av_frame_free(&frame);
+
+ return ff_filter_frame(ctx->outputs[0], out_frame);
+}
+
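+/* Wrap a ring-buffer index: the index is only ever advanced by one before
+ * wrapping, so a single conditional subtraction is enough. */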
+#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
+
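+/* Same as compand_nodelay(), except the gain derived from the current input
+ * level is applied to older samples held in a per-channel ring buffer, so the
+ * output lags the level detector by delay_samples samples. */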
+static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
+{
+ CompandContext *s = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ const int channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
+ const int nb_samples = frame->nb_samples;
+ int chan, i, av_uninit(dindex), oindex, av_uninit(count);
+ AVFrame *out_frame = NULL;
+
+ av_assert2(channels > 0); /* would corrupt delay_count and delay_index */
+
+ for (chan = 0; chan < channels; chan++) {
+ const double *src = (double *)frame->extended_data[chan];
+ double *dbuf = (double *)s->delayptrs[chan];
+ ChanParam *cp = &s->channels[chan];
+ double *dst;
+
+ count = s->delay_count;
+ dindex = s->delay_index;
+ for (i = 0, oindex = 0; i < nb_samples; i++) {
+ const double in = src[i];
+ update_volume(cp, fabs(in));
+
+ if (count >= s->delay_samples) {
+ if (!out_frame) {
+ out_frame = ff_get_audio_buffer(inlink, nb_samples - i);
+ if (!out_frame)
+ return AVERROR(ENOMEM);
+ av_frame_copy_props(out_frame, frame);
+ out_frame->pts = s->pts;
+ s->pts += av_rescale_q(nb_samples - i, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+ }
+
+ dst = (double *)out_frame->extended_data[chan];
+ dst[oindex++] = av_clipd_c(dbuf[dindex] * get_volume(s, cp->volume), -1, 1);
+ } else {
+ count++;
+ }
+
+ dbuf[dindex] = in;
+ dindex = MOD(dindex + 1, s->delay_samples);
+ }
+ }
+
+ s->delay_count = count;
+ s->delay_index = dindex;
+
+ av_frame_free(&frame);
+ return out_frame ? ff_filter_frame(ctx->outputs[0], out_frame) : 0;
+}
+
+static int compand_drain(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ CompandContext *s = ctx->priv;
+ const int channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
+ int chan, i, dindex;
+ AVFrame *frame = NULL;
+
+ frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
+ if (!frame)
+ return AVERROR(ENOMEM);
+ frame->pts = s->pts;
+ s->pts += av_rescale_q(frame->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+ for (chan = 0; chan < channels; chan++) {
+ double *dbuf = (double *)s->delayptrs[chan];
+ double *dst = (double *)frame->extended_data[chan];
+ ChanParam *cp = &s->channels[chan];
+
+ dindex = s->delay_index;
+ for (i = 0; i < frame->nb_samples; i++) {
+ dst[i] = av_clipd_c(dbuf[dindex] * get_volume(s, cp->volume), -1, 1);
+ dindex = MOD(dindex + 1, s->delay_samples);
+ }
+ }
+ s->delay_count -= frame->nb_samples;
+ s->delay_index = dindex;
+
+ return ff_filter_frame(outlink, frame);
+}
+
+static char *av_strtok(char *s, const char *delim, char **saveptr)
+{
+ char *tok;
+
+ if (!s && !(s = *saveptr))
+ return NULL;
+
+ /* skip leading delimiters */
+ s += strspn(s, delim);
+
+ /* s now points to the first non delimiter char, or to the end of the string */
+ if (!*s) {
+ *saveptr = NULL;
+ return NULL;
+ }
+ tok = s++;
+
+ /* skip non delimiters */
+ s += strcspn(s, delim);
+ if (*s) {
+ *s = 0;
+ *saveptr = s+1;
+ } else {
+ *saveptr = NULL;
+ }
+
+ return tok;
+}
+
+
+static int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align)
+{
+ int ret, nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;
+
+ *audio_data = av_mallocz(nb_planes * sizeof(**audio_data));
+ if (!*audio_data)
+ return AVERROR(ENOMEM);
+ ret = av_samples_alloc(*audio_data, linesize, nb_channels,
+ nb_samples, sample_fmt, align);
+ if (ret < 0)
+ av_freep(audio_data);
+ return ret;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ CompandContext *s = ctx->priv;
+ const int channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
+ const int sample_rate = outlink->sample_rate;
+ double radius = s->curve_dB * M_LN10 / 20;
+ int nb_attacks, nb_decays, nb_points;
+ char *p, *saveptr = NULL;
+ int new_nb_items, num;
+ int i;
+
+ count_items(s->attacks, &nb_attacks);
+ count_items(s->decays, &nb_decays);
+ count_items(s->points, &nb_points);
+
+ if ((nb_attacks > channels) || (nb_decays > channels)) {
+ av_log(ctx, AV_LOG_ERROR, "Number of attacks/decays bigger than number of channels.\n");
+ return AVERROR(EINVAL);
+ }
+
+ uninit(ctx);
+
+ s->channels = av_mallocz_array(channels, sizeof(*s->channels));
+ s->segments = av_mallocz_array((nb_points + 4) * 2, sizeof(*s->segments));
+
+ if (!s->channels || !s->segments)
+ return AVERROR(ENOMEM);
+
+ p = s->attacks;
+ for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
+ char *tstr = av_strtok(p, " ", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
+ if (s->channels[i].attack < 0)
+ return AVERROR(EINVAL);
+ }
+ nb_attacks = new_nb_items;
+
+ p = s->decays;
+ for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
+ char *tstr = av_strtok(p, " ", &saveptr);
+ p = NULL;
+ new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
+ if (s->channels[i].decay < 0)
+ return AVERROR(EINVAL);
+ }
+ nb_decays = new_nb_items;
+
+ if (nb_attacks != nb_decays) {
+ av_log(ctx, AV_LOG_ERROR, "Number of attacks %d differs from number of decays %d.\n", nb_attacks, nb_decays);
+ return AVERROR(EINVAL);
+ }
+
+#define S(x) s->segments[2 * ((x) + 1)]
+ p = s->points;
+ for (i = 0, new_nb_items = 0; i < nb_points; i++) {
+ char *tstr = av_strtok(p, " ", &saveptr);
+ p = NULL;
+ if (sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
+ av_log(ctx, AV_LOG_ERROR, "Invalid and/or missing input/output value.\n");
+ return AVERROR(EINVAL);
+ }
+ if (i && S(i - 1).x > S(i).x) {
+ av_log(ctx, AV_LOG_ERROR, "Transfer function input values must be increasing.\n");
+ return AVERROR(EINVAL);
+ }
+ S(i).y -= S(i).x;
+ av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
+ new_nb_items++;
+ }
+ num = new_nb_items;
+
+ /* Add 0,0 if necessary */
+ if (num == 0 || S(num - 1).x)
+ num++;
+
+#undef S
+#define S(x) s->segments[2 * (x)]
+ /* Add a tail off segment at the start */
+ S(0).x = S(1).x - 2 * s->curve_dB;
+ S(0).y = S(1).y;
+ num++;
+
+ /* Join adjacent colinear segments */
+ for (i = 2; i < num; i++) {
+ double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
+ double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
+ int j;
+
+ if (fabs(g1 - g2))
+ continue;
+ num--;
+ for (j = --i; j < num; j++)
+ S(j) = S(j + 1);
+ }
+
+ for (i = 0; !i || s->segments[i - 2].x; i += 2) {
+ s->segments[i].y += s->gain_dB;
+ s->segments[i].x *= M_LN10 / 20;
+ s->segments[i].y *= M_LN10 / 20;
+ }
+
+#define L(x) s->segments[i - (x)]
+ for (i = 4; s->segments[i - 2].x; i += 2) {
+ double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
+
+ L(4).a = 0;
+ L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
+
+ L(2).a = 0;
+ L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
+
+ theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
+ len = sqrt(pow(L(2).x - L(4).x, 2.) + pow(L(2).y - L(4).y, 2.));
+ r = FFMIN(radius, len);
+ L(3).x = L(2).x - r * cos(theta);
+ L(3).y = L(2).y - r * sin(theta);
+
+ theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
+ len = sqrt(pow(L(0).x - L(2).x, 2.) + pow(L(0).y - L(2).y, 2.));
+ r = FFMIN(radius, len / 2);
+ x = L(2).x + r * cos(theta);
+ y = L(2).y + r * sin(theta);
+
+ cx = (L(3).x + L(2).x + x) / 3;
+ cy = (L(3).y + L(2).y + y) / 3;
+
+ L(2).x = x;
+ L(2).y = y;
+
+ in1 = cx - L(3).x;
+ out1 = cy - L(3).y;
+ in2 = L(2).x - L(3).x;
+ out2 = L(2).y - L(3).y;
+ L(3).a = (out2 / in2 - out1 / in1) / (in2-in1);
+ L(3).b = out1 / in1 - L(3).a * in1;
+ }
+ L(3).x = 0;
+ L(3).y = L(2).y;
+
+ s->in_min_lin = exp(s->segments[1].x);
+ s->out_min_lin = exp(s->segments[1].y);
+
+ for (i = 0; i < channels; i++) {
+ ChanParam *cp = &s->channels[i];
+
+ if (cp->attack > 1.0 / sample_rate)
+ cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
+ else
+ cp->attack = 1.0;
+ if (cp->decay > 1.0 / sample_rate)
+ cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
+ else
+ cp->decay = 1.0;
+ cp->volume = pow(10.0, s->initial_volume / 20);
+ }
+
+ s->delay_samples = s->delay * sample_rate;
+ if (s->delay_samples > 0) {
+ int ret;
+ if ((ret = av_samples_alloc_array_and_samples(&s->delayptrs, NULL,
+ channels,
+ s->delay_samples,
+ outlink->format, 0)) < 0)
+ return ret;
+ s->compand = compand_delay;
+ } else {
+ s->compand = compand_nodelay;
+ }
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
+{
+ AVFilterContext *ctx = inlink->dst;
+ CompandContext *s = ctx->priv;
+
+ return s->compand(ctx, frame);
+}
+
+static int request_frame(AVFilterLink *outlink)
+{
+ AVFilterContext *ctx = outlink->src;
+ CompandContext *s = ctx->priv;
+ int ret;
+
+ ret = ff_request_frame(ctx->inputs[0]);
+
+ if (ret == AVERROR_EOF && s->delay_count)
+ ret = compand_drain(outlink);
+
+ return ret;
+}
+
+static const AVFilterPad compand_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad compand_outputs[] = {
+ {
+ .name = "default",
+ .request_frame = request_frame,
+ .config_props = config_output,
+ .type = AVMEDIA_TYPE_AUDIO,
+ },
+ { NULL }
+};
+
+
+AVFilter ff_af_compand = {
+ .name = "compand",
+ .description = NULL_IF_CONFIG_SMALL("Compress or expand audio dynamic range."),
+ .query_formats = query_formats,
+ .priv_size = sizeof(CompandContext),
+ .priv_class = &compand_class,
+ .init = init,
+ .uninit = uninit,
+ .inputs = compand_inputs,
+ .outputs = compand_outputs,
+};
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 9702a0a..e47a22e 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -54,6 +54,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(ATRIM, atrim, af);
REGISTER_FILTER(CHANNELMAP, channelmap, af);
REGISTER_FILTER(CHANNELSPLIT, channelsplit, af);
+ REGISTER_FILTER(COMPAND, compand, af);
REGISTER_FILTER(JOIN, join, af);
REGISTER_FILTER(RESAMPLE, resample, af);
REGISTER_FILTER(VOLUME, volume, af);
diff --git a/libavfilter/version.h b/libavfilter/version.h
index 1684aa5..f09b501 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 4
-#define LIBAVFILTER_VERSION_MINOR 1
+#define LIBAVFILTER_VERSION_MINOR 2
#define LIBAVFILTER_VERSION_MICRO 0
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
--
1.8.3.2