The branch, master has been updated
via 8ff34b29c71c1f108c879496ce25be04c716159b (commit)
via dfc3256da12e52947cb9e145140ddc00bc0758fe (commit)
via 81c1004891fb7316e784ff07f2ea072ccdb68b36 (commit)
via 2e0885a091a3e1649a00d98706325859b06b754e (commit)
via 386264ae164b675a57dfb43c7354261d06be20e8 (commit)
from 2451e06f19fe148456e4994b6f1259683cb04ef3 (commit)
- Log -----------------------------------------------------------------
commit 8ff34b29c71c1f108c879496ce25be04c716159b
Author: Niklas Haas <[email protected]>
AuthorDate: Mon Sep 1 22:29:22 2025 +0200
Commit: Niklas Haas <[email protected]>
CommitDate: Thu Sep 4 23:12:34 2025 +0000
fate/filter-audio: update acrossfade to test multiple inputs
diff --git a/tests/fate/filter-audio.mak b/tests/fate/filter-audio.mak
index ac7f353d59..eee0209c59 100644
--- a/tests/fate/filter-audio.mak
+++ b/tests/fate/filter-audio.mak
@@ -57,7 +57,8 @@ FATE_AFILTER_SAMPLES-$(call FILTERDEMDECENCMUX, ACROSSFADE, WAV, PCM_S16LE, PCM_
fate-filter-acrossfade: tests/data/asynth-44100-2.wav
fate-filter-acrossfade: SRC = $(TARGET_PATH)/tests/data/asynth-44100-2.wav
fate-filter-acrossfade: SRC2 = $(TARGET_SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav
-fate-filter-acrossfade: CMD = framecrc -i $(SRC) -i $(SRC2) -filter_complex acrossfade=d=2:c1=log:c2=exp
+fate-filter-acrossfade: SRC3 = $(TARGET_SAMPLES)/audio-reference/chorusnoise_2ch_44kHz_s16.wav
+fate-filter-acrossfade: CMD = framecrc -i $(SRC) -i $(SRC2) -i $(SRC3) -filter_complex acrossfade=n=3:d=2:c1=log:c2=exp
FATE_AFILTER-$(call FILTERDEMDECENCMUX, AGATE ARESAMPLE, WAV, PCM_S16LE, PCM_S16LE, WAV) += fate-filter-agate
fate-filter-agate: tests/data/asynth-44100-2.wav
diff --git a/tests/ref/fate/filter-acrossfade b/tests/ref/fate/filter-acrossfade
index 92231bec7d..fe45d4ec10 100644
--- a/tests/ref/fate/filter-acrossfade
+++ b/tests/ref/fate/filter-acrossfade
@@ -107,26 +107,7 @@
0, 491792, 491792, 4096, 16384, 0xad648c75
0, 495888, 495888, 4096, 16384, 0xe24fa60b
0, 499984, 499984, 4096, 16384, 0x96b1bf9e
-0, 504080, 504080, 4096, 16384, 0xf1e34827
-0, 508176, 508176, 4096, 16384, 0xc267c4d7
-0, 512272, 512272, 4096, 16384, 0x5d5a2115
-0, 516368, 516368, 4096, 16384, 0xfd6024aa
-0, 520464, 520464, 4096, 16384, 0x9cd58cc0
-0, 524560, 524560, 4096, 16384, 0xb458f309
-0, 528656, 528656, 4096, 16384, 0xaee359c9
-0, 532752, 532752, 4096, 16384, 0x9aa3c89b
-0, 536848, 536848, 4096, 16384, 0x2a4f1f66
-0, 540944, 540944, 4096, 16384, 0x0c817abf
-0, 545040, 545040, 4096, 16384, 0x1e97f79e
-0, 549136, 549136, 4096, 16384, 0x12a32ed0
-0, 553232, 553232, 4096, 16384, 0x722bfc87
-0, 557328, 557328, 4096, 16384, 0x65e0fab9
-0, 561424, 561424, 4096, 16384, 0x55cca852
-0, 565520, 565520, 4096, 16384, 0x19a10ac6
-0, 569616, 569616, 4096, 16384, 0x078180b2
-0, 573712, 573712, 4096, 16384, 0xf6f8fc53
-0, 577808, 577808, 4096, 16384, 0xc72ffe63
-0, 581904, 581904, 4096, 16384, 0xf9f837fd
-0, 586000, 586000, 4096, 16384, 0x78d70334
-0, 590096, 590096, 4096, 16384, 0x061d910c
-0, 594192, 594192, 1158, 4632, 0xdc098a7f
+0, 504080, 504080, 3070, 12280, 0x1368a641
+0, 507150, 507150, 88200, 352800, 0xfdb19b5d
+0, 595350, 595350, 1912, 7648, 0x5d71938c
+0, 597262, 597262, 3097, 12388, 0xd852f4c5
commit dfc3256da12e52947cb9e145140ddc00bc0758fe
Author: Niklas Haas <[email protected]>
AuthorDate: Wed Sep 3 13:38:41 2025 +0200
Commit: Niklas Haas <[email protected]>
CommitDate: Thu Sep 4 23:12:34 2025 +0000
avfilter/af_afade: support multiple inputs
Instead of just 2 files, generalize this filter to support crossfading
arbitrarily many files. This makes the filter essentially operate similarly
to the `concat` filter, chaining multiple files one after another.
Aside from just adding more input pads, this requires rewriting the
activate function. Instead of a finite state machine, we keep track of the
currently active input index, and advance it only once the current input is
fully exhausted.
This results in arguably simpler logic overall.
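As a rough illustration of the scheduling described above (a standalone toy,
not the filter's actual code; the queue sizes and names are invented for this
sketch): samples are passed through from the active input until only the
crossfade window remains, then the tail is blended into the next input and
the index advances.
    /* Toy model of the index-based scheduling: pass samples from the active
     * input until only the crossfade region is left, then crossfade into the
     * next input and advance the index.  Not FFmpeg API. */
    #include <stdio.h>
    #define NB_INPUTS     3
    #define XFADE_SAMPLES 4
    int main(void)
    {
        int queued[NB_INPUTS] = { 10, 7, 5 }; /* fake samples left per input */
        int idx = 0;                          /* currently active input */
        while (idx < NB_INPUTS) {
            if (idx == NB_INPUTS - 1) {
                /* last input: drain it completely */
                printf("pass %d samples from input %d\n", queued[idx], idx);
                break;
            } else if (queued[idx] > XFADE_SAMPLES) {
                /* pass everything except the crossfade tail */
                printf("pass %d samples from input %d\n",
                       queued[idx] - XFADE_SAMPLES, idx);
                queued[idx] = XFADE_SAMPLES;
            } else {
                /* blend the tail of this input with the head of the next,
                 * shortening the fade if either side is too short */
                int n = queued[idx] < queued[idx + 1] ? queued[idx] : queued[idx + 1];
                printf("crossfade %d samples: input %d -> input %d\n", n, idx, idx + 1);
                queued[idx + 1] -= n;
                idx++;
            }
        }
        return 0;
    }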
diff --git a/doc/filters.texi b/doc/filters.texi
index 1b5665d36e..5b52fc5521 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -577,6 +577,11 @@ The cross fade is applied for specified duration near the end of first stream.
The filter accepts the following options:
@table @option
+@item inputs, n
+Specify the number of inputs to crossfade. When crossfading multiple inputs,
+each input will be concatenated and crossfaded in sequence, similar to the
+@ref{concat filter}. Default is 2.
+
@item nb_samples, ns
Specify the number of samples for which the cross fade effect has to last.
At the end of the cross fade effect the first input audio will be completely
@@ -615,6 +620,11 @@ Cross fade from one input to another but without overlapping:
@example
ffmpeg -i first.flac -i second.flac -filter_complex acrossfade=d=10:o=0:c1=exp:c2=exp output.flac
@end example
+
+Concatenate multiple inputs with cross fade between each:
+@example
+ffmpeg -i first.flac -i second.flac -i third.flac -filter_complex acrossfade=n=3 output.flac
+@end example
@end itemize
@section acrossover
@@ -31008,6 +31018,7 @@ bench=start,selectivecolor=reds=-.2 .12 -.49,bench=stop
@end example
@end itemize
+@anchor{concat filter}
@section concat
Concatenate audio and video streams, joining them together one after the
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index c3f4fc55f4..055f234f7c 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -26,6 +26,7 @@
#include "config_components.h"
#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
@@ -33,6 +34,7 @@
typedef struct AudioFadeContext {
const AVClass *class;
+ int nb_inputs;
int type;
int curve, curve2;
int64_t nb_samples;
@@ -43,7 +45,7 @@ typedef struct AudioFadeContext {
double unity;
int overlap;
int64_t pts;
- int xfade_status;
+ int xfade_idx;
void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
int nb_samples, int channels, int direction,
@@ -451,6 +453,8 @@ const FFFilter ff_af_afade = {
#if CONFIG_ACROSSFADE_FILTER
static const AVOption acrossfade_options[] = {
+ { "inputs", "set number of input files to cross fade",
OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT32_MAX, FLAGS },
+ { "n", "set number of input files to cross fade",
OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT32_MAX, FLAGS },
{ "nb_samples", "set number of samples for cross fade duration",
OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT32_MAX/10,
FLAGS },
{ "ns", "set number of samples for cross fade duration",
OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT32_MAX/10,
FLAGS },
{ "duration", "set cross fade duration",
OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
@@ -566,18 +570,22 @@ static int pass_samples(AVFilterLink *inlink, AVFilterLink *outlink, unsigned nb
return ff_filter_frame(outlink, in);
}
-static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink *in1)
+static int pass_crossfade(AVFilterContext *ctx, const int idx0, const int idx1)
{
AudioFadeContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out, *cf[2] = { NULL };
int ret;
+ AVFilterLink *in0 = ctx->inputs[idx0];
+ AVFilterLink *in1 = ctx->inputs[idx1];
int queued_samples0 = ff_inlink_queued_samples(in0);
int queued_samples1 = ff_inlink_queued_samples(in1);
/* Limit to the relevant region */
av_assert1(queued_samples0 <= s->nb_samples);
+ if (ff_outlink_get_status(in1) && idx1 < s->nb_inputs - 1)
+ queued_samples1 /= 2; /* reserve second half for next fade-out */
queued_samples1 = FFMIN(queued_samples1, s->nb_samples);
if (s->overlap) {
@@ -586,7 +594,7 @@ static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink
av_log(ctx, AV_LOG_WARNING, "Input %d duration (%d samples) "
"is shorter than crossfade duration (%"PRId64" samples), "
"crossfade will be shorter by %"PRId64" samples.\n",
- queued_samples0 <= queued_samples1 ? 0 : 1,
+ queued_samples0 <= queued_samples1 ? idx0 : idx1,
nb_samples, s->nb_samples, s->nb_samples - nb_samples);
if (queued_samples0 > nb_samples) {
@@ -628,10 +636,11 @@ static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink
return ff_filter_frame(outlink, out);
} else {
if (queued_samples0 < s->nb_samples) {
- av_log(ctx, AV_LOG_WARNING, "Input 0 duration (%d samples) "
+ av_log(ctx, AV_LOG_WARNING, "Input %d duration (%d samples) "
"is shorter than crossfade duration (%"PRId64" samples), "
"fade-out will be shorter by %"PRId64" samples.\n",
- queued_samples0, s->nb_samples, s->nb_samples - queued_samples0);
+ idx0, queued_samples0, s->nb_samples,
+ s->nb_samples - queued_samples0);
if (!queued_samples0)
goto fade_in;
}
@@ -658,10 +667,11 @@ static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink
fade_in:
if (queued_samples1 < s->nb_samples) {
- av_log(ctx, AV_LOG_WARNING, "Input 1 duration (%d samples) "
+ av_log(ctx, AV_LOG_WARNING, "Input %d duration (%d samples) "
"is shorter than crossfade duration (%"PRId64" samples), "
"fade-in will be shorter by %"PRId64" samples.\n",
- queued_samples1, s->nb_samples, s->nb_samples - queued_samples1);
+ idx1, ff_inlink_queued_samples(in1), s->nb_samples,
+ s->nb_samples - queued_samples1);
if (!queued_samples1)
return 0;
}
@@ -689,51 +699,84 @@ static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink
static int activate(AVFilterContext *ctx)
{
AudioFadeContext *s = ctx->priv;
+ const int idx0 = s->xfade_idx;
+ const int idx1 = s->xfade_idx + 1;
AVFilterLink *outlink = ctx->outputs[0];
- int ret;
+ AVFilterLink *in0 = ctx->inputs[idx0];
FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
- // Read first input until EOF
- if (s->xfade_status == 0) {
- int queued_samples = ff_inlink_queued_samples(ctx->inputs[0]);
- if (queued_samples > s->nb_samples) {
- AVFrame *frame = ff_inlink_peek_frame(ctx->inputs[0], 0);
- if (queued_samples - s->nb_samples >= frame->nb_samples)
- return pass_frame(ctx->inputs[0], outlink, &s->pts);
- }
- if (ff_outlink_get_status(ctx->inputs[0])) {
- if (queued_samples > s->nb_samples)
- return pass_samples(ctx->inputs[0], outlink, queued_samples - s->nb_samples, &s->pts);
- s->xfade_status = 1;
- } else {
- FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[0]);
- }
+ if (idx0 == s->nb_inputs - 1) {
+ /* Last active input, read until EOF */
+ if (ff_inlink_queued_frames(in0))
+ return pass_frame(in0, outlink, &s->pts);
+ FF_FILTER_FORWARD_STATUS(in0, outlink);
+ FF_FILTER_FORWARD_WANTED(outlink, in0);
+ return FFERROR_NOT_READY;
}
- // Read second input until enough data is ready or EOF
- if (s->xfade_status == 1) {
- if (ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples || ff_outlink_get_status(ctx->inputs[1])) {
- s->xfade_status = 2;
- } else {
- FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[1]);
- }
+
+ AVFilterLink *in1 = ctx->inputs[idx1];
+ int queued_samples0 = ff_inlink_queued_samples(in0);
+ if (queued_samples0 > s->nb_samples) {
+ AVFrame *frame = ff_inlink_peek_frame(in0, 0);
+ if (queued_samples0 - s->nb_samples >= frame->nb_samples)
+ return pass_frame(in0, outlink, &s->pts);
+ }
+
+ /* Continue reading until EOF */
+ if (ff_outlink_get_status(in0)) {
+ if (queued_samples0 > s->nb_samples)
+ return pass_samples(in0, outlink, queued_samples0 - s->nb_samples, &s->pts);
+ } else {
+ FF_FILTER_FORWARD_WANTED(outlink, in0);
+ return FFERROR_NOT_READY;
+ }
+
+ /* At this point, in0 has reached EOF with no more samples remaining
+ * except those that we want to crossfade */
+ av_assert0(queued_samples0 <= s->nb_samples);
+ int queued_samples1 = ff_inlink_queued_samples(in1);
+
+ /* If this clip is sandwiched between two other clips, buffer at least
+ * twice the total crossfade duration to ensure that we won't reach EOF
+ * during the second fade (in which case we would shorten the fade) */
+ int needed_samples = s->nb_samples;
+ if (idx1 < s->nb_inputs - 1)
+ needed_samples *= 2;
+
+ if (queued_samples1 >= needed_samples || ff_outlink_get_status(in1)) {
+ /* The first filter may EOF before delivering any samples, in which
+ * case it's possible for pass_crossfade() to be a no-op. Just ensure
+ * the activate() function runs again after incrementing the index to
+ * ensure we correctly move on to the next input in that case. */
+ s->xfade_idx++;
+ ff_filter_set_ready(ctx, 10);
+ return pass_crossfade(ctx, idx0, idx1);
+ } else {
+ FF_FILTER_FORWARD_WANTED(outlink, in1);
+ return FFERROR_NOT_READY;
}
- // Do crossfade
- if (s->xfade_status == 2) {
- ret = pass_crossfade(ctx, ctx->inputs[0], ctx->inputs[1]);
+}
+
+static av_cold int acrossfade_init(AVFilterContext *ctx)
+{
+ AudioFadeContext *s = ctx->priv;
+ int ret;
+
+ for (int i = 0; i < s->nb_inputs; i++) {
+ AVFilterPad pad = {
+ .name = av_asprintf("crossfade%d", i),
+ .type = AVMEDIA_TYPE_AUDIO,
+ };
+ if (!pad.name)
+ return AVERROR(ENOMEM);
+
+ ret = ff_append_inpad_free_name(ctx, &pad);
if (ret < 0)
return ret;
- s->xfade_status = 3;
- }
- // Read second input until EOF
- if (s->xfade_status == 3) {
- if (ff_inlink_queued_frames(ctx->inputs[1]))
- return pass_frame(ctx->inputs[1], outlink, &s->pts);
- FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);
- FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[1]);
}
- return FFERROR_NOT_READY;
+ return 0;
}
static int acrossfade_config_output(AVFilterLink *outlink)
@@ -759,17 +802,6 @@ static int acrossfade_config_output(AVFilterLink *outlink)
return 0;
}
-static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
- {
- .name = "crossfade0",
- .type = AVMEDIA_TYPE_AUDIO,
- },
- {
- .name = "crossfade1",
- .type = AVMEDIA_TYPE_AUDIO,
- },
-};
-
static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
{
.name = "default",
@@ -782,9 +814,10 @@ const FFFilter ff_af_acrossfade = {
.p.name = "acrossfade",
.p.description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
.p.priv_class = &acrossfade_class,
+ .p.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
.priv_size = sizeof(AudioFadeContext),
+ .init = acrossfade_init,
.activate = activate,
- FILTER_INPUTS(avfilter_af_acrossfade_inputs),
FILTER_OUTPUTS(avfilter_af_acrossfade_outputs),
FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
};
commit 81c1004891fb7316e784ff07f2ea072ccdb68b36
Author: Niklas Haas <[email protected]>
AuthorDate: Mon Sep 1 22:01:28 2025 +0200
Commit: Niklas Haas <[email protected]>
CommitDate: Thu Sep 4 23:12:34 2025 +0000
avfilter/af_afade: shorten crossfade on too short inputs
This behavior is currently completely broken, leading to an abrupt end of the
first audio stream. I want to generalize this filter to multiple inputs, but
having too short input files will always represent a significant problem.
I considered a few approaches for how to handle this more gracefully, but
most of them come with their own problems, in particular when a short input
is sandwiched between two longer ones, or when there is a sequence of short
inputs. In the end, it's simplest to just shorten the crossfade window.
I also considered (and tested) padding the input with silence, but this also
has its own aesthetic implications and strange edge cases.
See-Also: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/20388
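A minimal sketch of the clamping idea (illustrative only, not the code in this
patch; the helper name and sample counts are made up): the effective fade
length is reduced to whatever the shorter side can still provide, instead of
ending abruptly.
    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    /* Clamp the requested crossfade length to the samples actually queued on
     * both sides, warning when the fade has to be shortened. */
    static int64_t effective_xfade(int64_t requested, int64_t queued0, int64_t queued1)
    {
        int64_t n = queued0 < queued1 ? queued0 : queued1;
        if (n < requested)
            fprintf(stderr, "crossfade shortened from %"PRId64" to %"PRId64" samples\n",
                    requested, n);
        return n;
    }
    int main(void)
    {
        /* e.g. a 2 s fade at 44.1 kHz, but only 30000 samples left on one side */
        printf("%"PRId64"\n", effective_xfade(88200, 120000, 30000));
        return 0;
    }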
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index 73386ce9a8..c3f4fc55f4 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -573,18 +573,44 @@ static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink
AVFrame *out, *cf[2] = { NULL };
int ret;
+ int queued_samples0 = ff_inlink_queued_samples(in0);
+ int queued_samples1 = ff_inlink_queued_samples(in1);
+
+ /* Limit to the relevant region */
+ av_assert1(queued_samples0 <= s->nb_samples);
+ queued_samples1 = FFMIN(queued_samples1, s->nb_samples);
+
if (s->overlap) {
- out = ff_get_audio_buffer(outlink, s->nb_samples);
+ int nb_samples = FFMIN(queued_samples0, queued_samples1);
+ if (nb_samples < s->nb_samples) {
+ av_log(ctx, AV_LOG_WARNING, "Input %d duration (%d samples) "
+ "is shorter than crossfade duration (%"PRId64" samples), "
+ "crossfade will be shorter by %"PRId64" samples.\n",
+ queued_samples0 <= queued_samples1 ? 0 : 1,
+ nb_samples, s->nb_samples, s->nb_samples - nb_samples);
+
+ if (queued_samples0 > nb_samples) {
+ ret = pass_samples(in0, outlink, queued_samples0 - nb_samples, &s->pts);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!nb_samples)
+ return 0; /* either input was completely empty */
+ }
+
+ av_assert1(nb_samples > 0);
+ out = ff_get_audio_buffer(outlink, nb_samples);
if (!out)
return AVERROR(ENOMEM);
- ret = ff_inlink_consume_samples(in0, s->nb_samples, s->nb_samples, &cf[0]);
+ ret = ff_inlink_consume_samples(in0, nb_samples, nb_samples, &cf[0]);
if (ret < 0) {
av_frame_free(&out);
return ret;
}
- ret = ff_inlink_consume_samples(in1, s->nb_samples, s->nb_samples, &cf[1]);
+ ret = ff_inlink_consume_samples(in1, nb_samples, nb_samples, &cf[1]);
if (ret < 0) {
av_frame_free(&cf[0]);
av_frame_free(&out);
@@ -592,50 +618,68 @@ static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink
}
s->crossfade_samples(out->extended_data, cf[0]->extended_data,
- cf[1]->extended_data,
- s->nb_samples, out->ch_layout.nb_channels,
- s->curve, s->curve2);
+ cf[1]->extended_data, nb_samples,
+ out->ch_layout.nb_channels, s->curve, s->curve2);
out->pts = s->pts;
- s->pts += av_rescale_q(s->nb_samples,
+ s->pts += av_rescale_q(nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
av_frame_free(&cf[0]);
av_frame_free(&cf[1]);
return ff_filter_frame(outlink, out);
} else {
- out = ff_get_audio_buffer(outlink, s->nb_samples);
+ if (queued_samples0 < s->nb_samples) {
+ av_log(ctx, AV_LOG_WARNING, "Input 0 duration (%d samples) "
+ "is shorter than crossfade duration (%"PRId64" samples), "
+ "fade-out will be shorter by %"PRId64" samples.\n",
+ queued_samples0, s->nb_samples, s->nb_samples - queued_samples0);
+ if (!queued_samples0)
+ goto fade_in;
+ }
+
+ out = ff_get_audio_buffer(outlink, queued_samples0);
if (!out)
return AVERROR(ENOMEM);
- ret = ff_inlink_consume_samples(in0, s->nb_samples, s->nb_samples, &cf[0]);
+ ret = ff_inlink_consume_samples(in0, queued_samples0, queued_samples0, &cf[0]);
if (ret < 0) {
av_frame_free(&out);
return ret;
}
- s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
- outlink->ch_layout.nb_channels, -1, s->nb_samples - 1, s->nb_samples, s->curve, 0., 1.);
+ s->fade_samples(out->extended_data, cf[0]->extended_data, cf[0]->nb_samples,
+ outlink->ch_layout.nb_channels, -1, cf[0]->nb_samples - 1, cf[0]->nb_samples, s->curve, 0., 1.);
out->pts = s->pts;
- s->pts += av_rescale_q(s->nb_samples,
+ s->pts += av_rescale_q(cf[0]->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
av_frame_free(&cf[0]);
ret = ff_filter_frame(outlink, out);
if (ret < 0)
return ret;
- out = ff_get_audio_buffer(outlink, s->nb_samples);
+ fade_in:
+ if (queued_samples1 < s->nb_samples) {
+ av_log(ctx, AV_LOG_WARNING, "Input 1 duration (%d samples) "
+ "is shorter than crossfade duration (%"PRId64" samples), "
+ "fade-in will be shorter by %"PRId64" samples.\n",
+ queued_samples1, s->nb_samples, s->nb_samples - queued_samples1);
+ if (!queued_samples1)
+ return 0;
+ }
+
+ out = ff_get_audio_buffer(outlink, queued_samples1);
if (!out)
return AVERROR(ENOMEM);
- ret = ff_inlink_consume_samples(in1, s->nb_samples, s->nb_samples, &cf[1]);
+ ret = ff_inlink_consume_samples(in1, queued_samples1, queued_samples1, &cf[1]);
if (ret < 0) {
av_frame_free(&out);
return ret;
}
- s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
- outlink->ch_layout.nb_channels, 1, 0, s->nb_samples, s->curve2, 0., 1.);
+ s->fade_samples(out->extended_data, cf[1]->extended_data, cf[1]->nb_samples,
+ outlink->ch_layout.nb_channels, 1, 0, cf[1]->nb_samples, s->curve2, 0., 1.);
out->pts = s->pts;
- s->pts += av_rescale_q(s->nb_samples,
+ s->pts += av_rescale_q(cf[1]->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
av_frame_free(&cf[1]);
return ff_filter_frame(outlink, out);
@@ -646,6 +690,7 @@ static int activate(AVFilterContext *ctx)
{
AudioFadeContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
+ int ret;
FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
@@ -675,11 +720,10 @@ static int activate(AVFilterContext *ctx)
}
// Do crossfade
if (s->xfade_status == 2) {
+ ret = pass_crossfade(ctx, ctx->inputs[0], ctx->inputs[1]);
+ if (ret < 0)
+ return ret;
s->xfade_status = 3;
- // TODO: Do some partial crossfade if not all inputs have enough duration?
- if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->nb_samples &&
- ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples)
- return pass_crossfade(ctx, ctx->inputs[0], ctx->inputs[1]);
}
// Read second input until EOF
if (s->xfade_status == 3) {
commit 2e0885a091a3e1649a00d98706325859b06b754e
Author: Niklas Haas <[email protected]>
AuthorDate: Tue Sep 2 13:02:16 2025 +0200
Commit: Niklas Haas <[email protected]>
CommitDate: Thu Sep 4 23:12:34 2025 +0000
avfilter/af_afade: don't leak memory on error
The first frame was never cleaned up if the second input failed.
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index 67ae3758a2..73386ce9a8 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -586,6 +586,7 @@ static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink
ret = ff_inlink_consume_samples(in1, s->nb_samples, s->nb_samples, &cf[1]);
if (ret < 0) {
+ av_frame_free(&cf[0]);
av_frame_free(&out);
return ret;
}
commit 386264ae164b675a57dfb43c7354261d06be20e8
Author: Niklas Haas <[email protected]>
AuthorDate: Mon Sep 1 21:36:01 2025 +0200
Commit: Niklas Haas <[email protected]>
CommitDate: Thu Sep 4 23:12:34 2025 +0000
avfilter/af_afade: generalize pass_crossfade() signature
Prerequisite to an upcoming refactor.
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index d4ea1a7bab..67ae3758a2 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -566,7 +566,7 @@ static int pass_samples(AVFilterLink *inlink, AVFilterLink *outlink, unsigned nb
return ff_filter_frame(outlink, in);
}
-static int pass_crossfade(AVFilterContext *ctx)
+static int pass_crossfade(AVFilterContext *ctx, AVFilterLink *in0, AVFilterLink *in1)
{
AudioFadeContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
@@ -578,13 +578,13 @@ static int pass_crossfade(AVFilterContext *ctx)
if (!out)
return AVERROR(ENOMEM);
- ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
+ ret = ff_inlink_consume_samples(in0, s->nb_samples, s->nb_samples, &cf[0]);
if (ret < 0) {
av_frame_free(&out);
return ret;
}
- ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
+ ret = ff_inlink_consume_samples(in1, s->nb_samples, s->nb_samples, &cf[1]);
if (ret < 0) {
av_frame_free(&out);
return ret;
@@ -605,7 +605,7 @@ static int pass_crossfade(AVFilterContext *ctx)
if (!out)
return AVERROR(ENOMEM);
- ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
+ ret = ff_inlink_consume_samples(in0, s->nb_samples, s->nb_samples, &cf[0]);
if (ret < 0) {
av_frame_free(&out);
return ret;
@@ -625,7 +625,7 @@ static int pass_crossfade(AVFilterContext *ctx)
if (!out)
return AVERROR(ENOMEM);
- ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
+ ret = ff_inlink_consume_samples(in1, s->nb_samples, s->nb_samples, &cf[1]);
if (ret < 0) {
av_frame_free(&out);
return ret;
@@ -678,7 +678,7 @@ static int activate(AVFilterContext *ctx)
// TODO: Do some partial crossfade if not all inputs have enough duration?
if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->nb_samples &&
ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples)
- return pass_crossfade(ctx);
+ return pass_crossfade(ctx, ctx->inputs[0], ctx->inputs[1]);
}
// Read second input until EOF
if (s->xfade_status == 3) {
-----------------------------------------------------------------------
Summary of changes:
doc/filters.texi | 11 ++
libavfilter/af_afade.c | 212 ++++++++++++++++++++++++++-------------
tests/fate/filter-audio.mak | 3 +-
tests/ref/fate/filter-acrossfade | 27 +----
4 files changed, 162 insertions(+), 91 deletions(-)
hooks/post-receive
--
_______________________________________________
ffmpeg-cvslog mailing list -- [email protected]
To unsubscribe send an email to [email protected]