ffmpeg | branch: master | Paul B Mahol | Thu Jan 18 11:17:03
2018 +0100| [8088b5d69c51b9329ccbc6d1125cf55c54ff6374] | committer: Paul B Mahol
avfilter/af_afade: acrossfade: switch to activate
Signed-off-by: Paul B Mahol
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=8088b5d69c51b9329ccbc6d1125cf55c54ff6374
---
libavfilter/af_afade.c | 207 -
1 file changed, 83 insertions(+), 124 deletions(-)
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index 4d0b31eac7..285b5b6557 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -23,10 +23,14 @@
* fade audio filter
*/
+#define FF_INTERNAL_FIELDS 1
+#include "framequeue.h"
+
#include "libavutil/audio_fifo.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
+#include "filters.h"
#include "internal.h"
typedef struct AudioFadeContext {
@@ -39,6 +43,7 @@ typedef struct AudioFadeContext {
int64_t start_time;
int overlap;
int cf0_eof;
+int prev_size;
int crossfade_is_over;
AVAudioFifo *fifo[2];
int64_t pts;
@@ -428,157 +433,127 @@ CROSSFADE(flt, float)
CROSSFADE(s16, int16_t)
CROSSFADE(s32, int32_t)
-static int acrossfade_filter_frame(AVFilterLink *inlink, AVFrame *in)
+static int activate(AVFilterContext *ctx)
{
-AVFilterContext *ctx = inlink->dst;
AudioFadeContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
-AVFrame *out, *cf[2] = { NULL };
-int ret = 0, nb_samples;
+AVFrame *in = NULL, *out, *cf[2] = { NULL };
+int ret = 0, nb_samples, status;
+int64_t pts;
if (s->crossfade_is_over) {
+ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
+if (ret < 0) {
+return ret;
+} else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status,
&pts)) {
+ff_outlink_set_status(ctx->outputs[0], status, pts);
+return 0;
+} else {
+if (ff_outlink_frame_wanted(ctx->outputs[0]) && !in) {
+ff_inlink_request_frame(ctx->inputs[1]);
+return 0;
+}
+}
in->pts = s->pts;
s->pts += av_rescale_q(in->nb_samples,
(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
return ff_filter_frame(outlink, in);
-} else if (inlink == ctx->inputs[0]) {
-av_audio_fifo_write(s->fifo[0], (void **)in->extended_data,
in->nb_samples);
+}
-nb_samples = av_audio_fifo_size(s->fifo[0]) - s->nb_samples;
+if (ff_framequeue_queued_samples(&ctx->inputs[0]->fifo) > s->nb_samples) {
+nb_samples = ff_framequeue_queued_samples(&ctx->inputs[0]->fifo) -
s->nb_samples;
if (nb_samples > 0) {
-out = ff_get_audio_buffer(outlink, nb_samples);
-if (!out) {
-ret = AVERROR(ENOMEM);
-goto fail;
-}
-av_audio_fifo_read(s->fifo[0], (void **)out->extended_data,
nb_samples);
-out->pts = s->pts;
-s->pts += av_rescale_q(nb_samples,
-(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
-ret = ff_filter_frame(outlink, out);
-}
-} else if (av_audio_fifo_size(s->fifo[1]) < s->nb_samples) {
-if (!s->overlap && av_audio_fifo_size(s->fifo[0]) > 0) {
-nb_samples = av_audio_fifo_size(s->fifo[0]);
-
-cf[0] = ff_get_audio_buffer(outlink, nb_samples);
-out = ff_get_audio_buffer(outlink, nb_samples);
-if (!out || !cf[0]) {
-ret = AVERROR(ENOMEM);
-goto fail;
+ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples,
nb_samples, &in);
+if (ret < 0) {
+return ret;
}
-av_audio_fifo_read(s->fifo[0], (void **)cf[0]->extended_data,
nb_samples);
-
-s->fade_samples(out->extended_data, cf[0]->extended_data,
nb_samples,
-outlink->channels, -1, nb_samples - 1, nb_samples,
s->curve);
-out->pts = s->pts;
-s->pts += av_rescale_q(nb_samples,
-(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
-ret = ff_filter_frame(outlink, out);
-if (ret < 0)
-goto fail;
}
-
-av_audio_fifo_write(s->fifo[1], (void **)in->extended_data,
in->nb_samples);
-} else if (av_audio_fifo_size(s->fifo[1]) >= s->nb_samples) {
-av_audio_fifo_write(s->fifo[1], (void **)in->extended_data,
in->nb_samples);
-
+in->pts = s->pts;
+s->pts += av_rescale_q(in->nb_samples,
+(AVRational){ 1, outlink->sample_rate }, outlink->time_base);
+return ff_filter_frame(outlink, in);
+} else if (ff_framequeue_queued_samples(&ctx->inputs[1]->fifo) >=
s->nb_samples) {
if (s->overlap) {
-cf[0] = ff_get_audio_buffer(outlink, s->nb_samples);
-