The AVFloatDSPContext needs to be allocated by libavutil so that new fields can
be appended to the end of the struct without breaking cross-library ABI compatibility.
---
libavcodec/aac.h | 2 +-
libavcodec/aacdec.c | 58 +++++++++++++++++++++++------------------
libavcodec/aacenc.c | 15 ++++++++---
libavcodec/aacenc.h | 2 +-
libavcodec/aacsbr.c | 4 +-
libavcodec/ac3dec.c | 42 +++++++++++++++++-------------
libavcodec/ac3dec.h | 2 +-
libavcodec/ac3enc.c | 6 +++-
libavcodec/ac3enc.h | 2 +-
libavcodec/ac3enc_template.c | 2 +-
libavcodec/atrac1.c | 20 ++++++++++----
libavcodec/atrac3.c | 21 ++++++++++-----
libavcodec/dcadec.c | 12 ++++++---
libavcodec/libmp3lame.c | 17 ++++++++----
libavcodec/nellymoserdec.c | 16 ++++++++---
libavcodec/nellymoserenc.c | 13 ++++++---
libavcodec/ra288.c | 21 ++++++++++++---
libavcodec/twinvq.c | 31 +++++++++++++--------
libavcodec/vorbisdec.c | 55 +++++++++++++++++++++------------------
libavcodec/wma.c | 8 ++++-
libavcodec/wma.h | 2 +-
libavcodec/wmaenc.c | 4 +-
libavcodec/wmaprodec.c | 52 +++++++++++++++++++++++--------------
libavfilter/af_amix.c | 13 ++++++---
libavfilter/af_volume.c | 32 +++++++++++++++--------
libavfilter/af_volume.h | 2 +-
libavutil/float_dsp.c | 4 ++-
libavutil/float_dsp.h | 6 ++++
28 files changed, 291 insertions(+), 173 deletions(-)
diff --git a/libavcodec/aac.h b/libavcodec/aac.h
index 6c5d962..9697b4c 100644
--- a/libavcodec/aac.h
+++ b/libavcodec/aac.h
@@ -293,7 +293,7 @@ typedef struct AACContext {
FFTContext mdct_ltp;
DSPContext dsp;
FmtConvertContext fmt_conv;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
int random_state;
/** @} */
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index d59dea4..ffdef86 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -839,6 +839,7 @@ static void reset_predictor_group(PredictorState *ps, int
group_num)
static av_cold int aac_decode_init(AVCodecContext *avctx)
{
+ int ret;
AACContext *ac = avctx->priv_data;
ac->avctx = avctx;
@@ -897,7 +898,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ff_dsputil_init(&ac->dsp, avctx);
ff_fmt_convert_init(&ac->fmt_conv, avctx);
- avpriv_float_dsp_init(&ac->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&ac->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ return ret;
ac->random_state = 0x1f2e3d4c;
@@ -1360,7 +1363,7 @@ static int decode_spectrum_and_dequant(AACContext *ac,
float coef[1024],
band_energy = ac->dsp.scalarproduct_float(cfo, cfo,
off_len);
scale = sf[idx] / sqrtf(band_energy);
- ac->fdsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
+ ac->fdsp->vector_fmul_scalar(cfo, cfo, scale, off_len);
}
} else {
const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
@@ -1506,7 +1509,7 @@ static int decode_spectrum_and_dequant(AACContext *ac,
float coef[1024],
}
} while (len -= 2);
- ac->fdsp.vector_fmul_scalar(cfo, cfo, sf[idx],
off_len);
+ ac->fdsp->vector_fmul_scalar(cfo, cfo, sf[idx],
off_len);
}
}
@@ -1730,10 +1733,10 @@ static void apply_intensity_stereo(AACContext *ac,
ChannelElement *cpe, int ms_p
c *= 1 - 2 * cpe->ms_mask[idx];
scale = c * sce1->sf[idx];
for (group = 0; group < ics->group_len[g]; group++)
- ac->fdsp.vector_fmul_scalar(coef1 + group * 128 +
offsets[i],
- coef0 + group * 128 +
offsets[i],
- scale,
- offsets[i + 1] -
offsets[i]);
+ ac->fdsp->vector_fmul_scalar(coef1 + group * 128 +
offsets[i],
+ coef0 + group * 128 +
offsets[i],
+ scale,
+ offsets[i + 1] -
offsets[i]);
}
} else {
int bt_run_end = sce1->band_type_run_end[idx];
@@ -2061,10 +2064,10 @@ static void windowing_and_mdct_ltp(AACContext *ac,
float *out,
const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 :
ff_sine_128;
if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
- ac->fdsp.vector_fmul(in, in, lwindow_prev, 1024);
+ ac->fdsp->vector_fmul(in, in, lwindow_prev, 1024);
} else {
memset(in, 0, 448 * sizeof(float));
- ac->fdsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
+ ac->fdsp->vector_fmul(in + 448, in + 448, swindow_prev, 128);
}
if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
@@ -2173,35 +2176,35 @@ static void imdct_and_windowing(AACContext *ac,
SingleChannelElement *sce)
*/
if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE ||
ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
(ics->window_sequence[0] == ONLY_LONG_SEQUENCE ||
ics->window_sequence[0] == LONG_START_SEQUENCE)) {
- ac->fdsp.vector_fmul_window( out, saved,
buf, lwindow_prev, 512);
+ ac->fdsp->vector_fmul_window( out, saved,
buf, lwindow_prev, 512);
} else {
- memcpy( out, saved,
448 * sizeof(float));
+ memcpy( out, saved,
448 * sizeof(float));
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
- ac->fdsp.vector_fmul_window(out + 448 + 0*128, saved + 448,
buf + 0*128, swindow_prev, 64);
- ac->fdsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64,
buf + 1*128, swindow, 64);
- ac->fdsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64,
buf + 2*128, swindow, 64);
- ac->fdsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64,
buf + 3*128, swindow, 64);
- ac->fdsp.vector_fmul_window(temp, buf + 3*128 + 64,
buf + 4*128, swindow, 64);
- memcpy( out + 448 + 4*128, temp, 64 *
sizeof(float));
+ ac->fdsp->vector_fmul_window(out + 448 + 0*128, saved + 448,
buf + 0*128, swindow_prev, 64);
+ ac->fdsp->vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64,
buf + 1*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64,
buf + 2*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64,
buf + 3*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(temp, buf + 3*128 + 64,
buf + 4*128, swindow, 64);
+ memcpy( out + 448 + 4*128, temp, 64 *
sizeof(float));
} else {
- ac->fdsp.vector_fmul_window(out + 448, saved + 448,
buf, swindow_prev, 64);
- memcpy( out + 576, buf + 64,
448 * sizeof(float));
+ ac->fdsp->vector_fmul_window(out + 448, saved + 448,
buf, swindow_prev, 64);
+ memcpy( out + 576, buf + 64,
448 * sizeof(float));
}
}
// buffer update
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
memcpy( saved, temp + 64, 64 *
sizeof(float));
- ac->fdsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf +
5*128, swindow, 64);
- ac->fdsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf +
6*128, swindow, 64);
- ac->fdsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf +
7*128, swindow, 64);
- memcpy( saved + 448, buf + 7*128 + 64, 64 *
sizeof(float));
+ ac->fdsp->vector_fmul_window(saved + 64, buf + 4*128 + 64, buf +
5*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(saved + 192, buf + 5*128 + 64, buf +
6*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(saved + 320, buf + 6*128 + 64, buf +
7*128, swindow, 64);
+ memcpy( saved + 448, buf + 7*128 + 64, 64 *
sizeof(float));
} else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
- memcpy( saved, buf + 512, 448 *
sizeof(float));
- memcpy( saved + 448, buf + 7*128 + 64, 64 *
sizeof(float));
+ memcpy( saved, buf + 512, 448 *
sizeof(float));
+ memcpy( saved + 448, buf + 7*128 + 64, 64 *
sizeof(float));
} else { // LONG_STOP or ONLY_LONG
- memcpy( saved, buf + 512, 512 *
sizeof(float));
+ memcpy( saved, buf + 512, 512 *
sizeof(float));
}
}
@@ -2583,6 +2586,9 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
ff_mdct_end(&ac->mdct);
ff_mdct_end(&ac->mdct_small);
ff_mdct_end(&ac->mdct_ltp);
+
+ av_freep(&ac->fdsp);
+
return 0;
}
diff --git a/libavcodec/aacenc.c b/libavcodec/aacenc.c
index 6f582ca..088ee10 100644
--- a/libavcodec/aacenc.c
+++ b/libavcodec/aacenc.c
@@ -253,7 +253,7 @@ static void apply_window_and_mdct(AACEncContext *s,
SingleChannelElement *sce,
int i;
float *output = sce->ret_buf;
- apply_window[sce->ics.window_sequence[0]](&s->dsp, &s->fdsp, sce, audio);
+ apply_window[sce->ics.window_sequence[0]](&s->dsp, s->fdsp, sce, audio);
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE)
s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
@@ -684,6 +684,7 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
av_freep(&s->buffer.samples);
av_freep(&s->cpe);
ff_af_queue_close(&s->afq);
+ av_freep(&s->fdsp);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
@@ -695,7 +696,9 @@ static av_cold int dsp_init(AVCodecContext *avctx,
AACEncContext *s)
int ret = 0;
ff_dsputil_init(&s->dsp, avctx);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ return ret;
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
@@ -704,11 +707,15 @@ static av_cold int dsp_init(AVCodecContext *avctx,
AACEncContext *s)
ff_init_ff_sine_windows(7);
if (ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0))
- return ret;
+ goto error;
if (ret = ff_mdct_init(&s->mdct128, 8, 0, 32768.0))
- return ret;
+ goto error;
return 0;
+
+error:
+ aac_encode_end(avctx);
+ return ret;
}
static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
diff --git a/libavcodec/aacenc.h b/libavcodec/aacenc.h
index 81ffb97..2c99781 100644
--- a/libavcodec/aacenc.h
+++ b/libavcodec/aacenc.h
@@ -59,7 +59,7 @@ typedef struct AACEncContext {
FFTContext mdct1024; ///< long (1024 samples)
frame transform context
FFTContext mdct128; ///< short (128 samples)
frame transform context
DSPContext dsp;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
float *planar_samples[6]; ///< saved preprocessed input
int samplerate_index; ///< MPEG-4 samplerate index
diff --git a/libavcodec/aacsbr.c b/libavcodec/aacsbr.c
index add9f18..d86d732 100644
--- a/libavcodec/aacsbr.c
+++ b/libavcodec/aacsbr.c
@@ -1702,13 +1702,13 @@ void ff_sbr_apply(AACContext *ac,
SpectralBandReplication *sbr, int id_aac,
nch = 2;
}
- sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, &ac->fdsp,
+ sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, ac->fdsp,
L, sbr->X[0], sbr->qmf_filter_scratch,
sbr->data[0].synthesis_filterbank_samples,
&sbr->data[0].synthesis_filterbank_samples_offset,
downsampled);
if (nch == 2)
- sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, &ac->fdsp,
+ sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, ac->fdsp,
R, sbr->X[1], sbr->qmf_filter_scratch,
sbr->data[1].synthesis_filterbank_samples,
&sbr->data[1].synthesis_filterbank_samples_offset,
diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c
index 3c51472..c9f1cc1 100644
--- a/libavcodec/ac3dec.c
+++ b/libavcodec/ac3dec.c
@@ -155,12 +155,26 @@ static av_cold void ac3_tables_init(void)
}
/**
+ * Uninitialize the AC-3 decoder.
+ */
+static av_cold int ac3_decode_end(AVCodecContext *avctx)
+{
+ AC3DecodeContext *s = avctx->priv_data;
+ ff_mdct_end(&s->imdct_512);
+ ff_mdct_end(&s->imdct_256);
+
+ av_freep(&s->fdsp);
+
+ return 0;
+}
+
+/**
* AVCodec initialization
*/
static av_cold int ac3_decode_init(AVCodecContext *avctx)
{
AC3DecodeContext *s = avctx->priv_data;
- int i;
+ int i, ret;
s->avctx = avctx;
@@ -170,7 +184,11 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
ff_mdct_init(&s->imdct_512, 9, 1, 1.0);
ff_kbd_window_init(s->window, 5.0, 256);
ff_dsputil_init(&s->dsp, avctx);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
{
+ ac3_decode_end(avctx);
+ return ret;
+ }
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_fmt_convert_init(&s->fmt_conv, avctx);
av_lfg_init(&s->dith_state, 0);
@@ -607,15 +625,15 @@ static inline void do_imdct(AC3DecodeContext *s, int
channels)
for (i = 0; i < 128; i++)
x[i] = s->transform_coeffs[ch][2 * i];
s->imdct_256.imdct_half(&s->imdct_256, s->tmp_output, x);
- s->fdsp.vector_fmul_window(s->outptr[ch - 1], s->delay[ch - 1],
- s->tmp_output, s->window, 128);
+ s->fdsp->vector_fmul_window(s->outptr[ch - 1], s->delay[ch - 1],
+ s->tmp_output, s->window, 128);
for (i = 0; i < 128; i++)
x[i] = s->transform_coeffs[ch][2 * i + 1];
s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch - 1], x);
} else {
s->imdct_512.imdct_half(&s->imdct_512, s->tmp_output,
s->transform_coeffs[ch]);
- s->fdsp.vector_fmul_window(s->outptr[ch - 1], s->delay[ch - 1],
- s->tmp_output, s->window, 128);
+ s->fdsp->vector_fmul_window(s->outptr[ch - 1], s->delay[ch - 1],
+ s->tmp_output, s->window, 128);
memcpy(s->delay[ch - 1], s->tmp_output + 128, 128 * sizeof(float));
}
}
@@ -1408,18 +1426,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void
*data,
return FFMIN(buf_size, s->frame_size);
}
-/**
- * Uninitialize the AC-3 decoder.
- */
-static av_cold int ac3_decode_end(AVCodecContext *avctx)
-{
- AC3DecodeContext *s = avctx->priv_data;
- ff_mdct_end(&s->imdct_512);
- ff_mdct_end(&s->imdct_256);
-
- return 0;
-}
-
#define OFFSET(x) offsetof(AC3DecodeContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
static const AVOption options[] = {
diff --git a/libavcodec/ac3dec.h b/libavcodec/ac3dec.h
index 8d3a311..7e6f99c 100644
--- a/libavcodec/ac3dec.h
+++ b/libavcodec/ac3dec.h
@@ -194,7 +194,7 @@ typedef struct AC3DecodeContext {
///@name Optimization
DSPContext dsp; ///< for optimization
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
AC3DSPContext ac3dsp;
FmtConvertContext fmt_conv; ///< optimized conversion functions
///@}
diff --git a/libavcodec/ac3enc.c b/libavcodec/ac3enc.c
index c0acc64..752e654 100644
--- a/libavcodec/ac3enc.c
+++ b/libavcodec/ac3enc.c
@@ -2052,6 +2052,8 @@ av_cold int ff_ac3_encode_close(AVCodecContext *avctx)
s->mdct_end(s);
+ av_freep(&s->fdsp);
+
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
@@ -2493,7 +2495,9 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
#endif
ff_dsputil_init(&s->dsp, avctx);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ goto init_fail;
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
dprint_options(s);
diff --git a/libavcodec/ac3enc.h b/libavcodec/ac3enc.h
index be9dcf2..f5699bb 100644
--- a/libavcodec/ac3enc.h
+++ b/libavcodec/ac3enc.h
@@ -160,7 +160,7 @@ typedef struct AC3EncodeContext {
AVCodecContext *avctx; ///< parent AVCodecContext
PutBitContext pb; ///< bitstream writer context
DSPContext dsp;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
AC3DSPContext ac3dsp; ///< AC-3 optimized functions
FFTContext mdct; ///< FFT context for MDCT
calculation
const SampleType *mdct_window; ///< MDCT window function array
diff --git a/libavcodec/ac3enc_template.c b/libavcodec/ac3enc_template.c
index 388d753..24ce891 100644
--- a/libavcodec/ac3enc_template.c
+++ b/libavcodec/ac3enc_template.c
@@ -101,7 +101,7 @@ static void apply_mdct(AC3EncodeContext *s)
const SampleType *input_samples = &s->planar_samples[ch][blk *
AC3_BLOCK_SIZE];
#if CONFIG_AC3ENC_FLOAT
- apply_window(&s->fdsp, s->windowed_samples, input_samples,
+ apply_window(s->fdsp, s->windowed_samples, input_samples,
s->mdct_window, AC3_WINDOW_SIZE);
#else
apply_window(&s->dsp, s->windowed_samples, input_samples,
diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c
index b74bef6..660c8c6 100644
--- a/libavcodec/atrac1.c
+++ b/libavcodec/atrac1.c
@@ -82,7 +82,7 @@ typedef struct {
DECLARE_ALIGNED(32, float, high)[512];
float* bands[3];
FFTContext mdct_ctx[3];
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
} AT1Ctx;
/** size of the transform in samples in the long mode for each QMF band */
@@ -142,8 +142,9 @@ static int at1_imdct_block(AT1SUCtx* su, AT1Ctx *q)
at1_imdct(q, &q->spec[pos], &su->spectrum[0][ref_pos + start_pos],
nbits, band_num);
/* overlap and window */
- q->fdsp.vector_fmul_window(&q->bands[band_num][start_pos],
prev_buf,
- &su->spectrum[0][ref_pos + start_pos],
ff_sine_32, 16);
+ q->fdsp->vector_fmul_window(&q->bands[band_num][start_pos],
prev_buf,
+ &su->spectrum[0][ref_pos + start_pos],
+ ff_sine_32, 16);
prev_buf = &su->spectrum[0][ref_pos+start_pos + 16];
start_pos += block_size;
@@ -328,6 +329,8 @@ static av_cold int atrac1_decode_end(AVCodecContext * avctx)
ff_mdct_end(&q->mdct_ctx[1]);
ff_mdct_end(&q->mdct_ctx[2]);
+ av_freep(&q->fdsp);
+
return 0;
}
@@ -350,15 +353,16 @@ static av_cold int atrac1_decode_init(AVCodecContext
*avctx)
(ret = ff_mdct_init(&q->mdct_ctx[1], 8, 1, -1.0/ (1 << 15))) ||
(ret = ff_mdct_init(&q->mdct_ctx[2], 9, 1, -1.0/ (1 << 15)))) {
av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
- atrac1_decode_end(avctx);
- return ret;
+ goto fail;
}
ff_init_ff_sine_windows(5);
ff_atrac_generate_tables();
- avpriv_float_dsp_init(&q->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&q->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ goto fail;
q->bands[0] = q->low;
q->bands[1] = q->mid;
@@ -374,6 +378,10 @@ static av_cold int atrac1_decode_init(AVCodecContext
*avctx)
avctx->coded_frame = &q->frame;
return 0;
+
+fail:
+ atrac1_decode_end(avctx);
+ return ret;
}
diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c
index 312b283..334899f 100644
--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -113,7 +113,7 @@ typedef struct ATRAC3Context {
FFTContext mdct_ctx;
FmtConvertContext fmt_conv;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
} ATRAC3Context;
static DECLARE_ALIGNED(32, float, mdct_window)[MDCT_SIZE];
@@ -149,7 +149,7 @@ static void imlt(ATRAC3Context *q, float *input, float
*output, int odd_band)
q->mdct_ctx.imdct_calc(&q->mdct_ctx, output, input);
/* Perform windowing on the output. */
- q->fdsp.vector_fmul(output, output, mdct_window, MDCT_SIZE);
+ q->fdsp->vector_fmul(output, output, mdct_window, MDCT_SIZE);
}
/*
@@ -199,6 +199,8 @@ static av_cold int atrac3_decode_close(AVCodecContext
*avctx)
ff_mdct_end(&q->mdct_ctx);
+ av_freep(&q->fdsp);
+
return 0;
}
@@ -959,8 +961,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
/* initialize the MDCT transform */
if ((ret = ff_mdct_init(&q->mdct_ctx, 9, 1, 1.0 / 32768)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
- av_freep(&q->decoded_bytes_buffer);
- return ret;
+ goto fail;
}
/* init the joint-stereo decoding data */
@@ -977,19 +978,25 @@ static av_cold int atrac3_decode_init(AVCodecContext
*avctx)
q->matrix_coeff_index_next[i] = 3;
}
- avpriv_float_dsp_init(&q->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&q->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ goto fail;
ff_fmt_convert_init(&q->fmt_conv, avctx);
q->units = av_mallocz(sizeof(*q->units) * avctx->channels);
if (!q->units) {
- atrac3_decode_close(avctx);
- return AVERROR(ENOMEM);
+ ret = AVERROR(ENOMEM);
+ goto fail;
}
avcodec_get_frame_defaults(&q->frame);
avctx->coded_frame = &q->frame;
return 0;
+
+fail:
+ atrac3_decode_close(avctx);
+ return ret;
}
AVCodec ff_atrac3_decoder = {
diff --git a/libavcodec/dcadec.c b/libavcodec/dcadec.c
index f26111a..9886b4c 100644
--- a/libavcodec/dcadec.c
+++ b/libavcodec/dcadec.c
@@ -386,7 +386,7 @@ typedef struct {
int profile;
int debug_flag; ///< used for suppressing repeated error
messages output
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
FFTContext imdct;
SynthFilterContext synth;
DCADSPContext dcadsp;
@@ -1880,8 +1880,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void
*data,
float *back_chan =
s->samples_chanptr[s->channel_order_tab[s->xch_base_channel]];
float *lt_chan =
s->samples_chanptr[s->channel_order_tab[s->xch_base_channel - 2]];
float *rt_chan =
s->samples_chanptr[s->channel_order_tab[s->xch_base_channel - 1]];
- s->fdsp.vector_fmac_scalar(lt_chan, back_chan, -M_SQRT1_2, 256);
- s->fdsp.vector_fmac_scalar(rt_chan, back_chan, -M_SQRT1_2, 256);
+ s->fdsp->vector_fmac_scalar(lt_chan, back_chan, -M_SQRT1_2, 256);
+ s->fdsp->vector_fmac_scalar(rt_chan, back_chan, -M_SQRT1_2, 256);
}
}
@@ -1906,12 +1906,15 @@ static int dca_decode_frame(AVCodecContext *avctx, void
*data,
static av_cold int dca_decode_init(AVCodecContext *avctx)
{
+ int ret;
DCAContext *s = avctx->priv_data;
s->avctx = avctx;
dca_init_vlcs();
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ return ret;
ff_mdct_init(&s->imdct, 6, 1, 1.0);
ff_synth_filter_init(&s->synth);
ff_dcadsp_init(&s->dcadsp);
@@ -1936,6 +1939,7 @@ static av_cold int dca_decode_end(AVCodecContext *avctx)
DCAContext *s = avctx->priv_data;
ff_mdct_end(&s->imdct);
av_freep(&s->extra_channels_buffer);
+ av_freep(&s->fdsp);
return 0;
}
diff --git a/libavcodec/libmp3lame.c b/libavcodec/libmp3lame.c
index 156665c..8a68519 100644
--- a/libavcodec/libmp3lame.c
+++ b/libavcodec/libmp3lame.c
@@ -50,7 +50,7 @@ typedef struct LAMEContext {
int reservoir;
float *samples_flt[2];
AudioFrameQueue afq;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
} LAMEContext;
@@ -88,6 +88,9 @@ static av_cold int mp3lame_encode_close(AVCodecContext *avctx)
ff_af_queue_close(&s->afq);
lame_close(s->gfp);
+
+ av_freep(&s->fdsp);
+
return 0;
}
@@ -167,7 +170,9 @@ static av_cold int mp3lame_encode_init(AVCodecContext
*avctx)
if (ret < 0)
goto error;
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ goto error;
return 0;
error:
@@ -205,10 +210,10 @@ static int mp3lame_encode_frame(AVCodecContext *avctx,
AVPacket *avpkt,
return AVERROR(EINVAL);
}
for (ch = 0; ch < avctx->channels; ch++) {
- s->fdsp.vector_fmul_scalar(s->samples_flt[ch],
- (const float *)frame->data[ch],
- 32768.0f,
- FFALIGN(frame->nb_samples, 8));
+ s->fdsp->vector_fmul_scalar(s->samples_flt[ch],
+ (const float *)frame->data[ch],
+ 32768.0f,
+ FFALIGN(frame->nb_samples, 8));
}
ENCODE_BUFFER(lame_encode_buffer_float, float, s->samples_flt);
break;
diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c
index 08cc4ab..2e89cbc 100644
--- a/libavcodec/nellymoserdec.c
+++ b/libavcodec/nellymoserdec.c
@@ -53,7 +53,7 @@ typedef struct NellyMoserDecodeContext {
AVLFG random_state;
GetBitContext gb;
float scale_bias;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
FFTContext imdct_ctx;
DECLARE_ALIGNED(32, float, imdct_buf)[2][NELLY_BUF_LEN];
float *imdct_out;
@@ -108,15 +108,16 @@ static void nelly_decode_block(NellyMoserDecodeContext *s,
(NELLY_BUF_LEN - NELLY_FILL_LEN) * sizeof(float));
s->imdct_ctx.imdct_half(&s->imdct_ctx, s->imdct_out, aptr);
- s->fdsp.vector_fmul_window(aptr, s->imdct_prev + NELLY_BUF_LEN / 2,
- s->imdct_out, ff_sine_128,
- NELLY_BUF_LEN / 2);
+ s->fdsp->vector_fmul_window(aptr, s->imdct_prev + NELLY_BUF_LEN / 2,
+ s->imdct_out, ff_sine_128,
+ NELLY_BUF_LEN / 2);
FFSWAP(float *, s->imdct_out, s->imdct_prev);
}
}
static av_cold int decode_init(AVCodecContext * avctx) {
NellyMoserDecodeContext *s = avctx->priv_data;
+ int ret;
s->avctx = avctx;
s->imdct_out = s->imdct_buf[0];
@@ -124,7 +125,10 @@ static av_cold int decode_init(AVCodecContext * avctx) {
av_lfg_init(&s->random_state, 0);
ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp, avctx->flags &
CODEC_FLAG_BITEXACT)) < 0) {
+ ff_mdct_end(&s->imdct_ctx);
+ return ret;
+ }
s->scale_bias = 1.0/(32768*8);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
@@ -193,6 +197,8 @@ static av_cold int decode_end(AVCodecContext * avctx) {
ff_mdct_end(&s->imdct_ctx);
+ av_freep(&s->fdsp);
+
return 0;
}
diff --git a/libavcodec/nellymoserenc.c b/libavcodec/nellymoserenc.c
index 1222826..f6c9ffd 100644
--- a/libavcodec/nellymoserenc.c
+++ b/libavcodec/nellymoserenc.c
@@ -56,7 +56,7 @@ typedef struct NellyMoserEncodeContext {
AVCodecContext *avctx;
int last_frame;
DSPContext dsp;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
FFTContext mdct_ctx;
AudioFrameQueue afq;
DECLARE_ALIGNED(32, float, mdct_out)[NELLY_SAMPLES];
@@ -122,11 +122,11 @@ static void apply_mdct(NellyMoserEncodeContext *s)
float *in1 = s->buf + NELLY_BUF_LEN;
float *in2 = s->buf + 2 * NELLY_BUF_LEN;
- s->fdsp.vector_fmul (s->in_buff, in0, ff_sine_128,
NELLY_BUF_LEN);
+ s->fdsp->vector_fmul (s->in_buff, in0, ff_sine_128,
NELLY_BUF_LEN);
s->dsp.vector_fmul_reverse(s->in_buff + NELLY_BUF_LEN, in1, ff_sine_128,
NELLY_BUF_LEN);
s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out, s->in_buff);
- s->fdsp.vector_fmul (s->in_buff, in1, ff_sine_128,
NELLY_BUF_LEN);
+ s->fdsp->vector_fmul (s->in_buff, in1, ff_sine_128,
NELLY_BUF_LEN);
s->dsp.vector_fmul_reverse(s->in_buff + NELLY_BUF_LEN, in2, ff_sine_128,
NELLY_BUF_LEN);
s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out + NELLY_BUF_LEN,
s->in_buff);
}
@@ -142,6 +142,9 @@ static av_cold int encode_end(AVCodecContext *avctx)
av_free(s->path);
}
ff_af_queue_close(&s->afq);
+
+ av_freep(&s->fdsp);
+
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
@@ -174,7 +177,9 @@ static av_cold int encode_init(AVCodecContext *avctx)
if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0)
goto error;
ff_dsputil_init(&s->dsp, avctx);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ goto error;
/* Generate overlap window */
ff_sine_window_init(ff_sine_128, 128);
diff --git a/libavcodec/ra288.c b/libavcodec/ra288.c
index 8266673..098705a 100644
--- a/libavcodec/ra288.c
+++ b/libavcodec/ra288.c
@@ -39,7 +39,7 @@
typedef struct {
AVFrame frame;
DSPContext dsp;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
DECLARE_ALIGNED(32, float, sp_lpc)[FFALIGN(36, 16)]; ///< LPC
coefficients for speech data (spec: A)
DECLARE_ALIGNED(32, float, gain_lpc)[FFALIGN(10, 16)]; ///< LPC
coefficients for gain (spec: GB)
@@ -60,15 +60,27 @@ typedef struct {
float gain_rec[11];
} RA288Context;
+static av_cold int ra288_decode_close(AVCodecContext *avctx)
+{
+ RA288Context *ractx = avctx->priv_data;
+
+ av_freep(&ractx->fdsp);
+
+ return 0;
+}
+
static av_cold int ra288_decode_init(AVCodecContext *avctx)
{
RA288Context *ractx = avctx->priv_data;
+ int ret;
avctx->channels = 1;
avctx->channel_layout = AV_CH_LAYOUT_MONO;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- avpriv_float_dsp_init(&ractx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&ractx->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ return ret;
avcodec_get_frame_defaults(&ractx->frame);
avctx->coded_frame = &ractx->frame;
@@ -143,7 +155,7 @@ static void do_hybrid_window(RA288Context *ractx,
MAX_BACKWARD_FILTER_LEN +
MAX_BACKWARD_FILTER_NONREC, 16)]);
- ractx->fdsp.vector_fmul(work, window, hist, FFALIGN(order + n + non_rec,
16));
+ ractx->fdsp->vector_fmul(work, window, hist, FFALIGN(order + n + non_rec,
16));
convolve(buffer1, work + order , n , order);
convolve(buffer2, work + order + n, non_rec, order);
@@ -170,7 +182,7 @@ static void backward_filter(RA288Context *ractx,
do_hybrid_window(ractx, order, n, non_rec, temp, hist, rec, window);
if (!compute_lpc_coefs(temp, order, lpc, 0, 1, 1))
- ractx->fdsp.vector_fmul(lpc, lpc, tab, FFALIGN(order, 16));
+ ractx->fdsp->vector_fmul(lpc, lpc, tab, FFALIGN(order, 16));
memmove(hist, hist + n, move_size*sizeof(*hist));
}
@@ -232,6 +244,7 @@ AVCodec ff_ra_288_decoder = {
.id = AV_CODEC_ID_RA_288,
.priv_data_size = sizeof(RA288Context),
.init = ra288_decode_init,
+ .close = ra288_decode_close,
.decode = ra288_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"),
diff --git a/libavcodec/twinvq.c b/libavcodec/twinvq.c
index b59031b..4a588c0 100644
--- a/libavcodec/twinvq.c
+++ b/libavcodec/twinvq.c
@@ -179,7 +179,7 @@ typedef struct TwinContext {
AVCodecContext *avctx;
AVFrame frame;
DSPContext dsp;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
FFTContext mdct_ctx[3];
const ModeTab *mtab;
@@ -650,10 +650,10 @@ static void imdct_and_window(TwinContext *tctx, enum
FrameType ftype, int wtype,
mdct->imdct_half(mdct, buf1 + bsize*j, in + bsize*j);
- tctx->fdsp.vector_fmul_window(out2, prev_buf + (bsize-wsize) / 2,
- buf1 + bsize * j,
- ff_sine_windows[av_log2(wsize)],
- wsize / 2);
+ tctx->fdsp->vector_fmul_window(out2, prev_buf + (bsize-wsize) / 2,
+ buf1 + bsize * j,
+ ff_sine_windows[av_log2(wsize)],
+ wsize / 2);
out2 += wsize;
memcpy(out2, buf1 + bsize*j + wsize/2, (bsize -
wsize/2)*sizeof(float));
@@ -781,9 +781,9 @@ static void read_and_decode_spectrum(TwinContext *tctx,
GetBitContext *gb,
dec_bark_env(tctx, bark1[i][j], bark_use_hist[i][j], i,
tctx->tmp_buf, gain[sub*i+j], ftype);
- tctx->fdsp.vector_fmul(chunk + block_size*j, chunk + block_size*j,
- tctx->tmp_buf, block_size);
-
+ tctx->fdsp->vector_fmul(chunk + block_size * j,
+ chunk + block_size * j, tctx->tmp_buf,
+ block_size);
}
if (ftype == FT_LONG) {
@@ -803,7 +803,7 @@ static void read_and_decode_spectrum(TwinContext *tctx,
GetBitContext *gb,
dec_lpc_spectrum_inv(tctx, lsp, ftype, tctx->tmp_buf);
for (j = 0; j < mtab->fmode[ftype].sub; j++) {
- tctx->fdsp.vector_fmul(chunk, chunk, tctx->tmp_buf, block_size);
+ tctx->fdsp->vector_fmul(chunk, chunk, tctx->tmp_buf, block_size);
chunk += block_size;
}
}
@@ -1100,6 +1100,7 @@ static av_cold int twin_decode_close(AVCodecContext
*avctx)
av_free(tctx->spectrum);
av_free(tctx->prev_frame);
av_free(tctx->tmp_buf);
+ av_freep(&tctx->fdsp);
return 0;
}
@@ -1158,11 +1159,13 @@ static av_cold int twin_decode_init(AVCodecContext
*avctx)
}
ff_dsputil_init(&tctx->dsp, avctx);
- avpriv_float_dsp_init(&tctx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&tctx->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ goto error;
+
if ((ret = init_mdct_win(tctx))) {
av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
- twin_decode_close(avctx);
- return ret;
+ goto error;
}
init_bitstream_params(tctx);
@@ -1172,6 +1175,10 @@ static av_cold int twin_decode_init(AVCodecContext
*avctx)
avctx->coded_frame = &tctx->frame;
return 0;
+
+error:
+ twin_decode_close(avctx);
+ return ret;
}
AVCodec ff_twinvq_decoder = {
diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c
index 9bea908..5a3663c 100644
--- a/libavcodec/vorbisdec.c
+++ b/libavcodec/vorbisdec.c
@@ -126,7 +126,7 @@ typedef struct vorbis_context_s {
AVFrame frame;
GetBitContext gb;
DSPContext dsp;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
FmtConvertContext fmt_conv;
FFTContext mdct[2];
@@ -186,8 +186,11 @@ static float vorbisfloat2float(unsigned val)
// Free all allocated memory -----------------------------------------
-static void vorbis_free(vorbis_context *vc)
+// Close decoder
+
+static av_cold int vorbis_decode_close(AVCodecContext *avccontext)
{
+ vorbis_context *vc = avccontext->priv_data;
int i;
av_freep(&vc->channel_residues);
@@ -225,6 +228,10 @@ static void vorbis_free(vorbis_context *vc)
av_free(vc->mappings[i].mux);
}
av_freep(&vc->mappings);
+
+ av_freep(&vc->fdsp);
+
+ return 0;
}
// Parse setup header -------------------------------------------------
@@ -982,19 +989,22 @@ static av_cold int vorbis_decode_init(AVCodecContext
*avccontext)
vc->avccontext = avccontext;
ff_dsputil_init(&vc->dsp, avccontext);
- avpriv_float_dsp_init(&vc->fdsp, avccontext->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&vc->fdsp,
+ avccontext->flags & CODEC_FLAG_BITEXACT))
< 0)
+ goto error;
ff_fmt_convert_init(&vc->fmt_conv, avccontext);
avccontext->sample_fmt = AV_SAMPLE_FMT_FLTP;
if (!headers_len) {
av_log(avccontext, AV_LOG_ERROR, "Extradata missing.\n");
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto error;
}
if ((ret = avpriv_split_xiph_headers(headers, headers_len, 30,
header_start, header_len)) < 0) {
av_log(avccontext, AV_LOG_ERROR, "Extradata corrupt.\n");
- return ret;
+ goto error;
}
init_get_bits(gb, header_start[0], header_len[0]*8);
@@ -1005,21 +1015,19 @@ static av_cold int vorbis_decode_init(AVCodecContext
*avccontext)
}
if ((ret = vorbis_parse_id_hdr(vc))) {
av_log(avccontext, AV_LOG_ERROR, "Id header corrupt.\n");
- vorbis_free(vc);
- return ret;
+ goto error;
}
init_get_bits(gb, header_start[2], header_len[2]*8);
hdr_type = get_bits(gb, 8);
if (hdr_type != 5) {
av_log(avccontext, AV_LOG_ERROR, "Third header is not the setup
header.\n");
- vorbis_free(vc);
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto error;
}
if ((ret = vorbis_parse_setup_hdr(vc))) {
av_log(avccontext, AV_LOG_ERROR, "Setup header corrupt.\n");
- vorbis_free(vc);
- return ret;
+ goto error;
}
if (vc->audio_channels > 8)
@@ -1034,6 +1042,10 @@ static av_cold int vorbis_decode_init(AVCodecContext
*avccontext)
avccontext->coded_frame = &vc->frame;
return 0;
+
+error:
+ vorbis_decode_close(avccontext);
+ return ret;
}
// Decode audiopackets -------------------------------------------------
@@ -1603,7 +1615,7 @@ static int vorbis_parse_audio_packet(vorbis_context *vc,
float **floor_ptr)
for (j = vc->audio_channels-1;j >= 0; j--) {
ch_res_ptr = vc->channel_residues + res_chan[j] * blocksize / 2;
- vc->fdsp.vector_fmul(floor_ptr[j], floor_ptr[j], ch_res_ptr, blocksize
/ 2);
+ vc->fdsp->vector_fmul(floor_ptr[j], floor_ptr[j], ch_res_ptr,
blocksize / 2);
mdct->imdct_half(mdct, ch_res_ptr, floor_ptr[j]);
}
@@ -1620,13 +1632,15 @@ static int vorbis_parse_audio_packet(vorbis_context
*vc, float **floor_ptr)
const float *win = vc->win[blockflag & previous_window];
if (blockflag == previous_window) {
- vc->fdsp.vector_fmul_window(ret, saved, buf, win, blocksize / 4);
+ vc->fdsp->vector_fmul_window(ret, saved, buf, win, blocksize / 4);
} else if (blockflag > previous_window) {
- vc->fdsp.vector_fmul_window(ret, saved, buf, win, bs0 / 4);
+ vc->fdsp->vector_fmul_window(ret, saved, buf, win, bs0 / 4);
memcpy(ret+bs0/2, buf+bs0/4, ((bs1-bs0)/4) * sizeof(float));
} else {
memcpy(ret, saved, ((bs1 - bs0) / 4) * sizeof(float));
- vc->fdsp.vector_fmul_window(ret + (bs1 - bs0) / 4, saved + (bs1 -
bs0) / 4, buf, win, bs0 / 4);
+ vc->fdsp->vector_fmul_window(ret + (bs1 - bs0) / 4,
+ saved + (bs1 - bs0) / 4, buf, win,
+ bs0 / 4);
}
memcpy(saved, buf + blocksize / 4, blocksize / 4 * sizeof(float));
}
@@ -1687,17 +1701,6 @@ static int vorbis_decode_frame(AVCodecContext
*avccontext, void *data,
return buf_size;
}
-// Close decoder
-
-static av_cold int vorbis_decode_close(AVCodecContext *avccontext)
-{
- vorbis_context *vc = avccontext->priv_data;
-
- vorbis_free(vc);
-
- return 0;
-}
-
static av_cold void vorbis_decode_flush(AVCodecContext *avccontext)
{
vorbis_context *vc = avccontext->priv_data;
diff --git a/libavcodec/wma.c b/libavcodec/wma.c
index 9808a16..c5c5c70 100644
--- a/libavcodec/wma.c
+++ b/libavcodec/wma.c
@@ -71,7 +71,7 @@ static void init_coef_vlc(VLC *vlc, uint16_t **prun_table,
int ff_wma_init(AVCodecContext *avctx, int flags2)
{
WMACodecContext *s = avctx->priv_data;
- int i;
+ int i, ret;
float bps1, high_freq;
volatile float bps;
int sample_rate1;
@@ -84,7 +84,9 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
ff_dsputil_init(&s->dsp, avctx);
ff_fmt_convert_init(&s->fmt_conv, avctx);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ return ret;
if (avctx->codec->id == AV_CODEC_ID_WMAV1) {
s->version = 1;
@@ -387,6 +389,8 @@ int ff_wma_end(AVCodecContext *avctx)
av_free(s->int_table[i]);
}
+ av_freep(&s->fdsp);
+
return 0;
}
diff --git a/libavcodec/wma.h b/libavcodec/wma.h
index fb2aa8b..ffeb036 100644
--- a/libavcodec/wma.h
+++ b/libavcodec/wma.h
@@ -134,7 +134,7 @@ typedef struct WMACodecContext {
float lsp_pow_m_table2[(1 << LSP_POW_BITS)];
DSPContext dsp;
FmtConvertContext fmt_conv;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
#ifdef TRACE
int frame_count;
diff --git a/libavcodec/wmaenc.c b/libavcodec/wmaenc.c
index 044114b..ceb16bf 100644
--- a/libavcodec/wmaenc.c
+++ b/libavcodec/wmaenc.c
@@ -111,9 +111,9 @@ static void apply_window_and_mdct(AVCodecContext * avctx,
const AVFrame *frame)
for (ch = 0; ch < avctx->channels; ch++) {
memcpy(s->output, s->frame_out[ch], window_len * sizeof(*s->output));
- s->fdsp.vector_fmul_scalar(s->frame_out[ch], audio[ch], n, len);
+ s->fdsp->vector_fmul_scalar(s->frame_out[ch], audio[ch], n, len);
s->dsp.vector_fmul_reverse(&s->output[window_len], s->frame_out[ch],
win, len);
- s->fdsp.vector_fmul(s->frame_out[ch], s->frame_out[ch], win, len);
+ s->fdsp->vector_fmul(s->frame_out[ch], s->frame_out[ch], win, len);
mdct->mdct_calc(mdct, s->coefs[ch], s->output);
}
}
diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c
index f04b43f..2f3fecb 100644
--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -171,7 +171,7 @@ typedef struct WMAProDecodeCtx {
AVCodecContext* avctx; ///< codec context for
av_log
AVFrame frame; ///< AVFrame for decoded
output
DSPContext dsp; ///< accelerated DSP
functions
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
uint8_t frame_data[MAX_FRAMESIZE +
FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
PutBitContext pb; ///< context for filling
the frame_data buffer
@@ -276,13 +276,15 @@ static av_cold int decode_init(AVCodecContext *avctx)
WMAProDecodeCtx *s = avctx->priv_data;
uint8_t *edata_ptr = avctx->extradata;
unsigned int channel_mask;
- int i, bits;
+ int i, bits, ret;
int log2_max_num_subframes;
int num_possible_block_sizes;
s->avctx = avctx;
ff_dsputil_init(&s->dsp, avctx);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp,
+ avctx->flags & CODEC_FLAG_BITEXACT)) < 0)
+ goto error;
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
@@ -299,7 +301,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
} else {
av_log_ask_for_sample(avctx, "Unknown extradata size\n");
- return AVERROR_PATCHWELCOME;
+ ret = AVERROR_PATCHWELCOME;
+ goto error;
}
/** generic init */
@@ -314,7 +317,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags);
if (bits > WMAPRO_BLOCK_MAX_BITS) {
av_log_missing_feature(avctx, "14-bits block sizes", 1);
- return AVERROR_PATCHWELCOME;
+ ret = AVERROR_PATCHWELCOME;
+ goto error;
}
s->samples_per_frame = 1 << bits;
@@ -332,21 +336,25 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (s->max_num_subframes > MAX_SUBFRAMES) {
av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %i\n",
s->max_num_subframes);
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto error;
}
if (s->avctx->sample_rate <= 0) {
av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto error;
}
if (avctx->channels < 0) {
av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
avctx->channels);
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto error;
} else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
av_log_ask_for_sample(avctx, "unsupported number of channels\n");
- return AVERROR_PATCHWELCOME;
+ ret = AVERROR_PATCHWELCOME;
+ goto error;
}
/** init previous block len */
@@ -467,6 +475,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->coded_frame = &s->frame;
return 0;
+
+error:
+ decode_end(avctx);
+ return ret;
}
/**
@@ -1012,12 +1024,12 @@ static void inverse_channel_transform(WMAProDecodeCtx
*s)
}
} else if (s->avctx->channels == 2) {
int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
- s->fdsp.vector_fmul_scalar(ch_data[0] + sfb[0],
- ch_data[0] + sfb[0],
- 181.0 / 128, len);
- s->fdsp.vector_fmul_scalar(ch_data[1] + sfb[0],
- ch_data[1] + sfb[0],
- 181.0 / 128, len);
+ s->fdsp->vector_fmul_scalar(ch_data[0] + sfb[0],
+ ch_data[0] + sfb[0],
+ 181.0 / 128, len);
+ s->fdsp->vector_fmul_scalar(ch_data[1] + sfb[0],
+ ch_data[1] + sfb[0],
+ 181.0 / 128, len);
}
}
}
@@ -1046,8 +1058,8 @@ static void wmapro_window(WMAProDecodeCtx *s)
winlen >>= 1;
- s->fdsp.vector_fmul_window(start, start, start + winlen,
- window, winlen);
+ s->fdsp->vector_fmul_window(start, start, start + winlen,
+ window, winlen);
s->channel[c].prev_block_len = s->subframe_len;
}
@@ -1263,9 +1275,9 @@ static int decode_subframe(WMAProDecodeCtx *s)
s->channel[c].scale_factor_step;
const float quant = pow(10.0, exp / 20.0);
int start = s->cur_sfb_offsets[b];
- s->fdsp.vector_fmul_scalar(s->tmp + start,
- s->channel[c].coeffs + start,
- quant, end - start);
+ s->fdsp->vector_fmul_scalar(s->tmp + start,
+ s->channel[c].coeffs + start,
+ quant, end - start);
}
/** apply imdct (imdct_half == DCTIV with reverse) */
diff --git a/libavfilter/af_amix.c b/libavfilter/af_amix.c
index c2fb158..f88d3ab 100644
--- a/libavfilter/af_amix.c
+++ b/libavfilter/af_amix.c
@@ -154,7 +154,7 @@ static int frame_list_add_frame(FrameList *frame_list, int
nb_samples, int64_t p
typedef struct MixContext {
const AVClass *class; /**< class for AVOptions */
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
int nb_inputs; /**< number of inputs */
int active_inputs; /**< number of input currently active */
@@ -302,9 +302,9 @@ static int output_frame(AVFilterLink *outlink, int
nb_samples)
plane_size = FFALIGN(plane_size, 16);
for (p = 0; p < planes; p++) {
- s->fdsp.vector_fmac_scalar((float *)out_buf->extended_data[p],
- (float *) in_buf->extended_data[p],
- s->input_scale[i], plane_size);
+ s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
+ (float *) in_buf->extended_data[p],
+ s->input_scale[i], plane_size);
}
}
}
@@ -514,7 +514,8 @@ static int init(AVFilterContext *ctx, const char *args)
ff_insert_inpad(ctx, i, &pad);
}
- avpriv_float_dsp_init(&s->fdsp, 0);
+ if ((ret = avpriv_float_dsp_init(&s->fdsp, 0)) < 0)
+ return ret;
return 0;
}
@@ -536,6 +537,8 @@ static void uninit(AVFilterContext *ctx)
for (i = 0; i < ctx->nb_inputs; i++)
av_freep(&ctx->input_pads[i].name);
+
+ av_freep(&s->fdsp);
}
static int query_formats(AVFilterContext *ctx)
diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c
index 3f3ad47..f0811f6 100644
--- a/libavfilter/af_volume.c
+++ b/libavfilter/af_volume.c
@@ -60,6 +60,12 @@ static const AVClass volume_class = {
.version = LIBAVUTIL_VERSION_INT,
};
+static void uninit(AVFilterContext *ctx)
+{
+ VolumeContext *vol = ctx->priv;
+ av_freep(&vol->fdsp);
+}
+
static av_cold int init(AVFilterContext *ctx, const char *args)
{
VolumeContext *vol = ctx->priv;
@@ -184,8 +190,10 @@ static inline void scale_samples_s32(uint8_t *dst, const
uint8_t *src,
-static void volume_init(VolumeContext *vol)
+static int volume_init(VolumeContext *vol)
{
+ int ret;
+
vol->samples_align = 1;
switch (av_get_packed_sample_fmt(vol->sample_fmt)) {
@@ -205,7 +213,8 @@ static void volume_init(VolumeContext *vol)
vol->scale_samples = scale_samples_s32;
break;
case AV_SAMPLE_FMT_FLT:
- avpriv_float_dsp_init(&vol->fdsp, 0);
+ if ((ret = avpriv_float_dsp_init(&vol->fdsp, 0)) < 0)
+ return ret;
vol->samples_align = 4;
break;
case AV_SAMPLE_FMT_DBL:
@@ -216,6 +225,8 @@ static void volume_init(VolumeContext *vol)
if (ARCH_X86)
ff_volume_init_x86(vol);
+
+ return 0;
}
static int config_output(AVFilterLink *outlink)
@@ -228,9 +239,7 @@ static int config_output(AVFilterLink *outlink)
vol->channels =
av_get_channel_layout_nb_channels(inlink->channel_layout);
vol->planes = av_sample_fmt_is_planar(inlink->format) ? vol->channels
: 1;
- volume_init(vol);
-
- return 0;
+ return volume_init(vol);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
@@ -269,15 +278,15 @@ static int filter_frame(AVFilterLink *inlink,
AVFilterBufferRef *buf)
}
} else if (av_get_packed_sample_fmt(vol->sample_fmt) ==
AV_SAMPLE_FMT_FLT) {
for (p = 0; p < vol->planes; p++) {
- vol->fdsp.vector_fmul_scalar((float
*)out_buf->extended_data[p],
- (const float
*)buf->extended_data[p],
- vol->volume, plane_samples);
+ vol->fdsp->vector_fmul_scalar((float
*)out_buf->extended_data[p],
+ (const float
*)buf->extended_data[p],
+ vol->volume, plane_samples);
}
} else {
for (p = 0; p < vol->planes; p++) {
- vol->fdsp.vector_dmul_scalar((double
*)out_buf->extended_data[p],
- (const double
*)buf->extended_data[p],
- vol->volume, plane_samples);
+ vol->fdsp->vector_dmul_scalar((double
*)out_buf->extended_data[p],
+ (const double
*)buf->extended_data[p],
+ vol->volume, plane_samples);
}
}
}
@@ -312,6 +321,7 @@ AVFilter avfilter_af_volume = {
.query_formats = query_formats,
.priv_size = sizeof(VolumeContext),
.init = init,
+ .uninit = uninit,
.inputs = avfilter_af_volume_inputs,
.outputs = avfilter_af_volume_outputs,
};
diff --git a/libavfilter/af_volume.h b/libavfilter/af_volume.h
index a1883ed..0f8f7d4 100644
--- a/libavfilter/af_volume.h
+++ b/libavfilter/af_volume.h
@@ -37,7 +37,7 @@ enum PrecisionType {
typedef struct VolumeContext {
const AVClass *class;
- AVFloatDSPContext fdsp;
+ AVFloatDSPContext *fdsp;
enum PrecisionType precision;
double volume;
int volume_i;
diff --git a/libavutil/float_dsp.c b/libavutil/float_dsp.c
index 5e723ff..178913c 100644
--- a/libavutil/float_dsp.c
+++ b/libavutil/float_dsp.c
@@ -18,7 +18,9 @@
#include "config.h"
+#include "error.h"
#include "float_dsp.h"
+#include "mem.h"
static void vector_fmul_c(float *dst, const float *src0, const float *src1,
int len)
@@ -75,7 +77,7 @@ int avpriv_float_dsp_init(AVFloatDSPContext **fdsp_p, int
bit_exact)
{
AVFloatDSPContext *fdsp;
- if (!fdsp_p) {
+ if (!*fdsp_p) {
*fdsp_p = av_mallocz(sizeof(**fdsp_p));
if (!*fdsp_p)
return AVERROR(ENOMEM);
diff --git a/libavutil/float_dsp.h b/libavutil/float_dsp.h
index ffac886..684bf8b 100644
--- a/libavutil/float_dsp.h
+++ b/libavutil/float_dsp.h
@@ -19,6 +19,12 @@
#ifndef AVUTIL_FLOAT_DSP_H
#define AVUTIL_FLOAT_DSP_H
+/**
+ * DSP Context for optimized generic functions operating on floats and doubles.
+ *
+ * Must be allocated using avpriv_float_dsp_init(), as the size of this struct
+ * is not part of the ABI. Only add fields/functions to the end of the struct.
+ */
typedef struct AVFloatDSPContext {
/**
* Calculate the product of two vectors of floats and store the result in
--
1.7.1
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel