---
 libavcodec/avcodec.h       |  123 +++++++++-
 libavcodec/internal.h      |   49 ++--
 libavcodec/options.c       |    4 +-
 libavcodec/options_table.h |    1 +
 libavcodec/pthread.c       |    2 -
 libavcodec/utils.c         |  578 +++++++++++++++++++++++++++-----------------
 libavcodec/version.h       |    3 +
 7 files changed, 505 insertions(+), 255 deletions(-)

diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 4dde26c..589b750 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -823,6 +823,7 @@ typedef struct AVPanScan{
 #define FF_QSCALE_TYPE_H264  2
 #define FF_QSCALE_TYPE_VP56  3
 
+#if FF_API_GET_BUFFER
 #define FF_BUFFER_TYPE_INTERNAL 1
 #define FF_BUFFER_TYPE_USER     2 ///< direct rendering buffers (image is (de)allocated by user)
 #define FF_BUFFER_TYPE_SHARED   4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
@@ -832,6 +833,12 @@ typedef struct AVPanScan{
 #define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
 #define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
 #define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
 
 /**
  * @defgroup lavc_packet AVPacket
@@ -1859,6 +1866,7 @@ typedef struct AVCodecContext {
      */
     enum AVSampleFormat request_sample_fmt;
 
+#if FF_API_GET_BUFFER
     /**
      * Called at the beginning of each frame to get a buffer for it.
      *
@@ -1918,7 +1926,10 @@ typedef struct AVCodecContext {
      *
      * - encoding: unused
      * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated use get_buffer2()
      */
+    attribute_deprecated
     int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
 
     /**
@@ -1929,7 +1940,10 @@ typedef struct AVCodecContext {
      * but not by more than one thread at once, so does not need to be reentrant.
      * - encoding: unused
      * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated custom freeing callbacks should be set from get_buffer2()
      */
+    attribute_deprecated
     void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
 
     /**
@@ -1944,8 +1958,100 @@ typedef struct AVCodecContext {
      * - encoding: unused
      * - decoding: Set by libavcodec, user can override.
      */
+    attribute_deprecated
     int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
 
+    /**
+     * This callback is called at the beginning of each frame to get data
+     * buffer(s) for it. There may be one contiguous buffer for all the data or
+     * there may be one buffer per data plane or anything in between. Each
+     * buffer must be reference-counted using the AVBuffer API.
+     *
+     * The following fields will be set in the frame before this callback is
+     * called:
+     * - format
+     * - width, height (video only)
+     * - sample_rate, channel_layout, nb_samples (audio only)
+     * Their values may differ from the corresponding values in
+     * AVCodecContext. This callback must use the frame values, not the codec
+     * context values, to calculate the required buffer size.
+     *
+     * This callback must fill the following fields in the frame:
+     * - data[]
+     * - linesize[]
+     * - extended_data:
+     *   * if the data is planar audio with more than 8 channels, then this
+     *     callback must allocate and fill extended_data to contain all pointers
+     *     to all data planes. data[] must hold as many pointers as it can.
+     *     extended_data must be allocated with av_malloc() and will be freed in
+     *     av_frame_unref().
+     *   * otherwise extended_data must point to data
+     * - buf[] must contain references to the buffers that contain the frame
+     *   data.
+     * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+     *   this callback and filled with the extra buffers if there are more
+     *   buffers than buf[] can hold. extended_buf will be freed in
+     *   av_frame_unref().
+     *
+     * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+     * avcodec_default_get_buffer2() instead of providing buffers allocated by
+     * some other means.
+     *
+     * Each data plane must be aligned to the maximum required by the target
+     * CPU.
+     *
+     * @see avcodec_default_get_buffer2()
+     *
+     * Video:
+     *
+     * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+     * (read and/or written to if it is writable) later by libavcodec.
+     *
+     * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an
+     * edge of the size returned by avcodec_get_edge_width() on all sides.
+     *
+     * avcodec_align_dimensions2() should be used to find the required width and
+     * height, as they normally need to be rounded up to the next multiple of 16.
+     *
+     * If frame multithreading is used and thread_safe_callbacks is set,
+     * this callback may be called from a different thread, but not from more
+     * than one at once. Does not need to be reentrant.
+     *
+     * @see avcodec_align_dimensions2()
+     *
+     * Audio:
+     *
+     * Decoders request a buffer of a particular size by setting
+     * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+     * however, utilize only part of the buffer by setting AVFrame.nb_samples
+     * to a smaller value in the output frame.
+     *
+     * As a convenience, av_samples_get_buffer_size() and
+     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+     * functions to find the required data size and to fill data pointers and
+     * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+     * since all planes must be the same size.
+     *
+     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+     *
+     * - encoding: unused
+     * - decoding: Set by libavcodec, user can override.
+     */
+    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
+    /**
+     * If non-zero, the decoded audio and video frames returned from
+     * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+     * and are valid indefinitely. The caller must free them with
+     * av_frame_unref() when they are not needed anymore.
+     * Otherwise, the decoded frames must not be freed by the caller and are
+     * only valid until the next decode call.
+     *
+     * - encoding: unused
+     * - decoding: set by the caller before avcodec_open2().
+     */
+    int refcounted_frames;
 
     /* - encoding parameters */
     float qcompress;  ///< amount of qscale change between easy & hard scenes (0.0-1.0)
@@ -3208,9 +3314,18 @@ AVCodec *avcodec_find_decoder(enum AVCodecID id);
  */
 AVCodec *avcodec_find_decoder_by_name(const char *name);
 
-int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
-void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
-int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#if FF_API_GET_BUFFER
+attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#endif
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
 
 /**
  * Return the amount of padding in pixels which the get_buffer callback must
@@ -4141,8 +4256,6 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
  */
 void avcodec_flush_buffers(AVCodecContext *avctx);
 
-void avcodec_default_free_buffers(AVCodecContext *s);
-
 /**
  * Return codec bits per sample.
  *
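
For reference, a minimal caller-side sketch of the new callback (not part of this patch; the my_* names are hypothetical): a custom get_buffer2() that simply delegates to avcodec_default_get_buffer2(), which is the required behaviour when the decoder does not set CODEC_CAP_DR1.

#include <libavcodec/avcodec.h>

/* Trivial custom get_buffer2(): frame->format, width/height (video) or
 * sample_rate/channel_layout/nb_samples (audio) are already filled in by
 * libavcodec; a real implementation must size its buffers from the frame,
 * not from the codec context. */
static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
{
    return avcodec_default_get_buffer2(s, frame, flags);
}

static void my_setup_decoder(AVCodecContext *avctx)
{
    avctx->get_buffer2       = my_get_buffer2;
    avctx->refcounted_frames = 1; /* caller releases frames with av_frame_unref() */
}
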
diff --git a/libavcodec/internal.h b/libavcodec/internal.h
index 069a855..8e64640 100644
--- a/libavcodec/internal.h
+++ b/libavcodec/internal.h
@@ -26,34 +26,33 @@
 
 #include <stdint.h>
 
+#include "libavutil/buffer.h"
 #include "libavutil/mathematics.h"
 #include "libavutil/pixfmt.h"
 #include "avcodec.h"
 
 #define FF_SANE_NB_CHANNELS 128U
 
-typedef struct InternalBuffer {
-    uint8_t *base[AV_NUM_DATA_POINTERS];
-    uint8_t *data[AV_NUM_DATA_POINTERS];
-    int linesize[AV_NUM_DATA_POINTERS];
-    int width;
-    int height;
-    enum AVPixelFormat pix_fmt;
-} InternalBuffer;
-
-typedef struct AVCodecInternal {
+typedef struct FramePool {
     /**
-     * internal buffer count
-     * used by default get/release/reget_buffer().
+     * Pools for each data plane. For audio all the planes have the same size,
+     * so only pools[0] is used.
      */
-    int buffer_count;
+    AVBufferPool *pools[4];
 
-    /**
-     * internal buffers
-     * used by default get/release/reget_buffer().
+    /*
+     * Pool parameters
      */
-    InternalBuffer *buffer;
+    int format;
+    int width, height;
+    int stride_align[AV_NUM_DATA_POINTERS];
+    int linesize[4];
+    int planes;
+    int channels;
+    int samples;
+} FramePool;
 
+typedef struct AVCodecInternal {
     /**
      * Whether the parent AVCodecContext is a copy of the context which had
      * init() called on it.
@@ -76,11 +75,9 @@ typedef struct AVCodecInternal {
      */
     int last_audio_frame;
 
-    /**
-     * The data for the last allocated audio frame.
-     * Stored here so we can free it.
-     */
-    uint8_t *audio_data;
+    AVFrame to_free;
+
+    FramePool *pool;
 } AVCodecInternal;
 
 struct AVCodecDefault {
@@ -149,6 +146,12 @@ static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx,
  * AVCodecContext.get_buffer() and should be used instead calling get_buffer()
  * directly.
  */
-int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame);
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
+
+/**
+ * Identical in function to av_frame_make_writable(), except it uses
+ * ff_get_buffer() to allocate the buffer when needed.
+ */
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame);
 
 #endif /* AVCODEC_INTERNAL_H */
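
A decoder-side sketch of the updated internal helpers (not part of this patch; the my_decoder_* names are hypothetical): ff_get_buffer() now takes a flags argument, and a decoder that keeps modifying the same picture between calls uses ff_reget_buffer() to obtain a writable frame.

#include "avcodec.h"
#include "internal.h"

/* Allocate an output frame the decoder intends to keep referenced
 * across decode calls. */
static int my_decoder_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
    return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
}

/* Make a previously returned frame writable again (copying the data into
 * a fresh buffer if it is still referenced elsewhere) before updating it. */
static int my_decoder_touch_frame(AVCodecContext *avctx, AVFrame *frame)
{
    return ff_reget_buffer(avctx, frame);
}
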
diff --git a/libavcodec/options.c b/libavcodec/options.c
index fc2a184..d140552 100644
--- a/libavcodec/options.c
+++ b/libavcodec/options.c
@@ -88,8 +88,7 @@ int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec)
     av_opt_set_defaults(s);
 
     s->time_base           = (AVRational){0,1};
-    s->get_buffer          = avcodec_default_get_buffer;
-    s->release_buffer      = avcodec_default_release_buffer;
+    s->get_buffer2         = avcodec_default_get_buffer2;
     s->get_format          = avcodec_default_get_format;
     s->execute             = avcodec_default_execute;
     s->execute2            = avcodec_default_execute2;
@@ -97,7 +96,6 @@ int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec)
     s->pix_fmt             = AV_PIX_FMT_NONE;
     s->sample_fmt          = AV_SAMPLE_FMT_NONE;
 
-    s->reget_buffer        = avcodec_default_reget_buffer;
     s->reordered_opaque    = AV_NOPTS_VALUE;
     if(codec && codec->priv_data_size){
         if(!s->priv_data){
diff --git a/libavcodec/options_table.h b/libavcodec/options_table.h
index 7d0795a..28ce352 100644
--- a/libavcodec/options_table.h
+++ b/libavcodec/options_table.h
@@ -403,6 +403,7 @@ static const AVOption options[]={
 {"s32p", "32-bit signed integer planar",  0, AV_OPT_TYPE_CONST, {.i64 = 
AV_SAMPLE_FMT_S32P }, INT_MIN, INT_MAX, A|D, "request_sample_fmt"},
 {"fltp", "32-bit float planar",           0, AV_OPT_TYPE_CONST, {.i64 = 
AV_SAMPLE_FMT_FLTP }, INT_MIN, INT_MAX, A|D, "request_sample_fmt"},
 {"dblp", "64-bit double planar",          0, AV_OPT_TYPE_CONST, {.i64 = 
AV_SAMPLE_FMT_DBLP }, INT_MIN, INT_MAX, A|D, "request_sample_fmt"},
+{"refcounted_frames", NULL, OFFSET(refcounted_frames), AV_OPT_TYPE_INT, {.i64 
= 0}, 0, 1, A|V|D },
 {NULL},
 };
 
diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c
index 751fca8..82df03c 100644
--- a/libavcodec/pthread.c
+++ b/libavcodec/pthread.c
@@ -760,8 +760,6 @@ static void frame_thread_free(AVCodecContext *avctx, int thread_count)
     for (i = 0; i < thread_count; i++) {
         PerThreadContext *p = &fctx->threads[i];
 
-        avcodec_default_free_buffers(p->avctx);
-
         pthread_mutex_destroy(&p->mutex);
         pthread_mutex_destroy(&p->progress_mutex);
         pthread_cond_destroy(&p->input_cond);
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 27e9cfe..253290c 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -29,6 +29,7 @@
 #include "libavutil/avstring.h"
 #include "libavutil/channel_layout.h"
 #include "libavutil/crc.h"
+#include "libavutil/frame.h"
 #include "libavutil/mathematics.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/imgutils.h"
@@ -154,8 +155,6 @@ void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
     s->height       = height;
 }
 
-#define INTERNAL_BUFFER_SIZE (32 + 1)
-
 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
                                int linesize_align[AV_NUM_DATA_POINTERS])
 {
@@ -297,87 +296,26 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
     return ret;
 }
 
-static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
 {
-    AVCodecInternal *avci = avctx->internal;
-    int buf_size, ret;
-
-    av_freep(&avci->audio_data);
-    buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
-                                          frame->nb_samples, avctx->sample_fmt,
-                                          0);
-    if (buf_size < 0)
-        return AVERROR(EINVAL);
-
-    frame->data[0] = av_mallocz(buf_size);
-    if (!frame->data[0])
-        return AVERROR(ENOMEM);
-
-    ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt,
-                                   frame->data[0], buf_size, 0);
-    if (ret < 0) {
-        av_freep(&frame->data[0]);
-        return ret;
-    }
-
-    avci->audio_data = frame->data[0];
-    if (avctx->debug & FF_DEBUG_BUFFERS)
-        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
-                                    "internal audio buffer used\n", frame);
-
-    return 0;
-}
-
-static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
-{
-    int i;
-    int w = s->width;
-    int h = s->height;
-    InternalBuffer *buf;
-    AVCodecInternal *avci = s->internal;
-
-    if (pic->data[0] != NULL) {
-        av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in 
avcodec_default_get_buffer\n");
-        return -1;
-    }
-    if (avci->buffer_count >= INTERNAL_BUFFER_SIZE) {
-        av_log(s, AV_LOG_ERROR, "buffer_count overflow (missing 
release_buffer?)\n");
-        return -1;
-    }
-
-    if (av_image_check_size(w, h, 0, s))
-        return -1;
-
-    if (!avci->buffer) {
-        avci->buffer = av_mallocz((INTERNAL_BUFFER_SIZE + 1) *
-                                  sizeof(InternalBuffer));
-    }
-
-    buf = &avci->buffer[avci->buffer_count];
+    FramePool *pool = avctx->internal->pool;
+    int i, ret;
 
-    if (buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)) {
-        for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
-            av_freep(&buf->base[i]);
-            buf->data[i] = NULL;
-        }
-    }
-
-    if (!buf->base[0]) {
-        int h_chroma_shift, v_chroma_shift;
-        int size[4] = { 0 };
-        int tmpsize;
-        int unaligned;
+    switch (avctx->codec_type) {
+    case AVMEDIA_TYPE_VIDEO: {
         AVPicture picture;
-        int stride_align[AV_NUM_DATA_POINTERS];
-        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
-        const int pixel_size = desc->comp[0].step_minus1 + 1;
+        int size[4] = { 0 };
+        int w = frame->width;
+        int h = frame->height;
+        int tmpsize, unaligned;
 
-        av_pix_fmt_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift,
-                                         &v_chroma_shift);
+        if (pool->format == frame->format &&
+            pool->width == frame->width && pool->height == frame->height)
+            return 0;
 
-        avcodec_align_dimensions2(s, &w, &h, stride_align);
+        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
 
-        if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
+        if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
             w += EDGE_WIDTH * 2;
             h += EDGE_WIDTH * 2;
         }
@@ -385,16 +323,17 @@ static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
         do {
             // NOTE: do not align linesizes individually, this breaks e.g. assumptions
             // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
-            av_image_fill_linesizes(picture.linesize, s->pix_fmt, w);
+            av_image_fill_linesizes(picture.linesize, avctx->pix_fmt, w);
             // increase alignment of w for next try (rhs gives the lowest bit set in w)
             w += w & ~(w - 1);
 
             unaligned = 0;
             for (i = 0; i < 4; i++)
-                unaligned |= picture.linesize[i] % stride_align[i];
+                unaligned |= picture.linesize[i] % pool->stride_align[i];
         } while (unaligned);
 
-        tmpsize = av_image_fill_pointers(picture.data, s->pix_fmt, h, NULL, picture.linesize);
+        tmpsize = av_image_fill_pointers(picture.data, avctx->pix_fmt, h,
+                                         NULL, picture.linesize);
         if (tmpsize < 0)
             return -1;
 
@@ -402,54 +341,156 @@ static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
             size[i] = picture.data[i + 1] - picture.data[i];
         size[i] = tmpsize - (picture.data[i] - picture.data[0]);
 
-        memset(buf->base, 0, sizeof(buf->base));
-        memset(buf->data, 0, sizeof(buf->data));
-
-        for (i = 0; i < 4 && size[i]; i++) {
-            const int h_shift = i == 0 ? 0 : h_chroma_shift;
-            const int v_shift = i == 0 ? 0 : v_chroma_shift;
+        for (i = 0; i < 4; i++) {
+            av_buffer_pool_uninit(&pool->pools[i]);
+            pool->linesize[i] = picture.linesize[i];
+            if (size[i]) {
+                pool->pools[i] = av_buffer_pool_init(size[i], NULL);
+                if (!pool->pools[i])
+                    return AVERROR(ENOMEM);
+            }
+        }
+        pool->format = frame->format;
+        pool->width  = frame->width;
+        pool->height = frame->height;
 
-            buf->linesize[i] = picture.linesize[i];
+        break;
+        }
+    case AVMEDIA_TYPE_AUDIO: {
+        int ch     = av_get_channel_layout_nb_channels(frame->channel_layout);
+        int planar = av_sample_fmt_is_planar(frame->format);
+        int planes = planar ? ch : 1;
+
+        if (pool->format == frame->format && pool->planes == planes &&
+            pool->channels == ch && frame->nb_samples == pool->samples)
+            return 0;
+
+        av_buffer_pool_uninit(&pool->pools[0]);
+        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
+                                         frame->nb_samples, frame->format, 0);
+        if (ret < 0)
+            return AVERROR(EINVAL);
 
-            buf->base[i] = av_malloc(size[i] + 16); //FIXME 16
-            if (buf->base[i] == NULL)
-                return -1;
+        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
+        if (!pool->pools[0])
+            return AVERROR(ENOMEM);
 
-            // no edge if EDGE EMU or not planar YUV
-            if ((s->flags & CODEC_FLAG_EMU_EDGE) || !size[2])
-                buf->data[i] = buf->base[i];
-            else
-                buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i] * EDGE_WIDTH >> v_shift) + (pixel_size * EDGE_WIDTH >> h_shift), stride_align[i]);
+        pool->format     = frame->format;
+        pool->planes     = planes;
+        pool->channels   = ch;
+        pool->samples = frame->nb_samples;
+        break;
         }
-        for (; i < AV_NUM_DATA_POINTERS; i++) {
-            buf->base[i]     = buf->data[i] = NULL;
-            buf->linesize[i] = 0;
+    default: av_assert0(0);
+    }
+    return 0;
+}
+
+static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    FramePool *pool = avctx->internal->pool;
+    int planes = pool->planes;
+    int i;
+
+    if (planes > AV_NUM_DATA_POINTERS) {
+        frame->extended_data = av_mallocz(planes * sizeof(*frame->extended_data));
+        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
+        frame->extended_buf  = av_mallocz(frame->nb_extended_buf *
+                                          sizeof(*frame->extended_buf));
+        if (!frame->extended_data || !frame->extended_buf) {
+            av_freep(&frame->extended_data);
+            av_freep(&frame->extended_buf);
+            return AVERROR(ENOMEM);
         }
-        if (size[1] && !size[2])
-            avpriv_set_systematic_pal2((uint32_t *)buf->data[1], s->pix_fmt);
-        buf->width   = s->width;
-        buf->height  = s->height;
-        buf->pix_fmt = s->pix_fmt;
+    } else
+        frame->extended_data = frame->data;
+
+    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
+        frame->buf[i] = av_buffer_alloc_pool(pool->pools[0]);
+        if (!frame->buf[i])
+            goto fail;
+        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
+    }
+    for (i = 0; i < frame->nb_extended_buf; i++) {
+        frame->extended_buf[i] = av_buffer_alloc_pool(pool->pools[0]);
+        if (!frame->extended_buf[i])
+            goto fail;
+        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
     }
 
-    for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
-        pic->base[i]     = buf->base[i];
-        pic->data[i]     = buf->data[i];
-        pic->linesize[i] = buf->linesize[i];
+    if (avctx->debug & FF_DEBUG_BUFFERS)
+        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", 
frame);
+
+    return 0;
+fail:
+    av_frame_unref(frame);
+    return AVERROR(ENOMEM);
+}
+
+static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+{
+    FramePool *pool = s->internal->pool;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
+    int pixel_size = desc->comp[0].step_minus1 + 1;
+    int h_chroma_shift, v_chroma_shift;
+    int i;
+
+    if (pic->data[0] != NULL) {
+        av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in 
avcodec_default_get_buffer\n");
+        return -1;
     }
+
+    memset(pic->data, 0, sizeof(pic->data));
     pic->extended_data = pic->data;
-    avci->buffer_count++;
+
+    av_pix_fmt_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+
+    for (i = 0; i < 4 && pool->pools[i]; i++) {
+        const int h_shift = i == 0 ? 0 : h_chroma_shift;
+        const int v_shift = i == 0 ? 0 : v_chroma_shift;
+
+        pic->linesize[i] = pool->linesize[i];
+
+        pic->buf[i] = av_buffer_alloc_pool(pool->pools[i]);
+        if (!pic->buf[i])
+            goto fail;
+
+        // no edge if EDGE EMU or not planar YUV
+        if ((s->flags & CODEC_FLAG_EMU_EDGE) || !pool->pools[2])
+            pic->data[i] = pic->buf[i]->data;
+        else {
+            pic->data[i] = pic->buf[i]->data +
+                FFALIGN((pic->linesize[i] * EDGE_WIDTH >> v_shift) +
+                        (pixel_size * EDGE_WIDTH >> h_shift), pool->stride_align[i]);
+        }
+    }
+    for (; i < AV_NUM_DATA_POINTERS; i++) {
+        pic->data[i] = NULL;
+        pic->linesize[i] = 0;
+    }
+    if (pic->data[1] && !pic->data[2])
+        avpriv_set_systematic_pal2((uint32_t *)pic->data[1], s->pix_fmt);
 
     if (s->debug & FF_DEBUG_BUFFERS)
-        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d "
-                                "buffers used\n", pic, avci->buffer_count);
+        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
 
     return 0;
+fail:
+    av_frame_unref(pic);
+    return AVERROR(ENOMEM);
 }
 
-int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
 {
+    int ret;
+
+    if ((ret = update_frame_pool(avctx, frame)) < 0)
+        return ret;
+
+#if FF_API_GET_BUFFER
     frame->type = FF_BUFFER_TYPE_INTERNAL;
+#endif
+
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_VIDEO:
         return video_get_buffer(avctx, frame);
@@ -460,14 +501,44 @@ int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
     }
 }
 
-int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+#if FF_API_GET_BUFFER
+int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    return avcodec_default_get_buffer2(avctx, frame, 0);
+}
+
+typedef struct CompatReleaseBufPriv {
+    AVCodecContext avctx;
+    AVFrame frame;
+} CompatReleaseBufPriv;
+
+static void compat_free_buffer(void *opaque, uint8_t *data)
+{
+    CompatReleaseBufPriv *priv = opaque;
+    priv->avctx.release_buffer(&priv->avctx, &priv->frame);
+    av_freep(&priv);
+}
+
+static void compat_release_buffer(void *opaque, uint8_t *data)
+{
+    AVBufferRef *buf = opaque;
+    av_buffer_unref(&buf);
+}
+#endif
+
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
 {
+    int ret;
+
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_VIDEO:
         frame->width               = avctx->width;
         frame->height              = avctx->height;
         frame->format              = avctx->pix_fmt;
         frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
+
+        if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
+            return ret;
         break;
     case AVMEDIA_TYPE_AUDIO:
         frame->sample_rate    = avctx->sample_rate;
@@ -480,87 +551,154 @@ int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame)
     frame->pkt_pts = avctx->pkt ? avctx->pkt->pts : AV_NOPTS_VALUE;
     frame->reordered_opaque = avctx->reordered_opaque;
 
-    return avctx->get_buffer(avctx, frame);
-}
+#if FF_API_GET_BUFFER
+    /*
+     * Wrap an old get_buffer()-allocated buffer in a bunch of AVBuffers.
+     * We wrap each plane in its own AVBuffer. Each of those has a reference to
+     * a dummy AVBuffer as its private data, unreffing it on free.
+     * When all the planes are freed, the dummy buffer's free callback calls
+     * release_buffer().
+     */
+    if (avctx->get_buffer) {
+        CompatReleaseBufPriv *priv = NULL;
+        AVBufferRef *dummy_buf = NULL;
+        int planes, i, ret;
 
-void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic)
-{
-    int i;
-    InternalBuffer *buf, *last;
-    AVCodecInternal *avci = s->internal;
+        if (flags & AV_GET_BUFFER_FLAG_REF)
+            frame->reference    = 1;
 
-    assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+        ret = avctx->get_buffer(avctx, frame);
+        if (ret < 0)
+            return ret;
 
-    assert(pic->type == FF_BUFFER_TYPE_INTERNAL);
-    assert(avci->buffer_count);
+        /* return if the buffers are already set up
+         * this would happen e.g. when a custom get_buffer() calls
+         * avcodec_default_get_buffer
+         */
+        if (frame->buf[0])
+            return 0;
+
+        priv = av_mallocz(sizeof(*priv));
+        if (!priv) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+        priv->avctx = *avctx;
+        priv->frame = *frame;
 
-    if (avci->buffer) {
-        buf = NULL; /* avoids warning */
-        for (i = 0; i < avci->buffer_count; i++) { //just 3-5 checks so is not worth to optimize
-            buf = &avci->buffer[i];
-            if (buf->data[0] == pic->data[0])
-                break;
+        dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, priv, 0);
+        if (!dummy_buf) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
         }
-        assert(i < avci->buffer_count);
-        avci->buffer_count--;
-        last = &avci->buffer[avci->buffer_count];
 
-        if (buf != last)
-            FFSWAP(InternalBuffer, *buf, *last);
-    }
+#define WRAP_PLANE(ref_out, data, data_size)                            \
+do {                                                                    \
+    AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf);                  \
+    if (!dummy_ref) {                                                   \
+        ret = AVERROR(ENOMEM);                                          \
+        goto fail;                                                      \
+    }                                                                   \
+    ref_out = av_buffer_create(data, data_size, compat_release_buffer,  \
+                               dummy_ref, 0);                           \
+    if (!ref_out) {                                                     \
+        av_frame_unref(frame);                                          \
+        ret = AVERROR(ENOMEM);                                          \
+        goto fail;                                                      \
+    }                                                                   \
+} while (0)
+
+        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+
+            if (!desc) {
+                ret = AVERROR(EINVAL);
+                goto fail;
+            }
+            planes = (desc->flags & PIX_FMT_PLANAR) ? desc->nb_components : 1;
 
-    for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
-        pic->data[i] = NULL;
-//        pic->base[i]=NULL;
+            for (i = 0; i < planes; i++) {
+                int v_shift    = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
+                int plane_size = (frame->height >> v_shift) * frame->linesize[i];
 
-    if (s->debug & FF_DEBUG_BUFFERS)
-        av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d "
-                                "buffers used\n", pic, avci->buffer_count);
-}
+                WRAP_PLANE(frame->buf[i], frame->data[i], plane_size);
+            }
+        } else {
+            int planar = av_sample_fmt_is_planar(frame->format);
+            planes = planar ? avctx->channels : 1;
+
+            if (planes > FF_ARRAY_ELEMS(frame->buf)) {
+                frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
+                frame->extended_buf = av_malloc(sizeof(*frame->extended_buf) *
+                                                frame->nb_extended_buf);
+                if (!frame->extended_buf) {
+                    ret = AVERROR(ENOMEM);
+                    goto fail;
+                }
+            }
 
-int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic)
-{
-    AVFrame temp_pic;
-    int i;
+            for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++)
+                WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]);
 
-    assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
+            for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++)
+                WRAP_PLANE(frame->extended_buf[i],
+                           frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)],
+                           frame->linesize[0]);
+        }
+
+        av_buffer_unref(&dummy_buf);
+
+        return 0;
 
-    /* If no picture return a new buffer */
-    if (pic->data[0] == NULL) {
-        /* We will copy from buffer, so must be readable */
-        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
-        return ff_get_buffer(s, pic);
+fail:
+        avctx->release_buffer(avctx, frame);
+        av_freep(&priv);
+        av_buffer_unref(&dummy_buf);
+        return ret;
     }
+#endif
 
-    assert(s->pix_fmt == pic->format);
+    return avctx->get_buffer2(avctx, frame, flags);
+}
 
-    /* If internal buffer type return the same buffer */
-    if (pic->type == FF_BUFFER_TYPE_INTERNAL) {
-        if (s->pkt)
-            pic->pkt_pts = s->pkt->pts;
-        else
-            pic->pkt_pts = AV_NOPTS_VALUE;
-        pic->reordered_opaque = s->reordered_opaque;
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+    AVFrame tmp;
+    int ret;
+
+    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
+
+    if (!frame->data[0])
+        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+
+    if (av_frame_is_writable(frame))
         return 0;
-    }
 
-    /*
-     * Not internal type and reget_buffer not overridden, emulate cr buffer
-     */
-    temp_pic = *pic;
-    for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
-        pic->data[i] = pic->base[i] = NULL;
-    pic->opaque = NULL;
-    /* Allocate new frame */
-    if (ff_get_buffer(s, pic))
-        return -1;
-    /* Copy image data from old buffer to new buffer */
-    av_picture_copy((AVPicture *)pic, (AVPicture *)&temp_pic, s->pix_fmt, s->width,
-                    s->height);
-    s->release_buffer(s, &temp_pic); // Release old frame
+    tmp = *frame;
+    tmp.extended_data = tmp.data;
+
+    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+    if (ret < 0)
+        return ret;
+
+    av_image_copy(frame->data, frame->linesize, tmp.data, tmp.linesize,
+                  frame->format, frame->width, frame->height);
+
+    av_frame_unref(&tmp);
+
     return 0;
 }
 
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic)
+{
+    av_assert0(0);
+}
+
+int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic)
+{
+    av_assert0(0);
+}
+
 int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
 {
     int i;
@@ -684,6 +822,12 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
         goto end;
     }
 
+    avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool));
+    if (!avctx->internal->pool) {
+        ret = AVERROR(ENOMEM);
+        goto free_and_end;
+    }
+
     if (codec->priv_data_size > 0) {
         if (!avctx->priv_data) {
             avctx->priv_data = av_mallocz(codec->priv_data_size);
@@ -869,6 +1013,8 @@ end:
 free_and_end:
     av_dict_free(&tmp);
     av_freep(&avctx->priv_data);
+    if (avctx->internal)
+        av_freep(&avctx->internal->pool);
     av_freep(&avctx->internal);
     avctx->codec = NULL;
     goto end;
@@ -1272,6 +1418,7 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
                                               int *got_picture_ptr,
                                               AVPacket *avpkt)
 {
+    AVCodecInternal *avci = avctx->internal;
     int ret;
 
     *got_picture_ptr = 0;
@@ -1283,6 +1430,9 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
 
     avcodec_get_frame_defaults(picture);
 
+    if (!avctx->refcounted_frames)
+        av_frame_unref(&avci->to_free);
+
     if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
         if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
             ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
@@ -1302,8 +1452,17 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
 
         emms_c(); //needed to avoid an emms_c() call before every return;
 
-        if (*got_picture_ptr)
+        if (ret < 0 && picture->data[0])
+            av_frame_unref(picture);
+
+        if (*got_picture_ptr) {
+            if (!avctx->refcounted_frames) {
+                avci->to_free = *picture;
+                avci->to_free.extended_data = avci->to_free.data;
+            }
+
             avctx->frame_number++;
+        }
     } else
         ret = 0;
 
@@ -1367,6 +1526,7 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
                                               int *got_frame_ptr,
                                               AVPacket *avpkt)
 {
+    AVCodecInternal *avci = avctx->internal;
     int planar, channels;
     int ret = 0;
 
@@ -1383,6 +1543,9 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
 
     avcodec_get_frame_defaults(frame);
 
+    if (!avctx->refcounted_frames)
+        av_frame_unref(&avci->to_free);
+
     if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
         ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
         if (ret >= 0 && *got_frame_ptr) {
@@ -1390,7 +1553,15 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
             frame->pkt_dts = avpkt->dts;
             if (frame->format == AV_SAMPLE_FMT_NONE)
                 frame->format = avctx->sample_fmt;
+
+            if (!avctx->refcounted_frames) {
+                avci->to_free = *frame;
+                avci->to_free.extended_data = avci->to_free.data;
+            }
         }
+
+        if (ret < 0 && frame->data[0])
+            av_frame_unref(frame);
     }
 
     /* many decoders assign whole AVFrames, thus overwriting extended_data;
@@ -1453,12 +1624,18 @@ av_cold int avcodec_close(AVCodecContext *avctx)
     }
 
     if (avcodec_is_open(avctx)) {
+        FramePool *pool = avctx->internal->pool;
+        int i;
         if (HAVE_THREADS && avctx->thread_opaque)
             ff_thread_free(avctx);
         if (avctx->codec && avctx->codec->close)
             avctx->codec->close(avctx);
-        avcodec_default_free_buffers(avctx);
         avctx->coded_frame = NULL;
+        if (!avctx->refcounted_frames)
+            av_frame_unref(&avctx->internal->to_free);
+        for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
+            av_buffer_pool_uninit(&pool->pools[i]);
+        av_freep(&avctx->internal->pool);
         av_freep(&avctx->internal);
     }
 
@@ -1727,49 +1904,6 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
         avctx->codec->flush(avctx);
 }
 
-static void video_free_buffers(AVCodecContext *s)
-{
-    AVCodecInternal *avci = s->internal;
-    int i, j;
-
-    if (!avci->buffer)
-        return;
-
-    if (avci->buffer_count)
-        av_log(s, AV_LOG_WARNING, "Found %i unreleased buffers!\n",
-               avci->buffer_count);
-    for (i = 0; i < INTERNAL_BUFFER_SIZE; i++) {
-        InternalBuffer *buf = &avci->buffer[i];
-        for (j = 0; j < 4; j++) {
-            av_freep(&buf->base[j]);
-            buf->data[j] = NULL;
-        }
-    }
-    av_freep(&avci->buffer);
-
-    avci->buffer_count = 0;
-}
-
-static void audio_free_buffers(AVCodecContext *avctx)
-{
-    AVCodecInternal *avci = avctx->internal;
-    av_freep(&avci->audio_data);
-}
-
-void avcodec_default_free_buffers(AVCodecContext *avctx)
-{
-    switch (avctx->codec_type) {
-    case AVMEDIA_TYPE_VIDEO:
-        video_free_buffers(avctx);
-        break;
-    case AVMEDIA_TYPE_AUDIO:
-        audio_free_buffers(avctx);
-        break;
-    default:
-        break;
-    }
-}
-
 int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
 {
     switch (codec_id) {
@@ -2107,15 +2241,15 @@ unsigned int avpriv_toupper4(unsigned int x)
 
 #if !HAVE_THREADS
 
-int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
 {
     f->owner = avctx;
-    return ff_get_buffer(avctx, f);
+    return ff_get_buffer(avctx, f, flags);
 }
 
 void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
 {
-    f->owner->release_buffer(f->owner, f);
+    av_frame_unref(f);
 }
 
 void ff_thread_finish_setup(AVCodecContext *avctx)
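
A caller-side sketch of the ownership rules implemented above (not part of this patch; the my_* names are hypothetical, error handling omitted): with refcounted_frames set, either directly or through the new "refcounted_frames" AVOption, every frame returned with got_frame set belongs to the caller until released with av_frame_unref(); with it unset, the frame stays valid only until the next decode call, as before.

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static void my_decode_packet(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame)
{
    /* avctx->refcounted_frames was set to 1 before avcodec_open2() */
    while (pkt->size > 0) {
        int got_frame = 0;
        int ret = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
        if (ret < 0)
            break;
        if (got_frame) {
            /* ... consume frame->data / frame->buf ... */
            av_frame_unref(frame); /* drop our reference when done */
        }
        pkt->data += ret;
        pkt->size -= ret;
    }
}
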
diff --git a/libavcodec/version.h b/libavcodec/version.h
index d80dc0a..20da9cb 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -100,5 +100,8 @@
 #ifndef FF_API_DESTRUCT_PACKET
 #define FF_API_DESTRUCT_PACKET   (LIBAVCODEC_VERSION_MAJOR < 56)
 #endif
+#ifndef FF_API_GET_BUFFER
+#define FF_API_GET_BUFFER        (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
 
 #endif /* AVCODEC_VERSION_H */
-- 
1.7.10.4
