On 2013-02-13 19:51:09 +0100, Anton Khirnov wrote:
> ---
>  libavcodec/error_resilience.c  |   15 +-
>  libavcodec/mpeg4videoenc.c     |    2 +-
>  libavcodec/mpegvideo.c         |  531 ++++++++++++++++++++++++----------------
>  libavcodec/mpegvideo.h         |   56 +++--
>  libavcodec/mpegvideo_enc.c     |   82 +++----
>  tests/ref/fate/mpeg2-field-enc |    2 +-
>  6 files changed, 400 insertions(+), 288 deletions(-)
> 
> diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
> index 00b7202..e63e3ab 100644
> --- a/libavcodec/error_resilience.c
> +++ b/libavcodec/error_resilience.c
> @@ -548,7 +548,7 @@ skip_mean_and_median:
>                          if (s->avctx->codec_id == AV_CODEC_ID_H264) {
>                              // FIXME
>                          } else {
> -                            ff_thread_await_progress(&s->last_pic->f,
> +                            ff_thread_await_progress(&s->last_pic->tf,
>                                                       mb_y, 0);
>                          }
>                          if (!s->last_pic->motion_val[0] ||
> @@ -707,7 +707,7 @@ static int is_intra_more_likely(ERContext *s)
>                  if (s->avctx->codec_id == AV_CODEC_ID_H264) {
>                      // FIXME
>                  } else {
> -                    ff_thread_await_progress(&s->last_pic->f, mb_y, 0);
> +                    ff_thread_await_progress(&s->last_pic->tf, mb_y, 0);
>                  }
>                  is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
>                                                   linesize[0], 16);
> @@ -836,9 +836,12 @@ void ff_er_frame_end(ERContext *s)
>          av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
>  
>          for (i = 0; i < 2; i++) {
> -            s->cur_pic->ref_index[i  ]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
> -            s->cur_pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
> -            s->cur_pic->motion_val[i]    = s->cur_pic->motion_val_base[i] + 4;
> +            s->cur_pic->ref_index_buf[i]  = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
> +            s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
> +            if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i])
> +                return;

This leaks the already-allocated buffers if the failure happens on the
second iteration, or if ref_index_buf[i] was allocated but
motion_val_buf[i] was not.

Probably not that important, though, and still an improvement over the
previous code, which ignored that the mallocs may fail.
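
Something along these lines would avoid it (untested sketch, just
reshuffling the error path of the hunk above):

    for (i = 0; i < 2; i++) {
        s->cur_pic->ref_index_buf[i]  =
            av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
        s->cur_pic->motion_val_buf[i] =
            av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
        if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i]) {
            int j;
            /* unref whatever was already allocated before bailing out */
            for (j = 0; j <= i; j++) {
                av_buffer_unref(&s->cur_pic->ref_index_buf[j]);
                av_buffer_unref(&s->cur_pic->motion_val_buf[j]);
            }
            return;
        }
        s->cur_pic->ref_index[i]  = s->cur_pic->ref_index_buf[i]->data;
        s->cur_pic->motion_val[i] =
            (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
    }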

> +            s->cur_pic->ref_index[i]  = s->cur_pic->ref_index_buf[i]->data;
> +            s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
>          }
>          s->cur_pic->f.motion_subsample_log2 = 3;
>      }
> @@ -1076,7 +1079,7 @@ void ff_er_frame_end(ERContext *s)
>                      int time_pp = s->pp_time;
>                      int time_pb = s->pb_time;
>  
> -                    ff_thread_await_progress(&s->next_pic->f, mb_y, 0);
> +                    ff_thread_await_progress(&s->next_pic->tf, mb_y, 0);
>  
>                      s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] *  
> time_pb            / time_pp;
>                      s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] *  
> time_pb            / time_pp;
> diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
> index 1bb0d0d..fb4d7a8 100644
> --- a/libavcodec/mpeg4videoenc.c
> +++ b/libavcodec/mpeg4videoenc.c
> @@ -643,7 +643,7 @@ void ff_mpeg4_encode_mb(MpegEncContext * s,
>                              break;
>  
>                          b_pic = pic->f.data[0] + offset;
> -                        if (pic->f.type != FF_BUFFER_TYPE_SHARED)
> +                        if (!pic->shared)
>                              b_pic+= INPLACE_OFFSET;
>                          diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 
> 16);
>                          if(diff>s->qscale*70){ //FIXME check that 70 is 
> optimal
> diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
> index 98fcb97..8fa24e6 100644
> --- a/libavcodec/mpegvideo.c
> +++ b/libavcodec/mpegvideo.c
> @@ -27,6 +27,7 @@
>   * The simplest mpeg encoder (well, it was the simplest!).
>   */
>  
> +#include "libavutil/avassert.h"
>  #include "libavutil/imgutils.h"
>  #include "avcodec.h"
>  #include "dsputil.h"
> @@ -250,28 +251,6 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
>      return 0;
>  }
>  
> -void ff_copy_picture(Picture *dst, Picture *src)
> -{
> -    *dst = *src;
> -    dst->f.type = FF_BUFFER_TYPE_COPY;
> -}
> -
> -/**
> - * Release a frame buffer
> - */
> -static void free_frame_buffer(MpegEncContext *s, Picture *pic)
> -{
> -    /* WM Image / Screen codecs allocate internal buffers with different
> -     * dimensions / colorspaces; ignore user-defined callbacks for these. */
> -    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
> -        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
> -        s->codec_id != AV_CODEC_ID_MSS2)
> -        ff_thread_release_buffer(s->avctx, &pic->f);
> -    else
> -        avcodec_default_release_buffer(s->avctx, &pic->f);
> -    av_freep(&pic->hwaccel_picture_private);
> -}
> -
>  int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
>  {
>      int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
> @@ -315,16 +294,18 @@ static int alloc_frame_buffer(MpegEncContext *s, 
> Picture *pic)
>          }
>      }
>  
> +    pic->tf.f = &pic->f;
>      if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
>          s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
>          s->codec_id != AV_CODEC_ID_MSS2)
> -        r = ff_thread_get_buffer(s->avctx, &pic->f);
> +        r = ff_thread_get_buffer(s->avctx, &pic->tf,
> +                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
>      else
> -        r = avcodec_default_get_buffer(s->avctx, &pic->f);
> +        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
>  
> -    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
> -        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
> -               r, pic->f.type, pic->f.data[0]);
> +    if (r < 0 || !pic->f.data[0]) {

I wonder why it's checking for pic->f.data[0]? cargo cult?

no need to fix it now though

> +        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
> +               r, pic->f.data[0]);
>          av_freep(&pic->hwaccel_picture_private);
>          return -1;
>      }
> @@ -333,14 +314,14 @@ static int alloc_frame_buffer(MpegEncContext *s, 
> Picture *pic)
>                          s->uvlinesize != pic->f.linesize[1])) {
>          av_log(s->avctx, AV_LOG_ERROR,
>                 "get_buffer() failed (stride changed)\n");
> -        free_frame_buffer(s, pic);
> +        ff_mpeg_unref_picture(s, pic);
>          return -1;
>      }
>  
>      if (pic->f.linesize[1] != pic->f.linesize[2]) {
>          av_log(s->avctx, AV_LOG_ERROR,
>                 "get_buffer() failed (uv stride mismatch)\n");
> -        free_frame_buffer(s, pic);
> +        ff_mpeg_unref_picture(s, pic);
>          return -1;
>      }
>  
> @@ -348,33 +329,105 @@ static int alloc_frame_buffer(MpegEncContext *s, 
> Picture *pic)
>          (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
>          av_log(s->avctx, AV_LOG_ERROR,
>                 "get_buffer() failed to allocate context scratch buffers.\n");
> -        free_frame_buffer(s, pic);
> +        ff_mpeg_unref_picture(s, pic);
>          return ret;
>      }
>  
>      return 0;
>  }
>  
> -/**
> - * Allocate a Picture.
> - * The pixels are allocated/set by calling get_buffer() if shared = 0
> - */
> -int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
> +static void free_picture_tables(Picture *pic)
>  {
> -    const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
> +    int i;
>  
> -    // the + 1 is needed so memset(,,stride*height) does not sig11
> +    av_buffer_unref(&pic->mb_var_buf);
> +    av_buffer_unref(&pic->mc_mb_var_buf);
> +    av_buffer_unref(&pic->mb_mean_buf);
> +    av_buffer_unref(&pic->mbskip_table_buf);
> +    av_buffer_unref(&pic->qscale_table_buf);
> +    av_buffer_unref(&pic->mb_type_buf);
> +
> +    for (i = 0; i < 2; i++) {
> +        av_buffer_unref(&pic->motion_val_buf[i]);
> +        av_buffer_unref(&pic->ref_index_buf[i]);
> +    }
> +}
>  
> +static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
> +{
> +    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
>      const int mb_array_size = s->mb_stride * s->mb_height;
>      const int b8_array_size = s->b8_stride * s->mb_height * 2;
> -    const int b4_array_size = s->b4_stride * s->mb_height * 4;
>      int i;
> -    int r = -1;
> +
> +
> +    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
> +    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
> +    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
> +                                             sizeof(uint32_t));
> +    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || 
> !pic->mb_type_buf)
> +        return AVERROR(ENOMEM);
> +
> +    if (s->encoding) {
> +        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * 
> sizeof(int16_t));
> +        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * 
> sizeof(int16_t));
> +        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
> +        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
> +            return AVERROR(ENOMEM);
> +    }
> +
> +    if (s->out_format == FMT_H263 || s->encoding ||
> +               (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
> +        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
> +        int ref_index_size = 4 * mb_array_size;
> +
> +        for (i = 0; mv_size && i < 2; i++) {
> +            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
> +            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
> +            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
> +                return AVERROR(ENOMEM);
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +static int make_tables_writable(Picture *pic)
> +{
> +    int ret, i;
> +#define MAKE_WRITABLE(table) \
> +do {\
> +    if (pic->table &&\
> +       (ret = av_buffer_make_writable(&pic->table)) < 0)\
> +    return ret;\
> +} while (0)
> +
> +    MAKE_WRITABLE(mb_var_buf);
> +    MAKE_WRITABLE(mc_mb_var_buf);
> +    MAKE_WRITABLE(mb_mean_buf);
> +    MAKE_WRITABLE(mbskip_table_buf);
> +    MAKE_WRITABLE(qscale_table_buf);
> +    MAKE_WRITABLE(mb_type_buf);
> +
> +    for (i = 0; i < 2; i++) {
> +        MAKE_WRITABLE(motion_val_buf[i]);
> +        MAKE_WRITABLE(ref_index_buf[i]);
> +    }
> +
> +    return 0;
> +}
> +
> +/**
> + * Allocate a Picture.
> + * The pixels are allocated/set by calling get_buffer() if shared = 0
> + */
> +int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
> +{
> +    int i, ret;
>  
>      if (shared) {
>          assert(pic->f.data[0]);
> -        assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
> -        pic->f.type = FF_BUFFER_TYPE_SHARED;
> +        pic->shared = 1;
>      } else {
>          assert(!pic->f.data[0]);
>  
> @@ -385,95 +438,138 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, 
> int shared)
>          s->uvlinesize = pic->f.linesize[1];
>      }
>  
> -    if (pic->qscale_table == NULL) {
> -        if (s->encoding) {
> -            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
> -                              mb_array_size * sizeof(int16_t), fail)
> -            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
> -                              mb_array_size * sizeof(int16_t), fail)
> -            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
> -                              mb_array_size * sizeof(int8_t ), fail)
> -        }
> +    if (!pic->qscale_table_buf)
> +        ret = alloc_picture_tables(s, pic);
> +    else
> +        ret = make_tables_writable(pic);
> +    if (ret < 0)
> +        goto fail;
>  
> -        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table,
> -                          mb_array_size * sizeof(uint8_t) + 2, fail)// the + 
> 2 is for the slice end check
> -        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
> -                          (big_mb_num + s->mb_stride) * sizeof(uint8_t),
> -                          fail)
> -        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
> -                          (big_mb_num + s->mb_stride) * sizeof(uint32_t),
> -                          fail)
> -        pic->mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
> -        pic->qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
> -        if (s->out_format == FMT_H264) {
> -            for (i = 0; i < 2; i++) {
> -                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
> -                                  2 * (b4_array_size + 4) * sizeof(int16_t),
> -                                  fail)
> -                pic->motion_val[i] = pic->motion_val_base[i] + 4;
> -                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i],
> -                                  4 * mb_array_size * sizeof(uint8_t), fail)
> -            }
> -            pic->f.motion_subsample_log2 = 2;
> -        } else if (s->out_format == FMT_H263 || s->encoding ||
> -                   (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
> -            for (i = 0; i < 2; i++) {
> -                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
> -                                  2 * (b8_array_size + 4) * sizeof(int16_t),
> -                                  fail)
> -                pic->motion_val[i] = pic->motion_val_base[i] + 4;
> -                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i],
> -                                  4 * mb_array_size * sizeof(uint8_t), fail)
> -            }
> -            pic->f.motion_subsample_log2 = 3;
> -        }
> -        pic->f.qstride = s->mb_stride;
> -        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
> -                          1 * sizeof(AVPanScan), fail)
> +    if (s->encoding) {
> +        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
> +        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
> +        pic->mb_mean   = pic->mb_mean_buf->data;
>      }
>  
> -    pic->owner2 = s;
> +    pic->mbskip_table = pic->mbskip_table_buf->data;
> +    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
> +    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride 
> + 1;
> +
> +    if (pic->motion_val_buf[0]) {
> +        for (i = 0; i < 2; i++) {
> +            pic->motion_val[i] = (int16_t 
> (*)[2])pic->motion_val_buf[i]->data + 4;
> +            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
> +        }
> +    }
>  
>      return 0;
> -fail: // for  the FF_ALLOCZ_OR_GOTO macro
> -    if (r >= 0)
> -        free_frame_buffer(s, pic);
> -    return -1;
> +fail:
> +    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
> +    ff_mpeg_unref_picture(s, pic);
> +    free_picture_tables(pic);
> +    return AVERROR(ENOMEM);
>  }
>  
>  /**
>   * Deallocate a picture.
>   */
> -static void free_picture(MpegEncContext *s, Picture *pic)
> +void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
>  {
> -    int i;
> +    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
> +
> +    pic->tf.f = &pic->f;
> +    /* WM Image / Screen codecs allocate internal buffers with different
> +     * dimensions / colorspaces; ignore user-defined callbacks for these. */
> +    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
> +        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
> +        s->codec_id != AV_CODEC_ID_MSS2)
> +        ff_thread_release_buffer(s->avctx, &pic->tf);
> +    else
> +        av_frame_unref(&pic->f);
> +
> +    av_buffer_unref(&pic->hwaccel_priv_buf);
> +
> +    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
> +}

Are you sure this doesn't leak references to the picture table buffers?

The externally called ff_mpeg_ref_picture() refs the picture tables, but
external code has no direct way of unreffing them again.
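
If it does leak, one way out (sketch only, the public name is made up)
would be to expose the table cleanup instead of keeping
free_picture_tables() static, so callers of ff_mpeg_ref_picture() outside
mpegvideo.c can drop those refs again:

    /* mpegvideo.h */
    void ff_free_picture_tables(Picture *pic);

    /* mpegvideo.c, replacing the static free_picture_tables() */
    void ff_free_picture_tables(Picture *pic)
    {
        int i;

        av_buffer_unref(&pic->mb_var_buf);
        av_buffer_unref(&pic->mc_mb_var_buf);
        av_buffer_unref(&pic->mb_mean_buf);
        av_buffer_unref(&pic->mbskip_table_buf);
        av_buffer_unref(&pic->qscale_table_buf);
        av_buffer_unref(&pic->mb_type_buf);

        for (i = 0; i < 2; i++) {
            av_buffer_unref(&pic->motion_val_buf[i]);
            av_buffer_unref(&pic->ref_index_buf[i]);
        }
    }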

> -    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
> -        free_frame_buffer(s, pic);
> +static int update_picture_tables(Picture *dst, Picture *src)
> +{
> +     int i;
> +
> +#define UPDATE_TABLE(table)\
> +do {\
> +    if (src->table &&\
> +        (!dst->table || dst->table->buffer != src->table->buffer)) {\
> +        av_buffer_unref(&dst->table);\
> +        dst->table = av_buffer_ref(src->table);\
> +        if (!dst->table)\
> +            return AVERROR(ENOMEM);\
> +    }\
> +} while (0)
> +
> +    UPDATE_TABLE(mb_var_buf);
> +    UPDATE_TABLE(mc_mb_var_buf);
> +    UPDATE_TABLE(mb_mean_buf);
> +    UPDATE_TABLE(mbskip_table_buf);
> +    UPDATE_TABLE(qscale_table_buf);
> +    UPDATE_TABLE(mb_type_buf);
> +    for (i = 0; i < 2; i++) {
> +        UPDATE_TABLE(motion_val_buf[i]);
> +        UPDATE_TABLE(ref_index_buf[i]);
>      }

This leaks buffer refs on errors; probably best to move the
free_picture_tables() call into the error handling in the macro, since you
call it after only one of the two update_picture_tables() call sites.
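
I.e. something like (sketch):

    #define UPDATE_TABLE(table)\
    do {\
        if (src->table &&\
            (!dst->table || dst->table->buffer != src->table->buffer)) {\
            av_buffer_unref(&dst->table);\
            dst->table = av_buffer_ref(src->table);\
            if (!dst->table) {\
                free_picture_tables(dst);\
                return AVERROR(ENOMEM);\
            }\
        }\
    } while (0)

which would also cover the UPDATE_PICTURE path in
ff_mpeg_update_thread_context(), where nothing cleans up on failure right
now, and the explicit free_picture_tables() call in ff_mpeg_ref_picture()
could then go away.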

> -    av_freep(&pic->mb_var);
> -    av_freep(&pic->mc_mb_var);
> -    av_freep(&pic->mb_mean);
> -    av_freep(&pic->mbskip_table);
> -    av_freep(&pic->qscale_table_base);
> -    pic->qscale_table = NULL;
> -    av_freep(&pic->mb_type_base);
> -    av_freep(&pic->f.pan_scan);
> -    pic->mb_type = NULL;
> +    dst->mb_var        = src->mb_var;
> +    dst->mc_mb_var     = src->mc_mb_var;
> +    dst->mb_mean       = src->mb_mean;
> +    dst->mbskip_table  = src->mbskip_table;
> +    dst->qscale_table  = src->qscale_table;
> +    dst->mb_type       = src->mb_type;
>      for (i = 0; i < 2; i++) {
> -        av_freep(&pic->motion_val_base[i]);
> -        av_freep(&pic->ref_index[i]);
> -        pic->motion_val[i] = NULL;
> +        dst->motion_val[i] = src->motion_val[i];
> +        dst->ref_index[i]  = src->ref_index[i];
>      }
>  
> -    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
> -        for (i = 0; i < 4; i++) {
> -            pic->f.base[i] =
> -            pic->f.data[i] = NULL;
> -        }
> -        pic->f.type = 0;
> +    return 0;
> +}
> +
> +int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
> +{
> +    int ret;
> +
> +    av_assert0(!dst->f.buf[0]);
> +    av_assert0(src->f.buf[0]);
> +
> +    src->tf.f = &src->f;
> +    dst->tf.f = &dst->f;
> +    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
> +    if (ret < 0)
> +        goto fail;
> +
> +    ret = update_picture_tables(dst, src);
> +    if (ret < 0) {
> +        free_picture_tables(dst);
> +        goto fail;
> +    }
> +
> +    if (src->hwaccel_picture_private) {
> +        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
> +        if (!dst->hwaccel_priv_buf)
> +            goto fail;
> +        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
>      }
> +
> +    dst->field_picture           = src->field_picture;
> +    dst->mb_var_sum              = src->mb_var_sum;
> +    dst->mc_mb_var_sum           = src->mc_mb_var_sum;
> +    dst->b_frame_score           = src->b_frame_score;
> +    dst->needs_realloc           = src->needs_realloc;
> +    dst->reference               = src->reference;
> +    dst->shared                  = src->shared;
> +
> +    return 0;
> +fail:
> +    ff_mpeg_unref_picture(s, dst);
> +    return ret;
>  }
>  
>  static int init_duplicate_context(MpegEncContext *s)
> @@ -594,7 +690,7 @@ int ff_update_duplicate_context(MpegEncContext *dst, 
> MpegEncContext *src)
>  int ff_mpeg_update_thread_context(AVCodecContext *dst,
>                                    const AVCodecContext *src)
>  {
> -    int i;
> +    int i, ret;
>      MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
>  
>      if (dst == src || !s1->context_initialized)
> @@ -606,8 +702,6 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
>          memcpy(s, s1, sizeof(MpegEncContext));
>  
>          s->avctx                 = dst;
> -        s->picture_range_start  += MAX_PICTURE_COUNT;
> -        s->picture_range_end    += MAX_PICTURE_COUNT;
>          s->bitstream_buffer      = NULL;
>          s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
>  
> @@ -632,13 +726,27 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
>      s->picture_number       = s1->picture_number;
>      s->input_picture_number = s1->input_picture_number;
>  
> -    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
> -    memcpy(&s->last_picture, &s1->last_picture,
> -           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
> -
> -    // reset s->picture[].f.extended_data to s->picture[].f.data
> -    for (i = 0; i < s->picture_count; i++)
> -        s->picture[i].f.extended_data = s->picture[i].f.data;
> +    for (i = 0; i < s->picture_count; i++) {
> +        ff_mpeg_unref_picture(s, &s->picture[i]);
> +        if (s1->picture[i].f.data[0] &&
> +            (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) 
> < 0)
> +            return ret;
> +    }
> +
> +#define UPDATE_PICTURE(pic)\
> +do {\
> +    ff_mpeg_unref_picture(s, &s->pic);\
> +    if (s1->pic.f.data[0])\
> +        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
> +    else\
> +        ret = update_picture_tables(&s->pic, &s1->pic);\
> +    if (ret < 0)\
> +        return ret;\
> +} while (0)
> +
> +    UPDATE_PICTURE(current_picture);
> +    UPDATE_PICTURE(last_picture);
> +    UPDATE_PICTURE(next_picture);
>  
>      s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
>      s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
> @@ -728,9 +836,6 @@ void ff_MPV_common_defaults(MpegEncContext *s)
>      s->f_code                = 1;
>      s->b_code                = 1;
>  
> -    s->picture_range_start   = 0;
> -    s->picture_range_end     = MAX_PICTURE_COUNT;
> -
>      s->slice_context_count   = 1;
>  }
>  
> @@ -1017,12 +1122,18 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
>          }
>      }
>  
> -    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
> +    s->picture_count = MAX_PICTURE_COUNT;

no need to keep picture_count if it's always MAX_PICTURE_COUNT
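
I.e. (sketch) the various loops could just become

    for (i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s, &s->picture[i]);

and the field (whose doxy still talks about MAX_PICTURE_COUNT *
avctx->thread_count) could be dropped from MpegEncContext.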

>      FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
>                        s->picture_count * sizeof(Picture), fail);
>      for (i = 0; i < s->picture_count; i++) {
>          avcodec_get_frame_defaults(&s->picture[i].f);
>      }
> +    memset(&s->next_picture, 0, sizeof(s->next_picture));
> +    memset(&s->last_picture, 0, sizeof(s->last_picture));
> +    memset(&s->current_picture, 0, sizeof(s->current_picture));
> +    avcodec_get_frame_defaults(&s->next_picture.f);
> +    avcodec_get_frame_defaults(&s->last_picture.f);
> +    avcodec_get_frame_defaults(&s->current_picture.f);
>  
>      if (s->width && s->height) {
>          if (init_context_frame(s))
> @@ -1136,7 +1247,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
>      } else
>          free_duplicate_context(s);
>  
> -    free_context_frame(s);
> +    if ((err = free_context_frame(s)) < 0)
> +        return err;
>  
>      if (s->picture)
>          for (i = 0; i < s->picture_count; i++) {
> @@ -1225,18 +1337,24 @@ void ff_MPV_common_end(MpegEncContext *s)
>      av_freep(&s->reordered_input_picture);
>      av_freep(&s->dct_offset);
>  
> -    if (s->picture && !s->avctx->internal->is_copy) {
> +    if (s->picture) {
>          for (i = 0; i < s->picture_count; i++) {
> -            free_picture(s, &s->picture[i]);
> +            free_picture_tables(&s->picture[i]);
> +            ff_mpeg_unref_picture(s, &s->picture[i]);
>          }
>      }
>      av_freep(&s->picture);
> +    free_picture_tables(&s->last_picture);
> +    ff_mpeg_unref_picture(s, &s->last_picture);
> +    free_picture_tables(&s->current_picture);
> +    ff_mpeg_unref_picture(s, &s->current_picture);
> +    free_picture_tables(&s->next_picture);
> +    ff_mpeg_unref_picture(s, &s->next_picture);
> +    free_picture_tables(&s->new_picture);
> +    ff_mpeg_unref_picture(s, &s->new_picture);
>  
>      free_context_frame(s);
>  
> -    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
> -        avcodec_default_free_buffers(s->avctx);
> -
>      s->context_initialized      = 0;
>      s->last_picture_ptr         =
>      s->next_picture_ptr         =
> @@ -1342,11 +1460,9 @@ void ff_release_unused_pictures(MpegEncContext*s, int 
> remove_current)
>  
>      /* release non reference frames */
>      for (i = 0; i < s->picture_count; i++) {
> -        if (s->picture[i].f.data[0] && !s->picture[i].reference &&
> -            (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
> -            (remove_current || &s->picture[i] !=  s->current_picture_ptr)
> -            /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
> -            free_frame_buffer(s, &s->picture[i]);
> +        if (!s->picture[i].reference &&
> +            (remove_current || &s->picture[i] !=  s->current_picture_ptr)) {
> +            ff_mpeg_unref_picture(s, &s->picture[i]);
>          }
>      }
>  }
> @@ -1356,8 +1472,7 @@ static inline int pic_is_unused(MpegEncContext *s, 
> Picture *pic)
>      if (pic->f.data[0] == NULL)
>          return 1;
>      if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
> -        if (!pic->owner2 || pic->owner2 == s)
> -            return 1;
> +        return 1;
>      return 0;
>  }
>  
> @@ -1366,16 +1481,12 @@ static int find_unused_picture(MpegEncContext *s, int 
> shared)
>      int i;
>  
>      if (shared) {
> -        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
> -            if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
> +        for (i = 0; i < s->picture_count; i++) {
> +            if (s->picture[i].f.data[0] == NULL)
>                  return i;
>          }
>      } else {
> -        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
> -            if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 
> 0)
> -                return i; // FIXME
> -        }
> -        for (i = s->picture_range_start; i < s->picture_range_end; i++) {
> +        for (i = 0; i < s->picture_count; i++) {
>              if (pic_is_unused(s, &s->picture[i]))
>                  return i;
>          }
> @@ -1388,10 +1499,11 @@ int ff_find_unused_picture(MpegEncContext *s, int 
> shared)
>  {
>      int ret = find_unused_picture(s, shared);
>  
> -    if (ret >= 0 && ret < s->picture_range_end) {
> +    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
>          if (s->picture[ret].needs_realloc) {
>              s->picture[ret].needs_realloc = 0;
> -            free_picture(s, &s->picture[ret]);
> +            free_picture_tables(&s->picture[ret]);
> +            ff_mpeg_unref_picture(s, &s->picture[ret]);
>              avcodec_get_frame_defaults(&s->picture[ret].f);
>          }
>      }
> @@ -1425,7 +1537,7 @@ static void update_noise_reduction(MpegEncContext *s)
>   */
>  int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
>  {
> -    int i;
> +    int i, ret;
>      Picture *pic;
>      s->mb_skipped = 0;
>  
> @@ -1434,22 +1546,20 @@ int ff_MPV_frame_start(MpegEncContext *s, 
> AVCodecContext *avctx)
>          if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
>              s->last_picture_ptr != s->next_picture_ptr &&
>              s->last_picture_ptr->f.data[0]) {
> -            if (s->last_picture_ptr->owner2 == s)
> -                free_frame_buffer(s, s->last_picture_ptr);
> +            ff_mpeg_unref_picture(s, s->last_picture_ptr);
>          }
>  
>          /* release forgotten pictures */
>          /* if (mpeg124/h263) */
>          if (!s->encoding) {
>              for (i = 0; i < s->picture_count; i++) {
> -                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
> -                    &s->picture[i] != s->last_picture_ptr &&
> +                if (&s->picture[i] != s->last_picture_ptr &&
>                      &s->picture[i] != s->next_picture_ptr &&
>                      s->picture[i].reference && !s->picture[i].needs_realloc) 
> {
>                      if (!(avctx->active_thread_type & FF_THREAD_FRAME))
>                          av_log(avctx, AV_LOG_ERROR,
>                                 "releasing zombie picture\n");
> -                    free_frame_buffer(s, &s->picture[i]);
> +                    ff_mpeg_unref_picture(s, &s->picture[i]);
>                  }
>              }
>          }
> @@ -1504,9 +1614,12 @@ int ff_MPV_frame_start(MpegEncContext *s, 
> AVCodecContext *avctx)
>      //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
>      s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
>  
> -    ff_copy_picture(&s->current_picture, s->current_picture_ptr);
> +    ff_mpeg_unref_picture(s, &s->current_picture);
> +    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
> +                                   s->current_picture_ptr)) < 0)
> +        return ret;
>  
> -    if (s->pict_type != AV_PICTURE_TYPE_B) {
> +    if (s->codec_id != AV_CODEC_ID_H264 && s->pict_type != 
> AV_PICTURE_TYPE_B) {
>          s->last_picture_ptr = s->next_picture_ptr;
>          if (!s->droppable)
>              s->next_picture_ptr = s->current_picture_ptr;
> @@ -1554,9 +1667,8 @@ int ff_MPV_frame_start(MpegEncContext *s, 
> AVCodecContext *avctx)
>                     (avctx->height >> v_chroma_shift) *
>                     s->last_picture_ptr->f.linesize[2]);
>  
> -            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
> -            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
> -            s->last_picture_ptr->reference = 3;
> +            ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
> +            ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
>          }
>          if ((s->next_picture_ptr == NULL ||
>               s->next_picture_ptr->f.data[0] == NULL) &&
> @@ -1572,27 +1684,31 @@ int ff_MPV_frame_start(MpegEncContext *s, 
> AVCodecContext *avctx)
>                  s->next_picture_ptr = NULL;
>                  return -1;
>              }
> -            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
> -            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
> -            s->next_picture_ptr->reference = 3;
> +            ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
> +            ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
>          }
>      }
>  
> -    if (s->last_picture_ptr)
> -        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
> -    if (s->next_picture_ptr)
> -        ff_copy_picture(&s->next_picture, s->next_picture_ptr);
> +    if (s->codec_id != AV_CODEC_ID_H264) {

Hmm, doing this cleanup directly after the h264 dempegenccontextize would
have been less confusing. It can be done after the evil plan is committed,
to avoid rebase pain.

> +        if (s->last_picture_ptr) {
> +            ff_mpeg_unref_picture(s, &s->last_picture);
> +            if (s->last_picture_ptr->f.data[0] &&
> +                (ret = ff_mpeg_ref_picture(s, &s->last_picture,
> +                                           s->last_picture_ptr)) < 0)
> +                return ret;
> +        }
> +        if (s->next_picture_ptr) {
> +            ff_mpeg_unref_picture(s, &s->next_picture);
> +            if (s->next_picture_ptr->f.data[0] &&
> +                (ret = ff_mpeg_ref_picture(s, &s->next_picture,
> +                                           s->next_picture_ptr)) < 0)
> +                return ret;
> +        }
>  
> -    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
> -        if (s->next_picture_ptr)
> -            s->next_picture_ptr->owner2 = s;
> -        if (s->last_picture_ptr)
> -            s->last_picture_ptr->owner2 = s;
> +        assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
> +                                                     
> s->last_picture_ptr->f.data[0]));
>      }
>  
> -    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
> -                                                 
> s->last_picture_ptr->f.data[0]));
> -
>      if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
>          int i;
>          for (i = 0; i < 4; i++) {
> @@ -1687,10 +1803,8 @@ void ff_MPV_frame_end(MpegEncContext *s)
>      if (s->encoding) {
>          /* release non-reference frames */
>          for (i = 0; i < s->picture_count; i++) {
> -            if (s->picture[i].f.data[0] && !s->picture[i].reference
> -                /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
> -                free_frame_buffer(s, &s->picture[i]);
> -            }
> +            if (!s->picture[i].reference)
> +                ff_mpeg_unref_picture(s, &s->picture[i]);
>          }
>      }
>      // clear copies, to avoid confusion
> @@ -1701,9 +1815,8 @@ void ff_MPV_frame_end(MpegEncContext *s)
>  #endif
>      s->avctx->coded_frame = &s->current_picture_ptr->f;
>  
> -    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.reference) {
> -        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
> -    }
> +    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.reference)
> +        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
>  }
>  
>  /**
> @@ -1841,7 +1954,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
>                             p->qscale_table[x + y * s->mb_stride]);
>                  }
>                  if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
> -                    int mb_type = pict->mb_type[x + y * s->mb_stride];
> +                    int mb_type = p->mb_type[x + y * s->mb_stride];


The patch looks incomplete: I'm missing the change to the function
definition that this variable renaming requires. It probably ended up in
another patch; I guess it's OK in the squashed commit.

>                      // Type & MV direction
>                      if (IS_PCM(mb_type))
>                          av_log(s->avctx, AV_LOG_DEBUG, "P");
> @@ -1915,7 +2028,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
>                                pict->linesize[i] * height >> v_chroma_shift);
>              pict->data[i] = s->visualization_buffer[i];
>          }
> -        pict->type   = FF_BUFFER_TYPE_COPY;
> +        //pict->type   = FF_BUFFER_TYPE_COPY;

remove the line instead of commenting it

>          ptr          = pict->data[0];
>          block_height = 16 >> v_chroma_shift;
>  
> @@ -1923,7 +2036,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
>              int mb_x;
>              for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
>                  const int mb_index = mb_x + mb_y * s->mb_stride;
> -                if ((s->avctx->debug_mv) && pict->motion_val) {
> +                if ((s->avctx->debug_mv) && p->motion_val) {
>                      int type;
>                      for (type = 0; type < 3; type++) {
>                          int direction = 0;
> @@ -1947,46 +2060,46 @@ void ff_print_debug_info(MpegEncContext *s, Picture 
> *p)
>                              direction = 1;
>                              break;
>                          }
> -                        if (!USES_LIST(pict->mb_type[mb_index], direction))
> +                        if (!USES_LIST(p->mb_type[mb_index], direction))
>                              continue;
>  
> -                        if (IS_8X8(pict->mb_type[mb_index])) {
> +                        if (IS_8X8(p->mb_type[mb_index])) {
>                              int i;
>                              for (i = 0; i < 4; i++) {
>                                  int sx = mb_x * 16 + 4 + 8 * (i & 1);
>                                  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
>                                  int xy = (mb_x * 2 + (i & 1) +
>                                            (mb_y * 2 + (i >> 1)) * mv_stride) 
> << (mv_sample_log2 - 1);
> -                                int mx = (pict->motion_val[direction][xy][0] 
> >> shift) + sx;
> -                                int my = (pict->motion_val[direction][xy][1] 
> >> shift) + sy;
> +                                int mx = (p->motion_val[direction][xy][0] >> 
> shift) + sx;
> +                                int my = (p->motion_val[direction][xy][1] >> 
> shift) + sy;
>                                  draw_arrow(ptr, sx, sy, mx, my, width,
>                                             height, s->linesize, 100);
>                              }
> -                        } else if (IS_16X8(pict->mb_type[mb_index])) {
> +                        } else if (IS_16X8(p->mb_type[mb_index])) {
>                              int i;
>                              for (i = 0; i < 2; i++) {
>                                  int sx = mb_x * 16 + 8;
>                                  int sy = mb_y * 16 + 4 + 8 * i;
>                                  int xy = (mb_x * 2 + (mb_y * 2 + i) * 
> mv_stride) << (mv_sample_log2 - 1);
> -                                int mx = (pict->motion_val[direction][xy][0] 
> >> shift);
> -                                int my = (pict->motion_val[direction][xy][1] 
> >> shift);
> +                                int mx = (p->motion_val[direction][xy][0] >> 
> shift);
> +                                int my = (p->motion_val[direction][xy][1] >> 
> shift);
>  
> -                                if (IS_INTERLACED(pict->mb_type[mb_index]))
> +                                if (IS_INTERLACED(p->mb_type[mb_index]))
>                                      my *= 2;
>  
>                              draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
>                                         height, s->linesize, 100);
>                              }
> -                        } else if (IS_8X16(pict->mb_type[mb_index])) {
> +                        } else if (IS_8X16(p->mb_type[mb_index])) {
>                              int i;
>                              for (i = 0; i < 2; i++) {
>                                  int sx = mb_x * 16 + 4 + 8 * i;
>                                  int sy = mb_y * 16 + 8;
>                                  int xy = (mb_x * 2 + i + mb_y * 2 * 
> mv_stride) << (mv_sample_log2 - 1);
> -                                int mx = pict->motion_val[direction][xy][0] 
> >> shift;
> -                                int my = pict->motion_val[direction][xy][1] 
> >> shift;
> +                                int mx = p->motion_val[direction][xy][0] >> 
> shift;
> +                                int my = p->motion_val[direction][xy][1] >> 
> shift;
>  
> -                                if (IS_INTERLACED(pict->mb_type[mb_index]))
> +                                if (IS_INTERLACED(p->mb_type[mb_index]))
>                                      my *= 2;
>  
>                                  draw_arrow(ptr, sx, sy, mx + sx, my + sy, 
> width,
> @@ -1996,13 +2109,13 @@ void ff_print_debug_info(MpegEncContext *s, Picture 
> *p)
>                                int sx = mb_x * 16 + 8;
>                                int sy = mb_y * 16 + 8;
>                                int xy = (mb_x + mb_y * mv_stride) << 
> mv_sample_log2;
> -                              int mx = pict->motion_val[direction][xy][0] >> 
> shift + sx;
> -                              int my = pict->motion_val[direction][xy][1] >> 
> shift + sy;
> +                              int mx = p->motion_val[direction][xy][0] >> 
> shift + sx;
> +                              int my = p->motion_val[direction][xy][1] >> 
> shift + sy;
>                                draw_arrow(ptr, sx, sy, mx, my, width, height, 
> s->linesize, 100);
>                          }
>                      }
>                  }
> -                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) 
> {
> +                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && p->motion_val) {
>                      uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
>                                   0x0101010101010101ULL;
>                      int y;
> @@ -2016,8 +2129,8 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
>                      }
>                  }
>                  if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
> -                    pict->motion_val) {
> -                    int mb_type = pict->mb_type[mb_index];
> +                    p->motion_val) {
> +                    int mb_type = p->mb_type[mb_index];
>                      uint64_t u,v;
>                      int y;
>  #define COLOR(theta, r) \
> @@ -2081,7 +2194,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
>                              int xy = (mb_x * 2 + (i & 1) +
>                                       (mb_y * 2 + (i >> 1)) * mv_stride) << 
> (mv_sample_log2 - 1);
>                              // FIXME bidir
> -                            int32_t *mv = (int32_t *) 
> &pict->motion_val[0][xy];
> +                            int32_t *mv = (int32_t *) &p->motion_val[0][xy];
>                              if (mv[0] != mv[dm] ||
>                                  mv[dm * mv_stride] != mv[dm * (mv_stride + 
> 1)])
>                                  for (y = 0; y < 8; y++)
> @@ -2295,12 +2408,12 @@ void MPV_decode_mb_internal(MpegEncContext *s, 
> int16_t block[12][64],
>  
>                  if(HAVE_THREADS && 
> s->avctx->active_thread_type&FF_THREAD_FRAME) {
>                      if (s->mv_dir & MV_DIR_FORWARD) {
> -                        ff_thread_await_progress(&s->last_picture_ptr->f,
> +                        ff_thread_await_progress(&s->last_picture_ptr->tf,
>                                                   
> ff_MPV_lowest_referenced_row(s, 0),
>                                                   0);
>                      }
>                      if (s->mv_dir & MV_DIR_BACKWARD) {
> -                        ff_thread_await_progress(&s->next_picture_ptr->f,
> +                        ff_thread_await_progress(&s->next_picture_ptr->tf,
>                                                   
> ff_MPV_lowest_referenced_row(s, 1),
>                                                   0);
>                      }
> @@ -2606,12 +2719,8 @@ void ff_mpeg_flush(AVCodecContext *avctx){
>      if(s==NULL || s->picture==NULL)
>          return;
>  
> -    for(i=0; i<s->picture_count; i++){
> -       if (s->picture[i].f.data[0] &&
> -           (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
> -            s->picture[i].f.type == FF_BUFFER_TYPE_USER))
> -        free_frame_buffer(s, &s->picture[i]);
> -    }
> +    for (i = 0; i < s->picture_count; i++)
> +        ff_mpeg_unref_picture(s, &s->picture[i]);
>      s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = 
> NULL;
>  
>      s->mb_x= s->mb_y= 0;
> @@ -2864,7 +2973,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
>  void ff_MPV_report_decode_progress(MpegEncContext *s)
>  {
>      if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && 
> !s->er.error_occurred)
> -        ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
> +        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
>  }
>  
>  void ff_mpeg_er_frame_start(MpegEncContext *s)
> diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
> index bc919a9..f25797e 100644
> --- a/libavcodec/mpegvideo.h
> +++ b/libavcodec/mpegvideo.h
> @@ -37,6 +37,7 @@
>  #include "parser.h"
>  #include "mpeg12data.h"
>  #include "rl.h"
> +#include "thread.h"
>  #include "videodsp.h"
>  
>  #include "libavutil/opt.h"
> @@ -94,10 +95,38 @@ struct MpegEncContext;
>   */
>  typedef struct Picture{
>      struct AVFrame f;
> +    ThreadFrame tf;
> +
> +    AVBufferRef *qscale_table_buf;
> +    int8_t *qscale_table;
> +
> +    AVBufferRef *motion_val_buf[2];
> +    int16_t (*motion_val[2])[2];
> +
> +    AVBufferRef *mb_type_buf;
> +    uint32_t *mb_type;
> +
> +    AVBufferRef *mbskip_table_buf;
> +    uint8_t *mbskip_table;
> +
> +    AVBufferRef *ref_index_buf[2];
> +    int8_t *ref_index[2];
> +
> +    AVBufferRef *mb_var_buf;
> +    uint16_t *mb_var;           ///< Table for MB variances
> +
> +    AVBufferRef *mc_mb_var_buf;
> +    uint16_t *mc_mb_var;        ///< Table for motion compensated MB 
> variances
> +
> +    AVBufferRef *mb_mean_buf;
> +    uint8_t *mb_mean;           ///< Table for MB luminance
> +
> +    AVBufferRef *hwaccel_priv_buf;
> +    /**
> +     * hardware accelerator private data
> +     */
> +    void *hwaccel_picture_private;
>  
> -    int8_t *qscale_table_base;
> -    int16_t (*motion_val_base[2])[2];
> -    uint32_t *mb_type_base;
>  #define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there is just 
> one type
>  #define IS_INTRA4x4(a)   ((a)&MB_TYPE_INTRA4x4)
>  #define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
> @@ -137,24 +166,12 @@ typedef struct Picture{
>  
>      int mb_var_sum;             ///< sum of MB variance for current frame
>      int mc_mb_var_sum;          ///< motion compensated MB variance for 
> current frame
> -    uint16_t *mb_var;           ///< Table for MB variances
> -    uint16_t *mc_mb_var;        ///< Table for motion compensated MB 
> variances
> -    uint8_t *mb_mean;           ///< Table for MB luminance
> +
>      int b_frame_score;          /* */
> -    void *owner2;               ///< pointer to the context that allocated 
> this picture
>      int needs_realloc;          ///< Picture needs to be reallocated (eg due 
> to a frame size change)
> -    /**
> -     * hardware accelerator private data
> -     */
> -    void *hwaccel_picture_private;
>  
>      int reference;
> -
> -    int8_t *qscale_table;
> -    uint8_t *mbskip_table;
> -    int16_t (*motion_val[2])[2];
> -    uint32_t *mb_type;
> -    int8_t *ref_index[2];
> +    int shared;
>  } Picture;

is Picture.type still used?

>  
>  /**
> @@ -324,7 +341,6 @@ typedef struct MpegEncContext {
>      Picture *next_picture_ptr;     ///< pointer to the next picture (for 
> bidir pred)
>      Picture *current_picture_ptr;  ///< pointer to the current picture
>      int picture_count;             ///< number of allocated pictures 
> (MAX_PICTURE_COUNT * avctx->thread_count)
> -    int picture_range_start, picture_range_end; ///< the part of picture 
> that this context can allocate in
>      uint8_t *visualization_buffer[3]; ///< temporary buffer vor MV 
> visualization
>      int last_dc[3];                ///< last DC values for MPEG1
>      int16_t *dc_val_base;
> @@ -798,7 +814,6 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], 
> uint16_t (*qmat16)[2][6
>  int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, 
> int *overflow);
>  
>  void ff_init_block_index(MpegEncContext *s);
> -void ff_copy_picture(Picture *dst, Picture *src);
>  
>  void ff_MPV_motion(MpegEncContext *s,
>                     uint8_t *dest_y, uint8_t *dest_cb,
> @@ -926,4 +941,7 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
>                         int16_t block[6][64],
>                         int motion_x, int motion_y);
>  
> +int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src);
> +void ff_mpeg_unref_picture(MpegEncContext *s, Picture *picture);
> +
>  #endif /* AVCODEC_MPEGVIDEO_H */
> diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
> index 6127eac..ffa412f 100644
> --- a/libavcodec/mpegvideo_enc.c
> +++ b/libavcodec/mpegvideo_enc.c
> @@ -878,9 +878,9 @@ static int get_intra_count(MpegEncContext *s, uint8_t 
> *src,
>  
>  static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
>  {
> -    AVFrame *pic = NULL;
> +    Picture *pic = NULL;
>      int64_t pts;
> -    int i, display_picture_number = 0;
> +    int i, display_picture_number = 0, ret;
>      const int encoding_delay = s->max_b_frames ? s->max_b_frames :
>                                                   (s->low_delay ? 0 : 1);
>      int direct = 1;
> @@ -919,7 +919,7 @@ static int load_input_picture(MpegEncContext *s, const 
> AVFrame *pic_arg)
>      }
>  
>      if (pic_arg) {
> -        if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
> +        if (!pic_arg->buf[0])
>              direct = 0;
>          if (pic_arg->linesize[0] != s->linesize)
>              direct = 0;
> @@ -936,14 +936,12 @@ static int load_input_picture(MpegEncContext *s, const 
> AVFrame *pic_arg)
>              if (i < 0)
>                  return i;
>  
> -            pic = &s->picture[i].f;
> +            pic = &s->picture[i];
>              pic->reference = 3;
>  
> -            for (i = 0; i < 4; i++) {
> -                pic->data[i]     = pic_arg->data[i];
> -                pic->linesize[i] = pic_arg->linesize[i];
> -            }
> -            if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
> +            if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
> +                return ret;
> +            if (ff_alloc_picture(s, pic, 1) < 0) {
>                  return -1;
>              }
>          } else {
> @@ -951,16 +949,16 @@ static int load_input_picture(MpegEncContext *s, const 
> AVFrame *pic_arg)
>              if (i < 0)
>                  return i;
>  
> -            pic = &s->picture[i].f;
> +            pic = &s->picture[i];
>              pic->reference = 3;
>  
> -            if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
> +            if (ff_alloc_picture(s, pic, 0) < 0) {
>                  return -1;
>              }
>  
> -            if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
> -                pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
> -                pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
> +            if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
> +                pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
> +                pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
>                  // empty
>              } else {
>                  int h_chroma_shift, v_chroma_shift;
> @@ -976,7 +974,7 @@ static int load_input_picture(MpegEncContext *s, const 
> AVFrame *pic_arg)
>                      int w = s->width  >> h_shift;
>                      int h = s->height >> v_shift;
>                      uint8_t *src = pic_arg->data[i];
> -                    uint8_t *dst = pic->data[i];
> +                    uint8_t *dst = pic->f.data[i];
>  
>                      if (!s->avctx->rc_buffer_size)
>                          dst += INPLACE_OFFSET;
> @@ -993,9 +991,9 @@ static int load_input_picture(MpegEncContext *s, const 
> AVFrame *pic_arg)
>                  }
>              }
>          }
> -        copy_picture_attributes(s, pic, pic_arg);
> -        pic->display_picture_number = display_picture_number;
> -        pic->pts = pts; // we set this here to avoid modifiying pic_arg
> +        copy_picture_attributes(s, &pic->f, pic_arg);
> +        pic->f.display_picture_number = display_picture_number;
> +        pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
>      }
>  
>      /* shift buffer entries */
> @@ -1018,7 +1016,7 @@ static int skip_check(MpegEncContext *s, Picture *p, 
> Picture *ref)
>          const int bw = plane ? 1 : 2;
>          for (y = 0; y < s->mb_height * bw; y++) {
>              for (x = 0; x < s->mb_width * bw; x++) {
> -                int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
> +                int off = p->shared ? 0 : 16;
>                  uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + 
> off;
>                  uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
>                  int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
> @@ -1114,7 +1112,7 @@ static int estimate_best_b_count(MpegEncContext *s)
>          if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
>              pre_input = *pre_input_ptr;
>  
> -            if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
> +            if (!pre_input.shared && i) {
>                  pre_input.f.data[0] += INPLACE_OFFSET;
>                  pre_input.f.data[1] += INPLACE_OFFSET;
>                  pre_input.f.data[2] += INPLACE_OFFSET;
> @@ -1185,7 +1183,7 @@ static int estimate_best_b_count(MpegEncContext *s)
>  
>  static int select_input_picture(MpegEncContext *s)
>  {
> -    int i;
> +    int i, ret;
>  
>      for (i = 1; i < MAX_PICTURE_COUNT; i++)
>          s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
> @@ -1206,17 +1204,7 @@ static int select_input_picture(MpegEncContext *s)
>                  if (s->picture_in_gop_number < s->gop_size &&
>                      skip_check(s, s->input_picture[0], s->next_picture_ptr)) 
> {
>                      // FIXME check that te gop check above is +-1 correct
> -                    if (s->input_picture[0]->f.type == 
> FF_BUFFER_TYPE_SHARED) {
> -                        for (i = 0; i < 4; i++)
> -                            s->input_picture[0]->f.data[i] = NULL;
> -                        s->input_picture[0]->f.type = 0;
> -                    } else {
> -                        assert(s->input_picture[0]->f.type == 
> FF_BUFFER_TYPE_USER ||
> -                               s->input_picture[0]->f.type == 
> FF_BUFFER_TYPE_INTERNAL);
> -
> -                        s->avctx->release_buffer(s->avctx,
> -                                                 &s->input_picture[0]->f);
> -                    }
> +                    av_frame_unref(&s->input_picture[0]->f);
>  
>                      emms_c();
>                      ff_vbv_update(s, 0);
> @@ -1324,10 +1312,11 @@ no_output_pic:
>             s->reordered_input_picture[0]->f.pict_type !=
>                 AV_PICTURE_TYPE_B ? 3 : 0;
>  
> -        ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
> +        ff_mpeg_unref_picture(s, &s->new_picture);
> +        if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, 
> s->reordered_input_picture[0])))
> +            return ret;
>  
> -        if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
> -            s->avctx->rc_buffer_size) {
> +        if (s->reordered_input_picture[0]->shared || 
> s->avctx->rc_buffer_size) {
>              // input is a shared pix, so we can't modifiy it -> alloc a new
>              // one & ensure that the shared one is reuseable
>  
> @@ -1342,32 +1331,25 @@ no_output_pic:
>                  return -1;
>              }
>  
> -            /* mark us unused / free shared pic */
> -            if (s->reordered_input_picture[0]->f.type == 
> FF_BUFFER_TYPE_INTERNAL)
> -                s->avctx->release_buffer(s->avctx,
> -                                         &s->reordered_input_picture[0]->f);
> -            for (i = 0; i < 4; i++)
> -                s->reordered_input_picture[0]->f.data[i] = NULL;
> -            s->reordered_input_picture[0]->f.type = 0;
> -
>              copy_picture_attributes(s, &pic->f,
>                                      &s->reordered_input_picture[0]->f);
>  
> +            /* mark us unused / free shared pic */
> +            av_frame_unref(&s->reordered_input_picture[0]->f);
> +            s->reordered_input_picture[0]->shared = 0;
> +
>              s->current_picture_ptr = pic;
>          } else {
>              // input is not a shared pix -> reuse buffer for current_pix
> -
> -            assert(s->reordered_input_picture[0]->f.type ==
> -                       FF_BUFFER_TYPE_USER ||
> -                   s->reordered_input_picture[0]->f.type ==
> -                       FF_BUFFER_TYPE_INTERNAL);
> -
>              s->current_picture_ptr = s->reordered_input_picture[0];
>              for (i = 0; i < 4; i++) {
>                  s->new_picture.f.data[i] += INPLACE_OFFSET;
>              }
>          }
> -        ff_copy_picture(&s->current_picture, s->current_picture_ptr);
> +        ff_mpeg_unref_picture(s, &s->current_picture);
> +        if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
> +                                       s->current_picture_ptr)) < 0)
> +            return ret;
>  
>          s->picture_number = s->new_picture.f.display_picture_number;
>      } else {
> diff --git a/tests/ref/fate/mpeg2-field-enc b/tests/ref/fate/mpeg2-field-enc
> index 079aae4..d3ef6ba 100644
> --- a/tests/ref/fate/mpeg2-field-enc
> +++ b/tests/ref/fate/mpeg2-field-enc
> @@ -29,4 +29,4 @@
>  0,     129600,     129600,        0,   622080, 0xa45e1d95
>  0,     133200,     133200,        0,   622080, 0x6cc61d6c
>  0,     136800,     136800,        0,   622080, 0x6983b417
> -0,     140400,     140400,        0,   622080, 0x982363c0
> +0,     140400,     140400,        0,   622080, 0xb8fc8ca2

why does the fate reference change?

the rest looks ok-ish

Janne