On Sun, 3 Mar 2013 20:02:13 +0100, Janne Grunau <[email protected]> wrote:
> >  
> >  /**
> > @@ -324,7 +341,6 @@ typedef struct MpegEncContext {
> >      Picture *next_picture_ptr;     ///< pointer to the next picture (for 
> > bidir pred)
> >      Picture *current_picture_ptr;  ///< pointer to the current picture
> >      int picture_count;             ///< number of allocated pictures 
> > (MAX_PICTURE_COUNT * avctx->thread_count)
> > -    int picture_range_start, picture_range_end; ///< the part of picture 
> > that this context can allocate in
> >      uint8_t *visualization_buffer[3]; ///< temporary buffer vor MV 
> > visualization
> >      int last_dc[3];                ///< last DC values for MPEG1
> >      int16_t *dc_val_base;
> > @@ -798,7 +814,6 @@ void ff_convert_matrix(DSPContext *dsp, int 
> > (*qmat)[64], uint16_t (*qmat16)[2][6
> >  int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int 
> > qscale, int *overflow);
> >  
> >  void ff_init_block_index(MpegEncContext *s);
> > -void ff_copy_picture(Picture *dst, Picture *src);
> >  
> >  void ff_MPV_motion(MpegEncContext *s,
> >                     uint8_t *dest_y, uint8_t *dest_cb,
> > @@ -926,4 +941,7 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
> >                         int16_t block[6][64],
> >                         int motion_x, int motion_y);
> >  
> > +int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src);
> > +void ff_mpeg_unref_picture(MpegEncContext *s, Picture *picture);
> > +
> >  #endif /* AVCODEC_MPEGVIDEO_H */
> > diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
> > index 6127eac..ffa412f 100644
> > --- a/libavcodec/mpegvideo_enc.c
> > +++ b/libavcodec/mpegvideo_enc.c
> > @@ -878,9 +878,9 @@ static int get_intra_count(MpegEncContext *s, uint8_t 
> > *src,
> >  
> >  static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
> >  {
> > -    AVFrame *pic = NULL;
> > +    Picture *pic = NULL;
> >      int64_t pts;
> > -    int i, display_picture_number = 0;
> > +    int i, display_picture_number = 0, ret;
> >      const int encoding_delay = s->max_b_frames ? s->max_b_frames :
> >                                                   (s->low_delay ? 0 : 1);
> >      int direct = 1;
> > @@ -919,7 +919,7 @@ static int load_input_picture(MpegEncContext *s, const 
> > AVFrame *pic_arg)
> >      }
> >  
> >      if (pic_arg) {
> > -        if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
> > +        if (!pic_arg->buf[0])
> >              direct = 0;
> >          if (pic_arg->linesize[0] != s->linesize)
> >              direct = 0;
> > @@ -936,14 +936,12 @@ static int load_input_picture(MpegEncContext *s, 
> > const AVFrame *pic_arg)
> >              if (i < 0)
> >                  return i;
> >  
> > -            pic = &s->picture[i].f;
> > +            pic = &s->picture[i];
> >              pic->reference = 3;
> >  
> > -            for (i = 0; i < 4; i++) {
> > -                pic->data[i]     = pic_arg->data[i];
> > -                pic->linesize[i] = pic_arg->linesize[i];
> > -            }
> > -            if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
> > +            if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
> > +                return ret;
> > +            if (ff_alloc_picture(s, pic, 1) < 0) {
> >                  return -1;
> >              }
> >          } else {
> > @@ -951,16 +949,16 @@ static int load_input_picture(MpegEncContext *s, 
> > const AVFrame *pic_arg)
> >              if (i < 0)
> >                  return i;
> >  
> > -            pic = &s->picture[i].f;
> > +            pic = &s->picture[i];
> >              pic->reference = 3;
> >  
> > -            if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
> > +            if (ff_alloc_picture(s, pic, 0) < 0) {
> >                  return -1;
> >              }
> >  
> > -            if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
> > -                pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
> > -                pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
> > +            if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
> > +                pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
> > +                pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
> >                  // empty
> >              } else {
> >                  int h_chroma_shift, v_chroma_shift;
> > @@ -976,7 +974,7 @@ static int load_input_picture(MpegEncContext *s, const 
> > AVFrame *pic_arg)
> >                      int w = s->width  >> h_shift;
> >                      int h = s->height >> v_shift;
> >                      uint8_t *src = pic_arg->data[i];
> > -                    uint8_t *dst = pic->data[i];
> > +                    uint8_t *dst = pic->f.data[i];
> >  
> >                      if (!s->avctx->rc_buffer_size)
> >                          dst += INPLACE_OFFSET;
> > @@ -993,9 +991,9 @@ static int load_input_picture(MpegEncContext *s, const 
> > AVFrame *pic_arg)
> >                  }
> >              }
> >          }
> > -        copy_picture_attributes(s, pic, pic_arg);
> > -        pic->display_picture_number = display_picture_number;
> > -        pic->pts = pts; // we set this here to avoid modifiying pic_arg
> > +        copy_picture_attributes(s, &pic->f, pic_arg);
> > +        pic->f.display_picture_number = display_picture_number;
> > +        pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
> >      }
> >  
> >      /* shift buffer entries */
> > @@ -1018,7 +1016,7 @@ static int skip_check(MpegEncContext *s, Picture *p, 
> > Picture *ref)
> >          const int bw = plane ? 1 : 2;
> >          for (y = 0; y < s->mb_height * bw; y++) {
> >              for (x = 0; x < s->mb_width * bw; x++) {
> > -                int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
> > +                int off = p->shared ? 0 : 16;
> >                  uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + 
> > off;
> >                  uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
> >                  int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 
> > 8);
> > @@ -1114,7 +1112,7 @@ static int estimate_best_b_count(MpegEncContext *s)
> >          if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
> >              pre_input = *pre_input_ptr;
> >  
> > -            if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
> > +            if (!pre_input.shared && i) {
> >                  pre_input.f.data[0] += INPLACE_OFFSET;
> >                  pre_input.f.data[1] += INPLACE_OFFSET;
> >                  pre_input.f.data[2] += INPLACE_OFFSET;
> > @@ -1185,7 +1183,7 @@ static int estimate_best_b_count(MpegEncContext *s)
> >  
> >  static int select_input_picture(MpegEncContext *s)
> >  {
> > -    int i;
> > +    int i, ret;
> >  
> >      for (i = 1; i < MAX_PICTURE_COUNT; i++)
> >          s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
> > @@ -1206,17 +1204,7 @@ static int select_input_picture(MpegEncContext *s)
> >                  if (s->picture_in_gop_number < s->gop_size &&
> >                      skip_check(s, s->input_picture[0], 
> > s->next_picture_ptr)) {
> >                      // FIXME check that te gop check above is +-1 correct
> > -                    if (s->input_picture[0]->f.type == 
> > FF_BUFFER_TYPE_SHARED) {
> > -                        for (i = 0; i < 4; i++)
> > -                            s->input_picture[0]->f.data[i] = NULL;
> > -                        s->input_picture[0]->f.type = 0;
> > -                    } else {
> > -                        assert(s->input_picture[0]->f.type == 
> > FF_BUFFER_TYPE_USER ||
> > -                               s->input_picture[0]->f.type == 
> > FF_BUFFER_TYPE_INTERNAL);
> > -
> > -                        s->avctx->release_buffer(s->avctx,
> > -                                                 &s->input_picture[0]->f);
> > -                    }
> > +                    av_frame_unref(&s->input_picture[0]->f);
> >  
> >                      emms_c();
> >                      ff_vbv_update(s, 0);
> > @@ -1324,10 +1312,11 @@ no_output_pic:
> >             s->reordered_input_picture[0]->f.pict_type !=
> >                 AV_PICTURE_TYPE_B ? 3 : 0;
> >  
> > -        ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
> > +        ff_mpeg_unref_picture(s, &s->new_picture);
> > +        if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, 
> > s->reordered_input_picture[0])))
> > +            return ret;
> >  
> > -        if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED 
> > ||
> > -            s->avctx->rc_buffer_size) {
> > +        if (s->reordered_input_picture[0]->shared || 
> > s->avctx->rc_buffer_size) {
> >              // input is a shared pix, so we can't modifiy it -> alloc a new
> >              // one & ensure that the shared one is reuseable
> >  
> > @@ -1342,32 +1331,25 @@ no_output_pic:
> >                  return -1;
> >              }
> >  
> > -            /* mark us unused / free shared pic */
> > -            if (s->reordered_input_picture[0]->f.type == 
> > FF_BUFFER_TYPE_INTERNAL)
> > -                s->avctx->release_buffer(s->avctx,
> > -                                         
> > &s->reordered_input_picture[0]->f);
> > -            for (i = 0; i < 4; i++)
> > -                s->reordered_input_picture[0]->f.data[i] = NULL;
> > -            s->reordered_input_picture[0]->f.type = 0;
> > -
> >              copy_picture_attributes(s, &pic->f,
> >                                      &s->reordered_input_picture[0]->f);
> >  
> > +            /* mark us unused / free shared pic */
> > +            av_frame_unref(&s->reordered_input_picture[0]->f);
> > +            s->reordered_input_picture[0]->shared = 0;
> > +
> >              s->current_picture_ptr = pic;
> >          } else {
> >              // input is not a shared pix -> reuse buffer for current_pix
> > -
> > -            assert(s->reordered_input_picture[0]->f.type ==
> > -                       FF_BUFFER_TYPE_USER ||
> > -                   s->reordered_input_picture[0]->f.type ==
> > -                       FF_BUFFER_TYPE_INTERNAL);
> > -
> >              s->current_picture_ptr = s->reordered_input_picture[0];
> >              for (i = 0; i < 4; i++) {
> >                  s->new_picture.f.data[i] += INPLACE_OFFSET;
> >              }
> >          }
> > -        ff_copy_picture(&s->current_picture, s->current_picture_ptr);
> > +        ff_mpeg_unref_picture(s, &s->current_picture);
> > +        if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
> > +                                       s->current_picture_ptr)) < 0)
> > +            return ret;
> >  
> >          s->picture_number = s->new_picture.f.display_picture_number;
> >      } else {
> > diff --git a/tests/ref/fate/mpeg2-field-enc b/tests/ref/fate/mpeg2-field-enc
> > index 079aae4..d3ef6ba 100644
> > --- a/tests/ref/fate/mpeg2-field-enc
> > +++ b/tests/ref/fate/mpeg2-field-enc
> > @@ -29,4 +29,4 @@
> >  0,     129600,     129600,        0,   622080, 0xa45e1d95
> >  0,     133200,     133200,        0,   622080, 0x6cc61d6c
> >  0,     136800,     136800,        0,   622080, 0x6983b417
> > -0,     140400,     140400,        0,   622080, 0x982363c0
> > +0,     140400,     140400,        0,   622080, 0xb8fc8ca2
> 
> why does the fate reference change?

Ah right... the sample is cut, so only part of the last frame is present. Since
it's encoded as field pictures, error resilience (ER) doesn't work on it. So the
bottom part of that frame just contains whatever data happened to be in the buffer.

I suppose we could skip that last frame in the test.

-- 
Anton Khirnov
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel

Reply via email to