On 2013-02-13 14:01:07 +0100, Anton Khirnov wrote:
> Most of the changes are just trivial replacements of
> fields from MpegEncContext with equivalent fields in H264Context.
> Everything in h264* other than h264.c are those trivial changes.
>
> The nontrivial parts are:
> 1) extracting a simplified version of the frame management code from
> mpegvideo.c. We don't need last/next_picture anymore, since h264 uses
> its own more complex system already and those were set only to appease
> the mpegvideo parts.
> 2) some tables that need to be allocated/freed in appropriate places.
> 3) hwaccels -- mostly trivial replacements.
> for dxva, the draw_horiz_band() call is moved from
> ff_dxva2_common_end_frame() to per-codec end_frame() callbacks,
> because it's now different for h264 and MpegEncContext-based
> decoders.
> 4) svq3 -- it does not use h264 complex reference system, so I just
> added some very simplistic frame management instead and dropped the
> use of ff_h264_frame_start(). Because of this I also had to move some
> initialization code to svq3.
> ---
> By popular vote renamed cur to cur_pic_ptr.
> Fixed ER to work (more) properly.
> Other minor fixes/missed things here and there.
> FATE still passes of course, both frame and slice threads under valgrind.
> I also did some amount of fuzzing. Some deadlocks appear, but those are
> present
> in master too.
> ---
> libavcodec/dxva2.c | 6 +-
> libavcodec/dxva2_h264.c | 48 +-
> libavcodec/dxva2_internal.h | 2 +-
> libavcodec/dxva2_mpeg2.c | 12 +-
> libavcodec/dxva2_vc1.c | 12 +-
these are ok
> libavcodec/h264.c | 1768
> ++++++++++++++++++++++++-----------------
not reviewed for now
> diff --git a/libavcodec/h264.h b/libavcodec/h264.h
> index c4addbe..6d57723 100644
> --- a/libavcodec/h264.h
> +++ b/libavcodec/h264.h
> @@ -30,6 +30,7 @@
>
> #include "libavutil/intreadwrite.h"
> #include "cabac.h"
> +#include "get_bits.h"
> #include "mpegvideo.h"
> #include "h264chroma.h"
> #include "h264dsp.h"
> @@ -60,7 +61,7 @@
> #define MB_MBAFF h->mb_mbaff
> #define MB_FIELD h->mb_field_decoding_flag
> #define FRAME_MBAFF h->mb_aff_frame
> -#define FIELD_PICTURE (s->picture_structure != PICT_FRAME)
> +#define FIELD_PICTURE (h->picture_structure != PICT_FRAME)
> #define LEFT_MBS 2
> #define LTOP 0
> #define LBOT 1
> @@ -250,15 +251,42 @@ typedef struct MMCO {
> * H264Context
> */
> typedef struct H264Context {
> - MpegEncContext s;
> + AVCodecContext *avctx;
> + DSPContext dsp;
> + VideoDSPContext vdsp;
> H264DSPContext h264dsp;
> H264ChromaContext h264chroma;
> H264QpelContext h264qpel;
> + MotionEstContext me;
> + ParseContext parse_context;
> + GetBitContext gb;
> + ERContext er;
> +
> + Picture *DPB;
> + Picture *cur_pic_ptr;
> + Picture cur_pic;
> + int picture_count;
> + int picture_range_start, picture_range_end;
> +
> int pixel_shift; ///< 0 for 8-bit H264, 1 for high-bit-depth H264
> int chroma_qp[2]; // QPc
>
> int qp_thresh; ///< QP threshold to skip loopfilter
>
> + int width, height;
> + int linesize, uvlinesize;
> + int chroma_x_shift, chroma_y_shift;
> +
> + int qscale;
> + int droppable;
> + int data_partitioning;
> + int coded_picture_number;
> + int low_delay;
> +
> + int context_initialized;
> + int flags;
is there a good reason why we copy flags from the avctx?
> + int workaround_bugs;
> +
> int prev_mb_skipped;
> int next_mb_skipped;
>
> @@ -348,6 +376,8 @@ typedef struct H264Context {
> int mb_aff_frame;
> int mb_field_decoding_flag;
> int mb_mbaff; ///< mb_aff_frame && mb_field_decoding_flag
> + int picture_structure;
> + int first_field;
>
> DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4];
>
> @@ -424,6 +454,13 @@ typedef struct H264Context {
>
> int x264_build;
>
> + int mb_x, mb_y;
> + int resync_mb_x;
> + int resync_mb_y;
> + int mb_skip_run;
> + int mb_height, mb_width;
> + int mb_stride;
> + int mb_num;
> int mb_xy;
>
> int is_complex;
> @@ -521,12 +558,16 @@ typedef struct H264Context {
> */
> int max_contexts;
>
> + int slice_context_count;
> +
> /**
> * 1 if the single thread fallback warning has already been
> * displayed, 0 otherwise.
> */
> int single_decode_warning;
>
> + enum AVPictureType pict_type;
> +
> int last_slice_type;
> /** @} */
>
> @@ -578,6 +619,8 @@ typedef struct H264Context {
>
> int cur_chroma_format_idc;
> uint8_t *bipred_scratchpad;
> + uint8_t *edge_emu_buffer;
> + int16_t *dc_val_base;
> } H264Context;
>
> extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM + 1]; ///< One chroma
> qp table for each supported bit depth (8, 9, 10).
> @@ -786,7 +829,7 @@ static av_always_inline int pred_intra_mode(H264Context
> *h, int n)
> const int top = h->intra4x4_pred_mode_cache[index8 - 8];
> const int min = FFMIN(left, top);
>
> - tprintf(h->s.avctx, "mode:%d %d min:%d\n", left, top, min);
> + tprintf(h->avctx, "mode:%d %d min:%d\n", left, top, min);
>
> if (min < 0)
> return DC_PRED;
> @@ -820,7 +863,7 @@ static av_always_inline void
> write_back_non_zero_count(H264Context *h)
> AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
> AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);
>
> - if (!h->s.chroma_y_shift) {
> + if (!h->chroma_y_shift) {
> AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
> AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
> AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
> @@ -829,12 +872,11 @@ static av_always_inline void
> write_back_non_zero_count(H264Context *h)
> }
>
> static av_always_inline void write_back_motion_list(H264Context *h,
> - MpegEncContext *const s,
> int b_stride,
> int b_xy, int b8_xy,
> int mb_type, int list)
> {
> - int16_t(*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy];
> + int16_t(*mv_dst)[2] = &h->cur_pic.f.motion_val[list][b_xy];
> int16_t(*mv_src)[2] = &h->mv_cache[list][scan8[0]];
> AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
> AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
> @@ -855,7 +897,7 @@ static av_always_inline void
> write_back_motion_list(H264Context *h,
> }
>
> {
> - int8_t *ref_index = &s->current_picture.f.ref_index[list][b8_xy];
> + int8_t *ref_index = &h->cur_pic.f.ref_index[list][b8_xy];
> int8_t *ref_cache = h->ref_cache[list];
> ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
> ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
> @@ -866,19 +908,18 @@ static av_always_inline void
> write_back_motion_list(H264Context *h,
>
> static av_always_inline void write_back_motion(H264Context *h, int mb_type)
> {
> - MpegEncContext *const s = &h->s;
> const int b_stride = h->b_stride;
> - const int b_xy = 4 * s->mb_x + 4 * s->mb_y * h->b_stride; // try
> mb2b(8)_xy
> + const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride; // try
> mb2b(8)_xy
> const int b8_xy = 4 * h->mb_xy;
>
> if (USES_LIST(mb_type, 0)) {
> - write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 0);
> + write_back_motion_list(h, b_stride, b_xy, b8_xy, mb_type, 0);
> } else {
> - fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy],
> + fill_rectangle(&h->cur_pic.f.ref_index[0][b8_xy],
> 2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
> }
> if (USES_LIST(mb_type, 1))
> - write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1);
> + write_back_motion_list(h, b_stride, b_xy, b8_xy, mb_type, 1);
>
> if (h->slice_type_nos == AV_PICTURE_TYPE_B && CABAC) {
> if (IS_8X8(mb_type)) {
> @@ -902,4 +943,6 @@ static av_always_inline int
> get_dct8x8_allowed(H264Context *h)
> 0x0001000100010001ULL));
> }
>
> +void ff_h264_draw_horiz_band(H264Context *h, int y, int height);
> +
> #endif /* AVCODEC_H264_H */
ok
> diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c
> diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c
> diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
> diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
> diff --git a/libavcodec/h264_mb_template.c b/libavcodec/h264_mb_template.c
> diff --git a/libavcodec/h264_mc_template.c b/libavcodec/h264_mc_template.c
> diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h
> diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c
> diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c
> diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c
> diff --git a/libavcodec/h264_sei.c b/libavcodec/h264_sei.c
> diff --git a/libavcodec/h264pred.c b/libavcodec/h264pred.c
> diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
ok
> diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
> index ee54a17..49ea549 100644
> --- a/libavcodec/svq3.c
> +++ b/libavcodec/svq3.c
> @@ -66,11 +66,19 @@
>
> typedef struct {
> H264Context h;
> + Picture *cur;
> + Picture *next;
> + Picture *last;
please add _pic or pic_ptr suffixes here too
> int halfpel_flag;
> int thirdpel_flag;
> int unknown_flag;
> int next_slice_index;
> uint32_t watermark_key;
> + int adaptive_quant;
> + int next_p_frame_damaged;
> + int h_edge_pos;
> + int v_edge_pos;
> + int last_frame_output;
> } SVQ3Context;
>
> #define FULLPEL_MODE 1
[...]
> @@ -984,13 +999,15 @@ static av_cold int svq3_decode_init(AVCodecContext
> *avctx)
> }
> }
>
> - s->width = avctx->width;
> - s->height = avctx->height;
> -
> - if (ff_MPV_common_init(s) < 0)
> - return -1;
> -
> - h->b_stride = 4 * s->mb_width;
> + h->width = avctx->width;
> + h->height = avctx->height;
> + h->mb_width = (h->width + 15) / 16;
> + h->mb_height = (h->height + 15) / 16;
> + h->mb_stride = h->mb_width + 1;
> + h->mb_num = h->mb_width * h->mb_height;
> + h->b_stride = 4 * h->mb_width;
> + s->h_edge_pos = h->mb_width * 16;
> + s->v_edge_pos = h->mb_height * 16;
>
> if (ff_h264_alloc_tables(h) < 0) {
> av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
> @@ -1000,73 +1017,156 @@ static av_cold int svq3_decode_init(AVCodecContext
> *avctx)
> return 0;
> }
>
> +static int get_buffer(AVCodecContext *avctx, Picture *pic)
> +{
> + SVQ3Context *s = avctx->priv_data;
> + H264Context *h = &s->h;
> + const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
> + const int mb_array_size = h->mb_stride * h->mb_height;
> + const int b4_stride = h->mb_width * 4 + 1;
> + const int b4_array_size = b4_stride * h->mb_height * 4;
> + int ret;
> +
> + if (!pic->motion_val_base[0]) {
> + int i;
> +
> + pic->mb_type_base = av_mallocz((big_mb_num + h->mb_stride) *
> sizeof(uint32_t));
> + if (!pic->mb_type_base)
> + return AVERROR(ENOMEM);
> + pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
> +
> + for (i = 0; i < 2; i++) {
> + pic->motion_val_base[i] = av_mallocz(2 * (b4_array_size + 4) *
> sizeof(int16_t));
> + pic->f.ref_index[i] = av_mallocz(4 * mb_array_size);
> + if (!pic->motion_val_base[i] || !pic->f.ref_index[i])
> + return AVERROR(ENOMEM);
> +
> + pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
> + }
> + }
> + pic->f.motion_subsample_log2 = 2;
> + pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B);
> +
> + ret = ff_get_buffer(avctx, &pic->f);
> +
> + h->linesize = pic->f.linesize[0];
> + h->uvlinesize = pic->f.linesize[1];
> +
> + return ret;
> +}
> +
> static int svq3_decode_frame(AVCodecContext *avctx, void *data,
> int *got_frame, AVPacket *avpkt)
> {
> const uint8_t *buf = avpkt->data;
> - SVQ3Context *svq3 = avctx->priv_data;
> - H264Context *h = &svq3->h;
> - MpegEncContext *s = &h->s;
> + SVQ3Context *s = avctx->priv_data;
> + H264Context *h = &s->h;
> int buf_size = avpkt->size;
> - int m;
> + int ret, m, i;
>
> /* special case for last picture */
> if (buf_size == 0) {
> - if (s->next_picture_ptr && !s->low_delay) {
> - *(AVFrame *) data = s->next_picture.f;
> - s->next_picture_ptr = NULL;
> + if (s->next->f.data[0] && !h->low_delay && !s->last_frame_output) {
> + *(AVFrame *) data = s->next->f;
> + s->last_frame_output = 1;
> *got_frame = 1;
> }
> return 0;
> }
>
> - init_get_bits(&s->gb, buf, 8 * buf_size);
> + init_get_bits(&h->gb, buf, 8 * buf_size);
>
> - s->mb_x = s->mb_y = h->mb_xy = 0;
> + h->mb_x = h->mb_y = h->mb_xy = 0;
>
> if (svq3_decode_slice_header(avctx))
> return -1;
>
> - s->pict_type = h->slice_type;
> - s->picture_number = h->slice_num;
> + h->pict_type = h->slice_type;
>
> - if (avctx->debug & FF_DEBUG_PICT_INFO)
> - av_log(h->s.avctx, AV_LOG_DEBUG,
> - "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
> - av_get_picture_type_char(s->pict_type),
> - svq3->halfpel_flag, svq3->thirdpel_flag,
> - s->adaptive_quant, s->qscale, h->slice_num);
> + if (h->pict_type != AV_PICTURE_TYPE_B)
> + FFSWAP(Picture*, s->next, s->last);
> +
> + if (s->cur->f.data[0])
> + avctx->release_buffer(avctx, &s->cur->f);
>
> /* for skipping the frame */
> - s->current_picture.f.pict_type = s->pict_type;
> - s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
> + s->cur->f.pict_type = h->pict_type;
> + s->cur->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
> +
> + ret = get_buffer(avctx, s->cur);
> + if (ret < 0)
> + return ret;
> +
> + h->cur_pic_ptr = s->cur;
> + h->cur_pic = *s->cur;
> +
> + for (i = 0; i < 16; i++) {
> + h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4
> * h->linesize * ((scan8[i] - scan8[0]) >> 3);
> + h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8
> * h->linesize * ((scan8[i] - scan8[0]) >> 3);
> + }
> + for (i = 0; i < 16; i++) {
> + h->block_offset[16 + i] =
> + h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4
> * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
> + h->block_offset[48 + 16 + i] =
> + h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8
> * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
> + }
> +
> + if (h->pict_type != AV_PICTURE_TYPE_I) {
> + if (!s->last->f.data[0]) {
> + av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
> + ret = get_buffer(avctx, s->last);
> + if (ret < 0)
> + return ret;
> + memset(s->last->f.data[0], 0, avctx->height *
> s->last->f.linesize[0]);
> + memset(s->last->f.data[1], 0x80, (avctx->height / 2) *
> + s->last->f.linesize[1]);
> + memset(s->last->f.data[2], 0x80, (avctx->height / 2) *
> + s->last->f.linesize[2]);
> + }
> +
> + if (h->pict_type == AV_PICTURE_TYPE_B && !s->next->f.data[0]) {
> + av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
> + ret = get_buffer(avctx, s->next);
> + if (ret < 0)
> + return ret;
> + memset(s->next->f.data[0], 0, avctx->height *
> s->next->f.linesize[0]);
> + memset(s->next->f.data[1], 0x80, (avctx->height / 2) *
> + s->next->f.linesize[1]);
> + memset(s->next->f.data[2], 0x80, (avctx->height / 2) *
> + s->next->f.linesize[2]);
> + }
> + }
> +
> + if (avctx->debug & FF_DEBUG_PICT_INFO)
> + av_log(h->avctx, AV_LOG_DEBUG,
> + "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
> + av_get_picture_type_char(h->pict_type),
> + s->halfpel_flag, s->thirdpel_flag,
> + s->adaptive_quant, h->qscale, h->slice_num);
>
> /* Skip B-frames if we do not have reference frames. */
> - if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
> + if (!s->last->f.data[0] && h->pict_type == AV_PICTURE_TYPE_B)
this can't happen since you initialize references above if they are
missing.
omitted hunks look ok
> diff --git a/libavcodec/vaapi.c b/libavcodec/vaapi.c
> index b74604e..cf7b0b3 100644
> --- a/libavcodec/vaapi.c
> +++ b/libavcodec/vaapi.c
> @@ -21,6 +21,7 @@
> * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
> USA
> */
>
> +#include "h264.h"
> #include "vaapi_internal.h"
>
> /**
> @@ -175,19 +176,28 @@ VASliceParameterBufferBase *ff_vaapi_alloc_slice(struct
> vaapi_context *vactx, co
> return slice_param;
> }
>
> -int ff_vaapi_common_end_frame(MpegEncContext *s)
> +int ff_vaapi_common_end_frame(AVCodecContext *avctx)
> {
> - struct vaapi_context * const vactx = s->avctx->hwaccel_context;
> + struct vaapi_context * const vactx = avctx->hwaccel_context;
> int ret = -1;
>
> - av_dlog(s->avctx, "ff_vaapi_common_end_frame()\n");
> + av_dlog(avctx, "ff_vaapi_common_end_frame()\n");
>
> if (commit_slices(vactx) < 0)
> goto done;
> if (vactx->n_slice_buf_ids > 0) {
> - if (render_picture(vactx,
> ff_vaapi_get_surface_id(s->current_picture_ptr)) < 0)
> - goto done;
> - ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
> + if (avctx->codec_id == AV_CODEC_ID_H264) {
> + H264Context *h = avctx->priv_data;
can we please not do this, especially since we already have a
function pointer and codec-specific functions for this.
The previous per-codec end_frame functions were pointless, but they
aren't now.
> + if (render_picture(vactx,
> ff_vaapi_get_surface_id(h->cur_pic_ptr)) < 0)
> + goto done;
> + ff_h264_draw_horiz_band(h, 0, h->avctx->height);
> +
> + } else {
> + MpegEncContext *s = avctx->priv_data;
> + if (render_picture(vactx,
> ff_vaapi_get_surface_id(s->current_picture_ptr)) < 0)
> + goto done;
> + ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
> + }
> }
> ret = 0;
>
> diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
> index 4ffc7d8..83a003c 100644
> --- a/libavcodec/vaapi_h264.c
> +++ b/libavcodec/vaapi_h264.c
> @@ -20,8 +20,8 @@
> * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
> USA
> */
>
> -#include "vaapi_internal.h"
> #include "h264.h"
> +#include "vaapi_internal.h"
stray hunk, otherwise ok except for .end_frame
> diff --git a/libavcodec/vaapi_internal.h b/libavcodec/vaapi_internal.h
> diff --git a/libavcodec/vaapi_mpeg2.c b/libavcodec/vaapi_mpeg2.c
> diff --git a/libavcodec/vaapi_mpeg4.c b/libavcodec/vaapi_mpeg4.c
> diff --git a/libavcodec/vaapi_vc1.c b/libavcodec/vaapi_vc1.c
ok except for .end_frame
> diff --git a/libavcodec/vda_h264.c b/libavcodec/vda_h264.c
> index 34fcd3c..8491f22 100644
> --- a/libavcodec/vda_h264.c
> +++ b/libavcodec/vda_h264.c
> @@ -241,7 +241,7 @@ static int end_frame(AVCodecContext *avctx)
> {
> H264Context *h = avctx->priv_data;
> struct vda_context *vda_ctx = avctx->hwaccel_context;
> - AVFrame *frame = &h->s.current_picture_ptr->f;
> + AVFrame *frame = &h->cur->f;
> int status;
>
> if (!vda_ctx->decoder || !vda_ctx->priv_bitstream)
ok
> diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
> index 3b77386..d029d85 100644
> --- a/libavcodec/vdpau.c
> +++ b/libavcodec/vdpau.c
> @@ -50,16 +50,26 @@ int ff_vdpau_common_start_frame(AVCodecContext *avctx,
>
> int ff_vdpau_common_end_frame(AVCodecContext *avctx)
> {
> - MpegEncContext * const s = avctx->priv_data;
> AVVDPAUContext *hwctx = avctx->hwaccel_context;
>
> if (hwctx->bitstream_buffers_used) {
> - VdpVideoSurface surf =
> ff_vdpau_get_surface_id(s->current_picture_ptr);
> + if (avctx->codec_id == AV_CODEC_ID_H264) {
> + H264Context *h = avctx->priv_data;
> + VdpVideoSurface surf = ff_vdpau_get_surface_id(h->cur_pic_ptr);
>
> - hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info,
> - hwctx->bitstream_buffers_used,
> hwctx->bitstream_buffers);
> + hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info,
> + hwctx->bitstream_buffers_used,
> hwctx->bitstream_buffers);
>
> - ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
> + ff_h264_draw_horiz_band(h, 0, h->avctx->height);
> + } else {
> + MpegEncContext *s = avctx->priv_data;
> + VdpVideoSurface surf =
> ff_vdpau_get_surface_id(s->current_picture_ptr);
> +
> + hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info,
> + hwctx->bitstream_buffers_used,
> hwctx->bitstream_buffers);
> +
> + ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
> + }
see vaapi comments
> hwctx->bitstream_buffers_used = 0;
> }
> return 0;
> @@ -87,15 +97,14 @@ int ff_vdpau_add_buffer(AVCodecContext *avctx,
>
> /* Obsolete non-hwaccel VDPAU support below... */
>
> -void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
> +void ff_vdpau_h264_set_reference_frames(H264Context *h)
> {
> - H264Context *h = s->avctx->priv_data;
> struct vdpau_render_state *render, *render_ref;
> VdpReferenceFrameH264 *rf, *rf2;
> Picture *pic;
> int i, list, pic_frame_idx;
>
> - render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
> + render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
> assert(render);
>
> rf = &render->info.h264.referenceFrames[0];
> @@ -156,12 +165,9 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext
> *s)
> }
> }
>
> -void ff_vdpau_add_data_chunk(MpegEncContext *s,
> - const uint8_t *buf, int buf_size)
> +void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
this looks weird; please make it a void pointer. An alternative would be to
make ff_vdpau_add_data_chunk() static, pass a struct
vdpau_render_state pointer directly, and add an external wrapper for h264,
since that is the only codec which uses it externally.
rest is ok
Janne
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel