On 2013-02-13 19:51:01 +0100, Anton Khirnov wrote:
> ---
> libavcodec/dxva2_h264.c | 10 +++++-----
> libavcodec/h264.c | 36 ++++++++++++++++++------------------
> libavcodec/h264_direct.c | 16 ++++++++--------
> libavcodec/h264_refs.c | 32 ++++++++++++++++----------------
> libavcodec/mpegvideo.c | 26 +++++++++++++-------------
> libavcodec/mpegvideo.h | 2 ++
> libavcodec/mpegvideo_enc.c | 4 ++--
> libavcodec/svq3.c | 2 +-
> libavcodec/vaapi_h264.c | 10 +++++-----
> libavcodec/vdpau.c | 12 ++++++------
> libavcodec/vdpau_h264.c | 12 ++++++------
> 11 files changed, 82 insertions(+), 80 deletions(-)
>
> diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c
> index 05f5a64..43dbfbe 100644
> --- a/libavcodec/dxva2_h264.c
> +++ b/libavcodec/dxva2_h264.c
> @@ -69,15 +69,15 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context
> ff_dxva2_get_surface_index(ctx, r),
> r->long_ref != 0);
>
> - if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
> + if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
> pp->FieldOrderCntList[i][0] = r->field_poc[0];
> - if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
> + if ((r->reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
> pp->FieldOrderCntList[i][1] = r->field_poc[1];
>
> pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num;
> - if (r->f.reference & PICT_TOP_FIELD)
> + if (r->reference & PICT_TOP_FIELD)
> pp->UsedForReferenceFlags |= 1 << (2*i + 0);
> - if (r->f.reference & PICT_BOTTOM_FIELD)
> + if (r->reference & PICT_BOTTOM_FIELD)
> pp->UsedForReferenceFlags |= 1 << (2*i + 1);
> } else {
> pp->RefFrameList[i].bPicEntry = 0xff;
> @@ -230,7 +230,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
> unsigned plane;
> fill_picture_entry(&slice->RefPicList[list][i],
> ff_dxva2_get_surface_index(ctx, r),
> - r->f.reference == PICT_BOTTOM_FIELD);
> + r->reference == PICT_BOTTOM_FIELD);
> for (plane = 0; plane < 3; plane++) {
> int w, o;
> if (plane == 0 && h->luma_weight_flag[list]) {
> diff --git a/libavcodec/h264.c b/libavcodec/h264.c
> index cc1afbe..86f1988 100644
> --- a/libavcodec/h264.c
> +++ b/libavcodec/h264.c
> @@ -142,7 +142,7 @@ static void release_unused_pictures(H264Context *h, int remove_current)
>
> /* release non reference frames */
> for (i = 0; i < h->picture_count; i++) {
> - if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference &&
> + if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
> (!h->DPB[i].owner2 || h->DPB[i].owner2 == h) &&
> (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
> free_frame_buffer(h, &h->DPB[i]);
> @@ -232,7 +232,7 @@ static inline int pic_is_unused(H264Context *h, Picture *pic)
> {
> if (pic->f.data[0] == NULL)
> return 1;
> - if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
> + if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
> if (!pic->owner2 || pic->owner2 == h)
> return 1;
> return 0;
> @@ -504,7 +504,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
> // Don't try to wait on these as it will cause a deadlock.
> // Fields can wait on each other, though.
> if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
> - (ref->f.reference & 3) != h->picture_structure) {
> + (ref->reference & 3) != h->picture_structure) {
> my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
> if (refs[0][ref_n] < 0)
> nrefs[0] += 1;
> @@ -517,7 +517,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
> Picture *ref = &h->ref_list[1][ref_n];
>
> if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
> - (ref->f.reference & 3) != h->picture_structure) {
> + (ref->reference & 3) != h->picture_structure) {
> my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
> if (refs[1][ref_n] < 0)
> nrefs[1] += 1;
> @@ -606,7 +606,7 @@ static void await_references(H264Context *h)
> int row = refs[list][ref];
> if (row >= 0) {
> Picture *ref_pic = &h->ref_list[list][ref];
> - int ref_field = ref_pic->f.reference - 1;
> + int ref_field = ref_pic->reference - 1;
> int ref_field_picture = ref_pic->field_picture;
> int pic_height = 16 * h->mb_height >> ref_field_picture;
>
> @@ -723,7 +723,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
> ysh = 3 - (chroma_idc == 2 /* yuv422 */);
> if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
> // chroma offset when predicting from a field of opposite parity
> - my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1));
> + my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
> emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
> }
>
> @@ -1598,7 +1598,7 @@ int ff_h264_frame_start(H264Context *h)
> }
> pic = &h->DPB[i];
>
> - pic->f.reference = h->droppable ? 0 : h->picture_structure;
> + pic->reference = h->droppable ? 0 : h->picture_structure;
> pic->f.coded_picture_number = h->coded_picture_number++;
> pic->field_picture = h->picture_structure != PICT_FRAME;
> /*
> @@ -1655,7 +1655,7 @@ int ff_h264_frame_start(H264Context *h)
> * get released even with set reference, besides SVQ3 and others do not
> * mark frames as reference later "naturally". */
> if (h->avctx->codec_id != AV_CODEC_ID_SVQ3)
> - h->cur_pic_ptr->f.reference = 0;
> + h->cur_pic_ptr->reference = 0;
>
> h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
>
> @@ -1785,8 +1785,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
> assert(pics <= MAX_DELAYED_PIC_COUNT);
>
> h->delayed_pic[pics++] = cur;
> - if (cur->f.reference == 0)
> - cur->f.reference = DELAYED_PIC_REF;
> + if (cur->reference == 0)
> + cur->reference = DELAYED_PIC_REF;
>
> /* Frame reordering. This code takes pictures from coding order and sorts
> * them by their incremental POC value into display order. It supports POC
> @@ -1851,7 +1851,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
> }
>
> if (pics > h->avctx->has_b_frames) {
> - out->f.reference &= ~DELAYED_PIC_REF;
> + out->reference &= ~DELAYED_PIC_REF;
> // for frame threading, the owner must be the second field's thread or
> // else the first thread can release the picture and reuse it unsafely
> out->owner2 = h;
> @@ -2454,7 +2454,7 @@ static void flush_change(H264Context *h)
> h->prev_interlaced_frame = 1;
> idr(h);
> if (h->cur_pic_ptr)
> - h->cur_pic_ptr->f.reference = 0;
> + h->cur_pic_ptr->reference = 0;
> h->first_field = 0;
> memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
> memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
> @@ -2471,7 +2471,7 @@ static void flush_dpb(AVCodecContext *avctx)
>
> for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
> if (h->delayed_pic[i])
> - h->delayed_pic[i]->f.reference = 0;
> + h->delayed_pic[i]->reference = 0;
> h->delayed_pic[i] = NULL;
> }
>
> @@ -2665,7 +2665,7 @@ static int field_end(H264Context *h, int in_setup)
> if (h->er.error_count &&
> !h->avctx->hwaccel &&
> !(h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
> - h->cur_pic_ptr->f.reference &&
> + h->cur_pic_ptr->reference &&
> !(h->flags & CODEC_FLAG_EMU_EDGE)) {
> const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(h->avctx->pix_fmt);
> int hshift = desc->log2_chroma_w;
> @@ -3198,7 +3198,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
> if (h0->first_field) {
> assert(h0->cur_pic_ptr);
> assert(h0->cur_pic_ptr->f.data[0]);
> - assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
> + assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
>
> /* Mark old field/frame as completed */
> if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
> @@ -3297,7 +3297,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
> if (h0->first_field) {
> assert(h0->cur_pic_ptr);
> assert(h0->cur_pic_ptr->f.data[0]);
> - assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
> + assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
>
> /* figure out if we have a complementary field pair */
> if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
> @@ -3581,12 +3581,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
> ref2frm[1] = -1;
> for (i = 0; i < 16; i++)
> ref2frm[i + 2] = 4 * id_list[i] +
> - (h->ref_list[j][i].f.reference & 3);
> + (h->ref_list[j][i].reference & 3);
> ref2frm[18 + 0] =
> ref2frm[18 + 1] = -1;
> for (i = 16; i < 48; i++)
> ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
> - (h->ref_list[j][i].f.reference & 3);
> + (h->ref_list[j][i].reference & 3);
> }
>
> // FIXME: fix draw_edges + PAFF + frame threads
> diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
> index e7a0b61..5cafbbe 100644
> --- a/libavcodec/h264_direct.c
> +++ b/libavcodec/h264_direct.c
> @@ -88,7 +88,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
> poc= (poc&~3) + rfield + 1;
>
> for(j=start; j<end; j++){
> - if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
> + if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference & 3) == poc) {
> int cur_ref= mbafi ? (j-16)^field : j;
> if (ref1->mbaff)
> map[list][2 * old_ref + (rfield^field) + 16] = cur_ref;
> @@ -106,12 +106,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
> Picture * const cur = h->cur_pic_ptr;
> int list, j, field;
> int sidx= (h->picture_structure&1)^1;
> - int ref1sidx = (ref1->f.reference&1)^1;
> + int ref1sidx = (ref1->reference&1)^1;
>
> for(list=0; list<2; list++){
> cur->ref_count[sidx][list] = h->ref_count[list];
> for(j=0; j<h->ref_count[list]; j++)
> - cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
> + cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference & 3);
> }
>
> if(h->picture_structure == PICT_FRAME){
> @@ -127,8 +127,8 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
> int *col_poc = h->ref_list[1]->field_poc;
> h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
> ref1sidx=sidx= h->col_parity;
> - } else if (!(h->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
> - h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
> + } else if (!(h->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
> + h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
> }
>
> if (h->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
> @@ -144,7 +144,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
>
> static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
> {
> - int ref_field = ref->f.reference - 1;
> + int ref_field = ref->reference - 1;
> int ref_field_picture = ref->field_picture;
> int ref_height = 16*h->mb_height >> ref_field_picture;
>
> @@ -173,7 +173,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
> int mv[2];
> int list;
>
> - assert(h->ref_list[1][0].f.reference & 3);
> + assert(h->ref_list[1][0].reference & 3);
>
> await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
>
> @@ -420,7 +420,7 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
> unsigned int sub_mb_type;
> int i8, i4;
>
> - assert(h->ref_list[1][0].f.reference & 3);
> + assert(h->ref_list[1][0].reference & 3);
>
> await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
>
> diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c
> index f855bcb..95b4f83 100644
> --- a/libavcodec/h264_refs.c
> +++ b/libavcodec/h264_refs.c
> @@ -40,7 +40,7 @@ static void pic_as_field(Picture *pic, const int parity){
> for (i = 0; i < 4; ++i) {
> if (parity == PICT_BOTTOM_FIELD)
> pic->f.data[i] += pic->f.linesize[i];
> - pic->f.reference = parity;
> + pic->reference = parity;
> pic->f.linesize[i] *= 2;
> }
> pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
> @@ -48,7 +48,7 @@ static void pic_as_field(Picture *pic, const int parity){
>
> static int split_field_copy(Picture *dest, Picture *src,
> int parity, int id_add){
> - int match = !!(src->f.reference & parity);
> + int match = !!(src->reference & parity);
>
> if (match) {
> *dest = *src;
> @@ -67,9 +67,9 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int
> int index=0;
>
> while(i[0]<len || i[1]<len){
> - while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
> + while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
> i[0]++;
> - while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
> + while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
> i[1]++;
> if(i[0] < len){
> in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
> @@ -225,11 +225,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
>
> for(i= h->short_ref_count-1; i>=0; i--){
> ref = h->short_ref[i];
> - assert(ref->f.reference);
> + assert(ref->reference);
> assert(!ref->long_ref);
> if(
> ref->frame_num == frame_num &&
> - (ref->f.reference & pic_structure)
> + (ref->reference & pic_structure)
> )
> break;
> }
> @@ -246,8 +246,8 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
> return -1;
> }
> ref = h->long_ref[long_idx];
> - assert(!(ref && !ref->f.reference));
> - if (ref && (ref->f.reference & pic_structure)) {
> + assert(!(ref && !ref->reference));
> + if (ref && (ref->reference & pic_structure)) {
> ref->pic_id= pic_id;
> assert(ref->long_ref);
> i=0;
> @@ -303,12 +303,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
> field[0] = *frame;
> for(j=0; j<3; j++)
> field[0].f.linesize[j] <<= 1;
> - field[0].f.reference = PICT_TOP_FIELD;
> + field[0].reference = PICT_TOP_FIELD;
> field[0].poc= field[0].field_poc[0];
> field[1] = field[0];
> for(j=0; j<3; j++)
> field[1].f.data[j] += frame->f.linesize[j];
> - field[1].f.reference = PICT_BOTTOM_FIELD;
> + field[1].reference = PICT_BOTTOM_FIELD;
> field[1].poc= field[1].field_poc[1];
>
> h->luma_weight[16+2*i][list][0] =
> h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
> @@ -334,12 +334,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
> */
> static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
> int i;
> - if (pic->f.reference &= refmask) {
> + if (pic->reference &= refmask) {
> return 0;
> } else {
> for(i = 0; h->delayed_pic[i]; i++)
> if(pic == h->delayed_pic[i]){
> - pic->f.reference = DELAYED_PIC_REF;
> + pic->reference = DELAYED_PIC_REF;
> break;
> }
> return 1;
> @@ -491,7 +491,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
>
> if (h->short_ref_count &&
> h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
> - !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->f.reference)) {
> + !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->reference)) {
> mmco[0].opcode = MMCO_SHORT2UNUSED;
> mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
> mmco_index = 1;
> @@ -585,7 +585,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
> h->long_ref_count++;
> }
>
> - h->cur_pic_ptr->f.reference |= h->picture_structure;
> + h->cur_pic_ptr->reference |= h->picture_structure;
> current_ref_assigned=1;
> break;
> case MMCO_SET_MAX_LONG:
> @@ -620,7 +620,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
> */
> if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
> /* Just mark the second field valid */
> - h->cur_pic_ptr->f.reference = PICT_FRAME;
> + h->cur_pic_ptr->reference = PICT_FRAME;
> } else if (h->cur_pic_ptr->long_ref) {
> av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference "
> "assignment for second field "
> @@ -639,7 +639,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
>
> h->short_ref[0]= h->cur_pic_ptr;
> h->short_ref_count++;
> - h->cur_pic_ptr->f.reference |= h->picture_structure;
> + h->cur_pic_ptr->reference |= h->picture_structure;
> }
> }
>
> diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
> index 9601995..9ddfa0d 100644
> --- a/libavcodec/mpegvideo.c
> +++ b/libavcodec/mpegvideo.c
> @@ -1348,7 +1348,7 @@ void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
>
> /* release non reference frames */
> for (i = 0; i < s->picture_count; i++) {
> - if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
> + if (s->picture[i].f.data[0] && !s->picture[i].reference &&
> (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
> (remove_current || &s->picture[i] != s->current_picture_ptr)
> /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
> @@ -1361,7 +1361,7 @@ static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
> {
> if (pic->f.data[0] == NULL)
> return 1;
> - if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
> + if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
> if (!pic->owner2 || pic->owner2 == s)
> return 1;
> return 0;
> @@ -1451,7 +1451,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
> if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
> &s->picture[i] != s->last_picture_ptr &&
> &s->picture[i] != s->next_picture_ptr &&
> - s->picture[i].f.reference && !s->picture[i].needs_realloc) {
> + s->picture[i].reference && !s->picture[i].needs_realloc) {
> if (!(avctx->active_thread_type & FF_THREAD_FRAME))
> av_log(avctx, AV_LOG_ERROR,
> "releasing zombie picture\n");
> @@ -1478,12 +1478,12 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
> pic = &s->picture[i];
> }
>
> - pic->f.reference = 0;
> + pic->reference = 0;
> if (!s->droppable) {
> if (s->codec_id == AV_CODEC_ID_H264)
> - pic->f.reference = s->picture_structure;
> + pic->reference = s->picture_structure;
> else if (s->pict_type != AV_PICTURE_TYPE_B)
> - pic->f.reference = 3;
> + pic->reference = 3;
> }
>
> pic->f.coded_picture_number = s->coded_picture_number++;
> @@ -1562,7 +1562,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
>
> ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
> ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
> - s->last_picture_ptr->f.reference = 3;
> + s->last_picture_ptr->reference = 3;
> }
> if ((s->next_picture_ptr == NULL ||
> s->next_picture_ptr->f.data[0] == NULL) &&
> @@ -1580,7 +1580,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
> }
> ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
> ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
> - s->next_picture_ptr->f.reference = 3;
> + s->next_picture_ptr->reference = 3;
> }
> }
>
> @@ -1652,7 +1652,7 @@ void ff_MPV_frame_end(MpegEncContext *s)
> !s->avctx->hwaccel &&
> !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
> s->unrestricted_mv &&
> - s->current_picture.f.reference &&
> + s->current_picture.reference &&
> !s->intra_only &&
> !(s->flags & CODEC_FLAG_EMU_EDGE)) {
> const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
> @@ -1693,7 +1693,7 @@ void ff_MPV_frame_end(MpegEncContext *s)
> if (s->encoding) {
> /* release non-reference frames */
> for (i = 0; i < s->picture_count; i++) {
> - if (s->picture[i].f.data[0] && !s->picture[i].f.reference
> + if (s->picture[i].f.data[0] && !s->picture[i].reference
> /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
> free_frame_buffer(s, &s->picture[i]);
> }
> @@ -1707,7 +1707,7 @@ void ff_MPV_frame_end(MpegEncContext *s)
> #endif
> s->avctx->coded_frame = &s->current_picture_ptr->f;
>
> - if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
> + if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.reference) {
> ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
> }
> }
> @@ -2274,7 +2274,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
> s->mb_skipped= 0;
> assert(s->pict_type!=AV_PICTURE_TYPE_I);
> *mbskip_ptr = 1;
> - } else if(!s->current_picture.f.reference) {
> + } else if(!s->current_picture.reference) {
> *mbskip_ptr = 1;
> } else{
> *mbskip_ptr = 0; /* not skipped */
> @@ -2476,7 +2476,7 @@ void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
> if (!avctx->hwaccel &&
> !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
> draw_edges &&
> - cur->f.reference &&
> + cur->reference &&
> !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
> int *linesize = cur->f.linesize;
> int sides = 0, edge_h;
> diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
> index 413d9ca..fff1e13 100644
> --- a/libavcodec/mpegvideo.h
> +++ b/libavcodec/mpegvideo.h
> @@ -147,6 +147,8 @@ typedef struct Picture{
> * hardware accelerator private data
> */
> void *hwaccel_picture_private;
> +
> + int reference;
> } Picture;
>
> /**
> diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
> index 3ba6f81..b4b676f 100644
> --- a/libavcodec/mpegvideo_enc.c
> +++ b/libavcodec/mpegvideo_enc.c
> @@ -1352,7 +1352,7 @@ static int select_input_picture(MpegEncContext *s)
> }
> no_output_pic:
> if (s->reordered_input_picture[0]) {
> - s->reordered_input_picture[0]->f.reference =
> + s->reordered_input_picture[0]->reference =
> s->reordered_input_picture[0]->f.pict_type !=
> AV_PICTURE_TYPE_B ? 3 : 0;
>
> @@ -1369,7 +1369,7 @@ no_output_pic:
> return i;
> pic = &s->picture[i];
>
> - pic->f.reference = s->reordered_input_picture[0]->f.reference;
> + pic->reference = s->reordered_input_picture[0]->reference;
> if (ff_alloc_picture(s, pic, 0) < 0) {
> return -1;
> }
> diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
> index 6c695f7..79eb877 100644
> --- a/libavcodec/svq3.c
> +++ b/libavcodec/svq3.c
> @@ -1045,7 +1045,7 @@ static int get_buffer(AVCodecContext *avctx, Picture *pic)
> }
> }
> pic->f.motion_subsample_log2 = 2;
> - pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B);
> + pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
>
> ret = ff_get_buffer(avctx, &pic->f);
>
> diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
> index 5e98312..d198151 100644
> --- a/libavcodec/vaapi_h264.c
> +++ b/libavcodec/vaapi_h264.c
> @@ -55,7 +55,7 @@ static void fill_vaapi_pic(VAPictureH264 *va_pic,
> int pic_structure)
> {
> if (pic_structure == 0)
> - pic_structure = pic->f.reference;
> + pic_structure = pic->reference;
> pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */
>
> va_pic->picture_id = ff_vaapi_get_surface_id(pic);
> @@ -64,7 +64,7 @@ static void fill_vaapi_pic(VAPictureH264 *va_pic,
> va_pic->flags = 0;
> if (pic_structure != PICT_FRAME)
> va_pic->flags |= (pic_structure & PICT_TOP_FIELD) ?
> VA_PICTURE_H264_TOP_FIELD : VA_PICTURE_H264_BOTTOM_FIELD;
> - if (pic->f.reference)
> + if (pic->reference)
> va_pic->flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE
> : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
>
> va_pic->TopFieldOrderCnt = 0;
> @@ -134,13 +134,13 @@ static int fill_vaapi_ReferenceFrames(VAPictureParameterBufferH264 *pic_param,
>
> for (i = 0; i < h->short_ref_count; i++) {
> Picture * const pic = h->short_ref[i];
> - if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0)
> + if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
> return -1;
> }
>
> for (i = 0; i < 16; i++) {
> Picture * const pic = h->long_ref[i];
> - if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0)
> + if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
> return -1;
> }
> return 0;
> @@ -160,7 +160,7 @@ static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
> {
> unsigned int i, n = 0;
> for (i = 0; i < ref_count; i++)
> - if (ref_list[i].f.reference)
> + if (ref_list[i].reference)
> fill_vaapi_pic(&RefPicList[n++], &ref_list[i], 0);
>
> for (; n < 32; n++)
> diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
> index e5c459b..66c7f59 100644
> --- a/libavcodec/vdpau.c
> +++ b/libavcodec/vdpau.c
> @@ -104,7 +104,7 @@ void ff_vdpau_h264_set_reference_frames(H264Context *h)
>
> for (i = 0; i < ls; ++i) {
> pic = lp[i];
> - if (!pic || !pic->f.reference)
> + if (!pic || !pic->reference)
> continue;
> pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
>
> @@ -122,8 +122,8 @@ void ff_vdpau_h264_set_reference_frames(H264Context *h)
> ++rf2;
> }
> if (rf2 != rf) {
> - rf2->top_is_reference |= (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
> - rf2->bottom_is_reference |= (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
> + rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
> + rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
> continue;
> }
>
> @@ -132,8 +132,8 @@ void ff_vdpau_h264_set_reference_frames(H264Context *h)
>
> rf->surface = render_ref->surface;
> rf->is_long_term = pic->long_ref;
> - rf->top_is_reference = (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
> - rf->bottom_is_reference = (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
> + rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
> + rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
> rf->field_order_cnt[0] = pic->field_poc[0];
> rf->field_order_cnt[1] = pic->field_poc[1];
> rf->frame_idx = pic_frame_idx;
> @@ -199,7 +199,7 @@ void ff_vdpau_h264_picture_complete(H264Context *h)
> if (render->info.h264.slice_count < 1)
> return;
>
> - render->info.h264.is_reference = (h->cur_pic_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE;
> + render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
> render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
> render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
> render->info.h264.num_ref_frames = h->sps.ref_frame_count;
> diff --git a/libavcodec/vdpau_h264.c b/libavcodec/vdpau_h264.c
> index 91f8008..e787762 100644
> --- a/libavcodec/vdpau_h264.c
> +++ b/libavcodec/vdpau_h264.c
> @@ -52,10 +52,10 @@ static void vdpau_h264_set_rf(VdpReferenceFrameH264 *rf, Picture *pic,
> VdpVideoSurface surface = ff_vdpau_get_surface_id(pic);
>
> if (pic_structure == 0)
> - pic_structure = pic->f.reference;
> + pic_structure = pic->reference;
>
> rf->surface = surface;
> - rf->is_long_term = pic->f.reference && pic->long_ref;
> + rf->is_long_term = pic->reference && pic->long_ref;
> rf->top_is_reference = (pic_structure & PICT_TOP_FIELD) != 0;
> rf->bottom_is_reference = (pic_structure & PICT_BOTTOM_FIELD) != 0;
> rf->field_order_cnt[0] = h264_foc(pic->field_poc[0]);
> @@ -83,7 +83,7 @@ static void vdpau_h264_set_reference_frames(AVCodecContext *avctx)
> VdpVideoSurface surface_ref;
> int pic_frame_idx;
>
> - if (!pic || !pic->f.reference)
> + if (!pic || !pic->reference)
> continue;
> pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
> surface_ref = ff_vdpau_get_surface_id(pic);
> @@ -97,15 +97,15 @@ static void vdpau_h264_set_reference_frames(AVCodecContext *avctx)
> ++rf2;
> }
> if (rf2 != rf) {
> - rf2->top_is_reference |= (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
> - rf2->bottom_is_reference |= (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
> + rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
> + rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
> continue;
> }
>
> if (rf >= &info->referenceFrames[H264_RF_COUNT])
> continue;
>
> - vdpau_h264_set_rf(rf, pic, pic->f.reference);
> + vdpau_h264_set_rf(rf, pic, pic->reference);
> ++rf;
> }
> }
ok (ignoring vertical alignment)
Janne
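
For anyone skimming the archive: the whole patch boils down to giving Picture its
own reference field (the mpegvideo.h hunk above) and switching every
pic->f.reference access to pic->reference, so the reference-marking state no
longer lives in the embedded AVFrame. Below is a minimal, self-contained sketch
of how that field is used at the touched call sites; the Picture struct is only a
toy stand-in, and the PICT_*/DELAYED_PIC_REF values are assumed to match the
libavcodec headers of that era:

    #include <stdio.h>

    /* assumed to match libavcodec's definitions at the time */
    #define PICT_TOP_FIELD    1
    #define PICT_BOTTOM_FIELD 2
    #define PICT_FRAME        3   /* top | bottom */
    #define DELAYED_PIC_REF   4   /* held only for display reordering */

    /* toy stand-in for the real Picture: only the field this patch moves */
    typedef struct {
        int reference;            /* was pic->f.reference before the patch */
    } Picture;

    int main(void)
    {
        Picture pic = { .reference = PICT_TOP_FIELD | DELAYED_PIC_REF };

        /* the call sites only ever test the low two (field) bits or the
         * delayed-output flag, e.g. "reference & 3" in vdpau.c and
         * "reference & DELAYED_PIC_REF" in pic_is_unused() */
        printf("field bits: %d\n", pic.reference & PICT_FRAME);
        printf("delayed:    %d\n", !!(pic.reference & DELAYED_PIC_REF));
        return 0;
    }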