---
libavcodec/h263.c | 6 ++---
libavcodec/h264.c | 20 +++++++--------
libavcodec/h264_cabac.c | 4 +--
libavcodec/h264_cavlc.c | 4 +--
libavcodec/h264_loopfilter.c | 26 ++++++++++----------
libavcodec/h264_mvpred.h | 2 +-
libavcodec/intrax8.c | 2 +-
libavcodec/ituh263enc.c | 2 +-
libavcodec/mpeg4videodec.c | 14 +++++------
libavcodec/mpeg4videoenc.c | 4 +--
libavcodec/mpegvideo.c | 12 ++++-----
libavcodec/mpegvideo.h | 2 ++
libavcodec/mpegvideo_enc.c | 6 ++---
libavcodec/mpegvideo_xvmc.c | 2 +-
libavcodec/rv30.c | 8 +++---
libavcodec/rv34.c | 4 +--
libavcodec/rv40.c | 2 +-
libavcodec/vc1dec.c | 56 +++++++++++++++++++++---------------------
18 files changed, 89 insertions(+), 87 deletions(-)
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 03d4563..9a4973b 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -167,7 +167,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
- qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
+ qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
if(qp_c)
qp_tc= qp_c;
@@ -190,7 +190,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
- qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
+ qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
@@ -212,7 +212,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
qp_lc= qp_c;
else
- qp_lc = s->current_picture.f.qscale_table[xy - 1];
+ qp_lc = s->current_picture.qscale_table[xy - 1];
if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 86f1988..f050797 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -126,7 +126,7 @@ static void free_picture(H264Context *h, Picture *pic)
free_frame_buffer(h, pic);
av_freep(&pic->qscale_table_base);
- pic->f.qscale_table = NULL;
+ pic->qscale_table = NULL;
av_freep(&pic->mb_type_base);
pic->f.mb_type = NULL;
for (i = 0; i < 2; i++) {
@@ -197,7 +197,7 @@ static int alloc_picture(H264Context *h, Picture *pic)
h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1];
- if (pic->f.qscale_table == NULL) {
+ if (pic->qscale_table == NULL) {
FF_ALLOCZ_OR_GOTO(h->avctx, pic->qscale_table_base,
(big_mb_num + h->mb_stride) * sizeof(uint8_t),
fail)
@@ -205,7 +205,7 @@ static int alloc_picture(H264Context *h, Picture *pic)
(big_mb_num + h->mb_stride) * sizeof(uint32_t),
fail)
pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
- pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
+ pic->qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
for (i = 0; i < 2; i++) {
FF_ALLOCZ_OR_GOTO(h->avctx, pic->motion_val_base[i],
@@ -3759,18 +3759,18 @@ static int fill_filter_caches(H264Context *h, int mb_type)
* This is a conservative estimate: could also check beta_offset
* and more accurate chroma_qp. */
int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
- int qp = h->cur_pic.f.qscale_table[mb_xy];
+ int qp = h->cur_pic.qscale_table[mb_xy];
if (qp <= qp_thresh &&
(left_xy[LTOP] < 0 ||
- ((qp + h->cur_pic.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
+ ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
(top_xy < 0 ||
- ((qp + h->cur_pic.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
+ ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
if (!FRAME_MBAFF)
return 1;
if ((left_xy[LTOP] < 0 ||
- ((qp + h->cur_pic.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
+ ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
(top_xy < h->mb_stride ||
- ((qp + h->cur_pic.f.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
+ ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
return 1;
}
}
@@ -3917,8 +3917,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
uvlinesize, 0);
if (fill_filter_caches(h, mb_type))
continue;
- h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mb_xy]);
- h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mb_xy]);
+ h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
+ h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
if (FRAME_MBAFF) {
ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c
index 8fc6b95..0a36aed 100644
--- a/libavcodec/h264_cabac.c
+++ b/libavcodec/h264_cabac.c
@@ -2012,7 +2012,7 @@ decode_intra_mb:
h->cbp_table[mb_xy] = 0xf7ef;
h->chroma_pred_mode_table[mb_xy] = 0;
// In deblocking, the quantizer is 0
- h->cur_pic.f.qscale_table[mb_xy] = 0;
+ h->cur_pic.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
h->cur_pic.f.mb_type[mb_xy] = mb_type;
@@ -2411,7 +2411,7 @@ decode_intra_mb:
h->last_qscale_diff = 0;
}
- h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+ h->cur_pic.qscale_table[mb_xy] = h->qscale;
write_back_non_zero_count(h);
return 0;
diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c
index 9585b77..da60437 100644
--- a/libavcodec/h264_cavlc.c
+++ b/libavcodec/h264_cavlc.c
@@ -774,7 +774,7 @@ decode_intra_mb:
}
// In deblocking, the quantizer is 0
- h->cur_pic.f.qscale_table[mb_xy] = 0;
+ h->cur_pic.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
@@ -1173,7 +1173,7 @@ decode_intra_mb:
fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
- h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+ h->cur_pic.qscale_table[mb_xy] = h->qscale;
write_back_non_zero_count(h);
return 0;
diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
index 929997c..a6c2134 100644
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -257,9 +257,9 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
int b = h->slice_beta_offset - qp_bd_offset;
int mb_type = h->cur_pic.f.mb_type[mb_xy];
- int qp = h->cur_pic.f.qscale_table[mb_xy];
- int qp0 = h->cur_pic.f.qscale_table[mb_xy - 1];
- int qp1 = h->cur_pic.f.qscale_table[h->top_mb_xy];
+ int qp = h->cur_pic.qscale_table[mb_xy];
+ int qp0 = h->cur_pic.qscale_table[mb_xy - 1];
+ int qp1 = h->cur_pic.qscale_table[h->top_mb_xy];
int qpc = get_chroma_qp( h, 0, qp );
int qpc0 = get_chroma_qp( h, 0, qp0 );
int qpc1 = get_chroma_qp( h, 0, qp1 );
@@ -515,12 +515,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
}
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
- qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbn_xy] + 1) >> 1;
+ qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbn_xy] + 1) >> 1;
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
if (chroma) {
if (chroma444) {
filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
@@ -580,10 +580,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
if(bS[0]+bS[1]+bS[2]+bS[3]){
- qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbm_xy] + 1) >> 1;
+ qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
- chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
- chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
+ chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
if( dir == 0 ) {
filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
if (chroma) {
@@ -663,7 +663,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
/* Filter edge */
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
- qp = h->cur_pic.f.qscale_table[mb_xy];
+ qp = h->cur_pic.qscale_table[mb_xy];
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
if( dir == 0 ) {
filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
@@ -758,9 +758,9 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
}
}
- mb_qp = h->cur_pic.f.qscale_table[mb_xy];
- mbn0_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[0]];
- mbn1_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[1]];
+ mb_qp = h->cur_pic.qscale_table[mb_xy];
+ mbn0_qp = h->cur_pic.qscale_table[h->left_mb_xy[0]];
+ mbn1_qp = h->cur_pic.qscale_table[h->left_mb_xy[1]];
qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h
index b49ff31..846e67f 100644
--- a/libavcodec/h264_mvpred.h
+++ b/libavcodec/h264_mvpred.h
@@ -822,7 +822,7 @@ static void av_unused decode_mb_skip(H264Context *h)
write_back_motion(h, mb_type);
h->cur_pic.f.mb_type[mb_xy] = mb_type;
- h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+ h->cur_pic.qscale_table[mb_xy] = h->qscale;
h->slice_table[mb_xy] = h->slice_num;
h->prev_mb_skipped = 1;
}
diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c
index a57e1f9..f1f7774 100644
--- a/libavcodec/intrax8.c
+++ b/libavcodec/intrax8.c
@@ -773,7 +773,7 @@ int ff_intrax8_decode_picture(IntraX8Context * const w, int
dquant, int quant_of
/*emulate MB info in the relevant tables*/
s->mbskip_table [mb_xy]=0;
s->mbintra_table[mb_xy]=1;
- s->current_picture.f.qscale_table[mb_xy] = w->quant;
+ s->current_picture.qscale_table[mb_xy] = w->quant;
mb_xy++;
}
s->dest[0]+= 8;
diff --git a/libavcodec/ituh263enc.c b/libavcodec/ituh263enc.c
index 07277bb..749738a 100644
--- a/libavcodec/ituh263enc.c
+++ b/libavcodec/ituh263enc.c
@@ -275,7 +275,7 @@ void ff_h263_encode_gob_header(MpegEncContext * s, int
mb_line)
*/
void ff_clean_h263_qscales(MpegEncContext *s){
int i;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
ff_init_qscale_tab(s);
diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
index ebc74a7..7922a61 100644
--- a/libavcodec/mpeg4videodec.c
+++ b/libavcodec/mpeg4videodec.c
@@ -55,7 +55,7 @@ void ff_mpeg4_pred_ac(MpegEncContext * s, int16_t *block, int
n,
{
int i;
int16_t *ac_val, *ac_val1;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
/* find prediction */
ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
@@ -577,7 +577,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
if(cbpc & 4) {
ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb,
2)]);
}
- s->current_picture.f.qscale_table[xy]= s->qscale;
+ s->current_picture.qscale_table[xy]= s->qscale;
s->mbintra_table[xy]= 1;
for(i=0; i<6; i++){
@@ -741,7 +741,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int
mb_count){
if(s->cbp_table[xy] & 8) {
ff_set_qscale(s, s->qscale +
quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.f.qscale_table[xy] = s->qscale;
+ s->current_picture.qscale_table[xy] = s->qscale;
for(i=0; i<6; i++){
int dc_pred_dir;
@@ -758,7 +758,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int
mb_count){
s->current_picture.f.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
s->pred_dir_table[xy]= dir;
} else if (IS_SKIP(s->current_picture.f.mb_type[xy])) {
- s->current_picture.f.qscale_table[xy] = s->qscale;
+ s->current_picture.qscale_table[xy] = s->qscale;
s->cbp_table[xy]= 0;
}else{
int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table,
CBPY_VLC_BITS, 1);
@@ -771,7 +771,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int
mb_count){
if(s->cbp_table[xy] & 8) {
ff_set_qscale(s, s->qscale +
quant_tab[get_bits(&s->gb, 2)]);
}
- s->current_picture.f.qscale_table[xy] = s->qscale;
+ s->current_picture.qscale_table[xy] = s->qscale;
s->cbp_table[xy]&= 3; //remove dquant
s->cbp_table[xy]|= (cbpy^0xf)<<2;
@@ -1075,8 +1075,8 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s,
int16_t block[6][64])
s->use_intra_dc_vlc= s->qscale < s->intra_dc_threshold;
- if (s->current_picture.f.qscale_table[xy] != s->qscale) {
- ff_set_qscale(s, s->current_picture.f.qscale_table[xy]);
+ if (s->current_picture.qscale_table[xy] != s->qscale) {
+ ff_set_qscale(s, s->current_picture.qscale_table[xy]);
}
if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index 9bb1f9a..f925eb8 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -126,7 +126,7 @@ static inline int decide_ac_pred(MpegEncContext * s,
int16_t block[6][64], const
{
int score= 0;
int i, n;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);
@@ -203,7 +203,7 @@ static inline int decide_ac_pred(MpegEncContext * s,
int16_t block[6][64], const
*/
void ff_clean_mpeg4_qscales(MpegEncContext *s){
int i;
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
ff_clean_h263_qscales(s);
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 5aa45d7..48d007c 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -385,7 +385,7 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int
shared)
s->uvlinesize = pic->f.linesize[1];
}
- if (pic->f.qscale_table == NULL) {
+ if (pic->qscale_table == NULL) {
if (s->encoding) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
mb_array_size * sizeof(int16_t), fail)
@@ -404,7 +404,7 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int
shared)
(big_mb_num + s->mb_stride) * sizeof(uint32_t),
fail)
pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
- pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
+ pic->qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
if (s->out_format == FMT_H264) {
for (i = 0; i < 2; i++) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
@@ -461,7 +461,7 @@ static void free_picture(MpegEncContext *s, Picture *pic)
av_freep(&pic->mb_mean);
av_freep(&pic->f.mbskip_table);
av_freep(&pic->qscale_table_base);
- pic->f.qscale_table = NULL;
+ pic->qscale_table = NULL;
av_freep(&pic->mb_type_base);
pic->f.mb_type = NULL;
av_freep(&pic->f.dct_coeff);
@@ -1844,7 +1844,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
}
if (s->avctx->debug & FF_DEBUG_QP) {
av_log(s->avctx, AV_LOG_DEBUG, "%2d",
- pict->qscale_table[x + y * s->mb_stride]);
+ p->qscale_table[x + y * s->mb_stride]);
}
if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
int mb_type = pict->mb_type[x + y * s->mb_stride];
@@ -2009,7 +2009,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
}
}
if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
- uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
+ uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
0x0101010101010101ULL;
int y;
for (y = 0; y < block_height; y++) {
@@ -2241,7 +2241,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, int16_t
block[12][64],
}
}
- s->current_picture.f.qscale_table[mb_xy] = s->qscale;
+ s->current_picture.qscale_table[mb_xy] = s->qscale;
/* update DC predictors for P macroblocks */
if (!s->mb_intra) {
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index e5cc201..48c2bf1 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -149,6 +149,8 @@ typedef struct Picture{
void *hwaccel_picture_private;
int reference;
+
+ int8_t *qscale_table;
} Picture;
/**
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index b4b676f..f1f937d 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -177,7 +177,7 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t
*matrix)
*/
void ff_init_qscale_tab(MpegEncContext *s)
{
- int8_t * const qscale_table = s->current_picture.f.qscale_table;
+ int8_t * const qscale_table = s->current_picture.qscale_table;
int i;
for (i = 0; i < s->mb_num; i++) {
@@ -1751,7 +1751,7 @@ static av_always_inline void
encode_mb_internal(MpegEncContext *s,
update_qscale(s);
if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
- s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
+ s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
s->dquant = s->qscale - last_qp;
if (s->out_format == FMT_H263) {
@@ -2812,7 +2812,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
}
}
- s->current_picture.f.qscale_table[xy] = best_s.qscale;
+ s->current_picture.qscale_table[xy] = best_s.qscale;
copy_context_after_encode(s, &best_s, -1);
diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c
index 9a2b140..cdf73a7 100644
--- a/libavcodec/mpegvideo_xvmc.c
+++ b/libavcodec/mpegvideo_xvmc.c
@@ -179,7 +179,7 @@ void ff_xvmc_decode_mb(MpegEncContext *s)
// Do I need to export quant when I could not perform postprocessing?
// Anyway, it doesn't hurt.
- s->current_picture.f.qscale_table[mb_xy] = s->qscale;
+ s->current_picture.qscale_table[mb_xy] = s->qscale;
// start of XVMC-specific code
render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
diff --git a/libavcodec/rv30.c b/libavcodec/rv30.c
index 3c3579b..d343d61 100644
--- a/libavcodec/rv30.c
+++ b/libavcodec/rv30.c
@@ -154,9 +154,9 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
*/
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
if(mb_x)
- left_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - 1]];
+ left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
for(j = 0; j < 16; j += 4){
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
for(i = !mb_x; i < 4; i++, Y += 4){
@@ -196,9 +196,9 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
- cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
+ cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
if(row)
- top_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - s->mb_stride]];
+ top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
for(j = 4*!row; j < 16; j += 4){
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index 3c3e3e2..eec4d6e 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -1205,7 +1205,7 @@ static int rv34_decode_inter_macroblock(RV34DecContext
*r, int8_t *intra_types)
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
- s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;
+ s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
if(cbp == -1)
return -1;
@@ -1307,7 +1307,7 @@ static int rv34_decode_intra_macroblock(RV34DecContext
*r, int8_t *intra_types)
r->cbp_luma [mb_pos] = cbp;
r->cbp_chroma[mb_pos] = cbp >> 16;
r->deblock_coefs[mb_pos] = 0xFFFF;
- s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;
+ s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
if(cbp == -1)
return -1;
diff --git a/libavcodec/rv40.c b/libavcodec/rv40.c
index f95622a..55864cf 100644
--- a/libavcodec/rv40.c
+++ b/libavcodec/rv40.c
@@ -379,7 +379,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
unsigned y_to_deblock;
int c_to_deblock[2];
- q = s->current_picture_ptr->f.qscale_table[mb_pos];
+ q = s->current_picture_ptr->qscale_table[mb_pos];
alpha = rv40_alpha_tab[q];
beta = rv40_beta_tab [q];
betaY = betaC = beta * 3;
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index a8baffa..c5a104f 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -2382,17 +2382,17 @@ static inline int vc1_pred_dc(MpegEncContext *s, int
overlap, int pq, int n,
b = dc_val[ - 1 - wrap];
a = dc_val[ - wrap];
/* scale predictors if needed */
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
dqscale_index = s->y_dc_scale_table[q1] - 1;
if (dqscale_index < 0)
return 0;
if (c_avail && (n != 1 && n != 3)) {
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (q2 && q2 != q1)
c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] +
0x20000) >> 18;
}
if (a_avail && (n != 2 && n != 3)) {
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if (q2 && q2 != q1)
a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] +
0x20000) >> 18;
}
@@ -2402,7 +2402,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int
overlap, int pq, int n,
off--;
if (n != 2)
off -= s->mb_stride;
- q2 = s->current_picture.f.qscale_table[off];
+ q2 = s->current_picture.qscale_table[off];
if (q2 && q2 != q1)
b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] +
0x20000) >> 18;
}
@@ -2773,11 +2773,11 @@ static int vc1_decode_i_block_adv(VC1Context *v,
int16_t block[64], int n,
else // top
ac_val -= 16 * s->block_wrap[n];
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
if ( dc_pred_dir && c_avail && mb_pos)
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if ( dc_pred_dir && n == 1)
q2 = q1;
if (!dc_pred_dir && n == 2)
@@ -2996,11 +2996,11 @@ static int vc1_decode_intra_block(VC1Context *v,
int16_t block[64], int n,
else //top
ac_val -= 16 * s->block_wrap[n];
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
if (dc_pred_dir && c_avail && mb_pos)
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if ( dc_pred_dir && n == 1)
q2 = q1;
if (!dc_pred_dir && n == 2)
@@ -3494,7 +3494,7 @@ static int vc1_decode_p_mb(VC1Context *v)
mquant = v->pq;
cbp = 0;
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
@@ -3549,7 +3549,7 @@ static int vc1_decode_p_mb(VC1Context *v)
s->dc_val[0][s->block_index[i]] = 0;
}
s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0],
0, 0);
vc1_mc_1mv(v, 0);
}
@@ -3592,7 +3592,7 @@ static int vc1_decode_p_mb(VC1Context *v)
if (!intra_count && !coded_inter)
goto end;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* test if block is intra and has pred */
{
int intrapred = 0;
@@ -3655,7 +3655,7 @@ static int vc1_decode_p_mb(VC1Context *v)
}
} else { // skipped MB
s->mb_intra = 0;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
for (i = 0; i < 6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
@@ -3665,7 +3665,7 @@ static int vc1_decode_p_mb(VC1Context *v)
vc1_mc_4mv_luma(v, i, 0);
}
vc1_mc_4mv_chroma(v, 0);
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
}
}
end:
@@ -3752,7 +3752,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table,
VC1_CBPCY_P_VLC_BITS, 2);
v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary
here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -3844,7 +3844,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
}
if (cbp)
GET_MQUANT(); // p. 227
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
for (i = 0; i < 6; i++) {
@@ -3874,7 +3874,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
s->dc_val[0][s->block_index[i]] = 0;
}
s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
v->blk_mv_type[s->block_index[0]] = 0;
v->blk_mv_type[s->block_index[1]] = 0;
v->blk_mv_type[s->block_index[2]] = 0;
@@ -3915,7 +3915,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
s->current_picture.f.motion_val[1][s->block_index[0] +
v->blocks_off][1] = 0;
s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -3978,7 +3978,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
if (cbp) {
GET_MQUANT();
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp) {
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
}
@@ -4044,7 +4044,7 @@ static void vc1_decode_b_mb(VC1Context *v)
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
if (!direct) {
if (!skipped) {
@@ -4081,7 +4081,7 @@ static void vc1_decode_b_mb(VC1Context *v)
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
s->mb_intra = 0;
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
@@ -4096,7 +4096,7 @@ static void vc1_decode_b_mb(VC1Context *v)
}
if (s->mb_intra && !mb_has_coeffs) {
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
s->ac_pred = get_bits1(gb);
cbp = 0;
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
@@ -4118,7 +4118,7 @@ static void vc1_decode_b_mb(VC1Context *v)
s->ac_pred = get_bits1(gb);
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table,
VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
}
@@ -4189,7 +4189,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
s->current_picture.f.mb_type[mb_pos + v->mb_off] =
MB_TYPE_INTRA;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -4290,7 +4290,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
if (cbp) {
GET_MQUANT();
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp) {
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
}
@@ -4373,7 +4373,7 @@ static void vc1_decode_i_blocks(VC1Context *v)
s->dsp.clear_blocks(s->block[0]);
mb_pos = s->mb_x + s->mb_y * s->mb_width;
s->current_picture.f.mb_type[mb_pos] =
MB_TYPE_INTRA;
- s->current_picture.f.qscale_table[mb_pos] = v->pq;
+ s->current_picture.qscale_table[mb_pos] = v->pq;
s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
@@ -4530,7 +4530,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
--
1.7.10.4
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel