Also drop the notion of active PPS/SPS from H264ParamSets.
This way, encountering new parameter sets that overwrite the old ones
won't make the active parameter sets suddenly disappear while the
decoder is still using them.
---
libavcodec/dxva2_h264.c | 10 +++---
libavcodec/h264_cabac.c | 20 +++++------
libavcodec/h264_cavlc.c | 26 +++++++-------
libavcodec/h264_direct.c | 6 ++--
libavcodec/h264_loopfilter.c | 38 ++++++++++----------
libavcodec/h264_mb.c | 8 ++---
libavcodec/h264_mb_template.c | 20 +++++------
libavcodec/h264_mvpred.h | 2 +-
libavcodec/h264_parser.c | 29 ++++++++-------
libavcodec/h264_ps.c | 5 ---
libavcodec/h264_ps.h | 5 ---
libavcodec/h264_refs.c | 10 +++---
libavcodec/h264_slice.c | 84 +++++++++++++++++++++++++------------------
libavcodec/h264dec.c | 10 ++++--
libavcodec/h264dec.h | 13 ++++---
libavcodec/vaapi_h264.c | 6 ++--
libavcodec/vdpau_h264.c | 4 +--
17 files changed, 155 insertions(+), 141 deletions(-)
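
For reviewers skimming the diff: the heart of the change is in h264_init_ps(),
where the decoder now takes its own reference to the PPS buffer (and reaches
the SPS through pps->sps_buf) instead of borrowing whatever pointer currently
sits in the shared parameter-set lists. Below is a minimal sketch of that
ownership model, using the real AVBufferRef API but simplified, hypothetical
context fields -- an illustration of the idea, not the patch itself.

/* Sketch only: the refcounting idea used by h264_init_ps() in this patch,
 * with made-up context fields (DecoderCtx is hypothetical). */
#include <libavutil/buffer.h>
#include <libavutil/error.h>

typedef struct DecoderCtx {
    AVBufferRef *pps_buf;  /* the decoder's own reference to the active PPS */
    const void  *pps;      /* convenience pointer into pps_buf->data */
} DecoderCtx;

static int activate_pps(DecoderCtx *d, AVBufferRef *pps_list_entry)
{
    /* Drop the previously active PPS first... */
    av_buffer_unref(&d->pps_buf);
    d->pps = NULL;

    /* ...then take a new reference. Replacing or unreferencing the list
     * entry later cannot free this buffer while the decoder holds it. */
    d->pps_buf = av_buffer_ref(pps_list_entry);
    if (!d->pps_buf)
        return AVERROR(ENOMEM);
    d->pps = d->pps_buf->data;
    return 0;
}

With this scheme, overwriting ps.pps_list[id] merely drops the list's
reference; the buffer the decoder still holds is freed only once the decoder
unrefs it.
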
diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c
index 84959c5..611ef61 100644
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -51,8 +51,8 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
DXVA_PicParams_H264 *pp)
{
const H264Picture *current_picture = h->cur_pic_ptr;
- const SPS *sps = h->ps.sps;
- const PPS *pps = h->ps.pps;
+ const SPS *sps = h->sps;
+ const PPS *pps = h->pps;
int i, j;
memset(pp, 0, sizeof(*pp));
@@ -166,7 +166,7 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
static void fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, const H264Context *h, DXVA_Qmatrix_H264 *qm)
{
unsigned i, j;
- const PPS *pps = h->ps.pps;
+ const PPS *pps = h->pps;
memset(qm, 0, sizeof(*qm));
if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) {
for (i = 0; i < 6; i++)
@@ -284,11 +284,11 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
}
}
slice->slice_qs_delta = 0; /* XXX not implemented by Libav */
- slice->slice_qp_delta = sl->qscale - h->ps.pps->init_qp;
+ slice->slice_qp_delta = sl->qscale - h->pps->init_qp;
slice->redundant_pic_cnt = sl->redundant_pic_count;
if (sl->slice_type == AV_PICTURE_TYPE_B)
slice->direct_spatial_mv_pred_flag = sl->direct_spatial_mv_pred;
- slice->cabac_init_idc = h->ps.pps->cabac ? sl->cabac_init_idc : 0;
+ slice->cabac_init_idc = h->pps->cabac ? sl->cabac_init_idc : 0;
if (sl->deblocking_filter < 2)
slice->disable_deblocking_filter_idc = 1 - sl->deblocking_filter;
else
diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c
index b28e486..d71ef57 100644
--- a/libavcodec/h264_cabac.c
+++ b/libavcodec/h264_cabac.c
@@ -1264,7 +1264,7 @@ void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
{
int i;
const int8_t (*tab)[2];
- const int slice_qp = av_clip(sl->qscale - 6*(h->ps.sps->bit_depth_luma-8), 0, 51);
+ const int slice_qp = av_clip(sl->qscale - 6*(h->sps->bit_depth_luma-8), 0, 51);
if (sl->slice_type_nos == AV_PICTURE_TYPE_I) tab = cabac_context_init_I;
else tab = cabac_context_init_PB[sl->cabac_init_idc];
@@ -1869,7 +1869,7 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
decode_cabac_residual_dc(h, sl, sl->mb_luma_dc[p], ctx_cat[0][p], LUMA_DC_BLOCK_INDEX+p, scan, 16);
if( cbp&15 ) {
- qmul = h->ps.pps->dequant4_coeff[p][qscale];
+ qmul = h->pps->dequant4_coeff[p][qscale];
for( i4x4 = 0; i4x4 < 16; i4x4++ ) {
const int index = 16*p + i4x4;
decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[1][p], index, scan + 1, qmul, 15);
@@ -1884,9 +1884,9 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
if( IS_8x8DCT(mb_type) ) {
const int index = 16*p + 4*i8x8;
decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[3][p], index,
- scan8x8, h->ps.pps->dequant8_coeff[cqm][qscale], 64);
+ scan8x8, h->pps->dequant8_coeff[cqm][qscale], 64);
} else {
- qmul = h->ps.pps->dequant4_coeff[cqm][qscale];
+ qmul = h->pps->dequant4_coeff[cqm][qscale];
for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
const int index = 16*p + 4*i8x8 + i4x4;
//START_TIMER
@@ -1907,10 +1907,10 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
*/
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
{
- const SPS *sps = h->ps.sps;
+ const SPS *sps = h->sps;
int mb_xy;
int mb_type, partition_count, cbp = 0;
- int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
+ int dct8x8_allowed= h->pps->transform_8x8_mode;
int decode_chroma = sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2;
const int pixel_shift = h->pixel_shift;
@@ -2385,8 +2385,8 @@ decode_intra_mb:
if (sl->qscale < 0) sl->qscale += max_qp + 1;
else sl->qscale -= max_qp + 1;
}
- sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
- sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
+ sl->chroma_qp[0] = get_chroma_qp(h->pps, 0, sl->qscale);
+ sl->chroma_qp[1] = get_chroma_qp(h->pps, 1, sl->qscale);
}else
sl->last_qscale_diff=0;
@@ -2407,7 +2407,7 @@ decode_intra_mb:
int c, i, i8x8;
for( c = 0; c < 2; c++ ) {
int16_t *mb = sl->mb + (16*(16 + 16*c) << pixel_shift);
- qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
+ qmul = h->pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
for (i8x8 = 0; i8x8 < 2; i8x8++) {
for (i = 0; i < 4; i++) {
const int index = 16 + 16 * c + 8*i8x8 + i;
@@ -2431,7 +2431,7 @@ decode_intra_mb:
if( cbp&0x20 ) {
int c, i;
for( c = 0; c < 2; c++ ) {
- qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
+ qmul = h->pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
for( i = 0; i < 4; i++ ) {
const int index = 16 + 16 * c + i;
decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15);
diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c
index c11e211..0786852 100644
--- a/libavcodec/h264_cavlc.c
+++ b/libavcodec/h264_cavlc.c
@@ -649,7 +649,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8 + p*16;
if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift),
- index, scan + 1, h->ps.pps->dequant4_coeff[p][qscale], 15) < 0 ){
+ index, scan + 1, h->pps->dequant4_coeff[p][qscale], 15) < 0 ){
return -1;
}
}
@@ -671,7 +671,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8 + p*16;
if( decode_residual(h, sl, gb, buf, index, scan8x8+16*i4x4,
- h->ps.pps->dequant8_coeff[cqm][qscale], 16) < 0 )
+ h->pps->dequant8_coeff[cqm][qscale], 16) < 0 )
return -1;
}
nnz = &sl->non_zero_count_cache[scan8[4 * i8x8 + p * 16]];
@@ -681,7 +681,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8 + p*16;
if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index,
- scan, h->ps.pps->dequant4_coeff[cqm][qscale], 16) < 0 ){
+ scan, h->pps->dequant4_coeff[cqm][qscale], 16) < 0 ){
return -1;
}
new_cbp |= sl->non_zero_count_cache[scan8[index]] << i8x8;
@@ -701,8 +701,8 @@ int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
int mb_xy;
int partition_count;
unsigned int mb_type, cbp;
- int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
- int decode_chroma = h->ps.sps->chroma_format_idc == 1 || h->ps.sps->chroma_format_idc == 2;
+ int dct8x8_allowed= h->pps->transform_8x8_mode;
+ int decode_chroma = h->sps->chroma_format_idc == 1 || h->sps->chroma_format_idc == 2;
const int pixel_shift = h->pixel_shift;
mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
@@ -767,8 +767,8 @@ decode_intra_mb:
h->slice_table[mb_xy] = sl->slice_num;
if(IS_INTRA_PCM(mb_type)){
- const int mb_size = ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] *
- h->ps.sps->bit_depth_luma;
+ const int mb_size = ff_h264_mb_sizes[h->sps->chroma_format_idc] *
+ h->sps->bit_depth_luma;
// We assume these blocks are very rare so we do not optimize it.
sl->intra_pcm_ptr = align_get_bits(&sl->gb);
@@ -941,7 +941,7 @@ decode_intra_mb:
}
}else if(IS_DIRECT(mb_type)){
ff_h264_pred_direct_motion(h, sl, &mb_type);
- dct8x8_allowed &= h->ps.sps->direct_8x8_inference_flag;
+ dct8x8_allowed &= h->sps->direct_8x8_inference_flag;
}else{
int list, mx, my, i;
//FIXME we should set ref_idx_l? to 0 if we use that later ...
@@ -1091,7 +1091,7 @@ decode_intra_mb:
int ret;
GetBitContext *gb = &sl->gb;
const uint8_t *scan, *scan8x8;
- const int max_qp = 51 + 6 * (h->ps.sps->bit_depth_luma - 8);
+ const int max_qp = 51 + 6 * (h->sps->bit_depth_luma - 8);
if(IS_INTERLACED(mb_type)){
scan8x8 = sl->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
@@ -1114,8 +1114,8 @@ decode_intra_mb:
}
}
- sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
- sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
+ sl->chroma_qp[0] = get_chroma_qp(h->pps, 0, sl->qscale);
+ sl->chroma_qp[1] = get_chroma_qp(h->pps, 1, sl->qscale);
if ((ret = decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 0)) < 0 ) {
return -1;
@@ -1140,7 +1140,7 @@ decode_intra_mb:
if(cbp&0x20){
for(chroma_idx=0; chroma_idx<2; chroma_idx++){
- const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
+ const uint32_t *qmul = h->pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift);
for (i8x8 = 0; i8x8 < 2; i8x8++) {
for (i4x4 = 0; i4x4 < 4; i4x4++) {
@@ -1166,7 +1166,7 @@ decode_intra_mb:
if(cbp&0x20){
for(chroma_idx=0; chroma_idx<2; chroma_idx++){
- const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
+ const uint32_t *qmul = h->pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
for(i4x4=0; i4x4<4; i4x4++){
const int index= 16 + 16*chroma_idx + i4x4;
if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index, scan + 1, qmul, 15) < 0){
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index 7ec49b6..04e2afd 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -311,7 +311,7 @@ single_col:
*mb_type |= MB_TYPE_DIRECT2 |
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
} else {
- if (!h->ps.sps->direct_8x8_inference_flag) {
+ if (!h->sps->direct_8x8_inference_flag) {
/* FIXME: Save sub mb types from previous frames (or derive
* from MVs) so we know exactly what block size to use. */
sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */
@@ -534,7 +534,7 @@ single_col:
*mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
} else {
- if (!h->ps.sps->direct_8x8_inference_flag) {
+ if (!h->sps->direct_8x8_inference_flag) {
/* FIXME: save sub mb types from previous frames (or derive
* from MVs) so we know exactly what block size to use */
sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
@@ -575,7 +575,7 @@ single_col:
if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
int y_shift = 2 * !IS_INTERLACED(*mb_type);
- assert(h->ps.sps->direct_8x8_inference_flag);
+ assert(h->sps->direct_8x8_inference_flag);
for (i8 = 0; i8 < 4; i8++) {
const int x8 = i8 & 1;
diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
index f39b951..1558f35 100644
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -253,7 +253,7 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
int left_type = sl->left_type[LTOP];
int top_type = sl->top_type;
- int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
+ int qp_bd_offset = 6 * (h->sps->bit_depth_luma - 8);
int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
int b = 52 + sl->slice_beta_offset - qp_bd_offset;
@@ -261,9 +261,9 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
int qp = h->cur_pic.qscale_table[mb_xy];
int qp0 = h->cur_pic.qscale_table[mb_xy - 1];
int qp1 = h->cur_pic.qscale_table[sl->top_mb_xy];
- int qpc = get_chroma_qp(h->ps.pps, 0, qp);
- int qpc0 = get_chroma_qp(h->ps.pps, 0, qp0);
- int qpc1 = get_chroma_qp(h->ps.pps, 0, qp1);
+ int qpc = get_chroma_qp(h->pps, 0, qp);
+ int qpc0 = get_chroma_qp(h->pps, 0, qp0);
+ int qpc1 = get_chroma_qp(h->pps, 0, qp1);
qp0 = (qp + qp0 + 1) >> 1;
qp1 = (qp + qp1 + 1) >> 1;
qpc0 = (qpc + qpc0 + 1) >> 1;
@@ -423,7 +423,7 @@ void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl,
unsigned int linesize, unsigned int uvlinesize)
{
assert(!FRAME_MBAFF(h));
- if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) {
+ if(!h->h264dsp.h264_loop_filter_strength || h->pps->chroma_qp_diff) {
ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
return;
}
@@ -532,8 +532,8 @@ static av_always_inline void filter_mb_dir(const H264Context *h, H264SliceContex
ff_tlog(h->avctx, "filter mb:%d/%d dir:%d, QPy:%d ls:%d
uvls:%d", mb_x, mb_y, dir, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, "
bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a,
b, h, 0 );
- chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
- chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+ chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->pps, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->pps, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
if (chroma) {
if (chroma444) {
filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
@@ -595,8 +595,8 @@ static av_always_inline void filter_mb_dir(const H264Context *h, H264SliceContex
if(bS[0]+bS[1]+bS[2]+bS[3]){
qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
- chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
- chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+ chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->pps, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+ chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->pps, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
if( dir == 0 ) {
filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
if (chroma) {
@@ -723,7 +723,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
int first_vertical_edge_done = 0;
int chroma = !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
- int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
+ int qp_bd_offset = 6 * (h->sps->bit_depth_luma - 8);
int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
int b = 52 + sl->slice_beta_offset - qp_bd_offset;
@@ -766,7 +766,7 @@ void ff_h264_filter_mb(const H264Context *h,
H264SliceContext *sl,
bS[i] = 4;
else{
bS[i] = 1 + !!(sl->non_zero_count_cache[12+8*(i>>1)] |
- ((!h->ps.pps->cabac && IS_8x8DCT(mbn_type)) ?
+ ((!h->pps->cabac && IS_8x8DCT(mbn_type)) ?
(h->cbp_table[mbn_xy] & (((MB_FIELD(sl) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
:
h->non_zero_count[mbn_xy][ off[i] ]));
@@ -778,15 +778,15 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
mbn0_qp = h->cur_pic.qscale_table[sl->left_mb_xy[0]];
mbn1_qp = h->cur_pic.qscale_table[sl->left_mb_xy[1]];
qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
- bqp[0] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
- get_chroma_qp(h->ps.pps, 0, mbn0_qp) + 1) >> 1;
- rqp[0] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
- get_chroma_qp(h->ps.pps, 1, mbn0_qp) + 1) >> 1;
+ bqp[0] = (get_chroma_qp(h->pps, 0, mb_qp) +
+ get_chroma_qp(h->pps, 0, mbn0_qp) + 1) >> 1;
+ rqp[0] = (get_chroma_qp(h->pps, 1, mb_qp) +
+ get_chroma_qp(h->pps, 1, mbn0_qp) + 1) >> 1;
qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1;
- bqp[1] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
- get_chroma_qp(h->ps.pps, 0, mbn1_qp) + 1 ) >> 1;
- rqp[1] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
- get_chroma_qp(h->ps.pps, 1, mbn1_qp) + 1 ) >> 1;
+ bqp[1] = (get_chroma_qp(h->pps, 0, mb_qp) +
+ get_chroma_qp(h->pps, 0, mbn1_qp) + 1 ) >> 1;
+ rqp[1] = (get_chroma_qp(h->pps, 1, mb_qp) +
+ get_chroma_qp(h->pps, 1, mbn1_qp) + 1 ) >> 1;
/* Filter edge */
ff_tlog(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d
QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0],
rqp[1], linesize, uvlinesize);
diff --git a/libavcodec/h264_mb.c b/libavcodec/h264_mb.c
index f037bd5..960b25e 100644
--- a/libavcodec/h264_mb.c
+++ b/libavcodec/h264_mb.c
@@ -635,7 +635,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
for (i = 0; i < 16; i += 4) {
uint8_t *const ptr = dest_y + block_offset[i];
const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
- if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
+ if (transform_bypass && h->sps->profile_idc == 244 && dir <= 1) {
h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
} else {
const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
@@ -661,7 +661,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
uint8_t *const ptr = dest_y + block_offset[i];
const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
- if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
+ if (transform_bypass && h->sps->profile_idc == 244 && dir <= 1) {
h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
} else {
uint8_t *topright;
@@ -700,7 +700,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
if (!transform_bypass)
h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
sl->mb_luma_dc[p],
- h->ps.pps->dequant4_coeff[p][qscale][0]);
+ h->pps->dequant4_coeff[p][qscale][0]);
else {
static const uint8_t dc_mapping[16] = {
0 * 16, 1 * 16, 4 * 16, 5 * 16,
@@ -732,7 +732,7 @@ static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264Sl
if (!IS_INTRA4x4(mb_type)) {
if (IS_INTRA16x16(mb_type)) {
if (transform_bypass) {
- if (h->ps.sps->profile_idc == 244 &&
+ if (h->sps->profile_idc == 244 &&
(sl->intra16x16_pred_mode == VERT_PRED8x8 ||
sl->intra16x16_pred_mode == HOR_PRED8x8)) {
h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y,
block_offset,
diff --git a/libavcodec/h264_mb_template.c b/libavcodec/h264_mb_template.c
index 1f583df..83467a9 100644
--- a/libavcodec/h264_mb_template.c
+++ b/libavcodec/h264_mb_template.c
@@ -48,7 +48,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
int linesize, uvlinesize /*dct_offset*/;
int i, j;
const int *block_offset = &h->block_offset[0];
- const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
+ const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps->transform_bypass);
void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
const int block_h = 16 >> h->chroma_y_shift;
const int chroma422 = CHROMA422(h);
@@ -97,11 +97,11 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
if (PIXEL_SHIFT) {
- const int bit_depth = h->ps.sps->bit_depth_luma;
+ const int bit_depth = h->sps->bit_depth_luma;
int j;
GetBitContext gb;
init_get_bits(&gb, sl->intra_pcm_ptr,
- ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] * bit_depth);
+ ff_h264_mb_sizes[h->sps->chroma_format_idc] * bit_depth);
for (i = 0; i < 16; i++) {
uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
@@ -109,7 +109,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
tmp_y[j] = get_bits(&gb, bit_depth);
}
if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
- if (!h->ps.sps->chroma_format_idc) {
+ if (!h->sps->chroma_format_idc) {
for (i = 0; i < block_h; i++) {
uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
for (j = 0; j < 8; j++)
@@ -137,7 +137,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
for (i = 0; i < 16; i++)
memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
- if (!h->ps.sps->chroma_format_idc) {
+ if (!h->sps->chroma_format_idc) {
for (i = 0; i < block_h; i++) {
memset(dest_cb + i * uvlinesize, 128, 8);
memset(dest_cr + i * uvlinesize, 128, 8);
@@ -197,7 +197,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
(sl->cbp & 0x30)) {
uint8_t *dest[2] = { dest_cb, dest_cr };
if (transform_bypass) {
- if (IS_INTRA(mb_type) && h->ps.sps->profile_idc == 244 &&
+ if (IS_INTRA(mb_type) && h->sps->profile_idc == 244 &&
(sl->chroma_pred_mode == VERT_PRED8x8 ||
sl->chroma_pred_mode == HOR_PRED8x8)) {
h->hpc.pred8x8_add[sl->chroma_pred_mode](dest[0],
@@ -238,10 +238,10 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
}
if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 1 << PIXEL_SHIFT),
- h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
+ h->pps->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 2 << PIXEL_SHIFT),
- h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
+ h->pps->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
h->h264dsp.h264_idct_add8(dest, block_offset,
sl->mb, uvlinesize,
sl->non_zero_count_cache);
@@ -266,7 +266,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
int linesize;
int i, j, p;
const int *block_offset = &h->block_offset[0];
- const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
+ const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps->transform_bypass);
const int plane_count = (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) ? 3 : 1;
for (p = 0; p < plane_count; p++) {
@@ -308,7 +308,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
if (PIXEL_SHIFT) {
- const int bit_depth = h->ps.sps->bit_depth_luma;
+ const int bit_depth = h->sps->bit_depth_luma;
GetBitContext gb;
init_get_bits(&gb, sl->intra_pcm_ptr, 768 * bit_depth);
diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h
index 83b1ea6..3e7c130 100644
--- a/libavcodec/h264_mvpred.h
+++ b/libavcodec/h264_mvpred.h
@@ -464,7 +464,7 @@ static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int m
if (!IS_SKIP(mb_type)) {
if (IS_INTRA(mb_type)) {
- int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
+ int type_mask = h->pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
sl->topleft_samples_available =
sl->top_samples_available =
sl->left_samples_available = 0xFFFF;
diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c
index 44a846e..950129f 100644
--- a/libavcodec/h264_parser.c
+++ b/libavcodec/h264_parser.c
@@ -51,6 +51,10 @@ typedef struct H264ParseContext {
H264DSPContext h264dsp;
H264POCContext poc;
H264SEIContext sei;
+
+ const PPS *pps;
+ const SPS *sps;
+
int is_avc;
int nal_length_size;
int got_first;
@@ -123,13 +127,13 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb,
int list_count, ref_count[2];
- if (p->ps.pps->redundant_pic_cnt_present)
+ if (p->pps->redundant_pic_cnt_present)
get_ue_golomb(gb); // redundant_pic_count
if (slice_type_nos == AV_PICTURE_TYPE_B)
get_bits1(gb); // direct_spatial_mv_pred
- if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps,
+ if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->pps,
slice_type_nos, p->picture_structure) < 0)
return AVERROR_INVALIDDATA;
@@ -161,9 +165,9 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb,
}
}
- if ((p->ps.pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
- (p->ps.pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
- ff_h264_pred_weight_table(gb, p->ps.sps, ref_count, slice_type_nos,
+ if ((p->pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
+ (p->pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
+ ff_h264_pred_weight_table(gb, p->sps, ref_count, slice_type_nos,
&pwt);
if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
@@ -299,15 +303,10 @@ static inline int parse_nal_units(AVCodecParserContext *s,
"non-existing PPS %u referenced\n", pps_id);
goto fail;
}
- p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data;
- if (!p->ps.sps_list[p->ps.pps->sps_id]) {
- av_log(avctx, AV_LOG_ERROR,
- "non-existing SPS %u referenced\n", p->ps.pps->sps_id);
- goto fail;
- }
- p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data;
+ p->pps = (const PPS*)p->ps.pps_list[pps_id]->data;
+ p->sps = (const SPS*)p->pps->sps_buf->data;
- sps = p->ps.sps;
+ sps = p->sps;
p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
@@ -358,7 +357,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
if (sps->poc_type == 0) {
p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
- if (p->ps.pps->pic_order_present == 1 &&
+ if (p->pps->pic_order_present == 1 &&
p->picture_structure == PICT_FRAME)
p->poc.delta_poc_bottom = get_se_golomb(&nal.gb);
}
@@ -367,7 +366,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
!sps->delta_pic_order_always_zero_flag) {
p->poc.delta_poc[0] = get_se_golomb(&nal.gb);
- if (p->ps.pps->pic_order_present == 1 &&
+ if (p->pps->pic_order_present == 1 &&
p->picture_structure == PICT_FRAME)
p->poc.delta_poc[1] = get_se_golomb(&nal.gb);
}
diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c
index ba6e371..80ce800 100644
--- a/libavcodec/h264_ps.c
+++ b/libavcodec/h264_ps.c
@@ -109,8 +109,6 @@ static const int level_max_dpb_mbs[][2] = {
static void remove_pps(H264ParamSets *s, int id)
{
- if (s->pps_list[id] && s->pps == (const PPS*)s->pps_list[id]->data)
- s->pps = NULL;
av_buffer_unref(&s->pps_list[id]);
}
@@ -118,9 +116,6 @@ static void remove_sps(H264ParamSets *s, int id)
{
int i;
if (s->sps_list[id]) {
- if (s->sps == (SPS*)s->sps_list[id]->data)
- s->sps = NULL;
-
/* drop all PPS that depend on this SPS */
for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++)
if (s->pps_list[i] && ((PPS*)s->pps_list[i]->data)->sps_id == id)
diff --git a/libavcodec/h264_ps.h b/libavcodec/h264_ps.h
index c0ff98c..1090f7d 100644
--- a/libavcodec/h264_ps.h
+++ b/libavcodec/h264_ps.h
@@ -135,11 +135,6 @@ typedef struct PPS {
typedef struct H264ParamSets {
AVBufferRef *sps_list[MAX_SPS_COUNT];
AVBufferRef *pps_list[MAX_PPS_COUNT];
-
- /* currently active parameters sets */
- const PPS *pps;
- // FIXME this should properly be const
- SPS *sps;
} H264ParamSets;
/**
diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c
index 9536c4b..195757d 100644
--- a/libavcodec/h264_refs.c
+++ b/libavcodec/h264_refs.c
@@ -530,10 +530,10 @@ static void generate_sliding_window_mmcos(H264Context *h)
MMCO *mmco = h->mmco;
int nb_mmco = 0;
- assert(h->long_ref_count + h->short_ref_count <= h->ps.sps->ref_frame_count);
+ assert(h->long_ref_count + h->short_ref_count <= h->sps->ref_frame_count);
if (h->short_ref_count &&
- h->long_ref_count + h->short_ref_count == h->ps.sps->ref_frame_count &&
+ h->long_ref_count + h->short_ref_count == h->sps->ref_frame_count &&
!(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) {
mmco[0].opcode = MMCO_SHORT2UNUSED;
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
@@ -557,7 +557,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h)
int current_ref_assigned = 0, err = 0;
H264Picture *av_uninit(pic);
- if (!h->ps.sps) {
+ if (!h->sps) {
av_log(h->avctx, AV_LOG_ERROR, "SPS is unset\n");
err = AVERROR_INVALIDDATA;
goto out;
@@ -704,7 +704,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h)
}
if (h->long_ref_count + h->short_ref_count -
- (h->short_ref[0] == h->cur_pic_ptr) > h->ps.sps->ref_frame_count) {
+ (h->short_ref[0] == h->cur_pic_ptr) > h->sps->ref_frame_count) {
/* We have too many reference frames, probably due to corrupted
* stream. Need to discard one frame. Prevents overrun of the
@@ -713,7 +713,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h)
av_log(h->avctx, AV_LOG_ERROR,
"number of reference frames (%d+%d) exceeds max (%d; probably "
"corrupt input), discarding one\n",
- h->long_ref_count, h->short_ref_count, h->ps.sps->ref_frame_count);
+ h->long_ref_count, h->short_ref_count, h->sps->ref_frame_count);
err = AVERROR_INVALIDDATA;
if (h->long_ref_count && !h->short_ref_count) {
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 705baa0..86fe9d5 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -321,7 +321,7 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
if (dst == src || !h1->context_initialized)
return 0;
- if (!h1->ps.sps)
+ if (!h1->sps)
return AVERROR_INVALIDDATA;
if (inited &&
@@ -329,10 +329,10 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
h->height != h1->height ||
h->mb_width != h1->mb_width ||
h->mb_height != h1->mb_height ||
- !h->ps.sps ||
- h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
- h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
- h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
+ !h->sps ||
+ h->sps->bit_depth_luma != h1->sps->bit_depth_luma ||
+ h->sps->chroma_format_idc != h1->sps->chroma_format_idc ||
+ h->sps->colorspace != h1->sps->colorspace)) {
need_reinit = 1;
}
@@ -354,7 +354,16 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
}
}
- h->ps.sps = h1->ps.sps;
+ av_buffer_unref(&h->pps_buf);
+ h->pps = NULL;
+ h->sps = NULL;
+
+ h->pps_buf = av_buffer_ref(h1->pps_buf);
+ if (!h->pps_buf)
+ return AVERROR(ENOMEM);
+
+ h->pps = h1->pps;
+ h->sps = h1->sps;
if (need_reinit || !inited) {
h->width = h1->width;
@@ -507,7 +516,7 @@ static int h264_frame_start(H264Context *h)
h->postpone_filter = 0;
- h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
+ h->mb_aff_frame = h->sps->mb_aff && (h->picture_structure == PICT_FRAME);
assert(h->cur_pic_ptr->long_ref == 0);
@@ -697,7 +706,7 @@ static void init_scan_tables(H264Context *h)
h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
#undef TRANSPOSE
}
- if (h->ps.sps->transform_bypass) { // FIXME same ugly
+ if (h->sps->transform_bypass) { // FIXME same ugly
h->zigzag_scan_q0 = ff_zigzag_scan;
h->zigzag_scan8x8_q0 = ff_zigzag_direct;
h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
@@ -724,7 +733,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
const enum AVPixelFormat *choices = pix_fmts;
- switch (h->ps.sps->bit_depth_luma) {
+ switch (h->sps->bit_depth_luma) {
case 9:
if (CHROMA444(h)) {
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
@@ -787,7 +796,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
break;
default:
av_log(h->avctx, AV_LOG_ERROR,
- "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
+ "Unsupported bit depth %d\n", h->sps->bit_depth_luma);
return AVERROR_INVALIDDATA;
}
@@ -799,7 +808,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
/* export coded and cropped frame dimensions to AVCodecContext */
static int init_dimensions(H264Context *h)
{
- SPS *sps = h->ps.sps;
+ const SPS *sps = h->sps;
int cr = sps->crop_right;
int cl = sps->crop_left;
int ct = sps->crop_top;
@@ -837,7 +846,7 @@ static int init_dimensions(H264Context *h)
static int h264_slice_header_init(H264Context *h)
{
- const SPS *sps = h->ps.sps;
+ const SPS *sps = h->sps;
int i, ret;
ff_set_sar(h->avctx, sps->sar);
@@ -915,15 +924,22 @@ static int h264_init_ps(H264Context *h, const H264SliceContext *sl)
const SPS *sps;
int needs_reinit = 0, ret;
- h->ps.pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
- if (h->ps.sps != (const SPS*)h->ps.pps->sps_buf->data) {
- h->ps.sps = (SPS*)h->ps.pps->sps_buf->data;
+ if (h->pps != (const PPS*)h->ps.pps_list[sl->pps_id]->data) {
+ av_buffer_unref(&h->pps_buf);
+ h->pps = NULL;
+ h->sps = NULL;
+
+ h->pps_buf = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
+ if (!h->pps_buf)
+ return AVERROR(ENOMEM);
+ h->pps = (const PPS*)h->pps_buf->data;
+ h->sps = (const SPS*)h->pps->sps_buf->data;
- if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
- h->chroma_format_idc != h->ps.sps->chroma_format_idc)
+ if (h->bit_depth_luma != h->sps->bit_depth_luma ||
+ h->chroma_format_idc != h->sps->chroma_format_idc)
needs_reinit = 1;
}
- sps = h->ps.sps;
+ sps = h->sps;
h->avctx->profile = ff_h264_get_profile(sps);
h->avctx->level = sps->level_idc;
@@ -994,7 +1010,7 @@ static int h264_init_ps(H264Context *h, const H264SliceContext *sl)
static int h264_export_frame_props(H264Context *h)
{
- const SPS *sps = h->ps.sps;
+ const SPS *sps = h->sps;
H264Picture *cur = h->cur_pic_ptr;
cur->f->interlaced_frame = 0;
@@ -1160,7 +1176,7 @@ static int h264_export_frame_props(H264Context *h)
static int h264_select_output_frame(H264Context *h)
{
- const SPS *sps = h->ps.sps;
+ const SPS *sps = h->sps;
H264Picture *out = h->cur_pic_ptr;
H264Picture *cur = h->cur_pic_ptr;
int i, pics, out_of_order, out_idx;
@@ -1307,7 +1323,7 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
if (ret < 0)
return ret;
- sps = h->ps.sps;
+ sps = h->sps;
last_pic_droppable = h->droppable;
last_pic_structure = h->picture_structure;
@@ -1479,7 +1495,7 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
}
ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
- h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
+ h->sps, &h->poc, h->picture_structure, nal->ref_idc);
memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
h->nb_mmco = sl->nb_mmco;
@@ -1489,7 +1505,7 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
if (h->sei.recovery_point.recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
h->recovery_frame = (h->poc.frame_num + h->sei.recovery_point.recovery_frame_cnt) &
- ((1 << h->ps.sps->log2_max_frame_num) - 1);
+ ((1 << h->sps->log2_max_frame_num) - 1);
}
h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE) ||
@@ -1728,7 +1744,7 @@ static int h264_slice_init(H264Context *h, H264SliceContext *sl,
int i, j, ret = 0;
if (h->current_slice > 0) {
- if (h->ps.pps != (const PPS*)h->ps.pps_list[sl->pps_id]->data) {
+ if (h->pps != (const PPS*)h->ps.pps_list[sl->pps_id]->data) {
av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
return AVERROR_INVALIDDATA;
}
@@ -1769,7 +1785,7 @@ static int h264_slice_init(H264Context *h, H264SliceContext *sl,
if (ret < 0)
return ret;
- if (h->ps.pps->weighted_bipred_idc == 2 &&
+ if (h->pps->weighted_bipred_idc == 2 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, -1);
if (FRAME_MBAFF(h)) {
@@ -1803,9 +1819,9 @@ static int h264_slice_init(H264Context *h, H264SliceContext *sl,
sl->qp_thresh = 15 -
FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
FFMAX3(0,
- h->ps.pps->chroma_qp_index_offset[0],
- h->ps.pps->chroma_qp_index_offset[1]) +
- 6 * (h->ps.sps->bit_depth_luma - 8);
+ h->pps->chroma_qp_index_offset[0],
+ h->pps->chroma_qp_index_offset[1]) +
+ 6 * (h->sps->bit_depth_luma - 8);
sl->slice_num = ++h->current_slice;
if (sl->slice_num >= MAX_SLICES) {
@@ -2141,7 +2157,7 @@ static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb
/* CAVLC 8x8dct requires NNZ values for residual decoding that differ
* from what the loop filter needs */
- if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
+ if (!CABAC(h) && h->pps->transform_8x8_mode) {
if (IS_8x8DCT(top_type)) {
nnz_cache[4 + 8 * 0] =
nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
@@ -2234,8 +2250,8 @@ static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x,
uvlinesize, 0);
if (fill_filter_caches(h, sl, mb_type))
continue;
- sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
- sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
+ sl->chroma_qp[0] = get_chroma_qp(h->pps, 0, h->cur_pic.qscale_table[mb_xy]);
+ sl->chroma_qp[1] = get_chroma_qp(h->pps, 1, h->cur_pic.qscale_table[mb_xy]);
if (FRAME_MBAFF(h)) {
ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
@@ -2249,8 +2265,8 @@ static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x,
sl->slice_type = old_slice_type;
sl->mb_x = end_x;
sl->mb_y = end_mb_y - FRAME_MBAFF(h);
- sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
- sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
+ sl->chroma_qp[0] = get_chroma_qp(h->pps, 0, sl->qscale);
+ sl->chroma_qp[1] = get_chroma_qp(h->pps, 1, sl->qscale);
}
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
@@ -2335,7 +2351,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
- if (h->ps.pps->cabac) {
+ if (h->pps->cabac) {
/* realign */
align_get_bits(&sl->gb);
diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c
index 2a532a7..5d0d211 100644
--- a/libavcodec/h264dec.c
+++ b/libavcodec/h264dec.c
@@ -353,6 +353,10 @@ static av_cold int h264_decode_end(AVCodecContext *avctx)
for (i = 0; i < MAX_PPS_COUNT; i++)
av_buffer_unref(&h->ps.pps_list[i]);
+ h->sps = NULL;
+ h->pps = NULL;
+ av_buffer_unref(&h->pps_buf);
+
ff_h2645_packet_uninit(&h->pkt);
ff_h264_unref_picture(h, &h->cur_pic);
@@ -393,9 +397,9 @@ static av_cold int h264_decode_init(AVCodecContext *avctx)
}
}
- if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
- h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
- h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
+ if (h->sps && h->sps->bitstream_restriction_flag &&
+ h->avctx->has_b_frames < h->sps->num_reorder_frames) {
+ h->avctx->has_b_frames = h->sps->num_reorder_frames;
}
avctx->internal->allocate_progress = 1;
diff --git a/libavcodec/h264dec.h b/libavcodec/h264dec.h
index fc7beeb..ee21c3c 100644
--- a/libavcodec/h264dec.h
+++ b/libavcodec/h264dec.h
@@ -91,11 +91,11 @@
#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
#ifndef CABAC
-#define CABAC(h) h->ps.pps->cabac
+#define CABAC(h) h->pps->cabac
#endif
-#define CHROMA422(h) (h->ps.sps->chroma_format_idc == 2)
-#define CHROMA444(h) (h->ps.sps->chroma_format_idc == 3)
+#define CHROMA422(h) (h->sps->chroma_format_idc == 2)
+#define CHROMA444(h) (h->sps->chroma_format_idc == 3)
#define MB_TYPE_REF0 MB_TYPE_ACPRED // dirty but it fits in 16 bit
#define MB_TYPE_8x8DCT 0x01000000
@@ -447,6 +447,11 @@ typedef struct H264Context {
H264ParamSets ps;
+ // Currently active parameter sets
+ AVBufferRef *pps_buf;
+ const PPS *pps;
+ const SPS *sps;
+
uint16_t *slice_table_base;
H264POCContext poc;
@@ -789,7 +794,7 @@ static av_always_inline void write_back_motion(const H264Context *h,
static av_always_inline int get_dct8x8_allowed(const H264Context *h,
H264SliceContext *sl)
{
- if (h->ps.sps->direct_8x8_inference_flag)
+ if (h->sps->direct_8x8_inference_flag)
return !(AV_RN64A(sl->sub_mb_type) &
((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
0x0001000100010001ULL));
diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index f765523..6d6d9ea 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -229,8 +229,8 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx,
{
const H264Context *h = avctx->priv_data;
VAAPIDecodePicture *pic = h->cur_pic_ptr->hwaccel_picture_private;
- const PPS *pps = h->ps.pps;
- const SPS *sps = h->ps.sps;
+ const PPS *pps = h->pps;
+ const SPS *sps = h->sps;
VAPictureParameterBufferH264 pic_param;
VAIQMatrixBufferH264 iq_matrix;
int err;
@@ -349,7 +349,7 @@ static int vaapi_h264_decode_slice(AVCodecContext *avctx,
.num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0,
.num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0,
.cabac_init_idc = sl->cabac_init_idc,
- .slice_qp_delta = sl->qscale - h->ps.pps->init_qp,
+ .slice_qp_delta = sl->qscale - h->pps->init_qp,
.disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter,
.slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2,
.slice_beta_offset_div2 = sl->slice_beta_offset / 2,
diff --git a/libavcodec/vdpau_h264.c b/libavcodec/vdpau_h264.c
index a189418..543be64 100644
--- a/libavcodec/vdpau_h264.c
+++ b/libavcodec/vdpau_h264.c
@@ -122,8 +122,8 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
const uint8_t *buffer, uint32_t size)
{
H264Context * const h = avctx->priv_data;
- const PPS *pps = h->ps.pps;
- const SPS *sps = h->ps.sps;
+ const PPS *pps = h->pps;
+ const SPS *sps = h->sps;
H264Picture *pic = h->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoH264 *info = &pic_ctx->info.h264;
--
2.0.0
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel