Merge commit '56febc993b928ccc039a32158ca60b234c311875'
* commit '56febc993b928ccc039a32158ca60b234c311875':
  h264: move the slice type variables into the per-slice context

Conflicts:
  libavcodec/h264.c
  libavcodec/h264_cabac.c
  libavcodec/h264_cavlc.c
  libavcodec/h264_slice.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 97c8ecaada
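The change merged here is mechanical: the per-slice fields slice_num, slice_type, slice_type_nos and slice_type_fixed move out of the shared H264Context into H264SliceContext, and readers of h->slice_type* become readers of sl->slice_type*, with the slice context passed as an extra parameter where a function did not already receive it. Below is a minimal sketch of that pattern, using simplified stand-in struct and function names rather than the real libavcodec definitions:

/* Sketch only: DecCtx/SliceCtx stand in for H264Context/H264SliceContext. */
typedef struct SliceCtx {
    int slice_num;
    int slice_type;        /* now per slice, previously in the shared context */
    int slice_type_nos;    /* SI/SP remapped to I/P */
    int slice_type_fixed;
} SliceCtx;

typedef struct DecCtx {
    SliceCtx slice_ctx[1]; /* slice type fields no longer live in the decoder context itself */
} DecCtx;

/* Callers that used to take the decoder context and read h->slice_type
 * now take the slice context and read sl->slice_type. */
static int get_slice_type(const SliceCtx *sl)
{
    return sl->slice_type;
}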
@@ -223,8 +223,8 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
 slice->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + h->mb_x;
 slice->NumMbsForSlice = 0; /* XXX it is set once we have all slices */
 slice->BitOffsetToSliceData = get_bits_count(&h->gb);
-slice->slice_type = ff_h264_get_slice_type(h);
-if (h->slice_type_fixed)
+slice->slice_type = ff_h264_get_slice_type(sl);
+if (sl->slice_type_fixed)
 slice->slice_type += 5;
 slice->luma_log2_weight_denom = sl->luma_log2_weight_denom;
 slice->chroma_log2_weight_denom = sl->chroma_log2_weight_denom;
@@ -278,7 +278,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
 slice->slice_qs_delta = 0; /* XXX not implemented by FFmpeg */
 slice->slice_qp_delta = sl->qscale - h->pps.init_qp;
 slice->redundant_pic_cnt = h->redundant_pic_count;
-if (h->slice_type == AV_PICTURE_TYPE_B)
+if (sl->slice_type == AV_PICTURE_TYPE_B)
 slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred;
 slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0;
 if (h->deblocking_filter < 2)
@@ -417,6 +417,7 @@ static int dxva2_h264_decode_slice(AVCodecContext *avctx,
 uint32_t size)
 {
 const H264Context *h = avctx->priv_data;
+const H264SliceContext *sl = &h->slice_ctx[0];
 struct dxva_context *ctx = avctx->hwaccel_context;
 const H264Picture *current_picture = h->cur_pic_ptr;
 struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
@@ -438,7 +439,7 @@ static int dxva2_h264_decode_slice(AVCodecContext *avctx,
 &ctx_pic->pp, position, size);
 ctx_pic->slice_count++;

-if (h->slice_type != AV_PICTURE_TYPE_I && h->slice_type != AV_PICTURE_TYPE_SI)
+if (sl->slice_type != AV_PICTURE_TYPE_I && sl->slice_type != AV_PICTURE_TYPE_SI)
 ctx_pic->pp.wBitFields &= ~(1 << 15); /* Set IntraPicFlag to 0 */
 return 0;
 }

@@ -1072,7 +1072,7 @@ int ff_pred_weight_table(H264Context *h, H264SliceContext *sl)
 }
 }
 }
-if (h->slice_type_nos != AV_PICTURE_TYPE_B)
+if (sl->slice_type_nos != AV_PICTURE_TYPE_B)
 break;
 }
 sl->use_weight = sl->use_weight || sl->use_weight_chroma;
@@ -1296,7 +1296,7 @@ int ff_h264_set_parameter_from_sps(H264Context *h)
 return 0;
 }

-int ff_set_ref_count(H264Context *h)
+int ff_set_ref_count(H264Context *h, H264SliceContext *sl)
 {
 int ref_count[2], list_count;
 int num_ref_idx_active_override_flag;
@@ -1305,17 +1305,17 @@ int ff_set_ref_count(H264Context *h)
 ref_count[0] = h->pps.ref_count[0];
 ref_count[1] = h->pps.ref_count[1];

-if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
+if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 unsigned max[2];
 max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31;

-if (h->slice_type_nos == AV_PICTURE_TYPE_B)
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
 h->direct_spatial_mv_pred = get_bits1(&h->gb);
 num_ref_idx_active_override_flag = get_bits1(&h->gb);

 if (num_ref_idx_active_override_flag) {
 ref_count[0] = get_ue_golomb(&h->gb) + 1;
-if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 ref_count[1] = get_ue_golomb(&h->gb) + 1;
 } else
 // full range is spec-ok in this case, even for frames
@@ -1329,7 +1329,7 @@ int ff_set_ref_count(H264Context *h)
 return AVERROR_INVALIDDATA;
 }

-if (h->slice_type_nos == AV_PICTURE_TYPE_B)
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
 list_count = 2;
 else
 list_count = 1;
@@ -1579,7 +1579,7 @@ again:
 break;

 if (h->sei_recovery_frame_cnt >= 0) {
-if (h->frame_num != h->sei_recovery_frame_cnt || hx->slice_type_nos != AV_PICTURE_TYPE_I)
+if (h->frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
 h->valid_recovery_point = 1;

 if ( h->recovery_frame < 0

@@ -336,6 +336,11 @@ typedef struct H264Picture {
 typedef struct H264SliceContext {
 struct H264Context *h264;

+int slice_num;
+int slice_type;
+int slice_type_nos; ///< S free slice type (SI/SP are remapped to I/P)
+int slice_type_fixed;
+
 int qscale;
 int chroma_qp[2]; // QPc
 int qp_thresh; ///< QP threshold to skip loopfilter
@@ -467,11 +472,7 @@ typedef struct H264Context {
 uint32_t(*dequant4_coeff[6])[16];
 uint32_t(*dequant8_coeff[6])[64];

-int slice_num;
 uint16_t *slice_table; ///< slice_table_base + 2*mb_stride + 1
-int slice_type;
-int slice_type_nos; ///< S free slice type (SI/SP are remapped to I/P)
-int slice_type_fixed;

 // interlacing specific flags
 int mb_aff_frame;
@@ -820,7 +821,7 @@ void ff_h264_free_context(H264Context *h);
 /**
 * Reconstruct bitstream slice_type.
 */
-int ff_h264_get_slice_type(const H264Context *h);
+int ff_h264_get_slice_type(const H264SliceContext *sl);

 /**
 * Allocate tables.
@@ -831,7 +832,7 @@ int ff_h264_alloc_tables(H264Context *h);
 /**
 * Fill the default_ref_list.
 */
-int ff_h264_fill_default_ref_list(H264Context *h);
+int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl);

 int ff_h264_decode_ref_pic_list_reordering(H264Context *h);
 void ff_h264_fill_mbaff_ref_list(H264Context *h, H264SliceContext *sl);
@@ -882,7 +883,7 @@ void ff_h264_init_cabac_states(H264Context *h, H264SliceContext *sl);
 void ff_h264_init_dequant_tables(H264Context *h);

 void ff_h264_direct_dist_scale_factor(H264Context *const h);
-void ff_h264_direct_ref_list_init(H264Context *const h);
+void ff_h264_direct_ref_list_init(H264Context *const h, H264SliceContext *sl);
 void ff_h264_pred_direct_motion(H264Context *const h, H264SliceContext *sl,
 int *mb_type);

@@ -1097,7 +1098,7 @@ static av_always_inline void write_back_motion(H264Context *h,
 if (USES_LIST(mb_type, 1))
 write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);

-if (h->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
 if (IS_8X8(mb_type)) {
 uint8_t *direct_table = &h->direct_table[4 * h->mb_xy];
 direct_table[1] = h->sub_mb_type[1] >> 1;
@@ -1158,7 +1159,7 @@ int ff_h264_set_parameter_from_sps(H264Context *h);
 void ff_h264_draw_horiz_band(H264Context *h, int y, int height);
 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc);
 int ff_pred_weight_table(H264Context *h, H264SliceContext *sl);
-int ff_set_ref_count(H264Context *h);
+int ff_set_ref_count(H264Context *h, H264SliceContext *sl);

 int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Context *h0);
 #define SLICE_SINGLETHREAD 1

@@ -1267,7 +1267,7 @@ void ff_h264_init_cabac_states(H264Context *h, H264SliceContext *sl)
 const int8_t (*tab)[2];
 const int slice_qp = av_clip(sl->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51);

-if( h->slice_type_nos == AV_PICTURE_TYPE_I ) tab = cabac_context_init_I;
+if (sl->slice_type_nos == AV_PICTURE_TYPE_I) tab = cabac_context_init_I;
 else tab = cabac_context_init_PB[h->cabac_init_idc];

 /* calculate pre-state */
@@ -1282,13 +1282,14 @@ void ff_h264_init_cabac_states(H264Context *h, H264SliceContext *sl)
 }
 }

-static int decode_cabac_field_decoding_flag(H264Context *h) {
+static int decode_cabac_field_decoding_flag(H264Context *h, H264SliceContext *sl)
+{
 const int mbb_xy = h->mb_xy - 2*h->mb_stride;

 unsigned long ctx = 0;

 ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
-ctx += (h->cur_pic.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
+ctx += (h->cur_pic.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == sl->slice_num);

 return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
 }
@@ -1325,7 +1326,9 @@ static int decode_cabac_intra_mb_type(H264Context *h, H264SliceContext *sl,
 return mb_type;
 }

-static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
+static int decode_cabac_mb_skip(H264Context *h, H264SliceContext *sl,
+int mb_x, int mb_y)
+{
 int mba_xy, mbb_xy;
 int ctx = 0;

@@ -1333,13 +1336,13 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
 int mb_xy = mb_x + (mb_y&~1)*h->mb_stride;
 mba_xy = mb_xy - 1;
 if( (mb_y&1)
-&& h->slice_table[mba_xy] == h->slice_num
+&& h->slice_table[mba_xy] == sl->slice_num
 && MB_FIELD(h) == !!IS_INTERLACED( h->cur_pic.mb_type[mba_xy] ) )
 mba_xy += h->mb_stride;
 if (MB_FIELD(h)) {
 mbb_xy = mb_xy - h->mb_stride;
 if( !(mb_y&1)
-&& h->slice_table[mbb_xy] == h->slice_num
+&& h->slice_table[mbb_xy] == sl->slice_num
 && IS_INTERLACED( h->cur_pic.mb_type[mbb_xy] ) )
 mbb_xy -= h->mb_stride;
 }else
@@ -1350,12 +1353,12 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
 mbb_xy = mb_xy - (h->mb_stride << FIELD_PICTURE(h));
 }

-if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mba_xy] ))
+if( h->slice_table[mba_xy] == sl->slice_num && !IS_SKIP(h->cur_pic.mb_type[mba_xy] ))
 ctx++;
-if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mbb_xy] ))
+if( h->slice_table[mbb_xy] == sl->slice_num && !IS_SKIP(h->cur_pic.mb_type[mbb_xy] ))
 ctx++;

-if( h->slice_type_nos == AV_PICTURE_TYPE_B )
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
 ctx += 13;
 return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
 }
@@ -1466,7 +1469,7 @@ static int decode_cabac_mb_ref(H264Context *h, H264SliceContext *sl, int list, i
 int ref = 0;
 int ctx = 0;

-if( h->slice_type_nos == AV_PICTURE_TYPE_B) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 if( refa > 0 && !(h->direct_cache[scan8[n] - 1]&(MB_TYPE_DIRECT2>>1)) )
 ctx++;
 if( refb > 0 && !(h->direct_cache[scan8[n] - 8]&(MB_TYPE_DIRECT2>>1)) )
@@ -1911,20 +1914,20 @@ int ff_h264_decode_mb_cabac(H264Context *h, H264SliceContext *sl)
 mb_xy = h->mb_xy = h->mb_x + h->mb_y*h->mb_stride;

 tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, h->mb_x, h->mb_y);
-if( h->slice_type_nos != AV_PICTURE_TYPE_I ) {
+if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 int skip;
 /* a skipped mb needs the aff flag from the following mb */
 if (FRAME_MBAFF(h) && (h->mb_y & 1) == 1 && sl->prev_mb_skipped)
 skip = sl->next_mb_skipped;
 else
-skip = decode_cabac_mb_skip( h, h->mb_x, h->mb_y );
+skip = decode_cabac_mb_skip(h, sl, h->mb_x, h->mb_y );
 /* read skip flags */
 if( skip ) {
 if (FRAME_MBAFF(h) && (h->mb_y & 1) == 0) {
 h->cur_pic.mb_type[mb_xy] = MB_TYPE_SKIP;
-sl->next_mb_skipped = decode_cabac_mb_skip( h, h->mb_x, h->mb_y+1 );
+sl->next_mb_skipped = decode_cabac_mb_skip(h, sl, h->mb_x, h->mb_y+1 );
 if(!sl->next_mb_skipped)
-h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
+h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl);
 }

 decode_mb_skip(h, sl);
@@ -1940,16 +1943,16 @@ int ff_h264_decode_mb_cabac(H264Context *h, H264SliceContext *sl)
 if (FRAME_MBAFF(h)) {
 if( (h->mb_y&1) == 0 )
 h->mb_mbaff =
-h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
+h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl);
 }

 sl->prev_mb_skipped = 0;

 fill_decode_neighbors(h, sl, -(MB_FIELD(h)));

-if( h->slice_type_nos == AV_PICTURE_TYPE_B ) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 int ctx = 0;
-av_assert2(h->slice_type_nos == AV_PICTURE_TYPE_B);
+av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_B);

 if (!IS_DIRECT(sl->left_type[LTOP] - 1))
 ctx++;
@@ -1982,7 +1985,7 @@ int ff_h264_decode_mb_cabac(H264Context *h, H264SliceContext *sl)
 }
 partition_count= b_mb_type_info[mb_type].partition_count;
 mb_type= b_mb_type_info[mb_type].type;
-} else if( h->slice_type_nos == AV_PICTURE_TYPE_P ) {
+} else if (sl->slice_type_nos == AV_PICTURE_TYPE_P) {
 if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
 /* P-type */
 if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
@@ -2000,9 +2003,9 @@ int ff_h264_decode_mb_cabac(H264Context *h, H264SliceContext *sl)
 }
 } else {
 mb_type = decode_cabac_intra_mb_type(h, sl, 3, 1);
-if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
+if (sl->slice_type == AV_PICTURE_TYPE_SI && mb_type)
 mb_type--;
-av_assert2(h->slice_type_nos == AV_PICTURE_TYPE_I);
+av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_I);
 decode_intra_mb:
 partition_count = 0;
 cbp= i_mb_type_info[mb_type].cbp;
@@ -2012,7 +2015,7 @@ decode_intra_mb:
 if(MB_FIELD(h))
 mb_type |= MB_TYPE_INTERLACED;

-h->slice_table[ mb_xy ]= h->slice_num;
+h->slice_table[mb_xy] = sl->slice_num;

 if(IS_INTRA_PCM(mb_type)) {
 const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
@@ -2091,7 +2094,7 @@ decode_intra_mb:
 } else if( partition_count == 4 ) {
 int i, j, sub_partition_count[4], list, ref[2][4];

-if( h->slice_type_nos == AV_PICTURE_TYPE_B ) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B ) {
 for( i = 0; i < 4; i++ ) {
 h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h );
 sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;

@@ -716,7 +716,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
 tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, h->mb_x, h->mb_y);
 cbp = 0; /* avoid warning. FIXME: find a solution without slowing
 down the code */
-if(h->slice_type_nos != AV_PICTURE_TYPE_I){
+if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 if(h->mb_skip_run==-1)
 h->mb_skip_run= get_ue_golomb_long(&h->gb);

@@ -737,7 +737,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
 sl->prev_mb_skipped = 0;

 mb_type= get_ue_golomb(&h->gb);
-if(h->slice_type_nos == AV_PICTURE_TYPE_B){
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 if(mb_type < 23){
 partition_count= b_mb_type_info[mb_type].partition_count;
 mb_type= b_mb_type_info[mb_type].type;
@@ -745,7 +745,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
 mb_type -= 23;
 goto decode_intra_mb;
 }
-}else if(h->slice_type_nos == AV_PICTURE_TYPE_P){
+} else if (sl->slice_type_nos == AV_PICTURE_TYPE_P) {
 if(mb_type < 5){
 partition_count= p_mb_type_info[mb_type].partition_count;
 mb_type= p_mb_type_info[mb_type].type;
@@ -754,12 +754,12 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
 goto decode_intra_mb;
 }
 }else{
-av_assert2(h->slice_type_nos == AV_PICTURE_TYPE_I);
-if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
+av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_I);
+if (sl->slice_type == AV_PICTURE_TYPE_SI && mb_type)
 mb_type--;
 decode_intra_mb:
 if(mb_type > 25){
-av_log(h->avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(h->slice_type), h->mb_x, h->mb_y);
+av_log(h->avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(sl->slice_type), h->mb_x, h->mb_y);
 return -1;
 }
 partition_count=0;
@@ -771,7 +771,7 @@ decode_intra_mb:
 if(MB_FIELD(h))
 mb_type |= MB_TYPE_INTERLACED;

-h->slice_table[ mb_xy ]= h->slice_num;
+h->slice_table[mb_xy] = sl->slice_num;

 if(IS_INTRA_PCM(mb_type)){
 const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
@@ -845,7 +845,7 @@ decode_intra_mb:
 }else if(partition_count==4){
 int i, j, sub_partition_count[4], list, ref[2][4];

-if(h->slice_type_nos == AV_PICTURE_TYPE_B){
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 for(i=0; i<4; i++){
 h->sub_mb_type[i]= get_ue_golomb_31(&h->gb);
 if(h->sub_mb_type[i] >=13){
@@ -863,7 +863,7 @@ decode_intra_mb:
 sl->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
 }
 }else{
-av_assert2(h->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ?
+av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ?
 for(i=0; i<4; i++){
 h->sub_mb_type[i]= get_ue_golomb_31(&h->gb);
 if(h->sub_mb_type[i] >=4){

@@ -104,7 +104,7 @@ static void fill_colmap(H264Context *h, int map[2][16 + 32], int list,
 }
 }

-void ff_h264_direct_ref_list_init(H264Context *const h)
+void ff_h264_direct_ref_list_init(H264Context *const h, H264SliceContext *sl)
 {
 H264Picture *const ref1 = &h->ref_list[1][0];
 H264Picture *const cur = h->cur_pic_ptr;
@@ -140,7 +140,7 @@ void ff_h264_direct_ref_list_init(H264Context *const h)
 h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
 }

-if (h->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
+if (sl->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
 return;

 for (list = 0; list < 2; list++) {

@@ -528,7 +528,7 @@ static av_always_inline void xchg_mb_border(H264Context *h, H264SliceContext *sl
 }

 if (h->deblocking_filter == 2) {
-deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num;
+deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == sl->slice_num;
 deblock_top = sl->top_type;
 } else {
 deblock_topleft = (h->mb_x > 0);

@@ -423,22 +423,22 @@ static void fill_decode_neighbors(H264Context *h, H264SliceContext *sl, int mb_t
 sl->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];

 if (FMO) {
-if (h->slice_table[topleft_xy] != h->slice_num)
+if (h->slice_table[topleft_xy] != sl->slice_num)
 sl->topleft_type = 0;
-if (h->slice_table[top_xy] != h->slice_num)
+if (h->slice_table[top_xy] != sl->slice_num)
 sl->top_type = 0;
-if (h->slice_table[left_xy[LTOP]] != h->slice_num)
+if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
 sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
 } else {
-if (h->slice_table[topleft_xy] != h->slice_num) {
+if (h->slice_table[topleft_xy] != sl->slice_num) {
 sl->topleft_type = 0;
-if (h->slice_table[top_xy] != h->slice_num)
+if (h->slice_table[top_xy] != sl->slice_num)
 sl->top_type = 0;
-if (h->slice_table[left_xy[LTOP]] != h->slice_num)
+if (h->slice_table[left_xy[LTOP]] != sl->slice_num)
 sl->left_type[LTOP] = sl->left_type[LBOT] = 0;
 }
 }
-if (h->slice_table[topright_xy] != h->slice_num)
+if (h->slice_table[topright_xy] != sl->slice_num)
 sl->topright_type = 0;
 }

@@ -720,7 +720,7 @@ static void fill_decode_caches(H264Context *h, H264SliceContext *sl, int mb_type
 }
 AV_ZERO16(mvd_cache[2 + 8 * 0]);
 AV_ZERO16(mvd_cache[2 + 8 * 2]);
-if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 uint8_t *direct_cache = &h->direct_cache[scan8[0]];
 uint8_t *direct_table = h->direct_table;
 fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16 >> 1, 1);
@@ -810,7 +810,7 @@ static void av_unused decode_mb_skip(H264Context *h, H264SliceContext *sl)
 if (MB_FIELD(h))
 mb_type |= MB_TYPE_INTERLACED;

-if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 // just for fill_caches. pred_direct_motion will set the real mb_type
 mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 | MB_TYPE_SKIP;
 if (h->direct_spatial_mv_pred) {
@@ -829,7 +829,7 @@ static void av_unused decode_mb_skip(H264Context *h, H264SliceContext *sl)
 write_back_motion(h, sl, mb_type);
 h->cur_pic.mb_type[mb_xy] = mb_type;
 h->cur_pic.qscale_table[mb_xy] = sl->qscale;
-h->slice_table[mb_xy] = h->slice_num;
+h->slice_table[mb_xy] = sl->slice_num;
 sl->prev_mb_skipped = 1;
 }

@@ -135,15 +135,15 @@ static int scan_mmco_reset(AVCodecParserContext *s)
 H264Context *h = &p->h;
 H264SliceContext *sl = &h->slice_ctx[0];

-h->slice_type_nos = s->pict_type & 3;
+sl->slice_type_nos = s->pict_type & 3;

 if (h->pps.redundant_pic_cnt_present)
 get_ue_golomb(&h->gb); // redundant_pic_count

-if (ff_set_ref_count(h) < 0)
+if (ff_set_ref_count(h, sl) < 0)
 return AVERROR_INVALIDDATA;

-if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
+if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 int list;
 for (list = 0; list < h->list_count; list++) {
 if (get_bits1(&h->gb)) {
@@ -171,8 +171,8 @@ static int scan_mmco_reset(AVCodecParserContext *s)
 }
 }

-if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
-(h->pps.weighted_bipred_idc == 1 && h->slice_type_nos == AV_PICTURE_TYPE_B))
+if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
+(h->pps.weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B))
 ff_pred_weight_table(h, sl);

 if (get_bits1(&h->gb)) { // adaptive_ref_pic_marking_mode_flag

@@ -112,11 +112,11 @@ static int add_sorted(H264Picture **sorted, H264Picture **src, int len, int limi
 return out_i;
 }

-int ff_h264_fill_default_ref_list(H264Context *h)
+int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
 {
 int i, len;

-if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 H264Picture *sorted[32];
 int cur_poc, list;
 int lens[2];
@@ -172,7 +172,7 @@ int ff_h264_fill_default_ref_list(H264Context *h)
 h->default_ref_list[0][i].pic_id,
 h->default_ref_list[0][i].f.data[0]);
 }
-if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 for (i = 0; i < h->ref_count[1]; i++) {
 tprintf(h->avctx, "List1: %s fn:%d 0x%p\n",
 (h->default_ref_list[1][i].long_ref ? "LT" : "ST"),

@@ -1326,31 +1326,31 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 }
 if (slice_type > 4) {
 slice_type -= 5;
-h->slice_type_fixed = 1;
+sl->slice_type_fixed = 1;
 } else
-h->slice_type_fixed = 0;
+sl->slice_type_fixed = 0;

 slice_type = golomb_to_pict_type[slice_type];
-h->slice_type = slice_type;
-h->slice_type_nos = slice_type & 3;
+sl->slice_type = slice_type;
+sl->slice_type_nos = slice_type & 3;

 if (h->nal_unit_type == NAL_IDR_SLICE &&
-h->slice_type_nos != AV_PICTURE_TYPE_I) {
+sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
 return AVERROR_INVALIDDATA;
 }

 if (
 (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
-(h->avctx->skip_frame >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B) ||
-(h->avctx->skip_frame >= AVDISCARD_NONINTRA && h->slice_type_nos != AV_PICTURE_TYPE_I) ||
+(h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
+(h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
 (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != NAL_IDR_SLICE) ||
 h->avctx->skip_frame >= AVDISCARD_ALL) {
 return SLICE_SKIPED;
 }

 // to make a few old functions happy, it's wrong though
-h->pict_type = h->slice_type;
+h->pict_type = sl->slice_type;

 pps_id = get_ue_golomb(&h->gb);
 if (pps_id >= MAX_PPS_COUNT) {
@@ -1794,7 +1794,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 if (h->pps.redundant_pic_cnt_present)
 h->redundant_pic_count = get_ue_golomb(&h->gb);

-ret = ff_set_ref_count(h);
+ret = ff_set_ref_count(h, sl);
 if (ret < 0)
 return ret;

@@ -1803,10 +1803,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 slice_type != h0->last_slice_type ||
 memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {

-ff_h264_fill_default_ref_list(h);
+ff_h264_fill_default_ref_list(h, sl);
 }

-if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
+if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 ret = ff_h264_decode_ref_pic_list_reordering(h);
 if (ret < 0) {
 h->ref_count[1] = h->ref_count[0] = 0;
@@ -1814,12 +1814,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 }
 }

-if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
+if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
 (h->pps.weighted_bipred_idc == 1 &&
-h->slice_type_nos == AV_PICTURE_TYPE_B))
+sl->slice_type_nos == AV_PICTURE_TYPE_B))
 ff_pred_weight_table(h, sl);
 else if (h->pps.weighted_bipred_idc == 2 &&
-h->slice_type_nos == AV_PICTURE_TYPE_B) {
+sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 implicit_weight_table(h, sl, -1);
 } else {
 sl->use_weight = 0;
@@ -1845,17 +1845,17 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 if (FRAME_MBAFF(h)) {
 ff_h264_fill_mbaff_ref_list(h, sl);

-if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) {
+if (h->pps.weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) {
 implicit_weight_table(h, sl, 0);
 implicit_weight_table(h, sl, 1);
 }
 }

-if (h->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred)
+if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred)
 ff_h264_direct_dist_scale_factor(h);
-ff_h264_direct_ref_list_init(h);
+ff_h264_direct_ref_list_init(h, sl);

-if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
+if (sl->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
 tmp = get_ue_golomb_31(&h->gb);
 if (tmp > 2) {
 av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
@@ -1874,10 +1874,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
 sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
 // FIXME qscale / qp ... stuff
-if (h->slice_type == AV_PICTURE_TYPE_SP)
+if (sl->slice_type == AV_PICTURE_TYPE_SP)
 get_bits1(&h->gb); /* sp_for_switch_flag */
-if (h->slice_type == AV_PICTURE_TYPE_SP ||
-h->slice_type == AV_PICTURE_TYPE_SI)
+if (sl->slice_type == AV_PICTURE_TYPE_SP ||
+sl->slice_type == AV_PICTURE_TYPE_SI)
 get_se_golomb(&h->gb); /* slice_qs_delta */

 h->deblocking_filter = 1;
@@ -1913,9 +1913,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
 h->nal_unit_type != NAL_IDR_SLICE) ||
 (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
-h->slice_type_nos != AV_PICTURE_TYPE_I) ||
+sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
 (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
-h->slice_type_nos == AV_PICTURE_TYPE_B) ||
+sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
 (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
 h->nal_ref_idc == 0))
 h->deblocking_filter = 0;
@@ -1951,20 +1951,20 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex

 h0->last_slice_type = slice_type;
 memcpy(h0->last_ref_count, h0->ref_count, sizeof(h0->last_ref_count));
-h->slice_num = ++h0->current_slice;
+sl->slice_num = ++h0->current_slice;

-if (h->slice_num)
-h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y;
-if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y
-&& h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y
-&& h->slice_num >= MAX_SLICES) {
+if (sl->slice_num)
+h0->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y;
+if ( h0->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y
+&& h0->slice_row[sl->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y
+&& sl->slice_num >= MAX_SLICES) {
 //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
-av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES);
+av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
 }

 for (j = 0; j < 2; j++) {
 int id_list[16];
-int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
+int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
 for (i = 0; i < 16; i++) {
 id_list[i] = 60;
 if (j < h->list_count && i < h->ref_count[j] &&
@@ -2003,11 +2003,11 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
 av_log(h->avctx, AV_LOG_DEBUG,
 "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
-h->slice_num,
+sl->slice_num,
 (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
 first_mb_in_slice,
-av_get_picture_type_char(h->slice_type),
-h->slice_type_fixed ? " fix" : "",
+av_get_picture_type_char(sl->slice_type),
+sl->slice_type_fixed ? " fix" : "",
 h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
 pps_id, h->frame_num,
 h->cur_pic_ptr->field_poc[0],
@@ -2018,15 +2018,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
 h->slice_alpha_c0_offset, h->slice_beta_offset,
 sl->use_weight,
 sl->use_weight == 1 && sl->use_weight_chroma ? "c" : "",
-h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
+sl->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
 }

 return 0;
 }

-int ff_h264_get_slice_type(const H264Context *h)
+int ff_h264_get_slice_type(const H264SliceContext *sl)
 {
-switch (h->slice_type) {
+switch (sl->slice_type) {
 case AV_PICTURE_TYPE_P:
 return 0;
 case AV_PICTURE_TYPE_B:
@@ -2105,7 +2105,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,

 {
 int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
-int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
+int (*ref2frm)[64] = (void*)(h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
 uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
 uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
 AV_WN32A(&ref_cache[0 * 8], ref01);
@@ -2184,9 +2184,9 @@ static int fill_filter_caches(H264Context *h, H264SliceContext *sl, int mb_type)
 left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
 left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
 if (h->deblocking_filter == 2) {
-if (h->slice_table[top_xy] != h->slice_num)
+if (h->slice_table[top_xy] != sl->slice_num)
 top_type = 0;
-if (h->slice_table[left_xy[LBOT]] != h->slice_num)
+if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
 left_type[LTOP] = left_type[LBOT] = 0;
 } else {
 if (h->slice_table[top_xy] == 0xFFFF)
@@ -2277,7 +2277,7 @@ static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int e
 uint8_t *dest_y, *dest_cb, *dest_cr;
 int linesize, uvlinesize, mb_x, mb_y;
 const int end_mb_y = h->mb_y + FRAME_MBAFF(h);
-const int old_slice_type = h->slice_type;
+const int old_slice_type = sl->slice_type;
 const int pixel_shift = h->pixel_shift;
 const int block_h = 16 >> h->chroma_y_shift;

@@ -2286,7 +2286,7 @@ static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int e
 for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
 int mb_xy, mb_type;
 mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
-h->slice_num = h->slice_table[mb_xy];
+sl->slice_num = h->slice_table[mb_xy];
 mb_type = h->cur_pic.mb_type[mb_xy];
 h->list_count = h->list_counts[mb_xy];

@@ -2334,19 +2334,19 @@ static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int e
 }
 }
 }
-h->slice_type = old_slice_type;
+sl->slice_type = old_slice_type;
 h->mb_x = end_x;
 h->mb_y = end_mb_y - FRAME_MBAFF(h);
 sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
 sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
 }

-static void predict_field_decoding_flag(H264Context *h)
+static void predict_field_decoding_flag(H264Context *h, H264SliceContext *sl)
 {
 const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
-int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
+int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
 h->cur_pic.mb_type[mb_xy - 1] :
-(h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
+(h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
 h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
 h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
 }
@@ -2479,7 +2479,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
 if (FIELD_OR_MBAFF_PICTURE(h)) {
 ++h->mb_y;
 if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
-predict_field_decoding_flag(h);
+predict_field_decoding_flag(h, sl);
 }
 }

@@ -2526,7 +2526,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
 if (FIELD_OR_MBAFF_PICTURE(h)) {
 ++h->mb_y;
 if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
-predict_field_decoding_flag(h);
+predict_field_decoding_flag(h, sl);
 }
 if (h->mb_y >= h->mb_height) {
 tprintf(h->avctx, "slice end %d %d\n",

@@ -824,7 +824,7 @@ static int svq3_decode_slice_header(AVCodecContext *avctx)
 return -1;
 }

-h->slice_type = golomb_to_pict_type[slice_id];
+sl->slice_type = golomb_to_pict_type[slice_id];

 if ((header & 0x9F) == 2) {
 i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
@@ -835,8 +835,8 @@ static int svq3_decode_slice_header(AVCodecContext *avctx)
 h->mb_skip_run = 0;
 }

-h->slice_num = get_bits(&h->gb, 8);
+sl->slice_num = get_bits(&h->gb, 8);
 sl->qscale = get_bits(&h->gb, 5);
 s->adaptive_quant = get_bits1(&h->gb);

 /* unknown fields */
@@ -1167,7 +1167,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
 if (svq3_decode_slice_header(avctx))
 return -1;

-h->pict_type = h->slice_type;
+h->pict_type = sl->slice_type;

 if (h->pict_type != AV_PICTURE_TYPE_B)
 FFSWAP(H264Picture*, s->next_pic, s->last_pic);
@@ -1233,7 +1233,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
 av_get_picture_type_char(h->pict_type),
 s->halfpel_flag, s->thirdpel_flag,
-s->adaptive_quant, h->slice_ctx[0].qscale, h->slice_num);
+s->adaptive_quant, h->slice_ctx[0].qscale, sl->slice_num);

 if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
 avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
@@ -1248,7 +1248,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
 }

 if (h->pict_type == AV_PICTURE_TYPE_B) {
-h->frame_num_offset = h->slice_num - h->prev_frame_num;
+h->frame_num_offset = sl->slice_num - h->prev_frame_num;

 if (h->frame_num_offset < 0)
 h->frame_num_offset += 256;
@@ -1259,7 +1259,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
 }
 } else {
 h->prev_frame_num = h->frame_num;
-h->frame_num = h->slice_num;
+h->frame_num = sl->slice_num;
 h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;

 if (h->prev_frame_num_offset < 0)

@@ -329,8 +329,8 @@ static int vaapi_h264_decode_slice(AVCodecContext *avctx,
 return -1;
 slice_param->slice_data_bit_offset = get_bits_count(&h->gb) + 8; /* bit buffer started beyond nal_unit_type */
 slice_param->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + h->mb_x;
-slice_param->slice_type = ff_h264_get_slice_type(h);
-slice_param->direct_spatial_mv_pred_flag = h->slice_type == AV_PICTURE_TYPE_B ? h->direct_spatial_mv_pred : 0;
+slice_param->slice_type = ff_h264_get_slice_type(sl);
+slice_param->direct_spatial_mv_pred_flag = sl->slice_type == AV_PICTURE_TYPE_B ? h->direct_spatial_mv_pred : 0;
 slice_param->num_ref_idx_l0_active_minus1 = h->list_count > 0 ? h->ref_count[0] - 1 : 0;
 slice_param->num_ref_idx_l1_active_minus1 = h->list_count > 1 ? h->ref_count[1] - 1 : 0;
 slice_param->cabac_init_idc = h->cabac_init_idc;

@@ -468,7 +468,7 @@ void ff_vdpau_h264_picture_complete(H264Context *h)
 render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
 assert(render);

-render->info.h264.slice_count = h->slice_num;
+render->info.h264.slice_count = h->current_slice;
 if (render->info.h264.slice_count < 1)
 return;