vp9/decoder,vp9/*.[hc]: apply clang-format

Change-Id: Ic38ea06c7b2fb3e8e94a4c0910e82672a1acaea7
Authored by clang-format on 2016-07-26 20:20:13 -07:00, committed by James Zern
parent 8ff40f8bec
commit 08131055e4
16 changed files with 1011 additions and 1241 deletions
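
The change is mechanical: clang-format reflows long argument lists against the 80-column limit and collapses short guarded statements onto a single line, as in the read_intra_mode_y() hunk below. A minimal stand-alone sketch of that before/after pattern (toy_counts and count_y_mode are illustrative stand-ins, not types or functions from the tree):

#include <stdio.h>

/* toy_counts is a hypothetical stand-in for the decoder's FRAME_COUNTS
 * bookkeeping; it exists only to make the snippet compile. */
typedef struct { int y_mode[4][10]; } toy_counts;

static void count_y_mode(toy_counts *counts, int size_group, int y_mode) {
  /* Pre-format layout:
   *   if (counts)
   *     ++counts->y_mode[size_group][y_mode];
   * Post-format layout produced by this commit: the short conditional is
   * collapsed onto one line. */
  if (counts) ++counts->y_mode[size_group][y_mode];
}

int main(void) {
  toy_counts c = { { { 0 } } };
  count_y_mode(&c, 0, 3);
  count_y_mode(NULL, 0, 3); /* a NULL counts pointer is simply skipped */
  printf("y_mode[0][3] = %d\n", c.y_mode[0][3]);
  return 0;
}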

File diff suppressed because it is too large

vp9/decoder/vp9_decodeframe.h

@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_DECODER_VP9_DECODEFRAME_H_
#define VP9_DECODER_VP9_DECODEFRAME_H_
@ -22,13 +21,12 @@ struct VP9Decoder;
struct vpx_read_bit_buffer;
int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb);
void vp9_read_frame_size(struct vpx_read_bit_buffer *rb,
int *width, int *height);
void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
int *height);
BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb);
void vp9_decode_frame(struct VP9Decoder *pbi,
const uint8_t *data, const uint8_t *data_end,
const uint8_t **p_data_end);
void vp9_decode_frame(struct VP9Decoder *pbi, const uint8_t *data,
const uint8_t *data_end, const uint8_t **p_data_end);
#ifdef __cplusplus
} // extern "C"

vp9/decoder/vp9_decodemv.c

@ -33,29 +33,26 @@ static PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, MACROBLOCKD *xd,
const PREDICTION_MODE y_mode =
read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
if (counts)
++counts->y_mode[size_group][y_mode];
if (counts) ++counts->y_mode[size_group][y_mode];
return y_mode;
}
static PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, MACROBLOCKD *xd,
vpx_reader *r,
PREDICTION_MODE y_mode) {
const PREDICTION_MODE uv_mode = read_intra_mode(r,
cm->fc->uv_mode_prob[y_mode]);
const PREDICTION_MODE uv_mode =
read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
FRAME_COUNTS *counts = xd->counts;
if (counts)
++counts->uv_mode[y_mode][uv_mode];
if (counts) ++counts->uv_mode[y_mode][uv_mode];
return uv_mode;
}
static PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, MACROBLOCKD *xd,
vpx_reader *r, int ctx) {
const int mode = vpx_read_tree(r, vp9_inter_mode_tree,
cm->fc->inter_mode_probs[ctx]);
const int mode =
vpx_read_tree(r, vp9_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts)
++counts->inter_mode[ctx][mode];
if (counts) ++counts->inter_mode[ctx][mode];
return NEARESTMV + mode;
}
@ -76,8 +73,7 @@ static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
tx_size += vpx_read(r, tx_probs[2]);
}
if (counts)
++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
if (counts) ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
return (TX_SIZE)tx_size;
}
@ -105,8 +101,8 @@ static int dec_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids,
return segment_id;
}
static void set_segment_id(VP9_COMMON *cm, int mi_offset,
int x_mis, int y_mis, int segment_id) {
static void set_segment_id(VP9_COMMON *cm, int mi_offset, int x_mis, int y_mis,
int segment_id) {
int x, y;
assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
@ -117,25 +113,24 @@ static void set_segment_id(VP9_COMMON *cm, int mi_offset,
}
static void copy_segment_id(const VP9_COMMON *cm,
const uint8_t *last_segment_ids,
uint8_t *current_segment_ids,
int mi_offset, int x_mis, int y_mis) {
const uint8_t *last_segment_ids,
uint8_t *current_segment_ids, int mi_offset,
int x_mis, int y_mis) {
int x, y;
for (y = 0; y < y_mis; y++)
for (x = 0; x < x_mis; x++)
current_segment_ids[mi_offset + y * cm->mi_cols + x] = last_segment_ids ?
last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0;
current_segment_ids[mi_offset + y * cm->mi_cols + x] =
last_segment_ids ? last_segment_ids[mi_offset + y * cm->mi_cols + x]
: 0;
}
static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset,
int x_mis, int y_mis,
vpx_reader *r) {
static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset, int x_mis,
int y_mis, vpx_reader *r) {
struct segmentation *const seg = &cm->seg;
int segment_id;
if (!seg->enabled)
return 0; // Default for disabled segmentation
if (!seg->enabled) return 0; // Default for disabled segmentation
if (!seg->update_map) {
copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
@ -156,12 +151,12 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
int predicted_segment_id, segment_id;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
if (!seg->enabled)
return 0; // Default for disabled segmentation
if (!seg->enabled) return 0; // Default for disabled segmentation
predicted_segment_id = cm->last_frame_seg_map ?
dec_get_segment_id(cm, cm->last_frame_seg_map, mi_offset, x_mis, y_mis) :
0;
predicted_segment_id = cm->last_frame_seg_map
? dec_get_segment_id(cm, cm->last_frame_seg_map,
mi_offset, x_mis, y_mis)
: 0;
if (!seg->update_map) {
copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
@ -172,8 +167,8 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
if (seg->temporal_update) {
const vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
mi->seg_id_predicted = vpx_read(r, pred_prob);
segment_id = mi->seg_id_predicted ? predicted_segment_id
: read_segment_id(r, seg);
segment_id =
mi->seg_id_predicted ? predicted_segment_id : read_segment_id(r, seg);
} else {
segment_id = read_segment_id(r, seg);
}
@ -181,27 +176,26 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
return segment_id;
}
static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd,
int segment_id, vpx_reader *r) {
static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
vpx_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int ctx = vp9_get_skip_context(xd);
const int skip = vpx_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts)
++counts->skip[ctx][skip];
if (counts) ++counts->skip[ctx][skip];
return skip;
}
}
static void read_intra_frame_mode_info(VP9_COMMON *const cm,
MACROBLOCKD *const xd,
int mi_row, int mi_col, vpx_reader *r,
int x_mis, int y_mis) {
MACROBLOCKD *const xd, int mi_row,
int mi_col, vpx_reader *r, int x_mis,
int y_mis) {
MODE_INFO *const mi = xd->mi[0];
const MODE_INFO *above_mi = xd->above_mi;
const MODE_INFO *left_mi = xd->left_mi;
const MODE_INFO *left_mi = xd->left_mi;
const BLOCK_SIZE bsize = mi->sb_type;
int i;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
@ -232,15 +226,14 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm,
read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2));
break;
default:
mi->mode = read_intra_mode(r,
get_y_mode_probs(mi, above_mi, left_mi, 0));
mi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
}
mi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mi->mode]);
}
static int read_mv_component(vpx_reader *r,
const nmv_component *mvcomp, int usehp) {
static int read_mv_component(vpx_reader *r, const nmv_component *mvcomp,
int usehp) {
int mag, d, fr, hp;
const int sign = vpx_read(r, mvcomp->sign);
const int mv_class = vpx_read_tree(r, vp9_mv_class_tree, mvcomp->classes);
@ -255,18 +248,16 @@ static int read_mv_component(vpx_reader *r,
const int n = mv_class + CLASS0_BITS - 1; // number of bits
d = 0;
for (i = 0; i < n; ++i)
d |= vpx_read(r, mvcomp->bits[i]) << i;
for (i = 0; i < n; ++i) d |= vpx_read(r, mvcomp->bits[i]) << i;
mag = CLASS0_SIZE << (mv_class + 2);
}
// Fractional part
fr = vpx_read_tree(r, vp9_mv_fp_tree, class0 ? mvcomp->class0_fp[d]
: mvcomp->fp);
fr = vpx_read_tree(r, vp9_mv_fp_tree,
class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
// High precision part (if hp is not used, the default value of the hp is 1)
hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
: 1;
hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
@ -274,12 +265,12 @@ static int read_mv_component(vpx_reader *r,
}
static INLINE void read_mv(vpx_reader *r, MV *mv, const MV *ref,
const nmv_context *ctx,
nmv_context_counts *counts, int allow_hp) {
const nmv_context *ctx, nmv_context_counts *counts,
int allow_hp) {
const MV_JOINT_TYPE joint_type =
(MV_JOINT_TYPE)vpx_read_tree(r, vp9_mv_joint_tree, ctx->joints);
const int use_hp = allow_hp && use_mv_hp(ref);
MV diff = {0, 0};
MV diff = { 0, 0 };
if (mv_joint_vertical(joint_type))
diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
@ -301,8 +292,7 @@ static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
const REFERENCE_MODE mode =
(REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts)
++counts->comp_inter[ctx][mode];
if (counts) ++counts->comp_inter[ctx][mode];
return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
} else {
return cm->reference_mode;
@ -311,8 +301,8 @@ static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
// Read the referncence frame
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
vpx_reader *r,
int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
vpx_reader *r, int segment_id,
MV_REFERENCE_FRAME ref_frame[2]) {
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = xd->counts;
@ -327,20 +317,17 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
const int bit = vpx_read(r, fc->comp_ref_prob[ctx]);
if (counts)
++counts->comp_ref[ctx][bit];
if (counts) ++counts->comp_ref[ctx][bit];
ref_frame[idx] = cm->comp_fixed_ref;
ref_frame[!idx] = cm->comp_var_ref[bit];
} else if (mode == SINGLE_REFERENCE) {
const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
const int bit0 = vpx_read(r, fc->single_ref_prob[ctx0][0]);
if (counts)
++counts->single_ref[ctx0][0][bit0];
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]);
if (counts)
++counts->single_ref[ctx1][1][bit1];
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
} else {
ref_frame[0] = LAST_FRAME;
@ -353,16 +340,14 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
}
}
static INLINE INTERP_FILTER read_switchable_interp_filter(
VP9_COMMON *const cm, MACROBLOCKD *const xd,
vpx_reader *r) {
static INLINE INTERP_FILTER read_switchable_interp_filter(VP9_COMMON *const cm,
MACROBLOCKD *const xd,
vpx_reader *r) {
const int ctx = get_pred_context_switchable_interp(xd);
const INTERP_FILTER type =
(INTERP_FILTER)vpx_read_tree(r, vp9_switchable_interp_tree,
cm->fc->switchable_interp_prob[ctx]);
const INTERP_FILTER type = (INTERP_FILTER)vpx_read_tree(
r, vp9_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts)
++counts->switchable_interp[ctx][type];
if (counts) ++counts->switchable_interp[ctx][type];
return type;
}
@ -379,19 +364,16 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm,
mi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd,
r, 0);
mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, r, 0);
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mi->mode =
read_intra_mode_y(cm, xd, r, 0);
break;
case BLOCK_8X4:
mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd,
r, 0);
mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, r, 0);
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mi->mode =
read_intra_mode_y(cm, xd, r, 0);
break;
default:
mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]);
default: mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]);
}
mi->uv_mode = read_intra_mode_uv(cm, xd, r, mi->mode);
@ -405,8 +387,8 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm,
}
static INLINE int is_mv_valid(const MV *mv) {
return mv->row > MV_LOW && mv->row < MV_UPP &&
mv->col > MV_LOW && mv->col < MV_UPP;
return mv->row > MV_LOW && mv->row < MV_UPP && mv->col > MV_LOW &&
mv->col < MV_UPP;
}
static INLINE void copy_mv_pair(int_mv *dst, const int_mv *src) {
@ -418,9 +400,8 @@ static INLINE void zero_mv_pair(int_mv *dst) {
}
static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd,
PREDICTION_MODE mode,
int_mv mv[2], int_mv ref_mv[2],
int_mv near_nearest_mv[2],
PREDICTION_MODE mode, int_mv mv[2],
int_mv ref_mv[2], int_mv near_nearest_mv[2],
int is_compound, int allow_hp, vpx_reader *r) {
int i;
int ret = 1;
@ -445,9 +426,7 @@ static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd,
zero_mv_pair(mv);
break;
}
default: {
return 0;
}
default: { return 0; }
}
return ret;
}
@ -460,8 +439,7 @@ static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
const int ctx = get_intra_inter_context(xd);
const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts)
++counts->intra_inter[ctx][is_inter];
if (counts) ++counts->intra_inter[ctx][is_inter];
return is_inter;
}
}
@ -487,35 +465,33 @@ static void fpm_sync(void *const data, int mi_row) {
// already in the list. If it's the second motion vector or early_break
// it will also skip all additional processing and jump to Done!
#define ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done) \
do { \
if (refmv_count) { \
if ((mv).as_int != (mv_ref_list)[0].as_int) { \
(mv_ref_list)[(refmv_count)] = (mv); \
refmv_count++; \
goto Done; \
} \
} else { \
(mv_ref_list)[(refmv_count)++] = (mv); \
if (early_break) \
goto Done; \
} \
do { \
if (refmv_count) { \
if ((mv).as_int != (mv_ref_list)[0].as_int) { \
(mv_ref_list)[(refmv_count)] = (mv); \
refmv_count++; \
goto Done; \
} \
} else { \
(mv_ref_list)[(refmv_count)++] = (mv); \
if (early_break) goto Done; \
} \
} while (0)
// If either reference frame is different, not INTRA, and they
// are different from each other scale and add the mv to our list.
#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \
refmv_count, mv_ref_list, Done) \
do { \
if (is_inter_block(mbmi)) { \
if ((mbmi)->ref_frame[0] != ref_frame) \
#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \
refmv_count, mv_ref_list, Done) \
do { \
if (is_inter_block(mbmi)) { \
if ((mbmi)->ref_frame[0] != ref_frame) \
ADD_MV_REF_LIST_EB(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, Done); \
if (has_second_ref(mbmi) && \
(mbmi)->ref_frame[1] != ref_frame && \
(mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
refmv_count, mv_ref_list, Done); \
if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != ref_frame && \
(mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
ADD_MV_REF_LIST_EB(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, Done); \
} \
refmv_count, mv_ref_list, Done); \
} \
} while (0)
// This function searches the neighborhood of a given MB/SB
@ -523,14 +499,16 @@ static void fpm_sync(void *const data, int mi_row) {
static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame,
const POSITION *const mv_ref_search,
int_mv *mv_ref_list,
int mi_row, int mi_col, int block, int is_sub8x8,
find_mv_refs_sync sync, void *const data) {
int_mv *mv_ref_list, int mi_row, int mi_col,
int block, int is_sub8x8, find_mv_refs_sync sync,
void *const data) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
int different_ref_found = 0;
const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL;
const MV_REF *const prev_frame_mvs =
cm->use_prev_frame_mvs
? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col
: NULL;
const TileInfo *const tile = &xd->tile;
// If mode is nearestmv or newmv (uses nearestmv as a reference) then stop
// searching after the first mv is found.
@ -553,11 +531,11 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
if (candidate_mi->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST_EB(
get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
refmv_count, mv_ref_list, Done);
else if (candidate_mi->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST_EB(
get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
refmv_count, mv_ref_list, Done);
}
}
}
@ -579,14 +557,14 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
}
}
// TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast
// on windows platform. The sync here is unnecessary if use_prev_frame_mvs
// is 0. But after removing it, there will be hang in the unit test on windows
// due to several threads waiting for a thread's signal.
// TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast
// on windows platform. The sync here is unnecessary if use_prev_frame_mvs
// is 0. But after removing it, there will be hang in the unit test on windows
// due to several threads waiting for a thread's signal.
#if defined(_WIN32) && !HAVE_PTHREAD_H
if (cm->frame_parallel_decode && sync != NULL) {
sync(data, mi_row);
}
if (cm->frame_parallel_decode && sync != NULL) {
sync(data, mi_row);
}
#endif
// Check the last frame's mode and mv info.
@ -652,10 +630,9 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
// we only care about the nearestmv for the remaining modes
refmv_count = 1;
Done:
Done:
// Clamp vectors
for (i = 0; i < refmv_count; ++i)
clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
for (i = 0; i < refmv_count; ++i) clamp_mv_ref(&mv_ref_list[i].as_mv, xd);
return refmv_count;
}
@ -673,14 +650,12 @@ static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
assert(MAX_MV_REF_CANDIDATES == 2);
refmv_count = dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref],
mv_ref_search, mv_list, mi_row, mi_col, block,
1, NULL, NULL);
refmv_count =
dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref], mv_ref_search,
mv_list, mi_row, mi_col, block, 1, NULL, NULL);
switch (block) {
case 0:
best_sub8x8->as_int = mv_list[refmv_count - 1].as_int;
break;
case 0: best_sub8x8->as_int = mv_list[refmv_count - 1].as_int; break;
case 1:
case 2:
if (b_mode == NEARESTMV) {
@ -711,14 +686,13 @@ static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
}
}
break;
default:
assert(0 && "Invalid block index.");
default: assert(0 && "Invalid block index.");
}
}
static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const POSITION *const mv_ref_search,
int mi_row, int mi_col) {
const POSITION *const mv_ref_search, int mi_row,
int mi_col) {
int i;
int context_counter = 0;
const TileInfo *const tile = &xd->tile;
@ -739,8 +713,8 @@ static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd,
static void read_inter_block_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
MODE_INFO *const mi,
int mi_row, int mi_col, vpx_reader *r) {
MODE_INFO *const mi, int mi_row,
int mi_col, vpx_reader *r) {
VP9_COMMON *const cm = &pbi->common;
const BLOCK_SIZE bsize = mi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
@ -756,9 +730,9 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
mi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid usage of segement feature on small blocks");
return;
vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid usage of segement feature on small blocks");
return;
}
} else {
if (bsize >= BLOCK_8X8)
@ -776,9 +750,9 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
int refmv_count;
refmv_count = dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search,
tmp_mvs, mi_row, mi_col, -1, 0,
fpm_sync, (void *)pbi);
refmv_count =
dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search, tmp_mvs,
mi_row, mi_col, -1, 0, fpm_sync, (void *)pbi);
dec_find_best_ref_mvs(allow_hp, tmp_mvs, &best_ref_mvs[ref],
refmv_count);
@ -787,8 +761,8 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
}
mi->interp_filter = (cm->interp_filter == SWITCHABLE)
? read_switchable_interp_filter(cm, xd, r)
: cm->interp_filter;
? read_switchable_interp_filter(cm, xd, r)
: cm->interp_filter;
if (bsize < BLOCK_8X8) {
const int num_4x4_w = 1 << xd->bmode_blocks_wl;
@ -813,10 +787,8 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
break;
}
if (num_4x4_h == 2)
mi->bmi[j + 2] = mi->bmi[j];
if (num_4x4_w == 2)
mi->bmi[j + 1] = mi->bmi[j];
if (num_4x4_h == 2) mi->bmi[j + 2] = mi->bmi[j];
if (num_4x4_w == 2) mi->bmi[j + 1] = mi->bmi[j];
}
}
@ -830,15 +802,15 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi,
}
static void read_inter_frame_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
int mi_row, int mi_col, vpx_reader *r,
int x_mis, int y_mis) {
MACROBLOCKD *const xd, int mi_row,
int mi_col, vpx_reader *r, int x_mis,
int y_mis) {
VP9_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
int inter_block;
mi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r, x_mis,
y_mis);
mi->segment_id =
read_inter_segment_id(cm, xd, mi_row, mi_col, r, x_mis, y_mis);
mi->skip = read_skip(cm, xd, mi->segment_id, r);
inter_block = read_is_inter_block(cm, xd, mi->segment_id, r);
mi->tx_size = read_tx_size(cm, xd, !mi->skip || !inter_block, r);
@ -854,12 +826,11 @@ static INLINE void copy_ref_frame_pair(MV_REFERENCE_FRAME *dst,
memcpy(dst, src, sizeof(*dst) * 2);
}
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
int mi_row, int mi_col, vpx_reader *r,
int x_mis, int y_mis) {
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
int mi_col, vpx_reader *r, int x_mis, int y_mis) {
VP9_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MV_REF* frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
if (frame_is_intra_only(cm)) {
@ -876,7 +847,7 @@ void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
frame_mvs += cm->mi_cols;
}
}
#if 0 // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
#if 0 // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
(xd->above_mi == NULL || xd->left_mi == NULL) &&
!is_inter_block(mi) && need_top_left[mi->uv_mode])

vp9/decoder/vp9_decodemv.h

@ -19,9 +19,8 @@
extern "C" {
#endif
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
int mi_row, int mi_col, vpx_reader *r,
int x_mis, int y_mis);
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
int mi_col, vpx_reader *r, int x_mis, int y_mis);
#ifdef __cplusplus
} // extern "C"

vp9/decoder/vp9_decoder.c

@ -57,12 +57,10 @@ static void vp9_dec_setup_mi(VP9_COMMON *cm) {
static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip)
return 1;
if (!cm->mip) return 1;
cm->mi_alloc_size = mi_size;
cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
if (!cm->mi_grid_base)
return 1;
cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
if (!cm->mi_grid_base) return 1;
return 0;
}
@ -77,8 +75,7 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
if (!cm)
return NULL;
if (!cm) return NULL;
vp9_zero(*pbi);
@ -90,11 +87,10 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
cm->error.setjmp = 1;
CHECK_MEM_ERROR(cm, cm->fc,
(FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
CHECK_MEM_ERROR(cm, cm->frame_contexts,
(FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
sizeof(*cm->frame_contexts)));
CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
CHECK_MEM_ERROR(
cm, cm->frame_contexts,
(FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
pbi->need_resync = 1;
once(initialize_dec);
@ -126,8 +122,7 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
void vp9_decoder_remove(VP9Decoder *pbi) {
int i;
if (!pbi)
return;
if (!pbi) return;
vpx_get_worker_interface()->end(&pbi->lf_worker);
vpx_free(pbi->lf_worker.data1);
@ -149,8 +144,8 @@ void vp9_decoder_remove(VP9Decoder *pbi) {
static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b) {
return a->y_height == b->y_height && a->y_width == b->y_width &&
a->uv_height == b->uv_height && a->uv_width == b->uv_width;
return a->y_height == b->y_height && a->y_width == b->y_width &&
a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}
vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
@ -176,14 +171,12 @@ vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
else
vp8_yv12_copy_frame(cfg, sd);
} else {
vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
"Invalid reference frame");
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
}
return cm->error.error_code;
}
vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
VP9_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
@ -206,8 +199,7 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
} else if (ref_frame_flag == VP9_ALT_FLAG) {
idx = cm->ref_frame_map[2];
} else {
vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
"Invalid reference frame");
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
return cm->error.error_code;
}
@ -273,8 +265,8 @@ static void swap_frame_buffers(VP9Decoder *pbi) {
cm->frame_refs[ref_index].idx = -1;
}
int vp9_receive_compressed_data(VP9Decoder *pbi,
size_t size, const uint8_t **psource) {
int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size,
const uint8_t **psource) {
VP9_COMMON *volatile const cm = &pbi->common;
BufferPool *volatile const pool = cm->buffer_pool;
RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
@ -301,8 +293,8 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
// Check if the previous frame was a frame without any references to it.
// Release frame buffer if not decoding in frame parallel mode.
if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0
&& frame_bufs[cm->new_fb_idx].ref_count == 0)
if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0 &&
frame_bufs[cm->new_fb_idx].ref_count == 0)
pool->release_fb_cb(pool->cb_priv,
&frame_bufs[cm->new_fb_idx].raw_frame_buffer);
// Find a free frame buffer. Return error if can not find any.
@ -330,7 +322,6 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
}
if (setjmp(cm->error.jmp)) {
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
int i;
@ -425,14 +416,12 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
(void)*flags;
#endif
if (pbi->ready_for_new_data == 1)
return ret;
if (pbi->ready_for_new_data == 1) return ret;
pbi->ready_for_new_data = 1;
/* no raw frame to show!!! */
if (!cm->show_frame)
return ret;
if (!cm->show_frame) return ret;
pbi->ready_for_new_data = 1;
@ -451,8 +440,7 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
return ret;
}
vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
size_t data_sz,
vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
uint32_t sizes[8], int *count,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
@ -475,18 +463,16 @@ vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
// This chunk is marked as having a superframe index but doesn't have
// enough data for it, thus it's an invalid superframe index.
if (data_sz < index_sz)
return VPX_CODEC_CORRUPT_FRAME;
if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
{
const uint8_t marker2 = read_marker(decrypt_cb, decrypt_state,
data + data_sz - index_sz);
const uint8_t marker2 =
read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);
// This chunk is marked as having a superframe index but doesn't have
// the matching marker byte at the front of the index therefore it's an
// invalid chunk.
if (marker != marker2)
return VPX_CODEC_CORRUPT_FRAME;
if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
}
{
@ -505,8 +491,7 @@ vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
for (i = 0; i < frames; ++i) {
uint32_t this_sz = 0;
for (j = 0; j < mag; ++j)
this_sz |= ((uint32_t)(*x++)) << (j * 8);
for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8);
sizes[i] = this_sz;
}
*count = frames;

vp9/decoder/vp9_decoder.h

@ -57,9 +57,9 @@ typedef struct VP9Decoder {
// TODO(hkuang): Combine this with cur_buf in macroblockd as they are
// the same.
RefCntBuffer *cur_buf; // Current decoding frame buffer.
RefCntBuffer *cur_buf; // Current decoding frame buffer.
VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
VPxWorker lf_worker;
VPxWorker *tile_workers;
TileWorkerData *tile_worker_data;
@ -74,12 +74,12 @@ typedef struct VP9Decoder {
int max_threads;
int inv_tile_order;
int need_resync; // wait for key/intra-only frame.
int need_resync; // wait for key/intra-only frame.
int hold_ref_buf; // hold the reference buffer.
} VP9Decoder;
int vp9_receive_compressed_data(struct VP9Decoder *pbi,
size_t size, const uint8_t **dest);
int vp9_receive_compressed_data(struct VP9Decoder *pbi, size_t size,
const uint8_t **dest);
int vp9_get_raw_frame(struct VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
vp9_ppflags_t *flags);
@ -93,8 +93,7 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
YV12_BUFFER_CONFIG *sd);
static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
void *decrypt_state,
const uint8_t *data) {
void *decrypt_state, const uint8_t *data) {
if (decrypt_cb) {
uint8_t marker;
decrypt_cb(decrypt_state, data, &marker, 1);
@ -105,8 +104,7 @@ static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
// This function is exposed for use in tests, as well as the inlined function
// "read_marker".
vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
size_t data_sz,
vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
uint32_t sizes[8], int *count,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state);

vp9/decoder/vp9_detokenize.c

@ -20,25 +20,22 @@
#include "vp9/decoder/vp9_detokenize.h"
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
#define ONE_CONTEXT_NODE 2
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
#define ONE_CONTEXT_NODE 2
#define INCREMENT_COUNT(token) \
do { \
if (counts) \
++coef_counts[band][ctx][token]; \
#define INCREMENT_COUNT(token) \
do { \
if (counts) ++coef_counts[band][ctx][token]; \
} while (0)
static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) {
int i, val = 0;
for (i = 0; i < n; ++i)
val = (val << 1) | vpx_read(r, probs[i]);
for (i = 0; i < n; ++i) val = (val << 1) | vpx_read(r, probs[i]);
return val;
}
static int decode_coefs(const MACROBLOCKD *xd,
PLANE_TYPE type,
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
int ctx, const int16_t *scan, const int16_t *nb,
vpx_reader *r) {
@ -47,11 +44,11 @@ static int decode_coefs(const MACROBLOCKD *xd,
const FRAME_CONTEXT *const fc = xd->fc;
const int ref = is_inter_block(xd->mi[0]);
int band, c = 0;
const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size][type][ref];
const vpx_prob *prob;
unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
uint8_t token_cache[32 * 32];
const uint8_t *band_translate = get_band_translate(tx_size);
const int dq_shift = (tx_size == TX_32X32);
@ -59,16 +56,16 @@ static int decode_coefs(const MACROBLOCKD *xd,
int16_t dqv = dq[0];
const uint8_t *const cat6_prob =
#if CONFIG_VP9_HIGHBITDEPTH
(xd->bd == VPX_BITS_12) ? vp9_cat6_prob_high12 :
(xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 :
(xd->bd == VPX_BITS_12)
? vp9_cat6_prob_high12
: (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 :
#endif // CONFIG_VP9_HIGHBITDEPTH
vp9_cat6_prob;
vp9_cat6_prob;
const int cat6_bits =
#if CONFIG_VP9_HIGHBITDEPTH
(xd->bd == VPX_BITS_12) ? 18 :
(xd->bd == VPX_BITS_10) ? 16 :
(xd->bd == VPX_BITS_12) ? 18 : (xd->bd == VPX_BITS_10) ? 16 :
#endif // CONFIG_VP9_HIGHBITDEPTH
14;
14;
if (counts) {
coef_counts = counts->coef[tx_size][type][ref];
@ -79,8 +76,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
int val = -1;
band = *band_translate++;
prob = coef_probs[band][ctx];
if (counts)
++eob_branch_count[band][ctx];
if (counts) ++eob_branch_count[band][ctx];
if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) {
INCREMENT_COUNT(EOB_MODEL_TOKEN);
break;
@ -91,8 +87,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
dqv = dq[1];
token_cache[scan[c]] = 0;
++c;
if (c >= max_eob)
return c; // zero tokens at the end (no eob token)
if (c >= max_eob) return c; // zero tokens at the end (no eob token)
ctx = get_coef_context(nb, token_cache, c);
band = *band_translate++;
prob = coef_probs[band][ctx];
@ -109,9 +104,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
switch (token) {
case TWO_TOKEN:
case THREE_TOKEN:
case FOUR_TOKEN:
val = token;
break;
case FOUR_TOKEN: val = token; break;
case CATEGORY1_TOKEN:
val = CAT1_MIN_VAL + read_coeff(vp9_cat1_prob, 1, r);
break;
@ -135,8 +128,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v),
xd->bd);
dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), xd->bd);
#else
dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v);
#endif // CONFIG_VP9_HIGHBITDEPTH
@ -178,7 +170,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
switch (tx_size) {
case TX_4X4:
ctx = a[0] != 0;
ctx = a[0] != 0;
ctx += l[0] != 0;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
@ -186,7 +178,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
break;
case TX_8X8:
get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_8X8);
ctx = !!*(const uint16_t *)a;
ctx = !!*(const uint16_t *)a;
ctx += !!*(const uint16_t *)l;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
@ -195,7 +187,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
break;
case TX_16X16:
get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_16X16);
ctx = !!*(const uint32_t *)a;
ctx = !!*(const uint32_t *)a;
ctx += !!*(const uint32_t *)l;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
@ -207,7 +199,7 @@ int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
// NOTE: casting to uint64_t here is safe because the default memory
// alignment is at least 8 bytes and the TX_32X32 is aligned on 8 byte
// boundaries.
ctx = !!*(const uint64_t *)a;
ctx = !!*(const uint64_t *)a;
ctx += !!*(const uint64_t *)l;
eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);

vp9/decoder/vp9_detokenize.h

@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_DECODER_VP9_DETOKENIZE_H_
#define VP9_DECODER_VP9_DETOKENIZE_H_
@ -20,10 +19,8 @@
extern "C" {
#endif
int vp9_decode_block_tokens(MACROBLOCKD *xd,
int plane, const scan_order *sc,
int x, int y,
TX_SIZE tx_size, vpx_reader *r,
int vp9_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
int x, int y, TX_SIZE tx_size, vpx_reader *r,
int seg_id);
#ifdef __cplusplus

vp9/decoder/vp9_dsubexp.c

@ -15,8 +15,7 @@
#include "vp9/decoder/vp9_dsubexp.h"
static int inv_recenter_nonneg(int v, int m) {
if (v > 2 * m)
return v;
if (v > 2 * m) return v;
return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
}
@ -25,19 +24,19 @@ static int decode_uniform(vpx_reader *r) {
const int l = 8;
const int m = (1 << l) - 191;
const int v = vpx_read_literal(r, l - 1);
return v < m ? v : (v << 1) - m + vpx_read_bit(r);
return v < m ? v : (v << 1) - m + vpx_read_bit(r);
}
static int inv_remap_prob(int v, int m) {
static uint8_t inv_map_table[MAX_PROB] = {
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92,
93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92,
93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 125,
126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 138, 139, 140, 141,
142, 143, 144, 145, 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157,
@ -59,16 +58,13 @@ static int inv_remap_prob(int v, int m) {
}
static int decode_term_subexp(vpx_reader *r) {
if (!vpx_read_bit(r))
return vpx_read_literal(r, 4);
if (!vpx_read_bit(r))
return vpx_read_literal(r, 4) + 16;
if (!vpx_read_bit(r))
return vpx_read_literal(r, 5) + 32;
if (!vpx_read_bit(r)) return vpx_read_literal(r, 4);
if (!vpx_read_bit(r)) return vpx_read_literal(r, 4) + 16;
if (!vpx_read_bit(r)) return vpx_read_literal(r, 5) + 32;
return decode_uniform(r) + 64;
}
void vp9_diff_update_prob(vpx_reader *r, vpx_prob* p) {
void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p) {
if (vpx_read(r, DIFF_UPDATE_PROB)) {
const int delp = decode_term_subexp(r);
*p = (vpx_prob)inv_remap_prob(delp, *p);

vp9/decoder/vp9_dsubexp.h

@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_DECODER_VP9_DSUBEXP_H_
#define VP9_DECODER_VP9_DSUBEXP_H_
@ -18,7 +17,7 @@
extern "C" {
#endif
void vp9_diff_update_prob(vpx_reader *r, vpx_prob* p);
void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p);
#ifdef __cplusplus
} // extern "C"

vp9/decoder/vp9_dthread.c

@ -62,8 +62,7 @@ void vp9_frameworker_signal_stats(VPxWorker *const worker) {
void vp9_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
int row) {
#if CONFIG_MULTITHREAD
if (!ref_buf)
return;
if (!ref_buf) return;
#ifndef BUILDING_WITH_TSAN
// The following line of code will get harmless tsan error but it is the key
@ -147,11 +146,12 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
vp9_frameworker_lock_stats(src_worker);
while (!src_worker_data->frame_context_ready) {
pthread_cond_wait(&src_worker_data->stats_cond,
&src_worker_data->stats_mutex);
&src_worker_data->stats_mutex);
}
dst_cm->last_frame_seg_map = src_cm->seg.enabled ?
src_cm->current_frame_seg_map : src_cm->last_frame_seg_map;
dst_cm->last_frame_seg_map = src_cm->seg.enabled
? src_cm->current_frame_seg_map
: src_cm->last_frame_seg_map;
dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
vp9_frameworker_unlock_stats(src_worker);
@ -159,17 +159,18 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
#if CONFIG_VP9_HIGHBITDEPTH
dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
#endif
dst_cm->prev_frame = src_cm->show_existing_frame ?
src_cm->prev_frame : src_cm->cur_frame;
dst_cm->last_width = !src_cm->show_existing_frame ?
src_cm->width : src_cm->last_width;
dst_cm->last_height = !src_cm->show_existing_frame ?
src_cm->height : src_cm->last_height;
dst_cm->prev_frame =
src_cm->show_existing_frame ? src_cm->prev_frame : src_cm->cur_frame;
dst_cm->last_width =
!src_cm->show_existing_frame ? src_cm->width : src_cm->last_width;
dst_cm->last_height =
!src_cm->show_existing_frame ? src_cm->height : src_cm->last_height;
dst_cm->subsampling_x = src_cm->subsampling_x;
dst_cm->subsampling_y = src_cm->subsampling_y;
dst_cm->frame_type = src_cm->frame_type;
dst_cm->last_show_frame = !src_cm->show_existing_frame ?
src_cm->show_frame : src_cm->last_show_frame;
dst_cm->last_show_frame = !src_cm->show_existing_frame
? src_cm->show_frame
: src_cm->last_show_frame;
for (i = 0; i < REF_FRAMES; ++i)
dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i];
@ -183,7 +184,7 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
memcpy(dst_cm->frame_contexts, src_cm->frame_contexts,
FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0]));
#else
(void) dst_worker;
(void) src_worker;
(void)dst_worker;
(void)src_worker;
#endif // CONFIG_MULTITHREAD
}

vp9/decoder/vp9_dthread.h

@ -68,7 +68,7 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker,
VPxWorker *const src_worker);
#ifdef __cplusplus
} // extern "C"
} // extern "C"
#endif
#endif // VP9_DECODER_VP9_DTHREAD_H_

File diff suppressed because it is too large

vp9/vp9_dx_iface.c

@ -41,8 +41,7 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
if (!ctx->priv) {
vpx_codec_alg_priv_t *const priv =
(vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv));
if (priv == NULL)
return VPX_CODEC_MEM_ERROR;
if (priv == NULL) return VPX_CODEC_MEM_ERROR;
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
@ -51,7 +50,9 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx,
// Only do frame parallel decode when threads > 1.
priv->frame_parallel_decode =
(ctx->config.dec && (ctx->config.dec->threads > 1) &&
(ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0;
(ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING))
? 1
: 0;
if (ctx->config.dec) {
priv->cfg = *ctx->config.dec;
ctx->config.dec = &priv->cfg;
@ -102,11 +103,10 @@ static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
return VPX_CODEC_OK;
}
static int parse_bitdepth_colorspace_sampling(
BITSTREAM_PROFILE profile, struct vpx_read_bit_buffer *rb) {
static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile,
struct vpx_read_bit_buffer *rb) {
vpx_color_space_t color_space;
if (profile >= PROFILE_2)
rb->bit_offset += 1; // Bit-depth 10 or 12.
if (profile >= PROFILE_2) rb->bit_offset += 1; // Bit-depth 10 or 12.
color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3);
if (color_space != VPX_CS_SRGB) {
rb->bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range.
@ -125,17 +125,13 @@ static int parse_bitdepth_colorspace_sampling(
return 1;
}
static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
unsigned int data_sz,
vpx_codec_stream_info_t *si,
int *is_intra_only,
vpx_decrypt_cb decrypt_cb,
void *decrypt_state) {
static vpx_codec_err_t decoder_peek_si_internal(
const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si,
int *is_intra_only, vpx_decrypt_cb decrypt_cb, void *decrypt_state) {
int intra_only_flag = 0;
uint8_t clear_buffer[10];
if (data + data_sz <= data)
return VPX_CODEC_INVALID_PARAM;
if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM;
si->is_kf = 0;
si->w = si->h = 0;
@ -148,8 +144,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
// A maximum of 6 bits are needed to read the frame marker, profile and
// show_existing_frame.
if (data_sz < 1)
return VPX_CODEC_UNSUP_BITSTREAM;
if (data_sz < 1) return VPX_CODEC_UNSUP_BITSTREAM;
{
int show_frame;
@ -158,17 +153,14 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
const int frame_marker = vpx_rb_read_literal(&rb, 2);
const BITSTREAM_PROFILE profile = vp9_read_profile(&rb);
if (frame_marker != VP9_FRAME_MARKER)
return VPX_CODEC_UNSUP_BITSTREAM;
if (frame_marker != VP9_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
if (profile >= MAX_PROFILES)
return VPX_CODEC_UNSUP_BITSTREAM;
if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM;
if (vpx_rb_read_bit(&rb)) { // show an existing frame
// If profile is > 2 and show_existing_frame is true, then at least 1 more
// byte (6+3=9 bits) is needed.
if (profile > 2 && data_sz < 2)
return VPX_CODEC_UNSUP_BITSTREAM;
if (profile > 2 && data_sz < 2) return VPX_CODEC_UNSUP_BITSTREAM;
vpx_rb_read_literal(&rb, 3); // Frame buffer to show.
return VPX_CODEC_OK;
}
@ -176,16 +168,14 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
// For the rest of the function, a maximum of 9 more bytes are needed
// (computed by taking the maximum possible bits needed in each case). Note
// that this has to be updated if we read any more bits in this function.
if (data_sz < 10)
return VPX_CODEC_UNSUP_BITSTREAM;
if (data_sz < 10) return VPX_CODEC_UNSUP_BITSTREAM;
si->is_kf = !vpx_rb_read_bit(&rb);
show_frame = vpx_rb_read_bit(&rb);
error_resilient = vpx_rb_read_bit(&rb);
if (si->is_kf) {
if (!vp9_read_sync_code(&rb))
return VPX_CODEC_UNSUP_BITSTREAM;
if (!vp9_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
@ -196,8 +186,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context
if (intra_only_flag) {
if (!vp9_read_sync_code(&rb))
return VPX_CODEC_UNSUP_BITSTREAM;
if (!vp9_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
if (profile > PROFILE_0) {
if (!parse_bitdepth_colorspace_sampling(profile, &rb))
return VPX_CODEC_UNSUP_BITSTREAM;
@ -207,8 +196,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data,
}
}
}
if (is_intra_only != NULL)
*is_intra_only = intra_only_flag;
if (is_intra_only != NULL) *is_intra_only = intra_only_flag;
return VPX_CODEC_OK;
}
@ -221,8 +209,8 @@ static vpx_codec_err_t decoder_peek_si(const uint8_t *data,
static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx,
vpx_codec_stream_info_t *si) {
const size_t sz = (si->sz >= sizeof(vp9_stream_info_t))
? sizeof(vp9_stream_info_t)
: sizeof(vpx_codec_stream_info_t);
? sizeof(vp9_stream_info_t)
: sizeof(vpx_codec_stream_info_t);
memcpy(si, &ctx->si, sz);
si->sz = (unsigned int)sz;
@ -234,8 +222,8 @@ static void set_error_detail(vpx_codec_alg_priv_t *ctx,
ctx->base.err_detail = error;
}
static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
const struct vpx_internal_error_info *error) {
static vpx_codec_err_t update_error_state(
vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
if (error->error_code)
set_error_detail(ctx, error->has_detail ? error->detail : NULL);
@ -278,10 +266,8 @@ static void set_default_ppflags(vp8_postproc_cfg_t *cfg) {
cfg->noise_level = 0;
}
static void set_ppflags(const vpx_codec_alg_priv_t *ctx,
vp9_ppflags_t *flags) {
flags->post_proc_flag =
ctx->postproc_cfg.post_proc_flag;
static void set_ppflags(const vpx_codec_alg_priv_t *ctx, vp9_ppflags_t *flags) {
flags->post_proc_flag = ctx->postproc_cfg.post_proc_flag;
flags->deblocking_level = ctx->postproc_cfg.deblocking_level;
flags->noise_level = ctx->postproc_cfg.noise_level;
@ -292,10 +278,8 @@ static int frame_worker_hook(void *arg1, void *arg2) {
const uint8_t *data = frame_worker_data->data;
(void)arg2;
frame_worker_data->result =
vp9_receive_compressed_data(frame_worker_data->pbi,
frame_worker_data->data_size,
&data);
frame_worker_data->result = vp9_receive_compressed_data(
frame_worker_data->pbi, frame_worker_data->data_size, &data);
frame_worker_data->data_end = data;
if (frame_worker_data->pbi->frame_parallel_decode) {
@ -337,25 +321,24 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
ctx->num_cache_frames = 0;
ctx->need_resync = 1;
ctx->num_frame_workers =
(ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
(ctx->frame_parallel_decode == 1) ? ctx->cfg.threads : 1;
if (ctx->num_frame_workers > MAX_DECODE_THREADS)
ctx->num_frame_workers = MAX_DECODE_THREADS;
ctx->available_threads = ctx->num_frame_workers;
ctx->flushed = 0;
ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
if (ctx->buffer_pool == NULL)
return VPX_CODEC_MEM_ERROR;
if (ctx->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR;
#if CONFIG_MULTITHREAD
if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
set_error_detail(ctx, "Failed to allocate buffer pool mutex");
return VPX_CODEC_MEM_ERROR;
}
if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
set_error_detail(ctx, "Failed to allocate buffer pool mutex");
return VPX_CODEC_MEM_ERROR;
}
#endif
ctx->frame_workers = (VPxWorker *)
vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
ctx->frame_workers = (VPxWorker *)vpx_malloc(ctx->num_frame_workers *
sizeof(*ctx->frame_workers));
if (ctx->frame_workers == NULL) {
set_error_detail(ctx, "Failed to allocate frame_workers");
return VPX_CODEC_MEM_ERROR;
@ -411,8 +394,7 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
if (!ctx->postproc_cfg_set &&
(ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC))
set_default_ppflags(&ctx->postproc_cfg);
init_buffer_callbacks(ctx);
@ -442,11 +424,9 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
const vpx_codec_err_t res =
decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only,
ctx->decrypt_cb, ctx->decrypt_state);
if (res != VPX_CODEC_OK)
return res;
if (res != VPX_CODEC_OK) return res;
if (!ctx->si.is_kf && !is_intra_only)
return VPX_CODEC_ERROR;
if (!ctx->si.is_kf && !is_intra_only) return VPX_CODEC_ERROR;
}
if (!ctx->frame_parallel_decode) {
@ -520,7 +500,7 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags = {0, 0, 0};
vp9_ppflags_t flags = { 0, 0, 0 };
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
@ -541,8 +521,7 @@ static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
frame_worker_data->user_priv);
ctx->frame_cache[ctx->frame_cache_write].img.fb_priv =
frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
ctx->frame_cache_write =
(ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
ctx->frame_cache_write = (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
++ctx->num_cache_frames;
}
}
@ -551,7 +530,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
const uint8_t *data_start = data;
const uint8_t * const data_end = data + data_sz;
const uint8_t *const data_end = data + data_sz;
vpx_codec_err_t res;
uint32_t frame_sizes[8];
int frame_count;
@ -567,14 +546,12 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
// Initialize the decoder workers on the first frame.
if (ctx->frame_workers == NULL) {
const vpx_codec_err_t res = init_decoder(ctx);
if (res != VPX_CODEC_OK)
return res;
if (res != VPX_CODEC_OK) return res;
}
res = vp9_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
ctx->decrypt_cb, ctx->decrypt_state);
if (res != VPX_CODEC_OK)
return res;
if (res != VPX_CODEC_OK) return res;
if (ctx->frame_parallel_decode) {
// Decode in frame parallel mode. When decoding in this mode, the frame
@ -587,8 +564,8 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
for (i = 0; i < frame_count; ++i) {
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
if (data_start < data ||
frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
@ -605,10 +582,9 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
}
}
res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
deadline);
if (res != VPX_CODEC_OK)
return res;
res =
decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
if (res != VPX_CODEC_OK) return res;
data_start += frame_size;
}
} else {
@ -625,8 +601,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
}
res = decode_one(ctx, &data, data_sz, user_priv, deadline);
if (res != VPX_CODEC_OK)
return res;
if (res != VPX_CODEC_OK) return res;
}
} else {
// Decode in serial mode.
@ -637,33 +612,30 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
vpx_codec_err_t res;
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
if (data_start < data ||
frame_size > (uint32_t)(data_end - data_start)) {
set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
deadline);
if (res != VPX_CODEC_OK)
return res;
res =
decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline);
if (res != VPX_CODEC_OK) return res;
data_start += frame_size;
}
} else {
while (data_start < data_end) {
const uint32_t frame_size = (uint32_t) (data_end - data_start);
const vpx_codec_err_t res = decode_one(ctx, &data_start, frame_size,
user_priv, deadline);
if (res != VPX_CODEC_OK)
return res;
const uint32_t frame_size = (uint32_t)(data_end - data_start);
const vpx_codec_err_t res =
decode_one(ctx, &data_start, frame_size, user_priv, deadline);
if (res != VPX_CODEC_OK) return res;
// Account for suboptimal termination by the encoder.
while (data_start < data_end) {
const uint8_t marker = read_marker(ctx->decrypt_cb,
ctx->decrypt_state, data_start);
if (marker)
break;
const uint8_t marker =
read_marker(ctx->decrypt_cb, ctx->decrypt_state, data_start);
if (marker) break;
++data_start;
}
}
@ -698,9 +670,8 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
// Output the frames in the cache first.
if (ctx->num_cache_frames > 0) {
release_last_output_frame(ctx);
ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
if (ctx->need_resync)
return NULL;
ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
if (ctx->need_resync) return NULL;
img = &ctx->frame_cache[ctx->frame_cache_read].img;
ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE;
--ctx->num_cache_frames;
@ -712,10 +683,9 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
if (*iter == NULL && ctx->frame_workers != NULL) {
do {
YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags = {0, 0, 0};
vp9_ppflags_t flags = { 0, 0, 0 };
const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
VPxWorker *const worker =
&ctx->frame_workers[ctx->next_output_worker_id];
VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
ctx->next_output_worker_id =
@ -735,8 +705,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
release_last_output_frame(ctx);
ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
if (ctx->need_resync)
return NULL;
if (ctx->need_resync) return NULL;
yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv);
ctx->img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
img = &ctx->img;
@ -747,8 +716,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
frame_worker_data->received_frame = 0;
++ctx->available_threads;
ctx->need_resync = 1;
if (ctx->flushed != 1)
return NULL;
if (ctx->flushed != 1) return NULL;
}
} while (ctx->next_output_worker_id != ctx->next_submit_worker_id);
}
@ -756,8 +724,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
}
static vpx_codec_err_t decoder_set_fb_fn(
vpx_codec_alg_priv_t *ctx,
vpx_get_frame_buffer_cb_fn_t cb_get,
vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get,
vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
if (cb_get == NULL || cb_release == NULL) {
return VPX_CODEC_INVALID_PARAM;
@ -808,7 +775,7 @@ static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
}
if (data) {
vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data;
vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
@@ -831,7 +798,7 @@ static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
}
if (data) {
YV12_BUFFER_CONFIG* fb;
YV12_BUFFER_CONFIG *fb;
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
@@ -1022,8 +989,7 @@ static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx,
ctx->byte_alignment = byte_alignment;
if (ctx->frame_workers) {
VPxWorker *const worker = ctx->frame_workers;
FrameWorkerData *const frame_worker_data =
(FrameWorkerData *)worker->data1;
FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
frame_worker_data->pbi->common.byte_alignment = byte_alignment;
}
return VPX_CODEC_OK;
@@ -1043,29 +1009,29 @@ static vpx_codec_err_t ctrl_set_skip_loop_filter(vpx_codec_alg_priv_t *ctx,
}
static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
{VP8_COPY_REFERENCE, ctrl_copy_reference},
{ VP8_COPY_REFERENCE, ctrl_copy_reference },
// Setters
{VP8_SET_REFERENCE, ctrl_set_reference},
{VP8_SET_POSTPROC, ctrl_set_postproc},
{VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options},
{VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options},
{VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options},
{VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options},
{VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order},
{VPXD_SET_DECRYPTOR, ctrl_set_decryptor},
{VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment},
{VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter},
{ VP8_SET_REFERENCE, ctrl_set_reference },
{ VP8_SET_POSTPROC, ctrl_set_postproc },
{ VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options },
{ VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options },
{ VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options },
{ VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options },
{ VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order },
{ VPXD_SET_DECRYPTOR, ctrl_set_decryptor },
{ VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment },
{ VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter },
// Getters
{VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates},
{VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted},
{VP9_GET_REFERENCE, ctrl_get_reference},
{VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size},
{VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth},
{VP9D_GET_FRAME_SIZE, ctrl_get_frame_size},
{ VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates },
{ VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted },
{ VP9_GET_REFERENCE, ctrl_get_reference },
{ VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size },
{ VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth },
{ VP9D_GET_FRAME_SIZE, ctrl_get_frame_size },
{ -1, NULL},
{ -1, NULL },
};
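The table just above is a sentinel-terminated control map: the { -1, NULL } entry marks the end. As a rough, illustrative sketch (the type and function names below are assumptions, not the library's internal dispatcher), a lookup over such a table amounts to a linear scan:

#include <stddef.h>

typedef struct {
  int ctrl_id;
  void *fn; /* handler; the real map stores a typed function pointer */
} ctrl_map_entry;

/* Walk the map until the NULL-handler sentinel; return the matching handler
 * or NULL when the control id is not supported by this codec. */
static void *find_ctrl_handler(const ctrl_map_entry *map, int ctrl_id) {
  for (; map->fn != NULL; ++map) {
    if (map->ctrl_id == ctrl_id) return map->fn;
  }
  return NULL;
}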
#ifndef VERSION_STRING
@@ -1076,24 +1042,26 @@ CODEC_INTERFACE(vpx_codec_vp9_dx) = {
VPX_CODEC_INTERNAL_ABI_VERSION,
VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC |
VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // vpx_codec_caps_t
decoder_init, // vpx_codec_init_fn_t
decoder_destroy, // vpx_codec_destroy_fn_t
decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
{ // NOLINT
decoder_peek_si, // vpx_codec_peek_si_fn_t
decoder_get_si, // vpx_codec_get_si_fn_t
decoder_decode, // vpx_codec_decode_fn_t
decoder_get_frame, // vpx_codec_frame_get_fn_t
decoder_set_fb_fn, // vpx_codec_set_fb_fn_t
decoder_init, // vpx_codec_init_fn_t
decoder_destroy, // vpx_codec_destroy_fn_t
decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t
{
// NOLINT
decoder_peek_si, // vpx_codec_peek_si_fn_t
decoder_get_si, // vpx_codec_get_si_fn_t
decoder_decode, // vpx_codec_decode_fn_t
decoder_get_frame, // vpx_codec_frame_get_fn_t
decoder_set_fb_fn, // vpx_codec_set_fb_fn_t
},
{ // NOLINT
0,
NULL, // vpx_codec_enc_cfg_map_t
NULL, // vpx_codec_encode_fn_t
NULL, // vpx_codec_get_cx_data_fn_t
NULL, // vpx_codec_enc_config_set_fn_t
NULL, // vpx_codec_get_global_headers_fn_t
NULL, // vpx_codec_get_preview_frame_fn_t
NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
{
// NOLINT
0,
NULL, // vpx_codec_enc_cfg_map_t
NULL, // vpx_codec_encode_fn_t
NULL, // vpx_codec_get_cx_data_fn_t
NULL, // vpx_codec_enc_config_set_fn_t
NULL, // vpx_codec_get_global_headers_fn_t
NULL, // vpx_codec_get_preview_frame_fn_t
NULL // vpx_codec_enc_mr_get_mem_loc_fn_t
}
};
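For reference, a minimal caller of the decoder interface defined above could look like the sketch below; buf and len are placeholders for a compressed VP9 frame, and error handling is reduced to return codes.

#include <stddef.h>
#include <stdint.h>

#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"

/* Decode one compressed buffer with vpx_codec_vp9_dx() and walk the output
 * frames. Returns 0 on success, nonzero on failure. */
static int decode_buffer(const uint8_t *buf, size_t len) {
  vpx_codec_ctx_t codec;
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;
  if (vpx_codec_dec_init(&codec, vpx_codec_vp9_dx(), NULL, 0)) return -1;
  if (vpx_codec_decode(&codec, buf, (unsigned int)len, NULL, 0)) {
    vpx_codec_destroy(&codec);
    return -1;
  }
  while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
    /* consume img->planes[] / img->stride[] here */
  }
  return vpx_codec_destroy(&codec) != VPX_CODEC_OK;
}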
View File
@@ -17,7 +17,7 @@ typedef vpx_codec_stream_info_t vp9_stream_info_t;
// This limit is due to framebuffer numbers.
// TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames.
#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames.
typedef struct cache_frame {
int fb_idx;
@@ -25,36 +25,36 @@ typedef struct cache_frame {
} cache_frame;
struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
vpx_codec_dec_cfg_t cfg;
vp9_stream_info_t si;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
vpx_image_t img;
int img_avail;
int flushed;
int invert_tile_order;
int last_show_frame; // Index of last output frame.
int byte_alignment;
int skip_loop_filter;
vpx_codec_priv_t base;
vpx_codec_dec_cfg_t cfg;
vp9_stream_info_t si;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
vpx_image_t img;
int img_avail;
int flushed;
int invert_tile_order;
int last_show_frame; // Index of last output frame.
int byte_alignment;
int skip_loop_filter;
// Frame parallel related.
int frame_parallel_decode; // frame-based threading.
VPxWorker *frame_workers;
int num_frame_workers;
int next_submit_worker_id;
int last_submit_worker_id;
int next_output_worker_id;
int available_threads;
cache_frame frame_cache[FRAME_CACHE_SIZE];
int frame_cache_write;
int frame_cache_read;
int num_cache_frames;
int need_resync; // wait for key/intra-only frame
int frame_parallel_decode; // frame-based threading.
VPxWorker *frame_workers;
int num_frame_workers;
int next_submit_worker_id;
int last_submit_worker_id;
int next_output_worker_id;
int available_threads;
cache_frame frame_cache[FRAME_CACHE_SIZE];
int frame_cache_write;
int frame_cache_read;
int num_cache_frames;
int need_resync; // wait for key/intra-only frame
// BufferPool that holds all reference frames. Shared by all the FrameWorkers.
BufferPool *buffer_pool;
BufferPool *buffer_pool;
// External frame buffer info to save for VP9 common.
void *ext_priv; // Private data associated with the external frame buffers.
View File
@@ -12,7 +12,7 @@
#include "vpx_ports/mem.h"
static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
void *user_priv) {
/** vpx_img_wrap() doesn't allow specifying independent strides for
* the Y, U, and V planes, nor other alignment adjustments that
@@ -61,9 +61,9 @@ static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12,
// of the image.
img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH);
img->bit_depth = yv12->bit_depth;
img->planes[VPX_PLANE_Y] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->y_buffer);
img->planes[VPX_PLANE_U] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->u_buffer);
img->planes[VPX_PLANE_V] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->v_buffer);
img->planes[VPX_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer);
img->planes[VPX_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer);
img->planes[VPX_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer);
img->planes[VPX_PLANE_ALPHA] = NULL;
img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride;
img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride;
@@ -84,17 +84,17 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
yv12->u_buffer = img->planes[VPX_PLANE_U];
yv12->v_buffer = img->planes[VPX_PLANE_V];
yv12->y_crop_width = img->d_w;
yv12->y_crop_width = img->d_w;
yv12->y_crop_height = img->d_h;
yv12->render_width = img->r_w;
yv12->render_width = img->r_w;
yv12->render_height = img->r_h;
yv12->y_width = img->d_w;
yv12->y_width = img->d_w;
yv12->y_height = img->d_h;
yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2
: yv12->y_width;
yv12->uv_height = img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2
: yv12->y_height;
yv12->uv_width =
img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width;
yv12->uv_height =
img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height;
yv12->uv_crop_width = yv12->uv_width;
yv12->uv_crop_height = yv12->uv_height;
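A small worked note on the rounding above: for 4:2:0 content (x_chroma_shift and y_chroma_shift both 1), odd luma dimensions round up, so a 1921x1081 picture gets 961x541 chroma planes. The helper below (an illustrative name, not part of this file) mirrors the expression:

/* Halve a luma dimension, rounding up, when the plane is subsampled. */
static int chroma_dim(int luma_dim, int chroma_shift) {
  return chroma_shift == 1 ? (1 + luma_dim) / 2 : luma_dim;
}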
@@ -124,9 +124,9 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
} else {
yv12->flags = 0;
}
yv12->border = (yv12->y_stride - img->w) / 2;
yv12->border = (yv12->y_stride - img->w) / 2;
#else
yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
#endif // CONFIG_VP9_HIGHBITDEPTH
yv12->subsampling_x = img->x_chroma_shift;
yv12->subsampling_y = img->y_chroma_shift;
@@ -135,12 +135,9 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img,
static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
switch (frame) {
case VP8_LAST_FRAME:
return VP9_LAST_FLAG;
case VP8_GOLD_FRAME:
return VP9_GOLD_FLAG;
case VP8_ALTR_FRAME:
return VP9_ALT_FLAG;
case VP8_LAST_FRAME: return VP9_LAST_FLAG;
case VP8_GOLD_FRAME: return VP9_GOLD_FLAG;
case VP8_ALTR_FRAME: return VP9_ALT_FLAG;
}
assert(0 && "Invalid Reference Frame");
return VP9_LAST_FLAG;