64x64 blocksize support.

3.2% gains on std/hd, 1.0% gains on hd.

Change-Id: I481d5df23d8a4fc650a5bcba956554490b2bd200
This commit is contained in:
Ronald S. Bultje 2013-01-05 18:20:25 -08:00
parent 81d1171fd4
commit c3941665e9
27 changed files with 3059 additions and 1773 deletions

1
configure vendored
View File

@ -240,6 +240,7 @@ EXPERIMENT_LIST="
csm csm
comp_intra_pred comp_intra_pred
superblocks superblocks
superblocks64
pred_filter pred_filter
lossless lossless
subpelrefmv subpelrefmv

View File

@ -226,6 +226,16 @@ typedef enum {
MAX_REF_FRAMES = 4 MAX_REF_FRAMES = 4
} MV_REFERENCE_FRAME; } MV_REFERENCE_FRAME;
#if CONFIG_SUPERBLOCKS
typedef enum {
BLOCK_SIZE_MB16X16 = 0,
BLOCK_SIZE_SB32X32 = 1,
#if CONFIG_SUPERBLOCKS64
BLOCK_SIZE_SB64X64 = 2,
#endif
} BLOCK_SIZE_TYPE;
#endif
typedef struct { typedef struct {
MB_PREDICTION_MODE mode, uv_mode; MB_PREDICTION_MODE mode, uv_mode;
#if CONFIG_COMP_INTRA_PRED #if CONFIG_COMP_INTRA_PRED
@ -268,8 +278,8 @@ typedef struct {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
// FIXME need a SB array of 4 MB_MODE_INFOs that // FIXME need a SB array of 4 MB_MODE_INFOs that
// only needs one encoded_as_sb. // only needs one sb_type.
unsigned char encoded_as_sb; BLOCK_SIZE_TYPE sb_type;
#endif #endif
} MB_MODE_INFO; } MB_MODE_INFO;
@ -415,6 +425,7 @@ typedef struct macroblockd {
DECLARE_ALIGNED(32, uint8_t, y_buf[22 * 32]); DECLARE_ALIGNED(32, uint8_t, y_buf[22 * 32]);
#endif #endif
int sb_index;
int mb_index; // Index of the MB in the SB (0..3) int mb_index; // Index of the MB in the SB (0..3)
int q_index; int q_index;
@ -519,7 +530,7 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, const BLOCKD *b) {
return tx_type; return tx_type;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
// TODO(rbultje, debargha): Explore ADST usage for superblocks // TODO(rbultje, debargha): Explore ADST usage for superblocks
if (xd->mode_info_context->mbmi.encoded_as_sb) if (xd->mode_info_context->mbmi.sb_type)
return tx_type; return tx_type;
#endif #endif
if (xd->mode_info_context->mbmi.mode == B_PRED && if (xd->mode_info_context->mbmi.mode == B_PRED &&
@ -576,7 +587,7 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, const BLOCKD *b) {
return tx_type; return tx_type;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
// TODO(rbultje, debargha): Explore ADST usage for superblocks // TODO(rbultje, debargha): Explore ADST usage for superblocks
if (xd->mode_info_context->mbmi.encoded_as_sb) if (xd->mode_info_context->mbmi.sb_type)
return tx_type; return tx_type;
#endif #endif
if (xd->mode_info_context->mbmi.mode == I8X8_PRED && if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
@ -611,7 +622,7 @@ static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, const BLOCKD *b) {
return tx_type; return tx_type;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
// TODO(rbultje, debargha): Explore ADST usage for superblocks // TODO(rbultje, debargha): Explore ADST usage for superblocks
if (xd->mode_info_context->mbmi.encoded_as_sb) if (xd->mode_info_context->mbmi.sb_type)
return tx_type; return tx_type;
#endif #endif
if (xd->mode_info_context->mbmi.mode < I8X8_PRED && if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&

View File

@ -21,6 +21,9 @@
#define TRUE 1 #define TRUE 1
#define FALSE 0 #define FALSE 0
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
/* Only need this for fixed-size arrays, for structs just assign. */ /* Only need this for fixed-size arrays, for structs just assign. */
#define vp9_copy(Dest, Src) { \ #define vp9_copy(Dest, Src) { \

View File

@ -191,7 +191,7 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
above_src, xd->dst.y_stride, &sse); above_src, xd->dst.y_stride, &sse);
score += sse; score += sse;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
vp9_sub_pixel_variance16x2_c(above_ref + offset + 16, vp9_sub_pixel_variance16x2_c(above_ref + offset + 16,
ref_y_stride, ref_y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.col),
@ -199,6 +199,22 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
above_src + 16, xd->dst.y_stride, &sse); above_src + 16, xd->dst.y_stride, &sse);
score += sse; score += sse;
} }
#if CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
vp9_sub_pixel_variance16x2_c(above_ref + offset + 32,
ref_y_stride,
SP(this_mv.as_mv.col),
SP(this_mv.as_mv.row),
above_src + 32, xd->dst.y_stride, &sse);
score += sse;
vp9_sub_pixel_variance16x2_c(above_ref + offset + 48,
ref_y_stride,
SP(this_mv.as_mv.col),
SP(this_mv.as_mv.row),
above_src + 48, xd->dst.y_stride, &sse);
score += sse;
}
#endif
#endif #endif
} }
if (xd->left_available) { if (xd->left_available) {
@ -208,7 +224,7 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
left_src, xd->dst.y_stride, &sse); left_src, xd->dst.y_stride, &sse);
score += sse; score += sse;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 16, vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 16,
ref_y_stride, ref_y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.col),
@ -217,6 +233,24 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
xd->dst.y_stride, &sse); xd->dst.y_stride, &sse);
score += sse; score += sse;
} }
#if CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 32,
ref_y_stride,
SP(this_mv.as_mv.col),
SP(this_mv.as_mv.row),
left_src + xd->dst.y_stride * 32,
xd->dst.y_stride, &sse);
score += sse;
vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 48,
ref_y_stride,
SP(this_mv.as_mv.col),
SP(this_mv.as_mv.row),
left_src + xd->dst.y_stride * 48,
xd->dst.y_stride, &sse);
score += sse;
}
#endif
#endif #endif
} }
#else #else
@ -230,22 +264,42 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
score += vp9_sad16x3(above_src, xd->dst.y_stride, score += vp9_sad16x3(above_src, xd->dst.y_stride,
above_ref + offset, ref_y_stride); above_ref + offset, ref_y_stride);
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
score += vp9_sad16x3(above_src + 16, xd->dst.y_stride, score += vp9_sad16x3(above_src + 16, xd->dst.y_stride,
above_ref + offset + 16, ref_y_stride); above_ref + offset + 16, ref_y_stride);
} }
#if CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
score += vp9_sad16x3(above_src + 32, xd->dst.y_stride,
above_ref + offset + 32, ref_y_stride);
score += vp9_sad16x3(above_src + 48, xd->dst.y_stride,
above_ref + offset + 48, ref_y_stride);
}
#endif
#endif #endif
} }
if (xd->left_available) { if (xd->left_available) {
score += vp9_sad3x16(left_src, xd->dst.y_stride, score += vp9_sad3x16(left_src, xd->dst.y_stride,
left_ref + offset, ref_y_stride); left_ref + offset, ref_y_stride);
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB32X32) {
score += vp9_sad3x16(left_src + xd->dst.y_stride * 16, score += vp9_sad3x16(left_src + xd->dst.y_stride * 16,
xd->dst.y_stride, xd->dst.y_stride,
left_ref + offset + ref_y_stride * 16, left_ref + offset + ref_y_stride * 16,
ref_y_stride); ref_y_stride);
} }
#if CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
score += vp9_sad3x16(left_src + xd->dst.y_stride * 32,
xd->dst.y_stride,
left_ref + offset + ref_y_stride * 32,
ref_y_stride);
score += vp9_sad3x16(left_src + xd->dst.y_stride * 48,
xd->dst.y_stride,
left_ref + offset + ref_y_stride * 48,
ref_y_stride);
}
#endif
#endif #endif
} }
#endif #endif

View File

@ -228,7 +228,7 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd) {
if (mb_col > 0 if (mb_col > 0
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
&& !((mb_col & 1) && mode_info_context->mbmi.encoded_as_sb && && !((mb_col & 1) && mode_info_context->mbmi.sb_type &&
((mode_info_context[0].mbmi.mb_skip_coeff && ((mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-1].mbmi.mb_skip_coeff) mode_info_context[-1].mbmi.mb_skip_coeff)
#if CONFIG_TX32X32 #if CONFIG_TX32X32
@ -253,7 +253,7 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd) {
/* don't apply across umv border */ /* don't apply across umv border */
if (mb_row > 0 if (mb_row > 0
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
&& !((mb_row & 1) && mode_info_context->mbmi.encoded_as_sb && && !((mb_row & 1) && mode_info_context->mbmi.sb_type &&
((mode_info_context[0].mbmi.mb_skip_coeff && ((mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-mis].mbmi.mb_skip_coeff) mode_info_context[-mis].mbmi.mb_skip_coeff)
#if CONFIG_TX32X32 #if CONFIG_TX32X32
@ -277,7 +277,7 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd) {
// FIXME: Not 8x8 aware // FIXME: Not 8x8 aware
if (mb_col > 0 if (mb_col > 0
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
&& !((mb_col & 1) && mode_info_context->mbmi.encoded_as_sb && && !((mb_col & 1) && mode_info_context->mbmi.sb_type &&
mode_info_context[0].mbmi.mb_skip_coeff && mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-1].mbmi.mb_skip_coeff) mode_info_context[-1].mbmi.mb_skip_coeff)
#endif #endif
@ -292,7 +292,7 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd) {
/* don't apply across umv border */ /* don't apply across umv border */
if (mb_row > 0 if (mb_row > 0
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
&& !((mb_row & 1) && mode_info_context->mbmi.encoded_as_sb && && !((mb_row & 1) && mode_info_context->mbmi.sb_type &&
mode_info_context[0].mbmi.mb_skip_coeff && mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff) mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
#endif #endif

View File

@ -237,7 +237,7 @@ void vp9_find_mv_refs(
vpx_memset(candidate_scores, 0, sizeof(candidate_scores)); vpx_memset(candidate_scores, 0, sizeof(candidate_scores));
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (mbmi->encoded_as_sb) { if (mbmi->sb_type) {
mv_ref_search = sb_mv_ref_search; mv_ref_search = sb_mv_ref_search;
ref_distance_weight = sb_ref_distance_weight; ref_distance_weight = sb_ref_distance_weight;
} else { } else {

View File

@ -229,7 +229,7 @@ typedef struct VP9Common {
/* Y,U,V,Y2 */ /* Y,U,V,Y2 */
ENTROPY_CONTEXT_PLANES *above_context; /* row of context for each plane */ ENTROPY_CONTEXT_PLANES *above_context; /* row of context for each plane */
ENTROPY_CONTEXT_PLANES left_context[2]; /* (up to) 4 contexts "" */ ENTROPY_CONTEXT_PLANES left_context[4]; /* (up to) 4 contexts "" */
/* keyframe block modes are predicted by their above, left neighbors */ /* keyframe block modes are predicted by their above, left neighbors */
@ -248,7 +248,10 @@ typedef struct VP9Common {
vp9_prob prob_last_coded; vp9_prob prob_last_coded;
vp9_prob prob_gf_coded; vp9_prob prob_gf_coded;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
vp9_prob sb_coded; vp9_prob sb32_coded;
#if CONFIG_SUPERBLOCKS64
vp9_prob sb64_coded;
#endif // CONFIG_SUPERBLOCKS64
#endif #endif
// Context probabilities when using predictive coding of segment id // Context probabilities when using predictive coding of segment id

View File

@ -9,6 +9,7 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h" #include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_treecoder.h" #include "vp9/common/vp9_treecoder.h"
@ -230,13 +231,18 @@ void vp9_set_pred_flag(MACROBLOCKD *const xd,
case PRED_SEG_ID: case PRED_SEG_ID:
xd->mode_info_context->mbmi.seg_id_predicted = pred_flag; xd->mode_info_context->mbmi.seg_id_predicted = pred_flag;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (xd->mode_info_context->mbmi.sb_type) {
if (xd->mb_to_right_edge >= 0) #define sub(a, b) (b) < 0 ? (a) + (b) : (a)
xd->mode_info_context[1].mbmi.seg_id_predicted = pred_flag; const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
if (xd->mb_to_bottom_edge >= 0) { const int x_mbs = sub(n_mbs, xd->mb_to_right_edge >> 7);
xd->mode_info_context[mis].mbmi.seg_id_predicted = pred_flag; const int y_mbs = sub(n_mbs, xd->mb_to_bottom_edge >> 7);
if (xd->mb_to_right_edge >= 0) int x, y;
xd->mode_info_context[mis + 1].mbmi.seg_id_predicted = pred_flag;
for (y = 0; y < y_mbs; y++) {
for (x = !y; x < x_mbs; x++) {
xd->mode_info_context[y * mis + x].mbmi.seg_id_predicted =
pred_flag;
}
} }
} }
#endif #endif
@ -245,13 +251,16 @@ void vp9_set_pred_flag(MACROBLOCKD *const xd,
case PRED_REF: case PRED_REF:
xd->mode_info_context->mbmi.ref_predicted = pred_flag; xd->mode_info_context->mbmi.ref_predicted = pred_flag;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (xd->mode_info_context->mbmi.sb_type) {
if (xd->mb_to_right_edge >= 0) const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
xd->mode_info_context[1].mbmi.ref_predicted = pred_flag; const int x_mbs = sub(n_mbs, xd->mb_to_right_edge >> 7);
if (xd->mb_to_bottom_edge >= 0) { const int y_mbs = sub(n_mbs, xd->mb_to_bottom_edge >> 7);
xd->mode_info_context[mis].mbmi.ref_predicted = pred_flag; int x, y;
if (xd->mb_to_right_edge >= 0)
xd->mode_info_context[mis + 1].mbmi.ref_predicted = pred_flag; for (y = 0; y < y_mbs; y++) {
for (x = !y; x < x_mbs; x++) {
xd->mode_info_context[y * mis + x].mbmi.ref_predicted = pred_flag;
}
} }
} }
#endif #endif
@ -260,13 +269,16 @@ void vp9_set_pred_flag(MACROBLOCKD *const xd,
case PRED_MBSKIP: case PRED_MBSKIP:
xd->mode_info_context->mbmi.mb_skip_coeff = pred_flag; xd->mode_info_context->mbmi.mb_skip_coeff = pred_flag;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (xd->mode_info_context->mbmi.sb_type) {
if (xd->mb_to_right_edge >= 0) const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
xd->mode_info_context[1].mbmi.mb_skip_coeff = pred_flag; const int x_mbs = sub(n_mbs, xd->mb_to_right_edge >> 7);
if (xd->mb_to_bottom_edge >= 0) { const int y_mbs = sub(n_mbs, xd->mb_to_bottom_edge >> 7);
xd->mode_info_context[mis].mbmi.mb_skip_coeff = pred_flag; int x, y;
if (xd->mb_to_right_edge >= 0)
xd->mode_info_context[mis + 1].mbmi.mb_skip_coeff = pred_flag; for (y = 0; y < y_mbs; y++) {
for (x = !y; x < x_mbs; x++) {
xd->mode_info_context[y * mis + x].mbmi.mb_skip_coeff = pred_flag;
}
} }
} }
#endif #endif
@ -288,21 +300,25 @@ unsigned char vp9_get_pred_mb_segid(const VP9_COMMON *const cm,
// Currently the prediction for the macroblock segment ID is // Currently the prediction for the macroblock segment ID is
// the value stored for this macroblock in the previous frame. // the value stored for this macroblock in the previous frame.
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb) { if (!xd->mode_info_context->mbmi.sb_type) {
#endif #endif
return cm->last_frame_seg_map[MbIndex]; return cm->last_frame_seg_map[MbIndex];
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
} else { } else {
int seg_id = cm->last_frame_seg_map[MbIndex]; const int n_mbs = 1 << xd->mode_info_context->mbmi.sb_type;
int mb_col = MbIndex % cm->mb_cols; const int mb_col = MbIndex % cm->mb_cols;
int mb_row = MbIndex / cm->mb_cols; const int mb_row = MbIndex / cm->mb_cols;
if (mb_col + 1 < cm->mb_cols) const int x_mbs = MIN(n_mbs, cm->mb_cols - mb_col);
seg_id = seg_id && cm->last_frame_seg_map[MbIndex + 1]; const int y_mbs = MIN(n_mbs, cm->mb_rows - mb_row);
if (mb_row + 1 < cm->mb_rows) { int x, y;
seg_id = seg_id && cm->last_frame_seg_map[MbIndex + cm->mb_cols]; unsigned seg_id = -1;
if (mb_col + 1 < cm->mb_cols)
seg_id = seg_id && cm->last_frame_seg_map[MbIndex + cm->mb_cols + 1]; for (y = mb_row; y < mb_row + y_mbs; y++) {
for (x = mb_col; x < mb_col + x_mbs; x++) {
seg_id = MIN(seg_id, cm->last_frame_seg_map[cm->mb_cols * y + x]);
} }
}
return seg_id; return seg_id;
} }
#endif #endif

View File

@ -780,6 +780,70 @@ void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
} }
#endif #endif
} }
void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
uint8_t *dst_y,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_ystride,
int dst_uvstride) {
uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
uint8_t *y2 = x->second_pre.y_buffer, *u2 = x->second_pre.u_buffer,
*v2 = x->second_pre.v_buffer;
int edge[4], n;
edge[0] = x->mb_to_top_edge;
edge[1] = x->mb_to_bottom_edge;
edge[2] = x->mb_to_left_edge;
edge[3] = x->mb_to_right_edge;
for (n = 0; n < 4; n++) {
const int x_idx = n & 1, y_idx = n >> 1;
x->mb_to_top_edge = edge[0] - ((y_idx * 32) << 3);
x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 32) << 3);
x->mb_to_left_edge = edge[2] - ((x_idx * 32) << 3);
x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 32) << 3);
x->pre.y_buffer = y1 + y_idx * 32 * x->pre.y_stride + x_idx * 32;
x->pre.u_buffer = u1 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
x->pre.v_buffer = v1 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
x->second_pre.y_buffer = y2 + y_idx * 32 * x->pre.y_stride + x_idx * 32;
x->second_pre.u_buffer = u2 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
x->second_pre.v_buffer = v2 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
}
vp9_build_inter32x32_predictors_sb(x,
dst_y + y_idx * 32 * dst_ystride + x_idx * 32,
dst_u + y_idx * 16 * dst_uvstride + x_idx * 16,
dst_v + y_idx * 16 * dst_uvstride + x_idx * 16,
dst_ystride, dst_uvstride);
}
x->mb_to_top_edge = edge[0];
x->mb_to_bottom_edge = edge[1];
x->mb_to_left_edge = edge[2];
x->mb_to_right_edge = edge[3];
x->pre.y_buffer = y1;
x->pre.u_buffer = u1;
x->pre.v_buffer = v1;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
x->second_pre.y_buffer = y2;
x->second_pre.u_buffer = u2;
x->second_pre.v_buffer = v2;
}
#if CONFIG_COMP_INTERINTRA_PRED
if (x->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
vp9_build_interintra_64x64_predictors_sb(x, dst_y, dst_u, dst_v,
dst_ystride, dst_uvstride);
}
#endif
}
#endif #endif
/* /*

View File

@ -54,6 +54,13 @@ extern void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
uint8_t *dst_v, uint8_t *dst_v,
int dst_ystride, int dst_ystride,
int dst_uvstride); int dst_uvstride);
extern void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
uint8_t *dst_y,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_ystride,
int dst_uvstride);
#endif #endif
extern void vp9_build_inter_predictors_mb(MACROBLOCKD *xd); extern void vp9_build_inter_predictors_mb(MACROBLOCKD *xd);

View File

@ -254,7 +254,7 @@ void vp9_build_intra_predictors_internal(uint8_t *src, int src_stride,
int up_available, int left_available) { int up_available, int left_available) {
uint8_t *yabove_row = src - src_stride; uint8_t *yabove_row = src - src_stride;
uint8_t yleft_col[32]; uint8_t yleft_col[64];
uint8_t ytop_left = yabove_row[-1]; uint8_t ytop_left = yabove_row[-1];
int r, c, i; int r, c, i;
@ -271,15 +271,19 @@ void vp9_build_intra_predictors_internal(uint8_t *src, int src_stride,
int average = 0; int average = 0;
int log2_bsize_minus_1; int log2_bsize_minus_1;
assert(bsize == 4 || bsize == 8 || bsize == 16 || bsize == 32); assert(bsize == 4 || bsize == 8 || bsize == 16 || bsize == 32 ||
bsize == 64);
if (bsize == 4) { if (bsize == 4) {
log2_bsize_minus_1 = 1; log2_bsize_minus_1 = 1;
} else if (bsize == 8) { } else if (bsize == 8) {
log2_bsize_minus_1 = 2; log2_bsize_minus_1 = 2;
} else if (bsize == 16) { } else if (bsize == 16) {
log2_bsize_minus_1 = 3; log2_bsize_minus_1 = 3;
} else /* bsize == 32 */ { } else if (bsize == 32) {
log2_bsize_minus_1 = 4; log2_bsize_minus_1 = 4;
} else {
assert(bsize == 64);
log2_bsize_minus_1 = 5;
} }
if (up_available || left_available) { if (up_available || left_available) {
@ -517,16 +521,17 @@ static void combine_interintra(MB_PREDICTION_MODE mode,
71, 70, 70, 70, 69, 69, 69, 68, 71, 70, 70, 70, 69, 69, 69, 68,
68, 68, 68, 68, 67, 67, 67, 67, 68, 68, 68, 68, 67, 67, 67, 67,
}; };
int size_scale = (size == 32 ? 1 : int size_scale = (size >= 32 ? 1 :
size == 16 ? 2 : size == 16 ? 2 :
size == 8 ? 4 : 8); size == 8 ? 4 : 8);
int size_shift = size == 64 ? 1 : 0;
int i, j; int i, j;
switch (mode) { switch (mode) {
case V_PRED: case V_PRED:
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) { for (j = 0; j < size; ++j) {
int k = i * interstride + j; int k = i * interstride + j;
int scale = weights1d[i * size_scale]; int scale = weights1d[i * size_scale >> size_shift];
interpred[k] = interpred[k] =
((scale_max - scale) * interpred[k] + ((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round) scale * intrapred[i * intrastride + j] + scale_round)
@ -539,7 +544,7 @@ static void combine_interintra(MB_PREDICTION_MODE mode,
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) { for (j = 0; j < size; ++j) {
int k = i * interstride + j; int k = i * interstride + j;
int scale = weights1d[j * size_scale]; int scale = weights1d[j * size_scale >> size_shift];
interpred[k] = interpred[k] =
((scale_max - scale) * interpred[k] + ((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round) scale * intrapred[i * intrastride + j] + scale_round)
@ -553,8 +558,9 @@ static void combine_interintra(MB_PREDICTION_MODE mode,
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) { for (j = 0; j < size; ++j) {
int k = i * interstride + j; int k = i * interstride + j;
int scale = (weights2d[i * size_scale * 32 + j * size_scale] + int scale = (weights2d[(i * size_scale * 32 +
weights1d[i * size_scale]) >> 1; j * size_scale) >> size_shift] +
weights1d[i * size_scale >> size_shift]) >> 1;
interpred[k] = interpred[k] =
((scale_max - scale) * interpred[k] + ((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round) scale * intrapred[i * intrastride + j] + scale_round)
@ -568,8 +574,9 @@ static void combine_interintra(MB_PREDICTION_MODE mode,
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) { for (j = 0; j < size; ++j) {
int k = i * interstride + j; int k = i * interstride + j;
int scale = (weights2d[i * size_scale * 32 + j * size_scale] + int scale = (weights2d[(i * size_scale * 32 +
weights1d[j * size_scale]) >> 1; j * size_scale) >> size_shift] +
weights1d[j * size_scale >> size_shift]) >> 1;
interpred[k] = interpred[k] =
((scale_max - scale) * interpred[k] + ((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round) scale * intrapred[i * intrastride + j] + scale_round)
@ -582,7 +589,8 @@ static void combine_interintra(MB_PREDICTION_MODE mode,
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) { for (j = 0; j < size; ++j) {
int k = i * interstride + j; int k = i * interstride + j;
int scale = weights2d[i * size_scale * 32 + j * size_scale]; int scale = weights2d[(i * size_scale * 32 +
j * size_scale) >> size_shift];
interpred[k] = interpred[k] =
((scale_max - scale) * interpred[k] + ((scale_max - scale) * interpred[k] +
scale * intrapred[i * intrastride + j] + scale_round) scale * intrapred[i * intrastride + j] + scale_round)
@ -695,6 +703,47 @@ void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd,
vp9_build_interintra_32x32_predictors_sby(xd, ypred, ystride); vp9_build_interintra_32x32_predictors_sby(xd, ypred, ystride);
vp9_build_interintra_32x32_predictors_sbuv(xd, upred, vpred, uvstride); vp9_build_interintra_32x32_predictors_sbuv(xd, upred, vpred, uvstride);
} }
void vp9_build_interintra_64x64_predictors_sby(MACROBLOCKD *xd,
uint8_t *ypred,
int ystride) {
uint8_t intrapredictor[4096];
const int mode = xd->mode_info_context->mbmi.interintra_mode;
vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
intrapredictor, 64, mode, 64,
xd->up_available, xd->left_available);
combine_interintra(xd->mode_info_context->mbmi.interintra_mode,
ypred, ystride, intrapredictor, 64, 64);
}
void vp9_build_interintra_64x64_predictors_sbuv(MACROBLOCKD *xd,
uint8_t *upred,
uint8_t *vpred,
int uvstride) {
uint8_t uintrapredictor[1024];
uint8_t vintrapredictor[1024];
const int mode = xd->mode_info_context->mbmi.interintra_uv_mode;
vp9_build_intra_predictors_internal(xd->dst.u_buffer, xd->dst.uv_stride,
uintrapredictor, 32, mode, 32,
xd->up_available, xd->left_available);
vp9_build_intra_predictors_internal(xd->dst.v_buffer, xd->dst.uv_stride,
vintrapredictor, 32, mode, 32,
xd->up_available, xd->left_available);
combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode,
upred, uvstride, uintrapredictor, 32, 32);
combine_interintra(xd->mode_info_context->mbmi.interintra_uv_mode,
vpred, uvstride, vintrapredictor, 32, 32);
}
void vp9_build_interintra_64x64_predictors_sb(MACROBLOCKD *xd,
uint8_t *ypred,
uint8_t *upred,
uint8_t *vpred,
int ystride,
int uvstride) {
vp9_build_interintra_64x64_predictors_sby(xd, ypred, ystride);
vp9_build_interintra_64x64_predictors_sbuv(xd, upred, vpred, uvstride);
}
#endif #endif
#endif #endif
@ -719,6 +768,13 @@ void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd) {
xd->mode_info_context->mbmi.mode, 32, xd->mode_info_context->mbmi.mode, 32,
xd->up_available, xd->left_available); xd->up_available, xd->left_available);
} }
void vp9_build_intra_predictors_sb64y_s(MACROBLOCKD *xd) {
vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
xd->dst.y_buffer, xd->dst.y_stride,
xd->mode_info_context->mbmi.mode, 64,
xd->up_available, xd->left_available);
}
#endif #endif
#if CONFIG_COMP_INTRA_PRED #if CONFIG_COMP_INTRA_PRED
@ -778,6 +834,13 @@ void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) {
xd->mode_info_context->mbmi.uv_mode, xd->mode_info_context->mbmi.uv_mode,
16); 16);
} }
void vp9_build_intra_predictors_sb64uv_s(MACROBLOCKD *xd) {
vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
xd->dst.v_buffer, xd->dst.uv_stride,
xd->mode_info_context->mbmi.uv_mode,
32);
}
#endif #endif
#if CONFIG_COMP_INTRA_PRED #if CONFIG_COMP_INTRA_PRED

View File

@ -41,6 +41,12 @@ extern void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd,
uint8_t *vpred, uint8_t *vpred,
int ystride, int ystride,
int uvstride); int uvstride);
extern void vp9_build_interintra_64x64_predictors_sb(MACROBLOCKD *xd,
uint8_t *ypred,
uint8_t *upred,
uint8_t *vpred,
int ystride,
int uvstride);
#endif // CONFIG_SUPERBLOCKS #endif // CONFIG_SUPERBLOCKS
#endif // VP9_COMMON_VP9_RECONINTRA_H_ #endif // VP9_COMMON_VP9_RECONINTRA_H_

View File

@ -434,12 +434,9 @@ void vp9_comp_intra4x4_predict_c(BLOCKD *x,
* to the right prediction have filled in pixels to use. * to the right prediction have filled in pixels to use.
*/ */
void vp9_intra_prediction_down_copy(MACROBLOCKD *xd) { void vp9_intra_prediction_down_copy(MACROBLOCKD *xd) {
int extend_edge = (xd->mb_to_right_edge == 0 && xd->mb_index < 2); int extend_edge = xd->mb_to_right_edge == 0 && xd->mb_index < 2;
uint8_t *above_right = *(xd->block[0].base_dst) + xd->block[0].dst - uint8_t *above_right = *(xd->block[0].base_dst) + xd->block[0].dst -
xd->block[0].dst_stride + 16; xd->block[0].dst_stride + 16;
uint32_t *src_ptr = (uint32_t *)
(above_right - (xd->mb_index == 3 ? 16 * xd->block[0].dst_stride : 0));
uint32_t *dst_ptr0 = (uint32_t *)above_right; uint32_t *dst_ptr0 = (uint32_t *)above_right;
uint32_t *dst_ptr1 = uint32_t *dst_ptr1 =
(uint32_t *)(above_right + 4 * xd->block[0].dst_stride); (uint32_t *)(above_right + 4 * xd->block[0].dst_stride);
@ -448,6 +445,17 @@ void vp9_intra_prediction_down_copy(MACROBLOCKD *xd) {
uint32_t *dst_ptr3 = uint32_t *dst_ptr3 =
(uint32_t *)(above_right + 12 * xd->block[0].dst_stride); (uint32_t *)(above_right + 12 * xd->block[0].dst_stride);
uint32_t *src_ptr = (uint32_t *) above_right;
if ((xd->sb_index >= 2 && xd->mb_to_right_edge == 0) ||
(xd->sb_index == 3 && xd->mb_index & 1))
src_ptr = (uint32_t *) (((uint8_t *) src_ptr) - 32 *
xd->block[0].dst_stride);
if (xd->mb_index == 3 ||
(xd->mb_to_right_edge == 0 && xd->mb_index == 2))
src_ptr = (uint32_t *) (((uint8_t *) src_ptr) - 16 *
xd->block[0].dst_stride);
if (extend_edge) { if (extend_edge) {
*src_ptr = ((uint8_t *) src_ptr)[-1] * 0x01010101U; *src_ptr = ((uint8_t *) src_ptr)[-1] * 0x01010101U;
} }

View File

@ -418,6 +418,9 @@ if [ "$CONFIG_VP9_ENCODER" = "yes" ]; then
prototype unsigned int vp9_variance32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" prototype unsigned int vp9_variance32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance32x32 specialize vp9_variance32x32
prototype unsigned int vp9_variance64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance64x64
prototype unsigned int vp9_variance16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" prototype unsigned int vp9_variance16x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance16x16 mmx sse2 specialize vp9_variance16x16 mmx sse2
vp9_variance16x16_sse2=vp9_variance16x16_wmt vp9_variance16x16_sse2=vp9_variance16x16_wmt
@ -443,6 +446,9 @@ specialize vp9_variance4x4 mmx sse2
vp9_variance4x4_sse2=vp9_variance4x4_wmt vp9_variance4x4_sse2=vp9_variance4x4_wmt
vp9_variance4x4_mmx=vp9_variance4x4_mmx vp9_variance4x4_mmx=vp9_variance4x4_mmx
prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int Refstride, unsigned int *sse"
specialize vp9_sub_pixel_variance64x64
prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int Refstride, unsigned int *sse" prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int Refstride, unsigned int *sse"
specialize vp9_sub_pixel_variance32x32 specialize vp9_sub_pixel_variance32x32
@ -467,6 +473,9 @@ prototype unsigned int vp9_sub_pixel_variance4x4 "const uint8_t *src_ptr, int so
specialize vp9_sub_pixel_variance4x4 sse2 mmx specialize vp9_sub_pixel_variance4x4 sse2 mmx
vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
prototype unsigned int vp9_sad64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad64x64
prototype unsigned int vp9_sad32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad" prototype unsigned int vp9_sad32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad32x32 specialize vp9_sad32x32
@ -502,6 +511,15 @@ prototype unsigned int vp9_variance_halfpixvar16x16_hv "const uint8_t *src_ptr,
specialize vp9_variance_halfpixvar16x16_hv mmx sse2 specialize vp9_variance_halfpixvar16x16_hv mmx sse2
vp9_variance_halfpixvar16x16_hv_sse2=vp9_variance_halfpixvar16x16_hv_wmt vp9_variance_halfpixvar16x16_hv_sse2=vp9_variance_halfpixvar16x16_hv_wmt
prototype unsigned int vp9_variance_halfpixvar64x64_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance_halfpixvar64x64_h
prototype unsigned int vp9_variance_halfpixvar64x64_v "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance_halfpixvar64x64_v
prototype unsigned int vp9_variance_halfpixvar64x64_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance_halfpixvar64x64_hv
prototype unsigned int vp9_variance_halfpixvar32x32_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" prototype unsigned int vp9_variance_halfpixvar32x32_h "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance_halfpixvar32x32_h specialize vp9_variance_halfpixvar32x32_h
@ -511,6 +529,9 @@ specialize vp9_variance_halfpixvar32x32_v
prototype unsigned int vp9_variance_halfpixvar32x32_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse" prototype unsigned int vp9_variance_halfpixvar32x32_hv "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance_halfpixvar32x32_hv specialize vp9_variance_halfpixvar32x32_hv
prototype void vp9_sad64x64x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp9_sad64x64x3
prototype void vp9_sad32x32x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" prototype void vp9_sad32x32x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp9_sad32x32x3 specialize vp9_sad32x32x3
@ -529,6 +550,9 @@ specialize vp9_sad8x8x3 sse3
prototype void vp9_sad4x4x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array" prototype void vp9_sad4x4x3 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp9_sad4x4x3 sse3 specialize vp9_sad4x4x3 sse3
prototype void vp9_sad64x64x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint16_t *sad_array"
specialize vp9_sad64x64x8
prototype void vp9_sad32x32x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint16_t *sad_array" prototype void vp9_sad32x32x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint16_t *sad_array"
specialize vp9_sad32x32x8 specialize vp9_sad32x32x8
@ -547,6 +571,9 @@ specialize vp9_sad8x8x8 sse4
prototype void vp9_sad4x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint16_t *sad_array" prototype void vp9_sad4x4x8 "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint16_t *sad_array"
specialize vp9_sad4x4x8 sse4 specialize vp9_sad4x4x8 sse4
prototype void vp9_sad64x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t **ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp9_sad64x64x4d
prototype void vp9_sad32x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t **ref_ptr, int ref_stride, unsigned int *sad_array" prototype void vp9_sad32x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t **ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp9_sad32x32x4d specialize vp9_sad32x32x4d
@ -583,6 +610,9 @@ prototype unsigned int vp9_mse16x16 "const uint8_t *src_ptr, int source_stride,
specialize vp9_mse16x16 mmx sse2 specialize vp9_mse16x16 mmx sse2
vp9_mse16x16_sse2=vp9_mse16x16_wmt vp9_mse16x16_sse2=vp9_mse16x16_wmt
prototype unsigned int vp9_sub_pixel_mse64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int Refstride, unsigned int *sse"
specialize vp9_sub_pixel_mse64x64
prototype unsigned int vp9_sub_pixel_mse32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int Refstride, unsigned int *sse" prototype unsigned int vp9_sub_pixel_mse32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int Refstride, unsigned int *sse"
specialize vp9_sub_pixel_mse32x32 specialize vp9_sub_pixel_mse32x32

View File

@ -14,7 +14,7 @@
#include "vp9/common/vp9_entropymode.h" #include "vp9/common/vp9_entropymode.h"
#include "vp9/decoder/vp9_onyxd_int.h" #include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/common/vp9_findnearmv.h" #include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_seg_common.h" #include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropy.h"
@ -122,7 +122,24 @@ static void kfread_modes(VP9D_COMP *pbi,
m->mbmi.segment_id = 0; m->mbmi.segment_id = 0;
if (pbi->mb.update_mb_segmentation_map) { if (pbi->mb.update_mb_segmentation_map) {
read_mb_segid(bc, &m->mbmi, &pbi->mb); read_mb_segid(bc, &m->mbmi, &pbi->mb);
pbi->common.last_frame_seg_map[map_index] = m->mbmi.segment_id; #if CONFIG_SUPERBLOCKS
if (m->mbmi.sb_type) {
const int nmbs = 1 << m->mbmi.sb_type;
const int ymbs = MIN(cm->mb_rows - mb_row, nmbs);
const int xmbs = MIN(cm->mb_cols - mb_col, nmbs);
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
cm->last_frame_seg_map[map_index + x + y * cm->mb_cols] =
m->mbmi.segment_id;
}
}
} else
#endif
{
cm->last_frame_seg_map[map_index] = m->mbmi.segment_id;
}
} }
m->mbmi.mb_skip_coeff = 0; m->mbmi.mb_skip_coeff = 0;
@ -145,7 +162,7 @@ static void kfread_modes(VP9D_COMP *pbi,
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) { if (m->mbmi.sb_type) {
y_mode = (MB_PREDICTION_MODE) read_kf_sb_ymode(bc, y_mode = (MB_PREDICTION_MODE) read_kf_sb_ymode(bc,
pbi->common.sb_kf_ymode_prob[pbi->common.kf_ymode_probs_index]); pbi->common.sb_kf_ymode_prob[pbi->common.kf_ymode_probs_index]);
} else } else
@ -212,12 +229,12 @@ static void kfread_modes(VP9D_COMP *pbi,
if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED) { if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED) {
m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[1]); m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[1]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS #if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
if (m->mbmi.txfm_size != TX_8X8 && m->mbmi.encoded_as_sb) if (m->mbmi.txfm_size != TX_8X8 && m->mbmi.sb_type)
m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[2]); m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[2]);
#endif #endif
} }
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS #if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
} else if (cm->txfm_mode >= ALLOW_32X32 && m->mbmi.encoded_as_sb) { } else if (cm->txfm_mode >= ALLOW_32X32 && m->mbmi.sb_type) {
m->mbmi.txfm_size = TX_32X32; m->mbmi.txfm_size = TX_32X32;
#endif #endif
} else if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) { } else if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) {
@ -638,14 +655,17 @@ static void read_mb_segment_id(VP9D_COMP *pbi,
read_mb_segid(bc, mbmi, xd); read_mb_segid(bc, mbmi, xd);
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (mbmi->encoded_as_sb) { if (mbmi->sb_type) {
cm->last_frame_seg_map[index] = mbmi->segment_id; const int nmbs = 1 << mbmi->sb_type;
if (mb_col + 1 < cm->mb_cols) const int ymbs = MIN(cm->mb_rows - mb_row, nmbs);
cm->last_frame_seg_map[index + 1] = mbmi->segment_id; const int xmbs = MIN(cm->mb_cols - mb_col, nmbs);
if (mb_row + 1 < cm->mb_rows) { int x, y;
cm->last_frame_seg_map[index + cm->mb_cols] = mbmi->segment_id;
if (mb_col + 1 < cm->mb_cols) for (y = 0; y < ymbs; y++) {
cm->last_frame_seg_map[index + cm->mb_cols + 1] = mbmi->segment_id; for (x = 0; x < xmbs; x++) {
cm->last_frame_seg_map[index + x + y * cm->mb_cols] =
mbmi->segment_id;
}
} }
} else } else
#endif #endif
@ -654,18 +674,21 @@ static void read_mb_segment_id(VP9D_COMP *pbi,
} }
} else { } else {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (mbmi->encoded_as_sb) { if (mbmi->sb_type) {
mbmi->segment_id = cm->last_frame_seg_map[index]; const int nmbs = 1 << mbmi->sb_type;
if (mb_col < cm->mb_cols - 1) const int ymbs = MIN(cm->mb_rows - mb_row, nmbs);
mbmi->segment_id = mbmi->segment_id && const int xmbs = MIN(cm->mb_cols - mb_col, nmbs);
cm->last_frame_seg_map[index + 1]; unsigned segment_id = -1;
if (mb_row < cm->mb_rows - 1) { int x, y;
mbmi->segment_id = mbmi->segment_id &&
cm->last_frame_seg_map[index + cm->mb_cols]; for (y = 0; y < ymbs; y++) {
if (mb_col < cm->mb_cols - 1) for (x = 0; x < xmbs; x++) {
mbmi->segment_id = mbmi->segment_id && segment_id = MIN(segment_id,
cm->last_frame_seg_map[index + cm->mb_cols + 1]; cm->last_frame_seg_map[index + x +
y * cm->mb_cols]);
} }
}
mbmi->segment_id = segment_id;
} else } else
#endif #endif
{ {
@ -693,6 +716,11 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mb_to_right_edge; int mb_to_right_edge;
int mb_to_top_edge; int mb_to_top_edge;
int mb_to_bottom_edge; int mb_to_bottom_edge;
#if CONFIG_SUPERBLOCKS
const int mb_size = 1 << mi->mbmi.sb_type;
#else
const int mb_size = 1;
#endif
mb_to_top_edge = xd->mb_to_top_edge; mb_to_top_edge = xd->mb_to_top_edge;
mb_to_bottom_edge = xd->mb_to_bottom_edge; mb_to_bottom_edge = xd->mb_to_bottom_edge;
@ -707,18 +735,8 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
xd->mb_to_left_edge = xd->mb_to_left_edge =
mb_to_left_edge = -((mb_col * 16) << 3); mb_to_left_edge = -((mb_col * 16) << 3);
mb_to_left_edge -= LEFT_TOP_MARGIN; mb_to_left_edge -= LEFT_TOP_MARGIN;
#if CONFIG_SUPERBLOCKS
if (mi->mbmi.encoded_as_sb) {
xd->mb_to_right_edge = xd->mb_to_right_edge =
mb_to_right_edge = ((pbi->common.mb_cols - 2 - mb_col) * 16) << 3; mb_to_right_edge = ((pbi->common.mb_cols - mb_size - mb_col) * 16) << 3;
} else {
#endif
xd->mb_to_right_edge =
mb_to_right_edge = ((pbi->common.mb_cols - 1 - mb_col) * 16) << 3;
#if CONFIG_SUPERBLOCKS
}
#endif
mb_to_right_edge += RIGHT_BOTTOM_MARGIN; mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
// Make sure the MACROBLOCKD mode info pointer is pointed at the // Make sure the MACROBLOCKD mode info pointer is pointed at the
@ -801,7 +819,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
vp9_get_segdata(xd, mbmi->segment_id, SEG_LVL_MODE); vp9_get_segdata(xd, mbmi->segment_id, SEG_LVL_MODE);
} else { } else {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (mbmi->encoded_as_sb) if (mbmi->sb_type)
mbmi->mode = read_sb_mv_ref(bc, mv_ref_p); mbmi->mode = read_sb_mv_ref(bc, mv_ref_p);
else else
#endif #endif
@ -1155,7 +1173,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->mode = (MB_PREDICTION_MODE) mbmi->mode = (MB_PREDICTION_MODE)
vp9_get_segdata(xd, mbmi->segment_id, SEG_LVL_MODE); vp9_get_segdata(xd, mbmi->segment_id, SEG_LVL_MODE);
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
} else if (mbmi->encoded_as_sb) { } else if (mbmi->sb_type) {
mbmi->mode = (MB_PREDICTION_MODE) mbmi->mode = (MB_PREDICTION_MODE)
read_sb_ymode(bc, pbi->common.fc.sb_ymode_prob); read_sb_ymode(bc, pbi->common.fc.sb_ymode_prob);
pbi->common.fc.sb_ymode_counts[mbmi->mode]++; pbi->common.fc.sb_ymode_counts[mbmi->mode]++;
@ -1232,12 +1250,12 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->mode != SPLITMV) { mbmi->mode != SPLITMV) {
mbmi->txfm_size += vp9_read(bc, cm->prob_tx[1]); mbmi->txfm_size += vp9_read(bc, cm->prob_tx[1]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS #if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
if (mbmi->encoded_as_sb && mbmi->txfm_size != TX_8X8) if (mbmi->sb_type && mbmi->txfm_size != TX_8X8)
mbmi->txfm_size += vp9_read(bc, cm->prob_tx[2]); mbmi->txfm_size += vp9_read(bc, cm->prob_tx[2]);
#endif #endif
} }
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS #if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
} else if (mbmi->encoded_as_sb && cm->txfm_mode >= ALLOW_32X32) { } else if (mbmi->sb_type && cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32; mbmi->txfm_size = TX_32X32;
#endif #endif
} else if (cm->txfm_mode >= ALLOW_16X16 && } else if (cm->txfm_mode >= ALLOW_16X16 &&

View File

@ -10,6 +10,7 @@
#include "vp9/decoder/vp9_onyxd_int.h" #include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_header.h" #include "vp9/common/vp9_header.h"
#include "vp9/common/vp9_reconintra.h" #include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconintra4x4.h" #include "vp9/common/vp9_reconintra4x4.h"
@ -172,27 +173,43 @@ static void mb_init_dequantizer(VP9D_COMP *pbi, MACROBLOCKD *xd) {
static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) { static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) {
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { #if CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
vp9_build_intra_predictors_sb64uv_s(xd);
vp9_build_intra_predictors_sb64y_s(xd);
} else
#endif // CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
vp9_build_intra_predictors_sbuv_s(xd); vp9_build_intra_predictors_sbuv_s(xd);
vp9_build_intra_predictors_sby_s(xd); vp9_build_intra_predictors_sby_s(xd);
} else { } else
#endif #endif // CONFIG_SUPERBLOCKS
{
vp9_build_intra_predictors_mbuv_s(xd); vp9_build_intra_predictors_mbuv_s(xd);
vp9_build_intra_predictors_mby_s(xd); vp9_build_intra_predictors_mby_s(xd);
#if CONFIG_SUPERBLOCKS
} }
#endif
} else { } else {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { #if CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
vp9_build_inter64x64_predictors_sb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
} else
#endif // CONFIG_SUPERBLOCKS64
if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
vp9_build_inter32x32_predictors_sb(xd, vp9_build_inter32x32_predictors_sb(xd,
xd->dst.y_buffer, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.u_buffer,
xd->dst.v_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.y_stride,
xd->dst.uv_stride); xd->dst.uv_stride);
} else { } else
#endif #endif // CONFIG_SUPERBLOCKS
{
vp9_build_1st_inter16x16_predictors_mb(xd, vp9_build_1st_inter16x16_predictors_mb(xd,
xd->dst.y_buffer, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.u_buffer,
@ -218,9 +235,7 @@ static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) {
xd->dst.uv_stride); xd->dst.uv_stride);
} }
#endif #endif
#if CONFIG_SUPERBLOCKS
} }
#endif
} }
} }
@ -546,8 +561,9 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
static void decode_16x16_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, static void decode_16x16_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
BOOL_DECODER* const bc, int n) { BOOL_DECODER* const bc, int n,
int x_idx = n & 1, y_idx = n >> 1; int maska, int shiftb) {
int x_idx = n & maska, y_idx = n >> shiftb;
TX_TYPE tx_type = get_tx_type_16x16(xd, &xd->block[0]); TX_TYPE tx_type = get_tx_type_16x16(xd, &xd->block[0]);
if (tx_type != DCT_DCT) { if (tx_type != DCT_DCT) {
vp9_ht_dequant_idct_add_16x16_c( vp9_ht_dequant_idct_add_16x16_c(
@ -571,9 +587,10 @@ static void decode_16x16_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
}; };
static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
BOOL_DECODER* const bc, int n) { BOOL_DECODER* const bc, int n,
int maska, int shiftb) {
int x_idx = n & maska, y_idx = n >> shiftb;
BLOCKD *b = &xd->block[24]; BLOCKD *b = &xd->block[24];
int x_idx = n & 1, y_idx = n >> 1;
TX_TYPE tx_type = get_tx_type_8x8(xd, &xd->block[0]); TX_TYPE tx_type = get_tx_type_8x8(xd, &xd->block[0]);
if (tx_type != DCT_DCT) { if (tx_type != DCT_DCT) {
int i; int i;
@ -632,9 +649,10 @@ static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
}; };
static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
BOOL_DECODER* const bc, int n) { BOOL_DECODER* const bc, int n,
int maska, int shiftb) {
int x_idx = n & maska, y_idx = n >> shiftb;
BLOCKD *b = &xd->block[24]; BLOCKD *b = &xd->block[24];
int x_idx = n & 1, y_idx = n >> 1;
TX_TYPE tx_type = get_tx_type_4x4(xd, &xd->block[0]); TX_TYPE tx_type = get_tx_type_4x4(xd, &xd->block[0]);
if (tx_type != DCT_DCT) { if (tx_type != DCT_DCT) {
int i; int i;
@ -687,7 +705,8 @@ static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->dst.uv_stride, xd->eobs + 16, xd); xd->dst.uv_stride, xd->eobs + 16, xd);
}; };
static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd, #if CONFIG_SUPERBLOCKS64
static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
int mb_row, unsigned int mb_col, int mb_row, unsigned int mb_col,
BOOL_DECODER* const bc) { BOOL_DECODER* const bc) {
int i, n, eobtotal; int i, n, eobtotal;
@ -696,7 +715,138 @@ static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
MODE_INFO *orig_mi = xd->mode_info_context; MODE_INFO *orig_mi = xd->mode_info_context;
const int mis = pc->mode_info_stride; const int mis = pc->mode_info_stride;
assert(xd->mode_info_context->mbmi.encoded_as_sb); assert(xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64);
if (pbi->common.frame_type != KEY_FRAME)
vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, pc);
// re-initialize macroblock dequantizer before detokenization
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
if (xd->mode_info_context->mbmi.mb_skip_coeff) {
int n;
vp9_reset_mb_tokens_context(xd);
for (n = 1; n <= 3; n++) {
if (mb_col < pc->mb_cols - n)
xd->above_context += n;
if (mb_row < pc->mb_rows - n)
xd->left_context += n;
vp9_reset_mb_tokens_context(xd);
if (mb_col < pc->mb_cols - n)
xd->above_context -= n;
if (mb_row < pc->mb_rows - n)
xd->left_context -= n;
}
/* Special case: Force the loopfilter to skip when eobtotal and
* mb_skip_coeff are zero.
*/
skip_recon_mb(pbi, xd);
return;
}
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp9_build_intra_predictors_sb64y_s(xd);
vp9_build_intra_predictors_sb64uv_s(xd);
} else {
vp9_build_inter64x64_predictors_sb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
}
/* dequantization and idct */
#if CONFIG_TX32X32
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
for (n = 0; n < 4; n++) {
const int x_idx = n & 1, y_idx = n >> 1;
if (mb_col + x_idx * 2 >= pc->mb_cols ||
mb_row + y_idx * 2 >= pc->mb_rows)
continue;
xd->left_context = pc->left_context + (y_idx << 1);
xd->above_context = pc->above_context + mb_col + (x_idx << 1);
xd->mode_info_context = orig_mi + x_idx * 2 + y_idx * 2 * mis;
eobtotal = vp9_decode_sb_tokens(pbi, xd, bc);
if (eobtotal == 0) { // skip loopfilter
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
if (mb_col + 1 < pc->mb_cols)
xd->mode_info_context[1].mbmi.mb_skip_coeff = 1;
if (mb_row + 1 < pc->mb_rows) {
xd->mode_info_context[mis].mbmi.mb_skip_coeff = 1;
if (mb_col + 1 < pc->mb_cols)
xd->mode_info_context[mis + 1].mbmi.mb_skip_coeff = 1;
}
} else {
vp9_dequant_idct_add_32x32(xd->sb_coeff_data.qcoeff, xd->block[0].dequant,
xd->dst.y_buffer + x_idx * 32 +
xd->dst.y_stride * y_idx * 32,
xd->dst.y_buffer + x_idx * 32 +
xd->dst.y_stride * y_idx * 32,
xd->dst.y_stride, xd->dst.y_stride,
xd->eobs[0]);
vp9_dequant_idct_add_uv_block_16x16_c(xd->sb_coeff_data.qcoeff + 1024,
xd->block[16].dequant,
xd->dst.u_buffer + x_idx * 16 +
xd->dst.uv_stride * y_idx * 16,
xd->dst.v_buffer + x_idx * 16 +
xd->dst.uv_stride * y_idx * 16,
xd->dst.uv_stride, xd->eobs + 16);
}
}
} else {
#endif
for (n = 0; n < 16; n++) {
int x_idx = n & 3, y_idx = n >> 2;
if (mb_col + x_idx >= pc->mb_cols || mb_row + y_idx >= pc->mb_rows)
continue;
xd->above_context = pc->above_context + mb_col + x_idx;
xd->left_context = pc->left_context + y_idx;
xd->mode_info_context = orig_mi + x_idx + y_idx * mis;
for (i = 0; i < 25; i++) {
xd->block[i].eob = 0;
xd->eobs[i] = 0;
}
eobtotal = vp9_decode_mb_tokens(pbi, xd, bc);
if (eobtotal == 0) { // skip loopfilter
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
continue;
}
if (tx_size == TX_16X16) {
decode_16x16_sb(pbi, xd, bc, n, 3, 2);
} else if (tx_size == TX_8X8) {
decode_8x8_sb(pbi, xd, bc, n, 3, 2);
} else {
decode_4x4_sb(pbi, xd, bc, n, 3, 2);
}
}
#if CONFIG_TX32X32
}
#endif
xd->above_context = pc->above_context + mb_col;
xd->left_context = pc->left_context;
xd->mode_info_context = orig_mi;
}
#endif // CONFIG_SUPERBLOCKS64
static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd,
int mb_row, unsigned int mb_col,
BOOL_DECODER* const bc) {
int i, n, eobtotal;
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
VP9_COMMON *const pc = &pbi->common;
MODE_INFO *orig_mi = xd->mode_info_context;
const int mis = pc->mode_info_stride;
assert(xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32);
if (pbi->common.frame_type != KEY_FRAME) if (pbi->common.frame_type != KEY_FRAME)
vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, pc); vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, pc);
@ -767,7 +917,7 @@ static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->above_context = pc->above_context + mb_col + x_idx; xd->above_context = pc->above_context + mb_col + x_idx;
xd->left_context = pc->left_context + y_idx; xd->left_context = pc->left_context + y_idx + (mb_row & 2);
xd->mode_info_context = orig_mi + x_idx + y_idx * mis; xd->mode_info_context = orig_mi + x_idx + y_idx * mis;
for (i = 0; i < 25; i++) { for (i = 0; i < 25; i++) {
xd->block[i].eob = 0; xd->block[i].eob = 0;
@ -781,16 +931,16 @@ static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
} }
if (tx_size == TX_16X16) { if (tx_size == TX_16X16) {
decode_16x16_sb(pbi, xd, bc, n); decode_16x16_sb(pbi, xd, bc, n, 1, 1);
} else if (tx_size == TX_8X8) { } else if (tx_size == TX_8X8) {
decode_8x8_sb(pbi, xd, bc, n); decode_8x8_sb(pbi, xd, bc, n, 1, 1);
} else { } else {
decode_4x4_sb(pbi, xd, bc, n); decode_4x4_sb(pbi, xd, bc, n, 1, 1);
} }
} }
xd->above_context = pc->above_context + mb_col; xd->above_context = pc->above_context + mb_col;
xd->left_context = pc->left_context; xd->left_context = pc->left_context + (mb_row & 2);
xd->mode_info_context = orig_mi; xd->mode_info_context = orig_mi;
#if CONFIG_TX32X32 #if CONFIG_TX32X32
} }
@ -807,7 +957,7 @@ static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
int tx_size; int tx_size;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
assert(!xd->mode_info_context->mbmi.encoded_as_sb); assert(!xd->mode_info_context->mbmi.sb_type);
#endif #endif
// re-initialize macroblock dequantizer before detokenization // re-initialize macroblock dequantizer before detokenization
@ -930,190 +1080,186 @@ static int get_delta_q(vp9_reader *bc, int prev, int *q_update) {
FILE *vpxlog = 0; FILE *vpxlog = 0;
#endif #endif
/* Decode a row of Superblocks (2x2 region of MBs) */ static void set_offsets(VP9D_COMP *pbi, int block_size,
static void int mb_row, int mb_col) {
decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc, int mbrow, MACROBLOCKD *xd, VP9_COMMON *const cm = &pbi->common;
BOOL_DECODER* const bc) { MACROBLOCKD *const xd = &pbi->mb;
int i; const int mis = cm->mode_info_stride;
int sb_col; const int idx = mis * mb_row + mb_col;
int mb_row, mb_col; const int dst_fb_idx = cm->new_fb_idx;
int recon_yoffset, recon_uvoffset; const int recon_y_stride = cm->yv12_fb[dst_fb_idx].y_stride;
int ref_fb_idx = pc->lst_fb_idx; const int recon_uv_stride = cm->yv12_fb[dst_fb_idx].uv_stride;
int dst_fb_idx = pc->new_fb_idx; const int recon_yoffset = mb_row * 16 * recon_y_stride + 16 * mb_col;
int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride; const int recon_uvoffset = mb_row * 8 * recon_uv_stride + 8 * mb_col;
int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
int sb_cols = (pc->mb_cols + 1) >> 1;
// For a SB there are 2 left contexts, each pertaining to a MB row within
vpx_memset(pc->left_context, 0, sizeof(pc->left_context));
mb_row = mbrow;
mb_col = 0;
for (sb_col = 0; sb_col < sb_cols; sb_col++) {
MODE_INFO *mi = xd->mode_info_context;
xd->mode_info_context = cm->mi + idx;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
mi->mbmi.encoded_as_sb = vp9_read(bc, pc->sb_coded); xd->mode_info_context->mbmi.sb_type = block_size >> 5;
#endif #endif
xd->prev_mode_info_context = cm->prev_mi + idx;
// Process the 4 MBs within the SB in the order: xd->above_context = cm->above_context + mb_col;
// top-left, top-right, bottom-left, bottom-right xd->left_context = cm->left_context + (mb_row & 3);
for (i = 0; i < 4; i++) {
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * xd->mode_info_stride + dx;
xd->mb_index = i;
mi = xd->mode_info_context;
if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols)) {
// MB lies outside frame, skip on to next
mb_row += dy;
mb_col += dx;
xd->mode_info_context += offset_extended;
xd->prev_mode_info_context += offset_extended;
continue;
}
#if CONFIG_SUPERBLOCKS
if (i)
mi->mbmi.encoded_as_sb = 0;
#endif
// Set above context pointer
xd->above_context = pc->above_context + mb_col;
xd->left_context = pc->left_context + (i >> 1);
/* Distance of Mb to the various image edges. /* Distance of Mb to the various image edges.
* These are specified to 8th pel as they are always compared to * These are specified to 8th pel as they are always compared to
* values that are in 1/8th pel units * values that are in 1/8th pel units
*/ */
block_size >>= 4; // in mb units
xd->mb_to_top_edge = -((mb_row * 16)) << 3; xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_left_edge = -((mb_col * 16) << 3); xd->mb_to_left_edge = -((mb_col * 16) << 3);
#if CONFIG_SUPERBLOCKS xd->mb_to_bottom_edge = ((cm->mb_rows - block_size - mb_row) * 16) << 3;
if (mi->mbmi.encoded_as_sb) { xd->mb_to_right_edge = ((cm->mb_cols - block_size - mb_col) * 16) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 2 - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((pc->mb_cols - 2 - mb_col) * 16) << 3;
} else {
#endif
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
#if CONFIG_SUPERBLOCKS
}
#endif
#ifdef DEC_DEBUG
dec_debug = (pbi->common.current_video_frame == 46 &&
mb_row == 5 && mb_col == 2);
if (dec_debug)
#if CONFIG_SUPERBLOCKS
printf("Enter Debug %d %d sb %d\n", mb_row, mb_col,
mi->mbmi.encoded_as_sb);
#else
printf("Enter Debug %d %d\n", mb_row, mb_col);
#endif
#endif
xd->up_available = (mb_row != 0); xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0); xd->left_available = (mb_col != 0);
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
}
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16); static void set_refs(VP9D_COMP *pbi, int block_size,
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8); int mb_row, int mb_col) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; if (mbmi->ref_frame > INTRA_FRAME) {
xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; int ref_fb_idx, ref_yoffset, ref_uvoffset, ref_y_stride, ref_uv_stride;
xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
vp9_decode_mb_mode_mv(pbi, xd, mb_row, mb_col, bc);
update_blockd_bmi(xd);
#ifdef DEC_DEBUG
if (dec_debug)
printf("Hello\n");
#endif
/* Select the appropriate reference frame for this MB */ /* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) if (mbmi->ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx; ref_fb_idx = cm->lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) else if (mbmi->ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx; ref_fb_idx = cm->gld_fb_idx;
else else
ref_fb_idx = pc->alt_fb_idx; ref_fb_idx = cm->alt_fb_idx;
xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; ref_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; ref_yoffset = mb_row * 16 * ref_y_stride + 16 * mb_col;
xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; xd->pre.y_buffer = cm->yv12_fb[ref_fb_idx].y_buffer + ref_yoffset;
ref_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
ref_uvoffset = mb_row * 8 * ref_uv_stride + 8 * mb_col;
xd->pre.u_buffer = cm->yv12_fb[ref_fb_idx].u_buffer + ref_uvoffset;
xd->pre.v_buffer = cm->yv12_fb[ref_fb_idx].v_buffer + ref_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame > 0) { /* propagate errors from reference frames */
xd->corrupted |= cm->yv12_fb[ref_fb_idx].corrupted;
if (mbmi->second_ref_frame > INTRA_FRAME) {
int second_ref_fb_idx; int second_ref_fb_idx;
/* Select the appropriate reference frame for this MB */ /* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME) if (mbmi->second_ref_frame == LAST_FRAME)
second_ref_fb_idx = pc->lst_fb_idx; second_ref_fb_idx = cm->lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame == else if (mbmi->second_ref_frame == GOLDEN_FRAME)
GOLDEN_FRAME) second_ref_fb_idx = cm->gld_fb_idx;
second_ref_fb_idx = pc->gld_fb_idx;
else else
second_ref_fb_idx = pc->alt_fb_idx; second_ref_fb_idx = cm->alt_fb_idx;
xd->second_pre.y_buffer = xd->second_pre.y_buffer =
pc->yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset; cm->yv12_fb[second_ref_fb_idx].y_buffer + ref_yoffset;
xd->second_pre.u_buffer = xd->second_pre.u_buffer =
pc->yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset; cm->yv12_fb[second_ref_fb_idx].u_buffer + ref_uvoffset;
xd->second_pre.v_buffer = xd->second_pre.v_buffer =
pc->yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset; cm->yv12_fb[second_ref_fb_idx].v_buffer + ref_uvoffset;
}
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
/* propagate errors from reference frames */ /* propagate errors from reference frames */
xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted; xd->corrupted |= cm->yv12_fb[second_ref_fb_idx].corrupted;
}
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (mbmi->sb_type) {
if (mb_col < pc->mb_cols - 1) const int n_mbs = 1 << mbmi->sb_type;
mi[1] = mi[0]; const int y_mbs = MIN(n_mbs, cm->mb_rows - mb_row);
if (mb_row < pc->mb_rows - 1) { const int x_mbs = MIN(n_mbs, cm->mb_cols - mb_col);
mi[pc->mode_info_stride] = mi[0]; const int mis = cm->mode_info_stride;
if (mb_col < pc->mb_cols - 1) int x, y;
mi[pc->mode_info_stride + 1] = mi[0];
for (y = 0; y < y_mbs; y++) {
for (x = !y; x < x_mbs; x++) {
mi[y * mis + x] = *mi;
}
} }
} }
if (xd->mode_info_context->mbmi.encoded_as_sb) {
decode_superblock(pbi, xd, mb_row, mb_col, bc);
} else {
#endif #endif
}
/* Decode a row of Superblocks (2x2 region of MBs) */
static void decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc,
int mb_row, MACROBLOCKD *xd,
BOOL_DECODER* const bc) {
int mb_col;
// For a SB there are 2 left contexts, each pertaining to a MB row within
vpx_memset(pc->left_context, 0, sizeof(pc->left_context));
for (mb_col = 0; mb_col < pc->mb_cols; mb_col += 4) {
#if CONFIG_SUPERBLOCKS64 && CONFIG_SUPERBLOCKS
if (vp9_read(bc, pc->sb64_coded)) {
set_offsets(pbi, 64, mb_row, mb_col);
vp9_decode_mb_mode_mv(pbi, xd, mb_row, mb_col, bc);
set_refs(pbi, 64, mb_row, mb_col);
decode_superblock64(pbi, xd, mb_row, mb_col, bc);
xd->corrupted |= bool_error(bc);
} else
#endif // CONFIG_SUPERBLOCKS64
{
int j;
for (j = 0; j < 4; j++) {
const int x_idx_sb = (j & 1) << 1, y_idx_sb = j & 2;
if (mb_row + y_idx_sb >= pc->mb_rows ||
mb_col + x_idx_sb >= pc->mb_cols) {
// MB lies outside frame, skip on to next
continue;
}
xd->sb_index = j;
#if CONFIG_SUPERBLOCKS
if (vp9_read(bc, pc->sb32_coded)) {
set_offsets(pbi, 32, mb_row + y_idx_sb, mb_col + x_idx_sb);
vp9_decode_mb_mode_mv(pbi,
xd, mb_row + y_idx_sb, mb_col + x_idx_sb, bc);
set_refs(pbi, 32, mb_row + y_idx_sb, mb_col + x_idx_sb);
decode_superblock32(pbi,
xd, mb_row + y_idx_sb, mb_col + x_idx_sb, bc);
xd->corrupted |= bool_error(bc);
} else
#endif // CONFIG_SUPERBLOCKS
{
int i;
// Process the 4 MBs within the SB in the order:
// top-left, top-right, bottom-left, bottom-right
for (i = 0; i < 4; i++) {
const int x_idx = x_idx_sb + (i & 1), y_idx = y_idx_sb + (i >> 1);
if (mb_row + y_idx >= pc->mb_rows ||
mb_col + x_idx >= pc->mb_cols) {
// MB lies outside frame, skip on to next
continue;
}
set_offsets(pbi, 16, mb_row + y_idx, mb_col + x_idx);
xd->mb_index = i;
vp9_decode_mb_mode_mv(pbi, xd, mb_row + y_idx, mb_col + x_idx, bc);
update_blockd_bmi(xd);
set_refs(pbi, 16, mb_row + y_idx, mb_col + x_idx);
vp9_intra_prediction_down_copy(xd); vp9_intra_prediction_down_copy(xd);
decode_macroblock(pbi, xd, mb_row, mb_col, bc); decode_macroblock(pbi, xd, mb_row, mb_col, bc);
#if CONFIG_SUPERBLOCKS
}
#endif
/* check if the boolean decoder has suffered an error */ /* check if the boolean decoder has suffered an error */
xd->corrupted |= bool_error(bc); xd->corrupted |= bool_error(bc);
#if CONFIG_SUPERBLOCKS
if (mi->mbmi.encoded_as_sb) {
assert(!i);
mb_col += 2;
xd->mode_info_context += 2;
xd->prev_mode_info_context += 2;
break;
}
#endif
// skip to next MB
xd->mode_info_context += offset_extended;
xd->prev_mode_info_context += offset_extended;
mb_row += dy;
mb_col += dx;
} }
} }
}
/* skip prediction column */ }
xd->mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride; }
xd->prev_mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride;
} }
static unsigned int read_partition_size(const unsigned char *cx_size) { static unsigned int read_partition_size(const unsigned char *cx_size) {
@ -1462,7 +1608,10 @@ int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
pc->sb_coded = vp9_read_literal(&header_bc, 8); #if CONFIG_SUPERBLOCKS64
pc->sb64_coded = vp9_read_literal(&header_bc, 8);
#endif
pc->sb32_coded = vp9_read_literal(&header_bc, 8);
#endif #endif
/* Read the loop filter level and type */ /* Read the loop filter level and type */
@ -1727,12 +1876,8 @@ int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {
vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
// Resset the macroblock mode info context to the start of the list
xd->mode_info_context = pc->mi;
xd->prev_mode_info_context = pc->prev_mi;
/* Decode a row of superblocks */ /* Decode a row of superblocks */
for (mb_row = 0; mb_row < pc->mb_rows; mb_row += 2) { for (mb_row = 0; mb_row < pc->mb_rows; mb_row += 4) {
decode_sb_row(pbi, pc, mb_row, xd, &residual_bc); decode_sb_row(pbi, pc, mb_row, xd, &residual_bc);
} }
corrupt_tokens |= xd->corrupted; corrupt_tokens |= xd->corrupted;

View File

@ -562,19 +562,7 @@ static void write_mb_segid(vp9_writer *bc,
const MB_MODE_INFO *mi, const MACROBLOCKD *xd) { const MB_MODE_INFO *mi, const MACROBLOCKD *xd) {
// Encode the MB segment id. // Encode the MB segment id.
int seg_id = mi->segment_id; int seg_id = mi->segment_id;
#if CONFIG_SUPERBLOCKS
if (mi->encoded_as_sb) {
if (xd->mb_to_right_edge >= 0)
seg_id = seg_id && xd->mode_info_context[1].mbmi.segment_id;
if (xd->mb_to_bottom_edge >= 0) {
seg_id = seg_id &&
xd->mode_info_context[xd->mode_info_stride].mbmi.segment_id;
if (xd->mb_to_right_edge >= 0)
seg_id = seg_id &&
xd->mode_info_context[xd->mode_info_stride + 1].mbmi.segment_id;
}
}
#endif
if (xd->segmentation_enabled && xd->update_mb_segmentation_map) { if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
switch (seg_id) { switch (seg_id) {
case 0: case 0:
@ -703,99 +691,47 @@ static void update_ref_probs(VP9_COMP *const cpi) {
vp9_compute_mod_refprobs(cm); vp9_compute_mod_refprobs(cm);
} }
static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) { static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
vp9_writer *bc,
int mb_rows_left, int mb_cols_left) {
VP9_COMMON *const pc = &cpi->common; VP9_COMMON *const pc = &cpi->common;
const nmv_context *nmvc = &pc->fc.nmvc; const nmv_context *nmvc = &pc->fc.nmvc;
MACROBLOCKD *xd = &cpi->mb.e_mbd; MACROBLOCK *const x = &cpi->mb;
MODE_INFO *m; MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *prev_m;
TOKENEXTRA *tok = cpi->tok;
TOKENEXTRA *tok_end = tok + cpi->tok_count;
const int mis = pc->mode_info_stride; const int mis = pc->mode_info_stride;
int mb_row, mb_col; MB_MODE_INFO *const mi = &m->mbmi;
int row, col; const MV_REFERENCE_FRAME rf = mi->ref_frame;
const MB_PREDICTION_MODE mode = mi->mode;
// Values used in prediction model coding const int segment_id = mi->segment_id;
vp9_prob pred_prob;
unsigned char prediction_flag;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
cpi->mb.partition_info = cpi->mb.pi;
mb_row = 0;
for (row = 0; row < pc->mb_rows; row += 2) {
m = pc->mi + row * mis;
prev_m = pc->prev_mi + row * mis;
mb_col = 0;
for (col = 0; col < pc->mb_cols; col += 2) {
int i;
// Process the 4 MBs in the order:
// top-left, top-right, bottom-left, bottom-right
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
vp9_write(bc, m->mbmi.encoded_as_sb, pc->sb_coded); const int mb_size = 1 << mi->sb_type;
#else
const int mb_size = 1;
#endif #endif
for (i = 0; i < 4; i++) { int skip_coeff;
MB_MODE_INFO *mi;
MV_REFERENCE_FRAME rf;
MV_REFERENCE_FRAME sec_ref_frame;
MB_PREDICTION_MODE mode;
int segment_id, skip_coeff;
int dy = row_delta[i]; int mb_row = pc->mb_rows - mb_rows_left;
int dx = col_delta[i]; int mb_col = pc->mb_cols - mb_cols_left;
int offset_extended = dy * mis + dx; xd->prev_mode_info_context = pc->prev_mi + (m - pc->mi);
x->partition_info = x->pi + (m - pc->mi);
if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols)) {
// MB lies outside frame, move on
mb_row += dy;
mb_col += dx;
m += offset_extended;
prev_m += offset_extended;
cpi->mb.partition_info += offset_extended;
continue;
}
mi = &m->mbmi;
rf = mi->ref_frame;
sec_ref_frame = mi->second_ref_frame;
mode = mi->mode;
segment_id = mi->segment_id;
// Distance of Mb to the various image edges. // Distance of Mb to the various image edges.
// These specified to 8th pel as they are always compared to MV // These specified to 8th pel as they are always compared to MV
// values that are in 1/8th pel units // values that are in 1/8th pel units
xd->mb_to_left_edge = -((mb_col * 16) << 3); xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_top_edge = -((mb_row * 16)) << 3; xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_right_edge = ((pc->mb_cols - mb_size - mb_col) * 16) << 3;
#if CONFIG_SUPERBLOCKS xd->mb_to_bottom_edge = ((pc->mb_rows - mb_size - mb_row) * 16) << 3;
if (mi->encoded_as_sb) {
xd->mb_to_right_edge = ((pc->mb_cols - 2 - mb_col) * 16) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 2 - mb_row) * 16) << 3;
} else {
#endif
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
#if CONFIG_SUPERBLOCKS
}
#endif
// Make sure the MacroBlockD mode info pointer is set correctly
xd->mode_info_context = m;
xd->prev_mode_info_context = prev_m;
#ifdef ENTROPY_STATS #ifdef ENTROPY_STATS
active_section = 9; active_section = 9;
#endif #endif
if (cpi->mb.e_mbd.update_mb_segmentation_map) { if (cpi->mb.e_mbd.update_mb_segmentation_map) {
// Is temporal coding of the segment map enabled // Is temporal coding of the segment map enabled
if (pc->temporal_update) { if (pc->temporal_update) {
prediction_flag = vp9_get_pred_flag(xd, PRED_SEG_ID); unsigned char prediction_flag = vp9_get_pred_flag(xd, PRED_SEG_ID);
pred_prob = vp9_get_pred_prob(pc, xd, PRED_SEG_ID); vp9_prob pred_prob = vp9_get_pred_prob(pc, xd, PRED_SEG_ID);
// Code the segment id prediction flag for this mb // Code the segment id prediction flag for this mb
vp9_write(bc, prediction_flag, pred_prob); vp9_write(bc, prediction_flag, pred_prob);
@ -809,18 +745,24 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
} }
} }
if (!pc->mb_no_coeff_skip) {
skip_coeff = 0;
} else if (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) {
skip_coeff = 1; skip_coeff = 1;
if (pc->mb_no_coeff_skip && } else {
(!vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) || const int nmbs = mb_size;
(vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) { const int xmbs = MIN(nmbs, mb_cols_left);
skip_coeff = mi->mb_skip_coeff; const int ymbs = MIN(nmbs, mb_rows_left);
#if CONFIG_SUPERBLOCKS int x, y;
if (mi->encoded_as_sb) {
skip_coeff &= m[1].mbmi.mb_skip_coeff; skip_coeff = 1;
skip_coeff &= m[mis].mbmi.mb_skip_coeff; for (y = 0; y < ymbs; y++) {
skip_coeff &= m[mis + 1].mbmi.mb_skip_coeff; for (x = 0; x < xmbs; x++) {
skip_coeff = skip_coeff && m[y * mis + x].mbmi.mb_skip_coeff;
} }
#endif }
vp9_write(bc, skip_coeff, vp9_write(bc, skip_coeff,
vp9_get_pred_prob(pc, xd, PRED_MBSKIP)); vp9_get_pred_prob(pc, xd, PRED_MBSKIP));
} }
@ -840,7 +782,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) { if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) if (m->mbmi.sb_type)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob); write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
else else
#endif #endif
@ -885,7 +827,6 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
vp9_mv_ref_probs(&cpi->common, mv_ref_p, mi->mb_mode_context[rf]); vp9_mv_ref_probs(&cpi->common, mv_ref_p, mi->mb_mode_context[rf]);
// #ifdef ENTROPY_STATS // #ifdef ENTROPY_STATS
#ifdef ENTROPY_STATS #ifdef ENTROPY_STATS
accum_mv_refs(mode, ct); accum_mv_refs(mode, ct);
@ -895,7 +836,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
// Is the segment coding of mode enabled // Is the segment coding of mode enabled
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) { if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (mi->encoded_as_sb) { if (mi->sb_type) {
write_sb_mv_ref(bc, mode, mv_ref_p); write_sb_mv_ref(bc, mode, mv_ref_p);
} else } else
#endif #endif
@ -916,8 +857,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
cpi->common.pred_filter_mode); cpi->common.pred_filter_mode);
} }
#endif #endif
if (mode >= NEARESTMV && mode <= SPLITMV) if (mode >= NEARESTMV && mode <= SPLITMV) {
{
if (cpi->common.mcomp_filter_type == SWITCHABLE) { if (cpi->common.mcomp_filter_type == SWITCHABLE) {
write_token(bc, vp9_switchable_interp_tree, write_token(bc, vp9_switchable_interp_tree,
vp9_get_pred_probs(&cpi->common, xd, vp9_get_pred_probs(&cpi->common, xd,
@ -925,8 +865,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
vp9_switchable_interp_encodings + vp9_switchable_interp_encodings +
vp9_switchable_interp_map[mi->interp_filter]); vp9_switchable_interp_map[mi->interp_filter]);
} else { } else {
assert (mi->interp_filter == assert(mi->interp_filter == cpi->common.mcomp_filter_type);
cpi->common.mcomp_filter_type);
} }
} }
@ -973,7 +912,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
} }
} }
#endif #endif
{
switch (mode) { /* new, split require MVs */ switch (mode) { /* new, split require MVs */
case NEWMV: case NEWMV:
#ifdef ENTROPY_STATS #ifdef ENTROPY_STATS
@ -1002,8 +941,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
do { do {
B_PREDICTION_MODE blockmode; B_PREDICTION_MODE blockmode;
int_mv blockmv; int_mv blockmv;
const int *const L = const int *const L = vp9_mbsplits[mi->partitioning];
vp9_mbsplits [mi->partitioning];
int k = -1; /* first block in subset j */ int k = -1; /* first block in subset j */
int mv_contz; int mv_contz;
int_mv leftmv, abovemv; int_mv leftmv, abovemv;
@ -1041,13 +979,12 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
} }
} }
} while (++j < cpi->mb.partition_info->count); } while (++j < cpi->mb.partition_info->count);
}
break; break;
}
default: default:
break; break;
} }
} }
}
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) || if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
(rf != INTRA_FRAME && !(mode == SPLITMV && (rf != INTRA_FRAME && !(mode == SPLITMV &&
@ -1062,84 +999,56 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV) { if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV) {
vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]); vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS #if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
if (mi->encoded_as_sb && sz != TX_8X8) if (mi->sb_type && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]); vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
#endif #endif
} }
} }
#ifdef ENTROPY_STATS
active_section = 1;
#endif
assert(tok < tok_end);
pack_mb_tokens(bc, &tok, tok_end);
#if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) {
assert(!i);
mb_col += 2;
m += 2;
cpi->mb.partition_info += 2;
prev_m += 2;
break;
}
#endif
// Next MB
mb_row += dy;
mb_col += dx;
m += offset_extended;
prev_m += offset_extended;
cpi->mb.partition_info += offset_extended;
#if CONFIG_DEBUG
assert((prev_m - cpi->common.prev_mip) == (m - cpi->common.mip));
assert((prev_m - cpi->common.prev_mi) == (m - cpi->common.mi));
#endif
}
} }
// Next SB static void write_mb_modes_kf(const VP9_COMP *cpi,
mb_row += 2;
m += mis + (1 - (pc->mb_cols & 0x1));
prev_m += mis + (1 - (pc->mb_cols & 0x1));
cpi->mb.partition_info += mis + (1 - (pc->mb_cols & 0x1));
}
}
static void write_mb_modes_kf(const VP9_COMMON *c,
const MACROBLOCKD *xd,
const MODE_INFO *m, const MODE_INFO *m,
int mode_info_stride, vp9_writer *bc,
vp9_writer *const bc) { int mb_rows_left, int mb_cols_left) {
int ym; const VP9_COMMON *const c = &cpi->common;
int segment_id; const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const int mis = c->mode_info_stride;
ym = m->mbmi.mode; const int ym = m->mbmi.mode;
segment_id = m->mbmi.segment_id; const int segment_id = m->mbmi.segment_id;
int skip_coeff;
if (xd->update_mb_segmentation_map) { if (xd->update_mb_segmentation_map) {
write_mb_segid(bc, &m->mbmi, xd); write_mb_segid(bc, &m->mbmi, xd);
} }
if (c->mb_no_coeff_skip && if (!c->mb_no_coeff_skip) {
(!vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) || skip_coeff = 0;
(vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) { } else if (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
int skip_coeff = m->mbmi.mb_skip_coeff; vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) {
skip_coeff = 1;
} else {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
const int mis = mode_info_stride; const int nmbs = 1 << m->mbmi.sb_type;
if (m->mbmi.encoded_as_sb) { #else
skip_coeff &= m[1].mbmi.mb_skip_coeff; const int nmbs = 1;
skip_coeff &= m[mis].mbmi.mb_skip_coeff;
skip_coeff &= m[mis + 1].mbmi.mb_skip_coeff;
}
#endif #endif
const int xmbs = MIN(nmbs, mb_cols_left);
const int ymbs = MIN(nmbs, mb_rows_left);
int x, y;
skip_coeff = 1;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
skip_coeff = skip_coeff && m[y * mis + x].mbmi.mb_skip_coeff;
}
}
vp9_write(bc, skip_coeff, vp9_write(bc, skip_coeff,
vp9_get_pred_prob(c, xd, PRED_MBSKIP)); vp9_get_pred_prob(c, xd, PRED_MBSKIP));
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) { if (m->mbmi.sb_type) {
sb_kfwrite_ymode(bc, ym, sb_kfwrite_ymode(bc, ym,
c->sb_kf_ymode_prob[c->kf_ymode_probs_index]); c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
} else } else
@ -1150,7 +1059,6 @@ static void write_mb_modes_kf(const VP9_COMMON *c,
} }
if (ym == B_PRED) { if (ym == B_PRED) {
const int mis = c->mode_info_stride;
int i = 0; int i = 0;
#if CONFIG_COMP_INTRA_PRED #if CONFIG_COMP_INTRA_PRED
int uses_second = int uses_second =
@ -1195,7 +1103,7 @@ static void write_mb_modes_kf(const VP9_COMMON *c,
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]); write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
if (ym <= I8X8_PRED && c->txfm_mode == TX_MODE_SELECT && if (ym <= I8X8_PRED && c->txfm_mode == TX_MODE_SELECT &&
!((c->mb_no_coeff_skip && m->mbmi.mb_skip_coeff) || !((c->mb_no_coeff_skip && skip_coeff) ||
(vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) && (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) { vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
TX_SIZE sz = m->mbmi.txfm_size; TX_SIZE sz = m->mbmi.txfm_size;
@ -1204,75 +1112,99 @@ static void write_mb_modes_kf(const VP9_COMMON *c,
if (sz != TX_4X4 && ym <= TM_PRED) { if (sz != TX_4X4 && ym <= TM_PRED) {
vp9_write(bc, sz != TX_8X8, c->prob_tx[1]); vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS #if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb && sz != TX_8X8) if (m->mbmi.sb_type && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, c->prob_tx[2]); vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
#endif #endif
} }
} }
} }
static void write_kfmodes(VP9_COMP* const cpi, vp9_writer* const bc) { static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
int mb_row, int mb_col) {
VP9_COMMON *const c = &cpi->common; VP9_COMMON *const c = &cpi->common;
const int mis = c->mode_info_stride; MACROBLOCKD *const xd = &cpi->mb.e_mbd;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
int i;
int row, col;
int mb_row, mb_col;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
TOKENEXTRA *tok = cpi->tok;
TOKENEXTRA *tok_end = tok + cpi->tok_count;
mb_row = 0;
for (row = 0; row < c->mb_rows; row += 2) {
m = c->mi + row * mis;
mb_col = 0;
for (col = 0; col < c->mb_cols; col += 2) {
#if CONFIG_SUPERBLOCKS
vp9_write(bc, m->mbmi.encoded_as_sb, c->sb_coded);
#endif
// Process the 4 MBs in the order:
// top-left, top-right, bottom-left, bottom-right
for (i = 0; i < 4; i++) {
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * mis + dx;
if ((mb_row >= c->mb_rows) || (mb_col >= c->mb_cols)) {
// MB lies outside frame, move on
mb_row += dy;
mb_col += dx;
m += offset_extended;
continue;
}
// Make sure the MacroBlockD mode info pointer is set correctly
xd->mode_info_context = m; xd->mode_info_context = m;
if (c->frame_type == KEY_FRAME) {
write_mb_modes_kf(c, xd, m, mis, bc); write_mb_modes_kf(cpi, m, bc,
c->mb_rows - mb_row, c->mb_cols - mb_col);
#ifdef ENTROPY_STATS #ifdef ENTROPY_STATS
active_section = 8; active_section = 8;
#endif #endif
assert(tok < tok_end); } else {
pack_mb_tokens(bc, &tok, tok_end); pack_inter_mode_mvs(cpi, m, bc,
c->mb_rows - mb_row, c->mb_cols - mb_col);
#ifdef ENTROPY_STATS
active_section = 1;
#endif
}
assert(*tok < tok_end);
pack_mb_tokens(bc, tok, tok_end);
}
static void write_modes(VP9_COMP *cpi, vp9_writer* const bc) {
VP9_COMMON *const c = &cpi->common;
const int mis = c->mode_info_stride;
MODE_INFO *m, *m_ptr = c->mi;
int i, mb_row, mb_col;
TOKENEXTRA *tok = cpi->tok;
TOKENEXTRA *tok_end = tok + cpi->tok_count;
for (mb_row = 0; mb_row < c->mb_rows; mb_row += 4, m_ptr += 4 * mis) {
m = m_ptr;
for (mb_col = 0; mb_col < c->mb_cols; mb_col += 4, m += 4) {
#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
vp9_write(bc, m->mbmi.sb_type == BLOCK_SIZE_SB64X64, c->sb64_coded);
if (m->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
write_modes_b(cpi, m, bc, &tok, tok_end, mb_row, mb_col);
} else
#endif
{
int j;
for (j = 0; j < 4; j++) {
const int x_idx_sb = (j & 1) << 1, y_idx_sb = j & 2;
#if CONFIG_SUPERBLOCKS
MODE_INFO *sb_m = m + y_idx_sb * mis + x_idx_sb;
#endif
if (mb_col + x_idx_sb >= c->mb_cols ||
mb_row + y_idx_sb >= c->mb_rows)
continue;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) { vp9_write(bc, sb_m->mbmi.sb_type, c->sb32_coded);
assert(!i); if (sb_m->mbmi.sb_type) {
mb_col += 2; assert(sb_m->mbmi.sb_type == BLOCK_SIZE_SB32X32);
m += 2; write_modes_b(cpi, sb_m, bc, &tok, tok_end,
break; mb_row + y_idx_sb, mb_col + x_idx_sb);
} } else
#endif #endif
// Next MB {
mb_row += dy; // Process the 4 MBs in the order:
mb_col += dx; // top-left, top-right, bottom-left, bottom-right
m += offset_extended; for (i = 0; i < 4; i++) {
const int x_idx = x_idx_sb + (i & 1), y_idx = y_idx_sb + (i >> 1);
MODE_INFO *mb_m = m + x_idx + y_idx * mis;
if (mb_row + y_idx >= c->mb_rows ||
mb_col + x_idx >= c->mb_cols) {
// MB lies outside frame, move on
continue;
}
#if CONFIG_SUPERBLOCKS
assert(mb_m->mbmi.sb_type == BLOCK_SIZE_MB16X16);
#endif
write_modes_b(cpi, mb_m, bc, &tok, tok_end,
mb_row + y_idx, mb_col + x_idx);
}
}
}
} }
} }
mb_row += 2;
} }
} }
@ -1800,13 +1732,12 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
{ #if CONFIG_SUPERBLOCKS64
/* sb mode probability */ pc->sb64_coded = get_binary_prob(cpi->sb64_count[0], cpi->sb64_count[1]);
const int sb_max = (((pc->mb_rows + 1) >> 1) * ((pc->mb_cols + 1) >> 1)); vp9_write_literal(&header_bc, pc->sb64_coded, 8);
#endif
pc->sb_coded = get_prob(sb_max - cpi->sb_count, sb_max); pc->sb32_coded = get_binary_prob(cpi->sb32_count[0], cpi->sb32_count[1]);
vp9_write_literal(&header_bc, pc->sb_coded, 8); vp9_write_literal(&header_bc, pc->sb32_coded, 8);
}
#endif #endif
{ {
@ -2195,12 +2126,12 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
if (pc->frame_type == KEY_FRAME) { if (pc->frame_type == KEY_FRAME) {
decide_kf_ymode_entropy(cpi); decide_kf_ymode_entropy(cpi);
write_kfmodes(cpi, &residual_bc); write_modes(cpi, &residual_bc);
} else { } else {
/* This is not required if the counts in cpi are consistent with the /* This is not required if the counts in cpi are consistent with the
* final packing pass */ * final packing pass */
// if (!cpi->dummy_packing) vp9_zero(cpi->NMVcount); // if (!cpi->dummy_packing) vp9_zero(cpi->NMVcount);
pack_inter_mode_mvs(cpi, &residual_bc); write_modes(cpi, &residual_bc);
vp9_update_mode_context(&cpi->common); vp9_update_mode_context(&cpi->common);
} }

View File

@ -181,10 +181,13 @@ typedef struct macroblock {
// Structure to hold context for each of the 4 MBs within a SB: // Structure to hold context for each of the 4 MBs within a SB:
// when encoded as 4 independent MBs: // when encoded as 4 independent MBs:
PICK_MODE_CONTEXT mb_context[4]; PICK_MODE_CONTEXT mb_context[4][4];
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
// when 4 MBs share coding parameters: // when 4 MBs share coding parameters:
PICK_MODE_CONTEXT sb_context[4]; PICK_MODE_CONTEXT sb32_context[4];
#if CONFIG_SUPERBLOCKS64
PICK_MODE_CONTEXT sb64_context;
#endif // CONFIG_SUPERBLOCKS64
#endif #endif
void (*vp9_short_fdct4x4)(int16_t *input, int16_t *output, int pitch); void (*vp9_short_fdct4x4)(int16_t *input, int16_t *output, int pitch);

File diff suppressed because it is too large Load Diff

View File

@ -17,6 +17,7 @@
#include <limits.h> #include <limits.h>
#include <math.h> #include <math.h>
#include "vp9/common/vp9_findnearmv.h" #include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_common.h"
#ifdef ENTROPY_STATS #ifdef ENTROPY_STATS
static int mv_ref_ct [31] [4] [2]; static int mv_ref_ct [31] [4] [2];
@ -241,9 +242,6 @@ void vp9_init3smotion_compensation(MACROBLOCK *x, int stride) {
}, \ }, \
v = INT_MAX;) v = INT_MAX;)
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv, int_mv *bestmv, int_mv *ref_mv,
int error_per_bit, int error_per_bit,

View File

@ -556,43 +556,19 @@ static void print_seg_map(VP9_COMP *cpi) {
} }
static void update_reference_segmentation_map(VP9_COMP *cpi) { static void update_reference_segmentation_map(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common; VP9_COMMON *const cm = &cpi->common;
int row, col, sb_rows = (cm->mb_rows + 1) >> 1, sb_cols = (cm->mb_cols + 1) >> 1; int row, col;
MODE_INFO *mi = cm->mi; MODE_INFO *mi, *mi_ptr = cm->mi;
uint8_t *segmap = cpi->segmentation_map; uint8_t *cache_ptr = cm->last_frame_seg_map, *cache;
uint8_t *segcache = cm->last_frame_seg_map;
for (row = 0; row < sb_rows; row++) { for (row = 0; row < cm->mb_rows; row++) {
for (col = 0; col < sb_cols; col++) { mi = mi_ptr;
MODE_INFO *miptr = mi + col * 2; cache = cache_ptr;
uint8_t *cache = segcache + col * 2; for (col = 0; col < cm->mb_cols; col++, mi++, cache++) {
#if CONFIG_SUPERBLOCKS cache[0] = mi->mbmi.segment_id;
if (miptr->mbmi.encoded_as_sb) {
cache[0] = miptr->mbmi.segment_id;
if (!(cm->mb_cols & 1) || col < sb_cols - 1)
cache[1] = miptr->mbmi.segment_id;
if (!(cm->mb_rows & 1) || row < sb_rows - 1) {
cache[cm->mb_cols] = miptr->mbmi.segment_id;
if (!(cm->mb_cols & 1) || col < sb_cols - 1)
cache[cm->mb_cols + 1] = miptr->mbmi.segment_id;
} }
} else mi_ptr += cm->mode_info_stride;
#endif cache_ptr += cm->mb_cols;
{
cache[0] = miptr[0].mbmi.segment_id;
if (!(cm->mb_cols & 1) || col < sb_cols - 1)
cache[1] = miptr[1].mbmi.segment_id;
if (!(cm->mb_rows & 1) || row < sb_rows - 1) {
cache[cm->mb_cols] = miptr[cm->mode_info_stride].mbmi.segment_id;
if (!(cm->mb_cols & 1) || col < sb_cols - 1)
cache[1] = miptr[1].mbmi.segment_id;
cache[cm->mb_cols + 1] = miptr[cm->mode_info_stride + 1].mbmi.segment_id;
}
}
}
segmap += 2 * cm->mb_cols;
segcache += 2 * cm->mb_cols;
mi += 2 * cm->mode_info_stride;
} }
} }
@ -1788,7 +1764,10 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
cm->prob_gf_coded = 128; cm->prob_gf_coded = 128;
cm->prob_intra_coded = 63; cm->prob_intra_coded = 63;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
cm->sb_coded = 200; cm->sb32_coded = 200;
#if CONFIG_SUPERBLOCKS64
cm->sb64_coded = 200;
#endif
#endif #endif
for (i = 0; i < COMP_PRED_CONTEXTS; i++) for (i = 0; i < COMP_PRED_CONTEXTS; i++)
cm->prob_comppred[i] = 128; cm->prob_comppred[i] = 128;
@ -1994,6 +1973,13 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
vp9_variance_halfpixvar32x32_h, vp9_variance_halfpixvar32x32_v, vp9_variance_halfpixvar32x32_h, vp9_variance_halfpixvar32x32_v,
vp9_variance_halfpixvar32x32_hv, vp9_sad32x32x3, vp9_sad32x32x8, vp9_variance_halfpixvar32x32_hv, vp9_sad32x32x3, vp9_sad32x32x8,
vp9_sad32x32x4d) vp9_sad32x32x4d)
#if CONFIG_SUPERBLOCKS64
BFP(BLOCK_64X64, vp9_sad64x64, vp9_variance64x64, vp9_sub_pixel_variance64x64,
vp9_variance_halfpixvar64x64_h, vp9_variance_halfpixvar64x64_v,
vp9_variance_halfpixvar64x64_hv, vp9_sad64x64x3, vp9_sad64x64x8,
vp9_sad64x64x4d)
#endif
#endif #endif
BFP(BLOCK_16X16, vp9_sad16x16, vp9_variance16x16, vp9_sub_pixel_variance16x16, BFP(BLOCK_16X16, vp9_sad16x16, vp9_variance16x16, vp9_sub_pixel_variance16x16,

View File

@ -390,8 +390,15 @@ enum BlockSize {
BLOCK_4X4 = PARTITIONING_4X4, BLOCK_4X4 = PARTITIONING_4X4,
BLOCK_16X16, BLOCK_16X16,
BLOCK_MAX_SEGMENTS, BLOCK_MAX_SEGMENTS,
#if CONFIG_SUPERBLOCKS
BLOCK_32X32 = BLOCK_MAX_SEGMENTS, BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
#if CONFIG_SUPERBLOCKS64
BLOCK_64X64,
#endif // CONFIG_SUPERBLOCKS64
BLOCK_MAX_SB_SEGMENTS, BLOCK_MAX_SB_SEGMENTS,
#else // CONFIG_SUPERBLOCKS
BLOCK_MAX_SB_SEGMENTS = BLOCK_MAX_SEGMENTS,
#endif // CONFIG_SUPERBLOCKS
}; };
typedef struct VP9_COMP { typedef struct VP9_COMP {
@ -571,7 +578,10 @@ typedef struct VP9_COMP {
int cq_target_quality; int cq_target_quality;
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
int sb_count; int sb32_count[2];
#if CONFIG_SUPERBLOCKS64
int sb64_count[2];
#endif
int sb_ymode_count [VP9_I32X32_MODES]; int sb_ymode_count [VP9_I32X32_MODES];
#endif #endif
int ymode_count[VP9_YMODES]; /* intra MB type cts this frame */ int ymode_count[VP9_YMODES]; /* intra MB type cts this frame */

View File

@ -41,6 +41,7 @@
#include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_entropy.h"
#include "vp9_rtcd.h" #include "vp9_rtcd.h"
#include "vp9/common/vp9_mvref_common.h" #include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_common.h"
#define MAXF(a,b) (((a) > (b)) ? (a) : (b)) #define MAXF(a,b) (((a) > (b)) ? (a) : (b))
@ -926,14 +927,21 @@ static void copy_predictor(uint8_t *dst, const uint8_t *predictor) {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
#if CONFIG_TX32X32 #if CONFIG_TX32X32
static int rdcost_sby_32x32(MACROBLOCK *x) { static int rdcost_sby_32x32(MACROBLOCK *x, int backup) {
MACROBLOCKD * const xd = &x->e_mbd; MACROBLOCKD * const xd = &x->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left; ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta = (ENTROPY_CONTEXT *) &t_above, ENTROPY_CONTEXT *ta, *tl;
*tl = (ENTROPY_CONTEXT *) &t_left;
if (backup) {
ta = (ENTROPY_CONTEXT *) &t_above,
tl = (ENTROPY_CONTEXT *) &t_left;
vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES)); vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES)); vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
} else {
ta = (ENTROPY_CONTEXT *) xd->above_context;
tl = (ENTROPY_CONTEXT *) xd->left_context;
}
return cost_coeffs(x, xd->block, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_32X32); return cost_coeffs(x, xd->block, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_32X32);
} }
@ -953,7 +961,8 @@ static int vp9_sb_block_error_c(int16_t *coeff, int16_t *dqcoeff,
#define DEBUG_ERROR 0 #define DEBUG_ERROR 0
static void super_block_yrd_32x32(MACROBLOCK *x, static void super_block_yrd_32x32(MACROBLOCK *x,
int *rate, int *distortion, int *skippable) { int *rate, int *distortion, int *skippable,
int backup) {
SUPERBLOCK * const x_sb = &x->sb_coeff_data; SUPERBLOCK * const x_sb = &x->sb_coeff_data;
MACROBLOCKD * const xd = &x->e_mbd; MACROBLOCKD * const xd = &x->e_mbd;
SUPERBLOCKD * const xd_sb = &xd->sb_coeff_data; SUPERBLOCKD * const xd_sb = &xd->sb_coeff_data;
@ -976,7 +985,7 @@ static void super_block_yrd_32x32(MACROBLOCK *x,
printf("IDCT/FDCT error 32x32: %d (d: %d)\n", printf("IDCT/FDCT error 32x32: %d (d: %d)\n",
vp9_block_error_c(x_sb->src_diff, out, 1024), *distortion); vp9_block_error_c(x_sb->src_diff, out, 1024), *distortion);
#endif #endif
*rate = rdcost_sby_32x32(x); *rate = rdcost_sby_32x32(x, backup);
*skippable = vp9_sby_is_skippable_32x32(&x->e_mbd); *skippable = vp9_sby_is_skippable_32x32(&x->e_mbd);
} }
#endif #endif
@ -1005,7 +1014,7 @@ static void super_block_yrd(VP9_COMP *cpi,
#if CONFIG_TX32X32 #if CONFIG_TX32X32
vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff, src, src_y_stride, vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff, src, src_y_stride,
dst, dst_y_stride); dst, dst_y_stride);
super_block_yrd_32x32(x, &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32]); super_block_yrd_32x32(x, &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32], 1);
#endif #endif
#if DEBUG_ERROR #if DEBUG_ERROR
@ -1065,6 +1074,104 @@ static void super_block_yrd(VP9_COMP *cpi,
xd->above_context = orig_above; xd->above_context = orig_above;
xd->left_context = orig_left; xd->left_context = orig_left;
} }
/* Rate-distortion estimation for the luma plane of a 64x64 superblock.
 * Evaluates every candidate transform size: the 64x64 area is processed
 * as 4 32x32 quadrants for TX_32X32 (when CONFIG_TX32X32) and as 16
 * 16x16 macroblocks for TX_16X16 / TX_8X8 / TX_4X4.  Per-size rate (r),
 * distortion (d) and skippability (s) are accumulated, then
 * choose_txfm_size_from_rd() selects the best transform size and fills
 * *rate, *distortion, *skip and txfm_cache. */
static void super_block_64_yrd(VP9_COMP *cpi,
                               MACROBLOCK *x, int *rate, int *distortion,
                               int *skip,
                               int64_t txfm_cache[NB_TXFM_MODES]) {
  MACROBLOCKD *const xd = &x->e_mbd;
  int r[TX_SIZE_MAX_SB][2], d[TX_SIZE_MAX_SB], s[TX_SIZE_MAX_SB], n;
  const uint8_t *src = x->src.y_buffer, *dst = xd->dst.y_buffer;
  int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
  /* One private copy of the entropy contexts per candidate transform size:
   * each size must cost its tokens against its own evolving context state,
   * independently of the others. */
  ENTROPY_CONTEXT_PLANES t_above[TX_SIZE_MAX_SB][4],
      *orig_above = xd->above_context;
  ENTROPY_CONTEXT_PLANES t_left[TX_SIZE_MAX_SB][4],
      *orig_left = xd->left_context;

  for (n = TX_4X4; n < TX_SIZE_MAX_SB; n++) {
    vpx_memcpy(t_above[n], xd->above_context, sizeof(t_above[n]));
    vpx_memcpy(t_left[n], xd->left_context, sizeof(t_left[n]));
    r[n][0] = 0;
    d[n] = 0;
    s[n] = 1;  /* assume skippable until a non-skippable sub-block is seen */
  }

#if CONFIG_TX32X32
  /* TX_32X32: four 32x32 quadrants; x_idx/y_idx select the quadrant. */
  for (n = 0; n < 4; n++) {
    int x_idx = n & 1, y_idx = n >> 1;
    int r_tmp, d_tmp, s_tmp;

    /* Each 32x32 quadrant spans two context units, hence the << 1. */
    xd->above_context = &t_above[TX_32X32][x_idx << 1];
    xd->left_context = &t_left[TX_32X32][y_idx << 1];
    vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff,
                         src + 32 * x_idx + 32 * y_idx * src_y_stride,
                         src_y_stride,
                         dst + 32 * x_idx + 32 * y_idx * dst_y_stride,
                         dst_y_stride);
    /* Final 0: do not back up/restore contexts inside; we manage them here. */
    super_block_yrd_32x32(x, &r_tmp, &d_tmp, &s_tmp, 0);
    r[TX_32X32][0] += r_tmp;
    d[TX_32X32] += d_tmp;
    s[TX_32X32] = s[TX_32X32] && s_tmp;
  }
#endif

#if DEBUG_ERROR
  int err[3] = { 0, 0, 0 };
#endif
  /* TX_16X16 / TX_8X8 / TX_4X4: sixteen 16x16 macroblocks in raster order. */
  for (n = 0; n < 16; n++) {
    int x_idx = n & 3, y_idx = n >> 2;
    int r_tmp, d_tmp, s_tmp;

    vp9_subtract_mby_s_c(x->src_diff,
                         src + x_idx * 16 + y_idx * 16 * src_y_stride,
                         src_y_stride,
                         dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
                         dst_y_stride);

    xd->above_context = &t_above[TX_16X16][x_idx];
    xd->left_context = &t_left[TX_16X16][y_idx];
    macro_block_yrd_16x16(x, &r_tmp, &d_tmp, &s_tmp, 0);
    d[TX_16X16] += d_tmp;
    r[TX_16X16][0] += r_tmp;
    s[TX_16X16] = s[TX_16X16] && s_tmp;
#if DEBUG_ERROR
    vp9_inverse_transform_mby_16x16(xd);
    err[2] += vp9_block_error_c(xd->diff, x->src_diff, 256);
#endif

    xd->above_context = &t_above[TX_4X4][x_idx];
    xd->left_context = &t_left[TX_4X4][y_idx];
    macro_block_yrd_4x4(x, &r_tmp, &d_tmp, &s_tmp, 0);
    d[TX_4X4] += d_tmp;
    r[TX_4X4][0] += r_tmp;
    s[TX_4X4] = s[TX_4X4] && s_tmp;
#if DEBUG_ERROR
    vp9_inverse_transform_mby_4x4(xd);
    err[0] += vp9_block_error_c(xd->diff, x->src_diff, 256);
#endif

    xd->above_context = &t_above[TX_8X8][x_idx];
    xd->left_context = &t_left[TX_8X8][y_idx];
    macro_block_yrd_8x8(x, &r_tmp, &d_tmp, &s_tmp, 0);
    d[TX_8X8] += d_tmp;
    r[TX_8X8][0] += r_tmp;
    s[TX_8X8] = s[TX_8X8] && s_tmp;
#if DEBUG_ERROR
    vp9_inverse_transform_mby_8x8(xd);
    err[1] += vp9_block_error_c(xd->diff, x->src_diff, 256);
#endif
  }
#if DEBUG_ERROR
  printf("IDCT/FDCT error 16x16: %d (d: %d)\n", err[2], d[2]);
  printf("IDCT/FDCT error 8x8: %d (d: %d)\n", err[1], d[1]);
  printf("IDCT/FDCT error 4x4: %d (d: %d)\n", err[0], d[0]);
#endif
  choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s, skip, txfm_cache,
                           TX_SIZE_MAX_SB - 1);
  /* Restore the caller's entropy context pointers. */
  xd->above_context = orig_above;
  xd->left_context = orig_left;
}
#endif #endif
static void copy_predictor_8x8(uint8_t *dst, const uint8_t *predictor) { static void copy_predictor_8x8(uint8_t *dst, const uint8_t *predictor) {
@ -1359,6 +1466,48 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi,
return best_rd; return best_rd;
} }
#if CONFIG_SUPERBLOCKS64
/* Search all full-size intra prediction modes (DC_PRED..TM_PRED) for the
 * luma plane of a 64x64 superblock and keep the one with the lowest
 * rate-distortion cost.  On return, the winning mode is written into
 * mbmi->mode, and *rate (tokens + mode signalling), *rate_tokenonly,
 * *distortion and *skippable describe the winner.  Returns the best
 * RD cost found. */
static int64_t rd_pick_intra_sb64y_mode(VP9_COMP *cpi,
                                        MACROBLOCK *x,
                                        int *rate,
                                        int *rate_tokenonly,
                                        int *distortion,
                                        int *skippable,
                                        int64_t txfm_cache[NB_TXFM_MODES]) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_PREDICTION_MODE try_mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
  int64_t best_rd = INT64_MAX;

  /* Y Search for the best 64x64 intra prediction mode. */
  for (try_mode = DC_PRED; try_mode <= TM_PRED; try_mode++) {
    int tok_rate, total_rate, dist, skip_flag;
    int64_t rd;

    xd->mode_info_context->mbmi.mode = try_mode;
    vp9_build_intra_predictors_sb64y_s(xd);

    super_block_64_yrd(cpi, x, &tok_rate, &dist, &skip_flag, txfm_cache);
    total_rate = tok_rate +
                 x->mbmode_cost[xd->frame_type]
                               [xd->mode_info_context->mbmi.mode];
    rd = RDCOST(x->rdmult, x->rddiv, total_rate, dist);
    if (rd >= best_rd)
      continue;

    best_mode = try_mode;
    best_rd = rd;
    *rate = total_rate;
    *rate_tokenonly = tok_rate;
    *distortion = dist;
    *skippable = skip_flag;
  }

  xd->mode_info_context->mbmi.mode = best_mode;
  return best_rd;
}
#endif // CONFIG_SUPERBLOCKS64
#endif #endif
static int64_t rd_pick_intra16x16mby_mode(VP9_COMP *cpi, static int64_t rd_pick_intra16x16mby_mode(VP9_COMP *cpi,
@ -1735,18 +1884,23 @@ static int64_t rd_inter16x16_uv_8x8(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
#if CONFIG_TX32X32 #if CONFIG_TX32X32
static int rd_cost_sbuv_16x16(MACROBLOCK *x) { static int rd_cost_sbuv_16x16(MACROBLOCK *x, int backup) {
int b; int b;
int cost = 0; int cost = 0;
MACROBLOCKD *const xd = &x->e_mbd; MACROBLOCKD *const xd = &x->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left; ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl; ENTROPY_CONTEXT *ta, *tl;
if (backup) {
vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES)); vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES)); vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *) &t_above; ta = (ENTROPY_CONTEXT *) &t_above;
tl = (ENTROPY_CONTEXT *) &t_left; tl = (ENTROPY_CONTEXT *) &t_left;
} else {
ta = (ENTROPY_CONTEXT *)xd->above_context;
tl = (ENTROPY_CONTEXT *)xd->left_context;
}
for (b = 16; b < 24; b += 4) for (b = 16; b < 24; b += 4)
cost += cost_coeffs(x, xd->block + b, PLANE_TYPE_UV, cost += cost_coeffs(x, xd->block + b, PLANE_TYPE_UV,
@ -1757,13 +1911,14 @@ static int rd_cost_sbuv_16x16(MACROBLOCK *x) {
} }
static void rd_inter32x32_uv_16x16(MACROBLOCK *x, int *rate, static void rd_inter32x32_uv_16x16(MACROBLOCK *x, int *rate,
int *distortion, int *skip) { int *distortion, int *skip,
int backup) {
MACROBLOCKD *const xd = &x->e_mbd; MACROBLOCKD *const xd = &x->e_mbd;
vp9_transform_sbuv_16x16(x); vp9_transform_sbuv_16x16(x);
vp9_quantize_sbuv_16x16(x); vp9_quantize_sbuv_16x16(x);
*rate = rd_cost_sbuv_16x16(x); *rate = rd_cost_sbuv_16x16(x, backup);
*distortion = vp9_block_error_c(x->sb_coeff_data.coeff + 1024, *distortion = vp9_block_error_c(x->sb_coeff_data.coeff + 1024,
xd->sb_coeff_data.dqcoeff + 1024, 512) >> 2; xd->sb_coeff_data.dqcoeff + 1024, 512) >> 2;
*skip = vp9_sbuv_is_skippable_16x16(xd); *skip = vp9_sbuv_is_skippable_16x16(xd);
@ -1783,7 +1938,7 @@ static int64_t rd_inter32x32_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff, vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
usrc, vsrc, src_uv_stride, usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride); udst, vdst, dst_uv_stride);
rd_inter32x32_uv_16x16(x, rate, distortion, skip); rd_inter32x32_uv_16x16(x, rate, distortion, skip, 1);
} else { } else {
#endif #endif
int n, r = 0, d = 0; int n, r = 0, d = 0;
@ -1833,6 +1988,14 @@ static int64_t rd_inter32x32_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
} }
/* Forward declaration: super_block_64_uvrd is defined further down. */
static void super_block_64_uvrd(MACROBLOCK *x, int *rate,
                                int *distortion, int *skip);

/* Chroma rate-distortion for an inter-coded 64x64 superblock.
 * cpi and fullpixel are unused here; they are kept so the signature
 * parallels rd_inter32x32_uv(). */
static int64_t rd_inter64x64_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                int *distortion, int fullpixel, int *skip) {
  super_block_64_uvrd(x, rate, distortion, skip);
  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
#endif #endif
static int64_t rd_inter4x4_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate, static int64_t rd_inter4x4_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
@ -1984,13 +2147,13 @@ static void super_block_uvrd(MACROBLOCK *x,
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff, vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
usrc, vsrc, src_uv_stride, usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride); udst, vdst, dst_uv_stride);
rd_inter32x32_uv_16x16(x, rate, distortion, skippable); rd_inter32x32_uv_16x16(x, rate, distortion, skippable, 1);
} else { } else {
#endif #endif
int d = 0, r = 0, n, s = 1; int d = 0, r = 0, n, s = 1;
ENTROPY_CONTEXT_PLANES t_above[2], t_left[2]; ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
ENTROPY_CONTEXT_PLANES *ta = xd->above_context; ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
ENTROPY_CONTEXT_PLANES *tl = xd->left_context; ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
memcpy(t_above, xd->above_context, sizeof(t_above)); memcpy(t_above, xd->above_context, sizeof(t_above));
memcpy(t_left, xd->left_context, sizeof(t_left)); memcpy(t_left, xd->left_context, sizeof(t_left));
@ -2016,24 +2179,107 @@ static void super_block_uvrd(MACROBLOCK *x,
} }
d += vp9_mbuverror(x) >> 2; d += vp9_mbuverror(x) >> 2;
xd->above_context = ta + x_idx; xd->above_context = t_above + x_idx;
xd->left_context = tl + y_idx; xd->left_context = t_left + y_idx;
if (mbmi->txfm_size == TX_4X4) {
r += rd_cost_mbuv_4x4(x, 0);
} else {
r += rd_cost_mbuv_8x8(x, 0); r += rd_cost_mbuv_8x8(x, 0);
} }
}
xd->above_context = ta_orig;
xd->left_context = tl_orig;
*distortion = d;
*rate = r;
*skippable = s;
#if CONFIG_TX32X32
}
#endif
}
static void super_block_64_uvrd(MACROBLOCK *x,
int *rate,
int *distortion,
int *skippable) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
const uint8_t *usrc = x->src.u_buffer, *udst = xd->dst.u_buffer;
const uint8_t *vsrc = x->src.v_buffer, *vdst = xd->dst.v_buffer;
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
ENTROPY_CONTEXT_PLANES t_above[4], t_left[4];
ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
int d = 0, r = 0, n, s = 1;
memcpy(t_above, xd->above_context, sizeof(t_above));
memcpy(t_left, xd->left_context, sizeof(t_left));
#if CONFIG_TX32X32
if (mbmi->txfm_size == TX_32X32) {
int n;
*rate = 0;
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
int r_tmp, d_tmp, s_tmp;
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
usrc + x_idx * 16 + y_idx * 16 * src_uv_stride,
vsrc + x_idx * 16 + y_idx * 16 * src_uv_stride,
src_uv_stride,
udst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
vdst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
dst_uv_stride);
xd->above_context = t_above + x_idx * 2;
xd->left_context = t_left + y_idx * 2;
rd_inter32x32_uv_16x16(x, &r_tmp, &d_tmp, &s_tmp, 0);
r += r_tmp;
d += d_tmp;
s = s && s_tmp;
}
} else {
#endif
for (n = 0; n < 16; n++) {
int x_idx = n & 3, y_idx = n >> 2;
vp9_subtract_mbuv_s_c(x->src_diff,
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
src_uv_stride,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
if (mbmi->txfm_size == TX_4X4) {
vp9_transform_mbuv_4x4(x);
vp9_quantize_mbuv_4x4(x);
s &= vp9_mbuv_is_skippable_4x4(xd);
} else {
vp9_transform_mbuv_8x8(x);
vp9_quantize_mbuv_8x8(x);
s &= vp9_mbuv_is_skippable_8x8(xd);
}
xd->above_context = t_above + x_idx;
xd->left_context = t_left + y_idx;
d += vp9_mbuverror(x) >> 2;
if (mbmi->txfm_size == TX_4X4) {
r += rd_cost_mbuv_4x4(x, 0);
} else {
r += rd_cost_mbuv_8x8(x, 0);
}
}
#if CONFIG_TX32X32
}
#endif
xd->above_context = ta;
xd->left_context = tl;
*distortion = d; *distortion = d;
*rate = r; *rate = r;
*skippable = s; *skippable = s;
xd->left_context = tl; xd->left_context = tl_orig;
xd->above_context = ta; xd->above_context = ta_orig;
memcpy(xd->above_context, t_above, sizeof(t_above));
memcpy(xd->left_context, t_left, sizeof(t_left));
#if CONFIG_TX32X32
}
#endif
} }
static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi,
@ -2072,6 +2318,45 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi,
return best_rd; return best_rd;
} }
#if CONFIG_SUPERBLOCKS64
/* Search all chroma intra prediction modes (DC_PRED..TM_PRED) for a 64x64
 * superblock and keep the one with the lowest rate-distortion cost.
 * On return, the winning mode is written into mbmi->uv_mode, and *rate,
 * *rate_tokenonly, *distortion and *skippable describe the winner.
 * Returns the best RD cost.  cpi is unused (kept for signature parity
 * with the luma variant). */
static int64_t rd_pick_intra_sb64uv_mode(VP9_COMP *cpi,
                                         MACROBLOCK *x,
                                         int *rate,
                                         int *rate_tokenonly,
                                         int *distortion,
                                         int *skippable) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_PREDICTION_MODE try_mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
  int64_t best_rd = INT64_MAX;

  for (try_mode = DC_PRED; try_mode <= TM_PRED; try_mode++) {
    int tok_rate, total_rate, dist, skip_flag;
    int64_t rd;

    xd->mode_info_context->mbmi.uv_mode = try_mode;
    vp9_build_intra_predictors_sb64uv_s(xd);

    super_block_64_uvrd(x, &tok_rate, &dist, &skip_flag);
    total_rate = tok_rate +
                 x->intra_uv_mode_cost[xd->frame_type][try_mode];
    rd = RDCOST(x->rdmult, x->rddiv, total_rate, dist);
    if (rd >= best_rd)
      continue;

    best_mode = try_mode;
    best_rd = rd;
    *rate = total_rate;
    *rate_tokenonly = tok_rate;
    *distortion = dist;
    *skippable = skip_flag;
  }

  xd->mode_info_context->mbmi.uv_mode = best_mode;
  return best_rd;
}
#endif // CONFIG_SUPERBLOCKS64
#endif #endif
int vp9_cost_mv_ref(VP9_COMP *cpi, int vp9_cost_mv_ref(VP9_COMP *cpi,
@ -3161,8 +3446,6 @@ static void inter_mode_cost(VP9_COMP *cpi, MACROBLOCK *x,
*skippable = y_skippable && uv_skippable; *skippable = y_skippable && uv_skippable;
} }
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
int idx, MV_REFERENCE_FRAME frame_type, int idx, MV_REFERENCE_FRAME frame_type,
int block_size, int block_size,
@ -3367,7 +3650,28 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
} }
#endif #endif
if (block_size == BLOCK_16X16) { #if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
vp9_build_inter64x64_predictors_sb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
} else
#endif // CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_32X32) {
vp9_build_inter32x32_predictors_sb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
} else
#endif // CONFIG_SUPERBLOCKS
{
assert(block_size == BLOCK_16X16);
vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0); vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
if (is_comp_pred) if (is_comp_pred)
vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16); vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
@ -3375,15 +3679,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (is_comp_interintra_pred) { if (is_comp_interintra_pred) {
vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16); vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
} }
#endif
} else {
#if CONFIG_SUPERBLOCKS
vp9_build_inter32x32_predictors_sb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride);
#endif #endif
} }
@ -3397,14 +3692,22 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (threshold < x->encode_breakout) if (threshold < x->encode_breakout)
threshold = x->encode_breakout; threshold = x->encode_breakout;
if (block_size == BLOCK_16X16) {
var = vp9_variance16x16(*(b->base_src), b->src_stride,
xd->predictor, 16, &sse);
} else {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
var = vp9_variance64x64(*(b->base_src), b->src_stride,
xd->dst.y_buffer, xd->dst.y_stride, &sse);
} else
#endif // CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_32X32) {
var = vp9_variance32x32(*(b->base_src), b->src_stride, var = vp9_variance32x32(*(b->base_src), b->src_stride,
xd->dst.y_buffer, xd->dst.y_stride, &sse); xd->dst.y_buffer, xd->dst.y_stride, &sse);
#endif } else
#endif // CONFIG_SUPERBLOCK
{
assert(block_size == BLOCK_16X16);
var = vp9_variance16x16(*(b->base_src), b->src_stride,
xd->predictor, 16, &sse);
} }
if ((int)sse < threshold) { if ((int)sse < threshold) {
@ -3416,15 +3719,29 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// Check u and v to make sure skip is ok // Check u and v to make sure skip is ok
int sse2; int sse2;
if (block_size == BLOCK_16X16) { #if CONFIG_SUPERBLOCKS
sse2 = vp9_uvsse(x); #if CONFIG_SUPERBLOCKS64
} else { if (block_size == BLOCK_64X64) {
unsigned int sse2u, sse2v;
var = vp9_variance32x32(x->src.u_buffer, x->src.uv_stride,
xd->dst.u_buffer, xd->dst.uv_stride, &sse2u);
var = vp9_variance32x32(x->src.v_buffer, x->src.uv_stride,
xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
sse2 = sse2u + sse2v;
} else
#endif // CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_32X32) {
unsigned int sse2u, sse2v; unsigned int sse2u, sse2v;
var = vp9_variance16x16(x->src.u_buffer, x->src.uv_stride, var = vp9_variance16x16(x->src.u_buffer, x->src.uv_stride,
xd->dst.u_buffer, xd->dst.uv_stride, &sse2u); xd->dst.u_buffer, xd->dst.uv_stride, &sse2u);
var = vp9_variance16x16(x->src.v_buffer, x->src.uv_stride, var = vp9_variance16x16(x->src.v_buffer, x->src.uv_stride,
xd->dst.v_buffer, xd->dst.uv_stride, &sse2v); xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
sse2 = sse2u + sse2v; sse2 = sse2u + sse2v;
} else
#endif // CONFIG_SUPERBLOCKS
{
assert(block_size == BLOCK_16X16);
sse2 = vp9_uvsse(x);
} }
if (sse2 * 2 < threshold) { if (sse2 * 2 < threshold) {
@ -3455,23 +3772,26 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
} }
if (!x->skip) { if (!x->skip) {
if (block_size == BLOCK_16X16) {
vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
if (is_comp_pred)
vp9_build_2nd_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
#if CONFIG_COMP_INTERINTRA_PRED
if (is_comp_interintra_pred) {
vp9_build_interintra_16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
}
#endif
inter_mode_cost(cpi, x, rate2, distortion,
rate_y, distortion_y, rate_uv, distortion_uv,
skippable, txfm_cache);
} else {
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
int skippable_y, skippable_uv;
// Y cost and distortion
super_block_64_yrd(cpi, x, rate_y, distortion_y,
&skippable_y, txfm_cache);
*rate2 += *rate_y;
*distortion += *distortion_y;
rd_inter64x64_uv(cpi, x, rate_uv, distortion_uv,
cm->full_pixel, &skippable_uv);
*rate2 += *rate_uv;
*distortion += *distortion_uv;
*skippable = skippable_y && skippable_uv;
} else
#endif // CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_32X32) {
int skippable_y, skippable_uv; int skippable_y, skippable_uv;
// Y cost and distortion // Y cost and distortion
@ -3486,7 +3806,25 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
*rate2 += *rate_uv; *rate2 += *rate_uv;
*distortion += *distortion_uv; *distortion += *distortion_uv;
*skippable = skippable_y && skippable_uv; *skippable = skippable_y && skippable_uv;
} else
#endif // CONFIG_SUPERBLOCKS
{
assert(block_size == BLOCK_16X16);
vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
if (is_comp_pred)
vp9_build_2nd_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
#if CONFIG_COMP_INTERINTRA_PRED
if (is_comp_interintra_pred) {
vp9_build_interintra_16x16_predictors_mbuv(xd, &xd->predictor[256],
&xd->predictor[320], 8);
}
#endif #endif
inter_mode_cost(cpi, x, rate2, distortion,
rate_y, distortion_y, rate_uv, distortion_uv,
skippable, txfm_cache);
} }
} }
return this_rd; // if 0, this will be re-calculated by caller return this_rd; // if 0, this will be re-calculated by caller
@ -3554,7 +3892,8 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
vpx_memset(&frame_mv, 0, sizeof(frame_mv)); vpx_memset(&frame_mv, 0, sizeof(frame_mv));
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode)); vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
vpx_memset(&best_bmodes, 0, sizeof(best_bmodes)); vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
vpx_memset(&x->mb_context[xd->mb_index], 0, sizeof(PICK_MODE_CONTEXT)); vpx_memset(&x->mb_context[xd->sb_index][xd->mb_index], 0,
sizeof(PICK_MODE_CONTEXT));
for (i = 0; i < MAX_REF_FRAMES; i++) for (i = 0; i < MAX_REF_FRAMES; i++)
frame_mv[NEWMV][i].as_int = INVALID_MV; frame_mv[NEWMV][i].as_int = INVALID_MV;
@ -3787,7 +4126,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
#if CONFIG_COMP_INTRA_PRED #if CONFIG_COMP_INTRA_PRED
0, 0,
#endif #endif
0); cpi->update_context);
rate2 += rate; rate2 += rate;
distortion2 += distortion; distortion2 += distortion;
@ -4298,16 +4637,16 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
} }
end: end:
store_coding_context( store_coding_context(x, &x->mb_context[xd->sb_index][xd->mb_index],
x, &x->mb_context[xd->mb_index], best_mode_index, &best_partition, best_mode_index, &best_partition,
&mbmi->ref_mvs[mbmi->ref_frame][0], &mbmi->ref_mvs[mbmi->ref_frame][0],
&mbmi->ref_mvs[mbmi->second_ref_frame < 0 &mbmi->ref_mvs[mbmi->second_ref_frame < 0 ? 0 :
? 0 : mbmi->second_ref_frame][0], mbmi->second_ref_frame][0],
best_pred_diff, best_txfm_diff); best_pred_diff, best_txfm_diff);
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, void vp9_rd_pick_intra_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
int *returnrate, int *returnrate,
int *returndist) { int *returndist) {
VP9_COMMON *cm = &cpi->common; VP9_COMMON *cm = &cpi->common;
@ -4335,6 +4674,37 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
*returndist = dist_y + (dist_uv >> 2); *returndist = dist_y + (dist_uv >> 2);
} }
} }
#if CONFIG_SUPERBLOCKS64
/* Pick the best intra mode for a 64x64 superblock: runs the luma and
 * chroma mode searches independently, then combines their rate and
 * distortion into *returnrate / *returndist.  Chroma distortion is
 * scaled by >> 2 to weight it against luma.
 * NOTE(review): error_y and error_uv capture the per-plane RD costs but
 * are not read afterwards — presumably kept for symmetry with the 16x16
 * path; confirm before removing. */
void vp9_rd_pick_intra_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
                                 int *returnrate,
                                 int *returndist) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int rate_y, rate_uv;
  int rate_y_tokenonly, rate_uv_tokenonly;
  int error_y, error_uv;
  int dist_y, dist_uv;
  int y_skip, uv_skip;
  int64_t txfm_cache[NB_TXFM_MODES];

  error_y = rd_pick_intra_sb64y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
                                     &dist_y, &y_skip, txfm_cache);
  error_uv = rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
                                       &dist_uv, &uv_skip);

  if (cpi->common.mb_no_coeff_skip && y_skip && uv_skip) {
    /* Whole superblock is skippable: drop the token rates and pay only
     * the mode signalling plus the "skip" flag. */
    *returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
                  vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
    *returndist = dist_y + (dist_uv >> 2);
  } else {
    *returnrate = rate_y + rate_uv;
    /* Not skipped: if skip flags are coded at all, pay for signalling 0. */
    if (cm->mb_no_coeff_skip)
      *returnrate += vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
    *returndist = dist_y + (dist_uv >> 2);
  }
}
#endif
#endif #endif
void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
@ -4409,11 +4779,12 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
#if CONFIG_COMP_INTRA_PRED #if CONFIG_COMP_INTRA_PRED
0, 0,
#endif #endif
0); cpi->update_context);
#if CONFIG_COMP_INTRA_PRED #if CONFIG_COMP_INTRA_PRED
error4x4d = rd_pick_intra4x4mby_modes(cpi, x, error4x4d = rd_pick_intra4x4mby_modes(cpi, x,
&rate4x4d, &rate4x4_tokenonly, &rate4x4d, &rate4x4_tokenonly,
&dist4x4d, error16x16, 1, 0); &dist4x4d, error16x16, 1,
cpi->update_context);
#endif #endif
mbmi->mb_skip_coeff = 0; mbmi->mb_skip_coeff = 0;
@ -4426,8 +4797,8 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1); vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
dist = dist16x16 + (distuv8x8 >> 2); dist = dist16x16 + (distuv8x8 >> 2);
mbmi->txfm_size = txfm_size_16x16; mbmi->txfm_size = txfm_size_16x16;
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0, memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff)); sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
} else if (error8x8 > error16x16) { } else if (error8x8 > error16x16) {
if (error4x4 < error16x16) { if (error4x4 < error16x16) {
rate = rateuv; rate = rateuv;
@ -4444,15 +4815,16 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
mbmi->mode = B_PRED; mbmi->mode = B_PRED;
mbmi->txfm_size = TX_4X4; mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2); dist = dist4x4 + (distuv >> 2);
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0, memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff)); sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
} else { } else {
mbmi->txfm_size = txfm_size_16x16; mbmi->txfm_size = txfm_size_16x16;
mbmi->mode = mode16x16; mbmi->mode = mode16x16;
rate = rate16x16 + rateuv8x8; rate = rate16x16 + rateuv8x8;
dist = dist16x16 + (distuv8x8 >> 2); dist = dist16x16 + (distuv8x8 >> 2);
for (i = 0; i < NB_TXFM_MODES; i++) { for (i = 0; i < NB_TXFM_MODES; i++) {
x->mb_context[xd->mb_index].txfm_rd_diff[i] = error16x16 - txfm_cache[i]; x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff[i] =
error16x16 - txfm_cache[i];
} }
} }
if (cpi->common.mb_no_coeff_skip) if (cpi->common.mb_no_coeff_skip)
@ -4473,8 +4845,8 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
mbmi->mode = B_PRED; mbmi->mode = B_PRED;
mbmi->txfm_size = TX_4X4; mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2); dist = dist4x4 + (distuv >> 2);
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0, memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff)); sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
} else { } else {
// FIXME(rbultje) support transform-size selection // FIXME(rbultje) support transform-size selection
mbmi->mode = I8X8_PRED; mbmi->mode = I8X8_PRED;
@ -4482,8 +4854,8 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
set_i8x8_block_modes(x, mode8x8); set_i8x8_block_modes(x, mode8x8);
rate = rate8x8 + rateuv; rate = rate8x8 + rateuv;
dist = dist8x8 + (distuv >> 2); dist = dist8x8 + (distuv >> 2);
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0, memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff)); sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
} }
if (cpi->common.mb_no_coeff_skip) if (cpi->common.mb_no_coeff_skip)
rate += vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0); rate += vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
@ -4494,9 +4866,11 @@ void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int recon_yoffset, int recon_uvoffset, int recon_yoffset, int recon_uvoffset,
int *returnrate, int *returndistortion) { int *returnrate,
int *returndistortion,
int block_size) {
VP9_COMMON *cm = &cpi->common; VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd; MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
@ -4556,7 +4930,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) { for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
if (cpi->ref_frame_flags & flag_list[ref_frame]) { if (cpi->ref_frame_flags & flag_list[ref_frame]) {
setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, BLOCK_32X32, setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, block_size,
recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV], recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_mdcounts, frame_mv[NEARMV], frame_mdcounts,
y_buffer, u_buffer, v_buffer); y_buffer, u_buffer, v_buffer);
@ -4565,6 +4939,34 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
frame_mv[ZEROMV][ref_frame].as_int = 0; frame_mv[ZEROMV][ref_frame].as_int = 0;
} }
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
mbmi->mode = DC_PRED;
if (cm->txfm_mode == ONLY_4X4 || cm->txfm_mode == TX_MODE_SELECT) {
mbmi->txfm_size = TX_4X4;
rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv_4x4, &rate_uv_tokenonly_4x4,
&dist_uv_4x4, &uv_skip_4x4);
mode_uv_4x4 = mbmi->uv_mode;
}
if (cm->txfm_mode != ONLY_4X4) {
mbmi->txfm_size = TX_8X8;
rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv_8x8, &rate_uv_tokenonly_8x8,
&dist_uv_8x8, &uv_skip_8x8);
mode_uv_8x8 = mbmi->uv_mode;
}
#if CONFIG_TX32X32
if (cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv_16x16,
&rate_uv_tokenonly_16x16,
&dist_uv_16x16, &uv_skip_16x16);
mode_uv_16x16 = mbmi->uv_mode;
}
#endif // CONFIG_TX32X32
} else
#endif // CONFIG_SUPERBLOCKS64
{
assert(block_size == BLOCK_32X32);
mbmi->mode = DC_PRED; mbmi->mode = DC_PRED;
if (cm->txfm_mode == ONLY_4X4 || cm->txfm_mode == TX_MODE_SELECT) { if (cm->txfm_mode == ONLY_4X4 || cm->txfm_mode == TX_MODE_SELECT) {
mbmi->txfm_size = TX_4X4; mbmi->txfm_size = TX_4X4;
@ -4585,7 +4987,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
&dist_uv_16x16, &uv_skip_16x16); &dist_uv_16x16, &uv_skip_16x16);
mode_uv_16x16 = mbmi->uv_mode; mode_uv_16x16 = mbmi->uv_mode;
} }
#endif #endif // CONFIG_TX32X32
}
for (mode_index = 0; mode_index < MAX_MODES; for (mode_index = 0; mode_index < MAX_MODES;
mode_index += (!switchable_filter_index)) { mode_index += (!switchable_filter_index)) {
@ -4713,9 +5116,19 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
} }
if (ref_frame == INTRA_FRAME) { if (ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS64
if (block_size == BLOCK_64X64) {
vp9_build_intra_predictors_sb64y_s(xd);
super_block_64_yrd(cpi, x, &rate_y, &distortion_y,
&skippable, txfm_cache);
} else
#endif // CONFIG_SUPERBLOCKS64
{
assert(block_size == BLOCK_32X32);
vp9_build_intra_predictors_sby_s(xd); vp9_build_intra_predictors_sby_s(xd);
super_block_yrd(cpi, x, &rate_y, &distortion_y, super_block_yrd(cpi, x, &rate_y, &distortion_y,
&skippable, txfm_cache); &skippable, txfm_cache);
}
if (mbmi->txfm_size == TX_4X4) { if (mbmi->txfm_size == TX_4X4) {
rate_uv = rate_uv_4x4; rate_uv = rate_uv_4x4;
distortion_uv = dist_uv_4x4; distortion_uv = dist_uv_4x4;
@ -4727,7 +5140,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
distortion_uv = dist_uv_16x16; distortion_uv = dist_uv_16x16;
skippable = skippable && uv_skip_16x16; skippable = skippable && uv_skip_16x16;
mbmi->uv_mode = mode_uv_16x16; mbmi->uv_mode = mode_uv_16x16;
#endif #endif // CONFIG_TX32X32
} else { } else {
rate_uv = rate_uv_8x8; rate_uv = rate_uv_8x8;
distortion_uv = dist_uv_8x8; distortion_uv = dist_uv_8x8;
@ -4749,7 +5162,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
#endif #endif
} }
#endif #endif
this_rd = handle_inter_mode(cpi, x, BLOCK_32X32, this_rd = handle_inter_mode(cpi, x, block_size,
&saddone, near_sadidx, mdcounts, txfm_cache, &saddone, near_sadidx, mdcounts, txfm_cache,
&rate2, &distortion2, &skippable, &rate2, &distortion2, &skippable,
&compmode_cost, &compmode_cost,
@ -5021,14 +5434,41 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
} }
end: end:
store_coding_context(x, &x->sb_context[0], best_mode_index, NULL, {
#if CONFIG_SUPERBLOCKS64
PICK_MODE_CONTEXT *p = (block_size == BLOCK_32X32) ?
&x->sb32_context[xd->sb_index] :
&x->sb64_context;
#else
PICK_MODE_CONTEXT *p = &x->sb32_context[xd->sb_index];
#endif
store_coding_context(x, p, best_mode_index, NULL,
&mbmi->ref_mvs[mbmi->ref_frame][0], &mbmi->ref_mvs[mbmi->ref_frame][0],
&mbmi->ref_mvs[mbmi->second_ref_frame < 0 &mbmi->ref_mvs[mbmi->second_ref_frame < 0 ? 0 :
? 0 : mbmi->second_ref_frame][0], mbmi->second_ref_frame][0],
best_pred_diff, best_txfm_diff); best_pred_diff, best_txfm_diff);
}
return best_rd; return best_rd;
} }
/* Public entry point: inter mode decision for a 32x32 superblock.
 * Thin wrapper that dispatches to the shared superblock RD search with
 * the block size fixed to BLOCK_32X32. */
int64_t vp9_rd_pick_inter_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
                                    int recon_yoffset, int recon_uvoffset,
                                    int *returnrate,
                                    int *returndistortion) {
  int64_t best_rd;

  best_rd = vp9_rd_pick_inter_mode_sb(cpi, x, recon_yoffset, recon_uvoffset,
                                      returnrate, returndistortion,
                                      BLOCK_32X32);
  return best_rd;
}
#if CONFIG_SUPERBLOCKS64
/* Public entry point: inter mode decision for a 64x64 superblock.
 * Thin wrapper that dispatches to the shared superblock RD search with
 * the block size fixed to BLOCK_64X64. */
int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
                                    int recon_yoffset, int recon_uvoffset,
                                    int *returnrate,
                                    int *returndistortion) {
  int64_t best_rd;

  best_rd = vp9_rd_pick_inter_mode_sb(cpi, x, recon_yoffset, recon_uvoffset,
                                      returnrate, returndistortion,
                                      BLOCK_64X64);
  return best_rd;
}
#endif // CONFIG_SUPERBLOCKS64
#endif #endif
void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x, void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
@ -5063,8 +5503,8 @@ void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
// vp9_pick_inter_mode // vp9_pick_inter_mode
// Store metrics so they can be added in to totals if this mode is picked // Store metrics so they can be added in to totals if this mode is picked
x->mb_context[xd->mb_index].distortion = distortion; x->mb_context[xd->sb_index][xd->mb_index].distortion = distortion;
x->mb_context[xd->mb_index].intra_error = intra_error; x->mb_context[xd->sb_index][xd->mb_index].intra_error = intra_error;
*totalrate = rate; *totalrate = rate;
*totaldist = distortion; *totaldist = distortion;

View File

@ -22,16 +22,23 @@ extern void vp9_initialize_me_consts(VP9_COMP *cpi, int QIndex);
extern void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, extern void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
int *r, int *d); int *r, int *d);
extern void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, extern void vp9_rd_pick_intra_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
int *r, int *d);
extern void vp9_rd_pick_intra_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
int *r, int *d); int *r, int *d);
extern void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x, extern void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
int recon_yoffset, int ref_yoffset, int ref_uvoffset,
int recon_uvoffset, int *r, int *d); int *r, int *d);
extern int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, extern int64_t vp9_rd_pick_inter_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
int recon_yoffset, int recon_uvoffset, int ref_yoffset, int ref_uvoffset,
int *returnrate, int *returndist); int *r, int *d);
extern int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
int ref_yoffset, int ref_uvoffset,
int *r, int *d);
extern void vp9_init_me_luts(); extern void vp9_init_me_luts();

View File

@ -14,6 +14,14 @@
#include "vpx_ports/config.h" #include "vpx_ports/config.h"
#include "vpx/vpx_integer.h" #include "vpx/vpx_integer.h"
unsigned int vp9_sad64x64_c(const uint8_t *src_ptr,
int src_stride,
const uint8_t *ref_ptr,
int ref_stride,
int max_sad) {
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 64, 64);
}
unsigned int vp9_sad32x32_c(const uint8_t *src_ptr, unsigned int vp9_sad32x32_c(const uint8_t *src_ptr,
int src_stride, int src_stride,
const uint8_t *ref_ptr, const uint8_t *ref_ptr,
@ -64,6 +72,19 @@ unsigned int vp9_sad4x4_c(const uint8_t *src_ptr,
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 4); return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 4);
} }
void vp9_sad64x64x3_c(const uint8_t *src_ptr,
int src_stride,
const uint8_t *ref_ptr,
int ref_stride,
unsigned int *sad_array) {
sad_array[0] = vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr, ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 1, ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 2, ref_stride, 0x7fffffff);
}
void vp9_sad32x32x3_c(const uint8_t *src_ptr, void vp9_sad32x32x3_c(const uint8_t *src_ptr,
int src_stride, int src_stride,
const uint8_t *ref_ptr, const uint8_t *ref_ptr,
@ -77,6 +98,37 @@ void vp9_sad32x32x3_c(const uint8_t *src_ptr,
ref_ptr + 2, ref_stride, 0x7fffffff); ref_ptr + 2, ref_stride, 0x7fffffff);
} }
void vp9_sad64x64x8_c(const uint8_t *src_ptr,
int src_stride,
const uint8_t *ref_ptr,
int ref_stride,
uint16_t *sad_array) {
sad_array[0] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr, ref_stride,
0x7fffffff);
sad_array[1] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 1, ref_stride,
0x7fffffff);
sad_array[2] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 2, ref_stride,
0x7fffffff);
sad_array[3] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 3, ref_stride,
0x7fffffff);
sad_array[4] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 4, ref_stride,
0x7fffffff);
sad_array[5] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 5, ref_stride,
0x7fffffff);
sad_array[6] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 6, ref_stride,
0x7fffffff);
sad_array[7] = (uint16_t)vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr + 7, ref_stride,
0x7fffffff);
}
void vp9_sad32x32x8_c(const uint8_t *src_ptr, void vp9_sad32x32x8_c(const uint8_t *src_ptr,
int src_stride, int src_stride,
const uint8_t *ref_ptr, const uint8_t *ref_ptr,
@ -328,6 +380,21 @@ void vp9_sad4x4x8_c(const uint8_t *src_ptr,
0x7fffffff); 0x7fffffff);
} }
void vp9_sad64x64x4d_c(const uint8_t *src_ptr,
int src_stride,
uint8_t *ref_ptr[],
int ref_stride,
unsigned int *sad_array) {
sad_array[0] = vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr[0], ref_stride, 0x7fffffff);
sad_array[1] = vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr[1], ref_stride, 0x7fffffff);
sad_array[2] = vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr[2], ref_stride, 0x7fffffff);
sad_array[3] = vp9_sad64x64_c(src_ptr, src_stride,
ref_ptr[3], ref_stride, 0x7fffffff);
}
void vp9_sad32x32x4d_c(const uint8_t *src_ptr, void vp9_sad32x32x4d_c(const uint8_t *src_ptr,
int src_stride, int src_stride,
uint8_t *ref_ptr[], uint8_t *ref_ptr[],

View File

@ -141,21 +141,57 @@ static int cost_segmap(MACROBLOCKD *xd,
segcounts[3] * vp9_cost_one(probs[2]); segcounts[3] * vp9_cost_one(probs[2]);
return cost; return cost;
}
static void count_segs(VP9_COMP *cpi,
MODE_INFO *mi,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
int *t_unpred_seg_counts,
int mb_size, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const int segmap_index = mb_row * cm->mb_cols + mb_col;
const int segment_id = mi->mbmi.segment_id;
xd->mode_info_context = mi;
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - mb_size - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((cm->mb_cols - mb_size - mb_col) * 16) << 3;
// Count the number of hits on each segment with no prediction
no_pred_segcounts[segment_id]++;
// Temporal prediction not allowed on key frames
if (cm->frame_type != KEY_FRAME) {
// Test to see if the segment id matches the predicted value.
const int seg_predicted =
(segment_id == vp9_get_pred_mb_segid(cm, xd, segmap_index));
// Get the segment id prediction context
const int pred_context = vp9_get_pred_context(cm, xd, PRED_SEG_ID);
// Store the prediction status for this mb and update counts
// as appropriate
vp9_set_pred_flag(xd, PRED_SEG_ID, seg_predicted);
temporal_predictor_count[pred_context][seg_predicted]++;
if (!seg_predicted)
// Update the "unpredicted" segment count
t_unpred_seg_counts[segment_id]++;
}
} }
void vp9_choose_segmap_coding_method(VP9_COMP *cpi) { void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common; VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd; MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int i;
int no_pred_cost; int no_pred_cost;
int t_pred_cost = INT_MAX; int t_pred_cost = INT_MAX;
int pred_context;
int i;
int mb_row, mb_col; int mb_row, mb_col;
int segmap_index = 0;
unsigned char segment_id;
int temporal_predictor_count[PREDICTION_PROBS][2]; int temporal_predictor_count[PREDICTION_PROBS][2];
int no_pred_segcounts[MAX_MB_SEGMENTS]; int no_pred_segcounts[MAX_MB_SEGMENTS];
@ -165,9 +201,8 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
vp9_prob t_pred_tree[MB_FEATURE_TREE_PROBS]; vp9_prob t_pred_tree[MB_FEATURE_TREE_PROBS];
vp9_prob t_nopred_prob[PREDICTION_PROBS]; vp9_prob t_nopred_prob[PREDICTION_PROBS];
#if CONFIG_SUPERBLOCKS
const int mis = cm->mode_info_stride; const int mis = cm->mode_info_stride;
#endif MODE_INFO *mi_ptr = cm->mi, *mi;
// Set default state for the segment tree probabilities and the // Set default state for the segment tree probabilities and the
// temporal coding probabilities // temporal coding probabilities
@ -183,87 +218,57 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
// First of all generate stats regarding how well the last segment map // First of all generate stats regarding how well the last segment map
// predicts this one // predicts this one
// Initialize macroblock decoder mode info context for the first mb for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 4, mi_ptr += 4 * mis) {
// in the frame mi = mi_ptr;
xd->mode_info_context = cm->mi; for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 4, mi += 4) {
#if CONFIG_SUPERBLOCKS && CONFIG_SUPERBLOCKS64
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 2) { if (mi->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 2) { count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, 4, mb_row, mb_col);
} else
#endif
{
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
static const int dx[4] = { +1, -1, +1, +1 }; int x_idx = (i & 1) << 1, y_idx = i & 2;
static const int dy[4] = { 0, +1, 0, -1 }; #if CONFIG_SUPERBLOCKS
int x_idx = i & 1, y_idx = i >> 1; MODE_INFO *sb_mi = mi + y_idx * mis + x_idx;
#endif
if (mb_col + x_idx >= cm->mb_cols || if (mb_col + x_idx >= cm->mb_cols ||
mb_row + y_idx >= cm->mb_rows) { mb_row + y_idx >= cm->mb_rows) {
goto end; continue;
}
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_left_edge = -((mb_col * 16) << 3);
segmap_index = (mb_row + y_idx) * cm->mb_cols + mb_col + x_idx;
segment_id = xd->mode_info_context->mbmi.segment_id;
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
if (mb_col + 1 < cm->mb_cols)
segment_id = segment_id &&
xd->mode_info_context[1].mbmi.segment_id;
if (mb_row + 1 < cm->mb_rows) {
segment_id = segment_id &&
xd->mode_info_context[mis].mbmi.segment_id;
if (mb_col + 1 < cm->mb_cols)
segment_id = segment_id &&
xd->mode_info_context[mis + 1].mbmi.segment_id;
}
xd->mb_to_bottom_edge = ((cm->mb_rows - 2 - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((cm->mb_cols - 2 - mb_col) * 16) << 3;
} else {
#endif
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
#if CONFIG_SUPERBLOCKS
}
#endif
// Count the number of hits on each segment with no prediction
no_pred_segcounts[segment_id]++;
// Temporal prediction not allowed on key frames
if (cm->frame_type != KEY_FRAME) {
// Test to see if the segment id matches the predicted value.
int seg_predicted =
(segment_id == vp9_get_pred_mb_segid(cm, xd, segmap_index));
// Get the segment id prediction context
pred_context =
vp9_get_pred_context(cm, xd, PRED_SEG_ID);
// Store the prediction status for this mb and update counts
// as appropriate
vp9_set_pred_flag(xd, PRED_SEG_ID, seg_predicted);
temporal_predictor_count[pred_context][seg_predicted]++;
if (!seg_predicted)
// Update the "unpredicted" segment count
t_unpred_seg_counts[segment_id]++;
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) { if (sb_mi->mbmi.sb_type) {
assert(!i); assert(sb_mi->mbmi.sb_type == BLOCK_SIZE_SB32X32);
xd->mode_info_context += 2; count_segs(cpi, sb_mi, no_pred_segcounts, temporal_predictor_count,
break; t_unpred_seg_counts, 2, mb_row + y_idx, mb_col + x_idx);
} } else
#endif #endif
end: {
xd->mode_info_context += dx[i] + dy[i] * cm->mode_info_stride; int j;
}
for (j = 0; j < 4; j++) {
const int x_idx_mb = x_idx + (j & 1), y_idx_mb = y_idx + (j >> 1);
MODE_INFO *mb_mi = mi + x_idx_mb + y_idx_mb * mis;
if (mb_col + x_idx_mb >= cm->mb_cols ||
mb_row + y_idx_mb >= cm->mb_rows) {
continue;
} }
// this is to account for the border in mode_info_context #if CONFIG_SUPERBLOCKS
xd->mode_info_context -= mb_col; assert(mb_mi->mbmi.sb_type == BLOCK_SIZE_MB16X16);
xd->mode_info_context += cm->mode_info_stride * 2; #endif
count_segs(cpi, mb_mi, no_pred_segcounts,
temporal_predictor_count, t_unpred_seg_counts,
1, mb_row + y_idx_mb, mb_col + x_idx_mb);
}
}
}
}
}
} }
// Work out probability tree for coding segments without prediction // Work out probability tree for coding segments without prediction

View File

@ -25,6 +25,19 @@ unsigned int vp9_get_mb_ss_c(const int16_t *src_ptr) {
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
int recon_stride,
unsigned int *sse) {
unsigned int var;
int avg;
variance(src_ptr, source_stride, ref_ptr, recon_stride, 64, 64, &var, &avg);
*sse = var;
return (var - (((int64_t)avg * avg) >> 12));
}
unsigned int vp9_variance32x32_c(const uint8_t *src_ptr, unsigned int vp9_variance32x32_c(const uint8_t *src_ptr,
int source_stride, int source_stride,
const uint8_t *ref_ptr, const uint8_t *ref_ptr,
@ -185,6 +198,27 @@ unsigned int vp9_sub_pixel_variance16x16_c(const uint8_t *src_ptr,
} }
#if CONFIG_SUPERBLOCKS #if CONFIG_SUPERBLOCKS
unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
const uint8_t *dst_ptr,
int dst_pixels_per_line,
unsigned int *sse) {
uint16_t FData3[65 * 64]; // Temp data bufffer used in filtering
uint8_t temp2[68 * 64];
const int16_t *HFilter, *VFilter;
HFilter = vp9_bilinear_filters[xoffset];
VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line,
1, 65, 64, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 64, 64, 64, 64, VFilter);
return vp9_variance64x64_c(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
}
unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr, unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
int src_pixels_per_line, int src_pixels_per_line,
int xoffset, int xoffset,
@ -224,6 +258,15 @@ unsigned int vp9_variance_halfpixvar32x32_h_c(const uint8_t *src_ptr,
return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 0, return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 0,
ref_ptr, recon_stride, sse); ref_ptr, recon_stride, sse);
} }
unsigned int vp9_variance_halfpixvar64x64_h_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 0,
ref_ptr, recon_stride, sse);
}
#endif #endif
@ -245,6 +288,15 @@ unsigned int vp9_variance_halfpixvar32x32_v_c(const uint8_t *src_ptr,
return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 0, 8, return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 0, 8,
ref_ptr, recon_stride, sse); ref_ptr, recon_stride, sse);
} }
unsigned int vp9_variance_halfpixvar64x64_v_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 0, 8,
ref_ptr, recon_stride, sse);
}
#endif #endif
unsigned int vp9_variance_halfpixvar16x16_hv_c(const uint8_t *src_ptr, unsigned int vp9_variance_halfpixvar16x16_hv_c(const uint8_t *src_ptr,
@ -265,6 +317,15 @@ unsigned int vp9_variance_halfpixvar32x32_hv_c(const uint8_t *src_ptr,
return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 8, return vp9_sub_pixel_variance32x32_c(src_ptr, source_stride, 8, 8,
ref_ptr, recon_stride, sse); ref_ptr, recon_stride, sse);
} }
unsigned int vp9_variance_halfpixvar64x64_hv_c(const uint8_t *src_ptr,
int source_stride,
const uint8_t *ref_ptr,
int recon_stride,
unsigned int *sse) {
return vp9_sub_pixel_variance64x64_c(src_ptr, source_stride, 8, 8,
ref_ptr, recon_stride, sse);
}
#endif #endif
unsigned int vp9_sub_pixel_mse16x16_c(const uint8_t *src_ptr, unsigned int vp9_sub_pixel_mse16x16_c(const uint8_t *src_ptr,
@ -293,6 +354,19 @@ unsigned int vp9_sub_pixel_mse32x32_c(const uint8_t *src_ptr,
dst_pixels_per_line, sse); dst_pixels_per_line, sse);
return *sse; return *sse;
} }
unsigned int vp9_sub_pixel_mse64x64_c(const uint8_t *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
const uint8_t *dst_ptr,
int dst_pixels_per_line,
unsigned int *sse) {
vp9_sub_pixel_variance64x64_c(src_ptr, src_pixels_per_line,
xoffset, yoffset, dst_ptr,
dst_pixels_per_line, sse);
return *sse;
}
#endif #endif
unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr, unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr,