Move eob from BLOCKD to MACROBLOCKD.
Consistent with VP8.

Change-Id: I8c316ee49f072e15abbb033a80e9c36617891f07

Parent: 9615fd8f39
Commit: e8c74e2b70
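The change is easiest to see in isolation before reading the full diff: the per-block `eob` field leaves BLOCKD and becomes a single `eobs[24]` array on MACROBLOCKD, so consumers index by block number (`xd->eobs[i]`) instead of dereferencing the block (`xd->block[i].eob`), and the encoder quantizers take `(MACROBLOCK *, int b_idx)` instead of `(BLOCK *, BLOCKD *)`. The standalone C sketch below mirrors that shape with heavily simplified structs; apart from the BLOCKD/MACROBLOCKD/eobs names and the 16 Y + 4 U + 4 V block layout taken from the diff, everything here is illustrative and not libvpx code.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the real structs -- only the eob-related
 * fields touched by this commit are kept; everything else is omitted. */
typedef struct blockd {
  /* int eob;  <-- removed by this commit */
  int16_t *qcoeff;            /* per-block coefficient pointer (unchanged) */
} BLOCKD;

typedef struct macroblockd {
  BLOCKD block[24];           /* 16 Y + 4 U + 4 V blocks */
  uint16_t eobs[24];          /* new home of the end-of-block counts */
} MACROBLOCKD;

/* Old-style consumer read xd->block[16].eob; the new style reads the
 * shared array, as in the skippability checks later in this diff. */
static int mbuv_is_skippable(const MACROBLOCKD *xd) {
  return (!xd->eobs[16]) & (!xd->eobs[20]);
}

int main(void) {
  MACROBLOCKD xd = {0};
  xd.eobs[16] = 3;            /* pretend the U plane has coefficients */
  printf("skippable: %d\n", mbuv_is_skippable(&xd));
  return 0;
}

Because the eob counts now live on MACROBLOCKD, the quantizer entry points also need the block index rather than raw BLOCK/BLOCKD pointers, which is why their prototypes change throughout the encoder below.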
@@ -276,8 +276,6 @@ typedef struct blockd {
   int dst;
   int dst_stride;
 
-  int eob;
-
   union b_mode_info bmi;
 } BLOCKD;
 
@@ -305,6 +303,7 @@ typedef struct macroblockd {
   DECLARE_ALIGNED(16, uint8_t, predictor[384]);
   DECLARE_ALIGNED(16, int16_t, qcoeff[384]);
   DECLARE_ALIGNED(16, int16_t, dqcoeff[384]);
+  DECLARE_ALIGNED(16, uint16_t, eobs[24]);
 
   SUPERBLOCKD sb_coeff_data;
 
@@ -13,7 +13,7 @@
 
 void vp9_inverse_transform_b_4x4(MACROBLOCKD *xd, int block, int pitch) {
   BLOCKD *b = &xd->block[block];
-  if (b->eob <= 1)
+  if (xd->eobs[block] <= 1)
     xd->inv_txm4x4_1(b->dqcoeff, b->diff, pitch);
   else
     xd->inv_txm4x4(b->dqcoeff, b->diff, pitch);
@@ -226,11 +226,11 @@ static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
     vp9_ht_dequant_idct_add_16x16_c(tx_type, xd->qcoeff,
                                     xd->block[0].dequant, xd->predictor,
                                     xd->dst.y_buffer, 16, xd->dst.y_stride,
-                                    xd->block[0].eob);
+                                    xd->eobs[0]);
   } else {
     vp9_dequant_idct_add_16x16(xd->qcoeff, xd->block[0].dequant,
                                xd->predictor, xd->dst.y_buffer,
-                               16, xd->dst.y_stride, xd->block[0].eob);
+                               16, xd->dst.y_stride, xd->eobs[0]);
   }
   vp9_dequant_idct_add_uv_block_8x8(
       xd->qcoeff + 16 * 16, xd->block[16].dequant,
@@ -272,10 +272,10 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
       tx_type = get_tx_type_8x8(xd, &xd->block[ib]);
       if (tx_type != DCT_DCT) {
         vp9_ht_dequant_idct_add_8x8_c(tx_type, q, dq, pre, dst, 16, stride,
-                                      xd->block[idx].eob);
+                                      xd->eobs[idx]);
       } else {
         vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride,
-                                   xd->block[idx].eob);
+                                   xd->eobs[idx]);
       }
     }
   } else {
@@ -348,7 +348,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
         vp9_ht_dequant_idct_add_c(tx_type, b->qcoeff,
                                   b->dequant, b->predictor,
                                   *(b->base_dst) + b->dst, 16,
-                                  b->dst_stride, b->eob);
+                                  b->dst_stride, xd->eobs[ib + iblock[j]]);
       } else {
         xd->itxm_add(b->qcoeff, b->dequant, b->predictor,
                      *(b->base_dst) + b->dst, 16, b->dst_stride);
@@ -381,7 +381,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
       vp9_ht_dequant_idct_add_c(tx_type, b->qcoeff,
                                 b->dequant, b->predictor,
                                 *(b->base_dst) + b->dst, 16, b->dst_stride,
-                                b->eob);
+                                xd->eobs[i]);
     } else {
       xd->itxm_add(b->qcoeff, b->dequant, b->predictor,
                    *(b->base_dst) + b->dst, 16, b->dst_stride);
@@ -437,7 +437,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
       vp9_ht_dequant_idct_add_c(tx_type, b->qcoeff,
                                 b->dequant, b->predictor,
                                 *(b->base_dst) + b->dst, 16,
-                                b->dst_stride, b->eob);
+                                b->dst_stride, xd->eobs[i]);
     } else {
       xd->itxm_add(b->qcoeff, b->dequant, b->predictor,
                    *(b->base_dst) + b->dst, 16, b->dst_stride);
@@ -463,13 +463,13 @@ static void decode_16x16_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
         tx_type, xd->qcoeff, xd->block[0].dequant,
         xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16,
         xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16,
-        xd->dst.y_stride, xd->dst.y_stride, xd->block[0].eob);
+        xd->dst.y_stride, xd->dst.y_stride, xd->eobs[0]);
   } else {
     vp9_dequant_idct_add_16x16(
         xd->qcoeff, xd->block[0].dequant,
         xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16,
         xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16,
-        xd->dst.y_stride, xd->dst.y_stride, xd->block[0].eob);
+        xd->dst.y_stride, xd->dst.y_stride, xd->eobs[0]);
   }
   vp9_dequant_idct_add_uv_block_8x8_inplace_c(
       xd->qcoeff + 16 * 16,
@@ -492,7 +492,6 @@ static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
       int16_t *q = xd->block[idx].qcoeff;
       int16_t *dq = xd->block[0].dequant;
       int stride = xd->dst.y_stride;
-      BLOCKD *b = &xd->block[ib];
       tx_type = get_tx_type_8x8(xd, &xd->block[ib]);
       if (tx_type != DCT_DCT) {
         vp9_ht_dequant_idct_add_8x8_c(
@@ -501,7 +500,7 @@ static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
                 + x_idx * 16 + (i & 1) * 8,
             xd->dst.y_buffer + (y_idx * 16 + (i / 2) * 8) * xd->dst.y_stride
                 + x_idx * 16 + (i & 1) * 8,
-            stride, stride, b->eob);
+            stride, stride, xd->eobs[idx]);
       } else {
         vp9_dequant_idct_add_8x8_c(
             q, dq,
@@ -509,7 +508,7 @@ static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
                 + x_idx * 16 + (i & 1) * 8,
             xd->dst.y_buffer + (y_idx * 16 + (i / 2) * 8) * xd->dst.y_stride
                 + x_idx * 16 + (i & 1) * 8,
-            stride, stride, b->eob);
+            stride, stride, xd->eobs[idx]);
       }
     }
   } else {
@@ -542,7 +541,7 @@ static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
               + x_idx * 16 + (i & 3) * 4,
           xd->dst.y_buffer + (y_idx * 16 + (i / 4) * 4) * xd->dst.y_stride
               + x_idx * 16 + (i & 3) * 4,
-          xd->dst.y_stride, xd->dst.y_stride, b->eob);
+          xd->dst.y_stride, xd->dst.y_stride, xd->eobs[i]);
     } else {
       xd->itxm_add(
           b->qcoeff, b->dequant,
@@ -569,7 +568,7 @@ static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
 static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
                                 int mb_row, unsigned int mb_col,
                                 BOOL_DECODER* const bc) {
-  int i, n, eobtotal;
+  int n, eobtotal;
   TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
   VP9_COMMON *const pc = &pbi->common;
   MODE_INFO *orig_mi = xd->mode_info_context;
@@ -647,7 +646,7 @@ static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
                                  xd->dst.y_buffer + x_idx * 32 +
                                      xd->dst.y_stride * y_idx * 32,
                                  xd->dst.y_stride, xd->dst.y_stride,
-                                 xd->block[0].eob);
+                                 xd->eobs[0]);
       vp9_dequant_idct_add_uv_block_16x16_c(xd->sb_coeff_data.qcoeff + 1024,
                                             xd->block[16].dequant,
                                             xd->dst.u_buffer + x_idx * 16 +
@@ -667,9 +666,6 @@ static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
       xd->above_context = pc->above_context + mb_col + x_idx;
       xd->left_context = pc->left_context + y_idx;
       xd->mode_info_context = orig_mi + x_idx + y_idx * mis;
-      for (i = 0; i < 24; i++) {
-        xd->block[i].eob = 0;
-      }
 
       eobtotal = vp9_decode_mb_tokens(pbi, xd, bc);
       if (eobtotal == 0) {  // skip loopfilter
@@ -695,7 +691,7 @@ static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
 static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd,
                                 int mb_row, unsigned int mb_col,
                                 BOOL_DECODER* const bc) {
-  int i, n, eobtotal;
+  int n, eobtotal;
   TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
   VP9_COMMON *const pc = &pbi->common;
   MODE_INFO *orig_mi = xd->mode_info_context;
@@ -756,7 +752,7 @@ static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd,
     vp9_dequant_idct_add_32x32(xd->sb_coeff_data.qcoeff, xd->block[0].dequant,
                                xd->dst.y_buffer, xd->dst.y_buffer,
                                xd->dst.y_stride, xd->dst.y_stride,
-                               xd->block[0].eob);
+                               xd->eobs[0]);
     vp9_dequant_idct_add_uv_block_16x16_c(xd->sb_coeff_data.qcoeff + 1024,
                                           xd->block[16].dequant,
                                           xd->dst.u_buffer, xd->dst.v_buffer,
@@ -772,9 +768,6 @@ static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd,
       xd->above_context = pc->above_context + mb_col + x_idx;
       xd->left_context = pc->left_context + y_idx + (mb_row & 2);
       xd->mode_info_context = orig_mi + x_idx + y_idx * mis;
-      for (i = 0; i < 24; i++) {
-        xd->block[i].eob = 0;
-      }
 
       eobtotal = vp9_decode_mb_tokens(pbi, xd, bc);
       if (eobtotal == 0) {  // skip loopfilter
@@ -802,7 +795,6 @@ static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
                               BOOL_DECODER* const bc) {
   int eobtotal = 0;
   MB_PREDICTION_MODE mode;
-  int i;
   int tx_size;
 
   assert(!xd->mode_info_context->mbmi.sb_type);
@@ -817,9 +809,6 @@ static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
   if (xd->mode_info_context->mbmi.mb_skip_coeff) {
     vp9_reset_mb_tokens_context(xd);
   } else if (!bool_error(bc)) {
-    for (i = 0; i < 24; i++) {
-      xd->block[i].eob = 0;
-    }
     if (mode != B_PRED) {
       eobtotal = vp9_decode_mb_tokens(pbi, xd, bc);
     }
@@ -345,7 +345,7 @@ void vp9_dequant_idct_add_uv_block_16x16_c(int16_t *q, const int16_t *dq,
                                            int stride,
                                            MACROBLOCKD *xd) {
   vp9_dequant_idct_add_16x16_c(q, dq, dstu, dstu, stride, stride,
-                               xd->block[16].eob);
+                               xd->eobs[16]);
   vp9_dequant_idct_add_16x16_c(q + 256, dq, dstv, dstv, stride, stride,
-                               xd->block[20].eob);
+                               xd->eobs[20]);
 }
@@ -279,7 +279,7 @@ int vp9_decode_sb_tokens(VP9D_COMP* const pbi,
                     DCT_DCT, get_eob(xd, segment_id, 1024),
                     xd->sb_coeff_data.qcoeff,
                     vp9_default_zig_zag1d_32x32, TX_32X32);
-  xd->block[0].eob = c;
+  xd->eobs[0] = c;
   eobtotal += c;
 
   // 16x16 chroma blocks
@@ -288,7 +288,7 @@ int vp9_decode_sb_tokens(VP9D_COMP* const pbi,
     c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV, DCT_DCT, seg_eob,
                      xd->sb_coeff_data.qcoeff + 1024 + (i - 16) * 64,
                      vp9_default_zig_zag1d_16x16, TX_16X16);
-    xd->block[i].eob = c;
+    xd->eobs[i] = c;
     eobtotal += c;
   }
 
@@ -306,7 +306,7 @@ static int vp9_decode_mb_tokens_16x16(VP9D_COMP* const pbi,
                     get_tx_type(xd, &xd->block[0]),
                     get_eob(xd, segment_id, 256),
                     xd->qcoeff, vp9_default_zig_zag1d_16x16, TX_16X16);
-  xd->block[0].eob = c;
+  xd->eobs[0] = c;
   eobtotal += c;
 
   // 8x8 chroma blocks
@@ -315,7 +315,7 @@ static int vp9_decode_mb_tokens_16x16(VP9D_COMP* const pbi,
     c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV,
                      DCT_DCT, seg_eob, xd->block[i].qcoeff,
                      vp9_default_zig_zag1d_8x8, TX_8X8);
-    xd->block[i].eob = c;
+    xd->eobs[i] = c;
     eobtotal += c;
   }
   return eobtotal;
@@ -334,7 +334,7 @@ static int vp9_decode_mb_tokens_8x8(VP9D_COMP* const pbi,
                      get_tx_type(xd, xd->block + i),
                      seg_eob, xd->block[i].qcoeff,
                      vp9_default_zig_zag1d_8x8, TX_8X8);
-    xd->block[i].eob = c;
+    xd->eobs[i] = c;
     eobtotal += c;
   }
 
@@ -347,7 +347,7 @@ static int vp9_decode_mb_tokens_8x8(VP9D_COMP* const pbi,
       c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV,
                        DCT_DCT, seg_eob, xd->block[i].qcoeff,
                        vp9_default_zig_zag1d_4x4, TX_4X4);
-      xd->block[i].eob = c;
+      xd->eobs[i] = c;
       eobtotal += c;
     }
   } else {
@@ -355,7 +355,7 @@ static int vp9_decode_mb_tokens_8x8(VP9D_COMP* const pbi,
      c = decode_coefs(pbi, xd, bc, i, PLANE_TYPE_UV,
                       DCT_DCT, seg_eob, xd->block[i].qcoeff,
                       vp9_default_zig_zag1d_8x8, TX_8X8);
-     xd->block[i].eob = c;
+     xd->eobs[i] = c;
      eobtotal += c;
     }
   }
@@ -369,7 +369,7 @@ static int decode_coefs_4x4(VP9D_COMP *dx, MACROBLOCKD *xd,
                             TX_TYPE tx_type, const int *scan) {
   int c = decode_coefs(dx, xd, bc, i, type, tx_type, seg_eob,
                        xd->block[i].qcoeff, scan, TX_4X4);
-  xd->block[i].eob = c;
+  xd->eobs[i] = c;
   return c;
 }
 
@@ -21,7 +21,7 @@ void vp9_dequant_idct_add_y_block_4x4_inplace_c(int16_t *q,
 
   for (i = 0; i < 4; i++) {
     for (j = 0; j < 4; j++) {
-      if (xd->block[i * 4 + j].eob > 1) {
+      if (xd->eobs[i * 4 + j] > 1) {
        xd->itxm_add(q, dq, dst, dst, stride, stride);
      } else {
        xd->dc_only_itxm_add(q[0]*dq[0], dst, dst, stride, stride);
@@ -44,7 +44,7 @@ void vp9_dequant_idct_add_y_block_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 4; i++) {
     for (j = 0; j < 4; j++) {
-      if (xd->block[i * 4 + j].eob > 1)
+      if (xd->eobs[i * 4 + j] > 1)
        vp9_dequant_idct_add_c(q, dq, pre, dst, 16, stride);
      else {
        vp9_dc_only_idct_add_c(q[0]*dq[0], pre, dst, 16, stride);
@@ -69,7 +69,7 @@ void vp9_dequant_idct_add_uv_block_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      if (xd->block[16 + i * 2 + j].eob > 1)
+      if (xd->eobs[16 + i * 2 + j] > 1)
        vp9_dequant_idct_add_c(q, dq, pre, dstu, 8, stride);
      else {
        vp9_dc_only_idct_add_c(q[0]*dq[0], pre, dstu, 8, stride);
@@ -87,7 +87,7 @@ void vp9_dequant_idct_add_uv_block_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      if (xd->block[20 + i * 2 + j].eob > 1)
+      if (xd->eobs[20 + i * 2 + j] > 1)
        vp9_dequant_idct_add_c(q, dq, pre, dstv, 8, stride);
      else {
        vp9_dc_only_idct_add_c(q[0]*dq[0], pre, dstv, 8, stride);
@@ -113,7 +113,7 @@ void vp9_dequant_idct_add_uv_block_4x4_inplace_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      if (xd->block[16 + i * 2 + j].eob > 1) {
+      if (xd->eobs[16 + i * 2 + j] > 1) {
        xd->itxm_add(q, dq, dstu, dstu, stride, stride);
      } else {
        xd->dc_only_itxm_add(q[0]*dq[0], dstu, dstu, stride, stride);
@@ -129,7 +129,7 @@ void vp9_dequant_idct_add_uv_block_4x4_inplace_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      if (xd->block[20 + i * 2 + j].eob > 1) {
+      if (xd->eobs[20 + i * 2 + j] > 1) {
        xd->itxm_add(q, dq, dstv, dstv, stride, stride);
      } else {
        xd->dc_only_itxm_add(q[0]*dq[0], dstv, dstv, stride, stride);
@@ -149,18 +149,18 @@ void vp9_dequant_idct_add_y_block_8x8_inplace_c(int16_t *q,
                                                 uint8_t *dst,
                                                 int stride,
                                                 MACROBLOCKD *xd) {
-  vp9_dequant_idct_add_8x8_c(q, dq, dst, dst, stride, stride, xd->block[0].eob);
+  vp9_dequant_idct_add_8x8_c(q, dq, dst, dst, stride, stride, xd->eobs[0]);
 
   vp9_dequant_idct_add_8x8_c(&q[64], dq, dst + 8,
-                             dst + 8, stride, stride, xd->block[4].eob);
+                             dst + 8, stride, stride, xd->eobs[4]);
 
   vp9_dequant_idct_add_8x8_c(&q[128], dq, dst + 8 * stride,
                              dst + 8 * stride, stride, stride,
-                             xd->block[8].eob);
+                             xd->eobs[8]);
 
   vp9_dequant_idct_add_8x8_c(&q[192], dq, dst + 8 * stride + 8,
                              dst + 8 * stride + 8, stride, stride,
-                             xd->block[12].eob);
+                             xd->eobs[12]);
 }
 
 void vp9_dequant_idct_add_y_block_8x8_c(int16_t *q, const int16_t *dq,
@@ -170,15 +170,15 @@ void vp9_dequant_idct_add_y_block_8x8_c(int16_t *q, const int16_t *dq,
   uint8_t *origdest = dst;
   uint8_t *origpred = pre;
 
-  vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride, xd->block[0].eob);
+  vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride, xd->eobs[0]);
   vp9_dequant_idct_add_8x8_c(&q[64], dq, origpred + 8,
-                             origdest + 8, 16, stride, xd->block[4].eob);
+                             origdest + 8, 16, stride, xd->eobs[4]);
   vp9_dequant_idct_add_8x8_c(&q[128], dq, origpred + 8 * 16,
                              origdest + 8 * stride, 16, stride,
-                             xd->block[8].eob);
+                             xd->eobs[8]);
   vp9_dequant_idct_add_8x8_c(&q[192], dq, origpred + 8 * 16 + 8,
                              origdest + 8 * stride + 8, 16, stride,
-                             xd->block[12].eob);
+                             xd->eobs[12]);
 }
 
 void vp9_dequant_idct_add_uv_block_8x8_c(int16_t *q, const int16_t *dq,
@@ -186,12 +186,12 @@ void vp9_dequant_idct_add_uv_block_8x8_c(int16_t *q, const int16_t *dq,
                                          uint8_t *dstu,
                                          uint8_t *dstv,
                                          int stride, MACROBLOCKD *xd) {
-  vp9_dequant_idct_add_8x8_c(q, dq, pre, dstu, 8, stride, xd->block[16].eob);
+  vp9_dequant_idct_add_8x8_c(q, dq, pre, dstu, 8, stride, xd->eobs[16]);
 
   q += 64;
   pre += 64;
 
-  vp9_dequant_idct_add_8x8_c(q, dq, pre, dstv, 8, stride, xd->block[20].eob);
+  vp9_dequant_idct_add_8x8_c(q, dq, pre, dstv, 8, stride, xd->eobs[20]);
 }
 
 void vp9_dequant_idct_add_uv_block_8x8_inplace_c(int16_t *q, const int16_t *dq,
@@ -200,11 +200,11 @@ void vp9_dequant_idct_add_uv_block_8x8_inplace_c(int16_t *q, const int16_t *dq,
                                                  int stride,
                                                  MACROBLOCKD *xd) {
   vp9_dequant_idct_add_8x8_c(q, dq, dstu, dstu, stride, stride,
-                             xd->block[16].eob);
+                             xd->eobs[16]);
 
   q += 64;
   vp9_dequant_idct_add_8x8_c(q, dq, dstv, dstv, stride, stride,
-                             xd->block[20].eob);
+                             xd->eobs[20]);
 }
 
 
@@ -216,7 +216,7 @@ void vp9_dequant_idct_add_y_block_lossless_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 4; i++) {
     for (j = 0; j < 4; j++) {
-      if (xd->block[i * 4 + j].eob > 1)
+      if (xd->eobs[i * 4 + j] > 1)
        vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, 16, stride);
      else {
        vp9_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dst, 16, stride);
@@ -243,7 +243,7 @@ void vp9_dequant_idct_add_uv_block_lossless_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      if (xd->block[16 + i * 2 + j].eob > 1)
+      if (xd->eobs[16 + i * 2 + j] > 1)
        vp9_dequant_idct_add_lossless_c(q, dq, pre, dstu, 8, stride);
      else {
        vp9_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dstu, 8, stride);
@@ -261,7 +261,7 @@ void vp9_dequant_idct_add_uv_block_lossless_c(int16_t *q, const int16_t *dq,
 
   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      if (xd->block[20 + i * 2 + j].eob > 1)
+      if (xd->eobs[20 + i * 2 + j] > 1)
        vp9_dequant_idct_add_lossless_c(q, dq, pre, dstv, 8, stride);
      else {
        vp9_dc_only_inv_walsh_add_c(q[0]*dq[0], pre, dstv, 8, stride);
@@ -32,7 +32,6 @@ DEFINE(vp9_block_quant_shift, offsetof(BLOCK, quant_shift));
 DEFINE(vp9_blockd_qcoeff, offsetof(BLOCKD, qcoeff));
 DEFINE(vp9_blockd_dequant, offsetof(BLOCKD, dequant));
 DEFINE(vp9_blockd_dqcoeff, offsetof(BLOCKD, dqcoeff));
-DEFINE(vp9_blockd_eob, offsetof(BLOCKD, eob));
 
 END
 
@@ -88,7 +88,8 @@ typedef struct superblock {
   DECLARE_ALIGNED(16, int16_t, coeff[32*32+16*16*2]);
 } SUPERBLOCK;
 
-typedef struct macroblock {
+typedef struct macroblock MACROBLOCK;
+struct macroblock {
   DECLARE_ALIGNED(16, int16_t, src_diff[384]);  // 16x16 Y 8x8 U 8x8 V
   DECLARE_ALIGNED(16, int16_t, coeff[384]);     // 16x16 Y 8x8 U 8x8 V
   // 16 Y blocks, 4 U blocks, 4 V blocks,
@@ -171,10 +172,10 @@ typedef struct macroblock {
   void (*fwd_txm8x4)(int16_t *input, int16_t *output, int pitch);
   void (*fwd_txm8x8)(int16_t *input, int16_t *output, int pitch);
   void (*fwd_txm16x16)(int16_t *input, int16_t *output, int pitch);
-  void (*quantize_b_4x4)(BLOCK *b, BLOCKD *d);
-  void (*quantize_b_4x4_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
-  void (*quantize_b_16x16)(BLOCK *b, BLOCKD *d);
-  void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
-} MACROBLOCK;
+  void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx);
+  void (*quantize_b_4x4_pair)(MACROBLOCK *x, int b_idx1, int b_idx2);
+  void (*quantize_b_16x16)(MACROBLOCK *x, int b_idx);
+  void (*quantize_b_8x8)(MACROBLOCK *x, int b_idx);
+};
 
 #endif  // VP9_ENCODER_VP9_BLOCK_H_
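One non-mechanical detail in the hunk above: `typedef struct macroblock { ... } MACROBLOCK;` becomes a forward typedef plus a plain struct definition, because the quantize function pointers inside the struct now take a MACROBLOCK * themselves and the type name has to exist before the closing brace. A minimal self-contained sketch of the new calling convention follows; the stub, loop, and main are illustrative only and not libvpx code.

#include <stdio.h>

/* Forward-declare the typedef first, as the diff does, so the struct body
 * can reference MACROBLOCK in its own function-pointer members. */
typedef struct macroblock MACROBLOCK;
struct macroblock {
  void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx);  /* new prototype */
};

static void stub_quantize_b_4x4(MACROBLOCK *x, int b_idx) {
  (void)x;
  printf("quantizing block %d\n", b_idx);
}

int main(void) {
  MACROBLOCK x = { stub_quantize_b_4x4 };
  /* Old form passed &x->block[i] and &xd->block[i]; the new form passes
   * the macroblock plus a block index, so the callee can update eobs[]. */
  for (int i = 0; i < 16; i++)
    x.quantize_b_4x4(&x, i);
  return 0;
}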
@@ -55,11 +55,11 @@ void vp9_encode_intra4x4block(MACROBLOCK *x, int ib) {
   tx_type = get_tx_type_4x4(&x->e_mbd, b);
   if (tx_type != DCT_DCT) {
     vp9_short_fht4x4(be->src_diff, be->coeff, 16, tx_type);
-    vp9_ht_quantize_b_4x4(be, b, tx_type);
+    vp9_ht_quantize_b_4x4(x, ib, tx_type);
     vp9_short_iht4x4(b->dqcoeff, b->diff, 16, tx_type);
   } else {
     x->fwd_txm4x4(be->src_diff, be->coeff, 32);
-    x->quantize_b_4x4(be, b) ;
+    x->quantize_b_4x4(x, ib);
     vp9_inverse_transform_b_4x4(&x->e_mbd, ib, 32);
   }
 
@@ -150,12 +150,12 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
     tx_type = get_tx_type_8x8(xd, &xd->block[ib]);
     if (tx_type != DCT_DCT) {
       vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 16, tx_type);
-      x->quantize_b_8x8(x->block + idx, xd->block + idx);
+      x->quantize_b_8x8(x, idx);
       vp9_short_iht8x8(xd->block[idx].dqcoeff, xd->block[ib].diff,
                        16, tx_type);
     } else {
       x->fwd_txm8x8(be->src_diff, (x->block + idx)->coeff, 32);
-      x->quantize_b_8x8(x->block + idx, xd->block + idx);
+      x->quantize_b_8x8(x, idx);
       vp9_short_idct8x8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
     }
   } else {
@@ -165,17 +165,17 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
       tx_type = get_tx_type_4x4(xd, b);
       if (tx_type != DCT_DCT) {
         vp9_short_fht4x4(be->src_diff, be->coeff, 16, tx_type);
-        vp9_ht_quantize_b_4x4(be, b, tx_type);
+        vp9_ht_quantize_b_4x4(x, ib + iblock[i], tx_type);
         vp9_short_iht4x4(b->dqcoeff, b->diff, 16, tx_type);
       } else if (!(i & 1) && get_tx_type_4x4(xd, b + 1) == DCT_DCT) {
         x->fwd_txm8x4(be->src_diff, be->coeff, 32);
-        x->quantize_b_4x4_pair(be, be + 1, b, b + 1);
+        x->quantize_b_4x4_pair(x, ib + iblock[i], ib + iblock[i] + 1);
         vp9_inverse_transform_b_4x4(xd, ib + iblock[i], 32);
         vp9_inverse_transform_b_4x4(xd, ib + iblock[i] + 1, 32);
         i++;
       } else {
         x->fwd_txm4x4(be->src_diff, be->coeff, 32);
-        x->quantize_b_4x4(be, b);
+        x->quantize_b_4x4(x, ib + iblock[i]);
         vp9_inverse_transform_b_4x4(xd, ib + iblock[i], 32);
       }
     }
@@ -208,7 +208,7 @@ static void encode_intra_uv4x4(MACROBLOCK *x, int ib,
   vp9_subtract_b(be, b, 8);
 
   x->fwd_txm4x4(be->src_diff, be->coeff, 16);
-  x->quantize_b_4x4(be, b);
+  x->quantize_b_4x4(x, ib);
   vp9_inverse_transform_b_4x4(&x->e_mbd, ib, 16);
 
   vp9_recon_uv_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
@@ -315,14 +315,15 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
                        ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                        int tx_size) {
   const int ref = mb->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME;
+  MACROBLOCKD *const xd = &mb->e_mbd;
   BLOCK *b = &mb->block[i];
-  BLOCKD *d = &mb->e_mbd.block[i];
+  BLOCKD *d = &xd->block[i];
   vp9_token_state tokens[257][2];
   unsigned best_index[257][2];
   const int16_t *dequant_ptr = d->dequant, *coeff_ptr = b->coeff;
   int16_t *qcoeff_ptr = d->qcoeff;
   int16_t *dqcoeff_ptr = d->dqcoeff;
-  int eob = d->eob, final_eob, sz = 0;
+  int eob = xd->eobs[i], final_eob, sz = 0;
   const int i0 = 0;
   int rc, x, next;
   int64_t rdmult, rddiv, rd_cost0, rd_cost1;
@@ -527,8 +528,8 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
   }
   final_eob++;
 
-  d->eob = final_eob;
-  *a = *l = (d->eob > 0);
+  xd->eobs[d - xd->block] = final_eob;
+  *a = *l = (final_eob > 0);
 }
 
 void vp9_optimize_mby_4x4(MACROBLOCK *x) {
@@ -21,7 +21,10 @@
 extern int enc_debug;
 #endif
 
-void vp9_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
+void vp9_ht_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type) {
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  BLOCK *const b = &mb->block[b_idx];
+  BLOCKD *const d = &xd->block[b_idx];
   int i, rc, eob;
   int zbin;
   int x, y, z, sz;
@@ -84,10 +87,13 @@ void vp9_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
     }
   }
 
-  d->eob = eob + 1;
+  xd->eobs[b_idx] = eob + 1;
 }
 
-void vp9_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
+void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx) {
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  BLOCK *const b = &mb->block[b_idx];
+  BLOCKD *const d = &xd->block[b_idx];
   int i, rc, eob;
   int zbin;
   int x, y, z, sz;
@@ -135,7 +141,7 @@ void vp9_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
     }
   }
 
-  d->eob = eob + 1;
+  xd->eobs[b_idx] = eob + 1;
 }
 
 void vp9_quantize_mby_4x4_c(MACROBLOCK *x) {
@@ -144,9 +150,9 @@ void vp9_quantize_mby_4x4_c(MACROBLOCK *x) {
   for (i = 0; i < 16; i++) {
     TX_TYPE tx_type = get_tx_type_4x4(&x->e_mbd, &x->e_mbd.block[i]);
     if (tx_type != DCT_DCT) {
-      vp9_ht_quantize_b_4x4(&x->block[i], &x->e_mbd.block[i], tx_type);
+      vp9_ht_quantize_b_4x4(x, i, tx_type);
     } else {
-      x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
+      x->quantize_b_4x4(x, i);
     }
   }
 }
@@ -155,7 +161,7 @@ void vp9_quantize_mbuv_4x4_c(MACROBLOCK *x) {
   int i;
 
   for (i = 16; i < 24; i++)
-    x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
+    x->quantize_b_4x4(x, i);
 }
 
 void vp9_quantize_mb_4x4_c(MACROBLOCK *x) {
@@ -163,7 +169,10 @@ void vp9_quantize_mb_4x4_c(MACROBLOCK *x) {
   vp9_quantize_mbuv_4x4_c(x);
 }
 
-void vp9_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
+void vp9_regular_quantize_b_8x8(MACROBLOCK *mb, int b_idx) {
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  BLOCK *const b = &mb->block[b_idx];
+  BLOCKD *const d = &xd->block[b_idx];
   int16_t *qcoeff_ptr = d->qcoeff;
   int16_t *dqcoeff_ptr = d->dqcoeff;
 
@@ -236,30 +245,25 @@ void vp9_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
         }
       }
     }
-    d->eob = eob + 1;
+    xd->eobs[b_idx] = eob + 1;
   } else {
-    d->eob = 0;
+    xd->eobs[b_idx] = 0;
   }
 }
 
 void vp9_quantize_mby_8x8(MACROBLOCK *x) {
   int i;
 
-  for (i = 0; i < 16; i ++) {
-    x->e_mbd.block[i].eob = 0;
-  }
   for (i = 0; i < 16; i += 4) {
-    x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
+    x->quantize_b_8x8(x, i);
   }
 }
 
 void vp9_quantize_mbuv_8x8(MACROBLOCK *x) {
   int i;
 
-  for (i = 16; i < 24; i ++)
-    x->e_mbd.block[i].eob = 0;
   for (i = 16; i < 24; i += 4)
-    x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
+    x->quantize_b_8x8(x, i);
 }
 
 void vp9_quantize_mb_8x8(MACROBLOCK *x) {
@@ -268,11 +272,7 @@ void vp9_quantize_mb_8x8(MACROBLOCK *x) {
 }
 
 void vp9_quantize_mby_16x16(MACROBLOCK *x) {
-  int i;
-
-  for (i = 0; i < 16; i++)
-    x->e_mbd.block[i].eob = 0;
-  x->quantize_b_16x16(&x->block[0], &x->e_mbd.block[0]);
+  x->quantize_b_16x16(x, 0);
 }
 
 void vp9_quantize_mb_16x16(MACROBLOCK *x) {
@@ -286,7 +286,7 @@ static void quantize(int16_t *zbin_boost_orig_ptr,
                      uint8_t *quant_shift_ptr,
                      int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
                      int16_t *dequant_ptr, int zbin_oq_value,
-                     int *eob_ptr, const int *scan, int mul) {
+                     uint16_t *eob_ptr, const int *scan, int mul) {
   int i, rc, eob;
   int zbin;
   int x, y, z, sz;
@@ -328,7 +328,10 @@ static void quantize(int16_t *zbin_boost_orig_ptr,
   *eob_ptr = eob + 1;
 }
 
-void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
+void vp9_regular_quantize_b_16x16(MACROBLOCK *mb, int b_idx) {
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  BLOCK *const b = &mb->block[b_idx];
+  BLOCKD *const d = &xd->block[b_idx];
   quantize(b->zrun_zbin_boost,
            b->coeff,
            256, b->skip_block,
@@ -337,7 +340,7 @@ void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
            d->dqcoeff,
           d->dequant,
           b->zbin_extra,
-           &d->eob, vp9_default_zig_zag1d_16x16, 1);
+           &xd->eobs[b_idx], vp9_default_zig_zag1d_16x16, 1);
 }
 
 void vp9_quantize_sby_32x32(MACROBLOCK *x) {
@@ -345,7 +348,6 @@ void vp9_quantize_sby_32x32(MACROBLOCK *x) {
   BLOCK *b = &x->block[0];
   BLOCKD *d = &xd->block[0];
 
-  d->eob = 0;
   quantize(b->zrun_zbin_boost,
            x->sb_coeff_data.coeff,
            1024, b->skip_block,
@@ -355,7 +357,7 @@ void vp9_quantize_sby_32x32(MACROBLOCK *x) {
            xd->sb_coeff_data.dqcoeff,
           d->dequant,
           b->zbin_extra,
-           &d->eob,
+           &xd->eobs[0],
            vp9_default_zig_zag1d_32x32, 2);
 }
 
@@ -363,8 +365,6 @@ void vp9_quantize_sbuv_16x16(MACROBLOCK *x) {
   int i;
   MACROBLOCKD *xd = &x->e_mbd;
 
-  xd->block[16].eob = 0;
-  xd->block[20].eob = 0;
   for (i = 16; i < 24; i += 4)
     quantize(x->block[i].zrun_zbin_boost,
              x->sb_coeff_data.coeff + 1024 + (i - 16) * 64,
@@ -375,7 +375,7 @@ void vp9_quantize_sbuv_16x16(MACROBLOCK *x) {
              xd->sb_coeff_data.dqcoeff + 1024 + (i - 16) * 64,
             xd->block[i].dequant,
             x->block[i].zbin_extra,
-             &xd->block[i].eob,
+             &xd->eobs[i],
              vp9_default_zig_zag1d_16x16, 1);
 }
 
@@ -383,10 +383,9 @@ void vp9_quantize_sbuv_16x16(MACROBLOCK *x) {
  * these two C functions if corresponding optimized routine is not available.
  * NEON optimized version implements currently the fast quantization for pair
  * of blocks. */
-void vp9_regular_quantize_b_4x4_pair(BLOCK *b1, BLOCK *b2,
-                                     BLOCKD *d1, BLOCKD *d2) {
-  vp9_regular_quantize_b_4x4(b1, d1);
-  vp9_regular_quantize_b_4x4(b2, d2);
+void vp9_regular_quantize_b_4x4_pair(MACROBLOCK *x, int b_idx1, int b_idx2) {
+  vp9_regular_quantize_b_4x4(x, b_idx1);
+  vp9_regular_quantize_b_4x4(x, b_idx2);
 }
 
 static void invert_quant(int16_t *quant,
@@ -14,10 +14,10 @@
 #include "vp9/encoder/vp9_block.h"
 
 #define prototype_quantize_block(sym) \
-  void (sym)(BLOCK *b,BLOCKD *d)
+  void (sym)(MACROBLOCK *mb, int b_idx)
 
 #define prototype_quantize_block_pair(sym) \
-  void (sym)(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2)
+  void (sym)(MACROBLOCK *mb, int b_idx1, int b_idx2)
 
 #define prototype_quantize_mb(sym) \
   void (sym)(MACROBLOCK *x)
@@ -27,7 +27,7 @@
 #endif
 
 #define prototype_quantize_block_type(sym) \
-  void (sym)(BLOCK *b, BLOCKD *d, TX_TYPE type)
+  void (sym)(MACROBLOCK *mb, int b_ix, TX_TYPE type)
 extern prototype_quantize_block_type(vp9_ht_quantize_b_4x4);
 
 #ifndef vp9_quantize_quantb_4x4
@@ -379,7 +379,6 @@ int vp9_uvsse(MACROBLOCK *x) {
     sse2 += sse1;
   }
   return sse2;
-
 }
 
 static INLINE int cost_coeffs(MACROBLOCK *mb,
@@ -388,9 +387,9 @@ static INLINE int cost_coeffs(MACROBLOCK *mb,
                               ENTROPY_CONTEXT *l,
                               TX_SIZE tx_size) {
   int pt;
-  const int eob = b->eob;
-  MACROBLOCKD *xd = &mb->e_mbd;
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  const int ib = (int)(b - xd->block);
+  const int eob = xd->eobs[ib];
   int c = 0;
   int cost = 0, seg_eob;
   const int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -1043,10 +1042,10 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, BLOCK *be,
     tx_type = get_tx_type_4x4(xd, b);
     if (tx_type != DCT_DCT) {
       vp9_short_fht4x4(be->src_diff, be->coeff, 16, tx_type);
-      vp9_ht_quantize_b_4x4(be, b, tx_type);
+      vp9_ht_quantize_b_4x4(x, be - x->block, tx_type);
     } else {
       x->fwd_txm4x4(be->src_diff, be->coeff, 32);
-      x->quantize_b_4x4(be, b);
+      x->quantize_b_4x4(x, be - x->block);
     }
 
     tempa = ta;
@@ -1342,7 +1341,7 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
       vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 16, tx_type);
     else
       x->fwd_txm8x8(be->src_diff, (x->block + idx)->coeff, 32);
-    x->quantize_b_8x8(x->block + idx, xd->block + idx);
+    x->quantize_b_8x8(x, idx);
 
     // compute quantization mse of 8x8 block
     distortion = vp9_block_error_c((x->block + idx)->coeff,
@@ -1379,14 +1378,14 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
       tx_type = get_tx_type_4x4(xd, b);
       if (tx_type != DCT_DCT) {
         vp9_short_fht4x4(be->src_diff, be->coeff, 16, tx_type);
-        vp9_ht_quantize_b_4x4(be, b, tx_type);
+        vp9_ht_quantize_b_4x4(x, ib + iblock[i], tx_type);
       } else if (!(i & 1) && get_tx_type_4x4(xd, b + 1) == DCT_DCT) {
         x->fwd_txm8x4(be->src_diff, be->coeff, 32);
-        x->quantize_b_4x4_pair(be, be + 1, b, b + 1);
+        x->quantize_b_4x4_pair(x, ib + iblock[i], ib + iblock[i] + 1);
         do_two = 1;
       } else {
         x->fwd_txm4x4(be->src_diff, be->coeff, 32);
-        x->quantize_b_4x4(be, b);
+        x->quantize_b_4x4(x, ib + iblock[i]);
       }
       distortion += vp9_block_error_c(be->coeff, b->dqcoeff, 16 << do_two);
       rate_t += cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC,
@@ -2168,7 +2167,7 @@ static int64_t encode_inter_mb_segment(MACROBLOCK *x,
 
       vp9_subtract_b(be, bd, 16);
       x->fwd_txm4x4(be->src_diff, be->coeff, 32);
-      x->quantize_b_4x4(be, bd);
+      x->quantize_b_4x4(x, i);
       thisdistortion = vp9_block_error(be->coeff, bd->dqcoeff, 16);
       *distortion += thisdistortion;
       *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
@@ -2231,7 +2230,7 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
       if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
         if (otherrd) {
           x->fwd_txm8x8(be->src_diff, be2->coeff, 32);
-          x->quantize_b_8x8(be2, bd2);
+          x->quantize_b_8x8(x, idx);
           thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
           otherdist += thisdistortion;
           othercost += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
@@ -2243,7 +2242,7 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
           bd = &xd->block[ib + iblock[j]];
           be = &x->block[ib + iblock[j]];
           x->fwd_txm8x4(be->src_diff, be->coeff, 32);
-          x->quantize_b_4x4_pair(be, be + 1, bd, bd + 1);
+          x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j] + 1);
           thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32);
           *distortion += thisdistortion;
           *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
@@ -2261,7 +2260,7 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
             BLOCKD *bd = &xd->block[ib + iblock[j]];
             BLOCK *be = &x->block[ib + iblock[j]];
             x->fwd_txm8x4(be->src_diff, be->coeff, 32);
-            x->quantize_b_4x4_pair(be, be + 1, bd, bd + 1);
+            x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j]);
             thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32);
             otherdist += thisdistortion;
             othercost += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
@@ -2275,7 +2274,7 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
           }
         }
         x->fwd_txm8x8(be->src_diff, be2->coeff, 32);
-        x->quantize_b_8x8(be2, bd2);
+        x->quantize_b_8x8(x, idx);
         thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
         *distortion += thisdistortion;
         *labelyrate += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
@@ -2538,13 +2537,13 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
         if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_4X4) {
           for (j = 0; j < 16; j++)
             if (labels[j] == i)
-              best_eobs[j] = x->e_mbd.block[j].eob;
+              best_eobs[j] = x->e_mbd.eobs[j];
         } else {
           for (j = 0; j < 4; j++) {
             int ib = vp9_i8x8_block[j], idx = j * 4;
 
            if (labels[ib] == i)
-              best_eobs[idx] = x->e_mbd.block[idx].eob;
+              best_eobs[idx] = x->e_mbd.eobs[idx];
           }
         }
         if (other_rd < best_other_rd)
@@ -2819,7 +2818,7 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
     bd->bmi.as_mv[0].as_int = bsi.mvs[i].as_int;
     if (mbmi->second_ref_frame > 0)
       bd->bmi.as_mv[1].as_int = bsi.second_mvs[i].as_int;
-    bd->eob = bsi.eobs[i];
+    x->e_mbd.eobs[i] = bsi.eobs[i];
   }
 
   *returntotrate = bsi.r;
@@ -105,7 +105,7 @@ static void tokenize_b(VP9_COMP *cpi,
   int c = 0;
   int recent_energy = 0;
   const BLOCKD * const b = xd->block + ib;
-  const int eob = b->eob;  /* one beyond last nonzero coeff */
+  const int eob = xd->eobs[ib];  /* one beyond last nonzero coeff */
   TOKENEXTRA *t = *tp;  /* store tokens starting here */
   int16_t *qcoeff_ptr = b->qcoeff;
   int seg_eob;
@@ -245,7 +245,7 @@ int vp9_mby_is_skippable_4x4(MACROBLOCKD *xd) {
   int i = 0;
 
   for (i = 0; i < 16; i++)
-    skip &= (!xd->block[i].eob);
+    skip &= (!xd->eobs[i]);
 
   return skip;
 }
@@ -255,7 +255,7 @@ int vp9_mbuv_is_skippable_4x4(MACROBLOCKD *xd) {
   int i;
 
   for (i = 16; i < 24; i++)
-    skip &= (!xd->block[i].eob);
+    skip &= (!xd->eobs[i]);
   return skip;
 }
 
@@ -269,13 +269,13 @@ int vp9_mby_is_skippable_8x8(MACROBLOCKD *xd) {
   int i = 0;
 
   for (i = 0; i < 16; i += 4)
-    skip &= (!xd->block[i].eob);
+    skip &= (!xd->eobs[i]);
 
   return skip;
 }
 
 int vp9_mbuv_is_skippable_8x8(MACROBLOCKD *xd) {
-  return (!xd->block[16].eob) & (!xd->block[20].eob);
+  return (!xd->eobs[16]) & (!xd->eobs[20]);
 }
 
 static int mb_is_skippable_8x8(MACROBLOCKD *xd) {
@@ -290,7 +290,7 @@ static int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd) {
 
 int vp9_mby_is_skippable_16x16(MACROBLOCKD *xd) {
   int skip = 1;
-  skip &= !xd->block[0].eob;
+  skip &= !xd->eobs[0];
   return skip;
 }
 
@@ -300,12 +300,12 @@ static int mb_is_skippable_16x16(MACROBLOCKD *xd) {
 
 int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd) {
   int skip = 1;
-  skip &= !xd->block[0].eob;
+  skip &= !xd->eobs[0];
   return skip;
 }
 
 int vp9_sbuv_is_skippable_16x16(MACROBLOCKD *xd) {
-  return (!xd->block[16].eob) & (!xd->block[20].eob);
+  return (!xd->eobs[16]) & (!xd->eobs[20]);
 }
 
 static int sb_is_skippable_32x32(MACROBLOCKD *xd) {
@@ -97,16 +97,16 @@ VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad_sse2.asm
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad_sse2_yasm.asm
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_sad4d_sse2_yasm.asm
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_fwalsh_sse2.asm
-VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_quantize_sse2.asm
+#VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_quantize_sse2.asm
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subtract_sse2.asm
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_temporal_filter_apply_sse2.asm
 VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_variance_ssse3.c
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_variance_impl_ssse3.asm
-VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3.asm
+#VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3.asm
 VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_sad_sse4.asm
-VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_quantize_sse4.asm
+#VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_quantize_sse4.asm
 VP9_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/vp9_quantize_mmx.asm
 VP9_CX_SRCS-$(ARCH_X86)$(ARCH_X86_64) += encoder/x86/vp9_encodeopt.asm
 VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt.asm