Merge "Properly schedule the transform block recursion order" into nextgenv2

This commit is contained in:
Jingning Han
2016-11-04 17:53:53 +00:00
committed by Gerrit Code Review
4 changed files with 26 additions and 15 deletions

View File

@@ -856,7 +856,8 @@ static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
       if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
-      pack_txb_tokens(w, tp, tok_end, xd, mbmi, plane, plane_bsize, bit_depth,
-                      block + i * step, offsetr, offsetc, sub_txs);
+      pack_txb_tokens(w, tp, tok_end, xd, mbmi, plane, plane_bsize, bit_depth,
+                      block, offsetr, offsetc, sub_txs);
+      block += step;
     }
   }
 }

View File

@@ -891,8 +891,9 @@ static void encode_block_inter(int plane, int block, int blk_row, int blk_col,
       if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
-      encode_block_inter(plane, block + i * step, offsetr, offsetc, plane_bsize,
-                         sub_txs, arg);
+      encode_block_inter(plane, block, offsetr, offsetc, plane_bsize, sub_txs,
+                         arg);
+      block += step;
     }
   }
 }

View File

@@ -3134,18 +3134,23 @@ static void select_tx_block(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
     assert(tx_size < TX_SIZES);
 #endif  // CONFIG_EXT_TX
     for (i = 0; i < 4 && this_cost_valid; ++i) {
-      int offsetr = (i >> 1) * bsl;
-      int offsetc = (i & 0x01) * bsl;
-      select_tx_block(cpi, x, blk_row + offsetr, blk_col + offsetc, plane,
-                      block + i * sub_step, sub_txs, depth + 1, plane_bsize, ta,
-                      tl, tx_above, tx_left, &this_rd_stats,
-                      ref_best_rd - tmp_rd, &this_cost_valid);
+      int offsetr = blk_row + (i >> 1) * bsl;
+      int offsetc = blk_col + (i & 0x01) * bsl;
+
+      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
+
+      select_tx_block(cpi, x, offsetr, offsetc, plane, block, sub_txs,
+                      depth + 1, plane_bsize, ta, tl, tx_above, tx_left,
+                      &this_rd_stats, ref_best_rd - tmp_rd, &this_cost_valid);
       sum_rate += this_rd_stats.rate;
       sum_dist += this_rd_stats.dist;
       sum_bsse += this_rd_stats.sse;
       all_skip &= this_rd_stats.skip;
       tmp_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
       if (this_rd < tmp_rd) break;
+      block += sub_step;
     }
     if (this_cost_valid) sum_rd = tmp_rd;
   }
@@ -3482,11 +3487,14 @@ static void tx_block_rd(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
     assert(bsl > 0);
     for (i = 0; i < 4; ++i) {
-      int offsetr = (i >> 1) * bsl;
-      int offsetc = (i & 0x01) * bsl;
-      tx_block_rd(cpi, x, blk_row + offsetr, blk_col + offsetc, plane,
-                  block + i * step, sub_txs, plane_bsize, above_ctx, left_ctx,
-                  rd_stats);
+      int offsetr = blk_row + (i >> 1) * bsl;
+      int offsetc = blk_col + (i & 0x01) * bsl;
+
+      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
+
+      tx_block_rd(cpi, x, offsetr, offsetc, plane, block, sub_txs, plane_bsize,
+                  above_ctx, left_ctx, rd_stats);
+      block += step;
     }
   }
 }

View File

@@ -610,7 +610,8 @@ void tokenize_vartx(ThreadData *td, TOKENEXTRA **t, RUN_TYPE dry_run,
       if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
-      tokenize_vartx(td, t, dry_run, sub_txs, plane_bsize, offsetr, offsetc,
-                     block + i * step, plane, arg);
+      tokenize_vartx(td, t, dry_run, sub_txs, plane_bsize, offsetr, offsetc,
+                     block, plane, arg);
+      block += step;
     }
   }
 }