vp8: K&R formatting cosmetics
Signed-off-by: Diego Biurrun <diego@biurrun.de>
This commit is contained in:
parent 6adf3bc42e
commit 53c20f17c7

385 libavcodec/vp8.c
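For context, a minimal sketch of the K&R-style conventions this cosmetic pass applies throughout the diff: long calls wrapped with continuation arguments aligned under the opening parenthesis, "} else" cuddled onto one line, and spaces around binary operators. The struct and function names below are invented for illustration only and do not appear in the commit.

#include <stdlib.h>

/* Hypothetical context struct, illustration only. */
typedef struct Ctx {
    int width, height, layout;
    int *rows;
} Ctx;

/* Before the cosmetic pass: one long line, no spacing around operators,
 * "else" dangling on its own line. */
static int alloc_rows_before(Ctx *s)
{
    if (!s->layout) {
        s->rows = calloc(s->width+s->height*2+1, sizeof(*s->rows));
    }
    else
        s->rows = calloc(s->width+2, sizeof(*s->rows));
    return s->rows ? 0 : -1;
}

/* After the cosmetic pass (K&R): wrapped arguments aligned under the
 * opening parenthesis, "} else" on one line, spaced operators.
 * Behavior is identical; only the formatting changes. */
static int alloc_rows_after(Ctx *s)
{
    if (!s->layout) {
        s->rows = calloc(s->width + s->height * 2 + 1,
                         sizeof(*s->rows));
    } else
        s->rows = calloc(s->width + 2,
                         sizeof(*s->rows));
    return s->rows ? 0 : -1;
}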
@@ -24,12 +24,13 @@
*/

#include "libavutil/imgutils.h"

#include "avcodec.h"
#include "internal.h"
#include "vp8.h"
#include "vp8data.h"
#include "rectangle.h"
#include "thread.h"
#include "vp8.h"
#include "vp8data.h"

#if ARCH_ARM
#   include "arm/vp8.h"
@@ -91,7 +92,6 @@ static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
return 0;
}


static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
{
VP8Context *s = avctx->priv_data;
@@ -127,19 +127,22 @@ static int update_dimensions(VP8Context *s, int width, int height)
s->mb_width = (s->avctx->coded_width + 15) / 16;
s->mb_height = (s->avctx->coded_height + 15) / 16;

s->mb_layout = (avctx->active_thread_type == FF_THREAD_SLICE) && (FFMIN(s->num_coeff_partitions, avctx->thread_count) > 1);
s->mb_layout = (avctx->active_thread_type == FF_THREAD_SLICE) &&
(FFMIN(s->num_coeff_partitions, avctx->thread_count) > 1);
if (!s->mb_layout) { // Frame threading and one thread
s->macroblocks_base = av_mallocz((s->mb_width+s->mb_height*2+1)*sizeof(*s->macroblocks));
s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
sizeof(*s->macroblocks));
s->intra4x4_pred_mode_top = av_mallocz(s->mb_width * 4);
}
else // Sliced threading
s->macroblocks_base = av_mallocz((s->mb_width+2)*(s->mb_height+2)*sizeof(*s->macroblocks));
} else // Sliced threading
s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
sizeof(*s->macroblocks));
s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
s->thread_data = av_mallocz(MAX_THREADS * sizeof(VP8ThreadData));

for (i = 0; i < MAX_THREADS; i++) {
s->thread_data[i].filter_strength = av_mallocz(s->mb_width*sizeof(*s->thread_data[0].filter_strength));
s->thread_data[i].filter_strength =
av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
#if HAVE_THREADS
pthread_mutex_init(&s->thread_data[i].lock, NULL);
pthread_cond_init(&s->thread_data[i].cond, NULL);
@@ -248,9 +251,9 @@ static void get_quants(VP8Context *s)

s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)];
s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
/* 101581>>16 is equivalent to 155/100 */
s->qmat[i].luma_dc_qmul[1] = (101581 * vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)]) >> 16;
s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];

@ -317,9 +320,11 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
|
||||
|
||||
if (!s->profile)
|
||||
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
|
||||
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab,
|
||||
sizeof(s->put_pixels_tab));
|
||||
else // profile 1-3 use bilinear, 4+ aren't defined so whatever
|
||||
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));
|
||||
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab,
|
||||
sizeof(s->put_pixels_tab));
|
||||
|
||||
if (header_size > buf_size - 7 * s->keyframe) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
|
||||
@ -328,7 +333,8 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
|
||||
|
||||
if (s->keyframe) {
|
||||
if (AV_RL24(buf) != 0x2a019d) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Invalid start code 0x%x\n", AV_RL24(buf));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
width = AV_RL16(buf + 3) & 0x3fff;
|
||||
@ -344,11 +350,15 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
|
||||
s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
|
||||
for (i = 0; i < 4; i++)
|
||||
for (j = 0; j < 16; j++)
|
||||
memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
|
||||
memcpy(s->prob->token[i][j],
|
||||
vp8_token_default_probs[i][vp8_coeff_band[j]],
|
||||
sizeof(s->prob->token[i][j]));
|
||||
memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
|
||||
memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
|
||||
memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
|
||||
memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
|
||||
sizeof(s->prob->pred16x16));
|
||||
memcpy(s->prob->pred8x8c, vp8_pred8x8c_prob_inter,
|
||||
sizeof(s->prob->pred8x8c));
|
||||
memcpy(s->prob->mvc, vp8_mv_default_prob,
|
||||
sizeof(s->prob->mvc));
|
||||
memset(&s->segmentation, 0, sizeof(s->segmentation));
|
||||
memset(&s->lf_delta, 0, sizeof(s->lf_delta));
|
||||
}
|
||||
@ -382,10 +392,9 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
|
||||
}
|
||||
|
||||
if (!s->macroblocks_base || /* first frame */
|
||||
width != s->avctx->width || height != s->avctx->height) {
|
||||
width != s->avctx->width || height != s->avctx->height)
|
||||
if ((ret = update_dimensions(s, width, height)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
get_quants(s);
|
||||
|
||||
@ -437,7 +446,8 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
|
||||
static av_always_inline
|
||||
void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
|
||||
{
|
||||
dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x);
|
||||
dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y);
|
||||
@ -495,9 +505,8 @@ int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int lay
|
||||
int n, num;
|
||||
VP8Macroblock *top_mb;
|
||||
VP8Macroblock *left_mb = &mb[-1];
|
||||
const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
|
||||
*mbsplits_top,
|
||||
*mbsplits_cur, *firstidx;
|
||||
const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
|
||||
const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
|
||||
VP56mv *top_mv;
|
||||
VP56mv *left_mv = left_mb->bmv;
|
||||
VP56mv *cur_mv = mb->bmv;
|
||||
@ -510,11 +519,10 @@ int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int lay
|
||||
top_mv = top_mb->bmv;
|
||||
|
||||
if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
|
||||
if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1])) {
|
||||
if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1]))
|
||||
part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
|
||||
} else {
|
||||
else
|
||||
part_idx = VP8_SPLITMVMODE_8x8;
|
||||
}
|
||||
} else {
|
||||
part_idx = VP8_SPLITMVMODE_4x4;
|
||||
}
|
||||
@ -560,7 +568,8 @@ int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int lay
|
||||
}
|
||||
|
||||
static av_always_inline
|
||||
void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
|
||||
void decode_mvs(VP8Context *s, VP8Macroblock *mb,
|
||||
int mb_x, int mb_y, int layout)
|
||||
{
|
||||
VP8Macroblock *mb_edge[3] = { 0 /* top */,
|
||||
mb - 1 /* left */,
|
||||
@ -577,8 +586,7 @@ void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout
|
||||
if (!layout) { // layout is inlined (s->mb_layout is not)
|
||||
mb_edge[0] = mb + 2;
|
||||
mb_edge[2] = mb + 1;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
mb_edge[0] = mb - s->mb_width - 1;
|
||||
mb_edge[2] = mb - s->mb_width - 2;
|
||||
}
|
||||
@ -598,7 +606,8 @@ void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout
|
||||
if (cur_sign_bias != sign_bias[edge_ref]) { \
|
||||
/* SWAR negate of the values in mv. */ \
|
||||
mv = ~mv; \
|
||||
mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
|
||||
mv = ((mv & 0x7fff7fff) + \
|
||||
0x00010001) ^ (mv & 0x80008000); \
|
||||
} \
|
||||
if (!n || mv != AV_RN32A(&near_mv[idx])) \
|
||||
AV_WN32A(&near_mv[++idx], mv); \
|
||||
@ -617,7 +626,8 @@ void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout
|
||||
mb->mode = VP8_MVMODE_MV;
|
||||
|
||||
/* If we have three distinct MVs, merge first and last if they're the same */
|
||||
if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
|
||||
if (cnt[CNT_SPLITMV] &&
|
||||
AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
|
||||
cnt[CNT_NEAREST] += 1;
|
||||
|
||||
/* Swap near and nearest if necessary */
|
||||
@ -628,7 +638,6 @@ void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout
|
||||
|
||||
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
|
||||
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
|
||||
|
||||
/* Choose the best mv out of 0,0 and the nearest mv */
|
||||
clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
|
||||
cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
|
||||
@ -688,7 +697,8 @@ void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
|
||||
} else {
|
||||
int i;
|
||||
for (i = 0; i < 16; i++)
|
||||
intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
|
||||
intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
|
||||
vp8_pred4x4_prob_inter);
|
||||
}
|
||||
}
|
||||
|
||||
@ -707,7 +717,8 @@ void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
|
||||
mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
|
||||
|
||||
if (s->keyframe) {
|
||||
mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);
|
||||
mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra,
|
||||
vp8_pred16x16_prob_intra);
|
||||
|
||||
if (mb->mode == MODE_I4x4) {
|
||||
decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
|
||||
@ -720,13 +731,15 @@ void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
|
||||
AV_WN32A(s->intra4x4_pred_mode_left, modes);
|
||||
}
|
||||
|
||||
mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
|
||||
mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
|
||||
vp8_pred8x8c_prob_intra);
|
||||
mb->ref_frame = VP56_FRAME_CURRENT;
|
||||
} else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
|
||||
// inter MB, 16.2
|
||||
if (vp56_rac_get_prob_branchy(c, s->prob->last))
|
||||
mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
|
||||
VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
|
||||
mb->ref_frame =
|
||||
vp56_rac_get_prob(c, s->prob->golden) ? VP56_FRAME_GOLDEN2 /* altref */
|
||||
: VP56_FRAME_GOLDEN;
|
||||
else
|
||||
mb->ref_frame = VP56_FRAME_PREVIOUS;
|
||||
s->ref_count[mb->ref_frame - 1]++;
|
||||
@ -740,7 +753,8 @@ void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
|
||||
if (mb->mode == MODE_I4x4)
|
||||
decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
|
||||
|
||||
mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
|
||||
mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
|
||||
s->prob->pred8x8c);
|
||||
mb->ref_frame = VP56_FRAME_CURRENT;
|
||||
mb->partitioning = VP8_SPLITMVMODE_NONE;
|
||||
AV_ZERO32(&mb->bmv[0]);
|
||||
@ -754,12 +768,14 @@ void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
|
||||
* @param probs probabilities to use when reading trees from the bitstream
|
||||
* @param i initial coeff index, 0 unless a separate DC block is coded
|
||||
* @param qmul array holding the dc/ac dequant factor at position 0/1
|
||||
*
|
||||
* @return 0 if no coeffs were decoded
|
||||
* otherwise, the index of the last coeff decoded plus one
|
||||
*/
|
||||
static int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16],
|
||||
uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
|
||||
int i, uint8_t *token_prob, int16_t qmul[2])
|
||||
int i, uint8_t *token_prob,
|
||||
int16_t qmul[2])
|
||||
{
|
||||
VP56RangeCoder c = *r;
|
||||
goto skip_eob;
|
||||
@ -821,6 +837,7 @@ skip_eob:
|
||||
* @param zero_nhood the initial prediction context for number of surrounding
|
||||
* all-zero blocks (only left/top, so 0-2)
|
||||
* @param qmul array holding the dc/ac dequant factor at position 0/1
|
||||
*
|
||||
* @return 0 if no coeffs were decoded
|
||||
* otherwise, the index of the last coeff decoded plus one
|
||||
*/
|
||||
@ -836,8 +853,8 @@ int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16],
|
||||
}
|
||||
|
||||
static av_always_inline
|
||||
void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb,
|
||||
uint8_t t_nnz[9], uint8_t l_nnz[9])
|
||||
void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c,
|
||||
VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9])
|
||||
{
|
||||
int i, x, y, luma_start = 0, luma_ctx = 3;
|
||||
int nnz_pred, nnz, nnz_total = 0;
|
||||
@ -848,8 +865,8 @@ void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Ma
|
||||
nnz_pred = t_nnz[8] + l_nnz[8];
|
||||
|
||||
// decode DC values and do hadamard
|
||||
nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0, nnz_pred,
|
||||
s->qmat[segment].luma_dc_qmul);
|
||||
nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
|
||||
nnz_pred, s->qmat[segment].luma_dc_qmul);
|
||||
l_nnz[8] = t_nnz[8] = !!nnz;
|
||||
if (nnz) {
|
||||
nnz_total += nnz;
|
||||
@ -867,9 +884,12 @@ void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Ma
|
||||
for (y = 0; y < 4; y++)
|
||||
for (x = 0; x < 4; x++) {
|
||||
nnz_pred = l_nnz[y] + t_nnz[x];
|
||||
nnz = decode_block_coeffs(c, td->block[y][x], s->prob->token[luma_ctx], luma_start,
|
||||
nnz_pred, s->qmat[segment].luma_qmul);
|
||||
// nnz+block_dc may be one more than the actual last index, but we don't care
|
||||
nnz = decode_block_coeffs(c, td->block[y][x],
|
||||
s->prob->token[luma_ctx],
|
||||
luma_start, nnz_pred,
|
||||
s->qmat[segment].luma_qmul);
|
||||
/* nnz+block_dc may be one more than the actual last index,
|
||||
* but we don't care */
|
||||
td->non_zero_count_cache[y][x] = nnz + block_dc;
|
||||
t_nnz[x] = l_nnz[y] = !!nnz;
|
||||
nnz_total += nnz;
|
||||
@ -882,8 +902,10 @@ void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Ma
|
||||
for (y = 0; y < 2; y++)
|
||||
for (x = 0; x < 2; x++) {
|
||||
nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
|
||||
nnz = decode_block_coeffs(c, td->block[i][(y<<1)+x], s->prob->token[2], 0,
|
||||
nnz_pred, s->qmat[segment].chroma_qmul);
|
||||
nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
|
||||
s->prob->token[2],
|
||||
0, nnz_pred,
|
||||
s->qmat[segment].chroma_qmul);
|
||||
td->non_zero_count_cache[i][(y << 1) + x] = nnz;
|
||||
t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
|
||||
nnz_total += nnz;
|
||||
@ -897,7 +919,8 @@ void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Ma
|
||||
}
|
||||
|
||||
static av_always_inline
|
||||
void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
|
||||
void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
|
||||
uint8_t *src_cb, uint8_t *src_cr,
|
||||
int linesize, int uvlinesize, int simple)
|
||||
{
|
||||
AV_COPY128(top_border, src_y + 15 * linesize);
|
||||
@ -908,18 +931,21 @@ void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint
|
||||
}
|
||||
|
||||
static av_always_inline
|
||||
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
|
||||
int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
|
||||
int simple, int xchg)
|
||||
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
|
||||
uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
|
||||
int mb_y, int mb_width, int simple, int xchg)
|
||||
{
|
||||
uint8_t *top_border_m1 = top_border - 32; // for TL prediction
|
||||
src_y -= linesize;
|
||||
src_cb -= uvlinesize;
|
||||
src_cr -= uvlinesize;
|
||||
|
||||
#define XCHG(a,b,xchg) do { \
|
||||
if (xchg) AV_SWAP64(b,a); \
|
||||
else AV_COPY64(b,a); \
|
||||
#define XCHG(a, b, xchg) \
|
||||
do { \
|
||||
if (xchg) \
|
||||
AV_SWAP64(b, a); \
|
||||
else \
|
||||
AV_COPY64(b, a); \
|
||||
} while (0)
|
||||
|
||||
XCHG(top_border_m1 + 8, src_y - 8, xchg);
|
||||
@ -941,22 +967,20 @@ void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_
|
||||
static av_always_inline
|
||||
int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
|
||||
{
|
||||
if (!mb_x) {
|
||||
if (!mb_x)
|
||||
return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
|
||||
} else {
|
||||
else
|
||||
return mb_y ? mode : LEFT_DC_PRED8x8;
|
||||
}
|
||||
}
|
||||
|
||||
static av_always_inline
|
||||
int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y)
|
||||
{
|
||||
if (!mb_x) {
|
||||
if (!mb_x)
|
||||
return mb_y ? VERT_PRED8x8 : DC_129_PRED8x8;
|
||||
} else {
|
||||
else
|
||||
return mb_y ? mode : HOR_PRED8x8;
|
||||
}
|
||||
}
|
||||
|
||||
static av_always_inline
|
||||
int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
|
||||
@ -968,7 +992,7 @@ int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
|
||||
return !mb_y ? DC_127_PRED8x8 : mode;
|
||||
case HOR_PRED8x8:
|
||||
return !mb_x ? DC_129_PRED8x8 : mode;
|
||||
case PLANE_PRED8x8 /*TM*/:
|
||||
case PLANE_PRED8x8: /* TM */
|
||||
return check_tm_pred8x8_mode(mode, mb_x, mb_y);
|
||||
}
|
||||
return mode;
|
||||
@ -1007,7 +1031,8 @@ int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf
|
||||
return !mb_x ? DC_129_PRED : mode;
|
||||
case TM_VP8_PRED:
|
||||
return check_tm_pred4x4_mode(mode, mb_x, mb_y);
|
||||
case DC_PRED: // 4x4 DC doesn't use the same "H.264-style" exceptions as 16x16/8x8 DC
|
||||
case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
|
||||
* as 16x16/8x8 DC */
|
||||
case DIAG_DOWN_RIGHT_PRED:
|
||||
case VERT_RIGHT_PRED:
|
||||
case HOR_DOWN_PRED:
|
||||
@ -1025,8 +1050,8 @@ void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
int x, y, mode, nnz;
|
||||
uint32_t tr;
|
||||
|
||||
// for the first row, we need to run xchg_mb_border to init the top edge to 127
|
||||
// otherwise, skip it if we aren't going to deblock
|
||||
/* for the first row, we need to run xchg_mb_border to init the top edge
|
||||
* to 127 otherwise, skip it if we aren't going to deblock */
|
||||
if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
|
||||
xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
|
||||
s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
|
||||
@ -1046,8 +1071,7 @@ void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
|
||||
// if we're on the right edge of the frame, said edge is extended
|
||||
// from the top macroblock
|
||||
if (mb_y &&
|
||||
mb_x == s->mb_width-1) {
|
||||
if (mb_y && mb_x == s->mb_width - 1) {
|
||||
tr = tr_right[-1] * 0x01010101u;
|
||||
tr_right = (uint8_t *) &tr;
|
||||
}
|
||||
@ -1067,7 +1091,9 @@ void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
} else if (x == 3)
|
||||
topright = tr_right;
|
||||
|
||||
mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x, mb_y + y, &copy);
|
||||
mode = check_intra_pred4x4_mode_emuedge(intra4x4[x],
|
||||
mb_x + x, mb_y + y,
|
||||
&copy);
|
||||
if (copy) {
|
||||
dst = copy_dst + 12;
|
||||
linesize = 8;
|
||||
@ -1105,9 +1131,11 @@ void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
nnz = td->non_zero_count_cache[y][x];
|
||||
if (nnz) {
|
||||
if (nnz == 1)
|
||||
s->vp8dsp.vp8_idct_dc_add(ptr+4*x, td->block[y][x], s->linesize);
|
||||
s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
|
||||
td->block[y][x], s->linesize);
|
||||
else
|
||||
s->vp8dsp.vp8_idct_add(ptr+4*x, td->block[y][x], s->linesize);
|
||||
s->vp8dsp.vp8_idct_add(ptr + 4 * x,
|
||||
td->block[y][x], s->linesize);
|
||||
}
|
||||
topright += 4;
|
||||
}
|
||||
@ -1176,15 +1204,18 @@ void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
|
||||
s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
|
||||
src - my_idx * linesize - mx_idx,
|
||||
EDGE_EMU_LINESIZE, linesize,
|
||||
block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
|
||||
x_off - mx_idx, y_off - my_idx, width, height);
|
||||
block_w + subpel_idx[1][mx],
|
||||
block_h + subpel_idx[1][my],
|
||||
x_off - mx_idx, y_off - my_idx,
|
||||
width, height);
|
||||
src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
|
||||
src_linesize = EDGE_EMU_LINESIZE;
|
||||
}
|
||||
mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
|
||||
} else {
|
||||
ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
|
||||
mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
|
||||
mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
|
||||
linesize, block_h, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1206,9 +1237,10 @@ void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
|
||||
* @param mc_func motion compensation function pointers (bilinear or sixtap MC)
|
||||
*/
|
||||
static av_always_inline
|
||||
void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2,
|
||||
ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off,
|
||||
int block_w, int block_h, int width, int height, ptrdiff_t linesize,
|
||||
void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
|
||||
uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
|
||||
int x_off, int y_off, int block_w, int block_h,
|
||||
int width, int height, ptrdiff_t linesize,
|
||||
vp8_mc_func mc_func[3][3])
|
||||
{
|
||||
uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
|
||||
@ -1255,8 +1287,7 @@ void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst
|
||||
static av_always_inline
|
||||
void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
ThreadFrame *ref_frame, int x_off, int y_off,
|
||||
int bx_off, int by_off,
|
||||
int block_w, int block_h,
|
||||
int bx_off, int by_off, int block_w, int block_h,
|
||||
int width, int height, VP56mv *mv)
|
||||
{
|
||||
VP56mv uvmv = *mv;
|
||||
@ -1272,10 +1303,14 @@ void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
uvmv.x &= ~7;
|
||||
uvmv.y &= ~7;
|
||||
}
|
||||
x_off >>= 1; y_off >>= 1;
|
||||
bx_off >>= 1; by_off >>= 1;
|
||||
width >>= 1; height >>= 1;
|
||||
block_w >>= 1; block_h >>= 1;
|
||||
x_off >>= 1;
|
||||
y_off >>= 1;
|
||||
bx_off >>= 1;
|
||||
by_off >>= 1;
|
||||
width >>= 1;
|
||||
height >>= 1;
|
||||
block_w >>= 1;
|
||||
block_h >>= 1;
|
||||
vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
|
||||
dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
|
||||
&uvmv, x_off + bx_off, y_off + by_off,
|
||||
@ -1285,7 +1320,9 @@ void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
|
||||
/* Fetch pixels for estimated mv 4 macroblocks ahead.
|
||||
* Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
|
||||
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
|
||||
static av_always_inline
|
||||
void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
|
||||
int mb_xy, int ref)
|
||||
{
|
||||
/* Don't prefetch refs that haven't been used very often this frame. */
|
||||
if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
|
||||
@ -1336,7 +1373,10 @@ void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
}
|
||||
|
||||
/* U/V */
|
||||
x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
|
||||
x_off >>= 1;
|
||||
y_off >>= 1;
|
||||
width >>= 1;
|
||||
height >>= 1;
|
||||
for (y = 0; y < 2; y++) {
|
||||
for (x = 0; x < 2; x++) {
|
||||
uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
|
||||
@ -1354,8 +1394,8 @@ void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
uvmv.y &= ~7;
|
||||
}
|
||||
vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
|
||||
dst[2] + 4*y*s->uvlinesize + x*4, ref, &uvmv,
|
||||
4*x + x_off, 4*y + y_off, 4, 4,
|
||||
dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
|
||||
&uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
|
||||
width, height, s->uvlinesize,
|
||||
s->put_pixels_tab[2]);
|
||||
}
|
||||
@ -1387,8 +1427,8 @@ void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
|
||||
}
|
||||
}
|
||||
|
||||
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td,
|
||||
uint8_t *dst[3], VP8Macroblock *mb)
|
||||
static av_always_inline
|
||||
void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
|
||||
{
|
||||
int x, y, ch;
|
||||
|
||||
@ -1400,9 +1440,13 @@ static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td,
|
||||
if (nnz4 & ~0x01010101) {
|
||||
for (x = 0; x < 4; x++) {
|
||||
if ((uint8_t) nnz4 == 1)
|
||||
s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, td->block[y][x], s->linesize);
|
||||
s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
|
||||
td->block[y][x],
|
||||
s->linesize);
|
||||
else if ((uint8_t) nnz4 > 1)
|
||||
s->vp8dsp.vp8_idct_add(y_dst+4*x, td->block[y][x], s->linesize);
|
||||
s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
|
||||
td->block[y][x],
|
||||
s->linesize);
|
||||
nnz4 >>= 8;
|
||||
if (!nnz4)
|
||||
break;
|
||||
@ -1423,9 +1467,13 @@ static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td,
|
||||
for (y = 0; y < 2; y++) {
|
||||
for (x = 0; x < 2; x++) {
|
||||
if ((uint8_t) nnz4 == 1)
|
||||
s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, td->block[4+ch][(y<<1)+x], s->uvlinesize);
|
||||
s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
|
||||
td->block[4 + ch][(y << 1) + x],
|
||||
s->uvlinesize);
|
||||
else if ((uint8_t) nnz4 > 1)
|
||||
s->vp8dsp.vp8_idct_add(ch_dst+4*x, td->block[4+ch][(y<<1)+x], s->uvlinesize);
|
||||
s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
|
||||
td->block[4 + ch][(y << 1) + x],
|
||||
s->uvlinesize);
|
||||
nnz4 >>= 8;
|
||||
if (!nnz4)
|
||||
goto chroma_idct_end;
|
||||
@ -1436,11 +1484,13 @@ static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td,
|
||||
s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
|
||||
}
|
||||
}
|
||||
chroma_idct_end: ;
|
||||
chroma_idct_end:
|
||||
;
|
||||
}
|
||||
}
|
||||
|
||||
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f )
|
||||
static av_always_inline
|
||||
void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f)
|
||||
{
|
||||
int interior_limit, filter_level;
|
||||
|
||||
@ -1467,10 +1517,13 @@ static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *m
|
||||
|
||||
f->filter_level = filter_level;
|
||||
f->inner_limit = interior_limit;
|
||||
f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
|
||||
f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 ||
|
||||
mb->mode == VP8_MVMODE_SPLIT;
|
||||
}
|
||||
|
||||
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
|
||||
static av_always_inline
|
||||
void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f,
|
||||
int mb_x, int mb_y)
|
||||
{
|
||||
int mbedge_lim, bedge_lim, hev_thresh;
|
||||
int filter_level = f->filter_level;
|
||||
@ -1540,7 +1593,9 @@ static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8Filter
|
||||
}
|
||||
}
|
||||
|
||||
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
|
||||
static av_always_inline
|
||||
void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f,
|
||||
int mb_x, int mb_y)
|
||||
{
|
||||
int mbedge_lim, bedge_lim;
|
||||
int filter_level = f->filter_level;
|
||||
@ -1581,7 +1636,8 @@ static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
|
||||
s->mv_min.y = -MARGIN;
|
||||
s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
|
||||
for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
|
||||
VP8Macroblock *mb = s->macroblocks_base + ((s->mb_width+1)*(mb_y + 1) + 1);
|
||||
VP8Macroblock *mb = s->macroblocks_base +
|
||||
((s->mb_width + 1) * (mb_y + 1) + 1);
|
||||
int mb_xy = mb_y * s->mb_width;
|
||||
|
||||
AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
|
||||
@ -1590,7 +1646,8 @@ static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
|
||||
s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
|
||||
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
|
||||
if (mb_y == 0)
|
||||
AV_WN32A((mb-s->mb_width-1)->intra4x4_pred_mode_top, DC_PRED*0x01010101);
|
||||
AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
|
||||
DC_PRED * 0x01010101);
|
||||
decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
|
||||
prev_frame && prev_frame->seg_map ?
|
||||
prev_frame->seg_map->data + mb_xy : NULL, 1);
|
||||
@ -1622,11 +1679,14 @@ static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
|
||||
#define update_pos(td, mb_y, mb_x) \
|
||||
do { \
|
||||
int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
|
||||
int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && (num_jobs > 1);\
|
||||
int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
|
||||
(num_jobs > 1); \
|
||||
int is_null = (next_td == NULL) || (prev_td == NULL); \
|
||||
int pos_check = (is_null) ? 1 :\
|
||||
(next_td != td && pos >= next_td->wait_mb_pos) ||\
|
||||
(prev_td != td && pos >= prev_td->wait_mb_pos);\
|
||||
int pos_check = (is_null) ? 1 \
|
||||
: (next_td != td && \
|
||||
pos >= next_td->wait_mb_pos) || \
|
||||
(prev_td != td && \
|
||||
pos >= prev_td->wait_mb_pos); \
|
||||
td->thread_mb_pos = pos; \
|
||||
if (sliced_threading && pos_check) { \
|
||||
pthread_mutex_lock(&td->lock); \
|
||||
@ -1655,10 +1715,14 @@ static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
|
||||
curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
|
||||
curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
|
||||
};
|
||||
if (mb_y == 0) prev_td = td;
|
||||
else prev_td = &s->thread_data[(jobnr + num_jobs - 1)%num_jobs];
|
||||
if (mb_y == s->mb_height-1) next_td = td;
|
||||
else next_td = &s->thread_data[(jobnr + 1)%num_jobs];
|
||||
if (mb_y == 0)
|
||||
prev_td = td;
|
||||
else
|
||||
prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
|
||||
if (mb_y == s->mb_height - 1)
|
||||
next_td = td;
|
||||
else
|
||||
next_td = &s->thread_data[(jobnr + 1) % num_jobs];
|
||||
if (s->mb_layout == 1)
|
||||
mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
|
||||
else {
|
||||
@ -1683,12 +1747,15 @@ static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
|
||||
if (threadnr != 0) {
|
||||
check_thread_pos(td, prev_td, mb_x + 1, mb_y - 1);
|
||||
} else {
|
||||
check_thread_pos(td, prev_td, (s->mb_width+3) + (mb_x+1), mb_y-1);
|
||||
check_thread_pos(td, prev_td,
|
||||
(s->mb_width + 3) + (mb_x + 1), mb_y - 1);
|
||||
}
|
||||
}
|
||||
|
||||
s->vdsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
|
||||
s->vdsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);
|
||||
s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
|
||||
s->linesize, 4);
|
||||
s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
|
||||
dst[2] - dst[1], 2);
|
||||
|
||||
if (!s->mb_layout)
|
||||
decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
|
||||
@ -1713,7 +1780,8 @@ static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
|
||||
AV_ZERO64(td->left_nnz);
|
||||
AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
|
||||
|
||||
// Reset DC block predictors if they would exist if the mb had coefficients
|
||||
/* Reset DC block predictors if they would exist
|
||||
* if the mb had coefficients */
|
||||
if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
|
||||
td->left_nnz[8] = 0;
|
||||
s->top_nnz[mb_x][8] = 0;
|
||||
@ -1725,9 +1793,11 @@ static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
|
||||
|
||||
if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
|
||||
if (s->filter.simple)
|
||||
backup_mb_border(s->top_border[mb_x+1], dst[0], NULL, NULL, s->linesize, 0, 1);
|
||||
backup_mb_border(s->top_border[mb_x + 1], dst[0],
|
||||
NULL, NULL, s->linesize, 0, 1);
|
||||
else
|
||||
backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
|
||||
backup_mb_border(s->top_border[mb_x + 1], dst[0],
|
||||
dst[1], dst[2], s->linesize, s->uvlinesize, 0);
|
||||
}
|
||||
|
||||
prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
|
||||
@ -1766,26 +1836,31 @@ static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
|
||||
else
|
||||
mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
|
||||
|
||||
if (mb_y == 0) prev_td = td;
|
||||
else prev_td = &s->thread_data[(jobnr + num_jobs - 1)%num_jobs];
|
||||
if (mb_y == s->mb_height-1) next_td = td;
|
||||
else next_td = &s->thread_data[(jobnr + 1)%num_jobs];
|
||||
if (mb_y == 0)
|
||||
prev_td = td;
|
||||
else
|
||||
prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
|
||||
if (mb_y == s->mb_height - 1)
|
||||
next_td = td;
|
||||
else
|
||||
next_td = &s->thread_data[(jobnr + 1) % num_jobs];
|
||||
|
||||
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
|
||||
VP8FilterStrength *f = &td->filter_strength[mb_x];
|
||||
if (prev_td != td) {
|
||||
check_thread_pos(td, prev_td, (mb_x+1) + (s->mb_width+3), mb_y-1);
|
||||
}
|
||||
if (prev_td != td)
|
||||
check_thread_pos(td, prev_td,
|
||||
(mb_x + 1) + (s->mb_width + 3), mb_y - 1);
|
||||
if (next_td != td)
|
||||
if (next_td != &s->thread_data[0]) {
|
||||
if (next_td != &s->thread_data[0])
|
||||
check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
|
||||
}
|
||||
|
||||
if (num_jobs == 1) {
|
||||
if (s->filter.simple)
|
||||
backup_mb_border(s->top_border[mb_x+1], dst[0], NULL, NULL, s->linesize, 0, 1);
|
||||
backup_mb_border(s->top_border[mb_x + 1], dst[0],
|
||||
NULL, NULL, s->linesize, 0, 1);
|
||||
else
|
||||
backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
|
||||
backup_mb_border(s->top_border[mb_x + 1], dst[0],
|
||||
dst[1], dst[2], s->linesize, s->uvlinesize, 0);
|
||||
}
|
||||
|
||||
if (s->filter.simple)
|
||||
@ -1808,9 +1883,11 @@ static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
|
||||
VP8ThreadData *next_td = NULL, *prev_td = NULL;
|
||||
VP8Frame *curframe = s->curframe;
|
||||
int mb_y, num_jobs = s->num_jobs;
|
||||
|
||||
td->thread_nr = threadnr;
|
||||
for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
|
||||
if (mb_y >= s->mb_height) break;
|
||||
if (mb_y >= s->mb_height)
|
||||
break;
|
||||
td->thread_mb_pos = mb_y << 16;
|
||||
vp8_decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
|
||||
if (s->deblock_filter)
|
||||
@ -1840,11 +1917,12 @@ int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
|
||||
prev_frame = s->framep[VP56_FRAME_CURRENT];
|
||||
|
||||
referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
|
||||
|| s->update_altref == VP56_FRAME_CURRENT;
|
||||
referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
|
||||
s->update_altref == VP56_FRAME_CURRENT;
|
||||
|
||||
skip_thresh = !referenced ? AVDISCARD_NONREF :
|
||||
!s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;
|
||||
skip_thresh = !referenced ? AVDISCARD_NONREF
|
||||
: !s->keyframe ? AVDISCARD_NONKEY
|
||||
: AVDISCARD_ALL;
|
||||
|
||||
if (avctx->skip_frame >= skip_thresh) {
|
||||
s->invisible = 1;
|
||||
@ -1878,40 +1956,43 @@ int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
if (curframe->tf.f->data[0])
|
||||
vp8_release_frame(s, curframe);
|
||||
|
||||
// Given that arithmetic probabilities are updated every frame, it's quite likely
|
||||
// that the values we have on a random interframe are complete junk if we didn't
|
||||
// start decode on a keyframe. So just don't display anything rather than junk.
|
||||
/* Given that arithmetic probabilities are updated every frame, it's quite
|
||||
* likely that the values we have on a random interframe are complete
|
||||
* junk if we didn't start decode on a keyframe. So just don't display
|
||||
* anything rather than junk. */
|
||||
if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
|
||||
!s->framep[VP56_FRAME_GOLDEN] ||
|
||||
!s->framep[VP56_FRAME_GOLDEN2])) {
|
||||
av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
|
||||
av_log(avctx, AV_LOG_WARNING,
|
||||
"Discarding interframe without a prior keyframe!\n");
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
|
||||
curframe->tf.f->key_frame = s->keyframe;
|
||||
curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
|
||||
curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
|
||||
: AV_PICTURE_TYPE_P;
|
||||
if ((ret = vp8_alloc_frame(s, curframe, referenced))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
// check if golden and altref are swapped
|
||||
if (s->update_altref != VP56_FRAME_NONE) {
|
||||
if (s->update_altref != VP56_FRAME_NONE)
|
||||
s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
|
||||
} else {
|
||||
else
|
||||
s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
|
||||
}
|
||||
if (s->update_golden != VP56_FRAME_NONE) {
|
||||
|
||||
if (s->update_golden != VP56_FRAME_NONE)
|
||||
s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
|
||||
} else {
|
||||
else
|
||||
s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
|
||||
}
|
||||
if (s->update_last) {
|
||||
|
||||
if (s->update_last)
|
||||
s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
|
||||
} else {
|
||||
else
|
||||
s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
|
||||
}
|
||||
|
||||
s->next_framep[VP56_FRAME_CURRENT] = curframe;
|
||||
|
||||
ff_thread_finish_setup(avctx);
|
||||
@ -1920,15 +2001,16 @@ int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
s->uvlinesize = curframe->tf.f->linesize[1];
|
||||
|
||||
memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
|
||||
/* Zero macroblock structures for top/top-left prediction from outside the frame. */
|
||||
/* Zero macroblock structures for top/top-left prediction
|
||||
* from outside the frame. */
|
||||
if (!s->mb_layout)
|
||||
memset(s->macroblocks + s->mb_height*2 - 1, 0, (s->mb_width+1)*sizeof(*s->macroblocks));
|
||||
memset(s->macroblocks + s->mb_height * 2 - 1, 0,
|
||||
(s->mb_width + 1) * sizeof(*s->macroblocks));
|
||||
if (!s->mb_layout && s->keyframe)
|
||||
memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
|
||||
|
||||
memset(s->ref_count, 0, sizeof(s->ref_count));
|
||||
|
||||
|
||||
if (s->mb_layout == 1) {
|
||||
// Make sure the previous frame has read its segmentation map,
|
||||
// if we re-use the same map.
|
||||
@ -1951,7 +2033,8 @@ int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
s->thread_data[i].thread_mb_pos = 0;
|
||||
s->thread_data[i].wait_mb_pos = INT_MAX;
|
||||
}
|
||||
avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL, num_jobs);
|
||||
avctx->execute2(avctx, vp8_decode_mb_row_sliced,
|
||||
s->thread_data, NULL, num_jobs);
|
||||
|
||||
ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
|
||||
memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
|
||||
@ -2033,10 +2116,10 @@ static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define REBASE(pic) \
|
||||
pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
|
||||
#define REBASE(pic) pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
|
||||
|
||||
static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
|
||||
static int vp8_decode_update_thread_context(AVCodecContext *dst,
|
||||
const AVCodecContext *src)
|
||||
{
|
||||
VP8Context *s = dst->priv_data, *s_src = src->priv_data;
|
||||
int i;
|
||||
|
@ -28,10 +28,11 @@
|
||||
|
||||
#include "libavutil/buffer.h"
|
||||
|
||||
#include "vp56.h"
|
||||
#include "vp8dsp.h"
|
||||
#include "h264pred.h"
|
||||
#include "thread.h"
|
||||
#include "vp56.h"
|
||||
#include "vp8dsp.h"
|
||||
|
||||
#if HAVE_PTHREADS
|
||||
# include <pthread.h>
|
||||
#elif HAVE_W32THREADS
|
||||
@ -82,7 +83,7 @@ typedef struct VP8FilterStrength {
|
||||
|
||||
typedef struct VP8Macroblock {
|
||||
uint8_t skip;
|
||||
// todo: make it possible to check for at least (i4x4 or split_mv)
|
||||
// TODO: make it possible to check for at least (i4x4 or split_mv)
|
||||
// in one op. are others needed?
|
||||
uint8_t mode;
|
||||
uint8_t ref_frame;
|
||||
|
@ -25,7 +25,8 @@ static int parse(AVCodecParserContext *s,
|
||||
const uint8_t **poutbuf, int *poutbuf_size,
|
||||
const uint8_t *buf, int buf_size)
|
||||
{
|
||||
s->pict_type= (buf[0]&0x01) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
|
||||
s->pict_type = (buf[0] & 0x01) ? AV_PICTURE_TYPE_P
|
||||
: AV_PICTURE_TYPE_I;
|
||||
|
||||
*poutbuf = buf;
|
||||
*poutbuf_size = buf_size;
|
||||
|
@ -30,24 +30,21 @@
|
||||
#include "vp8.h"
|
||||
#include "h264pred.h"
|
||||
|
||||
static const uint8_t vp8_pred4x4_mode[] =
|
||||
{
|
||||
static const uint8_t vp8_pred4x4_mode[] = {
|
||||
[DC_PRED8x8] = DC_PRED,
|
||||
[VERT_PRED8x8] = VERT_PRED,
|
||||
[HOR_PRED8x8] = HOR_PRED,
|
||||
[PLANE_PRED8x8] = TM_VP8_PRED,
|
||||
};
|
||||
|
||||
static const int8_t vp8_pred16x16_tree_intra[4][2] =
|
||||
{
|
||||
static const int8_t vp8_pred16x16_tree_intra[4][2] = {
|
||||
{ -MODE_I4x4, 1 }, // '0'
|
||||
{ 2, 3 },
|
||||
{ -DC_PRED8x8, -VERT_PRED8x8 }, // '100', '101'
|
||||
{ -HOR_PRED8x8, -PLANE_PRED8x8 }, // '110', '111'
|
||||
};
|
||||
|
||||
static const int8_t vp8_pred16x16_tree_inter[4][2] =
|
||||
{
|
||||
static const int8_t vp8_pred16x16_tree_inter[4][2] = {
|
||||
{ -DC_PRED8x8, 1 }, // '0'
|
||||
{ 2, 3 },
|
||||
{ -VERT_PRED8x8, -HOR_PRED8x8 }, // '100', '101'
|
||||
@ -64,26 +61,26 @@ static const int vp8_mode_contexts[6][4] = {
|
||||
};
|
||||
|
||||
static const uint8_t vp8_mbsplits[5][16] = {
|
||||
{ 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
1, 1, 1, 1, 1, 1, 1, 1 },
|
||||
{ 0, 0, 1, 1, 0, 0, 1, 1,
|
||||
0, 0, 1, 1, 0, 0, 1, 1 },
|
||||
{ 0, 0, 1, 1, 0, 0, 1, 1,
|
||||
2, 2, 3, 3, 2, 2, 3, 3 },
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7,
|
||||
8, 9, 10, 11, 12, 13, 14, 15 },
|
||||
{ 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0 }
|
||||
{ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
|
||||
{ 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
|
||||
{ 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3 },
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
|
||||
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
static const uint8_t vp8_mbfirstidx[4][16] = {
|
||||
{ 0, 8 }, { 0, 2 }, { 0, 2, 8, 10 },
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7,
|
||||
8, 9, 10, 11, 12, 13, 14, 15 }
|
||||
{ 0, 8 },
|
||||
{ 0, 2 },
|
||||
{ 0, 2, 8, 10 },
|
||||
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
|
||||
};
|
||||
|
||||
static const uint8_t vp8_mbsplit_count[4] = { 2, 2, 4, 16 };
|
||||
static const uint8_t vp8_mbsplit_prob[3] = { 110, 111, 150 };
|
||||
static const uint8_t vp8_mbsplit_count[4] = {
|
||||
2, 2, 4, 16
|
||||
};
|
||||
static const uint8_t vp8_mbsplit_prob[3] = {
|
||||
110, 111, 150
|
||||
};
|
||||
|
||||
static const uint8_t vp8_submv_prob[5][3] = {
|
||||
{ 147, 136, 18 },
|
||||
@ -93,11 +90,14 @@ static const uint8_t vp8_submv_prob[5][3] = {
|
||||
{ 208, 1, 1 }
|
||||
};
|
||||
|
||||
static const uint8_t vp8_pred16x16_prob_intra[4] = { 145, 156, 163, 128 };
|
||||
static const uint8_t vp8_pred16x16_prob_inter[4] = { 112, 86, 140, 37 };
|
||||
static const uint8_t vp8_pred16x16_prob_intra[4] = {
|
||||
145, 156, 163, 128
|
||||
};
|
||||
static const uint8_t vp8_pred16x16_prob_inter[4] = {
|
||||
112, 86, 140, 37
|
||||
};
|
||||
|
||||
static const int8_t vp8_pred4x4_tree[9][2] =
|
||||
{
|
||||
static const int8_t vp8_pred4x4_tree[9][2] = {
|
||||
{ -DC_PRED, 1 }, // '0'
|
||||
{ -TM_VP8_PRED, 2 }, // '10'
|
||||
{ -VERT_PRED, 3 }, // '110'
|
||||
@ -109,23 +109,23 @@ static const int8_t vp8_pred4x4_tree[9][2] =
|
||||
{ -HOR_DOWN_PRED, -HOR_UP_PRED }, // '1111110', '1111111'
|
||||
};
|
||||
|
||||
static const int8_t vp8_pred8x8c_tree[3][2] =
|
||||
{
|
||||
static const int8_t vp8_pred8x8c_tree[3][2] = {
|
||||
{ -DC_PRED8x8, 1 }, // '0'
|
||||
{ -VERT_PRED8x8, 2 }, // '10
|
||||
{ -HOR_PRED8x8, -PLANE_PRED8x8 }, // '110', '111'
|
||||
};
|
||||
|
||||
static const uint8_t vp8_pred8x8c_prob_intra[3] = { 142, 114, 183 };
|
||||
static const uint8_t vp8_pred8x8c_prob_inter[3] = { 162, 101, 204 };
|
||||
|
||||
static const uint8_t vp8_pred4x4_prob_inter[9] =
|
||||
{
|
||||
static const uint8_t vp8_pred8x8c_prob_intra[3] = {
|
||||
142, 114, 183
|
||||
};
|
||||
static const uint8_t vp8_pred8x8c_prob_inter[3] = {
|
||||
162, 101, 204
|
||||
};
|
||||
static const uint8_t vp8_pred4x4_prob_inter[9] = {
|
||||
120, 90, 79, 133, 87, 85, 80, 111, 151
|
||||
};
|
||||
|
||||
static const uint8_t vp8_pred4x4_prob_intra[10][10][9] =
|
||||
{
|
||||
static const uint8_t vp8_pred4x4_prob_intra[10][10][9] = {
|
||||
{
|
||||
{ 39, 53, 200, 87, 26, 21, 43, 232, 171 },
|
||||
{ 56, 34, 51, 104, 114, 102, 29, 93, 77 },
|
||||
@ -248,22 +248,19 @@ static const uint8_t vp8_pred4x4_prob_intra[10][10][9] =
|
||||
},
|
||||
};
|
||||
|
||||
static const int8_t vp8_segmentid_tree[][2] =
|
||||
{
|
||||
static const int8_t vp8_segmentid_tree[][2] = {
|
||||
{ 1, 2 },
|
||||
{ -0, -1 }, // '00', '01'
|
||||
{ -2, -3 }, // '10', '11'
|
||||
};
|
||||
|
||||
static const uint8_t vp8_coeff_band[16] =
|
||||
{
|
||||
static const uint8_t vp8_coeff_band[16] = {
|
||||
0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7
|
||||
};
|
||||
|
||||
/* Inverse of vp8_coeff_band: mappings of bands to coefficient indexes.
|
||||
* Each list is -1-terminated. */
|
||||
static const int8_t vp8_coeff_band_indexes[8][10] =
|
||||
{
|
||||
static const int8_t vp8_coeff_band_indexes[8][10] = {
|
||||
{ 0, -1 },
|
||||
{ 1, -1 },
|
||||
{ 2, -1 },
|
||||
@ -274,24 +271,34 @@ static const int8_t vp8_coeff_band_indexes[8][10] =
|
||||
{ 15, -1 }
|
||||
};
|
||||
|
||||
static const uint8_t vp8_dct_cat1_prob[] = { 159, 0 };
|
||||
static const uint8_t vp8_dct_cat2_prob[] = { 165, 145, 0 };
|
||||
static const uint8_t vp8_dct_cat3_prob[] = { 173, 148, 140, 0 };
|
||||
static const uint8_t vp8_dct_cat4_prob[] = { 176, 155, 140, 135, 0 };
|
||||
static const uint8_t vp8_dct_cat5_prob[] = { 180, 157, 141, 134, 130, 0 };
|
||||
static const uint8_t vp8_dct_cat6_prob[] = { 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0 };
|
||||
static const uint8_t vp8_dct_cat1_prob[] = {
|
||||
159, 0
|
||||
};
|
||||
static const uint8_t vp8_dct_cat2_prob[] = {
|
||||
165, 145, 0
|
||||
};
|
||||
static const uint8_t vp8_dct_cat3_prob[] = {
|
||||
173, 148, 140, 0
|
||||
};
|
||||
static const uint8_t vp8_dct_cat4_prob[] = {
|
||||
176, 155, 140, 135, 0
|
||||
};
|
||||
static const uint8_t vp8_dct_cat5_prob[] = {
|
||||
180, 157, 141, 134, 130, 0
|
||||
};
|
||||
static const uint8_t vp8_dct_cat6_prob[] = {
|
||||
254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0
|
||||
};
|
||||
|
||||
// only used for cat3 and above; cat 1 and 2 are referenced directly
|
||||
const uint8_t * const ff_vp8_dct_cat_prob[] =
|
||||
{
|
||||
const uint8_t *const ff_vp8_dct_cat_prob[] = {
|
||||
vp8_dct_cat3_prob,
|
||||
vp8_dct_cat4_prob,
|
||||
vp8_dct_cat5_prob,
|
||||
vp8_dct_cat6_prob,
|
||||
};
|
||||
|
||||
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1] =
|
||||
{
|
||||
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS - 1] = {
|
||||
{
|
||||
{
|
||||
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
|
||||
@ -462,8 +469,7 @@ static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1] =
|
||||
},
|
||||
};
|
||||
|
||||
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1] =
|
||||
{
|
||||
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS - 1] = {
|
||||
{
|
||||
{
|
||||
{ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
|
||||
@ -642,8 +648,7 @@ static const uint8_t zigzag_scan[16]={
|
||||
3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
|
||||
};
|
||||
|
||||
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1] =
|
||||
{
|
||||
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT + 1] = {
|
||||
4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 17,
|
||||
18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26, 27, 28,
|
||||
29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43,
|
||||
@ -654,8 +659,7 @@ static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1] =
|
||||
122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 148, 151, 154, 157,
|
||||
};
|
||||
|
||||
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1] =
|
||||
{
|
||||
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT + 1] = {
|
||||
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
||||
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
|
||||
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
|
||||
|
@ -24,9 +24,10 @@
|
||||
* VP8 compatible video decoder
|
||||
*/
|
||||
|
||||
#include "libavutil/common.h"
|
||||
|
||||
#include "mathops.h"
|
||||
#include "vp8dsp.h"
|
||||
#include "libavutil/common.h"
|
||||
|
||||
// TODO: Maybe add dequant
|
||||
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
|
||||
@ -127,7 +128,8 @@ static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
|
||||
}
|
||||
}
|
||||
|
||||
static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
|
||||
static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16],
|
||||
ptrdiff_t stride)
|
||||
{
|
||||
vp8_idct_dc_add_c(dst + stride * 0 + 0, block[0], stride);
|
||||
vp8_idct_dc_add_c(dst + stride * 0 + 4, block[1], stride);
|
||||
@ -135,7 +137,8 @@ static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t s
|
||||
vp8_idct_dc_add_c(dst + stride * 4 + 4, block[3], stride);
|
||||
}
|
||||
|
||||
static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
|
||||
static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16],
|
||||
ptrdiff_t stride)
|
||||
{
|
||||
vp8_idct_dc_add_c(dst + 0, block[0], stride);
|
||||
vp8_idct_dc_add_c(dst + 4, block[1], stride);
|
||||
@ -156,7 +159,8 @@ static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t st
|
||||
|
||||
#define clip_int8(n) (cm[n + 0x80] - 0x80)
|
||||
|
||||
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap)
|
||||
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
|
||||
int is4tap)
|
||||
{
|
||||
LOAD_PIXELS
|
||||
int a, f1, f2;
|
||||
@ -197,12 +201,17 @@ static av_always_inline int simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
|
||||
* E - limit at the macroblock edge
|
||||
* I - limit for interior difference
|
||||
*/
|
||||
static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride, int E, int I)
|
||||
static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride,
|
||||
int E, int I)
|
||||
{
|
||||
LOAD_PIXELS
|
||||
return simple_limit(p, stride, E)
|
||||
&& FFABS(p3-p2) <= I && FFABS(p2-p1) <= I && FFABS(p1-p0) <= I
|
||||
&& FFABS(q3-q2) <= I && FFABS(q2-q1) <= I && FFABS(q1-q0) <= I;
|
||||
return simple_limit(p, stride, E) &&
|
||||
FFABS(p3 - p2) <= I &&
|
||||
FFABS(p2 - p1) <= I &&
|
||||
FFABS(p1 - p0) <= I &&
|
||||
FFABS(q3 - q2) <= I &&
|
||||
FFABS(q2 - q1) <= I &&
|
||||
FFABS(q1 - q0) <= I;
|
||||
}
|
||||
|
||||
// high edge variance
|
||||
@ -235,11 +244,13 @@ static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
|
||||
}
|
||||
|
||||
#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
|
||||
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, ptrdiff_t stride,\
|
||||
int flim_E, int flim_I, int hev_thresh)\
|
||||
static maybe_inline \
|
||||
void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
|
||||
ptrdiff_t stride, \
|
||||
int flim_E, int flim_I, \
|
||||
int hev_thresh) \
|
||||
{ \
|
||||
int i; \
|
||||
\
|
||||
for (i = 0; i < size; i++) \
|
||||
if (normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
|
||||
if (hev(dst + i * stridea, strideb, hev_thresh)) \
|
||||
@ -249,11 +260,13 @@ static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst,
|
||||
} \
|
||||
} \
|
||||
\
|
||||
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, ptrdiff_t stride,\
|
||||
int flim_E, int flim_I, int hev_thresh)\
|
||||
static maybe_inline \
|
||||
void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
|
||||
ptrdiff_t stride, \
|
||||
int flim_E, int flim_I, \
|
||||
int hev_thresh) \
|
||||
{ \
|
||||
int i; \
|
||||
\
|
||||
for (i = 0; i < size; i++) \
|
||||
if (normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
|
||||
int hv = hev(dst + i * stridea, strideb, hev_thresh); \
|
||||
@ -269,14 +282,18 @@ LOOP_FILTER(h, 16, stride, 1,)
|
||||
|
||||
#define UV_LOOP_FILTER(dir, stridea, strideb) \
|
||||
LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
|
||||
static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
|
||||
int fE, int fI, int hev_thresh)\
|
||||
static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, \
|
||||
ptrdiff_t stride, int fE, \
|
||||
int fI, int hev_thresh) \
|
||||
{ \
|
||||
vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
|
||||
vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
|
||||
} \
|
||||
static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
|
||||
int fE, int fI, int hev_thresh)\
|
||||
\
|
||||
static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, \
|
||||
uint8_t *dstV, \
|
||||
ptrdiff_t stride, int fE, \
|
||||
int fI, int hev_thresh) \
|
||||
{ \
|
||||
vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh); \
|
||||
vp8_ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh); \
|
||||
@ -314,11 +331,13 @@ static const uint8_t subpel_filters[7][6] = {
|
||||
};
|
||||
|
||||
#define PUT_PIXELS(WIDTH) \
|
||||
static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int x, int y) { \
|
||||
static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, \
|
||||
uint8_t *src, ptrdiff_t srcstride, \
|
||||
int h, int x, int y) \
|
||||
{ \
|
||||
int i; \
|
||||
for (i = 0; i < h; i++, dst+= dststride, src+= srcstride) { \
|
||||
for (i = 0; i < h; i++, dst += dststride, src += srcstride) \
|
||||
memcpy(dst, src, WIDTH); \
|
||||
} \
|
||||
}
|
||||
|
||||
PUT_PIXELS(16)
|
||||
@ -326,20 +345,24 @@ PUT_PIXELS(8)
|
||||
PUT_PIXELS(4)
|
||||
|
||||
#define FILTER_6TAP(src, F, stride) \
|
||||
cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + F[0]*src[x-2*stride] + \
|
||||
F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + F[5]*src[x+3*stride] + 64) >> 7]
|
||||
cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
|
||||
F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
|
||||
F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]
|
||||
|
||||
#define FILTER_4TAP(src, F, stride) \
|
||||
cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
|
||||
F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
|
||||
|
||||
#define VP8_EPEL_H(SIZE, TAPS) \
|
||||
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
|
||||
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, \
|
||||
ptrdiff_t dststride, \
|
||||
uint8_t *src, \
|
||||
ptrdiff_t srcstride, \
|
||||
int h, int mx, int my) \
|
||||
{ \
|
||||
const uint8_t *filter = subpel_filters[mx - 1]; \
|
||||
const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
|
||||
int x, y; \
|
||||
\
|
||||
for (y = 0; y < h; y++) { \
|
||||
for (x = 0; x < SIZE; x++) \
|
||||
dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
|
||||
@ -347,13 +370,17 @@ static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dst
|
||||
src += srcstride; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define VP8_EPEL_V(SIZE, TAPS) \
|
||||
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
|
||||
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, \
|
||||
ptrdiff_t dststride, \
|
||||
uint8_t *src, \
|
||||
ptrdiff_t srcstride, \
|
||||
int h, int mx, int my) \
|
||||
{ \
|
||||
const uint8_t *filter = subpel_filters[my - 1]; \
|
||||
const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
|
||||
int x, y; \
|
||||
\
|
||||
for (y = 0; y < h; y++) { \
|
||||
for (x = 0; x < SIZE; x++) \
|
||||
dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
|
||||
@ -361,8 +388,15 @@ static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dst
|
||||
src += srcstride; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
|
||||
static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
|
||||
static void \
|
||||
put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, \
|
||||
ptrdiff_t dststride, \
|
||||
uint8_t *src, \
|
||||
ptrdiff_t srcstride, \
|
||||
int h, int mx, \
|
||||
int my) \
|
||||
{ \
|
||||
const uint8_t *filter = subpel_filters[mx - 1]; \
|
||||
const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
|
||||
@ -377,7 +411,6 @@ static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst
|
||||
tmp += SIZE; \
|
||||
src += srcstride; \
|
||||
} \
|
||||
\
|
||||
tmp = tmp_array + (2 - (VTAPS == 4)) * SIZE; \
|
||||
filter = subpel_filters[my - 1]; \
|
||||
\
|
||||
@ -401,6 +434,7 @@ VP8_EPEL_V(4, 4)
|
||||
VP8_EPEL_V(16, 6)
|
||||
VP8_EPEL_V(8, 6)
|
||||
VP8_EPEL_V(4, 6)
|
||||
|
||||
VP8_EPEL_HV(16, 4, 4)
|
||||
VP8_EPEL_HV(8, 4, 4)
|
||||
VP8_EPEL_HV(4, 4, 4)
|
||||
@ -415,11 +449,12 @@ VP8_EPEL_HV(8, 6, 6)
|
||||
VP8_EPEL_HV(4, 6, 6)
|
||||
|
||||
#define VP8_BILINEAR(SIZE) \
|
||||
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
|
||||
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
|
||||
uint8_t *src, ptrdiff_t sstride, \
|
||||
int h, int mx, int my) \
|
||||
{ \
|
||||
int a = 8 - mx, b = mx; \
|
||||
int x, y; \
|
||||
\
|
||||
for (y = 0; y < h; y++) { \
|
||||
for (x = 0; x < SIZE; x++) \
|
||||
dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
|
||||
@ -427,11 +462,13 @@ static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, ui
|
||||
src += sstride; \
|
||||
} \
|
||||
} \
|
||||
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
|
||||
\
|
||||
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
|
||||
uint8_t *src, ptrdiff_t sstride, \
|
||||
int h, int mx, int my) \
|
||||
{ \
|
||||
int c = 8 - my, d = my; \
|
||||
int x, y; \
|
||||
\
|
||||
for (y = 0; y < h; y++) { \
|
||||
for (x = 0; x < SIZE; x++) \
|
||||
dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3; \
|
||||
@ -440,23 +477,24 @@ static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, ui
|
||||
} \
|
||||
} \
|
||||
\
|
||||
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
|
||||
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, \
|
||||
ptrdiff_t dstride, \
|
||||
uint8_t *src, \
|
||||
ptrdiff_t sstride, \
|
||||
int h, int mx, int my) \
|
||||
{ \
|
||||
int a = 8 - mx, b = mx; \
|
||||
int c = 8 - my, d = my; \
|
||||
int x, y; \
|
||||
uint8_t tmp_array[(2 * SIZE + 1) * SIZE]; \
|
||||
uint8_t *tmp = tmp_array; \
|
||||
\
|
||||
for (y = 0; y < h + 1; y++) { \
|
||||
for (x = 0; x < SIZE; x++) \
|
||||
tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
|
||||
tmp += SIZE; \
|
||||
src += sstride; \
|
||||
} \
|
||||
\
|
||||
tmp = tmp_array; \
|
||||
\
|
||||
for (y = 0; y < h; y++) { \
|
||||
for (x = 0; x < SIZE; x++) \
|
||||
dst[x] = (c * tmp[x] + d * tmp[x + SIZE] + 4) >> 3; \
|
||||