Implicit weighted prediction experiment

Adds an experiment that uses a weighted prediction of two INTER
predictors, where the weight is one of (1/4, 3/4), (3/8, 5/8),
(1/2, 1/2), (5/8, 3/8) or (3/4, 1/4), and is chosen implicitly
based on how consistent each predictor is with the already
reconstructed pixels above and to the left of the current
macroblock or superblock.
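
For intuition, the selection could look roughly like the sketch below.
This is hypothetical code, not the committed implementation (which lives
in vp9_reconinter.c, whose diff is suppressed below); the helper name,
the SAD measure and the thresholds are illustrative assumptions. Each
predictor is compared against the border pixels the decoder has already
reconstructed, and the predictor that matches the border better receives
the larger weight, so the decoder can derive the same weight without any
extra bits being signaled:

  #include <stdlib.h>  /* abs() */
  #include <stdint.h>

  /* Returns the weight numerator w for predictor 0, i.e. the blend
   * (w/8, (8 - w)/8), from boundary SADs of the two predictors. */
  static int choose_implicit_weight(const uint8_t *border,
                                    const uint8_t *pred0,
                                    const uint8_t *pred1, int n) {
    int i, err0 = 0, err1 = 0;
    for (i = 0; i < n; ++i) {
      err0 += abs(border[i] - pred0[i]);  /* predictor 0 vs. border */
      err1 += abs(border[i] - pred1[i]);  /* predictor 1 vs. border */
    }
    /* Illustrative thresholds mapping the relative errors onto the
     * five weights named above. */
    if (3 * err0 <= err1) return 6;      /* (3/4, 1/4) */
    if (3 * err0 <= 2 * err1) return 5;  /* (5/8, 3/8) */
    if (3 * err1 <= err0) return 2;      /* (1/4, 3/4) */
    if (3 * err1 <= 2 * err0) return 3;  /* (3/8, 5/8) */
    return 4;                            /* (1/2, 1/2) */
  }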

Currently the weighting is not applied to SPLITMV modes, which
default to the usual (1/2, 1/2) weighting; however, the code for
them is in place, controlled by a macro. The same weight is used
for the Y and UV components, and is derived by analyzing the Y
component only.

Results (over the compound inter-intra experiment):
derf: +0.18%
yt: +0.34%
hd: +0.49%
stdhd: +0.23%

The results suggest that explicitly signaled weights could yield a bigger benefit.

Change-Id: I5438539ff4485c5752874cd1eb078ff14bf5235a
Deb Mukherjee 2013-03-12 14:21:08 -07:00
parent 646616602d
commit 23144d2345
12 changed files with 1806 additions and 279 deletions

configure

@@ -249,6 +249,7 @@ EXPERIMENT_LIST="
useselectrefmv
modelcoefprob
loop_dering
implicit_compoundinter_weight
"
CONFIG_LIST="
external_build

vp9/common/vp9_blockd.h

@@ -288,7 +288,11 @@ struct scale_factors {
int y_den;
int y_offset_q4;
int y_step_q4;
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
convolve_fn_t predict[2][2][8]; // horiz, vert, weight (0 - 7)
#else
convolve_fn_t predict[2][2][2]; // horiz, vert, avg
#endif
};
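// For context, callers would dispatch through this table roughly as
// follows (a hypothetical sketch; the actual setup and dispatch live in
// the scale-factor/reconinter code, not shown in this excerpt). Index 0
// plausibly means plain overwrite and index 4 the (1/2, 1/2) average,
// with the remaining slots holding the 1/8..7/8 weighted kernels:
//
//   scale->predict[subpel_x != 0][subpel_y != 0][weight](
//       src, src_stride, dst, dst_stride,
//       filter_x, x_step_q4, filter_y, y_step_q4, w, h);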
typedef struct macroblockd {

vp9/common/vp9_convolve.c

@@ -122,6 +122,78 @@ static void convolve_avg_horiz_c(const uint8_t *src, int src_stride,
}
}
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
static inline uint8_t combine_qtr(uint8_t a, uint8_t b) {
return (((a) + (b) * 3 + 2) >> 2);
}
static inline uint8_t combine_3qtr(uint8_t a, uint8_t b) {
return (((a) * 3 + (b) + 2) >> 2);
}
static inline uint8_t combine_1by8(uint8_t a, uint8_t b) {
return (((a) * 1 + (b) * 7 + 4) >> 3);
}
static inline uint8_t combine_3by8(uint8_t a, uint8_t b) {
return (((a) * 3 + (b) * 5 + 4) >> 3);
}
static inline uint8_t combine_5by8(uint8_t a, uint8_t b) {
return (((a) * 5 + (b) * 3 + 4) >> 3);
}
static inline uint8_t combine_7by8(uint8_t a, uint8_t b) {
return (((a) * 7 + (b) * 1 + 4) >> 3);
}
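// Note: each combine_*(a, b) above is the rounded fixed-point blend
// (a * w + b * (8 - w) + 4) >> 3, with w/8 applied to the value already
// in dst (a) and (8 - w)/8 to the incoming prediction (b); e.g.
// combine_3by8(96, 160) = (3 * 96 + 5 * 160 + 4) >> 3 = 136.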
// TODO(debargha): Implement with a separate weight parameter
static void convolve_wtd_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x0, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h, int taps,
uint8_t (*combine)(uint8_t a, uint8_t b)) {
int x, y, k, sum;
const int16_t *filter_x_base = filter_x0;
#if ALIGN_FILTERS_256
filter_x_base = (const int16_t *)(((intptr_t)filter_x0) & ~(intptr_t)0xff);
#endif
/* Adjust base pointer address for this source line */
src -= taps / 2 - 1;
for (y = 0; y < h; ++y) {
/* Pointer to filter to use */
const int16_t *filter_x = filter_x0;
/* Initial phase offset */
int x0_q4 = (filter_x - filter_x_base) / taps;
int x_q4 = x0_q4;
for (x = 0; x < w; ++x) {
/* Per-pixel src offset */
int src_x = (x_q4 - x0_q4) >> 4;
for (sum = 0, k = 0; k < taps; ++k) {
sum += src[src_x + k] * filter_x[k];
}
sum += (VP9_FILTER_WEIGHT >> 1);
dst[x] = combine(dst[x], clip_pixel(sum >> VP9_FILTER_SHIFT));
/* Adjust source and filter to use for the next pixel */
x_q4 += x_step_q4;
filter_x = filter_x_base + (x_q4 & 0xf) * taps;
}
src += src_stride;
dst += dst_stride;
}
}
#endif
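// The convolve_wtd_horiz_c/convolve_wtd_vert_c routines mirror the
// existing convolve_avg_*_c ones, except that the rounded
// (dst + pixel + 1) >> 1 average is replaced by the supplied combine()
// weighting.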
static void convolve_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -207,6 +279,52 @@ static void convolve_avg_vert_c(const uint8_t *src, int src_stride,
}
}
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
static void convolve_wtd_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y0, int y_step_q4,
int w, int h, int taps,
uint8_t (*combine)(uint8_t a, uint8_t b)) {
int x, y, k, sum;
const int16_t *filter_y_base = filter_y0;
#if ALIGN_FILTERS_256
filter_y_base = (const int16_t *)(((intptr_t)filter_y0) & ~(intptr_t)0xff);
#endif
/* Adjust base pointer address for this source column */
src -= src_stride * (taps / 2 - 1);
for (x = 0; x < w; ++x) {
/* Pointer to filter to use */
const int16_t *filter_y = filter_y0;
/* Initial phase offset */
int y0_q4 = (filter_y - filter_y_base) / taps;
int y_q4 = y0_q4;
for (y = 0; y < h; ++y) {
/* Per-pixel src offset */
int src_y = (y_q4 - y0_q4) >> 4;
for (sum = 0, k = 0; k < taps; ++k) {
sum += src[(src_y + k) * src_stride] * filter_y[k];
}
sum += (VP9_FILTER_WEIGHT >> 1);
dst[y * dst_stride] = combine(dst[y * dst_stride],
clip_pixel(sum >> VP9_FILTER_SHIFT));
/* Adjust source and filter to use for the next pixel */
y_q4 += y_step_q4;
filter_y = filter_y_base + (y_q4 & 0xf) * taps;
}
++src;
++dst;
}
}
#endif
static void convolve_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -285,6 +403,68 @@ void vp9_convolve8_avg_horiz_c(const uint8_t *src, int src_stride,
w, h, 8);
}
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
void vp9_convolve8_1by8_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_1by8);
}
void vp9_convolve8_qtr_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_qtr);
}
void vp9_convolve8_3by8_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_3by8);
}
void vp9_convolve8_5by8_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_5by8);
}
void vp9_convolve8_3qtr_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_3qtr);
}
void vp9_convolve8_7by8_horiz_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_horiz_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_7by8);
}
#endif
void vp9_convolve8_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -305,6 +485,68 @@ void vp9_convolve8_avg_vert_c(const uint8_t *src, int src_stride,
w, h, 8);
}
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
void vp9_convolve8_1by8_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_1by8);
}
void vp9_convolve8_qtr_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_qtr);
}
void vp9_convolve8_3by8_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_3by8);
}
void vp9_convolve8_5by8_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_5by8);
}
void vp9_convolve8_3qtr_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_3qtr);
}
void vp9_convolve8_7by8_vert_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
convolve_wtd_vert_c(src, src_stride, dst, dst_stride,
filter_x, x_step_q4, filter_y, y_step_q4,
w, h, 8, combine_7by8);
}
#endif
void vp9_convolve8_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -337,6 +579,140 @@ void vp9_convolve8_avg_c(const uint8_t *src, int src_stride,
w, h);
}
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
void vp9_convolve8_1by8_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
assert(w <= 16);
assert(h <= 16);
vp9_convolve8(src, src_stride,
temp, 16,
filter_x, x_step_q4,
filter_y, y_step_q4,
w, h);
vp9_convolve_1by8(temp, 16,
dst, dst_stride,
NULL, 0, /* These unused parameters should be removed! */
NULL, 0, /* These unused parameters should be removed! */
w, h);
}
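// The qtr/3by8/5by8/3qtr/7by8 wrappers below repeat this two-pass
// pattern: filter into the 16x16 temp buffer with vp9_convolve8, then
// blend temp into dst at the matching fixed weight.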
void vp9_convolve8_qtr_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
assert(w <= 16);
assert(h <= 16);
vp9_convolve8(src, src_stride,
temp, 16,
filter_x, x_step_q4,
filter_y, y_step_q4,
w, h);
vp9_convolve_qtr(temp, 16,
dst, dst_stride,
NULL, 0, /* These unused parameters should be removed! */
NULL, 0, /* These unused parameters should be removed! */
w, h);
}
void vp9_convolve8_3by8_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
assert(w <= 16);
assert(h <= 16);
vp9_convolve8(src, src_stride,
temp, 16,
filter_x, x_step_q4,
filter_y, y_step_q4,
w, h);
vp9_convolve_3by8(temp, 16,
dst, dst_stride,
NULL, 0, /* These unused parameters should be removed! */
NULL, 0, /* These unused parameters should be removed! */
w, h);
}
void vp9_convolve8_5by8_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
assert(w <= 16);
assert(h <= 16);
vp9_convolve8(src, src_stride,
temp, 16,
filter_x, x_step_q4,
filter_y, y_step_q4,
w, h);
vp9_convolve_5by8(temp, 16,
dst, dst_stride,
NULL, 0, /* These unused parameters should be removed! */
NULL, 0, /* These unused parameters should be removed! */
w, h);
}
void vp9_convolve8_3qtr_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
assert(w <= 16);
assert(h <= 16);
vp9_convolve8(src, src_stride,
temp, 16,
filter_x, x_step_q4,
filter_y, y_step_q4,
w, h);
vp9_convolve_3qtr(temp, 16,
dst, dst_stride,
NULL, 0, /* These unused parameters should be removed! */
NULL, 0, /* These unused parameters should be removed! */
w, h);
}
void vp9_convolve8_7by8_c(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h) {
/* Fixed size intermediate buffer places limits on parameters. */
DECLARE_ALIGNED_ARRAY(16, uint8_t, temp, 16 * 16);
assert(w <= 16);
assert(h <= 16);
vp9_convolve8(src, src_stride,
temp, 16,
filter_x, x_step_q4,
filter_y, y_step_q4,
w, h);
vp9_convolve_7by8(temp, 16,
dst, dst_stride,
NULL, 0, /* These unused parameters should be removed! */
NULL, 0, /* These unused parameters should be removed! */
w, h);
}
#endif
void vp9_convolve_copy(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
@@ -374,3 +750,101 @@ void vp9_convolve_avg(const uint8_t *src, int src_stride,
dst += dst_stride;
}
}
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
void vp9_convolve_1by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
int w, int h) {
int x, y;
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
dst[x] = combine_1by8(dst[x], src[x]);
}
src += src_stride;
dst += dst_stride;
}
}
void vp9_convolve_qtr(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
int w, int h) {
int x, y;
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
dst[x] = combine_qtr(dst[x], src[x]);
}
src += src_stride;
dst += dst_stride;
}
}
void vp9_convolve_3by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
int w, int h) {
int x, y;
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
dst[x] = combine_3by8(dst[x], src[x]);
}
src += src_stride;
dst += dst_stride;
}
}
void vp9_convolve_5by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
int w, int h) {
int x, y;
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
dst[x] = combine_5by8(dst[x], src[x]);
}
src += src_stride;
dst += dst_stride;
}
}
void vp9_convolve_3qtr(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
int w, int h) {
int x, y;
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
dst[x] = combine_3qtr(dst[x], src[x]);
}
src += src_stride;
dst += dst_stride;
}
}
void vp9_convolve_7by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int filter_x_stride,
const int16_t *filter_y, int filter_y_stride,
int w, int h) {
int x, y;
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
dst[x] = combine_7by8(dst[x], src[x]);
}
src += src_stride;
dst += dst_stride;
}
}
#endif

vp9/common/vp9_convolve.h

@@ -10,6 +10,7 @@
#ifndef VP9_COMMON_CONVOLVE_H_
#define VP9_COMMON_CONVOLVE_H_
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
typedef void (*convolve_fn_t)(const uint8_t *src, int src_stride,
@@ -32,6 +33,50 @@ void vp9_convolve_avg(const uint8_t *src, int src_stride,
const int16_t *filter_y, int y_step_q4,
int w, int h);
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
// Not a convolution: a block weighted (1/8, 7/8) average of (dst, src)
void vp9_convolve_1by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h);
// Not a convolution: a block weighted (1/4, 3/4) average of (dst, src)
void vp9_convolve_qtr(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h);
// Not a convolution: a block weighted (3/8, 5/8) average of (dst, src)
void vp9_convolve_3by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h);
// Not a convolution: a block weighted (5/8, 3/8) average of (dst, src)
void vp9_convolve_5by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h);
// Not a convolution: a block weighted (3/4, 1/4) average of (dst, src)
void vp9_convolve_3qtr(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h);
// Not a convolution: a block weighted (7/8, 1/8) average of (dst, src)
void vp9_convolve_7by8(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4,
int w, int h);
#endif
struct subpix_fn_table {
const int16_t (*filter_x)[8];
const int16_t (*filter_y)[8];

vp9/common/vp9_reconinter.c (file diff suppressed because it is too large)

vp9/common/vp9_rtcd_defs.sh

@@ -276,6 +276,62 @@ specialize vp9_convolve8_avg_horiz ssse3
prototype void vp9_convolve8_avg_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_avg_vert ssse3
#if CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT
prototype void vp9_convolve8_1by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_1by8
prototype void vp9_convolve8_qtr "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_qtr
prototype void vp9_convolve8_3by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_3by8
prototype void vp9_convolve8_5by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_5by8
prototype void vp9_convolve8_3qtr "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_3qtr
prototype void vp9_convolve8_7by8 "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_7by8
prototype void vp9_convolve8_1by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_1by8_horiz
prototype void vp9_convolve8_qtr_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_qtr_horiz
prototype void vp9_convolve8_3by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_3by8_horiz
prototype void vp9_convolve8_5by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_5by8_horiz
prototype void vp9_convolve8_3qtr_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_3qtr_horiz
prototype void vp9_convolve8_7by8_horiz "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_7by8_horiz
prototype void vp9_convolve8_1by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_1by8_vert
prototype void vp9_convolve8_qtr_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_qtr_vert
prototype void vp9_convolve8_3by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_3by8_vert
prototype void vp9_convolve8_5by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_5by8_vert
prototype void vp9_convolve8_3qtr_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_3qtr_vert
prototype void vp9_convolve8_7by8_vert "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"
specialize vp9_convolve8_7by8_vert
#endif
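# Note: unlike the avg variants above, the weighted prototypes are
# specialized with no SIMD targets yet, so only the C implementations
# are used until optimized versions are added.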
#
# dct
#

vp9/decoder/vp9_decodemv.c

@@ -815,7 +815,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
second_ref_fb_idx = cm->active_ref_idx[mbmi->second_ref_frame - 1];
setup_pred_block(&xd->second_pre, &cm->yv12_fb[second_ref_fb_idx],
mb_row, mb_col, &xd->scale_factor[1], &xd->scale_factor_uv[1]);
vp9_find_mv_refs(cm, xd, mi, use_prev_in_find_mv_refs ? prev_mi : NULL,
mbmi->second_ref_frame,

vp9/decoder/vp9_decodframe.c

@@ -195,16 +195,6 @@ static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->dst.y_stride,
xd->dst.uv_stride,
mb_row, mb_col);
-#if CONFIG_COMP_INTERINTRA_PRED
-if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
-vp9_build_interintra_16x16_predictors_mb(xd,
-xd->dst.y_buffer,
-xd->dst.u_buffer,
-xd->dst.v_buffer,
-xd->dst.y_stride,
-xd->dst.uv_stride);
-}
-#endif
}
}
}
@@ -212,7 +202,7 @@ static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
BOOL_DECODER* const bc) {
TX_TYPE tx_type = get_tx_type_16x16(xd, 0);
-#ifdef DEC_DEBUG
+#if 0 // def DEC_DEBUG
if (dec_debug) {
int i;
printf("\n");
@@ -250,7 +240,7 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
// First do Y
// if the first one is DCT_DCT assume all the rest are as well
TX_TYPE tx_type = get_tx_type_8x8(xd, 0);
-#ifdef DEC_DEBUG
+#if 0 // def DEC_DEBUG
if (dec_debug) {
int i;
printf("\n");
@@ -322,7 +312,7 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd);
}
-#ifdef DEC_DEBUG
+#if 0 // def DEC_DEBUG
if (dec_debug) {
int i;
printf("\n");
@@ -340,6 +330,17 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
TX_TYPE tx_type;
int i, eobtotal = 0;
MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
#if 0 // def DEC_DEBUG
if (dec_debug) {
int i;
printf("\n");
printf("predictor\n");
for (i = 0; i < 384; i++) {
printf("%3d ", xd->predictor[i]);
if (i % 16 == 15) printf("\n");
}
}
#endif
if (mode == I8X8_PRED) {
for (i = 0; i < 4; i++) {
int ib = vp9_i8x8_block[i];
@@ -420,7 +421,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->dst.uv_stride,
xd);
} else {
-#ifdef DEC_DEBUG
+#if 0 // def DEC_DEBUG
if (dec_debug) {
int i;
printf("\n");
@@ -834,14 +835,14 @@ static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
skip_recon_mb(pbi, xd, mb_row, mb_col);
return;
}
-#ifdef DEC_DEBUG
+#if 0 // def DEC_DEBUG
if (dec_debug)
printf("Decoding mb: %d %d\n", xd->mode_info_context->mbmi.mode, tx_size);
#endif
// moved to be performed before detokenization
// if (xd->segmentation_enabled)
// mb_init_dequantizer(pbi, xd);
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
@@ -852,7 +853,7 @@ static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
}
} else {
-#ifdef DEC_DEBUG
+#if 0 // def DEC_DEBUG
if (dec_debug)
printf("Decoding mb: %d %d interp %d\n",
xd->mode_info_context->mbmi.mode, tx_size,
@@ -872,6 +873,13 @@ static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
if (dec_debug) {
int i, j;
printf("\n");
printf("predictor y\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++)
printf("%3d ", xd->predictor[i * 16 + j]);
printf("\n");
}
printf("\n");
printf("final y\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++)
@@ -994,9 +1002,10 @@ static void decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc,
mb_col < pc->cur_tile_mb_col_end; mb_col += 4) {
if (vp9_read(bc, pc->sb64_coded)) {
#ifdef DEC_DEBUG
-dec_debug = (pc->current_video_frame == 1 && mb_row == 0 && mb_col == 0);
+dec_debug = (pc->current_video_frame == 11 && pc->show_frame &&
+mb_row == 8 && mb_col == 0);
if (dec_debug)
-printf("Debug\n");
+printf("Debug Decode SB64\n");
#endif
set_offsets(pbi, 64, mb_row, mb_col);
vp9_decode_mb_mode_mv(pbi, xd, mb_row, mb_col, bc);
@@ -1019,8 +1028,10 @@ static void decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc,
if (vp9_read(bc, pc->sb32_coded)) {
#ifdef DEC_DEBUG
-dec_debug = (pc->current_video_frame == 1 &&
-mb_row + y_idx_sb == 0 && mb_col + x_idx_sb == 0);
+dec_debug = (pc->current_video_frame == 11 && pc->show_frame &&
+mb_row + y_idx_sb == 8 && mb_col + x_idx_sb == 0);
+if (dec_debug)
+printf("Debug Decode SB32\n");
#endif
set_offsets(pbi, 32, mb_row + y_idx_sb, mb_col + x_idx_sb);
vp9_decode_mb_mode_mv(pbi,
@@ -1043,8 +1054,10 @@ static void decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc,
continue;
}
#ifdef DEC_DEBUG
-dec_debug = (pc->current_video_frame == 1 &&
-mb_row + y_idx == 0 && mb_col + x_idx == 0);
+dec_debug = (pc->current_video_frame == 11 && pc->show_frame &&
+mb_row + y_idx == 8 && mb_col + x_idx == 0);
+if (dec_debug)
+printf("Debug Decode MB\n");
#endif
set_offsets(pbi, 16, mb_row + y_idx, mb_col + x_idx);

vp9/encoder/vp9_encodeframe.c

@@ -757,7 +757,7 @@ static int pick_mb_modes(VP9_COMP *cpi,
// as a predictor for MBs that follow in the SB
if (cm->frame_type == KEY_FRAME) {
int r, d;
-#ifdef ENC_DEBUG
+#if 0 // ENC_DEBUG
if (enc_debug)
printf("intra pick_mb_modes %d %d\n", mb_row, mb_col);
#endif
@@ -776,7 +776,7 @@ static int pick_mb_modes(VP9_COMP *cpi,
} else {
int seg_id, r, d;
-#ifdef ENC_DEBUG
+#if 0 // ENC_DEBUG
if (enc_debug)
printf("inter pick_mb_modes %d %d\n", mb_row, mb_col);
#endif
@@ -2057,8 +2057,8 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
assert(!xd->mode_info_context->mbmi.sb_type);
#ifdef ENC_DEBUG
-enc_debug = (cpi->common.current_video_frame == 1 &&
-mb_row == 0 && mb_col == 0 && output_enabled);
+enc_debug = (cpi->common.current_video_frame == 11 && cm->show_frame &&
+mb_row == 8 && mb_col == 0 && output_enabled);
if (enc_debug)
printf("Encode MB %d %d output %d\n", mb_row, mb_col, output_enabled);
#endif
@@ -2105,7 +2105,7 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
if (mbmi->ref_frame == INTRA_FRAME) {
-#ifdef ENC_DEBUG
+#if 0 // def ENC_DEBUG
if (enc_debug) {
printf("Mode %d skip %d tx_size %d\n", mbmi->mode, x->skip,
mbmi->txfm_size);
@@ -2316,10 +2316,16 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
const int mis = cm->mode_info_stride;
#ifdef ENC_DEBUG
-enc_debug = (cpi->common.current_video_frame == 1 &&
-mb_row == 0 && mb_col == 0 && output_enabled);
-if (enc_debug)
+enc_debug = (cpi->common.current_video_frame == 11 && cm->show_frame &&
+mb_row == 8 && mb_col == 0 && output_enabled);
+if (enc_debug) {
printf("Encode SB32 %d %d output %d\n", mb_row, mb_col, output_enabled);
+printf("Mode %d skip %d tx_size %d ref %d ref2 %d mv %d %d interp %d\n",
+mi->mbmi.mode, x->skip, mi->mbmi.txfm_size,
+mi->mbmi.ref_frame, mi->mbmi.second_ref_frame,
+mi->mbmi.mv[0].as_mv.row, mi->mbmi.mv[0].as_mv.col,
+mi->mbmi.interp_filter);
+}
#endif
if (cm->frame_type == KEY_FRAME) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
@@ -2537,8 +2543,8 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
const int mis = cm->mode_info_stride;
#ifdef ENC_DEBUG
-enc_debug = (cpi->common.current_video_frame == 1 &&
-mb_row == 0 && mb_col == 0 && output_enabled);
+enc_debug = (cpi->common.current_video_frame == 11 && cm->show_frame &&
+mb_row == 8 && mb_col == 0 && output_enabled);
if (enc_debug)
printf("Encode SB64 %d %d output %d\n", mb_row, mb_col, output_enabled);
#endif

vp9/encoder/vp9_rdopt.c

@@ -2424,13 +2424,15 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm,
&xd->scale_factor[0],
4, 4, 0 /* no avg */, &xd->subpix);
+// TODO(debargha): Make this work properly with the
+// implicit-compoundinter-weight experiment when implicit
+// weighting for splitmv modes is turned on.
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
-vp9_build_inter_predictor(*(bd->base_second_pre) + bd->pre,
-bd->pre_stride,
-bd->predictor, 16,
-&bd->bmi.as_mv[1],
-&xd->scale_factor[1],
-4, 4, 1 /* avg */, &xd->subpix);
+vp9_build_inter_predictor(
+*(bd->base_second_pre) + bd->pre, bd->pre_stride, bd->predictor, 16,
+&bd->bmi.as_mv[1], &xd->scale_factor[1], 4, 4,
+1 << (2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT) /* avg */,
+&xd->subpix);
}
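// Note: 1 << (2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT) evaluates to 1
// (the ordinary average) when the experiment is off and to 4 when it is
// on, which appears to select the (1/2, 1/2) slot of the widened weight
// enumeration, keeping SPLITMV at the default equal weighting either way.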
vp9_subtract_b(be, bd, 16);
@@ -2486,12 +2488,14 @@ static int64_t encode_inter_mb_segment_8x8(VP9_COMMON *const cm,
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
uint8_t **base_pre = which_mv ? bd->base_second_pre : bd->base_pre;
-vp9_build_inter_predictor(*base_pre + bd->pre,
-bd->pre_stride,
-bd->predictor, 16,
-&bd->bmi.as_mv[which_mv],
-&xd->scale_factor[which_mv],
-8, 8, which_mv, &xd->subpix);
+// TODO(debargha): Make this work properly with the
+// implicit-compoundinter-weight experiment when implicit
+// weighting for splitmv modes is turned on.
+vp9_build_inter_predictor(
+*base_pre + bd->pre, bd->pre_stride, bd->predictor, 16,
+&bd->bmi.as_mv[which_mv], &xd->scale_factor[which_mv], 8, 8,
+which_mv << (2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT),
+&xd->subpix);
}
vp9_subtract_4b_c(be, bd, 16);
@@ -3866,27 +3870,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
unsigned int sse, var;
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
-// TODO(jkoleszar): these 2 y/uv should be replaced with one call to
-// vp9_build_interintra_16x16_predictors_mb().
-vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16,
-mb_row, mb_col);
-#if CONFIG_COMP_INTERINTRA_PRED
-if (is_comp_interintra_pred) {
-vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
-}
-#endif
-vp9_build_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
-xd->predictor + 320, 8,
-mb_row, mb_col);
-#if CONFIG_COMP_INTERINTRA_PRED
-if (is_comp_interintra_pred) {
-vp9_build_interintra_16x16_predictors_mbuv(xd, xd->predictor + 256,
-xd->predictor + 320, 8);
-}
-#endif
+vp9_build_inter16x16_predictors_mb(xd, xd->predictor,
+xd->predictor + 256,
+xd->predictor + 320,
+16, 8, mb_row, mb_col);
var = vp9_variance16x16(*(b->base_src), b->src_stride,
xd->predictor, 16, &sse);
// Note our transform coeffs are 8 times an orthogonal transform.
@@ -3986,24 +3973,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
xd->dst.uv_stride,
mb_row, mb_col);
} else {
-// TODO(jkoleszar): These y/uv fns can be replaced with their mb
-// equivalent
-vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16,
-mb_row, mb_col);
-#if CONFIG_COMP_INTERINTRA_PRED
-if (is_comp_interintra_pred) {
-vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
-}
-#endif
-vp9_build_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
-&xd->predictor[320], 8,
-mb_row, mb_col);
-#if CONFIG_COMP_INTERINTRA_PRED
-if (is_comp_interintra_pred) {
-vp9_build_interintra_16x16_predictors_mbuv(xd, &xd->predictor[256],
-&xd->predictor[320], 8);
-}
-#endif
+vp9_build_inter16x16_predictors_mb(xd, xd->predictor,
+xd->predictor + 256,
+xd->predictor + 320,
+16, 8, mb_row, mb_col);
}
}
@@ -4586,7 +4559,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
vpx_memcpy(x->partition_info, &tmp_best_partition,
sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++) {
-xd->block[i].bmi = tmp_best_bmodes[i];
+xd->block[i].bmi = xd->mode_info_context->bmi[i] = tmp_best_bmodes[i];
}
}

vp9/encoder/vp9_temporal_filter.c

@@ -54,7 +54,10 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
&pred[0], 16,
&subpel_mv,
&xd->scale_factor[which_mv],
-16, 16, which_mv, &xd->subpix);
+16, 16,
+which_mv <<
+(2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT),
+&xd->subpix);
stride = (stride + 1) >> 1;
@@ -62,13 +65,19 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
&pred[256], 8,
&fullpel_mv, &subpel_mv,
&xd->scale_factor_uv[which_mv],
-8, 8, which_mv, &xd->subpix);
+8, 8,
+which_mv <<
+(2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT),
+&xd->subpix);
vp9_build_inter_predictor_q4(v_mb_ptr, stride,
&pred[320], 8,
&fullpel_mv, &subpel_mv,
&xd->scale_factor_uv[which_mv],
-8, 8, which_mv, &xd->subpix);
+8, 8,
+which_mv <<
+(2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT),
+&xd->subpix);
}
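// As in vp9_rdopt.c, shifting which_mv by
// 2 * CONFIG_IMPLICIT_COMPOUNDINTER_WEIGHT maps 0/1 onto what appear to
// be the overwrite and equal-average slots of the widened weight
// enumeration, so the temporal filter behaves the same whether or not
// the experiment is enabled.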
void vp9_temporal_filter_apply_c(uint8_t *frame1,

vpxenc.c

@@ -1488,7 +1488,7 @@ static void find_mismatch(vpx_image_t *img1, vpx_image_t *img2,
const unsigned int bsize2 = bsize >> 1;
unsigned int match = 1;
unsigned int i, j;
-yloc[0] = yloc[1] = -1;
+yloc[0] = yloc[1] = yloc[2] = yloc[3] = -1;
for (i = 0, match = 1; match && i < img1->d_h; i += bsize) {
for (j = 0; match && j < img1->d_w; j += bsize) {
int k, l;
@@ -1502,13 +1502,17 @@ static void find_mismatch(vpx_image_t *img1, vpx_image_t *img2,
(i + k) * img2->stride[VPX_PLANE_Y] + j + l)) {
yloc[0] = i + k;
yloc[1] = j + l;
yloc[2] = *(img1->planes[VPX_PLANE_Y] +
(i + k) * img1->stride[VPX_PLANE_Y] + j + l);
yloc[3] = *(img2->planes[VPX_PLANE_Y] +
(i + k) * img2->stride[VPX_PLANE_Y] + j + l);
match = 0;
break;
}
}
}
}
-uloc[0] = uloc[1] = -1;
+uloc[0] = uloc[1] = uloc[2] = uloc[3] = -1;
for (i = 0, match = 1; match && i < (img1->d_h + 1) / 2; i += bsize2) {
for (j = 0; match && j < (img1->d_w + 1) / 2; j += bsize2) {
int k, l;
@@ -1522,13 +1526,17 @@ static void find_mismatch(vpx_image_t *img1, vpx_image_t *img2,
(i + k) * img2->stride[VPX_PLANE_U] + j + l)) {
uloc[0] = i + k;
uloc[1] = j + l;
uloc[2] = *(img1->planes[VPX_PLANE_U] +
(i + k) * img1->stride[VPX_PLANE_U] + j + l);
uloc[3] = *(img2->planes[VPX_PLANE_U] +
(i + k) * img2->stride[VPX_PLANE_U] + j + l);
match = 0;
break;
}
}
}
}
-vloc[0] = vloc[1] = -1;
+vloc[0] = vloc[1] = vloc[2] = vloc[3] = -1;
for (i = 0, match = 1; match && i < (img1->d_h + 1) / 2; i += bsize2) {
for (j = 0; match && j < (img1->d_w + 1) / 2; j += bsize2) {
int k, l;
@@ -1542,6 +1550,10 @@ static void find_mismatch(vpx_image_t *img1, vpx_image_t *img2,
(i + k) * img2->stride[VPX_PLANE_V] + j + l)) {
vloc[0] = i + k;
vloc[1] = j + l;
vloc[2] = *(img1->planes[VPX_PLANE_V] +
(i + k) * img1->stride[VPX_PLANE_V] + j + l);
vloc[3] = *(img2->planes[VPX_PLANE_V] +
(i + k) * img2->stride[VPX_PLANE_V] + j + l);
match = 0;
break;
}
@@ -2454,14 +2466,18 @@ static void test_decode(struct stream_state *stream,
ctx_exit_on_error(&stream->decoder, "Failed to get decoder reference frame");
if (!compare_img(&enc_img, &dec_img)) {
-int y[2], u[2], v[2];
+int y[4], u[4], v[4];
find_mismatch(&enc_img, &dec_img, y, u, v);
stream->decoder.err = 1;
warn_or_exit_on_error(&stream->decoder, fatal == TEST_DECODE_FATAL,
"Stream %d: Encode/decode mismatch on frame %d"
" at Y[%d, %d], U[%d, %d], V[%d, %d]",
"Stream %d: Encode/decode mismatch on frame %d at"
" Y[%d, %d] {%d/%d},"
" U[%d, %d] {%d/%d},"
" V[%d, %d] {%d/%d}",
stream->index, stream->frames_out,
y[0], y[1], u[0], u[1], v[0], v[1]);
y[0], y[1], y[2], y[3],
u[0], u[1], u[2], u[3],
v[0], v[1], v[2], v[3]);
stream->mismatch_seen = stream->frames_out;
}
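// With this change the y/u/v arrays each carry four values: the row and
// column of the first mismatching pixel plus the encoder-side and
// decoder-side pixel values, which the warning above prints as {enc/dec}.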