Add rectangular block-size variance/SAD functions.

With this, the RD loop properly supports rectangular blocks.

Change-Id: Iece79048fb4e84741ee1ada982da129a7bf00470
Authored by Jingning Han on 2013-04-15 10:00:34 -07:00; committed by Ronald S. Bultje
parent 15eac18c4e
commit aaf33d7df5
6 changed files with 382 additions and 44 deletions
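
Note on the new kernels: every vp9_varianceWxH_c added in this change returns the sum of squared differences minus the squared mean over the block, and because each block has a power-of-two pixel count the mean-square term reduces to a right shift. A minimal sketch of that arithmetic follows; the helper below is illustrative only and not part of the patch (the real functions fold it into each variance routine).

#include <stdint.h>

/* Illustrative only: sse is the accumulated sum of squared differences and
 * sum the accumulated signed differences over a W x H block.  With W * H a
 * power of two, dividing sum*sum by the pixel count is a shift: 64x32 and
 * 32x64 blocks use >> 11 (2048 pixels), 32x16 and 16x32 use >> 9 (512). */
static unsigned int block_variance(unsigned int sse, int sum, int log2_pels) {
  return sse - (unsigned int)(((int64_t)sum * sum) >> log2_pels);
}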

View File

@@ -376,6 +376,20 @@ if [ "$CONFIG_VP9_ENCODER" = "yes" ]; then
# variance
[ $arch = "x86_64" ] && mmx_x86_64=mmx && sse2_x86_64=sse2
#if CONFIG_SBSEGMENT
prototype unsigned int vp9_variance32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance32x16
prototype unsigned int vp9_variance16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance16x32
prototype unsigned int vp9_variance64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance64x32
prototype unsigned int vp9_variance32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance32x64
#endif
prototype unsigned int vp9_variance32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_variance32x32
@@ -410,6 +424,20 @@ vp9_variance4x4_mmx=vp9_variance4x4_mmx
prototype unsigned int vp9_sub_pixel_variance64x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance64x64 sse2
#if CONFIG_SBSEGMENT
prototype unsigned int vp9_sub_pixel_variance32x64 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance32x64
prototype unsigned int vp9_sub_pixel_variance64x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance64x32
prototype unsigned int vp9_sub_pixel_variance32x16 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance32x16
prototype unsigned int vp9_sub_pixel_variance16x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance16x32
#endif
prototype unsigned int vp9_sub_pixel_variance32x32 "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp9_sub_pixel_variance32x32 sse2
@@ -436,6 +464,20 @@ vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
prototype unsigned int vp9_sad64x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad64x64 sse2
#if CONFIG_SBSEGMENT
prototype unsigned int vp9_sad32x64 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad32x64
prototype unsigned int vp9_sad64x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad64x32
prototype unsigned int vp9_sad32x16 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad32x16
prototype unsigned int vp9_sad16x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad16x32
#endif
prototype unsigned int vp9_sad32x32 "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp9_sad32x32 sse2
@@ -529,6 +571,20 @@ specialize vp9_sad4x4x8 sse4
prototype void vp9_sad64x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp9_sad64x64x4d sse2
#if CONFIG_SBSEGMENT
prototype void vp9_sad32x64x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp9_sad32x64x4d
prototype void vp9_sad64x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp9_sad64x32x4d
prototype void vp9_sad32x16x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp9_sad32x16x4d
prototype void vp9_sad16x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp9_sad16x32x4d
#endif
prototype void vp9_sad32x32x4d "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp9_sad32x32x4d sse2
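
These prototype/specialize entries feed libvpx's run-time CPU detection (RTCD) code generator. Since no SIMD variants are listed for the new rectangular sizes, the generated header is expected to bind each new symbol straight to its C implementation, roughly as sketched below; the exact generated form is an assumption, only the prototype text above comes from the patch.

/* Assumed shape of the generated declaration for one unspecialized entry. */
unsigned int vp9_variance32x16_c(const uint8_t *src_ptr, int source_stride,
                                 const uint8_t *ref_ptr, int ref_stride,
                                 unsigned int *sse);
#define vp9_variance32x16 vp9_variance32x16_c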

View File

@@ -1560,6 +1560,27 @@ VP9_PTR vp9_create_compressor(VP9_CONFIG *oxcf) {
    cpi->fn_ptr[BT].sdx8f            = SDX8F; \
    cpi->fn_ptr[BT].sdx4df           = SDX4DF;

#if CONFIG_SBSEGMENT
    BFP(BLOCK_32X16, vp9_sad32x16, vp9_variance32x16, vp9_sub_pixel_variance32x16,
        NULL, NULL,
        NULL, NULL, NULL,
        vp9_sad32x16x4d)
    BFP(BLOCK_16X32, vp9_sad16x32, vp9_variance16x32, vp9_sub_pixel_variance16x32,
        NULL, NULL,
        NULL, NULL, NULL,
        vp9_sad16x32x4d)
    BFP(BLOCK_64X32, vp9_sad64x32, vp9_variance64x32, vp9_sub_pixel_variance64x32,
        NULL, NULL,
        NULL, NULL, NULL,
        vp9_sad64x32x4d)
    BFP(BLOCK_32X64, vp9_sad32x64, vp9_variance32x64, vp9_sub_pixel_variance32x64,
        NULL, NULL,
        NULL, NULL, NULL,
        vp9_sad32x64x4d)
#endif
    BFP(BLOCK_32X32, vp9_sad32x32, vp9_variance32x32, vp9_sub_pixel_variance32x32,
        vp9_variance_halfpixvar32x32_h, vp9_variance_halfpixvar32x32_v,
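
Each BFP invocation fills one per-block-size entry of the encoder's function-pointer table; passing NULL for the half-pel variance and multi-candidate SAD slots simply leaves those shortcuts unavailable for the new rectangular sizes. A rough expansion of the first new call is shown below; only the .sdx8f and .sdx4df field names are visible in the macro tail above, so the remaining field names are assumptions based on argument order.

/* Approximate expansion of BFP(BLOCK_32X16, ...); field names other than
 * sdx8f and sdx4df are inferred, not taken from the patch. */
cpi->fn_ptr[BLOCK_32X16].sdf            = vp9_sad32x16;
cpi->fn_ptr[BLOCK_32X16].vf             = vp9_variance32x16;
cpi->fn_ptr[BLOCK_32X16].svf            = vp9_sub_pixel_variance32x16;
cpi->fn_ptr[BLOCK_32X16].svf_halfpix_h  = NULL;
cpi->fn_ptr[BLOCK_32X16].svf_halfpix_v  = NULL;
cpi->fn_ptr[BLOCK_32X16].svf_halfpix_hv = NULL;
cpi->fn_ptr[BLOCK_32X16].sdx3f          = NULL;
cpi->fn_ptr[BLOCK_32X16].sdx8f          = NULL;
cpi->fn_ptr[BLOCK_32X16].sdx4df         = vp9_sad32x16x4d;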

View File

@@ -281,6 +281,12 @@ enum BlockSize {
  BLOCK_16X16,
  BLOCK_MAX_SEGMENTS,
  BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
#if CONFIG_SBSEGMENT
  BLOCK_32X16,
  BLOCK_16X32,
  BLOCK_64X32,
  BLOCK_32X64,
#endif
  BLOCK_64X64,
  BLOCK_MAX_SB_SEGMENTS,
};

View File

@@ -2813,7 +2813,15 @@ static void model_rd_from_var_lapndz(int var, int n, int qstep,
static enum BlockSize y_to_uv_block_size(enum BlockSize bs) {
  switch (bs) {
    case BLOCK_64X64: return BLOCK_32X32;
#if CONFIG_SBSEGMENT
    case BLOCK_64X32: return BLOCK_32X16;
    case BLOCK_32X64: return BLOCK_16X32;
#endif
    case BLOCK_32X32: return BLOCK_16X16;
#if CONFIG_SBSEGMENT
    case BLOCK_32X16: return BLOCK_16X8;
    case BLOCK_16X32: return BLOCK_8X16;
#endif
    case BLOCK_16X16: return BLOCK_8X8;
    default:
      assert(0);
@@ -2824,7 +2832,15 @@ static enum BlockSize y_to_uv_block_size(enum BlockSize bs) {
static enum BlockSize y_bsizet_to_block_size(BLOCK_SIZE_TYPE bs) {
  switch (bs) {
    case BLOCK_SIZE_SB64X64: return BLOCK_64X64;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB64X32: return BLOCK_64X32;
    case BLOCK_SIZE_SB32X64: return BLOCK_32X64;
#endif
    case BLOCK_SIZE_SB32X32: return BLOCK_32X32;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB32X16: return BLOCK_32X16;
    case BLOCK_SIZE_SB16X32: return BLOCK_16X32;
#endif
    case BLOCK_SIZE_MB16X16: return BLOCK_16X16;
    default:
      assert(0);
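
These two helpers let the model-based RD path reuse the luma block-size enum for chroma: with 4:2:0 subsampling, the UV planes of a rectangular luma block are half as wide and half as tall. A small usage sketch follows; the call site is illustrative, and the resulting values follow directly from the switch cases above.

/* Illustrative only: block sizes used for the luma and 4:2:0 chroma
 * distortion models of a 64x32 superblock partition. */
enum BlockSize y_bs  = y_bsizet_to_block_size(BLOCK_SIZE_SB64X32);  /* BLOCK_64X32 */
enum BlockSize uv_bs = y_to_uv_block_size(y_bs);                    /* BLOCK_32X16 */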

View File

@@ -23,6 +23,54 @@ unsigned int vp9_sad64x64_c(const uint8_t *src_ptr,
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 64, 64);
}

#if CONFIG_SBSEGMENT
unsigned int vp9_sad64x32_c(const uint8_t *src_ptr,
                            int src_stride,
                            const uint8_t *ref_ptr,
                            int ref_stride,
                            unsigned int max_sad) {
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 64, 32);
}

void vp9_sad64x32x4d_c(const uint8_t *src_ptr,
                       int src_stride,
                       const uint8_t* const ref_ptr[],
                       int ref_stride,
                       unsigned int *sad_array) {
  sad_array[0] = vp9_sad64x32(src_ptr, src_stride,
                              ref_ptr[0], ref_stride, 0x7fffffff);
  sad_array[1] = vp9_sad64x32(src_ptr, src_stride,
                              ref_ptr[1], ref_stride, 0x7fffffff);
  sad_array[2] = vp9_sad64x32(src_ptr, src_stride,
                              ref_ptr[2], ref_stride, 0x7fffffff);
  sad_array[3] = vp9_sad64x32(src_ptr, src_stride,
                              ref_ptr[3], ref_stride, 0x7fffffff);
}

unsigned int vp9_sad32x64_c(const uint8_t *src_ptr,
                            int src_stride,
                            const uint8_t *ref_ptr,
                            int ref_stride,
                            unsigned int max_sad) {
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 32, 64);
}

void vp9_sad32x64x4d_c(const uint8_t *src_ptr,
                       int src_stride,
                       const uint8_t* const ref_ptr[],
                       int ref_stride,
                       unsigned int *sad_array) {
  sad_array[0] = vp9_sad32x64(src_ptr, src_stride,
                              ref_ptr[0], ref_stride, 0x7fffffff);
  sad_array[1] = vp9_sad32x64(src_ptr, src_stride,
                              ref_ptr[1], ref_stride, 0x7fffffff);
  sad_array[2] = vp9_sad32x64(src_ptr, src_stride,
                              ref_ptr[2], ref_stride, 0x7fffffff);
  sad_array[3] = vp9_sad32x64(src_ptr, src_stride,
                              ref_ptr[3], ref_stride, 0x7fffffff);
}
#endif
unsigned int vp9_sad32x32_c(const uint8_t *src_ptr,
                            int src_stride,
                            const uint8_t *ref_ptr,
@@ -31,6 +79,54 @@ unsigned int vp9_sad32x32_c(const uint8_t *src_ptr,
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 32, 32);
}

#if CONFIG_SBSEGMENT
unsigned int vp9_sad32x16_c(const uint8_t *src_ptr,
                            int src_stride,
                            const uint8_t *ref_ptr,
                            int ref_stride,
                            unsigned int max_sad) {
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 32, 16);
}

void vp9_sad32x16x4d_c(const uint8_t *src_ptr,
                       int src_stride,
                       const uint8_t* const ref_ptr[],
                       int ref_stride,
                       unsigned int *sad_array) {
  sad_array[0] = vp9_sad32x16(src_ptr, src_stride,
                              ref_ptr[0], ref_stride, 0x7fffffff);
  sad_array[1] = vp9_sad32x16(src_ptr, src_stride,
                              ref_ptr[1], ref_stride, 0x7fffffff);
  sad_array[2] = vp9_sad32x16(src_ptr, src_stride,
                              ref_ptr[2], ref_stride, 0x7fffffff);
  sad_array[3] = vp9_sad32x16(src_ptr, src_stride,
                              ref_ptr[3], ref_stride, 0x7fffffff);
}

unsigned int vp9_sad16x32_c(const uint8_t *src_ptr,
                            int src_stride,
                            const uint8_t *ref_ptr,
                            int ref_stride,
                            unsigned int max_sad) {
  return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 32);
}

void vp9_sad16x32x4d_c(const uint8_t *src_ptr,
                       int src_stride,
                       const uint8_t* const ref_ptr[],
                       int ref_stride,
                       unsigned int *sad_array) {
  sad_array[0] = vp9_sad16x32(src_ptr, src_stride,
                              ref_ptr[0], ref_stride, 0x7fffffff);
  sad_array[1] = vp9_sad16x32(src_ptr, src_stride,
                              ref_ptr[1], ref_stride, 0x7fffffff);
  sad_array[2] = vp9_sad16x32(src_ptr, src_stride,
                              ref_ptr[2], ref_stride, 0x7fffffff);
  sad_array[3] = vp9_sad16x32(src_ptr, src_stride,
                              ref_ptr[3], ref_stride, 0x7fffffff);
}
#endif

unsigned int vp9_sad16x16_c(const uint8_t *src_ptr,
                            int src_stride,
                            const uint8_t *ref_ptr,
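
All of the new C SAD wrappers defer to the file's generic sad_mx_n_c kernel, which is not part of this hunk. For orientation, the sketch below shows what an m-wide, n-tall SAD accumulation looks like; the real helper is assumed to follow the same row-by-row pattern, and details such as the handling of the max_sad argument may differ.

#include <stdint.h>
#include <stdlib.h>  /* abs() */

/* Sketch of a generic m x n sum of absolute differences. */
static unsigned int sad_mxn_sketch(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   int m, int n) {
  unsigned int sad = 0;
  int r, c;

  for (r = 0; r < n; r++) {
    for (c = 0; c < m; c++)
      sad += abs(src_ptr[c] - ref_ptr[c]);
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
  return sad;
}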

View File

@@ -24,6 +24,144 @@ unsigned int vp9_get_mb_ss_c(const int16_t *src_ptr) {
  return sum;
}

#if CONFIG_SBSEGMENT
unsigned int vp9_variance64x32_c(const uint8_t *src_ptr,
                                 int source_stride,
                                 const uint8_t *ref_ptr,
                                 int recon_stride,
                                 unsigned int *sse) {
  unsigned int var;
  int avg;

  variance(src_ptr, source_stride, ref_ptr, recon_stride, 64, 32, &var, &avg);
  *sse = var;
  return (var - (((int64_t)avg * avg) >> 11));
}

unsigned int vp9_sub_pixel_variance64x32_c(const uint8_t *src_ptr,
                                           int src_pixels_per_line,
                                           int xoffset,
                                           int yoffset,
                                           const uint8_t *dst_ptr,
                                           int dst_pixels_per_line,
                                           unsigned int *sse) {
  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
  uint8_t temp2[68 * 64];
  const int16_t *hfilter, *vfilter;

  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);

  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 33, 64, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 32, 64, vfilter);

  return vp9_variance64x32_c(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
}

unsigned int vp9_variance32x64_c(const uint8_t *src_ptr,
                                 int source_stride,
                                 const uint8_t *ref_ptr,
                                 int recon_stride,
                                 unsigned int *sse) {
  unsigned int var;
  int avg;

  variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 64, &var, &avg);
  *sse = var;
  return (var - (((int64_t)avg * avg) >> 11));
}

unsigned int vp9_sub_pixel_variance32x64_c(const uint8_t *src_ptr,
                                           int src_pixels_per_line,
                                           int xoffset,
                                           int yoffset,
                                           const uint8_t *dst_ptr,
                                           int dst_pixels_per_line,
                                           unsigned int *sse) {
  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
  uint8_t temp2[68 * 64];
  const int16_t *hfilter, *vfilter;

  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);

  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 65, 32, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 64, 32, vfilter);

  return vp9_variance32x64_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}

unsigned int vp9_variance32x16_c(const uint8_t *src_ptr,
                                 int source_stride,
                                 const uint8_t *ref_ptr,
                                 int recon_stride,
                                 unsigned int *sse) {
  unsigned int var;
  int avg;

  variance(src_ptr, source_stride, ref_ptr, recon_stride, 32, 16, &var, &avg);
  *sse = var;
  return (var - (((int64_t)avg * avg) >> 9));
}

unsigned int vp9_sub_pixel_variance32x16_c(const uint8_t *src_ptr,
                                           int src_pixels_per_line,
                                           int xoffset,
                                           int yoffset,
                                           const uint8_t *dst_ptr,
                                           int dst_pixels_per_line,
                                           unsigned int *sse) {
  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
  uint8_t temp2[36 * 32];
  const int16_t *hfilter, *vfilter;

  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);

  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 17, 32, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 16, 32, vfilter);

  return vp9_variance32x16_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}

unsigned int vp9_variance16x32_c(const uint8_t *src_ptr,
                                 int source_stride,
                                 const uint8_t *ref_ptr,
                                 int recon_stride,
                                 unsigned int *sse) {
  unsigned int var;
  int avg;

  variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 32, &var, &avg);
  *sse = var;
  return (var - (((int64_t)avg * avg) >> 9));
}

unsigned int vp9_sub_pixel_variance16x32_c(const uint8_t *src_ptr,
                                           int src_pixels_per_line,
                                           int xoffset,
                                           int yoffset,
                                           const uint8_t *dst_ptr,
                                           int dst_pixels_per_line,
                                           unsigned int *sse) {
  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
  uint8_t temp2[36 * 32];
  const int16_t *hfilter, *vfilter;

  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);

  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 33, 16, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 32, 16, vfilter);

  return vp9_variance16x32_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
#endif
unsigned int vp9_variance64x64_c(const uint8_t *src_ptr,
                                 int source_stride,
                                 const uint8_t *ref_ptr,
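
A note on the sub-pixel variants above: var_filter_block2d_bil_first_pass is assumed to pack its uint16_t intermediate densely, one output_width-sized row after another, so the second pass and the final full-pel variance call must walk that intermediate with a stride equal to the block width. That matches the existing 16x8 and 8x16 code later in this file (strides 16 and 8), and it is why the 32x64 and 16x32 paths above use strides of 32 and 16. In sketch form:

/* Assumed intermediate layout for a W-wide, H-tall block (one extra row is
 * filtered so the vertical tap has a neighbour):
 *
 *   fdata3[0     .. W-1  ]  row 0
 *   fdata3[W     .. 2*W-1]  row 1
 *   ...                     (H + 1 rows in total, stride == W)
 *
 * so the matching calls are
 *   var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, vfilter);
 *   vp9_varianceWxH_c(temp2, W, dst_ptr, dst_pixels_per_line, sse);        */
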
@@ -139,17 +277,18 @@ unsigned int vp9_sub_pixel_variance4x4_c(const uint8_t *src_ptr,
                                         int dst_pixels_per_line,
                                         unsigned int *sse) {
  uint8_t temp2[20 * 16];
  const int16_t *HFilter, *VFilter;
  uint16_t FData3[5 * 4];  // Temp data buffer used in filtering
  const int16_t *hfilter, *vfilter;
  uint16_t fdata3[5 * 4];  // Temp data buffer used in filtering
  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  // First filter 1d Horizontal
  var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 5, 4, hfilter);
  // Now filter Vertically
  var_filter_block2d_bil_second_pass(FData3, temp2, 4, 4, 4, 4, VFilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 4, 4, 4, 4, vfilter);
  return vp9_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
}
@@ -162,15 +301,16 @@ unsigned int vp9_sub_pixel_variance8x8_c(const uint8_t *src_ptr,
                                         const uint8_t *dst_ptr,
                                         int dst_pixels_per_line,
                                         unsigned int *sse) {
  uint16_t FData3[9 * 8];  // Temp data buffer used in filtering
  uint16_t fdata3[9 * 8];  // Temp data buffer used in filtering
  uint8_t temp2[20 * 16];
  const int16_t *HFilter, *VFilter;
  const int16_t *hfilter, *vfilter;
  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 9, 8, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 8, 8, vfilter);
  return vp9_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}
@@ -182,15 +322,16 @@ unsigned int vp9_sub_pixel_variance16x16_c(const uint8_t *src_ptr,
                                           const uint8_t *dst_ptr,
                                           int dst_pixels_per_line,
                                           unsigned int *sse) {
  uint16_t FData3[17 * 16];  // Temp data buffer used in filtering
  uint16_t fdata3[17 * 16];  // Temp data buffer used in filtering
  uint8_t temp2[20 * 16];
  const int16_t *HFilter, *VFilter;
  const int16_t *hfilter, *vfilter;
  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 17, 16, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 16, 16, vfilter);
  return vp9_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
@@ -202,16 +343,16 @@ unsigned int vp9_sub_pixel_variance64x64_c(const uint8_t *src_ptr,
                                           const uint8_t *dst_ptr,
                                           int dst_pixels_per_line,
                                           unsigned int *sse) {
  uint16_t FData3[65 * 64];  // Temp data buffer used in filtering
  uint16_t fdata3[65 * 64];  // Temp data buffer used in filtering
  uint8_t temp2[68 * 64];
  const int16_t *HFilter, *VFilter;
  const int16_t *hfilter, *vfilter;
  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line,
                                    1, 65, 64, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 64, 64, 64, 64, VFilter);
  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 65, 64, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 64, 64, 64, 64, vfilter);
  return vp9_variance64x64_c(temp2, 64, dst_ptr, dst_pixels_per_line, sse);
}
@@ -223,15 +364,16 @@ unsigned int vp9_sub_pixel_variance32x32_c(const uint8_t *src_ptr,
                                           const uint8_t *dst_ptr,
                                           int dst_pixels_per_line,
                                           unsigned int *sse) {
  uint16_t FData3[33 * 32];  // Temp data buffer used in filtering
  uint16_t fdata3[33 * 32];  // Temp data buffer used in filtering
  uint8_t temp2[36 * 32];
  const int16_t *HFilter, *VFilter;
  const int16_t *hfilter, *vfilter;
  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 33, 32, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 32, 32, 32, 32, VFilter);
  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 33, 32, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 32, 32, 32, 32, vfilter);
  return vp9_variance32x32_c(temp2, 32, dst_ptr, dst_pixels_per_line, sse);
}
@@ -363,15 +505,16 @@ unsigned int vp9_sub_pixel_variance16x8_c(const uint8_t *src_ptr,
                                          const uint8_t *dst_ptr,
                                          int dst_pixels_per_line,
                                          unsigned int *sse) {
  uint16_t FData3[16 * 9];  // Temp data buffer used in filtering
  uint16_t fdata3[16 * 9];  // Temp data buffer used in filtering
  uint8_t temp2[20 * 16];
  const int16_t *HFilter, *VFilter;
  const int16_t *hfilter, *vfilter;
  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 9, 16, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 16, 16, 8, 16, vfilter);
  return vp9_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
}
@@ -383,16 +526,16 @@ unsigned int vp9_sub_pixel_variance8x16_c(const uint8_t *src_ptr,
                                          const uint8_t *dst_ptr,
                                          int dst_pixels_per_line,
                                          unsigned int *sse) {
  uint16_t FData3[9 * 16];  // Temp data buffer used in filtering
  uint16_t fdata3[9 * 16];  // Temp data buffer used in filtering
  uint8_t temp2[20 * 16];
  const int16_t *HFilter, *VFilter;
  const int16_t *hfilter, *vfilter;
  HFilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  VFilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  hfilter = VP9_BILINEAR_FILTERS_2TAP(xoffset);
  vfilter = VP9_BILINEAR_FILTERS_2TAP(yoffset);
  var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line,
                                    1, 17, 8, HFilter);
  var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
  var_filter_block2d_bil_first_pass(src_ptr, fdata3, src_pixels_per_line,
                                    1, 17, 8, hfilter);
  var_filter_block2d_bil_second_pass(fdata3, temp2, 8, 8, 16, 8, vfilter);
  return vp9_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
}