18e90d744e
This is the initial patch for supporting 1/8th pel motion. Currently if we configure with enable-high-precision-mv, all motion vectors would default to 1/8 pel. Encode and decode syncs fine with the current code. In the next phase the code will be refactored so that we can choose the 1/8 pel mode adaptively at a frame/segment/mb level. Derf results: http://www.corp.google.com/~debargha/vp8_results/enhinterp_hpmv.html (about 0.83% better than 8-tap interpolation) Patch 3: Rebased. Also adding 1/16th pel interpolation for U and V Patch 4: HD results. http://www.corp.google.com/~debargha/vp8_results/enhinterp_hd_hpmv.html Seems impressive (unless I am doing something wrong). Patch 5: Added mmx/sse for bilateral filtering, as well as enforced use of c-versions of subpel filters with 8-taps and 1/16th pel; Also redesigned the 8-tap filters to reduce the cut-off in order to introduce a denoising effect. There is a new configure option sixteenth-subpel-uv which will use 1/16th pel interpolation for uv, if the motion vectors have 1/8 pel accuracy. With the fixes the results are promising on the derf set. The enhanced interpolation option with 8-taps alone gives 3% improvement over the derf set: http://www.corp.google.com/~debargha/vp8_results/enhinterpn.html Results on high precision mv and on the hd set are to follow. Patch 6: Adding a missing condition for CONFIG_SIXTEENTH_SUBPEL_UV in vp8/common/x86/x86_systemdependent.c Patch 7: Cleaning up various debug messages. Patch 8: Merge conflict Change-Id: I5b1d844457aefd7414a9e4e0e06c6ed38fd8cc04
569 lines
14 KiB
C
569 lines
14 KiB
C
/*
|
|
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
|
|
*
|
|
* Use of this source code is governed by a BSD-style license
|
|
* that can be found in the LICENSE file in the root of the source
|
|
* tree. An additional intellectual property rights grant can be found
|
|
* in the file PATENTS. All contributing project authors may
|
|
* be found in the AUTHORS file in the root of the source tree.
|
|
*/
|
|
|
|
#include "vpx_config.h"
|
|
#include "vp8/encoder/variance.h"
|
|
#include "vp8/common/pragmas.h"
|
|
#include "vpx_ports/mem.h"
|
|
|
|
#if CONFIG_SIXTEENTH_SUBPEL_UV
|
|
#define HALFNDX 8
|
|
#else
|
|
#define HALFNDX 4
|
|
#endif
|
|
|
|
extern void filter_block1d_h6_mmx(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
|
|
extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
|
|
extern void filter_block1d8_h6_sse2(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
|
|
extern void filter_block1d8_v6_sse2(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *vp7_filter);
|
|
|
|
extern void vp8_filter_block2d_bil4x4_var_mmx
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
const short *HFilter,
|
|
const short *VFilter,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
|
|
extern unsigned int vp8_get4x4var_mmx
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int source_stride,
|
|
const unsigned char *ref_ptr,
|
|
int recon_stride,
|
|
unsigned int *SSE,
|
|
int *Sum
|
|
);
|
|
|
|
unsigned int vp8_get_mb_ss_sse2
|
|
(
|
|
const short *src_ptr
|
|
);
|
|
unsigned int vp8_get16x16var_sse2
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int source_stride,
|
|
const unsigned char *ref_ptr,
|
|
int recon_stride,
|
|
unsigned int *SSE,
|
|
int *Sum
|
|
);
|
|
unsigned int vp8_get8x8var_sse2
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int source_stride,
|
|
const unsigned char *ref_ptr,
|
|
int recon_stride,
|
|
unsigned int *SSE,
|
|
int *Sum
|
|
);
|
|
void vp8_filter_block2d_bil_var_sse2
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
unsigned int Height,
|
|
int xoffset,
|
|
int yoffset,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
void vp8_half_horiz_vert_variance8x_h_sse2
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
unsigned int Height,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
void vp8_half_horiz_vert_variance16x_h_sse2
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
unsigned int Height,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
void vp8_half_horiz_variance8x_h_sse2
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
unsigned int Height,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
void vp8_half_horiz_variance16x_h_sse2
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
unsigned int Height,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
void vp8_half_vert_variance8x_h_sse2
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
unsigned int Height,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
void vp8_half_vert_variance16x_h_sse2
|
|
(
|
|
const unsigned char *ref_ptr,
|
|
int ref_pixels_per_line,
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
unsigned int Height,
|
|
int *sum,
|
|
unsigned int *sumsquared
|
|
);
|
|
|
|
#if CONFIG_SIXTEENTH_SUBPEL_UV
|
|
DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[16][8]);
|
|
#else
|
|
DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[8][8]);
|
|
#endif
|
|
|
|
unsigned int vp8_variance4x4_wmt(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    /* Variance of a 4x4 block: SSE minus the mean-correction term
     * (sum^2 / 16).  The per-pixel work is done by the MMX kernel. */
    unsigned int sse4;
    int sum4;

    vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride,
                      &sse4, &sum4);

    *sse = sse4;
    return sse4 - ((sum4 * sum4) >> 4);
}
|
|
|
|
unsigned int vp8_variance8x8_wmt
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    /* Variance of an 8x8 block: SSE minus the mean-correction term
     * (sum^2 / 64), computed by the SSE2 kernel. */
    unsigned int sse8;
    int sum8;

    vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride,
                       &sse8, &sum8);

    *sse = sse8;
    return sse8 - ((sum8 * sum8) >> 6);
}
|
|
|
|
|
|
unsigned int vp8_variance16x16_wmt
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    /* Variance of a 16x16 block: SSE minus the mean-correction term
     * (sum^2 / 256), with the pixel loop done by the SSE2 kernel. */
    unsigned int sse0;
    int sum0;

    vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    *sse = sse0;
    /* |sum0| can reach 255*256 = 65280, so sum0*sum0 would overflow a
     * signed 32-bit int (undefined behavior).  Form the square in unsigned
     * arithmetic instead; the result is exact because sum0^2 < 2^32. */
    return (sse0 - (((unsigned int)sum0 * sum0) >> 8));
}
|
|
unsigned int vp8_mse16x16_wmt(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    /* Mean squared error of a 16x16 block: the raw SSE with no mean
     * correction.  The sum produced by the kernel is not used. */
    unsigned int sum_sq;
    int sum_unused;

    vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride,
                         &sum_sq, &sum_unused);
    *sse = sum_sq;
    return sum_sq;
}
|
|
|
|
|
|
unsigned int vp8_variance16x8_wmt
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    /* Variance of a 16x8 block, computed as two side-by-side 8x8 halves
     * whose SSE and sum are accumulated before applying the
     * mean-correction term (sum^2 / 128). */
    unsigned int sse_l, sse_r, sse_total;
    int sum_l, sum_r, sum_total;

    vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride,
                       &sse_l, &sum_l);
    vp8_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride,
                       &sse_r, &sum_r);

    sse_total = sse_l + sse_r;
    sum_total = sum_l + sum_r;
    *sse = sse_total;
    return sse_total - ((sum_total * sum_total) >> 7);
}
|
|
|
|
unsigned int vp8_variance8x16_wmt
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    /* Variance of an 8x16 block, computed as two stacked 8x8 halves
     * (top and bottom) whose SSE and sum are accumulated before applying
     * the mean-correction term (sum^2 / 128). */
    unsigned int sse_top, sse_bot, sse_total;
    int sum_top, sum_bot, sum_total;

    vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride,
                       &sse_top, &sum_top);
    vp8_get8x8var_sse2(src_ptr + 8 * source_stride, source_stride,
                       ref_ptr + 8 * recon_stride, recon_stride,
                       &sse_bot, &sum_bot);

    sse_total = sse_top + sse_bot;
    sum_total = sum_top + sum_bot;
    *sse = sse_total;
    return sse_total - ((sum_total * sum_total) >> 7);
}
|
|
|
|
unsigned int vp8_sub_pixel_variance4x4_wmt
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
int xoffset,
|
|
int yoffset,
|
|
const unsigned char *dst_ptr,
|
|
int dst_pixels_per_line,
|
|
unsigned int *sse
|
|
)
|
|
{
|
|
int xsum;
|
|
unsigned int xxsum;
|
|
vp8_filter_block2d_bil4x4_var_mmx(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line,
|
|
vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
|
|
&xsum, &xxsum
|
|
);
|
|
*sse = xxsum;
|
|
return (xxsum - ((xsum * xsum) >> 4));
|
|
}
|
|
|
|
|
|
unsigned int vp8_sub_pixel_variance8x8_wmt
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
int xoffset,
|
|
int yoffset,
|
|
const unsigned char *dst_ptr,
|
|
int dst_pixels_per_line,
|
|
unsigned int *sse
|
|
)
|
|
{
|
|
int xsum;
|
|
unsigned int xxsum;
|
|
|
|
if (xoffset == HALFNDX && yoffset == 0)
|
|
{
|
|
vp8_half_horiz_variance8x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
&xsum, &xxsum);
|
|
}
|
|
else if (xoffset == 0 && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_vert_variance8x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
&xsum, &xxsum);
|
|
}
|
|
else if (xoffset == HALFNDX && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_horiz_vert_variance8x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
&xsum, &xxsum);
|
|
}
|
|
else
|
|
{
|
|
vp8_filter_block2d_bil_var_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
xoffset, yoffset,
|
|
&xsum, &xxsum);
|
|
}
|
|
|
|
*sse = xxsum;
|
|
return (xxsum - ((xsum * xsum) >> 6));
|
|
}
|
|
|
|
unsigned int vp8_sub_pixel_variance16x16_wmt
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
int xoffset,
|
|
int yoffset,
|
|
const unsigned char *dst_ptr,
|
|
int dst_pixels_per_line,
|
|
unsigned int *sse
|
|
)
|
|
{
|
|
int xsum0, xsum1;
|
|
unsigned int xxsum0, xxsum1;
|
|
|
|
|
|
// note we could avoid these if statements if the calling function
|
|
// just called the appropriate functions inside.
|
|
if (xoffset == HALFNDX && yoffset == 0)
|
|
{
|
|
vp8_half_horiz_variance16x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
&xsum0, &xxsum0);
|
|
}
|
|
else if (xoffset == 0 && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_vert_variance16x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
&xsum0, &xxsum0);
|
|
}
|
|
else if (xoffset == HALFNDX && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_horiz_vert_variance16x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
&xsum0, &xxsum0);
|
|
}
|
|
else
|
|
{
|
|
vp8_filter_block2d_bil_var_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
xoffset, yoffset,
|
|
&xsum0, &xxsum0
|
|
);
|
|
|
|
vp8_filter_block2d_bil_var_sse2(
|
|
src_ptr + 8, src_pixels_per_line,
|
|
dst_ptr + 8, dst_pixels_per_line, 16,
|
|
xoffset, yoffset,
|
|
&xsum1, &xxsum1
|
|
);
|
|
xsum0 += xsum1;
|
|
xxsum0 += xxsum1;
|
|
}
|
|
|
|
*sse = xxsum0;
|
|
return (xxsum0 - ((xsum0 * xsum0) >> 8));
|
|
}
|
|
|
|
unsigned int vp8_sub_pixel_mse16x16_wmt(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    /* Sub-pixel MSE of a 16x16 block is simply the SSE produced by the
     * sub-pixel variance routine; its variance return value is discarded. */
    (void)vp8_sub_pixel_variance16x16_wmt(src_ptr, src_pixels_per_line,
                                          xoffset, yoffset,
                                          dst_ptr, dst_pixels_per_line, sse);
    return *sse;
}
|
|
|
|
unsigned int vp8_sub_pixel_variance16x8_wmt
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
int xoffset,
|
|
int yoffset,
|
|
const unsigned char *dst_ptr,
|
|
int dst_pixels_per_line,
|
|
unsigned int *sse
|
|
|
|
)
|
|
{
|
|
int xsum0, xsum1;
|
|
unsigned int xxsum0, xxsum1;
|
|
|
|
if (xoffset == HALFNDX && yoffset == 0)
|
|
{
|
|
vp8_half_horiz_variance16x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
&xsum0, &xxsum0);
|
|
}
|
|
else if (xoffset == 0 && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_vert_variance16x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
&xsum0, &xxsum0);
|
|
}
|
|
else if (xoffset == HALFNDX && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_horiz_vert_variance16x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
&xsum0, &xxsum0);
|
|
}
|
|
else
|
|
{
|
|
vp8_filter_block2d_bil_var_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 8,
|
|
xoffset, yoffset,
|
|
&xsum0, &xxsum0);
|
|
|
|
vp8_filter_block2d_bil_var_sse2(
|
|
src_ptr + 8, src_pixels_per_line,
|
|
dst_ptr + 8, dst_pixels_per_line, 8,
|
|
xoffset, yoffset,
|
|
&xsum1, &xxsum1);
|
|
xsum0 += xsum1;
|
|
xxsum0 += xxsum1;
|
|
}
|
|
|
|
*sse = xxsum0;
|
|
return (xxsum0 - ((xsum0 * xsum0) >> 7));
|
|
}
|
|
|
|
unsigned int vp8_sub_pixel_variance8x16_wmt
|
|
(
|
|
const unsigned char *src_ptr,
|
|
int src_pixels_per_line,
|
|
int xoffset,
|
|
int yoffset,
|
|
const unsigned char *dst_ptr,
|
|
int dst_pixels_per_line,
|
|
unsigned int *sse
|
|
)
|
|
{
|
|
int xsum;
|
|
unsigned int xxsum;
|
|
|
|
if (xoffset == HALFNDX && yoffset == 0)
|
|
{
|
|
vp8_half_horiz_variance8x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
&xsum, &xxsum);
|
|
}
|
|
else if (xoffset == 0 && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_vert_variance8x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
&xsum, &xxsum);
|
|
}
|
|
else if (xoffset == HALFNDX && yoffset == HALFNDX)
|
|
{
|
|
vp8_half_horiz_vert_variance8x_h_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
&xsum, &xxsum);
|
|
}
|
|
else
|
|
{
|
|
vp8_filter_block2d_bil_var_sse2(
|
|
src_ptr, src_pixels_per_line,
|
|
dst_ptr, dst_pixels_per_line, 16,
|
|
xoffset, yoffset,
|
|
&xsum, &xxsum);
|
|
}
|
|
|
|
*sse = xxsum;
|
|
return (xxsum - ((xsum * xsum) >> 7));
|
|
}
|
|
|
|
|
|
unsigned int vp8_variance_halfpixvar16x16_h_wmt(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse)
{
    /* 16x16 variance at the horizontal half-pel position, using the
     * dedicated SSE2 fast path. */
    int xsum0;
    unsigned int xxsum0;

    vp8_half_horiz_variance16x_h_sse2(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line, 16,
        &xsum0, &xxsum0);

    *sse = xxsum0;
    /* |xsum0| can reach 255*256 = 65280, so the square is formed in
     * unsigned arithmetic to avoid signed 32-bit overflow (UB); the
     * result is exact because xsum0^2 < 2^32. */
    return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
|
|
|
|
|
|
unsigned int vp8_variance_halfpixvar16x16_v_wmt(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse)
{
    /* 16x16 variance at the vertical half-pel position, using the
     * dedicated SSE2 fast path. */
    int xsum0;
    unsigned int xxsum0;

    vp8_half_vert_variance16x_h_sse2(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line, 16,
        &xsum0, &xxsum0);

    *sse = xxsum0;
    /* |xsum0| can reach 255*256 = 65280, so the square is formed in
     * unsigned arithmetic to avoid signed 32-bit overflow (UB); the
     * result is exact because xsum0^2 < 2^32. */
    return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
|
|
|
|
|
|
unsigned int vp8_variance_halfpixvar16x16_hv_wmt(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse)
{
    /* 16x16 variance at the diagonal (horizontal+vertical) half-pel
     * position, using the dedicated SSE2 fast path. */
    int xsum0;
    unsigned int xxsum0;

    vp8_half_horiz_vert_variance16x_h_sse2(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line, 16,
        &xsum0, &xxsum0);

    *sse = xxsum0;
    /* |xsum0| can reach 255*256 = 65280, so the square is formed in
     * unsigned arithmetic to avoid signed 32-bit overflow (UB); the
     * result is exact because xsum0^2 < 2^32. */
    return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
}
|