/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_ports/config.h"
#include "vpx_ports/x86.h"
#include "vp8/encoder/variance.h"
#include "vp8/encoder/onyx_int.h"


#if HAVE_MMX
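/* The functions below are thin C wrappers: each one adapts a hand-written
 * assembly routine (the *_impl symbol declared next to it) to the
 * function-pointer signature expected by the RTCD tables that
 * vp8_arch_x86_encoder_init() fills in at the bottom of this file.
 */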
static void short_fdct8x4_mmx(short *input, short *output, int pitch)
{
    /* The 8x4 forward DCT is built from two 4x4 passes: the second call
     * starts four columns into the input block and writes the second
     * group of sixteen output coefficients.
     */
    vp8_short_fdct4x4_mmx(input,     output,      pitch);
    vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
}

int vp8_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr,
                                 short *qcoeff_ptr, short *dequant_ptr,
                                 short *scan_mask, short *round_ptr,
                                 short *quant_ptr, short *dqcoeff_ptr);
static void fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
{
    short *scan_mask   = vp8_default_zig_zag_mask; /* d->scan_order_mask_ptr; */
    short *coeff_ptr   = b->coeff;
    short *zbin_ptr    = b->zbin;
    short *round_ptr   = b->round;
    short *quant_ptr   = b->quant_fast;
    short *qcoeff_ptr  = d->qcoeff;
    short *dqcoeff_ptr = d->dqcoeff;
    short *dequant_ptr = d->dequant;

    d->eob = vp8_fast_quantize_b_impl_mmx(
                 coeff_ptr,
                 zbin_ptr,
                 qcoeff_ptr,
                 dequant_ptr,
                 scan_mask,
                 round_ptr,
                 quant_ptr,
                 dqcoeff_ptr
             );
}

int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
static int mbblock_error_mmx(MACROBLOCK *mb, int dc)
{
    short *coeff_ptr = mb->block[0].coeff;
    short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
    return vp8_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, dc);
}
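
/* In the macroblock coefficient layout, the sixteen 4x4 luma blocks occupy
 * the first 16 * 16 = 256 shorts, so &coeff[256] below is where the U and V
 * plane coefficients begin; this helper sums the chroma error only.
 */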
int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
static int mbuverror_mmx(MACROBLOCK *mb)
{
    short *s_ptr = &mb->coeff[256];
    short *d_ptr = &mb->e_mbd.dqcoeff[256];
    return vp8_mbuverror_mmx_impl(s_ptr, d_ptr);
}

void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
                             short *diff, unsigned char *predictor,
                             int pitch);
static void subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
{
    unsigned char *z = *(be->base_src) + be->src;
    unsigned int src_stride = be->src_stride;
    short *diff = &be->src_diff[0];
    unsigned char *predictor = &bd->predictor[0];
    vp8_subtract_b_mmx_impl(z, src_stride, diff, predictor, pitch);
}

#endif

#if HAVE_SSE2
int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
static int mbblock_error_xmm(MACROBLOCK *mb, int dc)
{
    short *coeff_ptr = mb->block[0].coeff;
    short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
    return vp8_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc);
}

int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
static int mbuverror_xmm(MACROBLOCK *mb)
{
    short *s_ptr = &mb->coeff[256];
    short *d_ptr = &mb->e_mbd.dqcoeff[256];
    return vp8_mbuverror_xmm_impl(s_ptr, d_ptr);
}

void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
                              short *diff, unsigned char *predictor,
                              int pitch);
static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
{
    unsigned char *z = *(be->base_src) + be->src;
    unsigned int src_stride = be->src_stride;
    short *diff = &be->src_diff[0];
    unsigned char *predictor = &bd->predictor[0];
    vp8_subtract_b_sse2_impl(z, src_stride, diff, predictor, pitch);
}

#endif

#if HAVE_SSSE3
int vp8_fast_quantize_b_impl_ssse3(short *coeff_ptr,
                                   short *qcoeff_ptr, short *dequant_ptr,
                                   short *round_ptr,
                                   short *quant_ptr, short *dqcoeff_ptr);
static void fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d)
{
    d->eob = vp8_fast_quantize_b_impl_ssse3(
                 b->coeff,
                 d->qcoeff,
                 d->dequant,
                 b->round,
                 b->quant_fast,
                 d->dqcoeff
             );
}
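
/* The two helpers declared below accumulate the raw SSIM statistics over a
 * window: the sums of source and reference samples, both sums of squares,
 * and the cross sum, from which the caller derives the means, variances
 * and covariance that the SSIM formula needs.
 */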
#if CONFIG_PSNR
#if ARCH_X86_64
typedef void ssimpf
(
    unsigned char *s,
    int sp,
    unsigned char *r,
    int rp,
    unsigned long *sum_s,
    unsigned long *sum_r,
    unsigned long *sum_sq_s,
    unsigned long *sum_sq_r,
    unsigned long *sum_sxr
);

extern ssimpf vp8_ssim_parms_16x16_sse3;
extern ssimpf vp8_ssim_parms_8x8_sse3;
#endif
#endif
#endif


void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
{
#if CONFIG_RUNTIME_CPU_DETECT
    int flags = x86_simd_caps();
    int mmx_enabled = flags & HAS_MMX;
    int xmm_enabled = flags & HAS_SSE;
    int wmt_enabled = flags & HAS_SSE2;
    int SSE3Enabled = flags & HAS_SSE3;
    int SSSE3Enabled = flags & HAS_SSSE3;
    int SSE4_1Enabled = flags & HAS_SSE4_1;

    /* Note:
     *
     * This platform can also be built without runtime CPU detection. If
     * you modify any of the function mappings present in this file, be
     * sure to also update them in the static mappings
     * (<arch>/filename_<arch>.h).
     */
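
    /* As a minimal sketch of that static alternative (file and macro names
     * follow the convention above; treat them as illustrative): with
     * CONFIG_RUNTIME_CPU_DETECT disabled, a per-architecture header pins
     * each symbol at compile time instead of assigning a pointer here, e.g.
     *
     *     #undef  vp8_variance_sad16x16
     *     #define vp8_variance_sad16x16 vp8_sad16x16_mmx
     *
     * which is why the two sets of mappings must stay in sync.
     */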

    /* Override default functions with fastest ones for this CPU. */
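
    /* For reference, callers reach these pointers through the RTCD invoke
     * macros rather than by naming a CPU-specific version directly; a
     * sketch (macro spelling assumed from this tree's rtcd headers):
     *
     *     best_sad = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)(
     *                    src_ptr, src_stride, ref_ptr, ref_stride, max_sad);
     */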
#if HAVE_MMX
    if (mmx_enabled)
    {
        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
        cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
        cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
        cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
        cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;

        cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
        cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
        cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
        cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
        cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx;

        cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
        cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
        cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
        cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
        cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
        cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
        cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;

        cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
        cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;

        cpi->rtcd.variance.get16x16prederror = vp8_get16x16pred_error_mmx;
        cpi->rtcd.variance.get8x8var = vp8_get8x8var_mmx;
        cpi->rtcd.variance.get16x16var = vp8_get16x16var_mmx;
        cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_mmx;

        cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
        cpi->rtcd.fdct.short8x4 = short_fdct8x4_mmx;
        cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
        cpi->rtcd.fdct.fast8x4 = short_fdct8x4_mmx;

        cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;

        cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
        cpi->rtcd.encodemb.mberr = mbblock_error_mmx;
        cpi->rtcd.encodemb.mbuverr = mbuverror_mmx;
        cpi->rtcd.encodemb.subb = subtract_b_mmx;
        cpi->rtcd.encodemb.submby = vp8_subtract_mby_mmx;
        cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_mmx;

        /*cpi->rtcd.quantize.fastquantb = fast_quantize_b_mmx;*/
    }
#endif
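
    /* The capability checks below run from oldest to newest extension, so a
     * later block deliberately overwrites pointers set by an earlier one;
     * the fastest version this CPU supports ends up in the table.
     */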

#if HAVE_SSE2
    if (wmt_enabled)
    {
        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_wmt;
        cpi->rtcd.variance.sad16x8 = vp8_sad16x8_wmt;
        cpi->rtcd.variance.sad8x16 = vp8_sad8x16_wmt;
        cpi->rtcd.variance.sad8x8 = vp8_sad8x8_wmt;
        cpi->rtcd.variance.sad4x4 = vp8_sad4x4_wmt;

        cpi->rtcd.variance.var4x4 = vp8_variance4x4_wmt;
        cpi->rtcd.variance.var8x8 = vp8_variance8x8_wmt;
        cpi->rtcd.variance.var8x16 = vp8_variance8x16_wmt;
        cpi->rtcd.variance.var16x8 = vp8_variance16x8_wmt;
        cpi->rtcd.variance.var16x16 = vp8_variance16x16_wmt;

        cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
        cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
        cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
        cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
        cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
        cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
        cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;

        cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
        cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;

        cpi->rtcd.variance.get16x16prederror = vp8_get16x16pred_error_sse2;
        cpi->rtcd.variance.get8x8var = vp8_get8x8var_sse2;
        cpi->rtcd.variance.get16x16var = vp8_get16x16var_sse2;

        /* cpi->rtcd.variance.get4x4sse_cs not implemented for wmt */

        cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_sse2;
        cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_sse2;
        cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_sse2;
        cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_sse2;

        cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2;

        cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
        cpi->rtcd.encodemb.mberr = mbblock_error_xmm;
        cpi->rtcd.encodemb.mbuverr = mbuverror_xmm;
        cpi->rtcd.encodemb.subb = subtract_b_sse2;
        cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
        cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;

        cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse2;
        cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_sse2;

#if !(CONFIG_REALTIME_ONLY)
        cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
#endif
    }
#endif

#if HAVE_SSE3
    if (SSE3Enabled)
    {
        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_sse3;
        cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_sse3;
        cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_sse3;
        cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_sse3;
        cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_sse3;
        cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_sse3;
#if !(CONFIG_REALTIME_ONLY)
        cpi->rtcd.search.full_search = vp8_full_search_sadx3;
#endif
        cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
        cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
        cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
        cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
        cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
        cpi->rtcd.search.diamond_search = vp8_diamond_search_sadx4;
    }
#endif

#if HAVE_SSSE3
    if (SSSE3Enabled)
    {
        cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
        cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_ssse3;

        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;

        cpi->rtcd.quantize.fastquantb = fast_quantize_b_ssse3;

#if CONFIG_PSNR
#if ARCH_X86_64
        cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse3;
        cpi->rtcd.variance.ssimpf = vp8_ssim_parms_16x16_sse3;
#endif
#endif
    }
#endif

#if HAVE_SSE4_1
    if (SSE4_1Enabled)
    {
        cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_sse4;
        cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_sse4;
        cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_sse4;
        cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_sse4;
        cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
#if !(CONFIG_REALTIME_ONLY)
        cpi->rtcd.search.full_search = vp8_full_search_sadx8;
#endif
    }
#endif

#endif
}