fix --disable-runtime-cpu-detect on x86
Change-Id: Ib8e429152c9a8b6032be22b5faac802aa8224caa
commit 92b0e544f3
parent bf6b314d89
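Why the renames fix the build: with --disable-runtime-cpu-detect, the vp8_arch_x86_*_init() routines below are compiled out and the RTCD hooks are resolved at compile time through the static mapping headers the code comments point at (<arch>/filename_<arch>.h). Those headers refer to the MMX/SSE2 routines by their global vp8_* names, so file-local static wrappers fail to link; giving them external linkage under the expected names resolves that. Below is a minimal, self-contained sketch of the two dispatch modes; the identifiers vp8_fdct, vp8_fdct_c, vp8_fdct_mmx and rtcd_fdct are hypothetical stand-ins, not the real libvpx hooks.

/* Minimal sketch of the two dispatch modes (hypothetical names, not the
 * actual libvpx hooks). */
#include <stdio.h>

#define CONFIG_RUNTIME_CPU_DETECT 0   /* what --disable-runtime-cpu-detect yields */

void vp8_fdct_c(void)   { puts("generic C fdct"); }
void vp8_fdct_mmx(void) { puts("MMX fdct"); }   /* in libvpx this sits in another
                                                   translation unit, so the static
                                                   mapping must find it by its
                                                   global name */

#if CONFIG_RUNTIME_CPU_DETECT
/* Runtime mode: an init routine (vp8_arch_x86_*_init) probes CPU flags and
 * fills in a function pointer. */
static void (*rtcd_fdct)(void) = vp8_fdct_c;
#define vp8_fdct() rtcd_fdct()
#else
/* Static-mapping mode: the hook is only a macro, so its target needs external
 * linkage -- a file-local "static" wrapper would not resolve at link time. */
#define vp8_fdct vp8_fdct_mmx
#endif

int main(void)
{
    vp8_fdct();   /* prints "MMX fdct" in the static-mapping configuration */
    return 0;
}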
@@ -17,7 +17,7 @@
 #if HAVE_MMX
 void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q);
 
-static void dequantize_b_mmx(BLOCKD *d)
+void vp8_dequantize_b_mmx(BLOCKD *d)
 {
     short *sq = (short *) d->qcoeff;
     short *dq = (short *) d->dqcoeff;
@@ -28,6 +28,7 @@ static void dequantize_b_mmx(BLOCKD *d)
 
 void vp8_arch_x86_decode_init(VP8D_COMP *pbi)
 {
+#if CONFIG_RUNTIME_CPU_DETECT
     int flags = x86_simd_caps();
 
     /* Note:
@@ -36,12 +37,11 @@ void vp8_arch_x86_decode_init(VP8D_COMP *pbi)
      * you modify any of the function mappings present in this file, be sure
      * to also update them in static mapings (<arch>/filename_<arch>.h)
      */
-#if CONFIG_RUNTIME_CPU_DETECT
     /* Override default functions with fastest ones for this CPU. */
 #if HAVE_MMX
     if (flags & HAS_MMX)
     {
-        pbi->dequant.block = dequantize_b_mmx;
+        pbi->dequant.block = vp8_dequantize_b_mmx;
         pbi->dequant.idct_add = vp8_dequant_idct_add_mmx;
         pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_mmx;
         pbi->dequant.dc_idct_add_y_block = vp8_dequant_dc_idct_add_y_block_mmx;

@@ -31,6 +31,12 @@ extern prototype_fdct(vp8_short_fdct8x4_mmx);
 #undef vp8_fdct_short8x4
 #define vp8_fdct_short8x4 vp8_short_fdct8x4_mmx
 
+#undef vp8_fdct_fast4x4
+#define vp8_fdct_fast4x4 vp8_short_fdct4x4_mmx
+
+#undef vp8_fdct_fast8x4
+#define vp8_fdct_fast8x4 vp8_short_fdct8x4_mmx
+
 #endif
 
 #endif
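The #defines added above are what the compile-time path expands to; call sites reach the hooks through invoke macros that pick between the runtime table and this static mapping. A hedged sketch of that pattern follows (the FDCT_INVOKE definition and the argument names are illustrative and may not match libvpx verbatim):

/* Illustrative invoke macro -- the exact libvpx definition may differ. */
#if CONFIG_RUNTIME_CPU_DETECT
#define FDCT_INVOKE(ctx, fn) (ctx)->fn       /* dispatch through cpi->rtcd.fdct     */
#else
#define FDCT_INVOKE(ctx, fn) vp8_fdct_##fn   /* token-paste the static mapping name */
#endif

/* A call written as
 *     FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4)(src_diff, coeff, pitch);
 * expands, in a --disable-runtime-cpu-detect build, to
 *     vp8_fdct_fast8x4(src_diff, coeff, pitch);
 * which the hunk above #defines to vp8_short_fdct8x4_mmx -- hence that
 * function can no longer be static. */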
@@ -16,7 +16,7 @@
 
 
 #if HAVE_MMX
-static void short_fdct8x4_mmx(short *input, short *output, int pitch)
+void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
 {
     vp8_short_fdct4x4_mmx(input, output, pitch);
     vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
@@ -26,7 +26,7 @@ int vp8_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr,
                                  short *qcoeff_ptr, short *dequant_ptr,
                                  short *scan_mask, short *round_ptr,
                                  short *quant_ptr, short *dqcoeff_ptr);
-static void fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
+void vp8_fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
 {
     short *scan_mask = vp8_default_zig_zag_mask;//d->scan_order_mask_ptr;
     short *coeff_ptr = b->coeff;
@@ -51,7 +51,7 @@ static void fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
 }
 
 int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-static int mbblock_error_mmx(MACROBLOCK *mb, int dc)
+int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc)
 {
     short *coeff_ptr = mb->block[0].coeff;
     short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
@@ -59,7 +59,7 @@ static int mbblock_error_mmx(MACROBLOCK *mb, int dc)
 }
 
 int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
-static int mbuverror_mmx(MACROBLOCK *mb)
+int vp8_mbuverror_mmx(MACROBLOCK *mb)
 {
     short *s_ptr = &mb->coeff[256];
     short *d_ptr = &mb->e_mbd.dqcoeff[256];
@@ -69,7 +69,7 @@ static int mbuverror_mmx(MACROBLOCK *mb)
 void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
                              short *diff, unsigned char *predictor,
                              int pitch);
-static void subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
+void vp8_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
 {
     unsigned char *z = *(be->base_src) + be->src;
     unsigned int src_stride = be->src_stride;
@@ -82,7 +82,7 @@ static void subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
 
 #if HAVE_SSE2
 int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-static int mbblock_error_xmm(MACROBLOCK *mb, int dc)
+int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc)
 {
     short *coeff_ptr = mb->block[0].coeff;
     short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
@@ -90,7 +90,7 @@ static int mbblock_error_xmm(MACROBLOCK *mb, int dc)
 }
 
 int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
-static int mbuverror_xmm(MACROBLOCK *mb)
+int vp8_mbuverror_xmm(MACROBLOCK *mb)
 {
     short *s_ptr = &mb->coeff[256];
     short *d_ptr = &mb->e_mbd.dqcoeff[256];
@@ -100,7 +100,7 @@ static int mbuverror_xmm(MACROBLOCK *mb)
 void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
                               short *diff, unsigned char *predictor,
                               int pitch);
-static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
+void vp8_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
 {
     unsigned char *z = *(be->base_src) + be->src;
     unsigned int src_stride = be->src_stride;
@@ -178,20 +178,20 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_mmx;
 
         cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_mmx;
-        cpi->rtcd.fdct.short8x4 = short_fdct8x4_mmx;
+        cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_mmx;
         cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_mmx;
-        cpi->rtcd.fdct.fast8x4 = short_fdct8x4_mmx;
+        cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_mmx;
 
         cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
 
         cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
-        cpi->rtcd.encodemb.mberr = mbblock_error_mmx;
-        cpi->rtcd.encodemb.mbuverr = mbuverror_mmx;
-        cpi->rtcd.encodemb.subb = subtract_b_mmx;
+        cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
+        cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
+        cpi->rtcd.encodemb.subb = vp8_subtract_b_mmx;
         cpi->rtcd.encodemb.submby = vp8_subtract_mby_mmx;
         cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_mmx;
 
-        /*cpi->rtcd.quantize.fastquantb = fast_quantize_b_mmx;*/
+        /*cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_mmx;*/
     }
 #endif
 
@@ -233,9 +233,9 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_sse2 ;
 
         cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
-        cpi->rtcd.encodemb.mberr = mbblock_error_xmm;
-        cpi->rtcd.encodemb.mbuverr = mbuverror_xmm;
-        cpi->rtcd.encodemb.subb = subtract_b_sse2;
+        cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
+        cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
+        cpi->rtcd.encodemb.subb = vp8_subtract_b_sse2;
         cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
         cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
 