ARMv6 optimized half pixel variance calculations

Adds the following ARMv6-optimized functions to the encoder:
 - vp8_variance_halfpixvar16x16_h_armv6
 - vp8_variance_halfpixvar16x16_v_armv6
 - vp8_variance_halfpixvar16x16_hv_armv6
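
Each function is a thin wrapper around the existing vp8_sub_pixel_variance16x16_armv6 kernel, called with a fixed offset of 4 in eighth-pel units (i.e. exactly half a pixel) in the horizontal, vertical, or both directions; a plain-C sketch of what this computes follows the second hunk below.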

Change-Id: I1e9c2af7acd2a51b72b3845beecd990db4bebd29
Tero Rintaluoma 2011-02-23 13:27:27 +02:00
parent 945dad277d
commit 8ae92aef66
3 changed files with 51 additions and 1 deletion

@@ -46,6 +46,9 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
    cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
    cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
    cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
    cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
    cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
    cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
    /*cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
    cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
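
For context, a minimal usage sketch of one of the new kernels once the pointer is installed; the buffer contents and strides are illustrative, not part of the patch (through the rtcd table the same call is reached as cpi->rtcd.variance.halfpixvar16x16_h):

    #include <string.h>

    /* Declaration normally comes from the ARM variance header. */
    extern unsigned int vp8_variance_halfpixvar16x16_h_armv6(
        const unsigned char *src_ptr, int source_stride,
        const unsigned char *ref_ptr, int recon_stride, unsigned int *sse);

    static unsigned int example_halfpix_h(void)
    {
        /* 17 columns: the horizontal half-pel filter reads one pixel
           past the 16x16 block (src[j + 1]). */
        unsigned char src[16 * 17];
        unsigned char ref[16 * 16];
        unsigned int sse;

        memset(src, 128, sizeof(src));
        memset(ref, 127, sizeof(ref));

        return vp8_variance_halfpixvar16x16_h_armv6(src, 17, ref, 16, &sse);
    }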

@@ -43,7 +43,41 @@ unsigned int vp8_sub_pixel_variance16x16_armv6
dst_pixels_per_line, sse);
}
#endif

unsigned int vp8_variance_halfpixvar16x16_h_armv6(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_armv6(src_ptr, source_stride, 4, 0,
                                             ref_ptr, recon_stride, sse);
}

unsigned int vp8_variance_halfpixvar16x16_v_armv6(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_armv6(src_ptr, source_stride, 0, 4,
                                             ref_ptr, recon_stride, sse);
}

unsigned int vp8_variance_halfpixvar16x16_hv_armv6(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_armv6(src_ptr, source_stride, 4, 4,
                                             ref_ptr, recon_stride, sse);
}

#endif /* HAVE_ARMV6 */
#if HAVE_ARMV7
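
The offsets passed above are in eighth-pel units, so 4 selects the half-pixel bilinear filter, whose two taps are equal and reduce to a rounded average of adjacent pixels. Below is a plain-C sketch, not from the patch, of what the horizontal variant computes; the helper name is made up, and the ARMv6 assembly remains the authoritative implementation:

    #include <stdint.h>

    /* Reference model: horizontal half-pel average of the source block,
       then variance of that prediction against the reference block. */
    static unsigned int halfpixvar16x16_h_ref(
        const unsigned char *src, int src_stride,
        const unsigned char *ref, int ref_stride,
        unsigned int *sse)
    {
        int i, j, sum = 0;
        unsigned int sq = 0;

        for (i = 0; i < 16; i++) {
            for (j = 0; j < 16; j++) {
                /* Half-pel taps {64, 64}: (64*a + 64*b + 64) >> 7 is the
                   rounded average of two horizontally adjacent pixels. */
                int pred = (src[j] + src[j + 1] + 1) >> 1;
                int diff = pred - ref[j];
                sum += diff;
                sq  += (unsigned int)(diff * diff);
            }
            src += src_stride;
            ref += ref_stride;
        }

        *sse = sq;
        /* variance = SSE - sum^2 / N, with N = 256 pixels in a 16x16 block */
        return sq - (unsigned int)(((int64_t)sum * sum) >> 8);
    }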

@@ -17,6 +17,9 @@
extern prototype_sad(vp8_sad16x16_armv6);
extern prototype_variance(vp8_variance16x16_armv6);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
extern prototype_variance(vp8_variance_halfpixvar16x16_h_armv6);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_armv6);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_armv6);
#if !CONFIG_RUNTIME_CPU_DETECT
@@ -29,10 +32,20 @@ extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
#undef vp8_variance_var16x16
#define vp8_variance_var16x16 vp8_variance16x16_armv6
#undef vp8_variance_halfpixvar16x16_h
#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_armv6
#undef vp8_variance_halfpixvar16x16_v
#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_armv6
#undef vp8_variance_halfpixvar16x16_hv
#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_armv6
#endif /* !CONFIG_RUNTIME_CPU_DETECT */
#endif /* HAVE_ARMV6 */
#if HAVE_ARMV7
extern prototype_sad(vp8_sad4x4_neon);
extern prototype_sad(vp8_sad8x8_neon);
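
When CONFIG_RUNTIME_CPU_DETECT is disabled, the defines in the header hunk above bind the generic names to the ARMv6 kernels at compile time, so a call written against the generic name resolves directly to the ARMv6 symbol. An illustrative fragment (src, ref, and the strides stand in for real encoder state):

    unsigned int sse;
    /* Under HAVE_ARMV6 && !CONFIG_RUNTIME_CPU_DETECT this preprocesses to
       vp8_variance_halfpixvar16x16_h_armv6(src, src_stride, ref, ref_stride, &sse) */
    unsigned int var = vp8_variance_halfpixvar16x16_h(src, src_stride,
                                                      ref, ref_stride, &sse);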