Merge pull request #3326 from ilya-lavrenov:neon_canny

Vadim Pisarevsky
2014-10-11 17:58:23 +00:00
17 changed files with 5462 additions and 143 deletions

View File

@@ -1987,6 +1987,238 @@ void cv::absdiff( InputArray src1, InputArray src2, OutputArray dst )
namespace cv
{
template <typename T, typename WT>
struct Mul_SIMD
{
int operator() (const T *, const T *, T *, int, WT) const
{
return 0;
}
};
#if CV_NEON
template <>
struct Mul_SIMD<uchar, float>
{
int operator() (const uchar * src1, const uchar * src2, uchar * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + x));
uint16x8_t v_src2 = vmovl_u8(vld1_u8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + x));
uint16x8_t v_src2 = vmovl_u8(vld1_u8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
}
return x;
}
};
template <>
struct Mul_SIMD<schar, float>
{
int operator() (const schar * src1, const schar * src2, schar * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + x));
int16x8_t v_src2 = vmovl_s8(vld1_s8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + x));
int16x8_t v_src2 = vmovl_s8(vld1_s8(src2 + x));
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
}
return x;
}
};
template <>
struct Mul_SIMD<ushort, float>
{
int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vld1q_u16(src1 + x), v_src2 = vld1q_u16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src1 = vld1q_u16(src1 + x), v_src2 = vld1q_u16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
}
return x;
}
};
template <>
struct Mul_SIMD<short, float>
{
int operator() (const short * src1, const short * src2, short * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vld1q_s16(src1 + x), v_src2 = vld1q_s16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src1 = vld1q_s16(src1 + x), v_src2 = vld1q_s16(src2 + x);
float32x4_t v_dst1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))),
vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))));
v_dst2 = vmulq_f32(v_dst2, v_scale);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
}
return x;
}
};
template <>
struct Mul_SIMD<float, float>
{
int operator() (const float * src1, const float * src2, float * dst, int width, float scale) const
{
int x = 0;
if( scale == 1.0f )
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vmulq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
float32x4_t v_dst2 = vmulq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1q_f32(dst + x, v_dst1);
vst1q_f32(dst + x + 4, v_dst2);
}
else
{
float32x4_t v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vmulq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
v_dst1 = vmulq_f32(v_dst1, v_scale);
float32x4_t v_dst2 = vmulq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
v_dst2 = vmulq_f32(v_dst2, v_scale);
vst1q_f32(dst + x, v_dst1);
vst1q_f32(dst + x + 4, v_dst2);
}
}
return x;
}
};
#endif
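// --- Editor's illustration (not part of the commit) ---
// Every Mul_SIMD specialization above runs the same per-element pipeline:
// widen to 16/32 bits, convert to float32, multiply (also by `scale` when it
// is not 1), round to nearest with cv_vrndq_*_f32, then saturate-narrow back
// to the destination type. A minimal scalar sketch of that semantics, using
// a hypothetical helper name:
static inline uchar mulScalarSketch(uchar a, uchar b, float scale)
{
    // saturate_cast<uchar>(float) rounds to nearest and clamps to [0, 255],
    // mirroring cv_vrndq_u32_f32 followed by vqmovn_u32/vqmovn_u16
    return saturate_cast<uchar>((float)a * (float)b * scale);
}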
template<typename T, typename WT> static void
mul_( const T* src1, size_t step1, const T* src2, size_t step2,
T* dst, size_t step, Size size, WT scale )
@@ -1995,11 +2227,13 @@ mul_( const T* src1, size_t step1, const T* src2, size_t step2,
step2 /= sizeof(src2[0]);
step /= sizeof(dst[0]);
Mul_SIMD<T, WT> vop;
if( scale == (WT)1. )
{
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
- int i=0;
+ int i = vop(src1, src2, dst, size.width, scale);
#if CV_ENABLE_UNROLLED
for(; i <= size.width - 4; i += 4 )
{
@@ -2024,7 +2258,7 @@ mul_( const T* src1, size_t step1, const T* src2, size_t step2,
{
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
- int i = 0;
+ int i = vop(src1, src2, dst, size.width, scale);
#if CV_ENABLE_UNROLLED
for(; i <= size.width - 4; i += 4 )
{
@@ -2367,6 +2601,114 @@ void cv::divide(double scale, InputArray src2,
namespace cv
{
template <typename T, typename WT>
struct AddWeighted_SIMD
{
int operator() (const T *, const T *, T *, int, WT, WT, WT) const
{
return 0;
}
};
#if CV_NEON
template <>
struct AddWeighted_SIMD<schar, float>
{
int operator() (const schar * src1, const schar * src2, schar * dst, int width, float alpha, float beta, float gamma) const
{
int x = 0;
float32x4_t g = vdupq_n_f32 (gamma);
for( ; x <= width - 8; x += 8 )
{
int8x8_t in1 = vld1_s8(src1 + x);
int16x8_t in1_16 = vmovl_s8(in1);
float32x4_t in1_f_l = vcvtq_f32_s32(vmovl_s16(vget_low_s16(in1_16)));
float32x4_t in1_f_h = vcvtq_f32_s32(vmovl_s16(vget_high_s16(in1_16)));
int8x8_t in2 = vld1_s8(src2+x);
int16x8_t in2_16 = vmovl_s8(in2);
float32x4_t in2_f_l = vcvtq_f32_s32(vmovl_s16(vget_low_s16(in2_16)));
float32x4_t in2_f_h = vcvtq_f32_s32(vmovl_s16(vget_high_s16(in2_16)));
float32x4_t out_f_l = vaddq_f32(vmulq_n_f32(in1_f_l, alpha), vmulq_n_f32(in2_f_l, beta));
float32x4_t out_f_h = vaddq_f32(vmulq_n_f32(in1_f_h, alpha), vmulq_n_f32(in2_f_h, beta));
out_f_l = vaddq_f32(out_f_l, g);
out_f_h = vaddq_f32(out_f_h, g);
int16x4_t out_16_l = vqmovn_s32(cv_vrndq_s32_f32(out_f_l));
int16x4_t out_16_h = vqmovn_s32(cv_vrndq_s32_f32(out_f_h));
int16x8_t out_16 = vcombine_s16(out_16_l, out_16_h);
int8x8_t out = vqmovn_s16(out_16);
vst1_s8(dst + x, out);
}
return x;
}
};
template <>
struct AddWeighted_SIMD<ushort, float>
{
int operator() (const ushort * src1, const ushort * src2, ushort * dst, int width, float alpha, float beta, float gamma) const
{
int x = 0;
float32x4_t g = vdupq_n_f32(gamma);
for( ; x <= width - 8; x += 8 )
{
uint16x8_t v_src1 = vld1q_u16(src1 + x), v_src2 = vld1q_u16(src2 + x);
float32x4_t v_s1 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src1))), alpha);
float32x4_t v_s2 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src2))), beta);
uint16x4_t v_dst1 = vqmovn_u32(cv_vrndq_u32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
v_s1 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src1))), alpha);
v_s2 = vmulq_n_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src2))), beta);
uint16x4_t v_dst2 = vqmovn_u32(cv_vrndq_u32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
vst1q_u16(dst + x, vcombine_u16(v_dst1, v_dst2));
}
return x;
}
};
template <>
struct AddWeighted_SIMD<short, float>
{
int operator() (const short * src1, const short * src2, short * dst, int width, float alpha, float beta, float gamma) const
{
int x = 0;
float32x4_t g = vdupq_n_f32(gamma);
for( ; x <= width - 8; x += 8 )
{
int16x8_t v_src1 = vld1q_s16(src1 + x), v_src2 = vld1q_s16(src2 + x);
float32x4_t v_s1 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))), alpha);
float32x4_t v_s2 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src2))), beta);
int16x4_t v_dst1 = vqmovn_s32(cv_vrndq_s32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
v_s1 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))), alpha);
v_s2 = vmulq_n_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src2))), beta);
int16x4_t v_dst2 = vqmovn_s32(cv_vrndq_s32_f32(vaddq_f32(vaddq_f32(v_s1, v_s2), g)));
vst1q_s16(dst + x, vcombine_s16(v_dst1, v_dst2));
}
return x;
}
};
#endif
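// --- Editor's illustration (not part of the commit) ---
// vmulq_n_f32 scales all four lanes by a scalar, so each specialization
// above computes, per element, exactly what the scalar tail of addWeighted_
// computes; a sketch with a hypothetical helper name:
static inline short addWeightedScalarSketch(short a, short b,
                                            float alpha, float beta, float gamma)
{
    // cv_vrndq_s32_f32 + vqmovn_s32 correspond to this rounding,
    // saturating cast
    return saturate_cast<short>(a * alpha + b * beta + gamma);
}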
template<typename T, typename WT> static void
addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2,
T* dst, size_t step, Size size, void* _scalars )
@@ -2377,9 +2719,11 @@ addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2,
step2 /= sizeof(src2[0]);
step /= sizeof(dst[0]);
AddWeighted_SIMD<T, WT> vop;
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
- int x = 0;
+ int x = vop(src1, src2, dst, size.width, alpha, beta, gamma);
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
{
@@ -2457,8 +2801,8 @@ addWeighted8u( const uchar* src1, size_t step1,
out_f_l = vaddq_f32(out_f_l, g);
out_f_h = vaddq_f32(out_f_h, g);
- uint16x4_t out_16_l = vqmovun_s32(vcvtq_s32_f32(out_f_l));
- uint16x4_t out_16_h = vqmovun_s32(vcvtq_s32_f32(out_f_h));
+ uint16x4_t out_16_l = vqmovun_s32(cv_vrndq_s32_f32(out_f_l));
+ uint16x4_t out_16_h = vqmovun_s32(cv_vrndq_s32_f32(out_f_h));
uint16x8_t out_16 = vcombine_u16(out_16_l, out_16_h);
uint8x8_t out = vqmovn_u16(out_16);
@@ -2557,6 +2901,213 @@ void cv::addWeighted( InputArray src1, double alpha, InputArray src2,
namespace cv
{
template <typename T>
struct Cmp_SIMD
{
explicit Cmp_SIMD(int)
{
}
int operator () (const T *, const T *, uchar *, int) const
{
return 0;
}
};
#if CV_NEON
template <>
struct Cmp_SIMD<schar>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdupq_n_u8(255);
}
int operator () (const schar * src1, const schar * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, vcgtq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)));
else if (code == CMP_LE)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, vcleq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)));
else if (code == CMP_EQ)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, vceqq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)));
else if (code == CMP_NE)
for ( ; x <= width - 16; x += 16)
vst1q_u8(dst + x, veorq_u8(vceqq_s8(vld1q_s8(src1 + x), vld1q_s8(src2 + x)), v_mask));
return x;
}
int code;
uint8x16_t v_mask;
};
template <>
struct Cmp_SIMD<ushort>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdup_n_u8(255);
}
int operator () (const ushort * src1, const ushort * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vcgtq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, vmovn_u16(v_dst));
}
else if (code == CMP_LE)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vcleq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, vmovn_u16(v_dst));
}
else if (code == CMP_EQ)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vceqq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, vmovn_u16(v_dst));
}
else if (code == CMP_NE)
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_dst = vceqq_u16(vld1q_u16(src1 + x), vld1q_u16(src2 + x));
vst1_u8(dst + x, veor_u8(vmovn_u16(v_dst), v_mask));
}
return x;
}
int code;
uint8x8_t v_mask;
};
template <>
struct Cmp_SIMD<int>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdup_n_u8(255);
}
int operator () (const int * src1, const int * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcgtq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vcgtq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_LE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcleq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vcleq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_EQ)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vceqq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_NE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_s32(vld1q_s32(src1 + x), vld1q_s32(src2 + x));
uint32x4_t v_dst2 = vceqq_s32(vld1q_s32(src1 + x + 4), vld1q_s32(src2 + x + 4));
uint8x8_t v_dst = vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2)));
vst1_u8(dst + x, veor_u8(v_dst, v_mask));
}
return x;
}
int code;
uint8x8_t v_mask;
};
template <>
struct Cmp_SIMD<float>
{
explicit Cmp_SIMD(int code_) :
code(code_)
{
CV_Assert(code == CMP_GT || code == CMP_LE ||
code == CMP_EQ || code == CMP_NE);
v_mask = vdup_n_u8(255);
}
int operator () (const float * src1, const float * src2, uchar * dst, int width) const
{
int x = 0;
if (code == CMP_GT)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcgtq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vcgtq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_LE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vcleq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vcleq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_EQ)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vceqq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
vst1_u8(dst + x, vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2))));
}
else if (code == CMP_NE)
for ( ; x <= width - 8; x += 8)
{
uint32x4_t v_dst1 = vceqq_f32(vld1q_f32(src1 + x), vld1q_f32(src2 + x));
uint32x4_t v_dst2 = vceqq_f32(vld1q_f32(src1 + x + 4), vld1q_f32(src2 + x + 4));
uint8x8_t v_dst = vmovn_u16(vcombine_u16(vmovn_u32(v_dst1), vmovn_u32(v_dst2)));
vst1_u8(dst + x, veor_u8(v_dst, v_mask));
}
return x;
}
int code;
uint8x8_t v_mask;
};
#endif
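// --- Editor's illustration (not part of the commit) ---
// Only GT, LE, EQ and NE get NEON kernels; cmp_ below first rewrites CMP_LT
// as CMP_GT and CMP_GE as CMP_LE by swapping the operands. CMP_NE is derived
// from the CMP_EQ mask by XOR-ing with all-ones (v_mask); per lane that is:
static inline uchar cmpNeLaneSketch(schar a, schar b)
{
    uchar eq = (uchar)(a == b ? 255 : 0); // what vceqq_s8 produces per lane
    return (uchar)(eq ^ 255);             // veorq_u8 with v_mask flips it
}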
template<typename T> static void
cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
uchar* dst, size_t step, Size size, int code)
@@ -2570,12 +3121,14 @@ cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
code = code == CMP_GE ? CMP_LE : CMP_GT;
}
Cmp_SIMD<T> vop(code);
if( code == CMP_GT || code == CMP_LE )
{
int m = code == CMP_GT ? 0 : 255;
for( ; size.height--; src1 += step1, src2 += step2, dst += step )
{
- int x = 0;
+ int x = vop(src1, src2, dst, size.width);
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
{
@@ -2590,7 +3143,7 @@ cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
#endif
for( ; x < size.width; x++ )
dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m);
}
}
}
else if( code == CMP_EQ || code == CMP_NE )
{

View File

@@ -1480,6 +1480,724 @@ cvtScaleAbs_( const T* src, size_t sstep,
}
}
template <typename T, typename DT, typename WT>
struct cvtScale_SIMD
{
int operator () (const T *, DT *, int, WT, WT) const
{
return 0;
}
};
#if CV_NEON
// from uchar
template <>
struct cvtScale_SIMD<uchar, uchar, float>
{
int operator () (const uchar * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, schar, float>
{
int operator () (const uchar * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, ushort, float>
{
int operator () (const uchar * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, short, float>
{
int operator () (const uchar * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, int, float>
{
int operator () (const uchar * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
}
return x;
}
};
template <>
struct cvtScale_SIMD<uchar, float, float>
{
int operator () (const uchar * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src + x));
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from schar
template <>
struct cvtScale_SIMD<schar, uchar, float>
{
int operator () (const schar * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, schar, float>
{
int operator () (const schar * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, ushort, float>
{
int operator () (const schar * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, short, float>
{
int operator () (const schar * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, int, float>
{
int operator () (const schar * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
}
return x;
}
};
template <>
struct cvtScale_SIMD<schar, float, float>
{
int operator () (const schar * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from ushort
template <>
struct cvtScale_SIMD<ushort, uchar, float>
{
int operator () (const ushort * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, schar, float>
{
int operator () (const ushort * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, ushort, float>
{
int operator () (const ushort * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, short, float>
{
int operator () (const ushort * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, int, float>
{
int operator () (const ushort * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
}
return x;
}
};
template <>
struct cvtScale_SIMD<ushort, float, float>
{
int operator () (const ushort * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from short
template <>
struct cvtScale_SIMD<short, uchar, float>
{
int operator () (const short * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<short, schar, float>
{
int operator () (const short * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<short, ushort, float>
{
int operator () (const short * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<short, float, float>
{
int operator () (const short * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vld1q_s16(src + x);
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift));
vst1q_f32(dst + x + 4, vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift));
}
return x;
}
};
// from int
template <>
struct cvtScale_SIMD<int, uchar, float>
{
int operator () (const int * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<int, schar, float>
{
int operator () (const int * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<int, ushort, float>
{
int operator () (const int * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<int, short, float>
{
int operator () (const int * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
// from float
template <>
struct cvtScale_SIMD<float, uchar, float>
{
int operator () (const float * src, uchar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1_u8(dst + x, vqmovn_u16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, schar, float>
{
int operator () (const float * src, schar * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1_s8(dst + x, vqmovn_s16(v_dst));
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, ushort, float>
{
int operator () (const float * src, ushort * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
vst1q_u16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, short, float>
{
int operator () (const float * src, short * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 8; x += 8)
{
float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
vst1q_s16(dst + x, v_dst);
}
return x;
}
};
template <>
struct cvtScale_SIMD<float, int, float>
{
int operator () (const float * src, int * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 4; x += 4)
vst1q_s32(dst + x, cv_vrndq_s32_f32(vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift)));
return x;
}
};
template <>
struct cvtScale_SIMD<float, float, float>
{
int operator () (const float * src, float * dst, int width, float scale, float shift) const
{
int x = 0;
float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
for ( ; x <= width - 4; x += 4)
vst1q_f32(dst + x, vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift));
return x;
}
};
#endif
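// --- Editor's illustration (not part of the commit) ---
// All cvtScale_SIMD specializations above share one per-element formula:
// dst = saturate(round(src * scale + shift)); a scalar sketch with a
// hypothetical helper name:
static inline uchar cvtScaleScalarSketch(uchar v, float scale, float shift)
{
    // vmulq_f32 + vaddq_f32 compute the affine map; cv_vrndq_u32_f32 and the
    // vqmovn narrowing match this rounding, saturating cast
    return saturate_cast<uchar>(v * scale + shift);
}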
template<typename T, typename DT, typename WT> static void
cvtScale_( const T* src, size_t sstep,
DT* dst, size_t dstep, Size size,
@@ -1488,9 +2206,11 @@ cvtScale_( const T* src, size_t sstep,
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
cvtScale_SIMD<T, DT, WT> vop;
for( ; size.height--; src += sstep, dst += dstep )
{
- int x = 0;
+ int x = vop(src, dst, size.width, scale, shift);
#if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
@@ -1755,6 +2475,25 @@ struct Cvt_SIMD<schar, short>
}
};
template <>
struct Cvt_SIMD<schar, ushort>
{
int operator() (const schar * src, ushort * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src + x));
vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(vmovl_s16(vget_low_s16(v_src))),
vqmovun_s32(vmovl_s16(vget_high_s16(v_src)))));
}
return x;
}
};
template <>
struct Cvt_SIMD<schar, int>
{
@@ -1810,6 +2549,49 @@ struct Cvt_SIMD<ushort, uchar>
}
};
template <>
struct Cvt_SIMD<ushort, schar>
{
int operator() (const ushort * src, schar * dst, int width) const
{
int x = 0;
for ( ; x <= width - 16; x += 16)
{
uint16x8_t v_src1 = vld1q_u16(src + x), v_src2 = vld1q_u16(src + x + 8);
int32x4_t v_dst10 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src1)));
int32x4_t v_dst11 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src1)));
int32x4_t v_dst20 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src2)));
int32x4_t v_dst21 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src2)));
vst1q_s8(dst + x, vcombine_s8(vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst10), vqmovn_s32(v_dst11))),
vqmovn_s16(vcombine_s16(vqmovn_s32(v_dst20), vqmovn_s32(v_dst21)))));
}
return x;
}
};
template <>
struct Cvt_SIMD<ushort, short>
{
int operator() (const ushort * src, short * dst, int width) const
{
int x = 0;
for ( ; x <= width - 8; x += 8)
{
uint16x8_t v_src = vld1q_u16(src + x);
int32x4_t v_dst0 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(v_src)));
int32x4_t v_dst1 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(v_src)));
vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_dst0), vqmovn_s32(v_dst1)));
}
return x;
}
};
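// --- Editor's note (not part of the commit) ---
// The ushort -> short case must clamp 32768..65535 down to 32767: the u16
// lanes are first widened to 32 bits (where every value is representable),
// then vqmovn_s32 narrows with saturation, i.e. per element:
static inline short cvtSatLaneSketch(ushort v)
{
    return saturate_cast<short>((int)v); // 40000 -> 32767, 1234 -> 1234
}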
template <>
struct Cvt_SIMD<ushort, int>
{

View File

@@ -2804,7 +2804,8 @@ dotProd_(const T* src1, const T* src2, int len)
{
int i = 0;
double result = 0;
#if CV_ENABLE_UNROLLED
for( ; i <= len - 4; i += 4 )
result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] +
(double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3];
@@ -2833,10 +2834,12 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
{
int j, len0 = len & -4, blockSize0 = (1 << 13), blockSize;
__m128i z = _mm_setzero_si128();
CV_DECL_ALIGNED(16) int buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
- __m128i s = _mm_setzero_si128();
+ __m128i s = z;
j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
@@ -2860,7 +2863,7 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
s0 = _mm_madd_epi16(s0, s1);
s = _mm_add_epi32(s, s0);
}
- CV_DECL_ALIGNED(16) int buf[4];
_mm_store_si128((__m128i*)buf, s);
r += buf[0] + buf[1] + buf[2] + buf[3];
@@ -2869,6 +2872,45 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
i += blockSize;
}
}
#elif CV_NEON
int len0 = len & -8, blockSize0 = (1 << 15), blockSize;
uint32x4_t v_zero = vdupq_n_u32(0u);
CV_DECL_ALIGNED(16) uint buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
uint32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
uint8x16_t v_src1 = vld1q_u8(src1 + j), v_src2 = vld1q_u8(src2 + j);
uint16x8_t v_src10 = vmovl_u8(vget_low_u8(v_src1)), v_src20 = vmovl_u8(vget_low_u8(v_src2));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
v_src10 = vmovl_u8(vget_high_u8(v_src1));
v_src20 = vmovl_u8(vget_high_u8(v_src2));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
}
for( ; j <= blockSize - 8; j += 8 )
{
uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + j)), v_src2 = vmovl_u8(vld1_u8(src2 + j));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src1), vget_low_u16(v_src2));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src1), vget_high_u16(v_src2));
}
vst1q_u32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
return r + dotProd_(src1, src2, len - i);
}
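// --- Editor's note (not part of the commit) ---
// Blocking keeps the unsigned 32-bit accumulator safe: within one block a
// v_sum lane receives at most blockSize0/4 products of two 8-bit values, and
// (1 << 15) / 4 * 255 * 255 = 8192 * 65025 ~= 5.3e8 < 2^32, so a block can
// never overflow; per-block totals are then summed in a double. A
// hypothetical restatement of that bound:
static inline bool dotProd8uBlockBoundSketch()
{
    const double worstLaneSum = ((1 << 15) / 4) * 255.0 * 255.0;
    return worstLaneSum < 4294967296.0; // 2^32
}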
@@ -2876,7 +2918,51 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
static double dotProd_8s(const schar* src1, const schar* src2, int len)
{
- return dotProd_(src1, src2, len);
int i = 0;
double r = 0.0;
#if CV_NEON
int len0 = len & -8, blockSize0 = (1 << 14), blockSize;
int32x4_t v_zero = vdupq_n_s32(0);
CV_DECL_ALIGNED(16) int buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
int32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
int8x16_t v_src1 = vld1q_s8(src1 + j), v_src2 = vld1q_s8(src2 + j);
int16x8_t v_src10 = vmovl_s8(vget_low_s8(v_src1)), v_src20 = vmovl_s8(vget_low_s8(v_src2));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
v_src10 = vmovl_s8(vget_high_s8(v_src1));
v_src20 = vmovl_s8(vget_high_s8(v_src2));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
}
for( ; j <= blockSize - 8; j += 8 )
{
int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + j)), v_src2 = vmovl_s8(vld1_s8(src2 + j));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src1), vget_low_s16(v_src2));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src1), vget_high_s16(v_src2));
}
vst1q_s32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
return r + dotProd_(src1, src2, len - i);
}
static double dotProd_16u(const ushort* src1, const ushort* src2, int len)
@@ -2914,13 +3000,36 @@ static double dotProd_32s(const int* src1, const int* src2, int len)
static double dotProd_32f(const float* src1, const float* src2, int len)
{
double r = 0.0;
int i = 0;
#if (ARITHM_USE_IPP == 1)
- double r = 0;
if (0 <= ippsDotProd_32f64f(src1, src2, len, &r))
return r;
setIppErrorStatus();
#elif CV_NEON
int len0 = len & -4, blockSize0 = (1 << 13), blockSize;
float32x4_t v_zero = vdupq_n_f32(0.0f);
CV_DECL_ALIGNED(16) float buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
float32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 4; j += 4 )
v_sum = vmlaq_f32(v_sum, vld1q_f32(src1 + j), vld1q_f32(src2 + j));
vst1q_f32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
- return dotProd_(src1, src2, len);
+ return r + dotProd_(src1, src2, len - i);
}
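// --- Editor's note (not part of the commit) ---
// For floats the blocks serve accuracy rather than overflow: partial sums
// stay in float32 lanes for at most (1 << 13) elements, then get flushed
// into the double `r`, bounding rounding-error growth. A scalar sketch of
// the same blocking scheme (hypothetical helper):
static inline double blockedDotSketch(const float* a, const float* b, int n)
{
    double r = 0.0;
    for (int i = 0; i < n; )
    {
        int blk = std::min(n - i, 1 << 13);
        float s = 0.f;                  // per-block float accumulator
        for (int j = 0; j < blk; j++)
            s += a[i + j] * b[i + j];
        r += s;                         // flush to double once per block
        i += blk;
    }
    return r;
}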
static double dotProd_64f(const double* src1, const double* src2, int len)

View File

@@ -90,20 +90,20 @@ struct Sum_SIMD<uchar, int>
uint8x16_t v_src = vld1q_u8(src0 + x);
uint16x8_t v_half = vmovl_u8(vget_low_u8(v_src));
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_half)));
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_half)));
+ v_sum = vaddw_u16(v_sum, vget_low_u16(v_half));
+ v_sum = vaddw_u16(v_sum, vget_high_u16(v_half));
v_half = vmovl_u8(vget_high_u8(v_src));
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_half)));
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_half)));
+ v_sum = vaddw_u16(v_sum, vget_low_u16(v_half));
+ v_sum = vaddw_u16(v_sum, vget_high_u16(v_half));
}
for ( ; x <= len - 8; x += 8)
{
uint16x8_t v_src = vmovl_u8(vld1_u8(src0 + x));
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_src)));
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_src)));
+ v_sum = vaddw_u16(v_sum, vget_low_u16(v_src));
+ v_sum = vaddw_u16(v_sum, vget_high_u16(v_src));
}
unsigned int CV_DECL_ALIGNED(16) ar[4];
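// --- Editor's note (not part of the commit) ---
// The change in this hunk swaps the two-step widen-then-add
//     v_sum = vaddq_u32(v_sum, vmovl_u16(v));
// for the single widening accumulate
//     v_sum = vaddw_u16(v_sum, v);
// which is the same arithmetic in one instruction; the hunks below apply
// the identical rewrite to the schar, ushort and short accumulators.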
@@ -133,20 +133,20 @@ struct Sum_SIMD<schar, int>
int8x16_t v_src = vld1q_s8(src0 + x);
int16x8_t v_half = vmovl_s8(vget_low_s8(v_src));
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_half)));
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_half)));
+ v_sum = vaddw_s16(v_sum, vget_low_s16(v_half));
+ v_sum = vaddw_s16(v_sum, vget_high_s16(v_half));
v_half = vmovl_s8(vget_high_s8(v_src));
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_half)));
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_half)));
+ v_sum = vaddw_s16(v_sum, vget_low_s16(v_half));
+ v_sum = vaddw_s16(v_sum, vget_high_s16(v_half));
}
for ( ; x <= len - 8; x += 8)
{
int16x8_t v_src = vmovl_s8(vld1_s8(src0 + x));
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_src)));
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_src)));
+ v_sum = vaddw_s16(v_sum, vget_low_s16(v_src));
+ v_sum = vaddw_s16(v_sum, vget_high_s16(v_src));
}
int CV_DECL_ALIGNED(16) ar[4];
@@ -175,13 +175,13 @@ struct Sum_SIMD<ushort, int>
{
uint16x8_t v_src = vld1q_u16(src0 + x);
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_low_u16(v_src)));
- v_sum = vaddq_u32(v_sum, vmovl_u16(vget_high_u16(v_src)));
+ v_sum = vaddw_u16(v_sum, vget_low_u16(v_src));
+ v_sum = vaddw_u16(v_sum, vget_high_u16(v_src));
}
for ( ; x <= len - 4; x += 4)
- v_sum = vaddq_u32(v_sum, vmovl_u16(vld1_u16(src0 + x)));
+ v_sum = vaddw_u16(v_sum, vld1_u16(src0 + x));
unsigned int CV_DECL_ALIGNED(16) ar[4];
vst1q_u32(ar, v_sum);
@@ -208,13 +208,13 @@ struct Sum_SIMD<short, int>
{
int16x8_t v_src = vld1q_s16(src0 + x);
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_low_s16(v_src)));
- v_sum = vaddq_s32(v_sum, vmovl_s16(vget_high_s16(v_src)));
+ v_sum = vaddw_s16(v_sum, vget_low_s16(v_src));
+ v_sum = vaddw_s16(v_sum, vget_high_s16(v_src));
}
for ( ; x <= len - 4; x += 4)
- v_sum = vaddq_s32(v_sum, vmovl_s16(vld1_s16(src0 + x)));
+ v_sum = vaddw_s16(v_sum, vld1_s16(src0 + x));
int CV_DECL_ALIGNED(16) ar[4];
vst1q_s32(ar, v_sum);
@@ -426,6 +426,38 @@ static int countNonZero8u( const uchar* src, int len )
nz += tab[val & 255] + tab[val >> 8];
}
}
#elif CV_NEON
int len0 = len & -16, blockSize1 = (1 << 8) - 16, blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
uint8x16_t v_zero = vdupq_n_u8(0), v_1 = vdupq_n_u8(1);
const uchar * src0 = src;
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint8x16_t v_pz = v_zero;
for( ; k <= blockSizej - 16; k += 16 )
v_pz = vaddq_u8(v_pz, vandq_u8(vceqq_u8(vld1q_u8(src0 + k), v_zero), v_1));
uint16x8_t v_p1 = vmovl_u8(vget_low_u8(v_pz)), v_p2 = vmovl_u8(vget_high_u8(v_pz));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_p1), vget_high_u16(v_p1)), v_nz);
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_p2), vget_high_u16(v_p2)), v_nz);
src0 += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
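// --- Editor's note (not part of the commit) ---
// The NEON path counts zeros, not non-zeros: vceqq_u8 yields 0xFF for a zero
// byte, AND with v_1 turns that into a per-lane 0/1 count, and the result is
// recovered as nz += i - zeros. blockSize1 = (1 << 8) - 16 = 240 allows at
// most 240/16 = 15 increments per u8 lane of v_pz, so the 8-bit counters
// cannot wrap before being widened into v_nz.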
for( ; i < len; i++ )
nz += src[i] != 0;
@@ -433,13 +465,116 @@ static int countNonZero8u( const uchar* src, int len )
}
static int countNonZero16u( const ushort* src, int len )
- { return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
uint16x8_t v_zero = vdupq_n_u16(0), v_1 = vdupq_n_u16(1);
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint16x8_t v_pz = v_zero;
for( ; k <= blockSizej - 8; k += 8 )
v_pz = vaddq_u16(v_pz, vandq_u16(vceqq_u16(vld1q_u16(src + k), v_zero), v_1));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_pz), vget_high_u16(v_pz)), v_nz);
src += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
return nz + countNonZero_(src, len - i);
}
static int countNonZero32s( const int* src, int len )
- { return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
int32x4_t v_zero = vdupq_n_s32(0);
uint16x8_t v_1 = vdupq_n_u16(1u), v_zerou = vdupq_n_u16(0u);
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint16x8_t v_pz = v_zerou;
for( ; k <= blockSizej - 8; k += 8 )
v_pz = vaddq_u16(v_pz, vandq_u16(vcombine_u16(vmovn_u32(vceqq_s32(vld1q_s32(src + k), v_zero)),
vmovn_u32(vceqq_s32(vld1q_s32(src + k + 4), v_zero))), v_1));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_pz), vget_high_u16(v_pz)), v_nz);
src += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
return nz + countNonZero_(src, len - i);
}
static int countNonZero32f( const float* src, int len )
- { return countNonZero_(src, len); }
{
int i = 0, nz = 0;
#if CV_NEON
int len0 = len & -8, blockSize1 = (1 << 15), blockSize0 = blockSize1 << 6;
uint32x4_t v_nz = vdupq_n_u32(0u);
float32x4_t v_zero = vdupq_n_f32(0.0f);
uint16x8_t v_1 = vdupq_n_u16(1u), v_zerou = vdupq_n_u16(0u);
while( i < len0 )
{
int blockSizei = std::min(len0 - i, blockSize0), j = 0;
while (j < blockSizei)
{
int blockSizej = std::min(blockSizei - j, blockSize1), k = 0;
uint16x8_t v_pz = v_zerou;
for( ; k <= blockSizej - 8; k += 8 )
v_pz = vaddq_u16(v_pz, vandq_u16(vcombine_u16(vmovn_u32(vceqq_f32(vld1q_f32(src + k), v_zero)),
vmovn_u32(vceqq_f32(vld1q_f32(src + k + 4), v_zero))), v_1));
v_nz = vaddq_u32(vaddl_u16(vget_low_u16(v_pz), vget_high_u16(v_pz)), v_nz);
src += blockSizej;
j += blockSizej;
}
i += blockSizei;
}
CV_DECL_ALIGNED(16) unsigned int buf[4];
vst1q_u32(buf, v_nz);
nz += i - saturate_cast<int>(buf[0] + buf[1] + buf[2] + buf[3]);
#endif
return nz + countNonZero_(src, len - i);
}
static int countNonZero64f( const double* src, int len )
{ return countNonZero_(src, len); }
@@ -1956,6 +2091,14 @@ float normL1_(const float* a, const float* b, int n)
d = buf[0] + buf[1] + buf[2] + buf[3];
}
else
#elif CV_NEON
float32x4_t v_sum = vdupq_n_f32(0.0f);
for ( ; j <= n - 4; j += 4)
v_sum = vaddq_f32(v_sum, vabdq_f32(vld1q_f32(a + j), vld1q_f32(b + j)));
float CV_DECL_ALIGNED(16) buf[4];
vst1q_f32(buf, v_sum);
d = buf[0] + buf[1] + buf[2] + buf[3];
#endif
{
for( ; j <= n - 4; j += 4 )
@@ -1996,6 +2139,19 @@ int normL1_(const uchar* a, const uchar* b, int n)
d = _mm_cvtsi128_si32(_mm_add_epi32(d0, _mm_unpackhi_epi64(d0, d0)));
}
else
#elif CV_NEON
uint32x4_t v_sum = vdupq_n_u32(0u);
for ( ; j <= n - 16; j += 16)
{
uint8x16_t v_dst = vabdq_u8(vld1q_u8(a + j), vld1q_u8(b + j));
uint16x8_t v_low = vmovl_u8(vget_low_u8(v_dst)), v_high = vmovl_u8(vget_high_u8(v_dst));
v_sum = vaddq_u32(v_sum, vaddl_u16(vget_low_u16(v_low), vget_low_u16(v_high)));
v_sum = vaddq_u32(v_sum, vaddl_u16(vget_high_u16(v_low), vget_high_u16(v_high)));
}
uint CV_DECL_ALIGNED(16) buf[4];
vst1q_u32(buf, v_sum);
d = buf[0] + buf[1] + buf[2] + buf[3];
#endif
{
for( ; j <= n - 4; j += 4 )