Refactored NEON optimization usage
@@ -122,8 +122,13 @@ CV_INLINE IppiSize ippiSize(int width, int height)
 #if defined ANDROID && defined __ARM_NEON__
 #include "arm_neon.h"
 #define CV_NEON 1
+
+#define CPU_HAS_NEON_FEATURE (true)
+//TODO: make real check using stuff from "cpu-features.h"
+//((bool)android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON)
 #else
 #define CV_NEON 0
+#define CPU_HAS_NEON_FEATURE (false)
 #endif
 
 #ifndef IPPI_CALL
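The TODO above refers to the NDK's cpu-features helper library. A minimal sketch of what that runtime check could look like, assuming libcpufeatures is linked; the helper name cpuHasNeon is illustrative and not part of the patch:

    #include <cpu-features.h>

    static bool cpuHasNeon()
    {
        // android_getCpuFeatures() returns a bitmask; the NEON bit is only
        // meaningful for the ARM CPU family.
        return android_getCpuFamily() == ANDROID_CPU_FAMILY_ARM &&
               (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
    }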
@@ -44,11 +44,6 @@
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#if ANDROID && HAVE_NEON
|
||||
#include <cpu-features.h>
|
||||
#include <arm_neon.h>
|
||||
#endif
|
||||
|
||||
#include <iostream>
|
||||
#include <iomanip>
|
||||
|
||||
@@ -115,9 +110,8 @@ Hamming::ResultType Hamming::operator()(const unsigned char* a, const unsigned c
 {
 #if __GNUC__
     ResultType result = 0;
-#if ANDROID && HAVE_NEON
-    static uint64_t features = android_getCpuFeatures();
-    if ((features & ANDROID_CPU_ARM_FEATURE_NEON))
+#if CV_NEON
+    if (CPU_HAS_NEON_FEATURE)
     {
         for (size_t i = 0; i < size; i += 16)
         {
@@ -126,7 +120,7 @@ Hamming::ResultType Hamming::operator()(const unsigned char* a, const unsigned c
             //uint8x16_t veorq_u8 (uint8x16_t, uint8x16_t)
             uint8x16_t AxorB = veorq_u8 (A_vec, B_vec);
 
-            uint8x16_t bitsSet += vcntq_u8 (AxorB);
+            uint8x16_t bitsSet = vcntq_u8 (AxorB);
             //uint16x8_t vpadalq_u8 (uint16x8_t, uint8x16_t)
             uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet);
             uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8);
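For reference, a self-contained sketch of the per-16-byte reduction this loop performs with the <arm_neon.h> intrinsics shown above; the helper name hamming16 is illustrative only:

    #include <arm_neon.h>
    #include <stdint.h>

    static inline uint32_t hamming16(const uint8_t* a, const uint8_t* b)
    {
        uint8x16_t A_vec   = vld1q_u8(a);             // 16 bytes of each descriptor
        uint8x16_t B_vec   = vld1q_u8(b);
        uint8x16_t AxorB   = veorq_u8(A_vec, B_vec);  // differing bits become 1s
        uint8x16_t bitsSet = vcntq_u8(AxorB);         // per-byte popcount
        uint16x8_t bitSet8 = vpaddlq_u8(bitsSet);     // pairwise widen: 16 x u8 -> 8 x u16
        uint32x4_t bitSet4 = vpaddlq_u16(bitSet8);    // 8 x u16 -> 4 x u32
        uint64x2_t bitSet2 = vpaddlq_u32(bitSet4);    // 4 x u32 -> 2 x u64
        return (uint32_t)(vgetq_lane_u64(bitSet2, 0) + vgetq_lane_u64(bitSet2, 1));
    }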
@@ -138,25 +132,27 @@ Hamming::ResultType Hamming::operator()(const unsigned char* a, const unsigned c
     }
     else
 #endif
-    //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll)
-    typedef unsigned long long pop_t;
-    const size_t modulo = size % sizeof(pop_t);
-    const pop_t * a2 = reinterpret_cast<const pop_t*> (a);
-    const pop_t * b2 = reinterpret_cast<const pop_t*> (b);
-    const pop_t * a2_end = a2 + (size/sizeof(pop_t));
-
-    for (; a2 != a2_end; ++a2, ++b2)
-        result += __builtin_popcountll((*a2) ^ (*b2));
-
-    if (modulo)
-    {
-        //in the case where size is not divisible by sizeof(size_t)
-        //need to mask off the bits at the end
-        pop_t a_final=0,b_final=0;
-        memcpy(&a_final,a2,modulo);
-        memcpy(&b_final,b2,modulo);
-        result += __builtin_popcountll(a_final ^ b_final);
-    }
+    {
+        //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll)
+        typedef unsigned long long pop_t;
+        const size_t modulo = size % sizeof(pop_t);
+        const pop_t * a2 = reinterpret_cast<const pop_t*> (a);
+        const pop_t * b2 = reinterpret_cast<const pop_t*> (b);
+        const pop_t * a2_end = a2 + (size/sizeof(pop_t));
+
+        for (; a2 != a2_end; ++a2, ++b2)
+            result += __builtin_popcountll((*a2) ^ (*b2));
+
+        if (modulo)
+        {
+            //in the case where size is not divisible by sizeof(size_t)
+            //need to mask off the bits at the end
+            pop_t a_final=0,b_final=0;
+            memcpy(&a_final,a2,modulo);
+            memcpy(&b_final,b2,modulo);
+            result += __builtin_popcountll(a_final ^ b_final);
+        }
+    }
     return result;
 #else
     return HammingLUT()(a,b,size);
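A small standalone illustration of the tail handling in the scalar fallback: when size is not a multiple of sizeof(pop_t), the remaining bytes are copied into zero-initialized words, so the XOR/popcount never sees stray bits. This is a sketch under the same GCC assumption as the code above (__builtin_popcountll); the helper name and sample values are illustrative only:

    #include <cstring>
    #include <cstdio>

    typedef unsigned long long pop_t;

    static unsigned popcountTail(const unsigned char* a, const unsigned char* b, size_t modulo)
    {
        pop_t a_final = 0, b_final = 0;    // zero padding contributes no bits
        std::memcpy(&a_final, a, modulo);  // copy only the trailing 'modulo' bytes
        std::memcpy(&b_final, b, modulo);
        return __builtin_popcountll(a_final ^ b_final);
    }

    int main()
    {
        unsigned char a[3] = {0xFF, 0x0F, 0x01};
        unsigned char b[3] = {0x00, 0x0F, 0x00};
        std::printf("%u\n", popcountTail(a, b, 3)); // 8 + 0 + 1 = 9 differing bits
    }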