Merge pull request #2836 from s98felix:2.4
commit fc41e8850b
@@ -217,6 +217,7 @@ OCV_OPTION(ENABLE_SSSE3 "Enable SSSE3 instructions"
OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions" OFF IF ((CV_ICC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_AVX "Enable AVX instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_AVX2 "Enable AVX2 instructions" OFF IF ((MSVC OR CMAKE_COMPILER_IS_GNUCXX) AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_NEON "Enable NEON instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND ARM )
OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND ARM )
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )

@@ -140,7 +140,11 @@ if(CMAKE_COMPILER_IS_GNUCXX)
# SSE3 and further should be disabled under MingW because it generates compiler errors
if(NOT MINGW)
if(ENABLE_AVX)
add_extra_compiler_option(-mavx)
ocv_check_flag_support(CXX "-mavx" _varname)
endif()

if(ENABLE_AVX2)
ocv_check_flag_support(CXX "-mavx2" _varname)
endif()

# GCC depresses SSEx instructions when -mavx is used. Instead, it generates new AVX instructions or AVX equivalence for all SSEx instructions when needed.
@@ -216,10 +220,6 @@ if(MSVC)
set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} /Zi")
endif()

if(ENABLE_AVX AND NOT MSVC_VERSION LESS 1600)
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:AVX")
endif()

if(ENABLE_SSE4_1 AND CV_ICC AND NOT OPENCV_EXTRA_FLAGS MATCHES "/arch:")
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:SSE4.1")
endif()

@@ -238,7 +238,7 @@ if(MSVC)
endif()
endif()

if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX)
if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1 OR ENABLE_AVX OR ENABLE_AVX2)
set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /Oi")
endif()

@@ -526,6 +526,28 @@ macro(ocv_glob_module_sources)
list(APPEND lib_srcs ${cl_kernels} "${CMAKE_CURRENT_BINARY_DIR}/opencl_kernels.cpp" "${CMAKE_CURRENT_BINARY_DIR}/opencl_kernels.hpp")
endif()

if(ENABLE_AVX)
file(GLOB avx_srcs "src/avx/*.cpp")
foreach(src ${avx_srcs})
if(CMAKE_COMPILER_IS_GNUCXX)
set_source_files_properties(${src} PROPERTIES COMPILE_FLAGS -mavx)
elseif(MSVC AND NOT MSVC_VERSION LESS 1600)
set_source_files_properties(${src} PROPERTIES COMPILE_FLAGS /arch:AVX)
endif()
endforeach()
endif()

if(ENABLE_AVX2)
file(GLOB avx2_srcs "src/avx2/*.cpp")
foreach(src ${avx2_srcs})
if(CMAKE_COMPILER_IS_GNUCXX)
set_source_files_properties(${src} PROPERTIES COMPILE_FLAGS -mavx2)
elseif(MSVC AND NOT MSVC_VERSION LESS 1800)
set_source_files_properties(${src} PROPERTIES COMPILE_FLAGS /arch:AVX2)
endif()
endforeach()
endif()

source_group("Include" FILES ${lib_hdrs})
source_group("Include\\detail" FILES ${lib_hdrs_detail})

@@ -317,6 +317,7 @@ Returns true if the specified feature is supported by the host hardware.
* ``CV_CPU_SSE4_2`` - SSE 4.2
* ``CV_CPU_POPCNT`` - POPCOUNT
* ``CV_CPU_AVX`` - AVX
* ``CV_CPU_AVX2`` - AVX2

The function returns true if the host hardware supports the specified feature. When the user calls ``setUseOptimized(false)``, subsequent calls to ``checkHardwareSupport()`` return false until ``setUseOptimized(true)`` is called. This way the user can dynamically switch the optimized code in OpenCV on and off.
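The documented behaviour is easy to exercise directly. A minimal usage sketch, assuming the OpenCV 2.4 C++ API from this branch:

#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
    // Query the newly documented AVX2 flag alongside the existing AVX one.
    std::cout << "AVX:  " << cv::checkHardwareSupport(CV_CPU_AVX)  << "\n";
    std::cout << "AVX2: " << cv::checkHardwareSupport(CV_CPU_AVX2) << "\n";

    // After setUseOptimized(false) the same query reports false until
    // optimizations are re-enabled, as described above.
    cv::setUseOptimized(false);
    std::cout << "AVX2 (optimizations off): "
              << cv::checkHardwareSupport(CV_CPU_AVX2) << "\n";
    cv::setUseOptimized(true);
    return 0;
}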
@@ -284,6 +284,7 @@ CV_EXPORTS_W int64 getCPUTickCount();
- CV_CPU_SSE4_2 - SSE 4.2
- CV_CPU_POPCNT - POPCOUNT
- CV_CPU_AVX - AVX
- CV_CPU_AVX2 - AVX2

\note {Note that the function output is not static. Once you call cv::useOptimized(false),
most of the hardware acceleration is disabled and thus the function will return false,
@@ -1706,6 +1706,7 @@ CVAPI(double) cvGetTickFrequency( void );
#define CV_CPU_SSE4_2 7
#define CV_CPU_POPCNT 8
#define CV_CPU_AVX 10
#define CV_CPU_AVX2 11
#define CV_HARDWARE_MAX_FEATURE 255

CVAPI(int) cvCheckHardwareSupport(int feature);

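The same capability query is reachable from the C API declared here. A small sketch, assuming the 2.4 C headers:

#include <opencv2/core/core_c.h>
#include <stdio.h>

int main(void)
{
    /* cvCheckHardwareSupport() takes one of the CV_CPU_* constants above
       and returns non-zero when the feature is usable at runtime. */
    if (cvCheckHardwareSupport(CV_CPU_AVX2))
        printf("AVX2 kernels available\n");
    else
        printf("falling back to SSE/scalar kernels\n");
    return 0;
}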
@@ -141,6 +141,10 @@ CV_INLINE IppiSize ippiSize(const cv::Size & _size)
# define __xgetbv() 0
# endif
# endif
# if defined __AVX2__
# include <immintrin.h>
# define CV_AVX2 1
# endif
#endif

@@ -176,6 +180,9 @@ CV_INLINE IppiSize ippiSize(const cv::Size & _size)
#ifndef CV_AVX
# define CV_AVX 0
#endif
#ifndef CV_AVX2
# define CV_AVX2 0
#endif
#ifndef CV_NEON
# define CV_NEON 0
#endif

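These fallback definitions let source files test CV_AVX2 unconditionally, which is the pattern the new imgwarp_avx2.cpp uses further down. A minimal sketch of that pattern (the function itself is illustrative, not part of the patch):

#ifndef CV_AVX2
#  define CV_AVX2 0   // mirrors the fallback added above
#endif

#if CV_AVX2
#include <immintrin.h>
// Real AVX2 body: compiled only when the translation unit is built with
// -mavx2 (GCC) or /arch:AVX2 (MSVC >= 1800), as configured in the CMake hunks.
int add8_avx2(const int* a, const int* b, int* dst)
{
    __m256i va = _mm256_loadu_si256((const __m256i*)a);
    __m256i vb = _mm256_loadu_si256((const __m256i*)b);
    _mm256_storeu_si256((__m256i*)dst, _mm256_add_epi32(va, vb));
    return 8;                       // number of elements handled
}
#else
// Stub keeps the symbol available so callers can link unconditionally.
int add8_avx2(const int*, const int*, int*) { return 0; }
#endif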
@@ -253,6 +253,39 @@ struct HWFeatures
f.have[CV_CPU_AVX] = (((cpuid_data[2] & (1<<28)) != 0)&&((cpuid_data[2] & (1<<27)) != 0));//OS uses XSAVE_XRSTORE and CPU support AVX
}

#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
__cpuidex(cpuid_data, 7, 0);
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
#ifdef __x86_64__
asm __volatile__
(
"movl $7, %%eax\n\t"
"movl $0, %%ecx\n\t"
"cpuid\n\t"
:[eax]"=a"(cpuid_data[0]),[ebx]"=b"(cpuid_data[1]),[ecx]"=c"(cpuid_data[2]),[edx]"=d"(cpuid_data[3])
:
: "cc"
);
#else
asm volatile
(
"pushl %%ebx\n\t"
"movl $7,%%eax\n\t"
"movl $0,%%ecx\n\t"
"cpuid\n\t"
"popl %%ebx\n\t"
: "=a"(cpuid_data[0]), "=b"(cpuid_data[1]), "=c"(cpuid_data[2]), "=d"(cpuid_data[3])
:
: "cc"
);
#endif
#endif

if( f.x86_family >= 6 )
{
f.have[CV_CPU_AVX2] = (cpuid_data[1] & (1<<5)) != 0;
}

return f;
}

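For reference, the probe added above reads CPUID leaf 7, sub-leaf 0, and tests EBX bit 5. A hedged, self-contained sketch of the same check using compiler helpers instead of raw inline assembly (illustrative only, not the patch's implementation; unlike the CV_CPU_AVX check above, it does not verify OS support for the YMM state via XGETBV):

#if defined(_MSC_VER)
#  include <intrin.h>
#elif defined(__GNUC__)
#  include <cpuid.h>
#endif

static bool cpu_has_avx2()
{
    int ebx = 0;
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
    int regs[4] = {0, 0, 0, 0};
    __cpuidex(regs, 7, 0);          // EAX=7, ECX=0
    ebx = regs[1];
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    unsigned a = 0, b = 0, c = 0, d = 0;
    __cpuid_count(7, 0, a, b, c, d);
    ebx = (int)b;
#endif
    return (ebx & (1 << 5)) != 0;   // CPUID.(EAX=7,ECX=0):EBX.AVX2[bit 5]
}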
modules/imgproc/src/avx/imgwarp_avx.cpp (new file, 176 lines)
@@ -0,0 +1,176 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "imgwarp_avx.hpp"
|
||||
|
||||
#if CV_AVX
|
||||
int VResizeLinearVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
|
||||
{
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1];
|
||||
float* dst = (float*)_dst;
|
||||
int x = 0;
|
||||
|
||||
__m256 b0 = _mm256_set1_ps(beta[0]), b1 = _mm256_set1_ps(beta[1]);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&31) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1;
|
||||
x0 = _mm256_load_ps(S0 + x);
|
||||
x1 = _mm256_load_ps(S0 + x + 8);
|
||||
y0 = _mm256_load_ps(S1 + x);
|
||||
y1 = _mm256_load_ps(S1 + x + 8);
|
||||
|
||||
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
|
||||
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
|
||||
|
||||
_mm256_storeu_ps( dst + x, x0);
|
||||
_mm256_storeu_ps( dst + x + 8, x1);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1;
|
||||
x0 = _mm256_loadu_ps(S0 + x);
|
||||
x1 = _mm256_loadu_ps(S0 + x + 8);
|
||||
y0 = _mm256_loadu_ps(S1 + x);
|
||||
y1 = _mm256_loadu_ps(S1 + x + 8);
|
||||
|
||||
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
|
||||
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
|
||||
|
||||
_mm256_storeu_ps( dst + x, x0);
|
||||
_mm256_storeu_ps( dst + x + 8, x1);
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
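For orientation, the AVX routine above vectorizes a very small scalar kernel: every output value is a two-row blend, dst[x] = S0[x]*beta[0] + S1[x]*beta[1], processed 16 floats per iteration; the returned x tells the caller how many elements were handled so it can finish the tail. A scalar sketch of the same computation (illustrative only, not part of the patch):

static int VResizeLinearRef_32f(const float* S0, const float* S1,
                                float b0, float b1, float* dst, int width)
{
    // Plain-C equivalent of VResizeLinearVec_32f_avx, one element at a time.
    for (int x = 0; x < width; ++x)
        dst[x] = S0[x] * b0 + S1[x] * b1;
    return width;
}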
int VResizeCubicVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
|
||||
{
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
|
||||
float* dst = (float*)_dst;
|
||||
int x = 0;
|
||||
__m256 b0 = _mm256_set1_ps(beta[0]), b1 = _mm256_set1_ps(beta[1]),
|
||||
b2 = _mm256_set1_ps(beta[2]), b3 = _mm256_set1_ps(beta[3]);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&31) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1, s0, s1;
|
||||
x0 = _mm256_load_ps(S0 + x);
|
||||
x1 = _mm256_load_ps(S0 + x + 8);
|
||||
y0 = _mm256_load_ps(S1 + x);
|
||||
y1 = _mm256_load_ps(S1 + x + 8);
|
||||
|
||||
s0 = _mm256_mul_ps(x0, b0);
|
||||
s1 = _mm256_mul_ps(x1, b0);
|
||||
y0 = _mm256_mul_ps(y0, b1);
|
||||
y1 = _mm256_mul_ps(y1, b1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
x0 = _mm256_load_ps(S2 + x);
|
||||
x1 = _mm256_load_ps(S2 + x + 8);
|
||||
y0 = _mm256_load_ps(S3 + x);
|
||||
y1 = _mm256_load_ps(S3 + x + 8);
|
||||
|
||||
x0 = _mm256_mul_ps(x0, b2);
|
||||
x1 = _mm256_mul_ps(x1, b2);
|
||||
y0 = _mm256_mul_ps(y0, b3);
|
||||
y1 = _mm256_mul_ps(y1, b3);
|
||||
s0 = _mm256_add_ps(s0, x0);
|
||||
s1 = _mm256_add_ps(s1, x1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
_mm256_storeu_ps( dst + x, s0);
|
||||
_mm256_storeu_ps( dst + x + 8, s1);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1, s0, s1;
|
||||
x0 = _mm256_loadu_ps(S0 + x);
|
||||
x1 = _mm256_loadu_ps(S0 + x + 8);
|
||||
y0 = _mm256_loadu_ps(S1 + x);
|
||||
y1 = _mm256_loadu_ps(S1 + x + 8);
|
||||
|
||||
s0 = _mm256_mul_ps(x0, b0);
|
||||
s1 = _mm256_mul_ps(x1, b0);
|
||||
y0 = _mm256_mul_ps(y0, b1);
|
||||
y1 = _mm256_mul_ps(y1, b1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
x0 = _mm256_loadu_ps(S2 + x);
|
||||
x1 = _mm256_loadu_ps(S2 + x + 8);
|
||||
y0 = _mm256_loadu_ps(S3 + x);
|
||||
y1 = _mm256_loadu_ps(S3 + x + 8);
|
||||
|
||||
x0 = _mm256_mul_ps(x0, b2);
|
||||
x1 = _mm256_mul_ps(x1, b2);
|
||||
y0 = _mm256_mul_ps(y0, b3);
|
||||
y1 = _mm256_mul_ps(y1, b3);
|
||||
s0 = _mm256_add_ps(s0, x0);
|
||||
s1 = _mm256_add_ps(s1, x1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
_mm256_storeu_ps( dst + x, s0);
|
||||
_mm256_storeu_ps( dst + x + 8, s1);
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
#else
|
||||
int VResizeLinearVec_32f_avx(const uchar**, uchar*, const uchar*, int ) { return 0; }
|
||||
|
||||
int VResizeCubicVec_32f_avx(const uchar**, uchar*, const uchar*, int ) { return 0; }
|
||||
#endif
|
||||
|
||||
/* End of file. */
|
modules/imgproc/src/avx/imgwarp_avx.hpp (new file, 51 lines)
@@ -0,0 +1,51 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef _CV_IMGWARP_AVX_H_
|
||||
#define _CV_IMGWARP_AVX_H_
|
||||
|
||||
int VResizeLinearVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
|
||||
int VResizeCubicVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
|
||||
#endif
|
||||
|
||||
/* End of file. */
|
modules/imgproc/src/avx2/imgwarp_avx2.cpp (new file, 431 lines)
@@ -0,0 +1,431 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "../precomp.hpp"
|
||||
#include "imgwarp_avx2.hpp"
|
||||
|
||||
const int INTER_RESIZE_COEF_BITS=11;
|
||||
const int INTER_RESIZE_COEF_SCALE=1 << INTER_RESIZE_COEF_BITS;
|
||||
|
||||
#if CV_AVX2
|
||||
int VResizeLinearVec_32s8u_avx2(const uchar** _src, uchar* dst, const uchar* _beta, int width )
|
||||
{
|
||||
const int** src = (const int**)_src;
|
||||
const short* beta = (const short*)_beta;
|
||||
const int *S0 = src[0], *S1 = src[1];
|
||||
int x = 0;
|
||||
__m256i b0 = _mm256_set1_epi16(beta[0]), b1 = _mm256_set1_epi16(beta[1]);
|
||||
__m256i delta = _mm256_set1_epi16(2);
|
||||
const int index[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
|
||||
__m256i shuffle = _mm256_load_si256((const __m256i*)index);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&31) == 0 )
|
||||
for( ; x <= width - 32; x += 32 )
|
||||
{
|
||||
__m256i x0, x1, x2, y0, y1, y2;
|
||||
x0 = _mm256_load_si256((const __m256i*)(S0 + x));
|
||||
x1 = _mm256_load_si256((const __m256i*)(S0 + x + 8));
|
||||
y0 = _mm256_load_si256((const __m256i*)(S1 + x));
|
||||
y1 = _mm256_load_si256((const __m256i*)(S1 + x + 8));
|
||||
x0 = _mm256_packs_epi32(_mm256_srai_epi32(x0, 4), _mm256_srai_epi32(x1, 4));
|
||||
y0 = _mm256_packs_epi32(_mm256_srai_epi32(y0, 4), _mm256_srai_epi32(y1, 4));
|
||||
|
||||
x1 = _mm256_load_si256((const __m256i*)(S0 + x + 16));
|
||||
x2 = _mm256_load_si256((const __m256i*)(S0 + x + 24));
|
||||
y1 = _mm256_load_si256((const __m256i*)(S1 + x + 16));
|
||||
y2 = _mm256_load_si256((const __m256i*)(S1 + x + 24));
|
||||
x1 = _mm256_packs_epi32(_mm256_srai_epi32(x1, 4), _mm256_srai_epi32(x2, 4));
|
||||
y1 = _mm256_packs_epi32(_mm256_srai_epi32(y1, 4), _mm256_srai_epi32(y2, 4));
|
||||
|
||||
x0 = _mm256_adds_epi16(_mm256_mulhi_epi16(x0, b0), _mm256_mulhi_epi16(y0, b1));
|
||||
x1 = _mm256_adds_epi16(_mm256_mulhi_epi16(x1, b0), _mm256_mulhi_epi16(y1, b1));
|
||||
|
||||
x0 = _mm256_srai_epi16(_mm256_adds_epi16(x0, delta), 2);
|
||||
x1 = _mm256_srai_epi16(_mm256_adds_epi16(x1, delta), 2);
|
||||
x0 = _mm256_packus_epi16(x0, x1);
|
||||
x0 = _mm256_permutevar8x32_epi32(x0, shuffle);
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x), x0);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 32; x += 32 )
|
||||
{
|
||||
__m256i x0, x1, x2, y0, y1, y2;
|
||||
x0 = _mm256_loadu_si256((const __m256i*)(S0 + x));
|
||||
x1 = _mm256_loadu_si256((const __m256i*)(S0 + x + 8));
|
||||
y0 = _mm256_loadu_si256((const __m256i*)(S1 + x));
|
||||
y1 = _mm256_loadu_si256((const __m256i*)(S1 + x + 8));
|
||||
x0 = _mm256_packs_epi32(_mm256_srai_epi32(x0, 4), _mm256_srai_epi32(x1, 4));
|
||||
y0 = _mm256_packs_epi32(_mm256_srai_epi32(y0, 4), _mm256_srai_epi32(y1, 4));
|
||||
|
||||
x1 = _mm256_loadu_si256((const __m256i*)(S0 + x + 16));
|
||||
x2 = _mm256_loadu_si256((const __m256i*)(S0 + x + 24));
|
||||
y1 = _mm256_loadu_si256((const __m256i*)(S1 + x + 16));
|
||||
y2 = _mm256_loadu_si256((const __m256i*)(S1 + x + 24));
|
||||
x1 = _mm256_packs_epi32(_mm256_srai_epi32(x1, 4), _mm256_srai_epi32(x2, 4));
|
||||
y1 = _mm256_packs_epi32(_mm256_srai_epi32(y1, 4), _mm256_srai_epi32(y2, 4));
|
||||
|
||||
x0 = _mm256_adds_epi16(_mm256_mulhi_epi16(x0, b0), _mm256_mulhi_epi16(y0, b1));
|
||||
x1 = _mm256_adds_epi16(_mm256_mulhi_epi16(x1, b0), _mm256_mulhi_epi16(y1, b1));
|
||||
|
||||
x0 = _mm256_srai_epi16(_mm256_adds_epi16(x0, delta), 2);
|
||||
x1 = _mm256_srai_epi16(_mm256_adds_epi16(x1, delta), 2);
|
||||
x0 = _mm256_packus_epi16(x0, x1);
|
||||
x0 = _mm256_permutevar8x32_epi32(x0, shuffle);
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x), x0);
|
||||
}
|
||||
|
||||
for( ; x < width - 8; x += 8 )
|
||||
{
|
||||
__m256i x0, y0;
|
||||
x0 = _mm256_srai_epi32(_mm256_loadu_si256((const __m256i*)(S0 + x)), 4);
|
||||
y0 = _mm256_srai_epi32(_mm256_loadu_si256((const __m256i*)(S1 + x)), 4);
|
||||
x0 = _mm256_packs_epi32(x0, x0);
|
||||
y0 = _mm256_packs_epi32(y0, y0);
|
||||
x0 = _mm256_adds_epi16(_mm256_mulhi_epi16(x0, b0), _mm256_mulhi_epi16(y0, b1));
|
||||
x0 = _mm256_srai_epi16(_mm256_adds_epi16(x0, delta), 2);
|
||||
x0 = _mm256_packus_epi16(x0, x0);
|
||||
*(int*)(dst + x) = _mm_cvtsi128_si32(_mm256_extracti128_si256(x0, 0));
|
||||
*(int*)(dst + x + 4) = _mm_cvtsi128_si32(_mm256_extracti128_si256(x0, 1));
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
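The integer kernel above mirrors the original SSE2 arithmetic at double the vector width: the int32 inputs carry the fixed-point output of the horizontal pass, beta[] holds short fixed-point weights, and the >>4 / mulhi / +2 / >>2 sequence performs the weighted sum with rounding before saturating to 8 bits. Roughly, per element (a sketch that ignores the intermediate int16 saturation the pack instructions provide):

static inline unsigned char vresize_linear_fixed(int s0, int s1, short b0, short b1)
{
    // _mm256_mulhi_epi16 keeps the high 16 bits of a 16x16-bit product.
    int t = (((s0 >> 4) * b0) >> 16) + (((s1 >> 4) * b1) >> 16);
    t = (t + 2) >> 2;               // add the 'delta' and drop two more bits: rounding
    if (t < 0)   t = 0;             // _mm256_packus_epi16 saturation to [0, 255]
    if (t > 255) t = 255;
    return (unsigned char)t;
}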
|
||||
template<int shiftval>
|
||||
int VResizeLinearVec_32f16_avx2(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
|
||||
{
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1];
|
||||
ushort* dst = (ushort*)_dst;
|
||||
int x = 0;
|
||||
|
||||
__m256 b0 = _mm256_set1_ps(beta[0]), b1 = _mm256_set1_ps(beta[1]);
|
||||
__m256i preshift = _mm256_set1_epi32(shiftval);
|
||||
__m256i postshift = _mm256_set1_epi16((short)shiftval);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&31) == 0 )
|
||||
for( ; x <= width - 32; x += 32 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1;
|
||||
__m256i t0, t1, t2;
|
||||
x0 = _mm256_load_ps(S0 + x);
|
||||
x1 = _mm256_load_ps(S0 + x + 8);
|
||||
y0 = _mm256_load_ps(S1 + x);
|
||||
y1 = _mm256_load_ps(S1 + x + 8);
|
||||
|
||||
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
|
||||
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
|
||||
t0 = _mm256_add_epi32(_mm256_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm256_add_epi32(_mm256_cvtps_epi32(x1), preshift);
|
||||
t0 = _mm256_add_epi16(_mm256_packs_epi32(t0, t2), postshift);
|
||||
|
||||
x0 = _mm256_load_ps(S0 + x + 16);
|
||||
x1 = _mm256_load_ps(S0 + x + 24);
|
||||
y0 = _mm256_load_ps(S1 + x + 16);
|
||||
y1 = _mm256_load_ps(S1 + x + 24);
|
||||
|
||||
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
|
||||
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
|
||||
t1 = _mm256_add_epi32(_mm256_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm256_add_epi32(_mm256_cvtps_epi32(x1), preshift);
|
||||
t1 = _mm256_add_epi16(_mm256_packs_epi32(t1, t2), postshift);
|
||||
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x), t0);
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x + 16), t1);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 32; x += 32 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1;
|
||||
__m256i t0, t1, t2;
|
||||
x0 = _mm256_loadu_ps(S0 + x);
|
||||
x1 = _mm256_loadu_ps(S0 + x + 8);
|
||||
y0 = _mm256_loadu_ps(S1 + x);
|
||||
y1 = _mm256_loadu_ps(S1 + x + 8);
|
||||
|
||||
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
|
||||
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
|
||||
t0 = _mm256_add_epi32(_mm256_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm256_add_epi32(_mm256_cvtps_epi32(x1), preshift);
|
||||
t0 = _mm256_add_epi16(_mm256_packs_epi32(t0, t2), postshift);
|
||||
|
||||
x0 = _mm256_loadu_ps(S0 + x + 16);
|
||||
x1 = _mm256_loadu_ps(S0 + x + 24);
|
||||
y0 = _mm256_loadu_ps(S1 + x + 16);
|
||||
y1 = _mm256_loadu_ps(S1 + x + 24);
|
||||
|
||||
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
|
||||
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
|
||||
t1 = _mm256_add_epi32(_mm256_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm256_add_epi32(_mm256_cvtps_epi32(x1), preshift);
|
||||
t1 = _mm256_add_epi16(_mm256_packs_epi32(t1, t2), postshift);
|
||||
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x), t0);
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x + 16), t1);
|
||||
}
|
||||
|
||||
for( ; x < width - 8; x += 8 )
|
||||
{
|
||||
__m256 x0, y0;
|
||||
__m256i t0;
|
||||
x0 = _mm256_loadu_ps(S0 + x);
|
||||
y0 = _mm256_loadu_ps(S1 + x);
|
||||
|
||||
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
|
||||
t0 = _mm256_add_epi32(_mm256_cvtps_epi32(x0), preshift);
|
||||
t0 = _mm256_add_epi16(_mm256_packs_epi32(t0, t0), postshift);
|
||||
_mm_storel_epi64( (__m128i*)(dst + x), _mm256_extracti128_si256(t0, 0));
|
||||
_mm_storel_epi64( (__m128i*)(dst + x + 4), _mm256_extracti128_si256(t0, 1));
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
int VResizeCubicVec_32s8u_avx2(const uchar** _src, uchar* dst, const uchar* _beta, int width )
|
||||
{
|
||||
const int** src = (const int**)_src;
|
||||
const short* beta = (const short*)_beta;
|
||||
const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
|
||||
int x = 0;
|
||||
float scale = 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE);
|
||||
__m256 b0 = _mm256_set1_ps(beta[0]*scale), b1 = _mm256_set1_ps(beta[1]*scale),
|
||||
b2 = _mm256_set1_ps(beta[2]*scale), b3 = _mm256_set1_ps(beta[3]*scale);
|
||||
const int shuffle = 0xd8; // 11 | 01 | 10 | 00
|
||||
|
||||
if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&31) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256i x0, x1, y0, y1;
|
||||
__m256 s0, s1, f0, f1;
|
||||
x0 = _mm256_load_si256((const __m256i*)(S0 + x));
|
||||
x1 = _mm256_load_si256((const __m256i*)(S0 + x + 8));
|
||||
y0 = _mm256_load_si256((const __m256i*)(S1 + x));
|
||||
y1 = _mm256_load_si256((const __m256i*)(S1 + x + 8));
|
||||
|
||||
s0 = _mm256_mul_ps(_mm256_cvtepi32_ps(x0), b0);
|
||||
s1 = _mm256_mul_ps(_mm256_cvtepi32_ps(x1), b0);
|
||||
f0 = _mm256_mul_ps(_mm256_cvtepi32_ps(y0), b1);
|
||||
f1 = _mm256_mul_ps(_mm256_cvtepi32_ps(y1), b1);
|
||||
s0 = _mm256_add_ps(s0, f0);
|
||||
s1 = _mm256_add_ps(s1, f1);
|
||||
|
||||
x0 = _mm256_load_si256((const __m256i*)(S2 + x));
|
||||
x1 = _mm256_load_si256((const __m256i*)(S2 + x + 8));
|
||||
y0 = _mm256_load_si256((const __m256i*)(S3 + x));
|
||||
y1 = _mm256_load_si256((const __m256i*)(S3 + x + 8));
|
||||
|
||||
f0 = _mm256_mul_ps(_mm256_cvtepi32_ps(x0), b2);
|
||||
f1 = _mm256_mul_ps(_mm256_cvtepi32_ps(x1), b2);
|
||||
s0 = _mm256_add_ps(s0, f0);
|
||||
s1 = _mm256_add_ps(s1, f1);
|
||||
f0 = _mm256_mul_ps(_mm256_cvtepi32_ps(y0), b3);
|
||||
f1 = _mm256_mul_ps(_mm256_cvtepi32_ps(y1), b3);
|
||||
s0 = _mm256_add_ps(s0, f0);
|
||||
s1 = _mm256_add_ps(s1, f1);
|
||||
|
||||
x0 = _mm256_cvtps_epi32(s0);
|
||||
x1 = _mm256_cvtps_epi32(s1);
|
||||
|
||||
x0 = _mm256_packs_epi32(x0, x1);
|
||||
x0 = _mm256_permute4x64_epi64(x0, shuffle);
|
||||
x0 = _mm256_packus_epi16(x0, x0);
|
||||
_mm_storel_epi64( (__m128i*)(dst + x), _mm256_extracti128_si256(x0, 0));
|
||||
_mm_storel_epi64( (__m128i*)(dst + x + 8), _mm256_extracti128_si256(x0, 1));
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256i x0, x1, y0, y1;
|
||||
__m256 s0, s1, f0, f1;
|
||||
x0 = _mm256_loadu_si256((const __m256i*)(S0 + x));
|
||||
x1 = _mm256_loadu_si256((const __m256i*)(S0 + x + 8));
|
||||
y0 = _mm256_loadu_si256((const __m256i*)(S1 + x));
|
||||
y1 = _mm256_loadu_si256((const __m256i*)(S1 + x + 8));
|
||||
|
||||
s0 = _mm256_mul_ps(_mm256_cvtepi32_ps(x0), b0);
|
||||
s1 = _mm256_mul_ps(_mm256_cvtepi32_ps(x1), b0);
|
||||
f0 = _mm256_mul_ps(_mm256_cvtepi32_ps(y0), b1);
|
||||
f1 = _mm256_mul_ps(_mm256_cvtepi32_ps(y1), b1);
|
||||
s0 = _mm256_add_ps(s0, f0);
|
||||
s1 = _mm256_add_ps(s1, f1);
|
||||
|
||||
x0 = _mm256_loadu_si256((const __m256i*)(S2 + x));
|
||||
x1 = _mm256_loadu_si256((const __m256i*)(S2 + x + 8));
|
||||
y0 = _mm256_loadu_si256((const __m256i*)(S3 + x));
|
||||
y1 = _mm256_loadu_si256((const __m256i*)(S3 + x + 8));
|
||||
|
||||
f0 = _mm256_mul_ps(_mm256_cvtepi32_ps(x0), b2);
|
||||
f1 = _mm256_mul_ps(_mm256_cvtepi32_ps(x1), b2);
|
||||
s0 = _mm256_add_ps(s0, f0);
|
||||
s1 = _mm256_add_ps(s1, f1);
|
||||
f0 = _mm256_mul_ps(_mm256_cvtepi32_ps(y0), b3);
|
||||
f1 = _mm256_mul_ps(_mm256_cvtepi32_ps(y1), b3);
|
||||
s0 = _mm256_add_ps(s0, f0);
|
||||
s1 = _mm256_add_ps(s1, f1);
|
||||
|
||||
x0 = _mm256_cvtps_epi32(s0);
|
||||
x1 = _mm256_cvtps_epi32(s1);
|
||||
|
||||
x0 = _mm256_packs_epi32(x0, x1);
|
||||
x0 = _mm256_permute4x64_epi64(x0, shuffle);
|
||||
x0 = _mm256_packus_epi16(x0, x0);
|
||||
_mm_storel_epi64( (__m128i*)(dst + x), _mm256_extracti128_si256(x0, 0));
|
||||
_mm_storel_epi64( (__m128i*)(dst + x + 8), _mm256_extracti128_si256(x0, 1));
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
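In the cubic path above, the short fixed-point weights are converted back to floats up front: scale = 1/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE) with INTER_RESIZE_COEF_SCALE = 1 << 11, so multiplying the int32 intermediates by beta[i]*scale undoes the fixed-point scaling of both resize passes in one step. A scalar sketch of one output pixel (illustrative; the SIMD code rounds via _mm256_cvtps_epi32):

static inline unsigned char vresize_cubic_ref(const int* S0, const int* S1,
                                              const int* S2, const int* S3,
                                              const short* beta, int x)
{
    const float scale = 1.f / (2048.f * 2048.f);   // 1/INTER_RESIZE_COEF_SCALE^2
    float v = S0[x] * (beta[0] * scale) + S1[x] * (beta[1] * scale)
            + S2[x] * (beta[2] * scale) + S3[x] * (beta[3] * scale);
    int t = (int)(v + 0.5f);                       // approximate round-to-nearest
    if (t < 0)   t = 0;                            // saturate to 8 bits
    if (t > 255) t = 255;
    return (unsigned char)t;
}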
|
||||
template<int shiftval>
|
||||
int VResizeCubicVec_32f16_avx2(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
|
||||
{
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
|
||||
ushort* dst = (ushort*)_dst;
|
||||
int x = 0;
|
||||
__m256 b0 = _mm256_set1_ps(beta[0]), b1 = _mm256_set1_ps(beta[1]),
|
||||
b2 = _mm256_set1_ps(beta[2]), b3 = _mm256_set1_ps(beta[3]);
|
||||
__m256i preshift = _mm256_set1_epi32(shiftval);
|
||||
__m256i postshift = _mm256_set1_epi16((short)shiftval);
|
||||
const int shuffle = 0xd8; // 11 | 01 | 10 | 00
|
||||
|
||||
if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&31) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1, s0, s1;
|
||||
__m256i t0, t1;
|
||||
x0 = _mm256_load_ps(S0 + x);
|
||||
x1 = _mm256_load_ps(S0 + x + 8);
|
||||
y0 = _mm256_load_ps(S1 + x);
|
||||
y1 = _mm256_load_ps(S1 + x + 8);
|
||||
|
||||
s0 = _mm256_mul_ps(x0, b0);
|
||||
s1 = _mm256_mul_ps(x1, b0);
|
||||
y0 = _mm256_mul_ps(y0, b1);
|
||||
y1 = _mm256_mul_ps(y1, b1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
x0 = _mm256_load_ps(S2 + x);
|
||||
x1 = _mm256_load_ps(S2 + x + 8);
|
||||
y0 = _mm256_load_ps(S3 + x);
|
||||
y1 = _mm256_load_ps(S3 + x + 8);
|
||||
|
||||
x0 = _mm256_mul_ps(x0, b2);
|
||||
x1 = _mm256_mul_ps(x1, b2);
|
||||
y0 = _mm256_mul_ps(y0, b3);
|
||||
y1 = _mm256_mul_ps(y1, b3);
|
||||
s0 = _mm256_add_ps(s0, x0);
|
||||
s1 = _mm256_add_ps(s1, x1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
t0 = _mm256_add_epi32(_mm256_cvtps_epi32(s0), preshift);
|
||||
t1 = _mm256_add_epi32(_mm256_cvtps_epi32(s1), preshift);
|
||||
|
||||
t0 = _mm256_add_epi16(_mm256_packs_epi32(t0, t1), postshift);
|
||||
t0 = _mm256_permute4x64_epi64(t0, shuffle);
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x), t0);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m256 x0, x1, y0, y1, s0, s1;
|
||||
__m256i t0, t1;
|
||||
x0 = _mm256_loadu_ps(S0 + x);
|
||||
x1 = _mm256_loadu_ps(S0 + x + 8);
|
||||
y0 = _mm256_loadu_ps(S1 + x);
|
||||
y1 = _mm256_loadu_ps(S1 + x + 8);
|
||||
|
||||
s0 = _mm256_mul_ps(x0, b0);
|
||||
s1 = _mm256_mul_ps(x1, b0);
|
||||
y0 = _mm256_mul_ps(y0, b1);
|
||||
y1 = _mm256_mul_ps(y1, b1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
x0 = _mm256_loadu_ps(S2 + x);
|
||||
x1 = _mm256_loadu_ps(S2 + x + 8);
|
||||
y0 = _mm256_loadu_ps(S3 + x);
|
||||
y1 = _mm256_loadu_ps(S3 + x + 8);
|
||||
|
||||
x0 = _mm256_mul_ps(x0, b2);
|
||||
x1 = _mm256_mul_ps(x1, b2);
|
||||
y0 = _mm256_mul_ps(y0, b3);
|
||||
y1 = _mm256_mul_ps(y1, b3);
|
||||
s0 = _mm256_add_ps(s0, x0);
|
||||
s1 = _mm256_add_ps(s1, x1);
|
||||
s0 = _mm256_add_ps(s0, y0);
|
||||
s1 = _mm256_add_ps(s1, y1);
|
||||
|
||||
t0 = _mm256_add_epi32(_mm256_cvtps_epi32(s0), preshift);
|
||||
t1 = _mm256_add_epi32(_mm256_cvtps_epi32(s1), preshift);
|
||||
|
||||
t0 = _mm256_add_epi16(_mm256_packs_epi32(t0, t1), postshift);
|
||||
t0 = _mm256_permute4x64_epi64(t0, shuffle);
|
||||
_mm256_storeu_si256( (__m256i*)(dst + x), t0);
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
#else
|
||||
int VResizeLinearVec_32s8u_avx2(const uchar**, uchar*, const uchar*, int ) { return 0; }
|
||||
|
||||
template<int shiftval>
|
||||
int VResizeLinearVec_32f16_avx2(const uchar**, uchar*, const uchar*, int ) { return 0; }
|
||||
|
||||
int VResizeCubicVec_32s8u_avx2(const uchar**, uchar*, const uchar*, int ) { return 0; }
|
||||
|
||||
template<int shiftval>
|
||||
int VResizeCubicVec_32f16_avx2(const uchar**, uchar*, const uchar*, int ) { return 0; }
|
||||
#endif
|
||||
|
||||
// Template instantiations.
|
||||
template int VResizeLinearVec_32f16_avx2<SHRT_MIN>(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
template int VResizeLinearVec_32f16_avx2<0>(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
|
||||
template int VResizeCubicVec_32f16_avx2<SHRT_MIN>(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
template int VResizeCubicVec_32f16_avx2<0>(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
|
||||
/* End of file. */
|
modules/imgproc/src/avx2/imgwarp_avx2.hpp (new file, 57 lines)
@@ -0,0 +1,57 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef _CV_IMGWARP_AVX2_H_
|
||||
#define _CV_IMGWARP_AVX2_H_
|
||||
|
||||
int VResizeLinearVec_32s8u_avx2(const uchar** _src, uchar* dst, const uchar* _beta, int width );
|
||||
|
||||
template<int shiftval>
|
||||
int VResizeLinearVec_32f16_avx2(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
|
||||
int VResizeCubicVec_32s8u_avx2(const uchar** _src, uchar* dst, const uchar* _beta, int width );
|
||||
|
||||
template<int shiftval>
|
||||
int VResizeCubicVec_32f16_avx2(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
|
||||
|
||||
#endif
|
||||
|
||||
/* End of file. */
|
@@ -47,6 +47,8 @@
// */

#include "precomp.hpp"
#include "avx/imgwarp_avx.hpp"
#include "avx2/imgwarp_avx2.hpp"
#include <iostream>
#include <vector>

@@ -451,350 +453,423 @@ struct HResizeNoVec

#if CV_SSE2

static int VResizeLinearVec_32s8u_sse2(const uchar** _src, uchar* dst, const uchar* _beta, int width )
|
||||
{
|
||||
const int** src = (const int**)_src;
|
||||
const short* beta = (const short*)_beta;
|
||||
const int *S0 = src[0], *S1 = src[1];
|
||||
int x = 0;
|
||||
__m128i b0 = _mm_set1_epi16(beta[0]), b1 = _mm_set1_epi16(beta[1]);
|
||||
__m128i delta = _mm_set1_epi16(2);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&15) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128i x0, x1, x2, y0, y1, y2;
|
||||
x0 = _mm_load_si128((const __m128i*)(S0 + x));
|
||||
x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
|
||||
y0 = _mm_load_si128((const __m128i*)(S1 + x));
|
||||
y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));
|
||||
x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
|
||||
y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
|
||||
|
||||
x1 = _mm_load_si128((const __m128i*)(S0 + x + 8));
|
||||
x2 = _mm_load_si128((const __m128i*)(S0 + x + 12));
|
||||
y1 = _mm_load_si128((const __m128i*)(S1 + x + 8));
|
||||
y2 = _mm_load_si128((const __m128i*)(S1 + x + 12));
|
||||
x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
|
||||
y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
|
||||
|
||||
x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
|
||||
x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
|
||||
|
||||
x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
|
||||
x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128i x0, x1, x2, y0, y1, y2;
|
||||
x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
|
||||
x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
|
||||
y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
|
||||
y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));
|
||||
x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
|
||||
y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
|
||||
|
||||
x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 8));
|
||||
x2 = _mm_loadu_si128((const __m128i*)(S0 + x + 12));
|
||||
y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 8));
|
||||
y2 = _mm_loadu_si128((const __m128i*)(S1 + x + 12));
|
||||
x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
|
||||
y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
|
||||
|
||||
x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
|
||||
x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
|
||||
|
||||
x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
|
||||
x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
|
||||
}
|
||||
|
||||
for( ; x < width - 4; x += 4 )
|
||||
{
|
||||
__m128i x0, y0;
|
||||
x0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S0 + x)), 4);
|
||||
y0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S1 + x)), 4);
|
||||
x0 = _mm_packs_epi32(x0, x0);
|
||||
y0 = _mm_packs_epi32(y0, y0);
|
||||
x0 = _mm_adds_epi16(_mm_mulhi_epi16(x0, b0), _mm_mulhi_epi16(y0, b1));
|
||||
x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
|
||||
x0 = _mm_packus_epi16(x0, x0);
|
||||
*(int*)(dst + x) = _mm_cvtsi128_si32(x0);
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
struct VResizeLinearVec_32s8u
|
||||
{
|
||||
int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
|
||||
{
|
||||
if( !checkHardwareSupport(CV_CPU_SSE2) )
|
||||
return 0;
|
||||
int processed = 0;
|
||||
|
||||
const int** src = (const int**)_src;
|
||||
const short* beta = (const short*)_beta;
|
||||
const int *S0 = src[0], *S1 = src[1];
|
||||
int x = 0;
|
||||
__m128i b0 = _mm_set1_epi16(beta[0]), b1 = _mm_set1_epi16(beta[1]);
|
||||
__m128i delta = _mm_set1_epi16(2);
|
||||
if( checkHardwareSupport(CV_CPU_AVX2) )
|
||||
processed += VResizeLinearVec_32s8u_avx2(_src, dst, _beta, width);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&15) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128i x0, x1, x2, y0, y1, y2;
|
||||
x0 = _mm_load_si128((const __m128i*)(S0 + x));
|
||||
x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
|
||||
y0 = _mm_load_si128((const __m128i*)(S1 + x));
|
||||
y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));
|
||||
x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
|
||||
y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
|
||||
if( !processed && checkHardwareSupport(CV_CPU_SSE2) )
|
||||
processed += VResizeLinearVec_32s8u_sse2(_src, dst, _beta, width);
|
||||
|
||||
x1 = _mm_load_si128((const __m128i*)(S0 + x + 8));
|
||||
x2 = _mm_load_si128((const __m128i*)(S0 + x + 12));
|
||||
y1 = _mm_load_si128((const __m128i*)(S1 + x + 8));
|
||||
y2 = _mm_load_si128((const __m128i*)(S1 + x + 12));
|
||||
x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
|
||||
y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
|
||||
|
||||
x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
|
||||
x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
|
||||
|
||||
x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
|
||||
x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128i x0, x1, x2, y0, y1, y2;
|
||||
x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
|
||||
x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
|
||||
y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
|
||||
y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));
|
||||
x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4));
|
||||
y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4));
|
||||
|
||||
x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 8));
|
||||
x2 = _mm_loadu_si128((const __m128i*)(S0 + x + 12));
|
||||
y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 8));
|
||||
y2 = _mm_loadu_si128((const __m128i*)(S1 + x + 12));
|
||||
x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4));
|
||||
y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4));
|
||||
|
||||
x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 ));
|
||||
x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 ));
|
||||
|
||||
x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
|
||||
x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1));
|
||||
}
|
||||
|
||||
for( ; x < width - 4; x += 4 )
|
||||
{
|
||||
__m128i x0, y0;
|
||||
x0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S0 + x)), 4);
|
||||
y0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S1 + x)), 4);
|
||||
x0 = _mm_packs_epi32(x0, x0);
|
||||
y0 = _mm_packs_epi32(y0, y0);
|
||||
x0 = _mm_adds_epi16(_mm_mulhi_epi16(x0, b0), _mm_mulhi_epi16(y0, b1));
|
||||
x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2);
|
||||
x0 = _mm_packus_epi16(x0, x0);
|
||||
*(int*)(dst + x) = _mm_cvtsi128_si32(x0);
|
||||
}
|
||||
|
||||
return x;
|
||||
return processed;
|
||||
}
|
||||
};
|
||||
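The hunk above interleaves the removed SSE2-only operator() body with the added dispatching one, which makes it hard to read. Untangled from the added lines, the new functor amounts roughly to the following sketch (a reconstruction for readability, not verbatim patch text; it lives inside modules/imgproc/src/imgwarp.cpp, where uchar and checkHardwareSupport are in scope):

struct VResizeLinearVec_32s8u
{
    int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width) const
    {
        int processed = 0;
        // Prefer the new AVX2 kernel when the CPU supports it.
        if( checkHardwareSupport(CV_CPU_AVX2) )
            processed += VResizeLinearVec_32s8u_avx2(_src, dst, _beta, width);
        // Otherwise fall back to the original SSE2 kernel, now a free function.
        if( !processed && checkHardwareSupport(CV_CPU_SSE2) )
            processed += VResizeLinearVec_32s8u_sse2(_src, dst, _beta, width);
        return processed;   // elements handled; the caller finishes the tail
    }
};

The same AVX2-first, SSE-fallback dispatch is applied to the other VResize* functors that follow in this hunk.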
|
||||
template<int shiftval>
|
||||
int VResizeLinearVec_32f16_sse2(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
|
||||
{
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1];
|
||||
ushort* dst = (ushort*)_dst;
|
||||
int x = 0;
|
||||
|
||||
__m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
|
||||
__m128i preshift = _mm_set1_epi32(shiftval);
|
||||
__m128i postshift = _mm_set1_epi16((short)shiftval);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&15) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
__m128i t0, t1, t2;
|
||||
x0 = _mm_load_ps(S0 + x);
|
||||
x1 = _mm_load_ps(S0 + x + 4);
|
||||
y0 = _mm_load_ps(S1 + x);
|
||||
y1 = _mm_load_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
|
||||
|
||||
x0 = _mm_load_ps(S0 + x + 8);
|
||||
x1 = _mm_load_ps(S0 + x + 12);
|
||||
y0 = _mm_load_ps(S1 + x + 8);
|
||||
y1 = _mm_load_ps(S1 + x + 12);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
|
||||
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), t0);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
__m128i t0, t1, t2;
|
||||
x0 = _mm_loadu_ps(S0 + x);
|
||||
x1 = _mm_loadu_ps(S0 + x + 4);
|
||||
y0 = _mm_loadu_ps(S1 + x);
|
||||
y1 = _mm_loadu_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
|
||||
|
||||
x0 = _mm_loadu_ps(S0 + x + 8);
|
||||
x1 = _mm_loadu_ps(S0 + x + 12);
|
||||
y0 = _mm_loadu_ps(S1 + x + 8);
|
||||
y1 = _mm_loadu_ps(S1 + x + 12);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
|
||||
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), t0);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
|
||||
}
|
||||
|
||||
for( ; x < width - 4; x += 4 )
|
||||
{
|
||||
__m128 x0, y0;
|
||||
__m128i t0;
|
||||
x0 = _mm_loadu_ps(S0 + x);
|
||||
y0 = _mm_loadu_ps(S1 + x);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t0 = _mm_add_epi16(_mm_packs_epi32(t0, t0), postshift);
|
||||
_mm_storel_epi64( (__m128i*)(dst + x), t0);
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
template<int shiftval> struct VResizeLinearVec_32f16
|
||||
{
|
||||
int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
|
||||
{
|
||||
if( !checkHardwareSupport(CV_CPU_SSE2) )
|
||||
return 0;
|
||||
int processed = 0;
|
||||
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1];
|
||||
ushort* dst = (ushort*)_dst;
|
||||
int x = 0;
|
||||
if( checkHardwareSupport(CV_CPU_AVX2) )
|
||||
processed += VResizeLinearVec_32f16_avx2<shiftval>(_src, _dst, _beta, width);
|
||||
|
||||
__m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
|
||||
__m128i preshift = _mm_set1_epi32(shiftval);
|
||||
__m128i postshift = _mm_set1_epi16((short)shiftval);
|
||||
if( !processed && checkHardwareSupport(CV_CPU_SSE2) )
|
||||
processed += VResizeLinearVec_32f16_sse2<shiftval>(_src, _dst, _beta, width);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&15) == 0 )
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
__m128i t0, t1, t2;
|
||||
x0 = _mm_load_ps(S0 + x);
|
||||
x1 = _mm_load_ps(S0 + x + 4);
|
||||
y0 = _mm_load_ps(S1 + x);
|
||||
y1 = _mm_load_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
|
||||
|
||||
x0 = _mm_load_ps(S0 + x + 8);
|
||||
x1 = _mm_load_ps(S0 + x + 12);
|
||||
y0 = _mm_load_ps(S1 + x + 8);
|
||||
y1 = _mm_load_ps(S1 + x + 12);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
|
||||
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), t0);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 16; x += 16 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
__m128i t0, t1, t2;
|
||||
x0 = _mm_loadu_ps(S0 + x);
|
||||
x1 = _mm_loadu_ps(S0 + x + 4);
|
||||
y0 = _mm_loadu_ps(S1 + x);
|
||||
y1 = _mm_loadu_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift);
|
||||
|
||||
x0 = _mm_loadu_ps(S0 + x + 8);
|
||||
x1 = _mm_loadu_ps(S0 + x + 12);
|
||||
y0 = _mm_loadu_ps(S1 + x + 8);
|
||||
y1 = _mm_loadu_ps(S1 + x + 12);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift);
|
||||
t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift);
|
||||
|
||||
_mm_storeu_si128( (__m128i*)(dst + x), t0);
|
||||
_mm_storeu_si128( (__m128i*)(dst + x + 8), t1);
|
||||
}
|
||||
|
||||
for( ; x < width - 4; x += 4 )
|
||||
{
|
||||
__m128 x0, y0;
|
||||
__m128i t0;
|
||||
x0 = _mm_loadu_ps(S0 + x);
|
||||
y0 = _mm_loadu_ps(S1 + x);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift);
|
||||
t0 = _mm_add_epi16(_mm_packs_epi32(t0, t0), postshift);
|
||||
_mm_storel_epi64( (__m128i*)(dst + x), t0);
|
||||
}
|
||||
|
||||
return x;
|
||||
return processed;
|
||||
}
|
||||
};
|
||||
|
||||
typedef VResizeLinearVec_32f16<SHRT_MIN> VResizeLinearVec_32f16u;
|
||||
typedef VResizeLinearVec_32f16<0> VResizeLinearVec_32f16s;
|
||||
|
||||
static int VResizeLinearVec_32f_sse(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
|
||||
{
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1];
|
||||
float* dst = (float*)_dst;
|
||||
int x = 0;
|
||||
|
||||
__m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&15) == 0 )
|
||||
for( ; x <= width - 8; x += 8 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
x0 = _mm_load_ps(S0 + x);
|
||||
x1 = _mm_load_ps(S0 + x + 4);
|
||||
y0 = _mm_load_ps(S1 + x);
|
||||
y1 = _mm_load_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
|
||||
_mm_storeu_ps( dst + x, x0);
|
||||
_mm_storeu_ps( dst + x + 4, x1);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 8; x += 8 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
x0 = _mm_loadu_ps(S0 + x);
|
||||
x1 = _mm_loadu_ps(S0 + x + 4);
|
||||
y0 = _mm_loadu_ps(S1 + x);
|
||||
y1 = _mm_loadu_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
|
||||
_mm_storeu_ps( dst + x, x0);
|
||||
_mm_storeu_ps( dst + x + 4, x1);
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
struct VResizeLinearVec_32f
|
||||
{
|
||||
int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
|
||||
{
|
||||
if( !checkHardwareSupport(CV_CPU_SSE) )
|
||||
return 0;
|
||||
int processed = 0;
|
||||
|
||||
const float** src = (const float**)_src;
|
||||
const float* beta = (const float*)_beta;
|
||||
const float *S0 = src[0], *S1 = src[1];
|
||||
float* dst = (float*)_dst;
|
||||
int x = 0;
|
||||
if( checkHardwareSupport(CV_CPU_AVX) )
|
||||
processed += VResizeLinearVec_32f_avx(_src, _dst, _beta, width);
|
||||
|
||||
__m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]);
|
||||
if( !processed && checkHardwareSupport(CV_CPU_SSE) )
|
||||
processed += VResizeLinearVec_32f_sse(_src, _dst, _beta, width);
|
||||
|
||||
if( (((size_t)S0|(size_t)S1)&15) == 0 )
|
||||
for( ; x <= width - 8; x += 8 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
x0 = _mm_load_ps(S0 + x);
|
||||
x1 = _mm_load_ps(S0 + x + 4);
|
||||
y0 = _mm_load_ps(S1 + x);
|
||||
y1 = _mm_load_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
|
||||
_mm_storeu_ps( dst + x, x0);
|
||||
_mm_storeu_ps( dst + x + 4, x1);
|
||||
}
|
||||
else
|
||||
for( ; x <= width - 8; x += 8 )
|
||||
{
|
||||
__m128 x0, x1, y0, y1;
|
||||
x0 = _mm_loadu_ps(S0 + x);
|
||||
x1 = _mm_loadu_ps(S0 + x + 4);
|
||||
y0 = _mm_loadu_ps(S1 + x);
|
||||
y1 = _mm_loadu_ps(S1 + x + 4);
|
||||
|
||||
x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1));
|
||||
x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1));
|
||||
|
||||
_mm_storeu_ps( dst + x, x0);
|
||||
_mm_storeu_ps( dst + x + 4, x1);
|
||||
}
|
||||
|
||||
return x;
|
||||
return processed;
|
||||
}
|
||||
};
|
||||
|
||||
static int VResizeCubicVec_32s8u_sse2(const uchar** _src, uchar* dst, const uchar* _beta, int width )
{
    const int** src = (const int**)_src;
    const short* beta = (const short*)_beta;
    const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
    int x = 0;
    float scale = 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE);
    __m128 b0 = _mm_set1_ps(beta[0]*scale), b1 = _mm_set1_ps(beta[1]*scale),
        b2 = _mm_set1_ps(beta[2]*scale), b3 = _mm_set1_ps(beta[3]*scale);

    if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&15) == 0 )
        for( ; x <= width - 8; x += 8 )
        {
            __m128i x0, x1, y0, y1;
            __m128 s0, s1, f0, f1;
            x0 = _mm_load_si128((const __m128i*)(S0 + x));
            x1 = _mm_load_si128((const __m128i*)(S0 + x + 4));
            y0 = _mm_load_si128((const __m128i*)(S1 + x));
            y1 = _mm_load_si128((const __m128i*)(S1 + x + 4));

            s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
            s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
            f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
            f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
            s0 = _mm_add_ps(s0, f0);
            s1 = _mm_add_ps(s1, f1);

            x0 = _mm_load_si128((const __m128i*)(S2 + x));
            x1 = _mm_load_si128((const __m128i*)(S2 + x + 4));
            y0 = _mm_load_si128((const __m128i*)(S3 + x));
            y1 = _mm_load_si128((const __m128i*)(S3 + x + 4));

            f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
            f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
            s0 = _mm_add_ps(s0, f0);
            s1 = _mm_add_ps(s1, f1);
            f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
            f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
            s0 = _mm_add_ps(s0, f0);
            s1 = _mm_add_ps(s1, f1);

            x0 = _mm_cvtps_epi32(s0);
            x1 = _mm_cvtps_epi32(s1);

            x0 = _mm_packs_epi32(x0, x1);
            _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
        }
    else
        for( ; x <= width - 8; x += 8 )
        {
            __m128i x0, x1, y0, y1;
            __m128 s0, s1, f0, f1;
            x0 = _mm_loadu_si128((const __m128i*)(S0 + x));
            x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4));
            y0 = _mm_loadu_si128((const __m128i*)(S1 + x));
            y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4));

            s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0);
            s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0);
            f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1);
            f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1);
            s0 = _mm_add_ps(s0, f0);
            s1 = _mm_add_ps(s1, f1);

            x0 = _mm_loadu_si128((const __m128i*)(S2 + x));
            x1 = _mm_loadu_si128((const __m128i*)(S2 + x + 4));
            y0 = _mm_loadu_si128((const __m128i*)(S3 + x));
            y1 = _mm_loadu_si128((const __m128i*)(S3 + x + 4));

            f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2);
            f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2);
            s0 = _mm_add_ps(s0, f0);
            s1 = _mm_add_ps(s1, f1);
            f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3);
            f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3);
            s0 = _mm_add_ps(s0, f0);
            s1 = _mm_add_ps(s1, f1);

            x0 = _mm_cvtps_epi32(s0);
            x1 = _mm_cvtps_epi32(s1);

            x0 = _mm_packs_epi32(x0, x1);
            _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0));
        }

    return x;
}

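On the 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE) factor above: the intermediate rows S0..S3 and the vertical weights beta[] are both fixed-point values scaled by INTER_RESIZE_COEF_SCALE, so each product S_k*beta[k] carries the scale twice. Folding 1/SCALE^2 into b0..b3 returns the accumulated sum to pixel range before it is rounded and packed to 8-bit. In effect (SCALE = INTER_RESIZE_COEF_SCALE):

    dst[x] ~= (S0[x]*beta[0] + S1[x]*beta[1] + S2[x]*beta[2] + S3[x]*beta[3]) / (SCALE*SCALE)
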
struct VResizeCubicVec_32s8u
{
    int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const
    {
        int processed = 0;

        if( checkHardwareSupport(CV_CPU_AVX2) )
            processed += VResizeCubicVec_32s8u_avx2(_src, dst, _beta, width);

        if( !processed && checkHardwareSupport(CV_CPU_SSE2) )
            processed += VResizeCubicVec_32s8u_sse2(_src, dst, _beta, width);

        return processed;
    }
};

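As with the linear case, VResizeCubicVec_32s8u_avx2 lives in a separately compiled source file and is not part of this hunk. A hedged sketch of its likely shape, assuming eight pixels per iteration with 256-bit loads and a 128-bit narrowing pack at the end (the name and body are illustrative only, not the code added by this commit):

#include <immintrin.h>

// Illustrative sketch only; not taken from the patch.
static int VResizeCubicVec_32s8u_avx2_sketch(const uchar** _src, uchar* dst, const uchar* _beta, int width)
{
    const int** src = (const int**)_src;
    const short* beta = (const short*)_beta;
    const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
    int x = 0;
    float scale = 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE);
    __m256 b0 = _mm256_set1_ps(beta[0]*scale), b1 = _mm256_set1_ps(beta[1]*scale),
           b2 = _mm256_set1_ps(beta[2]*scale), b3 = _mm256_set1_ps(beta[3]*scale);

    for( ; x <= width - 8; x += 8 )
    {
        // weighted sum of the four source rows, eight pixels at a time
        __m256 s = _mm256_mul_ps(_mm256_cvtepi32_ps(_mm256_loadu_si256((const __m256i*)(S0 + x))), b0);
        s = _mm256_add_ps(s, _mm256_mul_ps(_mm256_cvtepi32_ps(_mm256_loadu_si256((const __m256i*)(S1 + x))), b1));
        s = _mm256_add_ps(s, _mm256_mul_ps(_mm256_cvtepi32_ps(_mm256_loadu_si256((const __m256i*)(S2 + x))), b2));
        s = _mm256_add_ps(s, _mm256_mul_ps(_mm256_cvtepi32_ps(_mm256_loadu_si256((const __m256i*)(S3 + x))), b3));

        __m256i v = _mm256_cvtps_epi32(s);
        // narrow 8 x int32 -> 8 x uint8 while keeping element order
        __m128i lo = _mm256_castsi256_si128(v);
        __m128i hi = _mm256_extracti128_si256(v, 1);
        __m128i p  = _mm_packus_epi16(_mm_packs_epi32(lo, hi), _mm_setzero_si128());
        _mm_storel_epi64((__m128i*)(dst + x), p);
    }
    return x;   // elements processed; the remainder is handled elsewhere
}
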
template<int shiftval>
int VResizeCubicVec_32f16_sse2(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
{
    const float** src = (const float**)_src;
    const float* beta = (const float*)_beta;
    const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
    ushort* dst = (ushort*)_dst;
    int x = 0;
    __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
        b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);
    __m128i preshift = _mm_set1_epi32(shiftval);
    __m128i postshift = _mm_set1_epi16((short)shiftval);

    if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&15) == 0 )
        for( ; x <= width - 8; x += 8 )
        {
            __m128 x0, x1, y0, y1, s0, s1;
            __m128i t0, t1;
            x0 = _mm_load_ps(S0 + x);
            x1 = _mm_load_ps(S0 + x + 4);
            y0 = _mm_load_ps(S1 + x);
            y1 = _mm_load_ps(S1 + x + 4);

            s0 = _mm_mul_ps(x0, b0);
            s1 = _mm_mul_ps(x1, b0);
            y0 = _mm_mul_ps(y0, b1);
            y1 = _mm_mul_ps(y1, b1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            x0 = _mm_load_ps(S2 + x);
            x1 = _mm_load_ps(S2 + x + 4);
            y0 = _mm_load_ps(S3 + x);
            y1 = _mm_load_ps(S3 + x + 4);

            x0 = _mm_mul_ps(x0, b2);
            x1 = _mm_mul_ps(x1, b2);
            y0 = _mm_mul_ps(y0, b3);
            y1 = _mm_mul_ps(y1, b3);
            s0 = _mm_add_ps(s0, x0);
            s1 = _mm_add_ps(s1, x1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            t0 = _mm_add_epi32(_mm_cvtps_epi32(s0), preshift);
            t1 = _mm_add_epi32(_mm_cvtps_epi32(s1), preshift);

            t0 = _mm_add_epi16(_mm_packs_epi32(t0, t1), postshift);
            _mm_storeu_si128( (__m128i*)(dst + x), t0);
        }
    else
        for( ; x <= width - 8; x += 8 )
        {
            __m128 x0, x1, y0, y1, s0, s1;
@ -832,28 +907,72 @@ template<int shiftval> struct VResizeCubicVec_32f16
            _mm_storeu_si128( (__m128i*)(dst + x), t0);
        }

    return x;
}

template<int shiftval> struct VResizeCubicVec_32f16
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        int processed = 0;

        if( checkHardwareSupport(CV_CPU_AVX2) )
            processed += VResizeCubicVec_32f16_avx2<shiftval>(_src, _dst, _beta, width);

        if( !processed && checkHardwareSupport(CV_CPU_SSE2) )
            processed += VResizeCubicVec_32f16_sse2<shiftval>(_src, _dst, _beta, width);

        return processed;
    }
};

typedef VResizeCubicVec_32f16<SHRT_MIN> VResizeCubicVec_32f16u;
typedef VResizeCubicVec_32f16<0> VResizeCubicVec_32f16s;

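The shiftval parameter is the usual signed/unsigned packing trick: _mm_packs_epi32 saturates to signed 16-bit, so the unsigned variant biases the values by SHRT_MIN before packing and adds the same bias back (with 16-bit wraparound) afterwards. Worked through for a value v in [0, 65535]: v + SHRT_MIN falls in [-32768, 32767] and survives the signed pack unchanged, and (v - 32768) + (short)SHRT_MIN is congruent to v modulo 2^16, restoring the original unsigned bit pattern. With shiftval = 0 both additions are no-ops, giving the signed 16-bit variant.
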
static int VResizeCubicVec_32f_sse(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
{
    const float** src = (const float**)_src;
    const float* beta = (const float*)_beta;
    const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
    float* dst = (float*)_dst;
    int x = 0;
    __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]),
        b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]);

    if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&15) == 0 )
        for( ; x <= width - 8; x += 8 )
        {
            __m128 x0, x1, y0, y1, s0, s1;
            x0 = _mm_load_ps(S0 + x);
            x1 = _mm_load_ps(S0 + x + 4);
            y0 = _mm_load_ps(S1 + x);
            y1 = _mm_load_ps(S1 + x + 4);

            s0 = _mm_mul_ps(x0, b0);
            s1 = _mm_mul_ps(x1, b0);
            y0 = _mm_mul_ps(y0, b1);
            y1 = _mm_mul_ps(y1, b1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            x0 = _mm_load_ps(S2 + x);
            x1 = _mm_load_ps(S2 + x + 4);
            y0 = _mm_load_ps(S3 + x);
            y1 = _mm_load_ps(S3 + x + 4);

            x0 = _mm_mul_ps(x0, b2);
            x1 = _mm_mul_ps(x1, b2);
            y0 = _mm_mul_ps(y0, b3);
            y1 = _mm_mul_ps(y1, b3);
            s0 = _mm_add_ps(s0, x0);
            s1 = _mm_add_ps(s1, x1);
            s0 = _mm_add_ps(s0, y0);
            s1 = _mm_add_ps(s1, y1);

            _mm_storeu_ps( dst + x, s0);
            _mm_storeu_ps( dst + x + 4, s1);
        }
    else
        for( ; x <= width - 8; x += 8 )
        {
            __m128 x0, x1, y0, y1, s0, s1;
@ -887,7 +1006,22 @@ struct VResizeCubicVec_32f
            _mm_storeu_ps( dst + x + 4, s1);
        }

    return x;
}

struct VResizeCubicVec_32f
{
    int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const
    {
        int processed = 0;

        if( checkHardwareSupport(CV_CPU_AVX) )
            processed += VResizeCubicVec_32f_avx(_src, _dst, _beta, width);

        if( !processed && checkHardwareSupport(CV_CPU_SSE) )
            processed += VResizeCubicVec_32f_sse(_src, _dst, _beta, width);

        return processed;
    }
};

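A note on the return value that all of these functors now share: they report how many leading output elements the vector path produced, and OpenCV's generic vertical-resize loop (outside this hunk) finishes the remaining elements with scalar code. A minimal illustration of that contract for the 32f cubic case, with a hypothetical helper name and the scalar tail written out; it is a usage sketch, not part of the patch:

// Hypothetical usage sketch -- not part of this commit.
static void vresize_cubic_row_32f(const float** src, float* dst, const float* beta, int width)
{
    VResizeCubicVec_32f vop;
    int x = vop((const uchar**)src, (uchar*)dst, (const uchar*)beta, width);  // vectorized prefix
    for( ; x < width; x++ )                                                   // scalar remainder
        dst[x] = src[0][x]*beta[0] + src[1][x]*beta[1]
               + src[2][x]*beta[2] + src[3][x]*beta[3];
}
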
@ -3002,9 +3002,8 @@ void printVersionInfo(bool useStdOut)
#if CV_SSE4_2
    if (checkHardwareSupport(CV_CPU_SSE4_2)) cpu_features += " sse4.2";
#endif
#if CV_AVX
    if (checkHardwareSupport(CV_CPU_AVX)) cpu_features += " avx";
#endif
    if (checkHardwareSupport(CV_CPU_AVX2)) cpu_features += " avx2";
#if CV_NEON
    cpu_features += " neon"; // NEON is currently not checked at runtime
#endif