Merge pull request #813 from bitwangyaoyao:master_fix
@@ -126,7 +126,8 @@ inline int divUp(int total, int grain)
 /////////////////////// add subtract multiply divide /////////////////////////
 //////////////////////////////////////////////////////////////////////////////
 template<typename T>
-void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, String kernelName, const char **kernelString, void *_scalar)
+void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst,
+                    String kernelName, const char **kernelString, void *_scalar, int op_type = 0)
 {
     if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
     {
@@ -182,14 +183,25 @@ void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, String
         scalar = (T)scalar1;
         args.push_back( std::make_pair( sizeof(T), (void *)&scalar ));
     }
 
-    openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth);
+    switch(op_type)
+    {
+        case MAT_ADD:
+            openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth, "-D ARITHM_ADD");
+            break;
+        case MAT_SUB:
+            openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth, "-D ARITHM_SUB");
+            break;
+        default:
+            openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth);
+    }
 }
-static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, String kernelName, const char **kernelString)
+static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst,
+                           String kernelName, const char **kernelString, int op_type = 0)
 {
-    arithmetic_run<char>(src1, src2, dst, kernelName, kernelString, (void *)NULL);
+    arithmetic_run<char>(src1, src2, dst, kernelName, kernelString, (void *)NULL, op_type);
 }
-static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask, String kernelName, const char **kernelString)
+static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask,
+                           String kernelName, const char **kernelString, int op_type = 0)
 {
     if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
     {
@@ -244,24 +256,34 @@ static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst,
     args.push_back( std::make_pair( sizeof(cl_int), (void *)&cols ));
     args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst_step1 ));
 
-    openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth);
+    switch (op_type)
+    {
+        case MAT_ADD:
+            openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_ADD");
+            break;
+        case MAT_SUB:
+            openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_SUB");
+            break;
+        default:
+            openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth);
+    }
 }
 void cv::ocl::add(const oclMat &src1, const oclMat &src2, oclMat &dst)
 {
-    arithmetic_run(src1, src2, dst, "arithm_add", &arithm_add);
+    arithmetic_run(src1, src2, dst, "arithm_add", &arithm_add, MAT_ADD);
 }
 void cv::ocl::add(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask)
 {
-    arithmetic_run(src1, src2, dst, mask, "arithm_add_with_mask", &arithm_add);
+    arithmetic_run(src1, src2, dst, mask, "arithm_add_with_mask", &arithm_add, MAT_ADD);
 }
 
 void cv::ocl::subtract(const oclMat &src1, const oclMat &src2, oclMat &dst)
 {
-    arithmetic_run(src1, src2, dst, "arithm_add", &arithm_add);
+    arithmetic_run(src1, src2, dst, "arithm_add", &arithm_add, MAT_SUB);
 }
 void cv::ocl::subtract(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask)
 {
-    arithmetic_run(src1, src2, dst, mask, "arithm_add_with_mask", &arithm_add);
+    arithmetic_run(src1, src2, dst, mask, "arithm_add_with_mask", &arithm_add, MAT_SUB);
 }
 typedef void (*MulDivFunc)(const oclMat &src1, const oclMat &src2, oclMat &dst, String kernelName,
                            const char **kernelString, void *scalar);
@@ -347,12 +369,9 @@ void arithmetic_scalar_run(const oclMat &src1, const Scalar &src2, oclMat &dst,
     args.push_back( std::make_pair( sizeof(cl_int) , (void *)&cols ));
     args.push_back( std::make_pair( sizeof(cl_int) , (void *)&dst_step1 ));
     if(isMatSubScalar != 0)
-    {
-        isMatSubScalar = isMatSubScalar > 0 ? 1 : 0;
-        args.push_back( std::make_pair( sizeof(cl_int) , (void *)&isMatSubScalar));
-    }
-
-    openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth);
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_SUB");
+    else
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_ADD");
 }
 
 static void arithmetic_scalar_run(const oclMat &src, oclMat &dst, String kernelName, const char **kernelString, double scalar)

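Note: the host-side change above routes cv::ocl::add and cv::ocl::subtract through the same "arithm_add" kernels and selects the operation with an op_type flag that is turned into a kernel build option. A minimal C sketch of that mapping, assuming MAT_ADD/MAT_SUB are the enum constants used above (their numeric values are assumed here); op_build_option() is a hypothetical stand-in for the "-D" handling inside openCLExecuteKernel:

#include <stdio.h>

enum { MAT_ADD = 1, MAT_SUB = 2 };   /* mirrors the constants used above; values assumed */

/* Map the run-time operation flag to the compile-time kernel option. */
static const char *op_build_option(int op_type)
{
    switch (op_type)
    {
        case MAT_ADD: return "-D ARITHM_ADD";
        case MAT_SUB: return "-D ARITHM_SUB";
        default:      return "";             /* build the kernel unmodified */
    }
}

int main(void)
{
    /* cv::ocl::subtract() passes MAT_SUB, so the kernel is built with ARITHM_SUB defined */
    printf("%s\n", op_build_option(MAT_SUB));
    return 0;
}
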
@@ -49,7 +49,12 @@
 #elif defined (cl_amd_fp64)
 #pragma OPENCL EXTENSION cl_amd_fp64:enable
 #endif
 #endif
 
+#ifdef ARITHM_ADD
+  #define ARITHM_OP(A,B) ((A)+(B))
+#elif defined ARITHM_SUB
+  #define ARITHM_OP(A,B) ((A)-(B))
+#endif
 /**************************************add with scalar without mask**************************************/
 __kernel void arithm_s_add_C1_D0 (__global   uchar *src1, int src1_step, int src1_offset,
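Note: the block added above is the heart of the deduplication. One kernel source now implements both add and subtract, and the host picks the operator by compiling with -D ARITHM_ADD or -D ARITHM_SUB. A plain-C demonstration of the same mechanism; the #else branch exists only so this standalone demo builds without a -D flag, the kernel itself has no such fallback:

#include <stdio.h>

#ifdef ARITHM_ADD
  #define ARITHM_OP(A,B) ((A)+(B))
#elif defined ARITHM_SUB
  #define ARITHM_OP(A,B) ((A)-(B))
#else
  #define ARITHM_OP(A,B) ((A)+(B))   /* demo-only fallback */
#endif

int main(void)
{
    /* prints 10 when compiled with -DARITHM_ADD, 4 with -DARITHM_SUB */
    printf("%d\n", ARITHM_OP(7, 3));
    return 0;
}
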
@@ -62,7 +67,7 @@ __kernel void arithm_s_add_C1_D0 (__global   uchar *src1, int src1_step, int src
     if (x < cols && y < rows)
     {
         x = x << 2;
-
+        
 #ifdef dst_align
 #undef dst_align
 #endif
@@ -83,7 +88,7 @@ __kernel void arithm_s_add_C1_D0 (__global   uchar *src1, int src1_step, int src
         }
 
         uchar4 data = *((__global uchar4 *)(dst + dst_index));
-        int4 tmp = convert_int4_sat(src1_data) + src2_data;
+        int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
         uchar4 tmp_data = convert_uchar4_sat(tmp);
 
         data.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
@@ -105,7 +110,7 @@ __kernel void arithm_s_add_C1_D2 (__global   ushort *src1, int src1_step, int sr
     if (x < cols && y < rows)
     {
         x = x << 1;
-
+        
 #ifdef dst_align
 #undef dst_align
 #endif
@@ -120,7 +125,7 @@ __kernel void arithm_s_add_C1_D2 (__global   ushort *src1, int src1_step, int sr
         int2 src2_data = (int2)(src2.x, src2.x);
 
         ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
-        int2    tmp = convert_int2_sat(src1_data) + src2_data;
+        int2    tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
         ushort2 tmp_data = convert_ushort2_sat(tmp);
 
         data.x = (dst_index + 0 >= dst_start) ? tmp_data.x : data.x;
@@ -140,7 +145,7 @@ __kernel void arithm_s_add_C1_D3 (__global   short *src1, int src1_step, int src
     if (x < cols && y < rows)
     {
         x = x << 1;
-
+        
 #ifdef dst_align
 #undef dst_align
 #endif
@@ -155,7 +160,7 @@ __kernel void arithm_s_add_C1_D3 (__global   short *src1, int src1_step, int src
         int2 src2_data = (int2)(src2.x, src2.x);
         short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
 
-        int2    tmp = convert_int2_sat(src1_data) + src2_data;
+        int2    tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
         short2 tmp_data = convert_short2_sat(tmp);
 
         data.x = (dst_index + 0 >= dst_start) ? tmp_data.x : data.x;
@@ -181,7 +186,7 @@ __kernel void arithm_s_add_C1_D4 (__global   int *src1, int src1_step, int src1_
         int src_data2 = src2.x;
         int dst_data  = *((__global int *)((__global char *)dst  + dst_index));
 
-        int data = convert_int_sat((long)src_data1 + (long)src_data2);
+        int data = convert_int_sat(ARITHM_OP((long)src_data1, (long)src_data2));
 
         *((__global int *)((__global char *)dst + dst_index)) = data;
     }
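Note: in the C1_D4 hunk above the int operands are widened to long before ARITHM_OP and clamped back with convert_int_sat, so overflow saturates instead of wrapping. A plain-C sketch of the same computation for one lane; sat_int() is a hypothetical stand-in for the convert_int_sat built-in (OpenCL's long is always 64-bit, matching C's long long):

#include <limits.h>
#include <stdio.h>

#define ARITHM_OP(A,B) ((A)+(B))     /* as if built with -D ARITHM_ADD */

/* Clamp a 64-bit intermediate into the int range, like convert_int_sat. */
static int sat_int(long long v)
{
    if (v > INT_MAX) return INT_MAX;
    if (v < INT_MIN) return INT_MIN;
    return (int)v;
}

int main(void)
{
    int src_data1 = INT_MAX, src_data2 = 1;
    /* mirrors: int data = convert_int_sat(ARITHM_OP((long)src_data1, (long)src_data2)); */
    int data = sat_int(ARITHM_OP((long long)src_data1, (long long)src_data2));
    printf("%d\n", data);            /* prints INT_MAX: saturated, not wrapped */
    return 0;
}
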
@@ -203,7 +208,7 @@ __kernel void arithm_s_add_C1_D5 (__global   float *src1, int src1_step, int src
         float src_data2 = src2.x;
         float dst_data  = *((__global float *)((__global char *)dst  + dst_index));
 
-        float data = src_data1 + src_data2;
+        float data = ARITHM_OP(src_data1, src_data2);
 
         *((__global float *)((__global char *)dst + dst_index)) = data;
     }
@@ -227,7 +232,7 @@ __kernel void arithm_s_add_C1_D6 (__global   double *src1, int src1_step, int sr
         double src2_data = src2.x;
         double dst_data  = *((__global double *)((__global char *)dst  + dst_index));
 
-        double data = src_data1 + src2_data;
+        double data = ARITHM_OP(src_data1, src2_data);
 
         *((__global double *)((__global char *)dst + dst_index)) = data;
     }
@@ -245,7 +250,7 @@ __kernel void arithm_s_add_C2_D0 (__global   uchar *src1, int src1_step, int src
     if (x < cols && y < rows)
     {
         x = x << 1;
-
+        
 #ifdef dst_align
 #undef dst_align
 #endif
@@ -260,7 +265,7 @@ __kernel void arithm_s_add_C2_D0 (__global   uchar *src1, int src1_step, int src
         int4 src2_data = (int4)(src2.x, src2.y, src2.x, src2.y);
 
         uchar4 data = *((__global uchar4 *)(dst + dst_index));
-        int4 tmp = convert_int4_sat(src1_data) + src2_data;
+        int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
         uchar4 tmp_data = convert_uchar4_sat(tmp);
 
         data.xy = (dst_index + 0 >= dst_start) ? tmp_data.xy : data.xy;
@@ -286,7 +291,7 @@ __kernel void arithm_s_add_C2_D2 (__global   ushort *src1, int src1_step, int sr
         int2 src_data2 = (int2)(src2.x, src2.y);
         ushort2 dst_data  = *((__global ushort2 *)((__global char *)dst  + dst_index));
 
-        int2    tmp = convert_int2_sat(src_data1) + src_data2;
+        int2    tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
         ushort2 data = convert_ushort2_sat(tmp);
 
         *((__global ushort2 *)((__global char *)dst + dst_index)) = data;
@@ -309,7 +314,7 @@ __kernel void arithm_s_add_C2_D3 (__global   short *src1, int src1_step, int src
         int2 src_data2 = (int2)(src2.x, src2.y);
         short2 dst_data  = *((__global short2 *)((__global char *)dst  + dst_index));
 
-        int2    tmp = convert_int2_sat(src_data1) + src_data2;
+        int2    tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
         short2 data = convert_short2_sat(tmp);
 
         *((__global short2 *)((__global char *)dst + dst_index)) = data;
@@ -332,7 +337,7 @@ __kernel void arithm_s_add_C2_D4 (__global   int *src1, int src1_step, int src1_
         int2 src_data2 = (int2)(src2.x, src2.y);
         int2 dst_data  = *((__global int2 *)((__global char *)dst  + dst_index));
 
-        int2 data = convert_int2_sat(convert_long2_sat(src_data1) + convert_long2_sat(src_data2));
+        int2 data = convert_int2_sat(ARITHM_OP(convert_long2_sat(src_data1), convert_long2_sat(src_data2)));
         *((__global int2 *)((__global char *)dst + dst_index)) = data;
     }
 }
@@ -353,7 +358,7 @@ __kernel void arithm_s_add_C2_D5 (__global   float *src1, int src1_step, int src
         float2 src_data2 = (float2)(src2.x, src2.y);
         float2 dst_data  = *((__global float2 *)((__global char *)dst  + dst_index));
 
-        float2 data = src_data1 + src_data2;
+        float2 data = ARITHM_OP(src_data1, src_data2);
         *((__global float2 *)((__global char *)dst + dst_index)) = data;
     }
 }
@@ -376,280 +381,13 @@ __kernel void arithm_s_add_C2_D6 (__global   double *src1, int src1_step, int sr
         double2 src_data2 = (double2)(src2.x, src2.y);
         double2 dst_data  = *((__global double2 *)((__global char *)dst  + dst_index));
 
-        double2 data = src_data1 + src_data2;
+        double2 data = ARITHM_OP(src_data1, src_data2);
 
         *((__global double2 *)((__global char *)dst + dst_index)) = data;
     }
 }
 #endif
-__kernel void arithm_s_add_C3_D0 (__global   uchar *src1, int src1_step, int src1_offset,
-                                  __global   uchar *dst,  int dst_step,  int dst_offset,
-                                  int4 src2, int rows, int cols, int dst_step1)
-{
-
-    int x = get_global_id(0);
-    int y = get_global_id(1);
-
-    if (x < cols && y < rows)
-    {
-        x = x << 2;
-
-#ifdef dst_align
-#undef dst_align
-#endif
-#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
-        int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
-
-        int dst_start  = mad24(y, dst_step, dst_offset);
-        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
-        int dst_index  = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
-
-        uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
-        uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
-        uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
-
-        int4 src2_data_0 = (int4)(src2.x, src2.y, src2.z, src2.x);
-        int4 src2_data_1 = (int4)(src2.y, src2.z, src2.x, src2.y);
-        int4 src2_data_2 = (int4)(src2.z, src2.x, src2.y, src2.z);
-
-        uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
-        uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
-        uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
-
-        uchar4 tmp_data_0 = convert_uchar4_sat(convert_int4_sat(src1_data_0) + src2_data_0);
-        uchar4 tmp_data_1 = convert_uchar4_sat(convert_int4_sat(src1_data_1) + src2_data_1);
-        uchar4 tmp_data_2 = convert_uchar4_sat(convert_int4_sat(src1_data_2) + src2_data_2);
-
-        data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
-        data_0.w   = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
-                     ? tmp_data_0.w : data_0.w;
-
-        data_1.xy  = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
-                     ? tmp_data_1.xy : data_1.xy;
-        data_1.zw  = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
-                     ? tmp_data_1.zw : data_1.zw;
-
-        data_2.x   = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
-                     ? tmp_data_2.x : data_2.x;
-        data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
-                     ? tmp_data_2.yzw : data_2.yzw;
-
-        *((__global uchar4 *)(dst + dst_index + 0)) = data_0;
-        *((__global uchar4 *)(dst + dst_index + 4)) = data_1;
-        *((__global uchar4 *)(dst + dst_index + 8)) = data_2;
-    }
-}
-__kernel void arithm_s_add_C3_D2 (__global   ushort *src1, int src1_step, int src1_offset,
-                                  __global   ushort *dst,  int dst_step,  int dst_offset,
-                                  int4 src2, int rows, int cols, int dst_step1)
-{
-
-    int x = get_global_id(0);
-    int y = get_global_id(1);
-
-    if (x < cols && y < rows)
-    {
-        x = x << 1;
-
-#ifdef dst_align
-#undef dst_align
-#endif
-#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
-        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
-
-        int dst_start  = mad24(y, dst_step, dst_offset);
-        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
-        int dst_index  = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
-
-        ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
-        ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
-        ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
-
-        int2 src2_data_0 = (int2)(src2.x, src2.y);
-        int2 src2_data_1 = (int2)(src2.z, src2.x);
-        int2 src2_data_2 = (int2)(src2.y, src2.z);
-
-        ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
-        ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
-        ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
-
-        ushort2 tmp_data_0 = convert_ushort2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
-        ushort2 tmp_data_1 = convert_ushort2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
-        ushort2 tmp_data_2 = convert_ushort2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
-
-        data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
-
-        data_1.x  = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
-                    ? tmp_data_1.x : data_1.x;
-        data_1.y  = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
-                    ? tmp_data_1.y : data_1.y;
-
-        data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
-                    ? tmp_data_2.xy : data_2.xy;
-
-        *((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
-        *((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
-        *((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
-    }
-}
-__kernel void arithm_s_add_C3_D3 (__global   short *src1, int src1_step, int src1_offset,
-                                  __global   short *dst,  int dst_step,  int dst_offset,
-                                  int4 src2, int rows, int cols, int dst_step1)
-{
-
-    int x = get_global_id(0);
-    int y = get_global_id(1);
-
-    if (x < cols && y < rows)
-    {
-        x = x << 1;
-
-#ifdef dst_align
-#undef dst_align
-#endif
-#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
-        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
-
-        int dst_start  = mad24(y, dst_step, dst_offset);
-        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
-        int dst_index  = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
-
-        short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
-        short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
-        short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
-
-        int2 src2_data_0 = (int2)(src2.x, src2.y);
-        int2 src2_data_1 = (int2)(src2.z, src2.x);
-        int2 src2_data_2 = (int2)(src2.y, src2.z);
-
-        short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
-        short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
-        short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
-
-        short2 tmp_data_0 = convert_short2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
-        short2 tmp_data_1 = convert_short2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
-        short2 tmp_data_2 = convert_short2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
-
-        data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
-
-        data_1.x  = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
-                    ? tmp_data_1.x : data_1.x;
-        data_1.y  = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
-                    ? tmp_data_1.y : data_1.y;
-
-        data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
-                    ? tmp_data_2.xy : data_2.xy;
-
-        *((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
-        *((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
-        *((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
-    }
-}
-__kernel void arithm_s_add_C3_D4 (__global   int *src1, int src1_step, int src1_offset,
-                                  __global   int *dst,  int dst_step,  int dst_offset,
-                                  int4 src2, int rows, int cols, int dst_step1)
-{
-
-    int x = get_global_id(0);
-    int y = get_global_id(1);
-
-    if (x < cols && y < rows)
-    {
-        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
-        int dst_index  = mad24(y, dst_step, dst_offset + (x * 12));
-
-        int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
-        int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
-        int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
-
-        int src2_data_0 = src2.x;
-        int src2_data_1 = src2.y;
-        int src2_data_2 = src2.z;
-
-        int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
-        int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
-        int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
-
-        int tmp_data_0 = convert_int_sat((long)src1_data_0 + (long)src2_data_0);
-        int tmp_data_1 = convert_int_sat((long)src1_data_1 + (long)src2_data_1);
-        int tmp_data_2 = convert_int_sat((long)src1_data_2 + (long)src2_data_2);
-
-        *((__global int *)((__global char *)dst + dst_index + 0))= tmp_data_0;
-        *((__global int *)((__global char *)dst + dst_index + 4))= tmp_data_1;
-        *((__global int *)((__global char *)dst + dst_index + 8))= tmp_data_2;
-    }
-}
-__kernel void arithm_s_add_C3_D5 (__global   float *src1, int src1_step, int src1_offset,
-                                  __global   float *dst,  int dst_step,  int dst_offset,
-                                  float4 src2, int rows, int cols, int dst_step1)
-{
-
-    int x = get_global_id(0);
-    int y = get_global_id(1);
-
-    if (x < cols && y < rows)
-    {
-        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
-        int dst_index  = mad24(y, dst_step, dst_offset + (x * 12));
-
-        float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
-        float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
-        float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));
-
-        float src2_data_0 = src2.x;
-        float src2_data_1 = src2.y;
-        float src2_data_2 = src2.z;
-
-        float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
-        float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
-        float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));
-
-        float tmp_data_0 = src1_data_0 + src2_data_0;
-        float tmp_data_1 = src1_data_1 + src2_data_1;
-        float tmp_data_2 = src1_data_2 + src2_data_2;
-
-        *((__global float *)((__global char *)dst + dst_index + 0))= tmp_data_0;
-        *((__global float *)((__global char *)dst + dst_index + 4))= tmp_data_1;
-        *((__global float *)((__global char *)dst + dst_index + 8))= tmp_data_2;
-    }
-}
-
-#if defined (DOUBLE_SUPPORT)
-__kernel void arithm_s_add_C3_D6 (__global   double *src1, int src1_step, int src1_offset,
-                                  __global   double *dst,  int dst_step,  int dst_offset,
-                                  double4 src2, int rows, int cols, int dst_step1)
-{
-
-    int x = get_global_id(0);
-    int y = get_global_id(1);
-
-    if (x < cols && y < rows)
-    {
-        int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
-        int dst_index  = mad24(y, dst_step, dst_offset + (x * 24));
-
-        double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
-        double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
-        double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));
-
-        double src2_data_0 = src2.x;
-        double src2_data_1 = src2.y;
-        double src2_data_2 = src2.z;
-
-        double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
-        double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
-        double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));
-
-        double tmp_data_0 = src1_data_0 + src2_data_0;
-        double tmp_data_1 = src1_data_1 + src2_data_1;
-        double tmp_data_2 = src1_data_2 + src2_data_2;
-
-        *((__global double *)((__global char *)dst + dst_index + 0 ))= tmp_data_0;
-        *((__global double *)((__global char *)dst + dst_index + 8 ))= tmp_data_1;
-        *((__global double *)((__global char *)dst + dst_index + 16))= tmp_data_2;
-    }
-}
-#endif
 __kernel void arithm_s_add_C4_D0 (__global   uchar *src1, int src1_step, int src1_offset,
                                   __global   uchar *dst,  int dst_step,  int dst_offset,
                                   int4 src2, int rows, int cols, int dst_step1)
@@ -665,7 +403,7 @@ __kernel void arithm_s_add_C4_D0 (__global   uchar *src1, int src1_step, int src
 
         uchar4 src_data1 = *((__global uchar4 *)(src1 + src1_index));
 
-        uchar4 data = convert_uchar4_sat(convert_int4_sat(src_data1) + src2);
+        uchar4 data = convert_uchar4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
 
         *((__global uchar4 *)(dst + dst_index)) = data;
     }
@@ -685,7 +423,7 @@ __kernel void arithm_s_add_C4_D2 (__global   ushort *src1, int src1_step, int sr
 
         ushort4 src_data1 = *((__global ushort4 *)((__global char *)src1 + src1_index));
 
-        ushort4 data = convert_ushort4_sat(convert_int4_sat(src_data1) + src2);
+        ushort4 data = convert_ushort4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
 
         *((__global ushort4 *)((__global char *)dst + dst_index)) = data;
     }
@@ -705,7 +443,7 @@ __kernel void arithm_s_add_C4_D3 (__global   short *src1, int src1_step, int src
 
         short4 src_data1 = *((__global short4 *)((__global char *)src1 + src1_index));
 
-        short4 data = convert_short4_sat(convert_int4_sat(src_data1) + src2);
+        short4 data = convert_short4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
 
         *((__global short4 *)((__global char *)dst + dst_index)) = data;
     }
@@ -725,7 +463,7 @@ __kernel void arithm_s_add_C4_D4 (__global   int *src1, int src1_step, int src1_
 
         int4 src_data1 = *((__global int4 *)((__global char *)src1 + src1_index));
 
-        int4 data = convert_int4_sat(convert_long4_sat(src_data1) + convert_long4_sat(src2));
+        int4 data = convert_int4_sat(ARITHM_OP(convert_long4_sat(src_data1), convert_long4_sat(src2)));
 
         *((__global int4 *)((__global char *)dst + dst_index)) = data;
     }
@@ -745,7 +483,7 @@ __kernel void arithm_s_add_C4_D5 (__global   float *src1, int src1_step, int src
 
        float4 src_data1 = *((__global float4 *)((__global char *)src1 + src1_index));
 
-        float4 data = src_data1 + src2;
+        float4 data = ARITHM_OP(src_data1, src2);
 
         *((__global float4 *)((__global char *)dst + dst_index)) = data;
     }
@@ -767,7 +505,7 @@ __kernel void arithm_s_add_C4_D6 (__global   double *src1, int src1_step, int sr
 
         double4 src_data1 = *((__global double4 *)((__global char *)src1 + src1_index));
 
-        double4 data = src_data1 + src2;
+        double4 data = ARITHM_OP(src_data1, src2);
 
         *((__global double4 *)((__global char *)dst + dst_index)) = data;
     }

@@ -51,6 +51,11 @@
 #endif
 #endif
 
+#ifdef ARITHM_ADD
+  #define ARITHM_OP(A,B) ((A)+(B))
+#elif defined ARITHM_SUB
+  #define ARITHM_OP(A,B) ((A)-(B))
+#endif
 /**************************************add with scalar with mask**************************************/
 __kernel void arithm_s_add_with_mask_C1_D0 (__global   uchar *src1, int src1_step, int src1_offset,
                                             __global   uchar *dst,  int dst_step,  int dst_offset,
@@ -64,7 +69,7 @@ __kernel void arithm_s_add_with_mask_C1_D0 (__global   uchar *src1, int src1_ste
     if (x < cols && y < rows)
     {
         x = x << 2;
-
+        
 #ifdef dst_align
 #undef dst_align
 #endif
@@ -94,7 +99,7 @@ __kernel void arithm_s_add_with_mask_C1_D0 (__global   uchar *src1, int src1_ste
         }
 
         uchar4 data = *((__global uchar4 *)(dst + dst_index));
-        int4 tmp = convert_int4_sat(src1_data) + src2_data;
+        int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
         uchar4 tmp_data = convert_uchar4_sat(tmp);
 
         data.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
@@ -117,7 +122,7 @@ __kernel void arithm_s_add_with_mask_C1_D2 (__global   ushort *src1, int src1_st
     if (x < cols && y < rows)
     {
         x = x << 1;
-
+        
 #ifdef dst_align
 #undef dst_align
 #endif
@@ -134,7 +139,7 @@ __kernel void arithm_s_add_with_mask_C1_D2 (__global   ushort *src1, int src1_st
         uchar2  mask_data = vload2(0, mask + mask_index);
 
         ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
-        int2    tmp = convert_int2_sat(src1_data) + src2_data;
+        int2    tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
         ushort2 tmp_data = convert_ushort2_sat(tmp);
 
         data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
@@ -155,7 +160,7 @@ __kernel void arithm_s_add_with_mask_C1_D3 (__global   short *src1, int src1_ste
     if (x < cols && y < rows)
     {
         x = x << 1;
-
+        
 #ifdef dst_align
 #undef dst_align
 #endif
@@ -172,7 +177,7 @@ __kernel void arithm_s_add_with_mask_C1_D3 (__global   short *src1, int src1_ste
         uchar2  mask_data = vload2(0, mask + mask_index);
 
         short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
-        int2    tmp = convert_int2_sat(src1_data) + src2_data;
+        int2    tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
         short2 tmp_data = convert_short2_sat(tmp);
 
         data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
@@ -202,7 +207,7 @@ __kernel void arithm_s_add_with_mask_C1_D4 (__global   int   *src1, int src1_ste
         int src_data2 = src2.x;
         int dst_data  = *((__global int *)((__global char *)dst  + dst_index));
 
-        int data = convert_int_sat((long)src_data1 + (long)src_data2);
+        int data = convert_int_sat(ARITHM_OP((long)src_data1, (long)src_data2));
         data = mask_data ? data : dst_data;
 
         *((__global int *)((__global char *)dst + dst_index)) = data;
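Note: the masked kernels keep the select "data = mask_data ? data : dst_data;" after the ARITHM_OP line, so the computed value only lands where the 8-bit mask byte is non-zero. A one-lane plain-C sketch of that behaviour; the variable names mirror the kernel locals and the values are made up for the demo:

#include <stdio.h>

typedef unsigned char uchar;

int main(void)
{
    uchar mask_data = 0;    /* this pixel is masked out */
    uchar dst_data  = 42;   /* value already in dst */
    uchar tmp_data  = 99;   /* freshly computed add/sub result */

    /* mirrors the kernel's: data = mask_data ? data : dst_data; */
    uchar data = mask_data ? tmp_data : dst_data;

    printf("%u\n", data);   /* prints 42: dst is preserved under a zero mask */
    return 0;
}
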
@@ -230,7 +235,7 @@ __kernel void arithm_s_add_with_mask_C1_D5 (__global   float   *src1, int src1_s
 | 
			
		||||
        float src_data2 = src2.x;
 | 
			
		||||
        float dst_data  = *((__global float *)((__global char *)dst  + dst_index));
 | 
			
		||||
 | 
			
		||||
        float data = src_data1 + src_data2;
 | 
			
		||||
        float data = ARITHM_OP(src_data1, src_data2);
 | 
			
		||||
        data = mask_data ? data : dst_data;
 | 
			
		||||
 | 
			
		||||
        *((__global float *)((__global char *)dst + dst_index)) = data;
 | 
			
		||||
@@ -260,7 +265,7 @@ __kernel void arithm_s_add_with_mask_C1_D6 (__global   double   *src1, int src1_
 | 
			
		||||
        double src_data2 = src2.x;
 | 
			
		||||
        double dst_data  = *((__global double *)((__global char *)dst  + dst_index));
 | 
			
		||||
 | 
			
		||||
        double data = src_data1 + src_data2;
 | 
			
		||||
        double data = ARITHM_OP(src_data1, src_data2);
 | 
			
		||||
        data = mask_data ? data : dst_data;
 | 
			
		||||
 | 
			
		||||
        *((__global double *)((__global char *)dst + dst_index)) = data;
 | 
			
		||||
@@ -279,7 +284,7 @@ __kernel void arithm_s_add_with_mask_C2_D0 (__global   uchar *src1, int src1_ste
 | 
			
		||||
    if (x < cols && y < rows)
 | 
			
		||||
    {
 | 
			
		||||
        x = x << 1;
 | 
			
		||||
 | 
			
		||||
        
 | 
			
		||||
#ifdef dst_align
 | 
			
		||||
#undef dst_align
 | 
			
		||||
#endif
 | 
			
		||||
@@ -296,7 +301,7 @@ __kernel void arithm_s_add_with_mask_C2_D0 (__global   uchar *src1, int src1_ste
 | 
			
		||||
        uchar2 mask_data = vload2(0, mask + mask_index);
 | 
			
		||||
 | 
			
		||||
        uchar4 data = *((__global uchar4 *)(dst + dst_index));
 | 
			
		||||
        int4  tmp = convert_int4_sat(src1_data) + src2_data;
 | 
			
		||||
        int4  tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
 | 
			
		||||
        uchar4 tmp_data = convert_uchar4_sat(tmp);
 | 
			
		||||
 | 
			
		||||
        data.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.xy : data.xy;
 | 
			
		||||
@@ -326,7 +331,7 @@ __kernel void arithm_s_add_with_mask_C2_D2 (__global   ushort *src1, int src1_st
 | 
			
		||||
        int2 src_data2 = (int2)(src2.x, src2.y);
 | 
			
		||||
        ushort2 dst_data  = *((__global ushort2 *)((__global char *)dst  + dst_index));
 | 
			
		||||
 | 
			
		||||
        int2    tmp = convert_int2_sat(src_data1) + src_data2;
 | 
			
		||||
        int2    tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
 | 
			
		||||
        ushort2 data = convert_ushort2_sat(tmp);
 | 
			
		||||
        data = mask_data ? data : dst_data;
 | 
			
		||||
 | 
			
		||||
@@ -354,7 +359,7 @@ __kernel void arithm_s_add_with_mask_C2_D3 (__global   short *src1, int src1_ste
 | 
			
		||||
        int2 src_data2 = (int2)(src2.x, src2.y);
 | 
			
		||||
        short2 dst_data  = *((__global short2 *)((__global char *)dst  + dst_index));
 | 
			
		||||
 | 
			
		||||
        int2    tmp = convert_int2_sat(src_data1) + src_data2;
 | 
			
		||||
        int2    tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
 | 
			
		||||
        short2 data = convert_short2_sat(tmp);
 | 
			
		||||
        data = mask_data ? data : dst_data;
 | 
			
		||||
 | 
			
		||||
@@ -382,7 +387,7 @@ __kernel void arithm_s_add_with_mask_C2_D4 (__global   int *src1, int src1_step,
 | 
			
		||||
        int2 src_data2 = (int2)(src2.x, src2.y);
 | 
			
		||||
        int2 dst_data  = *((__global int2 *)((__global char *)dst  + dst_index));
 | 
			
		||||
 | 
			
		||||
        int2 data = convert_int2_sat(convert_long2_sat(src_data1) + convert_long2_sat(src_data2));
 | 
			
		||||
        int2 data = convert_int2_sat(ARITHM_OP(convert_long2_sat(src_data1), convert_long2_sat(src_data2)));
 | 
			
		||||
        data = mask_data ? data : dst_data;
 | 
			
		||||
 | 
			
		||||
        *((__global int2 *)((__global char *)dst + dst_index)) = data;
 | 
			
		||||
@@ -409,7 +414,7 @@ __kernel void arithm_s_add_with_mask_C2_D5 (__global   float *src1, int src1_ste
 | 
			
		||||
        float2 src_data2 = (float2)(src2.x, src2.y);
 | 
			
		||||
        float2 dst_data  = *((__global float2 *)((__global char *)dst  + dst_index));
 | 
			
		||||
 | 
			
		||||
        float2 data = src_data1 + src_data2;
 | 
			
		||||
        float2 data = ARITHM_OP(src_data1, src_data2);
 | 
			
		||||
        data = mask_data ? data : dst_data;
 | 
			
		||||
 | 
			
		||||
        *((__global float2 *)((__global char *)dst + dst_index)) = data;
 | 
			
		||||
@@ -438,7 +443,7 @@ __kernel void arithm_s_add_with_mask_C2_D6 (__global   double *src1, int src1_st
 | 
			
		||||
        double2 src_data2 = (double2)(src2.x, src2.y);
 | 
			
		||||
        double2 dst_data  = *((__global double2 *)((__global char *)dst  + dst_index));
 | 
			
		||||
 | 
			
		||||
        double2 data = src_data1 + src_data2;
 | 
			
		||||
        double2 data = ARITHM_OP(src_data1, src_data2);
 | 
			
		||||
        data = mask_data ? data : dst_data;
 | 
			
		||||
 | 
			
		||||
        *((__global double2 *)((__global char *)dst + dst_index)) = data;
 | 
			
		||||
@@ -446,317 +451,11 @@ __kernel void arithm_s_add_with_mask_C2_D6 (__global   double *src1, int src1_st
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
__kernel void arithm_s_add_with_mask_C3_D0 (__global   uchar *src1, int src1_step, int src1_offset,
 | 
			
		||||
                                            __global   uchar *dst,  int dst_step,  int dst_offset,
 | 
			
		||||
                                            __global   uchar *mask, int mask_step, int mask_offset,
 | 
			
		||||
                                            int4 src2, int rows, int cols, int dst_step1)
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
    int x = get_global_id(0);
 | 
			
		||||
    int y = get_global_id(1);
 | 
			
		||||
 | 
			
		||||
    if (x < cols && y < rows)
 | 
			
		||||
    {
 | 
			
		||||
        x = x << 2;
 | 
			
		||||
 | 
			
		||||
#ifdef dst_align
 | 
			
		||||
#undef dst_align
 | 
			
		||||
#endif
 | 
			
		||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
 | 
			
		||||
        int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
 | 
			
		||||
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
 | 
			
		||||
 | 
			
		||||
        int dst_start  = mad24(y, dst_step, dst_offset);
 | 
			
		||||
        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
 | 
			
		||||
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
 | 
			
		||||
 | 
			
		||||
        uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
 | 
			
		||||
        uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
 | 
			
		||||
        uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
 | 
			
		||||
 | 
			
		||||
        int4 src2_data_0 = (int4)(src2.x, src2.y, src2.z, src2.x);
 | 
			
		||||
        int4 src2_data_1 = (int4)(src2.y, src2.z, src2.x, src2.y);
 | 
			
		||||
        int4 src2_data_2 = (int4)(src2.z, src2.x, src2.y, src2.z);
 | 
			
		||||
 | 
			
		||||
        uchar4 mask_data = vload4(0, mask + mask_index);
 | 
			
		||||
 | 
			
		||||
        uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
 | 
			
		||||
        uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
 | 
			
		||||
        uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
 | 
			
		||||
 | 
			
		||||
        uchar4 tmp_data_0 = convert_uchar4_sat(convert_int4_sat(src1_data_0) + src2_data_0);
 | 
			
		||||
        uchar4 tmp_data_1 = convert_uchar4_sat(convert_int4_sat(src1_data_1) + src2_data_1);
 | 
			
		||||
        uchar4 tmp_data_2 = convert_uchar4_sat(convert_int4_sat(src1_data_2) + src2_data_2);
 | 
			
		||||
 | 
			
		||||
        data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
 | 
			
		||||
        data_0.w   = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
 | 
			
		||||
                     ? tmp_data_0.w : data_0.w;
 | 
			
		||||
 | 
			
		||||
        data_1.xy  = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
 | 
			
		||||
                     ? tmp_data_1.xy : data_1.xy;
 | 
			
		||||
        data_1.zw  = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
 | 
			
		||||
                     ? tmp_data_1.zw : data_1.zw;
 | 
			
		||||
 | 
			
		||||
        data_2.x   = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
 | 
			
		||||
                     ? tmp_data_2.x : data_2.x;
 | 
			
		||||
        data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
 | 
			
		||||
                     ? tmp_data_2.yzw : data_2.yzw;
 | 
			
		||||
 | 
			
		||||
        *((__global uchar4 *)(dst + dst_index + 0)) = data_0;
 | 
			
		||||
        *((__global uchar4 *)(dst + dst_index + 4)) = data_1;
 | 
			
		||||
        *((__global uchar4 *)(dst + dst_index + 8)) = data_2;
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
__kernel void arithm_s_add_with_mask_C3_D2 (__global   ushort *src1, int src1_step, int src1_offset,
 | 
			
		||||
                                            __global   ushort *dst,  int dst_step,  int dst_offset,
 | 
			
		||||
                                            __global   uchar *mask, int mask_step, int mask_offset,
 | 
			
		||||
                                            int4 src2, int rows, int cols, int dst_step1)
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
    int x = get_global_id(0);
 | 
			
		||||
    int y = get_global_id(1);
 | 
			
		||||
 | 
			
		||||
    if (x < cols && y < rows)
 | 
			
		||||
    {
 | 
			
		||||
        x = x << 1;
 | 
			
		||||
 | 
			
		||||
#ifdef dst_align
 | 
			
		||||
#undef dst_align
 | 
			
		||||
#endif
 | 
			
		||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
 | 
			
		||||
        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
 | 
			
		||||
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
 | 
			
		||||
 | 
			
		||||
        int dst_start  = mad24(y, dst_step, dst_offset);
 | 
			
		||||
        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
 | 
			
		||||
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
 | 
			
		||||
 | 
			
		||||
        ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
 | 
			
		||||
        ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
 | 
			
		||||
        ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
 | 
			
		||||
 | 
			
		||||
        int2 src2_data_0 = (int2)(src2.x, src2.y);
 | 
			
		||||
        int2 src2_data_1 = (int2)(src2.z, src2.x);
 | 
			
		||||
        int2 src2_data_2 = (int2)(src2.y, src2.z);
 | 
			
		||||
 | 
			
		||||
        uchar2 mask_data = vload2(0, mask + mask_index);
 | 
			
		||||
 | 
			
		||||
        ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
 | 
			
		||||
        ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
 | 
			
		||||
        ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
 | 
			
		||||
 | 
			
		||||
        ushort2 tmp_data_0 = convert_ushort2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
 | 
			
		||||
        ushort2 tmp_data_1 = convert_ushort2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
 | 
			
		||||
        ushort2 tmp_data_2 = convert_ushort2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
 | 
			
		||||
 | 
			
		||||
        data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
 | 
			
		||||
 | 
			
		||||
        data_1.x  = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
 | 
			
		||||
                    ? tmp_data_1.x : data_1.x;
 | 
			
		||||
        data_1.y  = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
 | 
			
		||||
                    ? tmp_data_1.y : data_1.y;
 | 
			
		||||
 | 
			
		||||
        data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
 | 
			
		||||
                    ? tmp_data_2.xy : data_2.xy;
 | 
			
		||||
 | 
			
		||||
        *((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
 | 
			
		||||
        *((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
 | 
			
		||||
        *((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
__kernel void arithm_s_add_with_mask_C3_D3 (__global   short *src1, int src1_step, int src1_offset,
 | 
			
		||||
                                            __global   short *dst,  int dst_step,  int dst_offset,
 | 
			
		||||
                                            __global   uchar *mask, int mask_step, int mask_offset,
 | 
			
		||||
                                            int4 src2, int rows, int cols, int dst_step1)
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
    int x = get_global_id(0);
 | 
			
		||||
    int y = get_global_id(1);
 | 
			
		||||
 | 
			
		||||
    if (x < cols && y < rows)
 | 
			
		||||
    {
 | 
			
		||||
        x = x << 1;
 | 
			
		||||
 | 
			
		||||
#ifdef dst_align
 | 
			
		||||
#undef dst_align
 | 
			
		||||
#endif
 | 
			
		||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
 | 
			
		||||
        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
 | 
			
		||||
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
 | 
			
		||||
 | 
			
		||||
        int dst_start  = mad24(y, dst_step, dst_offset);
 | 
			
		||||
        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
 | 
			
		||||
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
 | 
			
		||||
 | 
			
		||||
        short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
 | 
			
		||||
        short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
 | 
			
		||||
        short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
 | 
			
		||||
 | 
			
		||||
        int2 src2_data_0 = (int2)(src2.x, src2.y);
 | 
			
		||||
        int2 src2_data_1 = (int2)(src2.z, src2.x);
 | 
			
		||||
        int2 src2_data_2 = (int2)(src2.y, src2.z);
 | 
			
		||||
 | 
			
		||||
        uchar2 mask_data = vload2(0, mask + mask_index);
 | 
			
		||||
 | 
			
		||||
        short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
 | 
			
		||||
        short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
 | 
			
		||||
        short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
 | 
			
		||||
 | 
			
		||||
        short2 tmp_data_0 = convert_short2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
 | 
			
		||||
        short2 tmp_data_1 = convert_short2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
 | 
			
		||||
        short2 tmp_data_2 = convert_short2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
 | 
			
		||||
 | 
			
		||||
        data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
 | 
			
		||||
 | 
			
		||||
        data_1.x  = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
 | 
			
		||||
                    ? tmp_data_1.x : data_1.x;
 | 
			
		||||
        data_1.y  = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
 | 
			
		||||
                    ? tmp_data_1.y : data_1.y;
 | 
			
		||||
 | 
			
		||||
        data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
 | 
			
		||||
                    ? tmp_data_2.xy : data_2.xy;
 | 
			
		||||
 | 
			
		||||
        *((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
 | 
			
		||||
        *((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
 | 
			
		||||
        *((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
 | 
			
		||||
    }
 | 
			
		||||
}
__kernel void arithm_s_add_with_mask_C3_D4 (__global   int *src1, int src1_step, int src1_offset,
                                            __global   int *dst,  int dst_step,  int dst_offset,
                                            __global   uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1)
{

    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 12));

        int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
        int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
        int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));

        int src2_data_0 = src2.x;
        int src2_data_1 = src2.y;
        int src2_data_2 = src2.z;

        uchar mask_data = * (mask + mask_index);

        int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
        int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
        int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));

        int tmp_data_0 = convert_int_sat((long)src1_data_0 + (long)src2_data_0);
        int tmp_data_1 = convert_int_sat((long)src1_data_1 + (long)src2_data_1);
        int tmp_data_2 = convert_int_sat((long)src1_data_2 + (long)src2_data_2);

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global int *)((__global char *)dst + dst_index + 0))= data_0;
        *((__global int *)((__global char *)dst + dst_index + 4))= data_1;
        *((__global int *)((__global char *)dst + dst_index + 8))= data_2;
    }
}
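The D4 (int) variant widens both operands to long before adding so the sum can be clamped with convert_int_sat; for integer element types OpenCL C's built-in add_sat should give the same saturated result. A hedged comparison; sat_add_i32 is an illustrative name, not from this file:

int sat_add_i32(int a, int b)
{
    // Pattern used in the kernel: widen to 64-bit, add, clamp back to int.
    int r_widen = convert_int_sat((long)a + (long)b);
    // OpenCL C built-in saturating add; expected to agree for integer types.
    int r_builtin = add_sat(a, b);
    return r_widen; // == r_builtin
}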
__kernel void arithm_s_add_with_mask_C3_D5 (__global   float *src1, int src1_step, int src1_offset,
                                            __global   float *dst,  int dst_step,  int dst_offset,
                                            __global   uchar *mask, int mask_step, int mask_offset,
                                            float4 src2, int rows, int cols, int dst_step1)
{

    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 12));

        float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
        float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
        float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));

        float src2_data_0 = src2.x;
        float src2_data_1 = src2.y;
        float src2_data_2 = src2.z;

        uchar mask_data = * (mask + mask_index);

        float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
        float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
        float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));

        float tmp_data_0 = src1_data_0 + src2_data_0;
        float tmp_data_1 = src1_data_1 + src2_data_1;
        float tmp_data_2 = src1_data_2 + src2_data_2;

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global float *)((__global char *)dst + dst_index + 0))= data_0;
        *((__global float *)((__global char *)dst + dst_index + 4))= data_1;
        *((__global float *)((__global char *)dst + dst_index + 8))= data_2;
    }
}
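The float (D5) and, below, double (D6) variants need no saturating conversion, so the sum is a plain + (generalized to ARITHM_OP in the later hunks); the mask is applied with the same read-modify-write select as the integer paths. A hedged scalar sketch of that update; masked_update is an illustrative name:

float masked_update(float dst_val, float tmp_val, uchar mask_byte)
{
    // Form used in the kernels; for scalars this is equivalent to
    // select(dst_val, tmp_val, (int)(mask_byte != 0)).
    return mask_byte ? tmp_val : dst_val;
}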

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_add_with_mask_C3_D6 (__global   double *src1, int src1_step, int src1_offset,
                                            __global   double *dst,  int dst_step,  int dst_offset,
                                            __global   uchar  *mask, int mask_step, int mask_offset,
                                            double4 src2, int rows, int cols, int dst_step1)
{

    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 24));

        double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
        double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
        double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));

        double src2_data_0 = src2.x;
        double src2_data_1 = src2.y;
        double src2_data_2 = src2.z;

        uchar mask_data = * (mask + mask_index);

        double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
        double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
        double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));

        double tmp_data_0 = src1_data_0 + src2_data_0;
        double tmp_data_1 = src1_data_1 + src2_data_1;
        double tmp_data_2 = src1_data_2 + src2_data_2;

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global double *)((__global char *)dst + dst_index + 0 ))= data_0;
        *((__global double *)((__global char *)dst + dst_index + 8 ))= data_1;
        *((__global double *)((__global char *)dst + dst_index + 16))= data_2;
    }
}
#endif
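The double kernel compiles only when DOUBLE_SUPPORT is defined, which the host wrappers do only after the device passes the CL_DOUBLE feature check. A typical guard preamble for such a file, as a hedged sketch (the actual pragma block sits outside the hunks shown here):

#if defined (DOUBLE_SUPPORT)
#ifdef cl_khr_fp64
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#elif defined (cl_amd_fp64)
#pragma OPENCL EXTENSION cl_amd_fp64:enable
#endif
#endif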

__kernel void arithm_s_add_with_mask_C4_D0 (__global   uchar *src1, int src1_step, int src1_offset,
                                            __global   uchar *dst,  int dst_step,  int dst_offset,
                                            __global   uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1)
{

    int x = get_global_id(0);
    int y = get_global_id(1);

@@ -771,7 +470,7 @@ __kernel void arithm_s_add_with_mask_C4_D0 (__global   uchar *src1, int src1_ste
        uchar4 src_data1 = *((__global uchar4 *)(src1 + src1_index));
        uchar4 dst_data  = *((__global uchar4 *)(dst  + dst_index));

        uchar4 data = convert_uchar4_sat(convert_int4_sat(src_data1) + src2);
        uchar4 data = convert_uchar4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
        data = mask_data ? data : dst_data;

        *((__global uchar4 *)(dst + dst_index)) = data;
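The hunk above and those below swap the hard-coded + for an ARITHM_OP macro, so one kernel body serves both add and subtract depending on whether the host passes -D ARITHM_ADD or -D ARITHM_SUB at build time. A hedged sketch of the macro definition this relies on (its actual definition lies outside the hunks shown):

#ifdef ARITHM_ADD
#define ARITHM_OP(A, B) ((A) + (B))
#elif defined (ARITHM_SUB)
#define ARITHM_OP(A, B) ((A) - (B))
#endif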
@@ -797,7 +496,7 @@ __kernel void arithm_s_add_with_mask_C4_D2 (__global   ushort *src1, int src1_st
        ushort4 src_data1 = *((__global ushort4 *)((__global char *)src1 + src1_index));
        ushort4 dst_data  = *((__global ushort4 *)((__global char *)dst  + dst_index));

        ushort4 data = convert_ushort4_sat(convert_int4_sat(src_data1) + src2);
        ushort4 data = convert_ushort4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
        data = mask_data ? data : dst_data;

        *((__global ushort4 *)((__global char *)dst + dst_index)) = data;
@@ -823,7 +522,7 @@ __kernel void arithm_s_add_with_mask_C4_D3 (__global   short *src1, int src1_ste
        short4 src_data1 = *((__global short4 *)((__global char *)src1 + src1_index));
        short4 dst_data  = *((__global short4 *)((__global char *)dst  + dst_index));

        short4 data = convert_short4_sat(convert_int4_sat(src_data1) + src2);
        short4 data = convert_short4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
        data = mask_data ? data : dst_data;

        *((__global short4 *)((__global char *)dst + dst_index)) = data;
@@ -849,7 +548,7 @@ __kernel void arithm_s_add_with_mask_C4_D4 (__global   int *src1, int src1_step,
        int4 src_data1 = *((__global int4 *)((__global char *)src1 + src1_index));
        int4 dst_data  = *((__global int4 *)((__global char *)dst  + dst_index));

        int4 data = convert_int4_sat(convert_long4_sat(src_data1) + convert_long4_sat(src2));
        int4 data = convert_int4_sat(ARITHM_OP(convert_long4_sat(src_data1), convert_long4_sat(src2)));
        data = mask_data ? data : dst_data;

        *((__global int4 *)((__global char *)dst + dst_index)) = data;
@@ -875,7 +574,7 @@ __kernel void arithm_s_add_with_mask_C4_D5 (__global   float *src1, int src1_ste
        float4 src_data1 = *((__global float4 *)((__global char *)src1 + src1_index));
        float4 dst_data  = *((__global float4 *)((__global char *)dst  + dst_index));

        float4 data = src_data1 + src2;
        float4 data = ARITHM_OP(src_data1, src2);
        data = mask_data ? data : dst_data;

        *((__global float4 *)((__global char *)dst + dst_index)) = data;
@@ -903,7 +602,7 @@ __kernel void arithm_s_add_with_mask_C4_D6 (__global   double *src1, int src1_st
        double4 src_data1 = *((__global double4 *)((__global char *)src1 + src1_index));
        double4 dst_data  = *((__global double4 *)((__global char *)dst  + dst_index));

        double4 data = src_data1 + src2;
        double4 data = ARITHM_OP(src_data1, src2);
        data = mask_data ? data : dst_data;

        *((__global double4 *)((__global char *)dst + dst_index)) = data;