Warning fixes continued

Andrey Kamaev
2012-06-09 15:00:04 +00:00
parent f6b451c607
commit f2d3b9b4a1
127 changed files with 6298 additions and 6277 deletions


@@ -44,17 +44,17 @@
This is a variation of
"Stereo Processing by Semiglobal Matching and Mutual Information"
by Heiko Hirschmuller.
We match blocks rather than individual pixels; thus the algorithm is called
SGBM (Semi-Global Block Matching).
*/
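A minimal usage sketch of this class with the 2.4-era C++ API; the file names and parameter values below are illustrative assumptions only:
// assumes a rectified 8-bit stereo pair on disk; numberOfDisparities must be divisible by 16
cv::Mat left  = cv::imread("left.png",  0 /*grayscale*/);
cv::Mat right = cv::imread("right.png", 0 /*grayscale*/);
cv::StereoSGBM sgbm(0 /*minDisparity*/, 64 /*numberOfDisparities*/, 5 /*SADWindowSize*/);
cv::Mat disp;               // CV_16S, fixed-point disparity with 4 fractional bits
sgbm(left, right, disp);    // calls StereoSGBM::operator() implemented below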
#include "precomp.hpp"
#include <limits.h>
namespace cv
{
typedef uchar PixType;
typedef short CostType;
typedef short DispType;
@@ -105,7 +105,7 @@ StereoSGBM::~StereoSGBM()
row1[x] and row2[x-d]. The subpixel algorithm from
"Depth Discontinuities by Pixel-to-Pixel Stereo" by Stan Birchfield and C. Tomasi
is used, hence the suffix BT.
the temporary buffer should contain width2*2 elements
*/
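A scalar sketch of the Birchfield-Tomasi dissimilarity accumulated below; the helper name and the u0/u1, v0/v1 arguments (min/max over a pixel and its two half-sample neighbors) are illustrative, mirroring the per-pixel logic of the vectorized code:
static inline int btCost( int u, int u0, int u1, int v, int v0, int v1 )
{
    int c0 = std::max(0, std::max(u - v1, v0 - u));  // distance from u to the range of v
    int c1 = std::max(0, std::max(v - u1, u0 - v));  // distance from v to the range of u
    return std::min(c0, c1);                         // symmetric BT dissimilarity
}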
static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
@@ -119,25 +119,25 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int D = maxD - minD, width1 = maxX1 - minX1, width2 = maxX2 - minX2;
const PixType *row1 = img1.ptr<PixType>(y), *row2 = img2.ptr<PixType>(y);
PixType *prow1 = buffer + width2*2, *prow2 = prow1 + width*cn*2;
tab += tabOfs;
for( c = 0; c < cn*2; c++ )
{
prow1[width*c] = prow1[width*c + width-1] =
prow2[width*c] = prow2[width*c + width-1] = tab[0];
}
int n1 = y > 0 ? -(int)img1.step : 0, s1 = y < img1.rows-1 ? (int)img1.step : 0;
int n2 = y > 0 ? -(int)img2.step : 0, s2 = y < img2.rows-1 ? (int)img2.step : 0;
if( cn == 1 )
{
for( x = 1; x < width-1; x++ )
{
prow1[x] = tab[(row1[x+1] - row1[x-1])*2 + row1[x+n1+1] - row1[x+n1-1] + row1[x+s1+1] - row1[x+s1-1]];
prow2[width-1-x] = tab[(row2[x+1] - row2[x-1])*2 + row2[x+n2+1] - row2[x+n2-1] + row2[x+s2+1] - row2[x+s2-1]];
prow1[x+width] = row1[x];
prow2[width-1-x+width] = row2[x];
}
@@ -149,35 +149,35 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
prow1[x] = tab[(row1[x*3+3] - row1[x*3-3])*2 + row1[x*3+n1+3] - row1[x*3+n1-3] + row1[x*3+s1+3] - row1[x*3+s1-3]];
prow1[x+width] = tab[(row1[x*3+4] - row1[x*3-2])*2 + row1[x*3+n1+4] - row1[x*3+n1-2] + row1[x*3+s1+4] - row1[x*3+s1-2]];
prow1[x+width*2] = tab[(row1[x*3+5] - row1[x*3-1])*2 + row1[x*3+n1+5] - row1[x*3+n1-1] + row1[x*3+s1+5] - row1[x*3+s1-1]];
prow2[width-1-x] = tab[(row2[x*3+3] - row2[x*3-3])*2 + row2[x*3+n2+3] - row2[x*3+n2-3] + row2[x*3+s2+3] - row2[x*3+s2-3]];
prow2[width-1-x+width] = tab[(row2[x*3+4] - row2[x*3-2])*2 + row2[x*3+n2+4] - row2[x*3+n2-2] + row2[x*3+s2+4] - row2[x*3+s2-2]];
prow2[width-1-x+width*2] = tab[(row2[x*3+5] - row2[x*3-1])*2 + row2[x*3+n2+5] - row2[x*3+n2-1] + row2[x*3+s2+5] - row2[x*3+s2-1]];
prow1[x+width*3] = row1[x*3];
prow1[x+width*4] = row1[x*3+1];
prow1[x+width*5] = row1[x*3+2];
prow2[width-1-x+width*3] = row2[x*3];
prow2[width-1-x+width*4] = row2[x*3+1];
prow2[width-1-x+width*5] = row2[x*3+2];
}
}
memset( cost, 0, width1*D*sizeof(cost[0]) );
buffer -= minX2;
cost -= minX1*D + minD; // simplify the cost indices inside the loop
#if CV_SSE2
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
#endif
#if 1
for( c = 0; c < cn*2; c++, prow1 += width, prow2 += width )
{
int diff_scale = c < cn ? 0 : 2;
// precompute
// v0 = min(row2[x-1/2], row2[x], row2[x+1/2]) and
// v1 = max(row2[x-1/2], row2[x], row2[x+1/2]) and
@@ -191,7 +191,7 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
buffer[x] = (PixType)v0;
buffer[x + width2] = (PixType)v1;
}
for( x = minX1; x < maxX1; x++ )
{
int u = prow1[x];
@@ -199,14 +199,14 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int ur = x < width-1 ? (u + prow1[x+1])/2 : u;
int u0 = min(ul, ur); u0 = min(u0, u);
int u1 = max(ul, ur); u1 = max(u1, u);
#if CV_SSE2
if( useSIMD )
{
__m128i _u = _mm_set1_epi8((char)u), _u0 = _mm_set1_epi8((char)u0);
__m128i _u1 = _mm_set1_epi8((char)u1), z = _mm_setzero_si128();
__m128i ds = _mm_cvtsi32_si128(diff_scale);
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-x-1 + d));
@@ -215,10 +215,10 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
__m128i c0 = _mm_max_epu8(_mm_subs_epu8(_u, _v1), _mm_subs_epu8(_v0, _u));
__m128i c1 = _mm_max_epu8(_mm_subs_epu8(_v, _u1), _mm_subs_epu8(_u0, _v));
__m128i diff = _mm_min_epu8(c0, c1);
c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_srl_epi16(_mm_unpacklo_epi8(diff,z), ds)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_srl_epi16(_mm_unpackhi_epi8(diff,z), ds)));
}
@@ -233,7 +233,7 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int v1 = buffer[width-x-1 + d + width2];
int c0 = max(0, u - v1); c0 = max(c0, v0 - u);
int c1 = max(0, v - u1); c1 = max(c1, u0 - v);
cost[x*D + d] = (CostType)(cost[x*D+d] + (min(c0, c1) >> diff_scale));
}
}
@@ -249,14 +249,14 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
if( useSIMD )
{
__m128i _u = _mm_set1_epi8(u), z = _mm_setzero_si128();
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-1-x + d));
__m128i diff = _mm_adds_epu8(_mm_subs_epu8(_u,_v), _mm_subs_epu8(_v,_u));
__m128i c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
__m128i c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_unpacklo_epi8(diff,z)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_unpackhi_epi8(diff,z)));
}
@@ -282,22 +282,22 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
minD <= d < maxD.
disp2full is the reverse disparity map, that is:
disp2full(x+roi.x,y+roi.y)=d means that img2(x+roi.x, y+roi.y) ~ img1(x+roi.x+d, y+roi.y)
note that disp1buf will have the same size as the roi and
disp2full will have the same size as img1 (or img2).
On exit disp2buf is not the final disparity, it is an intermediate result that becomes
final after all the tiles are processed.
the disparity in disp1buf is written with sub-pixel accuracy
(4 fractional bits, see CvStereoSGBM::DISP_SCALE),
using quadratic interpolation, while the disparity in disp2buf
is written as is, without interpolation.
disp2cost also has the same size as img1 (or img2).
It contains the current minimum cost, which is used to pick the best (minimal-cost) disparity.
*/
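Because the output disparity carries 4 fractional bits (DISP_SHIFT = 4, DISP_SCALE = 16), callers usually rescale it; a small sketch, reusing the sgbm/left/right names from the example above:
cv::Mat disp16, disp32f;
sgbm(left, right, disp16);                    // CV_16S, true disparity multiplied by 16
disp16.convertTo(disp32f, CV_32F, 1.0/16.0);  // disparity in pixels, as float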
static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
Mat& disp1, const StereoSGBM& params,
Mat& buffer )
{
#if CV_SSE2
@@ -312,15 +312,15 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
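// LSBTab[i] stores the index of the lowest set bit of i (e.g. LSBTab[64] == 6, LSBTab[72] == 3);
// it is used below to recover, from an SSE comparison mask, the first of the 8 lanes whose
// aggregated cost S equals the minimum, i.e. the winning disparity within an 8-wide block.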
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
#endif
const int ALIGN = 16;
const int DISP_SHIFT = StereoSGBM::DISP_SHIFT;
const int DISP_SCALE = StereoSGBM::DISP_SCALE;
const CostType MAX_COST = SHRT_MAX;
int minD = params.minDisparity, maxD = minD + params.numberOfDisparities;
Size SADWindowSize;
SADWindowSize.width = SADWindowSize.height = params.SADWindowSize > 0 ? params.SADWindowSize : 5;
@@ -336,28 +336,28 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
int npasses = params.fullDP ? 2 : 1;
const int TAB_OFS = 256*4, TAB_SIZE = 256 + TAB_OFS*2;
PixType clipTab[TAB_SIZE];
for( k = 0; k < TAB_SIZE; k++ )
clipTab[k] = (PixType)(min(max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
if( minX1 >= maxX1 )
{
disp1 = Scalar::all(INVALID_DISP_SCALED);
return;
}
CV_Assert( D % 16 == 0 );
// NR - the number of directions. The loop on x below that computes Lr assumes that NR == 8.
// If you change NR, please modify the loop as well.
int D2 = D+16, NRD2 = NR2*D2;
// the number of L_r(.,.) and min_k L_r(.,.) lines in the buffer:
// for 8-way dynamic programming we need the current row and
// the previous row, i.e. 2 rows in total
const int NLR = 2;
const int LrBorder = NLR - 1;
// for each possible stereo match (img1(x,y) <=> img2(x-d,y))
// we keep pixel difference cost (C) and the summary cost over NR directions (S).
// we also keep all the partial costs for the previous line L_r(x,d) and also min_k L_r(x, k)
@@ -370,29 +370,29 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
CSBufSize*2*sizeof(CostType) + // C, S
width*16*img1.channels()*sizeof(PixType) + // temp buffer for computing per-pixel cost
width*(sizeof(CostType) + sizeof(DispType)) + 1024; // disp2cost + disp2
if( !buffer.data || !buffer.isContinuous() ||
buffer.cols*buffer.rows*buffer.elemSize() < totalBufSize )
buffer.create(1, (int)totalBufSize, CV_8U);
// summary cost over different (nDirs) directions
CostType* Cbuf = (CostType*)alignPtr(buffer.data, ALIGN);
CostType* Sbuf = Cbuf + CSBufSize;
CostType* hsumBuf = Sbuf + CSBufSize;
CostType* pixDiff = hsumBuf + costBufSize*hsumBufNRows;
CostType* disp2cost = pixDiff + costBufSize + (LrSize + minLrSize)*NLR;
DispType* disp2ptr = (DispType*)(disp2cost + width);
PixType* tempBuf = (PixType*)(disp2ptr + width);
// add P2 to every C(x,y). it saves a few operations in the inner loops
for( k = 0; k < width1*D; k++ )
Cbuf[k] = (CostType)P2;
for( int pass = 1; pass <= npasses; pass++ )
{
int x1, y1, x2, y2, dx, dy;
if( pass == 1 )
{
y1 = 0; y2 = height; dy = 1;
@@ -403,9 +403,9 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
y1 = height-1; y2 = -1; dy = -1;
x1 = width1-1; x2 = -1; dx = -1;
}
CostType *Lr[NLR]={0}, *minLr[NLR]={0};
for( k = 0; k < NLR; k++ )
{
// shift Lr[k] and minLr[k] pointers, because we allocated them with the borders,
@@ -418,26 +418,26 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
minLr[k] = pixDiff + costBufSize + LrSize*NLR + minLrSize*k + NR2*2;
memset( minLr[k] - LrBorder*NR2, 0, minLrSize*sizeof(CostType) );
}
for( int y = y1; y != y2; y += dy )
{
int x, d;
DispType* disp1ptr = disp1.ptr<DispType>(y);
CostType* C = Cbuf + (!params.fullDP ? 0 : y*costBufSize);
CostType* S = Sbuf + (!params.fullDP ? 0 : y*costBufSize);
if( pass == 1 ) // compute C on the first pass, and reuse it on the second pass, if any.
{
int dy1 = y == 0 ? 0 : y + SH2, dy2 = y == 0 ? SH2 : dy1;
for( k = dy1; k <= dy2; k++ )
{
CostType* hsumAdd = hsumBuf + (min(k, height-1) % hsumBufNRows)*costBufSize;
if( k < height )
{
calcPixelCostBT( img1, img2, k, minD, maxD, pixDiff, tempBuf, clipTab, TAB_OFS, ftzero );
memset(hsumAdd, 0, D*sizeof(CostType));
for( x = 0; x <= SW2*D; x += D )
{
@@ -445,17 +445,17 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
for( d = 0; d < D; d++ )
hsumAdd[d] = (CostType)(hsumAdd[d] + pixDiff[x + d]*scale);
}
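The loop above seeds the horizontal window sum with a replicated left border (the elided scale is presumably SW2 + 1 at x == 0, mirroring the vertical k == 0 case further below); a worked example for SADWindowSize = 5, i.e. SW2 = 2:
// hsumAdd[d] = pixDiff[0*D + d]*(SW2 + 1) + pixDiff[1*D + d] + pixDiff[2*D + d]
// i.e. the out-of-image columns x = -1, -2 are approximated by repeating column x = 0.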
if( y > 0 )
{
const CostType* hsumSub = hsumBuf + (max(y - SH2 - 1, 0) % hsumBufNRows)*costBufSize;
const CostType* Cprev = !params.fullDP || y == 0 ? C : C - costBufSize;
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + max(x - (SW2+1)*D, 0);
#if CV_SSE2
if( useSIMD )
{
@@ -490,13 +490,13 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
{
const CostType* pixAdd = pixDiff + min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + max(x - (SW2+1)*D, 0);
for( d = 0; d < D; d++ )
hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]);
}
}
}
if( y == 0 )
{
int scale = k == 0 ? SH2 + 1 : 1;
@@ -504,18 +504,18 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
C[x] = (CostType)(C[x] + hsumAdd[x]*scale);
}
}
// also, clear the S buffer
for( k = 0; k < width1*D; k++ )
S[k] = 0;
}
// clear the left and the right borders
memset( Lr[0] - NRD2*LrBorder - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( Lr[0] + width1*NRD2 - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( minLr[0] - NR2*LrBorder, 0, NR2*LrBorder*sizeof(CostType) );
memset( minLr[0] + width1*NR2, 0, NR2*LrBorder*sizeof(CostType) );
/*
[formula 13 in the paper]
compute L_r(p, d) = C(p, d) +
@@ -537,87 +537,87 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
for( x = x1; x != x2; x += dx )
{
int xm = x*NR2, xd = xm*D2;
int delta0 = minLr[0][xm - dx*NR2] + P2, delta1 = minLr[1][xm - NR2 + 1] + P2;
int delta2 = minLr[1][xm + 2] + P2, delta3 = minLr[1][xm + NR2 + 3] + P2;
CostType* Lr_p0 = Lr[0] + xd - dx*NRD2;
CostType* Lr_p1 = Lr[1] + xd - NRD2 + D2;
CostType* Lr_p2 = Lr[1] + xd + D2*2;
CostType* Lr_p3 = Lr[1] + xd + NRD2 + D2*3;
Lr_p0[-1] = Lr_p0[D] = Lr_p1[-1] = Lr_p1[D] =
Lr_p2[-1] = Lr_p2[D] = Lr_p3[-1] = Lr_p3[D] = MAX_COST;
CostType* Lr_p = Lr[0] + xd;
const CostType* Cp = C + x*D;
CostType* Sp = S + x*D;
#if CV_SSE2
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
__m128i _delta0 = _mm_set1_epi16((short)delta0);
__m128i _delta1 = _mm_set1_epi16((short)delta1);
__m128i _delta2 = _mm_set1_epi16((short)delta2);
__m128i _delta3 = _mm_set1_epi16((short)delta3);
__m128i _minL0 = _mm_set1_epi16((short)MAX_COST);
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d));
__m128i L0, L1, L2, L3;
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L1 = _mm_load_si128((const __m128i*)(Lr_p1 + d));
L2 = _mm_load_si128((const __m128i*)(Lr_p2 + d));
L3 = _mm_load_si128((const __m128i*)(Lr_p3 + d));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d - 1)), _P1));
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d + 1)), _P1));
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d - 1)), _P1));
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d + 1)), _P1));
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d - 1)), _P1));
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d + 1)), _P1));
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
L1 = _mm_min_epi16(L1, _delta1);
L1 = _mm_adds_epi16(_mm_subs_epi16(L1, _delta1), Cpd);
L2 = _mm_min_epi16(L2, _delta2);
L2 = _mm_adds_epi16(_mm_subs_epi16(L2, _delta2), Cpd);
L3 = _mm_min_epi16(L3, _delta3);
L3 = _mm_adds_epi16(_mm_subs_epi16(L3, _delta3), Cpd);
_mm_store_si128( (__m128i*)(Lr_p + d), L0);
_mm_store_si128( (__m128i*)(Lr_p + d + D2), L1);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*2), L2);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*3), L3);
__m128i t0 = _mm_min_epi16(_mm_unpacklo_epi16(L0, L2), _mm_unpackhi_epi16(L0, L2));
__m128i t1 = _mm_min_epi16(_mm_unpacklo_epi16(L1, L3), _mm_unpackhi_epi16(L1, L3));
t0 = _mm_min_epi16(_mm_unpacklo_epi16(t0, t1), _mm_unpackhi_epi16(t0, t1));
_minL0 = _mm_min_epi16(_minL0, t0);
__m128i Sval = _mm_load_si128((const __m128i*)(Sp + d));
L0 = _mm_adds_epi16(L0, L1);
L2 = _mm_adds_epi16(L2, L3);
Sval = _mm_adds_epi16(Sval, L0);
Sval = _mm_adds_epi16(Sval, L2);
_mm_store_si128((__m128i*)(Sp + d), Sval);
}
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_mm_storel_epi64((__m128i*)&minLr[0][xm], _minL0);
}
@@ -625,28 +625,28 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
#endif
{
int minL0 = MAX_COST, minL1 = MAX_COST, minL2 = MAX_COST, minL3 = MAX_COST;
for( d = 0; d < D; d++ )
{
int Cpd = Cp[d], L0, L1, L2, L3;
L0 = Cpd + min((int)Lr_p0[d], min(Lr_p0[d-1] + P1, min(Lr_p0[d+1] + P1, delta0))) - delta0;
L1 = Cpd + min((int)Lr_p1[d], min(Lr_p1[d-1] + P1, min(Lr_p1[d+1] + P1, delta1))) - delta1;
L2 = Cpd + min((int)Lr_p2[d], min(Lr_p2[d-1] + P1, min(Lr_p2[d+1] + P1, delta2))) - delta2;
L3 = Cpd + min((int)Lr_p3[d], min(Lr_p3[d-1] + P1, min(Lr_p3[d+1] + P1, delta3))) - delta3;
Lr_p[d] = (CostType)L0;
minL0 = min(minL0, L0);
Lr_p[d + D2] = (CostType)L1;
minL1 = min(minL1, L1);
Lr_p[d + D2*2] = (CostType)L2;
minL2 = min(minL2, L2);
Lr_p[d + D2*3] = (CostType)L3;
minL3 = min(minL3, L3);
Sp[d] = saturate_cast<CostType>(Sp[d] + L0 + L1 + L2 + L3);
}
minLr[0][xm] = (CostType)minL0;
@@ -655,7 +655,7 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
minLr[0][xm+3] = (CostType)minL3;
}
}
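Both the SSE2 branch and the scalar branch above implement the same per-direction recurrence (formula 13 in the Hirschmuller paper) and accumulate it into S; written out for one direction r:
// L_r(p, d) = C(p, d) + min( L_r(p-r, d),
//                            L_r(p-r, d-1) + P1,
//                            L_r(p-r, d+1) + P1,
//                            min_k L_r(p-r, k) + P2 ) - min_k L_r(p-r, k)
// S(p, d)   = sum over the processed directions r of L_r(p, d); the winner is argmin_d S(p, d).
// In the code, delta = min_k L_r(p-r, k) + P2 is what gets subtracted, which is compensated
// exactly by the P2 that was pre-added to every C(x, y) when Cbuf was initialized.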
if( pass == npasses )
{
for( x = 0; x < width; x++ )
@@ -663,73 +663,73 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
disp1ptr[x] = disp2ptr[x] = (DispType)INVALID_DISP_SCALED;
disp2cost[x] = MAX_COST;
}
for( x = width1 - 1; x >= 0; x-- )
{
CostType* Sp = S + x*D;
int minS = MAX_COST, bestDisp = -1;
if( npasses == 1 )
{
int xm = x*NR2, xd = xm*D2;
int minL0 = MAX_COST;
int delta0 = minLr[0][xm + NR2] + P2;
CostType* Lr_p0 = Lr[0] + xd + NRD2;
Lr_p0[-1] = Lr_p0[D] = MAX_COST;
CostType* Lr_p = Lr[0] + xd;
const CostType* Cp = C + x*D;
#if CV_SSE2
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
__m128i _delta0 = _mm_set1_epi16((short)delta0);
__m128i _minL0 = _mm_set1_epi16((short)minL0);
__m128i _minS = _mm_set1_epi16(MAX_COST), _bestDisp = _mm_set1_epi16(-1);
__m128i _d8 = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7), _8 = _mm_set1_epi16(8);
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d)), L0;
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
_mm_store_si128((__m128i*)(Lr_p + d), L0);
_minL0 = _mm_min_epi16(_minL0, L0);
L0 = _mm_adds_epi16(L0, *(__m128i*)(Sp + d));
_mm_store_si128((__m128i*)(Sp + d), L0);
__m128i mask = _mm_cmpgt_epi16(_minS, L0);
_minS = _mm_min_epi16(_minS, L0);
_bestDisp = _mm_xor_si128(_bestDisp, _mm_and_si128(_mm_xor_si128(_bestDisp,_d8), mask));
_d8 = _mm_adds_epi16(_d8, _8);
}
short CV_DECL_ALIGNED(16) bestDispBuf[8];
_mm_store_si128((__m128i*)bestDispBuf, _bestDisp);
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 4));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 2));
__m128i qS = _mm_min_epi16(_minS, _mm_srli_si128(_minS, 8));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 4));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 2));
minLr[0][xm] = (CostType)_mm_cvtsi128_si32(_minL0);
minS = (CostType)_mm_cvtsi128_si32(qS);
qS = _mm_shuffle_epi32(_mm_unpacklo_epi16(qS, qS), 0);
qS = _mm_cmpeq_epi16(_minS, qS);
int idx = _mm_movemask_epi8(_mm_packs_epi16(qS, qS)) & 255;
bestDisp = bestDispBuf[LSBTab[idx]];
}
else
@@ -738,10 +738,10 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
for( d = 0; d < D; d++ )
{
int L0 = Cp[d] + min((int)Lr_p0[d], min(Lr_p0[d-1] + P1, min(Lr_p0[d+1] + P1, delta0))) - delta0;
Lr_p[d] = (CostType)L0;
minL0 = min(minL0, L0);
int Sval = Sp[d] = saturate_cast<CostType>(Sp[d] + L0);
if( Sval < minS )
{
@@ -764,7 +764,7 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
}
}
}
for( d = 0; d < D; d++ )
{
if( Sp[d]*(100 - uniquenessRatio) < minS*100 && std::abs(bestDisp - d) > 1 )
@@ -773,13 +773,13 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
if( d < D )
continue;
d = bestDisp;
int _x2 = x + minX1 - d - minD;
if( disp2cost[_x2] > minS )
{
disp2cost[_x2] = (CostType)minS;
disp2ptr[_x2] = (DispType)(d + minD);
}
if( 0 < d && d < D-1 )
{
// do subpixel quadratic interpolation:
@@ -792,24 +792,24 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
d *= DISP_SCALE;
disp1ptr[x + minX1] = (DispType)(d + minD*DISP_SCALE);
}
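For the 0 < d && d < D-1 branch elided by the hunk above, the usual parabola-fit refinement looks roughly like the sketch below (an illustration, not necessarily the exact omitted lines):
// fit a parabola through (d-1, Sp[d-1]), (d, Sp[d]), (d+1, Sp[d+1]) and take its minimum,
// expressed in DISP_SCALE (= 16) fixed-point units, rounded to the nearest value:
int denom2 = std::max(Sp[d-1] + Sp[d+1] - 2*Sp[d], 1);
d = d*DISP_SCALE + ((Sp[d-1] - Sp[d+1])*DISP_SCALE + denom2)/(denom2*2);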
for( x = minX1; x < maxX1; x++ )
{
// we round the computed disparity both towards -inf and +inf and check
// if either of the corresponding disparities in disp2 is consistent.
// This is to give the computed disparity a chance to look valid if it is.
int d1 = disp1ptr[x];
if( d1 == INVALID_DISP_SCALED )
continue;
int _d = d1 >> DISP_SHIFT;
int d_ = (d1 + DISP_SCALE-1) >> DISP_SHIFT;
int _x = x - _d, x_ = x - d_;
if( 0 <= _x && _x < width && disp2ptr[_x] >= minD && std::abs(disp2ptr[_x] - _d) > disp12MaxDiff &&
0 <= x_ && x_ < width && disp2ptr[x_] >= minD && std::abs(disp2ptr[x_] - d_) > disp12MaxDiff )
disp1ptr[x] = (DispType)INVALID_DISP_SCALED;
}
}
// now shift the cyclic buffers
std::swap( Lr[0], Lr[1] );
std::swap( minLr[0], minLr[1] );
@@ -825,13 +825,13 @@ void StereoSGBM::operator ()( InputArray _left, InputArray _right,
Mat left = _left.getMat(), right = _right.getMat();
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == DataType<PixType>::depth );
_disp.create( left.size(), CV_16S );
Mat disp = _disp.getMat();
computeDisparitySGBM( left, right, disp, *this, buffer );
medianBlur(disp, disp, 3);
if( speckleWindowSize > 0 )
filterSpeckles(disp, (minDisparity - 1)*DISP_SCALE, speckleWindowSize, DISP_SCALE*speckleRange, buffer);
}
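For visual inspection, the fixed-point result is commonly compressed to 8 bits; a small illustrative sketch (numberOfDisparities and the factor 16 = DISP_SCALE as above):
cv::Mat disp8;
disp.convertTo(disp8, CV_8U, 255.0/(numberOfDisparities*16.0));  // map [0, maxDisp*16) to [0, 255]
cv::imshow("disparity", disp8);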
@@ -844,33 +844,33 @@ Rect getValidDisparityROI( Rect roi1, Rect roi2,
{
int SW2 = SADWindowSize/2;
int minD = minDisparity, maxD = minDisparity + numberOfDisparities - 1;
int xmin = max(roi1.x, roi2.x + maxD) + SW2;
int xmax = min(roi1.x + roi1.width, roi2.x + roi2.width - minD) - SW2;
int ymin = max(roi1.y, roi2.y) + SW2;
int ymax = min(roi1.y + roi1.height, roi2.y + roi2.height) - SW2;
Rect r(xmin, ymin, xmax - xmin, ymax - ymin);
return r.width > 0 && r.height > 0 ? r : Rect();
}
}
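A hedged usage sketch for getValidDisparityROI: roi1/roi2 would normally come from stereoRectify's validPixROI outputs; the calibration inputs (M1, D1, M2, D2, R, T, imgSize) are placeholders:
cv::Mat R1, R2, P1rect, P2rect, Q;
cv::Rect roi1, roi2;
cv::stereoRectify(M1, D1, M2, D2, imgSize, R, T, R1, R2, P1rect, P2rect, Q,
                  cv::CALIB_ZERO_DISPARITY, -1, imgSize, &roi1, &roi2);
cv::Rect vroi = cv::getValidDisparityROI(roi1, roi2, sgbm.minDisparity,
                                         sgbm.numberOfDisparities, sgbm.SADWindowSize);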
void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSize,
double _maxDiff, InputOutputArray __buf )
{
Mat img = _img.getMat();
Mat temp, &_buf = __buf.needed() ? __buf.getMatRef() : temp;
CV_Assert( img.type() == CV_16SC1 );
int newVal = cvRound(_newval);
int maxDiff = cvRound(_maxDiff);
int width = img.cols, height = img.rows, npixels = width*height;
size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
_buf.create(1, (int)bufSize, CV_8U);
uchar* buf = _buf.data;
int i, j, dstep = (int)(img.step/sizeof(short));
int* labels = (int*)buf;
@@ -879,33 +879,33 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
buf += npixels*sizeof(wbuf[0]);
uchar* rtype = (uchar*)buf;
int curlabel = 0;
// clear out label assignments
memset(labels, 0, npixels*sizeof(labels[0]));
for( i = 0; i < height; i++ )
{
short* ds = img.ptr<short>(i);
int* ls = labels + width*i;
for( j = 0; j < width; j++ )
{
if( ds[j] != newVal ) // not a bad disparity
{
if( ls[j] ) // has a label, check for bad label
{
if( rtype[ls[j]] ) // small region, zero out disparity
ds[j] = (short)newVal;
}
// no label, assign and propagate
else
{
Point2s* ws = wbuf; // initialize wavefront
Point2s p((short)j, (short)i); // current pixel
curlabel++; // next label
int count = 0; // current region size
ls[j] = curlabel;
// wavefront propagation
while( ws >= wbuf ) // wavefront not empty
{
@@ -914,50 +914,50 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
short* dpp = &img.at<short>(p.y, p.x);
short dp = *dpp;
int* lpp = labels + width*p.y + p.x;
if( p.x < width-1 && !lpp[+1] && dpp[+1] != newVal && std::abs(dp - dpp[+1]) <= maxDiff )
{
lpp[+1] = curlabel;
*ws++ = Point2s(p.x+1, p.y);
}
if( p.x > 0 && !lpp[-1] && dpp[-1] != newVal && std::abs(dp - dpp[-1]) <= maxDiff )
{
lpp[-1] = curlabel;
*ws++ = Point2s(p.x-1, p.y);
}
if( p.y < height-1 && !lpp[+width] && dpp[+dstep] != newVal && std::abs(dp - dpp[+dstep]) <= maxDiff )
{
lpp[+width] = curlabel;
*ws++ = Point2s(p.x, p.y+1);
}
if( p.y > 0 && !lpp[-width] && dpp[-dstep] != newVal && std::abs(dp - dpp[-dstep]) <= maxDiff )
{
lpp[-width] = curlabel;
*ws++ = Point2s(p.x, p.y-1);
}
// pop most recent and propagate
// NB: could try least recent, maybe better convergence
p = *--ws;
}
// assign label type
if( count <= maxSpeckleSize ) // speckle region
{
rtype[ls[j]] = 1; // small region label
ds[j] = (short)newVal;
}
else
rtype[ls[j]] = 0; // large region label
}
}
}
}
}
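A standalone usage sketch of cv::filterSpeckles, mirroring the call made from StereoSGBM::operator() above (the settings are illustrative):
int minDisparity = 0, speckleWindowSize = 100, speckleRange = 2;   // illustrative settings
// disp is the CV_16S SGBM output; newVal marks pixels as invalid, maxDiff is in 1/16 pixel units
cv::filterSpeckles(disp, (minDisparity - 1)*16, speckleWindowSize, 16*speckleRange);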
void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDisparity,
int numberOfDisparities, int disp12MaxDiff )
{
@@ -971,32 +971,32 @@ void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDis
const int DISP_SHIFT = 4, DISP_SCALE = 1 << DISP_SHIFT;
int INVALID_DISP = minD - 1, INVALID_DISP_SCALED = INVALID_DISP*DISP_SCALE;
int costType = cost.type();
disp12MaxDiff *= DISP_SCALE;
CV_Assert( numberOfDisparities > 0 && disp.type() == CV_16S &&
(costType == CV_16S || costType == CV_32S) &&
disp.size() == cost.size() );
for( int y = 0; y < rows; y++ )
{
short* dptr = disp.ptr<short>(y);
for( x = 0; x < cols; x++ )
{
disp2buf[x] = INVALID_DISP_SCALED;
disp2cost[x] = INT_MAX;
}
if( costType == CV_16S )
{
const short* cptr = cost.ptr<short>(y);
for( x = minX1; x < maxX1; x++ )
{
int d = dptr[x], c = cptr[x];
int x2 = x - ((d + DISP_SCALE/2) >> DISP_SHIFT);
if( disp2cost[x2] > c )
{
disp2cost[x2] = c;
@@ -1007,12 +1007,12 @@ void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDis
else
{
const int* cptr = cost.ptr<int>(y);
for( x = minX1; x < maxX1; x++ )
{
int d = dptr[x], c = cptr[x];
int x2 = x - ((d + DISP_SCALE/2) >> DISP_SHIFT);
if( disp2cost[x2] < c )
{
disp2cost[x2] = c;
@@ -1020,7 +1020,7 @@ void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDis
}
}
}
for( x = minX1; x < maxX1; x++ )
{
// we round the computed disparity both towards -inf and +inf and check