fixed some warnings under win64
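Every hunk below fixes the same class of warning: a 64-bit size_t value (the result of length(), a segment size, or cudaDeviceProp::textureAlignment) is passed where the 32-bit Ncv32u type is expected, which MSVC reports as a possible loss of data when targeting x64. A minimal sketch of that warning and of the explicit-cast fix used throughout this commit follows; consumeLength, example, and main are hypothetical names for illustration, not code from the repository.

// Sketch only: shows the size_t -> Ncv32u narrowing that MSVC x64 warns about
// (typically C4267, "conversion from 'size_t' to 'Ncv32u', possible loss of data")
// and the static_cast used in this commit to state that the narrowing is intentional.
#include <cstddef>

typedef unsigned int Ncv32u;                         // 32-bit unsigned, as in the NCV headers

static void consumeLength(Ncv32u len) { (void)len; } // hypothetical callee expecting a 32-bit length

static void example(std::size_t vecLength)           // size_t is 64 bits wide on win64
{
    // consumeLength(vecLength);                     // warns under MSVC x64: implicit 64->32-bit conversion
    consumeLength(static_cast<Ncv32u>(vecLength));   // explicit cast: warning silenced, truncation acknowledged
}

int main()
{
    example(1024);
    return 0;
}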
@@ -1096,7 +1096,7 @@ NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &d_integralImag
     NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride());
     ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
 
-    NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, d_vecPixelMask.length());
+    NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length()));
     ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
 
     NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2);
@@ -1120,7 +1120,7 @@ NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &d_integralImag
     NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp;
 
     Ncv32u szNppCompactTmpBuf;
-    nppsStCompactGetSize_32u(d_vecPixelMask.length(), &szNppCompactTmpBuf, devProp);
+    nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp);
     if (bDoAtomicCompaction)
     {
         szNppCompactTmpBuf = 0;
@@ -1206,7 +1206,7 @@ NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &d_integralImag
             gridInit, blockInit, cuStream,
             d_ptrNowData->ptr(),
             d_ptrNowTmp->ptr(),
-            d_vecPixelMask.length(), d_pixelMask.stride(),
+            static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(),
             anchorsRoi, pixelStep);
         ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
 
@@ -1221,7 +1221,7 @@ NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &d_integralImag
     else
     {
         NCVStatus nppSt;
-        nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), d_vecPixelMask.length(),
+        nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
                                   d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
                                   d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
         ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR);
@@ -1276,7 +1276,7 @@ NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &d_integralImag
     else
     {
         NCVStatus nppSt;
-        nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), d_vecPixelMask.length(),
+        nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
                                   d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
                                   d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
         ncvAssertReturnNcvStat(nppSt);
@@ -1783,7 +1783,7 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
             detectionsOnThisScale,
             d_hypothesesIntermediate,
             dstNumRects,
-            d_hypothesesIntermediate.length(),
+            static_cast<Ncv32u>(d_hypothesesIntermediate.length()),
             haar.ClassifierSize.width,
             haar.ClassifierSize.height,
             (Ncv32f)scale,
@@ -1880,7 +1880,7 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
     if (dstNumRects > d_dstRects.length())
     {
         ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
-        dstNumRects = d_dstRects.length();
+        dstNumRects = static_cast<Ncv32u>(d_dstRects.length());
     }
 
     if (dstNumRects != 0)

@@ -457,7 +457,7 @@ NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, cuda
     ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
     ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
 
-    NCVMemStackAllocator gpuCounter(devProp.textureAlignment);
+    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
     ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width,
@@ -475,7 +475,7 @@ NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, cud
     ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
     ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
 
-    NCVMemStackAllocator gpuCounter(devProp.textureAlignment);
+    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
     ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f),
@@ -493,7 +493,7 @@ NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, c
     ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);
     ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI);
 
-    NCVMemStackAllocator gpuCounter(devProp.textureAlignment);
+    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
     ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width,
@@ -511,7 +511,7 @@ NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep,
                                    NcvSize32u roiSize, Ncv8u *pBuffer,
                                    Ncv32u bufSize, cudaDeviceProp &devProp)
 {
-    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, devProp.textureAlignment, pBuffer);
+    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
     ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
@@ -526,7 +526,7 @@ NCVStatus nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep,
                                     NcvSize32u roiSize, Ncv8u *pBuffer,
                                     Ncv32u bufSize, cudaDeviceProp &devProp)
 {
-    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, devProp.textureAlignment, pBuffer);
+    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
     ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
@@ -541,7 +541,7 @@ NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep,
                                       NcvSize32u roiSize, Ncv8u *pBuffer,
                                       Ncv32u bufSize, cudaDeviceProp &devProp)
 {
-    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, devProp.textureAlignment, pBuffer);
+    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
     ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator);
@@ -1506,7 +1506,7 @@ NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, cudaDevicePr
         return NPPST_SUCCESS;
     }
 
-    NCVMemStackAllocator gpuCounter(devProp.textureAlignment);
+    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
     ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE,
@@ -1535,7 +1535,7 @@ NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
                             Ncv32u elemRemove, Ncv8u *pBuffer,
                             Ncv32u bufSize, cudaDeviceProp &devProp)
 {
-    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, devProp.textureAlignment, pBuffer);
+    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
     ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
 
     NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove,

@@ -355,7 +355,7 @@ NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
     seg.clear();
     ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
 
-    size = alignUp(size, this->_alignment);
+    size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
     this->currentSize += size;
     this->_maxSize = std::max(this->_maxSize, this->currentSize);
 
@@ -464,7 +464,7 @@ NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
         break;
     }
 
-    this->currentSize += alignUp(size, this->_alignment);
+    this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
     this->_maxSize = std::max(this->_maxSize, this->currentSize);
 
     seg.begin.memtype = this->_memType;
@@ -480,8 +480,8 @@ NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
     ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
     ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
 
-    ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
-    currentSize -= alignUp(seg.size, this->_alignment);
+    ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
+    currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);
 
     switch (this->_memType)
     {
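The three allocator hunks at the end cast the argument of alignUp() rather than its result, because the NCV align-up helper operates on 32-bit quantities, which is why the size_t sizes are narrowed before the call. A hedged sketch of such a power-of-two align-up follows; the function name and body are illustrative only, not copied from the NCV sources.

// Illustrative power-of-two align-up, matching the 32-bit signature implied by
// the casts in the allocator hunks above; not the NCV implementation itself.
typedef unsigned int Ncv32u;

static Ncv32u alignUpSketch(Ncv32u what, Ncv32u alignment)
{
    Ncv32u mask = alignment - 1;     // assumes alignment is a power of two
    return (what + mask) & ~mask;    // round what up to the next multiple of alignment
}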