Minor fixes

As OpenCV's build-bot did not want to compile this revision, I had
to make some changes. In particular:
1) Removed unsigned int vs int comparisons that were treated as errors (see the sketch below)
2) Removed unused variables and functions
3) Removed functions without a previous declaration
4) Fixed whitespace
Alex Leontiev, 2013-09-01 01:02:06 +08:00
commit 11fa0651c6, parent ccc71ac190
3 changed files with 10 additions and 84 deletions
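For context on item 1: comparing a signed loop counter with the unsigned value returned by std::vector::size() triggers the compiler's signed/unsigned comparison warning (-Wsign-compare on GCC/Clang), and the build-bot treats such warnings as errors. Below is a minimal illustration of the pattern and of the cast used throughout this patch; it is a hypothetical snippet, not code taken from the repository.

#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> observations(3, 0);

    // With the signed/unsigned warning promoted to an error, this form does not build:
    //     for (int i = 1; i < observations.size(); i++) { ... }
    // because 'i' is signed while observations.size() is unsigned.

    // The fix applied in this commit: cast the size to int explicitly.
    for (int i = 1; i < (int)observations.size(); i++)
        std::printf("observation %d\n", i);

    return 0;
}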


@@ -12,7 +12,7 @@ problems (that is, finding a function to minimize some functional)
problem, the primal-dual algorithm can then be used to perform denoising, and this
is exactly what is implemented.
It should be noted that this implementation was taken from the July 2013 blog entry [Mordvintsev]_, which also contained
(slightly more general) ready-to-use source code in Python. Subsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky
at the end of July 2013, and finally it was slightly adapted by later authors.
@@ -38,10 +38,7 @@ into play.
:param result: The denoised image will be stored here. There is no need to pre-allocate storage space; it will be allocated automatically if necessary.
:param lambda: Corresponds to :math:`\lambda` in the formulas above. As it is enlarged, detailed (but possibly noisier) results are favored over smooth (blurred) ones. Roughly speaking, as it becomes smaller, the result will be more blurred, but more severe outliers will be removed.
:param niters: Number of iterations that the algorithm will run. Of course, the more iterations the better, but it is hard to quantify this precisely, so just use the default and increase it if the results are poor.
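As a quick illustration of the interface documented above, here is a minimal sketch of calling cv::optim::denoise_TVL1 from C++. The header path, the file names and the parameter values are assumptions made for the example, not something taken from this commit.

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/optim.hpp>   // assumed header for the optim module

int main()
{
    // One or more 8-bit single-channel observations of the same scene.
    std::vector<cv::Mat> observations;
    observations.push_back(cv::imread("noisy.png", cv::IMREAD_GRAYSCALE));

    cv::Mat result;  // no pre-allocation needed, see :param result: above

    // lambda and niters as described above; the values here are only illustrative.
    cv::optim::denoise_TVL1(observations, result, 1.0, 30);

    cv::imwrite("denoised.png", result);
    return 0;
}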


@@ -1,5 +1,5 @@
#include "precomp.hpp"
-#define ALEX_DEBUG
+#undef ALEX_DEBUG
#include "debug.hpp"
#include <vector>
#include <algorithm>
@@ -18,93 +18,24 @@ namespace cv{namespace optim{
float _scale;
};
void solve_TVL1(const Mat& img, Mat& res, double _clambda, int niters)
{
const float L2 = 8.0f, tau = 0.02f, sigma = 1./(L2*tau), theta = 1.f, img_scale = 1.f/255;
float clambda = (float)_clambda, threshold = clambda*tau;
const int workdepth = CV_32F;
int i, x, y, rows=img.rows, cols=img.cols;
Mat X, P = Mat::zeros(rows, cols, CV_MAKETYPE(workdepth, 2));
img.convertTo(X, workdepth, 1./255);
for( i = 0; i < niters; i++ )
{
float currsigma = i == 0 ? 1 + sigma : sigma;
// P_ = P + sigma*nabla(X)
// P(x,y) = P_(x,y)/max(||P(x,y)||,1)
for( y = 0; y < rows; y++ )
{
const float* x_curr = X.ptr<float>(y);
const float* x_next = X.ptr<float>(std::min(y+1, rows-1));
Point2f* p_curr = P.ptr<Point2f>(y);
float dx, dy, m;
for( x = 0; x < cols-1; x++ )
{
dx = (x_curr[x+1] - x_curr[x])*currsigma + p_curr[x].x;
dy = (x_next[x] - x_curr[x])*currsigma + p_curr[x].y;
m = 1.f/std::max(std::sqrt(dx*dx + dy*dy), 1.f);
p_curr[x].x = dx*m;
p_curr[x].y = dy*m;
}
dy = (x_next[x] - x_curr[x])*currsigma + p_curr[x].y;
m = 1.f/std::max(std::abs(dy), 1.f);
p_curr[x].x = 0.f;
p_curr[x].y = dy*m;
}
// X1 = X + tau*(-nablaT(P))
// X2 = X1 + clip(img - X1, -clambda*tau, clambda*tau)
// X = X2 + theta*(X2 - X)
for( y = 0; y < rows; y++ )
{
const uchar* img_curr = img.ptr<uchar>(y);
float* x_curr = X.ptr<float>(y);
const Point2f* p_curr = P.ptr<Point2f>(y);
const Point2f* p_prev = P.ptr<Point2f>(std::max(y - 1, 0));
x = 0;
float x_new = x_curr[x] + tau*(p_curr[x].y - p_prev[x].y);
x_new += std::min(std::max(img_curr[x]*img_scale - x_new, -threshold), threshold);
x_curr[x] = x_new + theta*(x_new - x_curr[x]);
for( x = 1; x < cols; x++ )
{
x_new = x_curr[x] + tau*(p_curr[x].x - p_curr[x-1].x + p_curr[x].y - p_prev[x].y);
x_new += std::min(std::max(img_curr[x]*img_scale - x_new, -threshold), threshold);
x_curr[x] = x_new + theta*(x_new - x_curr[x]);
}
}
}
res.create(X.rows,X.cols,CV_8U);
X.convertTo(res, CV_8U, 255);
}
void denoise_TVL1(const std::vector<Mat>& observations,Mat& result, double lambda, int niters){
CV_Assert(observations.size()>0 && niters>0 && lambda>0);
#if 0
solve_TVL1(observations[0],result,lambda,niters);
return;
#endif
-const float L2 = 8.0f, tau = 0.02f, sigma = 1./(L2*tau), theta = 1.f, img_scale = 1.f/255;
-float clambda = (float)lambda, threshold = clambda*tau;
+const float L2 = 8.0f, tau = 0.02f, sigma = 1./(L2*tau), theta = 1.f;
+float clambda = (float)lambda;
float s=0;
const int workdepth = CV_32F;
int i, x, y, rows=observations[0].rows, cols=observations[0].cols,count;
-for(i=1;i<observations.size();i++){
+for(i=1;i<(int)observations.size();i++){
CV_Assert(observations[i].rows==rows && observations[i].cols==cols);
}
Mat X, P = Mat::zeros(rows, cols, CV_MAKETYPE(workdepth, 2));
observations[0].convertTo(X, workdepth, 1./255);
std::vector< Mat_<float> > Rs(observations.size());
-for(count=0;count<Rs.size();count++){
+for(count=0;count<(int)Rs.size();count++){
Rs[count]=Mat::zeros(rows,cols,workdepth);
}
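For reference, the update rules written as comments inside solve_TVL1 above, and echoed by the comments in denoise_TVL1, amount to one iteration of the standard primal-dual scheme for the TV-L1 model. Writing div for -nablaT and applying the projection of P per pixel, a single-image iteration can be summarized as below; this is an editor's summary of the commented formulas, not text from the commit. The multi-observation version that follows replaces the single clipping step with the residual images Rs, which are themselves clipped to [-clambda, clambda].

\begin{aligned}
P   &\leftarrow \frac{P + \sigma\,\nabla X}{\max\big(1,\ \lVert P + \sigma\,\nabla X\rVert\big)}, \\
X_1 &\leftarrow X + \tau\,\operatorname{div} P, \\
X_2 &\leftarrow X_1 + \operatorname{clip}\big(\mathrm{img} - X_1,\ -\lambda\tau,\ \lambda\tau\big), \\
X   &\leftarrow X_2 + \theta\,(X_2 - X).
\end{aligned}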
@@ -136,7 +67,7 @@ namespace cv{namespace optim{
//Rs = clip(Rs + sigma*(X-imgs), -clambda, clambda)
-for(count=0;count<Rs.size();count++){
+for(count=0;count<(int)Rs.size();count++){
std::transform<MatIterator_<float>,MatConstIterator_<uchar>,MatIterator_<float>,AddFloatToCharScaled>(
Rs[count].begin(),Rs[count].end(),observations[count].begin<uchar>(),
Rs[count].begin(),AddFloatToCharScaled(-sigma/255.0));
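A note on the std::transform call above: only the tail of the AddFloatToCharScaled helper (its float _scale member, visible at the top of the previous hunk) appears in this diff. Judging from the call site, it is a small binary functor roughly of the following shape; this is a reconstruction for illustration and may differ from the repository's exact code. With _scale = -sigma/255.0 the call updates each residual image in place, subtracting sigma times the rescaled observation, which is the "- sigma*imgs" part of the commented step.

// Illustrative reconstruction of AddFloatToCharScaled (assumed, not copied
// from the repository). std::transform applies it element-wise, producing
//     Rs[count](y,x) <- Rs[count](y,x) + _scale * observations[count](y,x)
struct AddFloatToCharScaled
{
    explicit AddFloatToCharScaled(float scale) : _scale(scale) {}
    float operator()(float a, uchar b) const { return a + _scale * b; }  // uchar is OpenCV's unsigned char typedef
    float _scale;
};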
@@ -147,7 +78,6 @@ namespace cv{namespace optim{
for( y = 0; y < rows; y++ )
{
const uchar* img_curr = observations[0].ptr<uchar>(y);
float* x_curr = X.ptr<float>(y);
const Point2f* p_curr = P.ptr<Point2f>(y);
const Point2f* p_prev = P.ptr<Point2f>(std::max(y - 1, 0));
@@ -155,7 +85,7 @@ namespace cv{namespace optim{
// X1 = X + tau*(-nablaT(P))
x = 0;
s=0.0;
-for(count=0;count<Rs.size();count++){
+for(count=0;count<(int)Rs.size();count++){
s=s+Rs[count](y,x);
}
float x_new = x_curr[x] + tau*(p_curr[x].y - p_prev[x].y)-tau*s;
@@ -166,7 +96,7 @@ namespace cv{namespace optim{
for(x = 1; x < cols; x++ )
{
s=0.0;
-for(count=0;count<Rs.size();count++){
+for(count=0;count<(int)Rs.size();count++){
s+=Rs[count](y,x);
}
// X1 = X + tau*(-nablaT(P))


@@ -48,7 +48,6 @@ TEST(Optim_denoise_tvl1, regression_basic){
//cv::imshow("test", images[0]);
cv::optim::denoise_TVL1(images, res);
printf("hello here!\n");
//cv::imshow("denoised", res);
//cv::waitKey();