Merge new changes
commit 90b6af1496
@@ -192,17 +192,31 @@ Helper function, that shift Mat filling new regions with zeros.
:param dst: result image

:param shift: shift value
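
A minimal usage sketch (assuming the ``AlignMTB`` interface declared in photo.hpp later in this commit; the input matrix is synthetic and purely illustrative)::

    #include <opencv2/photo.hpp>
    #include <opencv2/core.hpp>

    int main()
    {
        // synthetic 8-bit image with a bright square in the middle
        cv::Mat src = cv::Mat::zeros(100, 100, CV_8U);
        src(cv::Rect(40, 40, 20, 20)).setTo(255);

        cv::Mat dst;
        cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB();
        // move the content 5 pixels right and 3 pixels down; uncovered regions are filled with zeros
        align->shiftMat(src, dst, cv::Point(5, 3));
        return 0;
    }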

AlignMTB::computeBitmaps
---------------------------
Computes median threshold and exclude bitmaps of the given image.

.. ocv:function:: void computeBitmaps(Mat& img, Mat& tb, Mat& eb)

:param img: input image

:param tb: median threshold bitmap

:param eb: exclude bitmap
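
A short sketch of producing these bitmaps for one exposure (a fragment wrapped in an illustrative helper; the caller is assumed to supply a color exposure)::

    #include <opencv2/photo.hpp>
    #include <opencv2/imgproc.hpp>

    // illustrative helper: compute the two bitmaps for a single color exposure
    void exampleComputeBitmaps(const cv::Mat& color_exposure)
    {
        cv::Mat gray, tb, eb;
        cv::cvtColor(color_exposure, gray, cv::COLOR_RGB2GRAY);

        cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB();
        align->computeBitmaps(gray, tb, eb); // tb: median threshold bitmap, eb: exclude bitmap
    }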

createAlignMTB
---------------------------
Creates AlignMTB object

.. ocv:function:: Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4)
.. ocv:function:: Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true)

:param max_bits: logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are usually good enough (31 and 63 pixels shift respectively).

:param exclude_range: range for exclusion bitmap that is constructed to suppress noise around the median value.

:param cut: if true, cuts images; otherwise fills the new regions with zeros.
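
A hedged end-to-end sketch of aligning a bracketed sequence (the exposures are assumed to be loaded elsewhere; the helper name is illustrative)::

    #include <opencv2/photo.hpp>
    #include <vector>

    // align hand-held exposures before merging them into an HDR image
    void alignExposures(const std::vector<cv::Mat>& exposures, std::vector<cv::Mat>& aligned)
    {
        // max_bits = 6 (up to 63 pixel shift), exclude_range = 4, cut = true
        cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB(6, 4, true);
        align->process(exposures, aligned);
    }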

ExposureCalibrate
---------------------------
.. ocv:class:: ExposureCalibrate : public Algorithm
@@ -234,12 +248,33 @@ createCalibrateDebevec
---------------------------
Creates CalibrateDebevec object

.. ocv:function:: Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 50, float lambda = 10.0f)
.. ocv:function:: Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false)

:param samples: number of pixel locations to use

:param lambda: smoothness term weight. Greater values produce smoother results, but can alter the response.

:param random: if true, sample pixel locations are chosen at random; otherwise they form a rectangular grid.
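
A usage sketch (the exposure images and matching times are assumed to be loaded by the caller; the helper name is illustrative)::

    #include <opencv2/photo.hpp>
    #include <vector>

    cv::Mat estimateResponseDebevec(const std::vector<cv::Mat>& images, std::vector<float> times)
    {
        cv::Mat response;
        // 70 grid-sampled pixel locations; pass random = true to sample them randomly instead
        cv::Ptr<cv::CalibrateDebevec> calibrate = cv::createCalibrateDebevec(70, 10.0f, false);
        calibrate->process(images, response, times);
        return response;
    }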

CalibrateRobertson
---------------------------
.. ocv:class:: CalibrateRobertson : public ExposureCalibrate

The inverse camera response function is extracted for each brightness value by minimizing an objective function as a linear system.
This algorithm uses all image pixels.

For more information see [RB99]_.
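
As a sketch of the alternating update from [RB99]_ (the notation here is assumed, not taken from this module): writing :math:`g` for the response, :math:`w` for the weights, :math:`t_j` for the exposure times and :math:`I_{ij}` for the value of pixel :math:`i` in image :math:`j`, each iteration estimates the radiance and then re-estimates the response as

.. math::

    E_i = \frac{\sum_j w(I_{ij})\, t_j\, g(I_{ij})}{\sum_j w(I_{ij})\, t_j^2}, \qquad
    g(m) \leftarrow \frac{1}{|\Omega_m|} \sum_{(i,j) \in \Omega_m} t_j E_i,

where :math:`\Omega_m` is the set of pixel samples with value :math:`m`. The iteration stops when the change of the response falls below ``threshold`` or after ``max_iter`` steps (both documented below).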

createCalibrateRobertson
---------------------------
Creates CalibrateRobertson object

.. ocv:function:: Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f)

:param max_iter: maximal number of Gauss-Seidel solver iterations.

:param threshold: target difference between results of two successive steps of the minimization.
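
A usage sketch (inputs are assumed to be loaded by the caller; the helper name is illustrative)::

    #include <opencv2/photo.hpp>
    #include <vector>

    cv::Mat estimateResponseRobertson(const std::vector<cv::Mat>& images, std::vector<float> times)
    {
        cv::Mat response;
        cv::Ptr<cv::CalibrateRobertson> calibrate = cv::createCalibrateRobertson(30, 0.01f);
        calibrate->process(images, response, times);
        // the radiance map from the last iteration is available through calibrate->getRadiance()
        return response;
    }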

ExposureMerge
---------------------------
.. ocv:class:: ExposureMerge : public Algorithm

@@ -264,7 +299,7 @@ MergeDebevec
---------------------------
.. ocv:class:: MergeDebevec : public ExposureMerge

The resulting HDR image is calculated as weighted average of he exposures considering exposure values and camera response.
The resulting HDR image is calculated as weighted average of the exposures considering exposure values and camera response.

For more information see [DM97]_.
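
As a sketch of that weighted average, in the notation of [DM97]_ (assumed here, not defined in this file): with response :math:`g`, weights :math:`w`, exposure times :math:`\Delta t_j` and pixel values :math:`Z_{ij}`, the log radiance of pixel :math:`i` is

.. math::

    \ln E_i = \frac{\sum_j w(Z_{ij}) \left( g(Z_{ij}) - \ln \Delta t_j \right)}{\sum_j w(Z_{ij})}.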

@@ -296,7 +331,6 @@ Short version of process, that doesn't take extra arguments.

:param dst: result image


createMergeMertens
---------------------------
Creates MergeMertens object
@@ -308,6 +342,20 @@ Creates MergeMertens object
:param saturation_weight: saturation measure weight

:param exposure_weight: well-exposedness measure weight
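
A usage sketch of exposure fusion with this class (8-bit, already aligned input images are assumed; the result is a floating-point image that is commonly scaled by 255 before saving, which is an assumption of this note rather than something stated here)::

    #include <opencv2/photo.hpp>
    #include <vector>

    cv::Mat fuseExposures(const std::vector<cv::Mat>& images)
    {
        cv::Mat fusion;
        cv::Ptr<cv::MergeMertens> merge = cv::createMergeMertens();
        merge->process(images, fusion); // short form: no exposure times or camera response needed
        return fusion;
    }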

MergeRobertson
---------------------------
.. ocv:class:: MergeRobertson : public ExposureMerge

The resulting HDR image is calculated as weighted average of the exposures considering exposure values and camera response.

For more information see [RB99]_.

createMergeRobertson
---------------------------
Creates MergeRobertson object

.. ocv:function:: Ptr<MergeRobertson> createMergeRobertson()
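
A combined sketch that feeds a calibrated response into the merge (mirroring the regression tests further down; helper name and call order are illustrative)::

    #include <opencv2/photo.hpp>
    #include <vector>

    cv::Mat buildHdrRobertson(const std::vector<cv::Mat>& images, std::vector<float> times)
    {
        cv::Mat response, hdr;

        cv::Ptr<cv::CalibrateRobertson> calibrate = cv::createCalibrateRobertson();
        calibrate->process(images, response, times);

        cv::Ptr<cv::MergeRobertson> merge = cv::createMergeRobertson();
        merge->process(images, hdr, times, response);
        return hdr; // 32-bit floating point radiance map
    }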

References
==========
@@ -327,3 +375,5 @@ References
.. [DM97] P. Debevec, J. Malik, "Recovering High Dynamic Range Radiance Maps from Photographs", Proceedings of ACM SIGGRAPH, 1997, 369 - 378.

.. [MK07] T. Mertens, J. Kautz, F. Van Reeth, "Exposure Fusion", Proceedings of the 15th Pacific Conference on Computer Graphics and Applications, 2007, 382 - 390.

.. [RB99] M. Robertson, S. Borman, R. Stevenson, "Dynamic range improvement through multiple exposures", Proceedings of the Int. Conf. on Image Processing, 1999, 159 - 163.

@@ -80,6 +80,8 @@ CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs,
float h = 3, float hColor = 3,
int templateWindowSize = 7, int searchWindowSize = 21);

enum { LDR_SIZE = 256 };
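// LDR_SIZE: number of representable 8-bit intensity levels; response curves,
// weight tables and histograms in this module are indexed 0..LDR_SIZE-1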

class CV_EXPORTS_W Tonemap : public Algorithm
{
public:
@@ -164,7 +166,7 @@ createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation =
class CV_EXPORTS_W ExposureAlign : public Algorithm
{
public:
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArrayOfArrays dst,
CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
const std::vector<float>& times, InputArray response) = 0;
};

@@ -173,22 +175,26 @@ public:
class CV_EXPORTS_W AlignMTB : public ExposureAlign
{
public:
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArrayOfArrays dst,
CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
const std::vector<float>& times, InputArray response) = 0;

CV_WRAP virtual void process(InputArrayOfArrays src, OutputArrayOfArrays dst) = 0;
CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0;

CV_WRAP virtual void calculateShift(InputArray img0, InputArray img1, Point& shift) = 0;
CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0;
CV_WRAP virtual void computeBitmaps(Mat& img, Mat& tb, Mat& eb) = 0;

CV_WRAP virtual int getMaxBits() const = 0;
CV_WRAP virtual void setMaxBits(int max_bits) = 0;

CV_WRAP virtual int getExcludeRange() const = 0;
CV_WRAP virtual void setExcludeRange(int exclude_range) = 0;

CV_WRAP virtual bool getCut() const = 0;
CV_WRAP virtual void setCut(bool value) = 0;
};

CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4);
CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true);

class CV_EXPORTS_W ExposureCalibrate : public Algorithm
{
@@ -206,9 +212,12 @@ public:

CV_WRAP virtual int getSamples() const = 0;
CV_WRAP virtual void setSamples(int samples) = 0;

CV_WRAP virtual bool getRandom() const = 0;
CV_WRAP virtual void setRandom(bool random) = 0;
};

CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 50, float lambda = 10.0f);
CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false);

// "Dynamic range improvement through multiple exposures", Robertson et al., 1999

@@ -220,9 +229,11 @@ public:

CV_WRAP virtual float getThreshold() const = 0;
CV_WRAP virtual void setThreshold(float threshold) = 0;

CV_WRAP virtual Mat getRadiance() const = 0;
};

CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int samples = 50, float lambda = 10.0f);
CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);

class CV_EXPORTS_W ExposureMerge : public Algorithm
{
@@ -275,6 +286,8 @@ public:
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times) = 0;
};

CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();

} // cv

#endif

@@ -50,25 +50,25 @@ namespace cv
class AlignMTBImpl : public AlignMTB
{
public:
AlignMTBImpl(int max_bits, int exclude_range) :
AlignMTBImpl(int max_bits, int exclude_range, bool cut) :
max_bits(max_bits),
exclude_range(exclude_range),
cut(cut),
name("AlignMTB")
{
}

void process(InputArrayOfArrays src, OutputArrayOfArrays dst,
void process(InputArrayOfArrays src, std::vector<Mat>& dst,
const std::vector<float>& times, InputArray response)
{
process(src, dst);
}

void process(InputArrayOfArrays _src, OutputArray _dst)
void process(InputArrayOfArrays _src, std::vector<Mat>& dst)
{
std::vector<Mat> src, dst;
std::vector<Mat> src;
_src.getMatVector(src);
_dst.getMatVector(dst);


checkImageDimensions(src);
dst.resize(src.size());

@@ -76,17 +76,41 @@ public:
dst[pivot] = src[pivot];
Mat gray_base;
cvtColor(src[pivot], gray_base, COLOR_RGB2GRAY);
std::vector<Point> shifts;

for(size_t i = 0; i < src.size(); i++) {
if(i == pivot) {
shifts.push_back(Point(0, 0));
continue;
}
Mat gray;
cvtColor(src[i], gray, COLOR_RGB2GRAY);
Point shift;
calculateShift(gray_base, gray, shift);
shifts.push_back(shift);
shiftMat(src[i], dst[i], shift);
}
if(cut) {
Point max(0, 0), min(0, 0);
for(size_t i = 0; i < shifts.size(); i++) {
if(shifts[i].x > max.x) {
max.x = shifts[i].x;
}
if(shifts[i].y > max.y) {
max.y = shifts[i].y;
}
if(shifts[i].x < min.x) {
min.x = shifts[i].x;
}
if(shifts[i].y < min.y) {
min.y = shifts[i].y;
}
}
Point size = dst[0].size();
for(size_t i = 0; i < dst.size(); i++) {
dst[i] = dst[i](Rect(max, min + size));
}
}
}

void calculateShift(InputArray _img0, InputArray _img1, Point& shift)
@@ -109,8 +133,8 @@ public:

shift *= 2;
Mat tb1, tb2, eb1, eb2;
computeBitmaps(pyr0[level], tb1, eb1, exclude_range);
computeBitmaps(pyr1[level], tb2, eb2, exclude_range);
computeBitmaps(pyr0[level], tb1, eb1);
computeBitmaps(pyr1[level], tb2, eb2);

int min_err = pyr0[level].total();
Point new_shift(shift);
@@ -140,12 +164,13 @@ public:
_dst.create(src.size(), src.type());
Mat dst = _dst.getMat();

dst = Mat::zeros(src.size(), src.type());
Mat res = Mat::zeros(src.size(), src.type());
int width = src.cols - abs(shift.x);
int height = src.rows - abs(shift.y);
Rect dst_rect(max(shift.x, 0), max(shift.y, 0), width, height);
Rect src_rect(max(-shift.x, 0), max(-shift.y, 0), width, height);
src(src_rect).copyTo(dst(dst_rect));
src(src_rect).copyTo(res(dst_rect));
res.copyTo(dst);
}

int getMaxBits() const { return max_bits; }
@@ -154,11 +179,15 @@ public:
int getExcludeRange() const { return exclude_range; }
void setExcludeRange(int val) { exclude_range = val; }

bool getCut() const { return cut; }
void setCut(bool val) { cut = val; }

void write(FileStorage& fs) const
{
fs << "name" << name
<< "max_bits" << max_bits
<< "exclude_range" << exclude_range;
<< "exclude_range" << exclude_range
<< "cut" << static_cast<int>(cut);
}

void read(const FileNode& fn)
@@ -167,11 +196,21 @@ public:
CV_Assert(n.isString() && String(n) == name);
max_bits = fn["max_bits"];
exclude_range = fn["exclude_range"];
int cut_val = fn["cut"];
cut = static_cast<bool>(cut_val);
}

void computeBitmaps(Mat& img, Mat& tb, Mat& eb)
{
int median = getMedian(img);
compare(img, median, tb, CMP_GT);
compare(abs(img - median), exclude_range, eb, CMP_GT);
}

protected:
String name;
int max_bits, exclude_range;
bool cut;

void downsample(Mat& src, Mat& dst)
{
@@ -204,31 +243,25 @@ protected:
{
int channels = 0;
Mat hist;
int hist_size = 256;
float range[] = {0, 256} ;
int hist_size = LDR_SIZE;
float range[] = {0, LDR_SIZE} ;
const float* ranges[] = {range};
calcHist(&img, 1, &channels, Mat(), hist, 1, &hist_size, ranges);
float *ptr = hist.ptr<float>();
int median = 0, sum = 0;
int thresh = img.total() / 2;
while(sum < thresh && median < 256) {
while(sum < thresh && median < LDR_SIZE) {
sum += static_cast<int>(ptr[median]);
median++;
}
return median;
}

void computeBitmaps(Mat& img, Mat& tb, Mat& eb, int exclude_range)
{
int median = getMedian(img);
compare(img, median, tb, CMP_GT);
compare(abs(img - median), exclude_range, eb, CMP_GT);
}
};

CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits, int exclude_range)
Ptr<AlignMTB> createAlignMTB(int max_bits, int exclude_range, bool cut)
{
return new AlignMTBImpl(max_bits, exclude_range);
return new AlignMTBImpl(max_bits, exclude_range, cut);
}

}

@@ -43,6 +43,7 @@
#include "precomp.hpp"
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
//#include "opencv2/highgui.hpp"
#include "hdr_common.hpp"

namespace cv
@@ -51,11 +52,12 @@ namespace cv
class CalibrateDebevecImpl : public CalibrateDebevec
{
public:
CalibrateDebevecImpl(int samples, float lambda) :
CalibrateDebevecImpl(int samples, float lambda, bool random) :
samples(samples),
lambda(lambda),
name("CalibrateDebevec"),
w(tringleWeights())
w(tringleWeights()),
random(random)
{
}

@@ -71,28 +73,44 @@ public:
int channels = images[0].channels();
int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

dst.create(256, 1, CV_32FCC);
dst.create(LDR_SIZE, 1, CV_32FCC);
Mat result = dst.getMat();

std::vector<Point> sample_points;
if(random) {
for(int i = 0; i < samples; i++) {
sample_points.push_back(Point(rand() % images[0].cols, rand() % images[0].rows));
}
} else {
int x_points = sqrt(static_cast<double>(samples) * images[0].cols / images[0].rows);
int y_points = samples / x_points;
int step_x = images[0].cols / x_points;
int step_y = images[0].rows / y_points;

for(int i = 0, x = step_x / 2; i < x_points; i++, x += step_x) {
for(int j = 0, y = step_y; j < y_points; j++, y += step_y) {
sample_points.push_back(Point(x, y));
}
}
}

std::vector<Mat> result_split(channels);
for(int channel = 0; channel < channels; channel++) {
Mat A = Mat::zeros(samples * images.size() + 257, 256 + samples, CV_32F);
Mat A = Mat::zeros(sample_points.size() * images.size() + LDR_SIZE + 1, LDR_SIZE + sample_points.size(), CV_32F);
Mat B = Mat::zeros(A.rows, 1, CV_32F);

int eq = 0;
for(int i = 0; i < samples; i++) {

int pos = 3 * (rand() % images[0].total()) + channel;
for(size_t i = 0; i < sample_points.size(); i++) {
for(size_t j = 0; j < images.size(); j++) {

int val = (images[j].ptr() + pos)[0];
int val = images[j].ptr()[3*(sample_points[i].y * images[j].cols + sample_points[i].x) + channel];
A.at<float>(eq, val) = w.at<float>(val);
A.at<float>(eq, 256 + i) = -w.at<float>(val);
A.at<float>(eq, LDR_SIZE + i) = -w.at<float>(val);
B.at<float>(eq, 0) = w.at<float>(val) * log(times[j]);
eq++;
}
}
A.at<float>(eq, 128) = 1;
A.at<float>(eq, LDR_SIZE / 2) = 1;
eq++;

for(int i = 0; i < 254; i++) {
@@ -103,7 +121,7 @@ public:
}
Mat solution;
solve(A, B, solution, DECOMP_SVD);
solution.rowRange(0, 256).copyTo(result_split[channel]);
solution.rowRange(0, LDR_SIZE).copyTo(result_split[channel]);
}
merge(result_split, result);
exp(result, result);
@@ -115,11 +133,15 @@ public:
float getLambda() const { return lambda; }
void setLambda(float val) { lambda = val; }

bool getRandom() const { return random; }
void setRandom(bool val) { random = val; }

void write(FileStorage& fs) const
{
fs << "name" << name
<< "samples" << samples
<< "lambda" << lambda;
<< "lambda" << lambda
<< "random" << static_cast<int>(random);
}

void read(const FileNode& fn)
@@ -128,18 +150,21 @@ public:
CV_Assert(n.isString() && String(n) == name);
samples = fn["samples"];
lambda = fn["lambda"];
int random_val = fn["random"];
random = static_cast<bool>(random_val);
}

protected:
String name;
int samples;
float lambda;
bool random;
Mat w;
};

Ptr<CalibrateDebevec> createCalibrateDebevec(int samples, float lambda)
Ptr<CalibrateDebevec> createCalibrateDebevec(int samples, float lambda, bool random)
{
return new CalibrateDebevecImpl(samples, lambda);
return new CalibrateDebevecImpl(samples, lambda, random);
}

class CalibrateRobertsonImpl : public CalibrateRobertson
@@ -165,20 +190,14 @@ public:
int channels = images[0].channels();
int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

dst.create(256, 1, CV_32FCC);
dst.create(LDR_SIZE, 1, CV_32FCC);
Mat response = dst.getMat();

response = Mat::zeros(256, 1, CV_32FCC);
for(int i = 0; i < 256; i++) {
for(int c = 0; c < channels; c++) {
response.at<Vec3f>(i)[c] = i / 128.0;
}
}
response = linearResponse(3) / (LDR_SIZE / 2.0f);

Mat card = Mat::zeros(256, 1, CV_32FCC);
for(int i = 0; i < images.size(); i++) {
Mat card = Mat::zeros(LDR_SIZE, 1, CV_32FCC);
for(size_t i = 0; i < images.size(); i++) {
uchar *ptr = images[i].ptr();
for(int pos = 0; pos < images[i].total(); pos++) {
for(size_t pos = 0; pos < images[i].total(); pos++) {
for(int c = 0; c < channels; c++, ptr++) {
card.at<Vec3f>(*ptr)[c] += 1;
}
@@ -186,43 +205,34 @@ public:
}
card = 1.0 / card;

Ptr<MergeRobertson> merge = createMergeRobertson();
for(int iter = 0; iter < max_iter; iter++) {

Scalar channel_err(0, 0, 0);
Mat radiance = Mat::zeros(images[0].size(), CV_32FCC);
Mat wsum = Mat::zeros(images[0].size(), CV_32FCC);
for(int i = 0; i < images.size(); i++) {
Mat im, w;
LUT(images[i], weight, w);
LUT(images[i], response, im);
radiance = Mat::zeros(images[0].size(), CV_32FCC);
merge->process(images, radiance, times, response);

Mat err_mat;
pow(im - times[i] * radiance, 2.0f, err_mat);
err_mat = w.mul(err_mat);
channel_err += sum(err_mat);

radiance += times[i] * w.mul(im);
wsum += pow(times[i], 2) * w;
}
float err = (channel_err[0] + channel_err[1] + channel_err[2]) / (channels * radiance.total());
radiance = radiance.mul(1 / wsum);

float* rad_ptr = radiance.ptr<float>();
response = Mat::zeros(256, 1, CV_32FC3);
for(int i = 0; i < images.size(); i++) {
Mat new_response = Mat::zeros(LDR_SIZE, 1, CV_32FC3);
for(size_t i = 0; i < images.size(); i++) {
uchar *ptr = images[i].ptr();
for(int pos = 0; pos < images[i].total(); pos++) {
float* rad_ptr = radiance.ptr<float>();
for(size_t pos = 0; pos < images[i].total(); pos++) {
for(int c = 0; c < channels; c++, ptr++, rad_ptr++) {
response.at<Vec3f>(*ptr)[c] += times[i] * *rad_ptr;
new_response.at<Vec3f>(*ptr)[c] += times[i] * *rad_ptr;
}
}
}
response = response.mul(card);
new_response = new_response.mul(card);
for(int c = 0; c < 3; c++) {
for(int i = 0; i < 256; i++) {
response.at<Vec3f>(i)[c] /= response.at<Vec3f>(128)[c];
float middle = new_response.at<Vec3f>(LDR_SIZE / 2)[c];
for(int i = 0; i < LDR_SIZE; i++) {
new_response.at<Vec3f>(i)[c] /= middle;
}
}
float diff = sum(sum(abs(new_response - response)))[0] / channels;
new_response.copyTo(response);
if(diff < threshold) {
break;
}
}
}

@@ -232,6 +242,8 @@ public:
float getThreshold() const { return threshold; }
void setThreshold(float val) { threshold = val; }

Mat getRadiance() const { return radiance; }

void write(FileStorage& fs) const
{
fs << "name" << name
@@ -251,7 +263,7 @@ protected:
String name;
int max_iter;
float threshold;
Mat weight;
Mat weight, radiance;
};

Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter, float threshold)

@@ -61,21 +61,22 @@ void checkImageDimensions(const std::vector<Mat>& images)

Mat tringleWeights()
{
Mat w(256, 1, CV_32F);
for(int i = 0; i < 256; i++) {
w.at<float>(i) = i < 128 ? i + 1.0f : 256.0f - i;
Mat w(LDR_SIZE, 1, CV_32F);
int half = LDR_SIZE / 2;
for(int i = 0; i < LDR_SIZE; i++) {
w.at<float>(i) = i < half ? i + 1.0f : LDR_SIZE - i;
}
return w;
}

Mat RobertsonWeights()
{
Mat weight(256, 1, CV_32FC3);
for(int i = 0; i < 256; i++) {
float value = exp(-4.0f * pow(i - 127.5f, 2.0f) / pow(127.5f, 2.0f));
for(int c = 0; c < 3; c++) {
weight.at<Vec3f>(i)[c] = value;
}
Mat weight(LDR_SIZE, 1, CV_32FC3);
float q = (LDR_SIZE - 1) / 4.0f;
for(int i = 0; i < LDR_SIZE; i++) {
float value = i / q - 2.0f;
value = exp(-value * value);
weight.at<Vec3f>(i) = Vec3f::all(value);
}
return weight;
}
@@ -92,4 +93,13 @@ void mapLuminance(Mat src, Mat dst, Mat lum, Mat new_lum, float saturation)
merge(channels, dst);
}

Mat linearResponse(int channels)
{
Mat response = Mat(LDR_SIZE, 1, CV_MAKETYPE(CV_32F, channels));
for(int i = 0; i < LDR_SIZE; i++) {
response.at<Vec3f>(i) = Vec3f::all(i);
}
return response;
}

};

@@ -56,6 +56,7 @@ void mapLuminance(Mat src, Mat dst, Mat lum, Mat new_lum, float saturation);

Mat RobertsonWeights();

Mat linearResponse(int channels);
};

#endif

@@ -43,7 +43,6 @@
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "hdr_common.hpp"
#include <iostream>

namespace cv
{
@@ -77,9 +76,10 @@ public:

if(response.empty()) {
response = linearResponse(channels);
response.at<Vec3f>(0) = response.at<Vec3f>(1);
}
log(response, response);
CV_Assert(response.rows == 256 && response.cols == 1 &&
CV_Assert(response.rows == LDR_SIZE && response.cols == 1 &&
response.channels() == channels);

Mat exp_values(times);
@@ -125,23 +125,6 @@ public:
protected:
String name;
Mat weights;

Mat linearResponse(int channels)
{
Mat single_response = Mat(256, 1, CV_32F);
for(int i = 1; i < 256; i++) {
single_response.at<float>(i) = static_cast<float>(i);
}
single_response.at<float>(0) = static_cast<float>(1);

std::vector<Mat> splitted(channels);
for(int c = 0; c < channels; c++) {
splitted[c] = single_response;
}
Mat result;
merge(splitted, result);
return result;
}
};

Ptr<MergeDebevec> createMergeDebevec()
@@ -329,9 +312,9 @@ public:

Mat response = input_response.getMat();
if(response.empty()) {
response = linearResponse(channels);
response = linearResponse(channels) / (LDR_SIZE / 2.0f);
}
CV_Assert(response.rows == 256 && response.cols == 1 &&
CV_Assert(response.rows == LDR_SIZE && response.cols == 1 &&
response.channels() == channels);

result = Mat::zeros(images[0].size(), CV_32FCC);
@@ -355,17 +338,6 @@ public:
protected:
String name;
Mat weight;

Mat linearResponse(int channels)
{
Mat response = Mat::zeros(256, 1, CV_32FC3);
for(int i = 0; i < 256; i++) {
for(int c = 0; c < 3; c++) {
response.at<Vec3f>(i)[c] = static_cast<float>(i) / 128.0f;
}
}
return response;
}
};

Ptr<MergeRobertson> createMergeRobertson()

@@ -1,4 +1,4 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
@@ -185,9 +185,33 @@ TEST(Photo_MergeDebevec, regression)
Ptr<MergeDebevec> merge = createMergeDebevec();

Mat result, expected;
loadImage(test_path + "merge/debevec.exr", expected);
loadImage(test_path + "merge/debevec.hdr", expected);
merge->process(images, result, times, response);
imwrite("test.exr", result);

Ptr<Tonemap> map = createTonemap();
map->process(result, result);
map->process(expected, expected);

checkEqual(expected, result, 1e-2f);
}

TEST(Photo_MergeRobertson, regression)
{
string test_path = string(cvtest::TS::ptr()->get_data_path()) + "hdr/";

vector<Mat> images;
vector<float> times;
loadExposureSeq(test_path + "exposures/", images, times);

Ptr<MergeRobertson> merge = createMergeRobertson();

Mat result, expected;
loadImage(test_path + "merge/robertson.hdr", expected);
merge->process(images, result, times);
Ptr<Tonemap> map = createTonemap();
map->process(result, result);
map->process(expected, expected);

checkEqual(expected, result, 1e-2f);
}

@@ -201,7 +225,26 @@ TEST(Photo_CalibrateDebevec, regression)
loadExposureSeq(test_path + "exposures/", images, times);
loadResponseCSV(test_path + "calibrate/debevec.csv", expected);
Ptr<CalibrateDebevec> calibrate = createCalibrateDebevec();
srand(1);

calibrate->process(images, response, times);
Mat diff = abs(response - expected);
diff = diff.mul(1.0f / response);
double max;
minMaxLoc(diff, NULL, &max);
ASSERT_FALSE(max > 0.1);
}

TEST(Photo_CalibrateRobertson, regression)
{
string test_path = string(cvtest::TS::ptr()->get_data_path()) + "hdr/";

vector<Mat> images;
vector<float> times;
Mat response, expected;
loadExposureSeq(test_path + "exposures/", images, times);
loadResponseCSV(test_path + "calibrate/robertson.csv", expected);

Ptr<CalibrateRobertson> calibrate = createCalibrateRobertson();
calibrate->process(images, response, times);
checkEqual(expected, response, 1e-3f);
}