Class naming update

Documentation improvement
Bug in output format for JPG set fixed
This commit is contained in:
Grigory Serebryakov 2014-08-26 08:55:59 +04:00
parent f81b3101e8
commit 57cf3d1766
6 changed files with 112 additions and 105 deletions

View File

@ -225,7 +225,7 @@ int main( int argc, char* argv[] )
"Annotations are in a separate directory\n", "Annotations are in a separate directory\n",
(( pngoutput ) ? "JPG" : "PNG") ); (( pngoutput ) ? "JPG" : "PNG") );
PngTrainingSetGenerator creator( infoname ); PngDatasetGenerator creator( infoname );
creator.create( imagename, bgcolor, bgthreshold, bgfilename, num, creator.create( imagename, bgcolor, bgthreshold, bgfilename, num,
invert, maxintensitydev, maxxangle, maxyangle, maxzangle, invert, maxintensitydev, maxxangle, maxyangle, maxzangle,
showsamples, width, height ); showsamples, width, height );
@ -238,7 +238,7 @@ int main( int argc, char* argv[] )
"Output format: %s\n", "Output format: %s\n",
(( pngoutput ) ? "JPG" : "PNG") ); (( pngoutput ) ? "JPG" : "PNG") );
TestSamplesGenerator creator( infoname ); JpgDatasetGrenerator creator( infoname );
creator.create( imagename, bgcolor, bgthreshold, bgfilename, num, creator.create( imagename, bgcolor, bgthreshold, bgfilename, num,
invert, maxintensitydev, maxxangle, maxyangle, maxzangle, invert, maxintensitydev, maxxangle, maxyangle, maxzangle,
showsamples, width, height ); showsamples, width, height );

View File

@ -2942,13 +2942,13 @@ void cvCreateTrainingSamples( const char* filename,
} }
SamplesGenerator::SamplesGenerator( IOutput* _writer ) DatasetGenerator::DatasetGenerator( IOutput* _writer )
:writer(_writer) :writer(_writer)
{ {
} }
void SamplesGenerator::showSamples(bool* show, CvMat *img) const void DatasetGenerator::showSamples(bool* show, CvMat *img) const
{ {
if( *show ) if( *show )
{ {
@ -2960,7 +2960,7 @@ void SamplesGenerator::showSamples(bool* show, CvMat *img) const
} }
} }
void SamplesGenerator::create(const char* imgfilename, int bgcolor, int bgthreshold, void DatasetGenerator::create(const char* imgfilename, int bgcolor, int bgthreshold,
const char* bgfilename, int count, const char* bgfilename, int count,
int invert, int maxintensitydev, int invert, int maxintensitydev,
double maxxangle, double maxyangle, double maxzangle, double maxxangle, double maxyangle, double maxzangle,
@ -3023,18 +3023,18 @@ void SamplesGenerator::create(const char* imgfilename, int bgcolor, int bgthresh
} }
} }
SamplesGenerator::~SamplesGenerator() DatasetGenerator::~DatasetGenerator()
{ {
delete writer; delete writer;
} }
TestSamplesGenerator::TestSamplesGenerator(const char* filename) JpgDatasetGrenerator::JpgDatasetGrenerator(const char* filename)
:SamplesGenerator(IOutput::createOutput(filename,IOutput::JPG_TEST_SET)) :DatasetGenerator(IOutput::createOutput(filename,IOutput::JPG_TEST_SET))
{ {
} }
CvSize TestSamplesGenerator::scaleObjectSize(const CvSize& bgImgSize, CvSize JpgDatasetGrenerator::scaleObjectSize(const CvSize& bgImgSize,
const CvSize& , const CvSize& ,
const CvSize& sampleSize) const const CvSize& sampleSize) const
{ {
@ -3058,7 +3058,7 @@ CvSize TestSamplesGenerator::scaleObjectSize(const CvSize& bgImgSize,
return cvSize( width, height ); return cvSize( width, height );
} }
CvRect SamplesGenerator::getObjectPosition(const CvSize& bgImgSize, CvRect DatasetGenerator::getObjectPosition(const CvSize& bgImgSize,
const CvSize& imgSize, const CvSize& imgSize,
const CvSize& sampleSize) const const CvSize& sampleSize) const
{ {
@ -3073,12 +3073,12 @@ CvRect SamplesGenerator::getObjectPosition(const CvSize& bgImgSize,
} }
PngTrainingSetGenerator::PngTrainingSetGenerator(const char* filename) PngDatasetGenerator::PngDatasetGenerator(const char* filename)
:SamplesGenerator(IOutput::createOutput(filename,IOutput::PNG_TRAINING_SET)) :DatasetGenerator(IOutput::createOutput(filename,IOutput::PNG_TRAINING_SET))
{ {
} }
CvSize PngTrainingSetGenerator::scaleObjectSize( const CvSize& bgImgSize, CvSize PngDatasetGenerator::scaleObjectSize( const CvSize& bgImgSize,
const CvSize& imgSize, const CvSize& imgSize,
const CvSize& ) const const CvSize& ) const
{ {

View File

@ -202,17 +202,17 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
int maxtreesplits, int minpos, bool bg_vecfile = false ); int maxtreesplits, int minpos, bool bg_vecfile = false );
class SamplesGenerator class DatasetGenerator
{ {
public: public:
SamplesGenerator( IOutput* _writer ); DatasetGenerator( IOutput* _writer );
void create( const char* imgfilename, int bgcolor, int bgthreshold, void create( const char* imgfilename, int bgcolor, int bgthreshold,
const char* bgfilename, int count, const char* bgfilename, int count,
int invert, int maxintensitydev, int invert, int maxintensitydev,
double maxxangle, double maxyangle, double maxzangle, double maxxangle, double maxyangle, double maxzangle,
bool showsamples, bool showsamples,
int winwidth, int winheight); int winwidth, int winheight);
virtual ~SamplesGenerator(); virtual ~DatasetGenerator();
private: private:
virtual void showSamples( bool* showSamples, CvMat* img ) const; virtual void showSamples( bool* showSamples, CvMat* img ) const;
@ -226,20 +226,21 @@ private:
IOutput* writer; IOutput* writer;
}; };
class TestSamplesGenerator: public SamplesGenerator /* Provides the functionality of test set generating */
class JpgDatasetGrenerator: public DatasetGenerator
{ {
public: public:
TestSamplesGenerator(const char* filename); JpgDatasetGrenerator(const char* filename);
private: private:
CvSize scaleObjectSize(const CvSize& bgImgSize, CvSize scaleObjectSize(const CvSize& bgImgSize,
const CvSize& , const CvSize& ,
const CvSize& sampleSize) const; const CvSize& sampleSize) const;
}; };
class PngTrainingSetGenerator: public SamplesGenerator class PngDatasetGenerator: public DatasetGenerator
{ {
public: public:
PngTrainingSetGenerator(const char *filename); PngDatasetGenerator(const char *filename);
private: private:
CvSize scaleObjectSize(const CvSize& bgImgSize, CvSize scaleObjectSize(const CvSize& bgImgSize,
const CvSize& imgSize , const CvSize& imgSize ,

View File

@ -35,10 +35,10 @@ IOutput* IOutput::createOutput(const char *filename,
IOutput* output = 0; IOutput* output = 0;
switch (type) { switch (type) {
case IOutput::PNG_TRAINING_SET: case IOutput::PNG_TRAINING_SET:
output = new PngTrainingSetOutput(); output = new PngDatasetOutput();
break; break;
case IOutput::JPG_TEST_SET: case IOutput::JPG_TEST_SET:
output = new TestSamplesOutput(); output = new JpgDatasetOutput();
break; break;
default: default:
#if CV_VERBOSE #if CV_VERBOSE
@ -53,7 +53,7 @@ IOutput* IOutput::createOutput(const char *filename,
return 0; return 0;
} }
bool PngTrainingSetOutput::init( const char* annotationsListFileName ) bool PngDatasetOutput::init( const char* annotationsListFileName )
{ {
IOutput::init( annotationsListFileName ); IOutput::init( annotationsListFileName );
@ -112,7 +112,7 @@ bool PngTrainingSetOutput::init( const char* annotationsListFileName )
return true; return true;
} }
bool PngTrainingSetOutput::write( const CvMat& img, bool PngDatasetOutput::write( const CvMat& img,
const CvRect& boundingBox ) const CvRect& boundingBox )
{ {
CvRect bbox = scaleBoundingBox(cvGetSize(&img), boundingBox); CvRect bbox = scaleBoundingBox(cvGetSize(&img), boundingBox);
@ -153,7 +153,7 @@ bool PngTrainingSetOutput::write( const CvMat& img,
return true; return true;
} }
void PngTrainingSetOutput::writeImage(const CvMat &img) const void PngDatasetOutput::writeImage(const CvMat &img) const
{ {
CvSize origsize = cvGetSize(&img); CvSize origsize = cvGetSize(&img);
@ -173,7 +173,7 @@ void PngTrainingSetOutput::writeImage(const CvMat &img) const
return; return;
} }
CvRect PngTrainingSetOutput::scaleBoundingBox(const CvSize& imgSize, const CvRect& bbox) CvRect PngDatasetOutput::scaleBoundingBox(const CvSize& imgSize, const CvRect& bbox)
{ {
double scale = MAX( (float) destImgWidth / imgSize.width, double scale = MAX( (float) destImgWidth / imgSize.width,
(float) destImgHeight / imgSize.height ); (float) destImgHeight / imgSize.height );
@ -231,7 +231,7 @@ bool IOutput::init(const char *filename)
return true; return true;
} }
bool TestSamplesOutput::write( const CvMat& img, bool JpgDatasetOutput::write( const CvMat& img,
const CvRect& boundingBox ) const CvRect& boundingBox )
{ {
sprintf( imgFileName, "%04d_%04d_%04d_%04d_%04d.jpg", sprintf( imgFileName, "%04d_%04d_%04d_%04d_%04d.jpg",
@ -242,7 +242,7 @@ bool TestSamplesOutput::write( const CvMat& img,
boundingBox.height ); boundingBox.height );
fprintf( annotationsList, "%s %d %d %d %d %d\n", fprintf( annotationsList, "%s %d %d %d %d %d\n",
imgFullPath, imgFileName,
1, 1,
boundingBox.x, boundingBox.x,
boundingBox.y, boundingBox.y,

View File

@ -3,16 +3,16 @@
#include "ioutput.h" #include "ioutput.h"
class PngTrainingSetOutput: public IOutput class PngDatasetOutput: public IOutput
{ {
friend IOutput* IOutput::createOutput(const char *filename, OutputType type); friend IOutput* IOutput::createOutput(const char *filename, OutputType type);
public: public:
virtual bool write( const CvMat& img, virtual bool write( const CvMat& img,
const CvRect& boundingBox); const CvRect& boundingBox);
virtual ~PngTrainingSetOutput(){} virtual ~PngDatasetOutput(){}
private: private:
PngTrainingSetOutput() PngDatasetOutput()
: extension("png") : extension("png")
, destImgWidth(640) , destImgWidth(640)
, destImgHeight(480) , destImgHeight(480)
@ -36,14 +36,14 @@ private:
int destImgHeight ; int destImgHeight ;
}; };
class TestSamplesOutput: public IOutput class JpgDatasetOutput: public IOutput
{ {
friend IOutput* IOutput::createOutput(const char *filename, OutputType type); friend IOutput* IOutput::createOutput(const char *filename, OutputType type);
public: public:
virtual bool write( const CvMat& img, virtual bool write( const CvMat& img,
const CvRect& boundingBox ); const CvRect& boundingBox );
virtual ~TestSamplesOutput(){} virtual ~JpgDatasetOutput(){}
private: private:
TestSamplesOutput(){} JpgDatasetOutput(){}
}; };
#endif // CVSAMPLESOUTPUT_H #endif // CVSAMPLESOUTPUT_H

View File

@ -6,7 +6,7 @@ Cascade Classifier Training
Introduction Introduction
============ ============
The work with a cascade classifier inlcudes two major stages: training and detection. The work with a cascade classifier includes two major stages: training and detection.
Detection stage is described in a documentation of ``objdetect`` module of general OpenCV documentation. Documentation gives some basic information about cascade classifier. Detection stage is described in a documentation of ``objdetect`` module of general OpenCV documentation. Documentation gives some basic information about cascade classifier.
Current guide is describing how to train a cascade classifier: preparation of a training data and running the training application. Current guide is describing how to train a cascade classifier: preparation of a training data and running the training application.
@ -14,26 +14,30 @@ Important notes
--------------- ---------------
There are two applications in OpenCV to train cascade classifier: ``opencv_haartraining`` and ``opencv_traincascade``. ``opencv_traincascade`` is a newer version, written in C++ in accordance to OpenCV 2.x API. But the main difference between this two applications is that ``opencv_traincascade`` supports both Haar [Viola2001]_ and LBP [Liao2007]_ (Local Binary Patterns) features. LBP features are integer in contrast to Haar features, so both training and detection with LBP are several times faster then with Haar features. Regarding the LBP and Haar detection quality, it depends on training: the quality of training dataset first of all and training parameters too. It's possible to train a LBP-based classifier that will provide almost the same quality as Haar-based one. There are two applications in OpenCV to train cascade classifier: ``opencv_haartraining`` and ``opencv_traincascade``. ``opencv_traincascade`` is a newer version, written in C++ in accordance to OpenCV 2.x API. But the main difference between this two applications is that ``opencv_traincascade`` supports both Haar [Viola2001]_ and LBP [Liao2007]_ (Local Binary Patterns) features. LBP features are integer in contrast to Haar features, so both training and detection with LBP are several times faster then with Haar features. Regarding the LBP and Haar detection quality, it depends on training: the quality of training dataset first of all and training parameters too. It's possible to train a LBP-based classifier that will provide almost the same quality as Haar-based one.
``opencv_traincascade`` and ``opencv_haartraining`` store the trained classifier in different file formats. Note, the newer cascade detection interface (see ``CascadeClassifier`` class in ``objdetect`` module) support both formats. ``opencv_traincascade`` can save (export) a trained cascade in the older format. But ``opencv_traincascade`` and ``opencv_haartraining`` can not load (import) a classifier in another format for the futher training after interruption. ``opencv_traincascade`` and ``opencv_haartraining`` store the trained classifier in different file formats. Note, the newer cascade detection interface (see ``CascadeClassifier`` class in ``objdetect`` module) support both formats. ``opencv_traincascade`` can save (export) a trained cascade in the older format. But ``opencv_traincascade`` and ``opencv_haartraining`` can not load (import) a classifier in another format for the further training after interruption.
Note that ``opencv_traincascade`` application can use TBB for multi-threading. To use it in multicore mode OpenCV must be built with TBB. Note that ``opencv_traincascade`` application can use TBB for multi-threading. To use it in multicore mode OpenCV must be built with TBB.
Also there are some auxilary utilities related to the training. Also there are some auxiliary utilities related to the training.
* ``opencv_createsamples`` is used to prepare a training dataset of positive and test samples. ``opencv_createsamples`` produces dataset of positive samples in a format that is supported by both ``opencv_haartraining`` and ``opencv_traincascade`` applications. The output is a file with \*.vec extension, it is a binary format which contains images. * ``opencv_createsamples`` is used to prepare a training dataset of positive and test samples. ``opencv_createsamples`` produces dataset of positive samples in a format that is supported by both ``opencv_haartraining`` and ``opencv_traincascade`` applications. The output is a file with \*.vec extension, it is a binary format which contains images.
* ``opencv_performance`` may be used to evaluate the quality of classifiers, but for trained by ``opencv_haartraining`` only. It takes a collection of marked up images, runs the classifier and reports the performance, i.e. number of found objects, number of missed objects, number of false alarms and other information. * ``opencv_performance`` may be used to evaluate the quality of classifiers, but for trained by ``opencv_haartraining`` only. It takes a collection of marked up images, runs the classifier and reports the performance, i.e. number of found objects, number of missed objects, number of false alarms and other information.
Since ``opencv_haartraining`` is an obsolete application, only ``opencv_traincascade`` will be described futher. ``opencv_createsamples`` utility is needed to prepare a training data for ``opencv_traincascade``, so it will be described too. Since ``opencv_haartraining`` is an obsolete application, only ``opencv_traincascade`` will be described further. ``opencv_createsamples`` utility is needed to prepare a training data for ``opencv_traincascade``, so it will be described too.
``opencv_createsamples`` utility
================================
An ``opencv_createsamples`` utility provides functionality for dataset generating, writing and viewing. The term *dataset* is used here for both training set and test set.
Training data preparation Training data preparation
========================= =========================
For training we need a set of samples. There are two types of samples: negative and positive. Negative samples correspond to non-object images. Positive samples correspond to images with detected objects. Set of negative samples must be prepared manually, whereas set of positive samples is created using ``opencv_createsamples`` utility. For training we need a set of samples. There are two types of samples: negative and positive. Negative samples correspond to non-object images. Positive samples correspond to images with detected objects. Set of negative samples must be prepared manually, whereas set of positive samples is created using ``opencv_createsamples`` utility.
Negative Samples Negative Samples
---------------- ----------------
Negative samples are taken from arbitrary images. These images must not contain detected objects. Negative samples are enumerated in a special file. It is a text file in which each line contains an image filename (relative to the directory of the description file) of negative sample image. This file must be created manually. Note that negative samples and sample images are also called background samples or background samples images, and are used interchangeably in this document. Described images may be of different sizes. But each image should be (but not nessesarily) larger then a training window size, because these images are used to subsample negative image to the training size. Negative samples are taken from arbitrary images. These images must not contain detected objects. Negative samples are enumerated in a special file. It is a text file in which each line contains an image filename (relative to the directory of the description file) of negative sample image. This file must be created manually. Note that negative samples and sample images are also called background samples or background samples images, and are used interchangeably in this document. Described images may be of different sizes. But each image should be (but not necessarily) larger then a training window size, because these images are used to subsample negative image to the training size.
An example of description file: An example of description file:
@ -57,7 +61,7 @@ Positive Samples
---------------- ----------------
Positive samples are created by ``opencv_createsamples`` utility. They may be created from a single image with object or from a collection of previously marked up images. Positive samples are created by ``opencv_createsamples`` utility. They may be created from a single image with object or from a collection of previously marked up images.
Please note that you need a large dataset of positive samples before you give it to the mentioned utility, because it only applies perspective transformation. For example you may need only one positive sample for absolutely rigid object like an OpenCV logo, but you definetely need hundreds and even thousands of positive samples for faces. In the case of faces you should consider all the race and age groups, emotions and perhaps beard styles. Please note that you need a large dataset of positive samples before you give it to the mentioned utility, because it only applies perspective transformation. For example you may need only one positive sample for absolutely rigid object like an OpenCV logo, but you definitely need hundreds and even thousands of positive samples for faces. In the case of faces you should consider all the race and age groups, emotions and perhaps beard styles.
So, a single object image may contain a company logo. Then a large set of positive samples is created from the given object image by random rotating, changing the logo intensity as well as placing the logo on arbitrary background. The amount and range of randomness can be controlled by command line arguments of ``opencv_createsamples`` utility. So, a single object image may contain a company logo. Then a large set of positive samples is created from the given object image by random rotating, changing the logo intensity as well as placing the logo on arbitrary background. The amount and range of randomness can be controlled by command line arguments of ``opencv_createsamples`` utility.
@ -123,10 +127,11 @@ Command line arguments:
The ``opencv_createsamples`` utility may work in a number of modes, namely: The ``opencv_createsamples`` utility may work in a number of modes, namely:
* Creating training set from a single image and a collection of backgrounds with a single ``vec`` file as an output; * Creating training set from a single image and a collection of backgrounds:
* with a single ``vec`` file as an output;
* with a collection of JPG images and a file with annotations list as an output;
* with a collection of PNG images and associated files with annotations as an output;
* Converting the marked-up collection of samples into a ``vec`` format; * Converting the marked-up collection of samples into a ``vec`` format;
* Creating training set from a single image, as specified above, but with a collection of PNG images and associated annotation files as a result;
* Creating test set that consists of JPG samples collection and a single file with annotations;
* Showing the content of the ``vec`` file. * Showing the content of the ``vec`` file.
Creating training set from a single image and a collection of backgrounds with a single ``vec`` file as an output Creating training set from a single image and a collection of backgrounds with a single ``vec`` file as an output
@ -135,6 +140,68 @@ Creating training set from a single image and a collection of backgrounds with a
The following procedure is used to create a sample object instance: The following procedure is used to create a sample object instance:
The source image is rotated randomly around all three axes. The chosen angle is limited by ``-max?angle``. Then pixels having the intensity from [``bg_color-bg_color_threshold``; ``bg_color+bg_color_threshold``] range are interpreted as transparent. White noise is added to the intensities of the foreground. If the ``-inv`` key is specified then foreground pixel intensities are inverted. If ``-randinv`` key is specified then algorithm randomly selects whether inversion should be applied to this sample. Finally, the obtained image is placed onto an arbitrary background from the background description file, resized to the desired size specified by ``-w`` and ``-h`` and stored to the vec-file, specified by the ``-vec`` command line option. The source image is rotated randomly around all three axes. The chosen angle is limited by ``-max?angle``. Then pixels having the intensity from [``bg_color-bg_color_threshold``; ``bg_color+bg_color_threshold``] range are interpreted as transparent. White noise is added to the intensities of the foreground. If the ``-inv`` key is specified then foreground pixel intensities are inverted. If ``-randinv`` key is specified then algorithm randomly selects whether inversion should be applied to this sample. Finally, the obtained image is placed onto an arbitrary background from the background description file, resized to the desired size specified by ``-w`` and ``-h`` and stored to the vec-file, specified by the ``-vec`` command line option.
Creating training set as a collection of JPG images
---------------------------------------------------
To obtain such behaviour the ``-img``, ``-bg`` and ``-info`` keys should be specified. The file name specified with ``-info`` key should include at least one level of directory hierarchy, that directory
will be used as the top-level directory for the training set.
For example, with the ``opencv_createsamples`` called as following:
.. code-block:: text
opencv_createsamples -img /home/user/logo.png -bg /home/user/bg.txt -info /home/user/annotations.lst -pngoutput -maxxangle 0.1 -maxyangle 0.1 -maxzangle 0.1
The output will have the following structure:
.. code-block:: text
/home/user/
annotations/
0001_0107_0099_0195_0139.txt
0002_0107_0115_0195_0139.txt
...
neg/
<background files here>
pos/
0001_0107_0099_0195_0139.png
0002_0107_0115_0195_0139.png
...
annotations.lst
With ``*.txt`` files in ``annotations`` directory containing information about object bounding box on the sample in the following format:
.. code-block:: text
Image filename : "createsamples/pos/0002_0107_0115_0195_0139.png"
Bounding box for object 1 "PASperson" (Xmin, Ymin) - (Xmax, Ymax) : (107, 115) - (302, 254)
And ``annotations.lst`` file containing the list of all annotations file:
.. code-block:: text
createsamples/annotations/0001_0109_0209_0195_0139.txt
createsamples/annotations/0002_0241_0245_0139_0100.txt
Creating test set as a collection of JPG images
-----------------------------------------------
This variant of ``opencv_createsamples`` usage is very similar to the previous one, but generates the output in a different format.
Directory structure:
.. code-block:: text
info.dat
img1.jpg
img2.jpg
File info.dat:
.. code-block:: text
img1.jpg 1 140 100 45 45
img2.jpg 2 100 200 50 50 50 30 25 25
Converting the marked-up collection of samples into a ``vec`` format Converting the marked-up collection of samples into a ``vec`` format
-------------------------------------------------------------------- --------------------------------------------------------------------
@ -168,67 +235,6 @@ In order to create positive samples from such collection, ``-info`` argument sho
The scheme of samples creation in this case is as follows. The object instances are taken from images. Then they are resized to target samples size and stored in output vec-file. No distortion is applied, so the only affecting arguments are ``-w``, ``-h``, ``-show`` and ``-num``. The scheme of samples creation in this case is as follows. The object instances are taken from images. Then they are resized to target samples size and stored in output vec-file. No distortion is applied, so the only affecting arguments are ``-w``, ``-h``, ``-show`` and ``-num``.
Creating training set from a single image, but with a collection of PNG images and associated annotation files as a result
--------------------------------------------------------------------------------------------------------------------------
To obtain such behaviour the ``-img``, ``-bg`` and ``-info`` keys should be specified. The file name specified with ``-info`` key should include at least one level of directory hierarchy, that directory
will be used as the top-level dir for the training set.
For example, with the ``opencv_createsamples`` called as following:
opencv_createsamples -img /home/user/logo.png -bg /home/user/bg.txt -info /home/user/annotations.lst -pngoutput -maxxangle 0.1 -maxyangle 0.1 -maxzangle 0.1
The output will have the following structure:
.. code-block:: text
/home/user/
annotations/
0001_0107_0099_0195_0139.txt
0002_0107_0115_0195_0139.txt
...
neg/
<background files here>
pos/
0001_0107_0099_0195_0139.png
0002_0107_0115_0195_0139.png
...
annotations.lst
With ``*.txt`` files in ``annotations`` directory containing information about object bounding box on the sample in the following format:
.. code-block:: text
Image filename : "createsamples/pos/0002_0107_0115_0195_0139.png"
Bounding box for object 1 "PASperson" (Xmin, Ymin) - (Xmax, Ymax) : (107, 115) - (302, 254)
And ``annotations.lst`` file containing the list of all annotations file:
.. code-block:: text
createsamples/annotations/0001_0109_0209_0195_0139.txt
createsamples/annotations/0002_0241_0245_0139_0100.txt
Creating test set that consists of JPG samples collection and a single file with annotations
--------------------------------------------------------------------------------------------
This variant of ``opencv_createsamples`` usage is very similar to the previous one, but generates the output in a different format;
Directory structure:
.. code-block:: text
info.dat
img1.jpg
img2.jpg
File info.dat:
.. code-block:: text
img1.jpg 1 140 100 45 45
img2.jpg 2 100 200 50 50 50 30 25 25
Showing the content of the ``vec`` file Showing the content of the ``vec`` file
--------------------------------------- ---------------------------------------
@ -240,7 +246,7 @@ Example of vec-file is available here ``opencv/data/vec_files/trainingfaces_24-2
Cascade Training Cascade Training
================ ================
The next step is the training of classifier. As mentioned above ``opencv_traincascade`` or ``opencv_haartraining`` may be used to train a cascade classifier, but only the newer ``opencv_traincascade`` will be described futher. The next step is the training of classifier. As mentioned above ``opencv_traincascade`` or ``opencv_haartraining`` may be used to train a cascade classifier, but only the newer ``opencv_traincascade`` will be described further.
Command line arguments of ``opencv_traincascade`` application grouped by purposes: Command line arguments of ``opencv_traincascade`` application grouped by purposes: