Added ASIFT (to activate, check parameter Feature2D/Affine) (fixed issue 27)
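To also enable it from the command line, the generic --My/Parameter override described in main.cpp's usage should work, along the lines of (a sketch, assuming the find_object binary; the key is declared as Feature2D/4Affine by the PARAMETER macro, the digit being an ordering prefix that the settings UI hides):
./find_object --Feature2D/4Affine true --objects objects/ --scene scene.jpg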

git-svn-id: http://find-object.googlecode.com/svn/trunk/find_object@385 620bd6b2-0a58-f614-fd9a-1bd335dccda9
matlabbe 2014-08-22 22:36:09 +00:00
parent dcf406429e
commit 629b2aefaa
8 changed files with 409 additions and 144 deletions

View File: main.cpp

@@ -102,6 +102,7 @@ void showUsage()
" --objects \"path\" Directory of the objects to detect.\n"
" --config \"path\" Path to configuration file (default: %s).\n"
" --scene \"path\" Path to a scene image file.\n"
" --debug Show debug log.\n"
" --params Show all parameters.\n"
" --My/Parameter \"value\" Set find-Object's parameter (look --params for parameters' name).\n"
" It will override the one in --config. Example to set 4 threads:\n"
@@ -209,6 +210,13 @@ int main(int argc, char* argv[])
guiMode = false;
continue;
}
if(strcmp(argv[i], "-debug") == 0 ||
strcmp(argv[i], "--debug") == 0)
{
ULogger::setPrintWhere(true);
ULogger::setLevel(ULogger::kDebug);
continue;
}
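// For instance, "./find_object --debug --objects objects/" (an illustrative
// invocation) raises the log level to kDebug, and setPrintWhere(true) makes
// ULogger print where each message was logged.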
if(strcmp(argv[i], "-help") == 0 ||
strcmp(argv[i], "--help") == 0)
{

View File: DetectionInfo.h

@@ -42,6 +42,7 @@
enum TimeStamp{
kTimeKeypointDetection,
kTimeDescriptorExtraction,
kTimeSkewAffine,
kTimeIndexing,
kTimeMatching,
kTimeHomography,

View File: FindObject.h

@@ -53,6 +53,13 @@ class DescriptorExtractor;
class FINDOBJECT_EXP FindObject : public QObject
{
Q_OBJECT;
public:
static void affineSkew(float tilt,
float phi,
const cv::Mat & image,
cv::Mat & skewImage,
cv::Mat & skewMask,
cv::Mat & Ai);
public:
FindObject(QObject * parent = 0);
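A minimal sketch of calling the new helper directly (mirroring AffineExtractionThread in FindObject.cpp below; image and kpt are assumed to be a grayscale cv::Mat and a detected cv::KeyPoint):
cv::Mat skewImage, skewMask, Ai;
FindObject::affineSkew(2.0f, 30.0f, image, skewImage, skewMask, Ai); // tilt=2, phi=30 deg
// detect keypoints on skewImage (with skewMask as detection mask), then
// map each point back to the original image with Ai:
cv::Mat p = (cv::Mat_<float>(3, 1) << kpt.pt.x, kpt.pt.y, 1);
cv::Mat pa = Ai * p; // pa.at<float>(0,0) and pa.at<float>(1,0) are the original coordinates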

View File: Settings.h

@@ -97,6 +97,8 @@ class FINDOBJECT_EXP Settings
PARAMETER(Feature2D, 1Detector, QString, "7:Dense;Fast;GFTT;MSER;ORB;SIFT;Star;SURF;BRISK" , "Keypoint detector.");
PARAMETER(Feature2D, 2Descriptor, QString, "3:Brief;ORB;SIFT;SURF;BRISK;FREAK", "Keypoint descriptor.");
PARAMETER(Feature2D, 3MaxFeatures, int, 0, "Maximum features per image. If the number of extracted features exceeds this threshold, only those with the highest response are kept. 0 means all features are kept.");
PARAMETER(Feature2D, 4Affine, bool, false, "(ASIFT) Extract features on multiple affine transformations of the image.");
PARAMETER(Feature2D, 5AffineCount, int, 6, "(ASIFT) The higher the value, the more affine transformations are done.");
PARAMETER(Feature2D, Brief_bytes, int, 32, "Length of the descriptor in bytes. It can be 16, 32 or 64.");
@@ -205,7 +207,7 @@ class FINDOBJECT_EXP Settings
PARAMETER(General, autoStartCamera, bool, false, "Automatically start the camera when the application is opened.");
PARAMETER(General, autoUpdateObjects, bool, true, "Automatically update objects on every parameter change, otherwise you would need to press \"Update objects\" on the objects panel.");
PARAMETER(General, nextObjID, uint, 1, "Next object ID to use.");
PARAMETER(General, imageFormats, QString, "*.png *.jpg *.bmp *.tiff *.ppm", "Image formats supported.");
PARAMETER(General, imageFormats, QString, "*.png *.jpg *.bmp *.tiff *.ppm *.pgm", "Image formats supported.");
PARAMETER(General, videoFormats, QString, "*.avi *.m4v *.mp4", "Video formats supported.");
PARAMETER(General, mirrorView, bool, true, "Flip the camera image horizontally (like all webcam applications).");
PARAMETER(General, invertedSearch, bool, true, "Instead of matching descriptors from the objects to those in a vocabulary created with descriptors extracted from the scene, we create a vocabulary from all the objects' descriptors and match the scene's descriptors to this vocabulary. This is the inverted search mode.");
@@ -283,7 +285,9 @@ public:
KeypointDetector(cv::FeatureDetector * featureDetector);
KeypointDetector(GPUFeature2D * gpuFeature2D);
void detect(const cv::Mat & image, std::vector<cv::KeyPoint> & keypoints);
void detect(const cv::Mat & image,
std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & mask = cv::Mat());
private:
cv::FeatureDetector * featureDetector_;

View File: FindObject.cpp

@@ -202,61 +202,275 @@ std::vector<cv::KeyPoint> limitKeypoints(const std::vector<cv::KeyPoint> & keypo
return kptsKept;
}
// taken from ASIFT example https://github.com/Itseez/opencv/blob/master/samples/python2/asift.py
// Ai - is an affine transform matrix from skew_img to img
void FindObject::affineSkew(
float tilt,
float phi,
const cv::Mat & image,
cv::Mat & skewImage,
cv::Mat & skewMask,
cv::Mat & Ai)
{
float h = image.rows;
float w = image.cols;
cv::Mat A = cv::Mat::zeros(2,3,CV_32FC1);
A.at<float>(0,0) = A.at<float>(1,1) = 1;
skewMask = cv::Mat::ones(h, w, CV_8U) * 255;
if(phi != 0.0)
{
phi = phi*CV_PI/180.0f; // deg2rad
float s = std::sin(phi);
float c = std::cos(phi);
cv::Mat A22 = (cv::Mat_<float>(2, 2) <<
c, -s,
s, c);
cv::Mat cornersIn = (cv::Mat_<float>(4, 2) <<
0,0,
w,0,
w,h,
0,h);
cv::Mat cornersOut = cornersIn * A22.t();
cv::Rect rect = cv::boundingRect(cornersOut.reshape(2,4));
A = (cv::Mat_<float>(2, 3) <<
c, -s, -rect.x,
s, c, -rect.y);
cv::warpAffine(image, skewImage, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
}
else
{
skewImage = image;
}
if(tilt != 1.0)
{
float s = 0.8*std::sqrt(tilt*tilt-1);
cv::Mat out, out2;
cv::GaussianBlur(skewImage, out, cv::Size(0, 0), s, 0.01);
cv::resize(out, out2, cv::Size(0, 0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
skewImage = out2;
A.row(0) /= tilt;
}
if(phi != 0.0 || tilt != 1.0)
{
cv::Mat mask = skewMask;
cv::warpAffine(mask, skewMask, A, skewImage.size(), cv::INTER_NEAREST);
}
cv::invertAffineTransform(A, Ai);
}
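// In summary, A accumulates a rotation by phi (translated so the rotated
// image stays in view) followed by a 1/tilt subsampling along x (preceded
// by a Gaussian blur for anti-aliasing), and Ai = A^-1 maps skewImage
// coordinates back to the original image; the extraction threads below use
// it to re-project the detected keypoints.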
class AffineExtractionThread : public QThread
{
public:
AffineExtractionThread(
KeypointDetector * detector,
DescriptorExtractor * extractor,
const cv::Mat & image,
float tilt,
float phi) :
detector_(detector),
extractor_(extractor),
image_(image),
tilt_(tilt),
phi_(phi),
timeSkewAffine_(0),
timeDetection_(0),
timeExtraction_(0)
{
UASSERT(detector && extractor);
}
const cv::Mat & image() const {return image_;}
const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
const cv::Mat & descriptors() const {return descriptors_;}
int timeSkewAffine() const {return timeSkewAffine_;}
int timeDetection() const {return timeDetection_;}
int timeExtraction() const {return timeExtraction_;}
protected:
virtual void run()
{
QTime timeStep;
timeStep.start();
cv::Mat skewImage, skewMask, Ai;
FindObject::affineSkew(tilt_, phi_, image_, skewImage, skewMask, Ai);
timeSkewAffine_=timeStep.restart();
//Detect features
detector_->detect(skewImage, keypoints_, skewMask);
if(keypoints_.size())
{
int maxFeatures = Settings::getFeature2D_3MaxFeatures();
if(maxFeatures > 0 && (int)keypoints_.size() > maxFeatures)
{
keypoints_ = limitKeypoints(keypoints_, maxFeatures);
}
timeDetection_=timeStep.restart();
//Extract descriptors
extractor_->compute(skewImage, keypoints_, descriptors_);
timeExtraction_=timeStep.restart();
// Transform points to original image coordinates
for(unsigned int i=0; i<keypoints_.size(); ++i)
{
cv::Mat p = (cv::Mat_<float>(3, 1) << keypoints_[i].pt.x, keypoints_[i].pt.y, 1);
cv::Mat pa = Ai * p;
keypoints_[i].pt.x = pa.at<float>(0,0);
keypoints_[i].pt.y = pa.at<float>(1,0);
}
}
else
{
timeDetection_=timeStep.restart();
}
}
private:
KeypointDetector * detector_;
DescriptorExtractor * extractor_;
cv::Mat image_;
float tilt_;
float phi_;
std::vector<cv::KeyPoint> keypoints_;
cv::Mat descriptors_;
int timeSkewAffine_;
int timeDetection_;
int timeExtraction_;
};
class ExtractFeaturesThread : public QThread
{
public:
ExtractFeaturesThread(int objectId, const cv::Mat & image) :
ExtractFeaturesThread(
KeypointDetector * detector,
DescriptorExtractor * extractor,
int objectId,
const cv::Mat & image) :
detector_(detector),
extractor_(extractor),
objectId_(objectId),
image_(image)
image_(image),
timeSkewAffine_(0),
timeDetection_(0),
timeExtraction_(0)
{
UASSERT(detector && extractor);
}
int objectId() const {return objectId_;}
const cv::Mat & image() const {return image_;}
const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
const cv::Mat & descriptors() const {return descriptors_;}
int timeSkewAffine() const {return timeSkewAffine_;}
int timeDetection() const {return timeDetection_;}
int timeExtraction() const {return timeExtraction_;}
protected:
virtual void run()
{
QTime time;
time.start();
UINFO("Extracting descriptors from object %d...", objectId_);
KeypointDetector * detector = Settings::createKeypointDetector();
keypoints_.clear();
descriptors_ = cv::Mat();
detector->detect(image_, keypoints_);
delete detector;
if(keypoints_.size())
QTime timeStep;
timeStep.start();
if(!Settings::getFeature2D_4Affine())
{
int maxFeatures = Settings::getFeature2D_3MaxFeatures();
if(maxFeatures > 0 && (int)keypoints_.size() > maxFeatures)
{
int previousCount = (int)keypoints_.size();
keypoints_ = limitKeypoints(keypoints_, maxFeatures);
UINFO("obj=%d, %d keypoints removed, (kept %d), min/max response=%f/%f", objectId_, previousCount-(int)keypoints_.size(), (int)keypoints_.size(), keypoints_.size()?keypoints_.back().response:0.0f, keypoints_.size()?keypoints_.front().response:0.0f);
}
keypoints_.clear();
descriptors_ = cv::Mat();
detector_->detect(image_, keypoints_);
DescriptorExtractor * extractor = Settings::createDescriptorExtractor();
extractor->compute(image_, keypoints_, descriptors_);
delete extractor;
if((int)keypoints_.size() != descriptors_.rows)
if(keypoints_.size())
{
UERROR("obj=%d kpt=%d != descriptors=%d", objectId_, (int)keypoints_.size(), descriptors_.rows);
int maxFeatures = Settings::getFeature2D_3MaxFeatures();
if(maxFeatures > 0 && (int)keypoints_.size() > maxFeatures)
{
int previousCount = (int)keypoints_.size();
keypoints_ = limitKeypoints(keypoints_, maxFeatures);
UDEBUG("obj=%d, %d keypoints removed, (kept %d), min/max response=%f/%f", objectId_, previousCount-(int)keypoints_.size(), (int)keypoints_.size(), keypoints_.size()?keypoints_.back().response:0.0f, keypoints_.size()?keypoints_.front().response:0.0f);
}
timeDetection_+=timeStep.restart();
extractor_->compute(image_, keypoints_, descriptors_);
timeExtraction_+=timeStep.restart();
if((int)keypoints_.size() != descriptors_.rows)
{
UERROR("obj=%d kpt=%d != descriptors=%d", objectId_, (int)keypoints_.size(), descriptors_.rows);
}
}
else
{
timeDetection_+=timeStep.restart();
UWARN("no features detected in object %d !?!", objectId_);
}
}
else
{
UWARN("no features detected in object %d !?!", objectId_);
//ASIFT
std::vector<float> tilts;
std::vector<float> phis;
tilts.push_back(1.0f);
phis.push_back(0.0f);
int nTilt = Settings::getFeature2D_5AffineCount();
for(int t=1; t<nTilt; ++t)
{
float tilt = std::pow(2.0f, 0.5f*float(t));
float inc = 72.0f / float(tilt);
for(float phi=0.0f; phi<180.0f; phi+=inc)
{
tilts.push_back(tilt);
phis.push_back(phi);
}
}
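// Worked example with the default Feature2D/5AffineCount = 6: besides the
// identity view (tilt=1, phi=0), tilts are 2^(t/2) for t=1..5, i.e. ~1.41,
// 2, ~2.83, 4 and ~5.66, each sampled with phi in steps of 72/tilt degrees
// over [0,180); e.g. tilt=2 gives phi = 0, 36, 72, 108, 144. That makes
// 1+4+5+8+10+15 = 43 affine views in total.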
//multi-threaded
unsigned int threadCounts = Settings::getGeneral_threads();
if(threadCounts == 0)
{
threadCounts = tilts.size();
}
for(unsigned int i=0; i<tilts.size(); i+=threadCounts)
{
QVector<AffineExtractionThread*> threads;
for(unsigned int k=i; k<i+threadCounts && k<tilts.size(); ++k)
{
threads.push_back(new AffineExtractionThread(detector_, extractor_, image_, tilts[k], phis[k]));
threads.back()->start();
}
for(int k=0; k<threads.size(); ++k)
{
threads[k]->wait();
keypoints_.insert(keypoints_.end(), threads[k]->keypoints().begin(), threads[k]->keypoints().end());
descriptors_.push_back(threads[k]->descriptors());
timeSkewAffine_ += threads[k]->timeSkewAffine();
timeDetection_ += threads[k]->timeDetection();
timeExtraction_ += threads[k]->timeExtraction();
}
}
}
UINFO("%d descriptors extracted from object %d (in %d ms)", descriptors_.rows, objectId_, time.elapsed());
}
private:
KeypointDetector * detector_;
DescriptorExtractor * extractor_;
int objectId_;
cv::Mat image_;
std::vector<cv::KeyPoint> keypoints_;
cv::Mat descriptors_;
int timeSkewAffine_;
int timeDetection_;
int timeExtraction_;
};
void FindObject::updateObjects()
@@ -278,7 +492,7 @@ void FindObject::updateObjects()
QVector<ExtractFeaturesThread*> threads;
for(int k=i; k<i+threadCounts && k<objectsList.size(); ++k)
{
threads.push_back(new ExtractFeaturesThread(objectsList.at(k)->id(), objectsList.at(k)->image()));
threads.push_back(new ExtractFeaturesThread(detector_, extractor_, objectsList.at(k)->id(), objectsList.at(k)->image()));
threads.back()->start();
}
@@ -677,30 +891,15 @@ bool FindObject::detect(const cv::Mat & image, find_object::DetectionInfo & info
grayscaleImg = image;
}
QTime time;
time.start();
// EXTRACT KEYPOINTS
detector_->detect(grayscaleImg, info.sceneKeypoints_);
info.timeStamps_.insert(DetectionInfo::kTimeKeypointDetection, time.restart());
bool emptyScene = info.sceneKeypoints_.size() == 0;
if(info.sceneKeypoints_.size())
{
int maxFeatures = Settings::getFeature2D_3MaxFeatures();
if(maxFeatures > 0 && (int)info.sceneKeypoints_.size() > maxFeatures)
{
info.sceneKeypoints_ = limitKeypoints(info.sceneKeypoints_, maxFeatures);
}
// EXTRACT DESCRIPTORS
extractor_->compute(grayscaleImg, info.sceneKeypoints_, info.sceneDescriptors_);
if((int)info.sceneKeypoints_.size() != info.sceneDescriptors_.rows)
{
UERROR("kpt=%d != descriptors=%d", (int)info.sceneKeypoints_.size(), info.sceneDescriptors_.rows);
}
}
info.timeStamps_.insert(DetectionInfo::kTimeDescriptorExtraction, time.restart());
// DETECT FEATURES AND EXTRACT DESCRIPTORS
ExtractFeaturesThread extractThread(detector_, extractor_, -1, grayscaleImg);
extractThread.start();
extractThread.wait();
info.sceneKeypoints_ = extractThread.keypoints();
info.sceneDescriptors_ = extractThread.descriptors();
info.timeStamps_.insert(DetectionInfo::kTimeKeypointDetection, extractThread.timeDetection());
info.timeStamps_.insert(DetectionInfo::kTimeDescriptorExtraction, extractThread.timeExtraction());
info.timeStamps_.insert(DetectionInfo::kTimeSkewAffine, extractThread.timeSkewAffine());
bool consistentNNData = (vocabulary_->size()!=0 && vocabulary_->wordToObjects().begin().value()!=-1 && Settings::getGeneral_invertedSearch()) ||
((vocabulary_->size()==0 || vocabulary_->wordToObjects().begin().value()==-1) && !Settings::getGeneral_invertedSearch());
@@ -713,6 +912,8 @@ bool FindObject::detect(const cv::Mat & image, find_object::DetectionInfo & info
objectsDescriptors_.begin().value().type() == info.sceneDescriptors_.type()) // binary descriptor issue, if the dataTree is not yet updated with modified settings
{
success = true;
QTime time;
time.start();
QMultiMap<int, int> words;
@@ -778,7 +979,10 @@ bool FindObject::detect(const cv::Mat & image, find_object::DetectionInfo & info
matched = false;
}
}
if(!matched && !Settings::getNearestNeighbor_3nndrRatioUsed() && !Settings::getNearestNeighbor_5minDistanceUsed())
if(!matched &&
!Settings::getNearestNeighbor_3nndrRatioUsed() &&
!Settings::getNearestNeighbor_5minDistanceUsed() &&
dists.at<float>(i,0) >= 0.0f)
{
matched = true; // no criterion, match to the nearest descriptor
}
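// (The added dists >= 0 test presumably rejects invalid nearest-neighbor
// entries, e.g. when FLANN fills unmatched results with negative values,
// so that a garbage distance is not accepted as a blind match.)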
@@ -1024,7 +1228,7 @@ bool FindObject::detect(const cv::Mat & image, find_object::DetectionInfo & info
{
UWARN("Cannot search, objects must be updated");
}
else if(emptyScene)
else if(info.sceneKeypoints_.size() == 0)
{
// Accept but warn the user
UWARN("No features detected in the scene!?!");

View File: MainWindow.cpp

@@ -786,6 +786,7 @@ void MainWindow::updateVocabulary()
QApplication::processEvents();
QTime time;
time.start();
findObject_->updateVocabulary();
if(findObject_->vocabulary()->size())
@@ -946,9 +947,13 @@ void MainWindow::update(const cv::Mat & image)
if(findObject_->detect(sceneImage_, info))
{
ui_->label_timeDetection->setNum(info.timeStamps_.value(DetectionInfo::kTimeKeypointDetection, 0));
ui_->label_timeSkewAffine->setNum(info.timeStamps_.value(DetectionInfo::kTimeSkewAffine, 0));
ui_->label_timeExtraction->setNum(info.timeStamps_.value(DetectionInfo::kTimeDescriptorExtraction, 0));
ui_->imageView_source->setData(info.sceneKeypoints_, cvtCvMat2QImage(sceneImage_));
ui_->label_timeIndexing->setNum(info.timeStamps_.value(DetectionInfo::kTimeIndexing, 0));
if(!findObject_->vocabulary()->size())
{
ui_->label_timeIndexing->setNum(info.timeStamps_.value(DetectionInfo::kTimeIndexing, 0));
}
ui_->label_timeMatching->setNum(info.timeStamps_.value(DetectionInfo::kTimeMatching, 0));
ui_->label_timeHomographies->setNum(info.timeStamps_.value(DetectionInfo::kTimeHomography, 0));
@@ -1135,7 +1140,13 @@ void MainWindow::update(const cv::Mat & image)
}
else
{
this->statusBar()->showMessage(tr("Cannot search, objects must be updated!"));
if(findObject_->vocabulary()->size())
{
this->statusBar()->showMessage(tr("Cannot search, objects must be updated!"));
}
ui_->label_timeDetection->setNum(info.timeStamps_.value(DetectionInfo::kTimeKeypointDetection, 0));
ui_->label_timeSkewAffine->setNum(info.timeStamps_.value(DetectionInfo::kTimeSkewAffine, 0));
ui_->label_timeExtraction->setNum(info.timeStamps_.value(DetectionInfo::kTimeDescriptorExtraction, 0));
ui_->imageView_source->setData(info.sceneKeypoints_, cvtCvMat2QImage(sceneImage_));
}

View File: Settings.cpp

@@ -38,8 +38,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <opencv2/nonfree/gpu.hpp>
#include <opencv2/gpu/gpu.hpp>
#define VERBOSE 0
namespace find_object {
ParametersMap Settings::defaultParameters_;
@@ -199,7 +197,8 @@ public:
virtual ~GPUFeature2D() {}
virtual void detectKeypoints(const cv::Mat & image,
std::vector<cv::KeyPoint> & keypoints) = 0;
std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & mask = cv::Mat()) = 0;
virtual void computeDescriptors(const cv::Mat & image,
std::vector<cv::KeyPoint> & keypoints,
@@ -225,12 +224,15 @@ public:
}
virtual ~GPUSURF() {}
void detectKeypoints(const cv::Mat & image, std::vector<cv::KeyPoint> & keypoints)
void detectKeypoints(const cv::Mat & image,
std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & mask = cv::Mat())
{
cv::gpu::GpuMat imgGpu(image);
cv::gpu::GpuMat maskGpu(mask);
try
{
surf_(imgGpu, cv::gpu::GpuMat(), keypoints);
surf_(imgGpu, maskGpu, keypoints);
}
catch(cv::Exception &e)
{
@@ -292,10 +294,13 @@ public:
virtual ~GPUFAST() {}
protected:
void detectKeypoints(const cv::Mat & image, std::vector<cv::KeyPoint> & keypoints)
void detectKeypoints(const cv::Mat & image,
std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & mask = cv::Mat())
{
cv::gpu::GpuMat imgGpu(image);
fast_(imgGpu, cv::gpu::GpuMat(), keypoints);
cv::gpu::GpuMat maskGpu(mask);
fast_(imgGpu, maskGpu, keypoints);
}
void computeDescriptors( const cv::Mat& image,
std::vector<cv::KeyPoint>& keypoints,
@@ -335,12 +340,15 @@ public:
virtual ~GPUORB() {}
protected:
void detectKeypoints(const cv::Mat & image, std::vector<cv::KeyPoint> & keypoints)
void detectKeypoints(const cv::Mat & image,
std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & mask = cv::Mat())
{
cv::gpu::GpuMat imgGpu(image);
cv::gpu::GpuMat maskGpu(mask);
try
{
orb_(imgGpu, cv::gpu::GpuMat(), keypoints);
orb_(imgGpu, maskGpu, keypoints);
}
catch(cv::Exception &e)
{
@@ -411,7 +419,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_Dense_initImgBound(),
getFeature2D_Dense_varyXyStepWithScale(),
getFeature2D_Dense_varyImgBoundWithScale());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 1:
@@ -422,14 +430,14 @@ KeypointDetector * Settings::createKeypointDetector()
detectorGPU = new GPUFAST(
getFeature2D_Fast_threshold(),
getFeature2D_Fast_nonmaxSuppression());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s GPU\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s GPU", strategies.at(index).toStdString().c_str());
}
else
{
detector = new cv::FastFeatureDetector(
getFeature2D_Fast_threshold(),
getFeature2D_Fast_nonmaxSuppression());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
}
break;
@@ -443,7 +451,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_GFTT_blockSize(),
getFeature2D_GFTT_useHarrisDetector(),
getFeature2D_GFTT_k());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 3:
@ -459,7 +467,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_MSER_areaThreshold(),
getFeature2D_MSER_minMargin(),
getFeature2D_MSER_edgeBlurSize());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 4:
@@ -478,7 +486,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_ORB_patchSize(),
getFeature2D_Fast_threshold(),
getFeature2D_Fast_nonmaxSuppression());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s (GPU)\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s (GPU)", strategies.at(index).toStdString().c_str());
}
else
{
@@ -491,7 +499,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_ORB_WTA_K(),
getFeature2D_ORB_scoreType(),
getFeature2D_ORB_patchSize());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
}
break;
@@ -504,7 +512,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_SIFT_contrastThreshold(),
getFeature2D_SIFT_edgeThreshold(),
getFeature2D_SIFT_sigma());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 6:
@ -516,7 +524,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_Star_lineThresholdProjected(),
getFeature2D_Star_lineThresholdBinarized(),
getFeature2D_Star_suppressNonmaxSize());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 7:
@ -531,7 +539,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_SURF_extended(),
getFeature2D_SURF_keypointsRatio(),
getFeature2D_SURF_upright());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s (GPU)\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s (GPU)", strategies.at(index).toStdString().c_str());
}
else
{
@ -541,7 +549,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_SURF_nOctaveLayers(),
getFeature2D_SURF_extended(),
getFeature2D_SURF_upright());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
}
break;
@@ -552,7 +560,7 @@ KeypointDetector * Settings::createKeypointDetector()
getFeature2D_BRISK_thresh(),
getFeature2D_BRISK_octaves(),
getFeature2D_BRISK_patternScale());
if(VERBOSE)printf("Settings::createFeaturesDetector() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
default:
@@ -595,7 +603,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
{
extractor = new cv::BriefDescriptorExtractor(
getFeature2D_Brief_bytes());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 1:
@ -614,7 +622,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
getFeature2D_ORB_patchSize(),
getFeature2D_Fast_threshold(),
getFeature2D_Fast_nonmaxSuppression());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s (GPU)\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s (GPU)", strategies.at(index).toStdString().c_str());
}
else
{
@@ -627,7 +635,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
getFeature2D_ORB_WTA_K(),
getFeature2D_ORB_scoreType(),
getFeature2D_ORB_patchSize());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
}
break;
@@ -640,7 +648,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
getFeature2D_SIFT_contrastThreshold(),
getFeature2D_SIFT_edgeThreshold(),
getFeature2D_SIFT_sigma());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 3:
@ -655,7 +663,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
getFeature2D_SURF_extended(),
getFeature2D_SURF_keypointsRatio(),
getFeature2D_SURF_upright());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s (GPU)\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s (GPU)", strategies.at(index).toStdString().c_str());
}
else
{
@@ -665,7 +673,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
getFeature2D_SURF_nOctaveLayers(),
getFeature2D_SURF_extended(),
getFeature2D_SURF_upright());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
}
break;
@@ -676,7 +684,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
getFeature2D_BRISK_thresh(),
getFeature2D_BRISK_octaves(),
getFeature2D_BRISK_patternScale());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
case 5:
@ -687,7 +695,7 @@ DescriptorExtractor * Settings::createDescriptorExtractor()
getFeature2D_FREAK_scaleNormalized(),
getFeature2D_FREAK_patternScale(),
getFeature2D_FREAK_nOctaves());
if(VERBOSE)printf("Settings::createDescriptorsExtractor() type=%s\n", strategies.at(index).toStdString().c_str());
UDEBUG("type=%s", strategies.at(index).toStdString().c_str());
}
break;
default:
@@ -745,14 +753,14 @@ cv::flann::IndexParams * Settings::createFlannIndexParams()
case 0:
if(strategies.at(index).compare("Linear") == 0)
{
if(VERBOSE)printf("Settings::getFlannIndexParams() type=%s\n", "Linear");
UDEBUG("type=%s", "Linear");
params = new cv::flann::LinearIndexParams();
}
break;
case 1:
if(strategies.at(index).compare("KDTree") == 0)
{
if(VERBOSE)printf("Settings::getFlannIndexParams() type=%s\n", "KDTree");
UDEBUG("type=%s", "KDTree");
params = new cv::flann::KDTreeIndexParams(
getNearestNeighbor_KDTree_trees());
}
@@ -772,7 +780,7 @@ cv::flann::IndexParams * Settings::createFlannIndexParams()
centers_init = (cvflann::flann_centers_init_t)index;
}
}
if(VERBOSE)printf("Settings::getFlannIndexParams() type=%s\n", "KMeans");
UDEBUG("type=%s", "KMeans");
params = new cv::flann::KMeansIndexParams(
getNearestNeighbor_KMeans_branching(),
getNearestNeighbor_KMeans_iterations(),
@@ -795,7 +803,7 @@ cv::flann::IndexParams * Settings::createFlannIndexParams()
centers_init = (cvflann::flann_centers_init_t)index;
}
}
if(VERBOSE)printf("Settings::getFlannIndexParams() type=%s\n", "Composite");
UDEBUG("type=%s", "Composite");
params = new cv::flann::CompositeIndexParams(
getNearestNeighbor_Composite_trees(),
getNearestNeighbor_Composite_branching(),
@@ -807,7 +815,7 @@ cv::flann::IndexParams * Settings::createFlannIndexParams()
case 4:
if(strategies.at(index).compare("Autotuned") == 0)
{
if(VERBOSE)printf("Settings::getFlannIndexParams() type=%s\n", "Autotuned");
UDEBUG("type=%s", "Autotuned");
params = new cv::flann::AutotunedIndexParams(
getNearestNeighbor_Autotuned_target_precision(),
getNearestNeighbor_Autotuned_build_weight(),
@@ -818,7 +826,7 @@ cv::flann::IndexParams * Settings::createFlannIndexParams()
case 5:
if(strategies.at(index).compare("Lsh") == 0)
{
if(VERBOSE)printf("Settings::getFlannIndexParams() type=%s\n", "Lsh");
UDEBUG("type=%s", "Lsh");
params = new cv::flann::LshIndexParams(
getNearestNeighbor_Lsh_table_number(),
getNearestNeighbor_Lsh_key_size(),
@@ -858,7 +866,6 @@ cvflann::flann_distance_t Settings::getFlannDistanceType()
}
}
}
if(VERBOSE)printf("Settings::getFlannDistanceType() distance=%d\n", distance);
return distance;
}
@@ -896,7 +903,7 @@ int Settings::getHomographyMethod()
}
}
}
if(VERBOSE)printf("Settings::getHomographyMethod() method=%d\n", method);
UDEBUG("method=%d", method);
return method;
}
@@ -912,15 +919,17 @@ KeypointDetector::KeypointDetector(GPUFeature2D * gpuFeature2D) :
{
Q_ASSERT(gpuFeature2D_!=0);
}
void KeypointDetector::detect(const cv::Mat & image, std::vector<cv::KeyPoint> & keypoints)
void KeypointDetector::detect(const cv::Mat & image,
std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & mask)
{
if(featureDetector_)
{
featureDetector_->detect(image, keypoints);
featureDetector_->detect(image, keypoints, mask);
}
else // assume GPU
{
gpuFeature2D_->detectKeypoints(image, keypoints);
gpuFeature2D_->detectKeypoints(image, keypoints, mask);
}
}

View File: mainWindow.ui

@@ -7,7 +7,7 @@
<x>0</x>
<y>0</y>
<width>826</width>
<height>506</height>
<height>523</height>
</rect>
</property>
<property name="windowTitle">
@@ -344,7 +344,7 @@
<x>0</x>
<y>0</y>
<width>198</width>
<height>376</height>
<height>393</height>
</rect>
</property>
<layout class="QVBoxLayout" name="verticalLayout_objects">
@@ -449,83 +449,83 @@
<number>0</number>
</property>
<item row="4" column="1">
<widget class="QLabel" name="label_timeMatching">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="3" column="2">
<widget class="QLabel" name="label_9">
<property name="text">
<string>ms</string>
</property>
</widget>
</item>
<item row="4" column="2">
<widget class="QLabel" name="label_10">
<property name="text">
<string>ms</string>
</property>
</widget>
</item>
<item row="3" column="0">
<widget class="QLabel" name="label_8">
<property name="text">
<string>Descriptors indexing</string>
</property>
</widget>
</item>
<item row="3" column="1">
<widget class="QLabel" name="label_timeIndexing">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="5" column="1">
<widget class="QLabel" name="label_timeMatching">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="4" column="2">
<widget class="QLabel" name="label_9">
<property name="text">
<string>ms</string>
</property>
</widget>
</item>
<item row="5" column="2">
<widget class="QLabel" name="label_10">
<property name="text">
<string>ms</string>
</property>
</widget>
</item>
<item row="4" column="0">
<widget class="QLabel" name="label_8">
<property name="text">
<string>Descriptors indexing</string>
</property>
</widget>
</item>
<item row="6" column="2">
<widget class="QLabel" name="label_12">
<property name="text">
<string>ms</string>
</property>
</widget>
</item>
<item row="7" column="1">
<item row="8" column="1">
<widget class="QLabel" name="label_minMatchedDistance">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="7" column="0">
<item row="8" column="0">
<widget class="QLabel" name="label_13">
<property name="text">
<string>Min matched distance</string>
</property>
</widget>
</item>
<item row="8" column="0">
<item row="9" column="0">
<widget class="QLabel" name="label_14">
<property name="text">
<string>Max matched distance</string>
</property>
</widget>
</item>
<item row="8" column="1">
<item row="9" column="1">
<widget class="QLabel" name="label_maxMatchedDistance">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="5" column="0">
<item row="6" column="0">
<widget class="QLabel" name="label_11">
<property name="text">
<string>Homographies</string>
</property>
</widget>
</item>
<item row="2" column="0">
<item row="3" column="0">
<widget class="QLabel" name="label_2">
<property name="text">
<string>Descriptors extraction</string>
@@ -539,14 +539,14 @@
</property>
</widget>
</item>
<item row="2" column="1">
<item row="3" column="1">
<widget class="QLabel" name="label_timeExtraction">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="2" column="2">
<item row="3" column="2">
<widget class="QLabel" name="label_4">
<property name="text">
<string>ms</string>
@@ -567,7 +567,7 @@
</property>
</widget>
</item>
<item row="4" column="0">
<item row="5" column="0">
<widget class="QLabel" name="label_7">
<property name="text">
<string>Descriptors matching</string>
@@ -595,80 +595,101 @@
</property>
</widget>
</item>
<item row="9" column="0">
<item row="10" column="0">
<widget class="QLabel" name="label_17">
<property name="text">
<string>Vocabulary size</string>
</property>
</widget>
</item>
<item row="9" column="1">
<item row="10" column="1">
<widget class="QLabel" name="label_vocabularySize">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="10" column="0">
<item row="11" column="0">
<widget class="QLabel" name="label_18">
<property name="text">
<string>IP address</string>
</property>
</widget>
</item>
<item row="11" column="0">
<item row="12" column="0">
<widget class="QLabel" name="label_19">
<property name="text">
<string>Output detection port</string>
</property>
</widget>
</item>
<item row="10" column="1">
<item row="11" column="1">
<widget class="QLabel" name="label_ipAddress">
<property name="text">
<string>0.0.0.0</string>
</property>
</widget>
</item>
<item row="11" column="1">
<item row="12" column="1">
<widget class="QLabel" name="label_port">
<property name="text">
<string>0</string>
</property>
</widget>
</item>
<item row="6" column="0">
<item row="7" column="0">
<widget class="QLabel" name="label_20">
<property name="text">
<string>Objects detected</string>
</property>
</widget>
</item>
<item row="6" column="1">
<item row="7" column="1">
<widget class="QLabel" name="label_objectsDetected">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="5" column="1">
<item row="6" column="1">
<widget class="QLabel" name="label_timeHomographies">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="12" column="0">
<item row="13" column="0">
<widget class="QLabel" name="label_21">
<property name="text">
<string>Input image port</string>
</property>
</widget>
</item>
<item row="12" column="1">
<item row="13" column="1">
<widget class="QLabel" name="label_port_image">
<property name="text">
<string>NA</string>
<string>-</string>
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QLabel" name="label_22">
<property name="text">
<string>Affine transforms</string>
</property>
</widget>
</item>
<item row="2" column="1">
<widget class="QLabel" name="label_timeSkewAffine">
<property name="text">
<string>000</string>
</property>
</widget>
</item>
<item row="2" column="2">
<widget class="QLabel" name="label_23">
<property name="text">
<string>ms</string>
</property>
</widget>
</item>