Updated to OpenCV 2.4 (as well as all feature descriptor and detector parameters).

Updated to OpenCV C++ interface (using cv::Mat instead of IplImage).


git-svn-id: http://find-object.googlecode.com/svn/trunk/find_object@97 620bd6b2-0a58-f614-fd9a-1bd335dccda9
This commit is contained in:
matlabbe 2012-05-07 22:36:59 +00:00
parent 8e44f2bedf
commit 5c4625a300
16 changed files with 388 additions and 394 deletions

View File

@ -36,7 +36,7 @@ SET(SRC_FILES
../src/MainWindow.cpp ../src/MainWindow.cpp
../src/AddObjectDialog.cpp ../src/AddObjectDialog.cpp
../src/KeypointItem.cpp ../src/KeypointItem.cpp
../src/qtipl.cpp ../src/QtOpenCV.cpp
../src/Camera.cpp ../src/Camera.cpp
../src/ParametersToolBox.cpp ../src/ParametersToolBox.cpp
../src/Settings.cpp ../src/Settings.cpp

BIN
bin/box.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

BIN
bin/box_in_scene.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 120 KiB

View File

@ -11,7 +11,7 @@ SET(SRC_FILES
main.cpp main.cpp
../src/ObjWidget.cpp ../src/ObjWidget.cpp
../src/KeypointItem.cpp ../src/KeypointItem.cpp
../src/qtipl.cpp ../src/QtOpenCV.cpp
../src/Settings.cpp ../src/Settings.cpp
../src/Camera.cpp ../src/Camera.cpp
${moc_srcs} ${moc_srcs}

View File

@ -16,10 +16,12 @@
#include <opencv2/core/core.hpp> #include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp> #include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp> #include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp> // for homography #include <opencv2/calib3d/calib3d.hpp> // for homography
// From this project (see src folder) // From this project (see src folder)
#include "ObjWidget.h" #include "ObjWidget.h"
#include "QtOpenCV.h"
void showUsage() void showUsage()
{ {
@ -44,10 +46,10 @@ int main(int argc, char * argv[])
ObjWidget sceneWidget; ObjWidget sceneWidget;
//Load as grayscale //Load as grayscale
IplImage * objectImg = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE); cv::Mat objectImg = cv::imread(argv[1], cv::IMREAD_GRAYSCALE);
IplImage * sceneImg = cvLoadImage(argv[2], CV_LOAD_IMAGE_GRAYSCALE); cv::Mat sceneImg = cv::imread(argv[2], cv::IMREAD_GRAYSCALE);
if(objectImg && sceneImg) if(!objectImg.empty() && !sceneImg.empty())
{ {
std::vector<cv::KeyPoint> objectKeypoints; std::vector<cv::KeyPoint> objectKeypoints;
std::vector<cv::KeyPoint> sceneKeypoints; std::vector<cv::KeyPoint> sceneKeypoints;
@ -58,13 +60,15 @@ int main(int argc, char * argv[])
// EXTRACT KEYPOINTS // EXTRACT KEYPOINTS
//////////////////////////// ////////////////////////////
// The detector can be any of (see OpenCV features2d.hpp): // The detector can be any of (see OpenCV features2d.hpp):
// cv::FeatureDetector * detector = new cv::OrbFeatureDetector(); // cv::FeatureDetector * detector = new cv::DenseFeatureDetector();
// cv::FeatureDetector * detector = new cv::FastFeatureDetector(); // cv::FeatureDetector * detector = new cv::FastFeatureDetector();
// cv::FeatureDetector * detector = new cv::MserFeatureDetector(); // cv::FeatureDetector * detector = new cv::GFTTDetector();
// cv::FeatureDetector * detector = new cv::SiftFeatureDetector(); // cv::FeatureDetector * detector = new cv::MSER();
// cv::FeatureDetector * detector = new cv::SurfFeatureDetector(); // cv::FeatureDetector * detector = new cv::ORB();
// cv::FeatureDetector * detector = new cv::SIFT();
// cv::FeatureDetector * detector = new cv::StarFeatureDetector(); // cv::FeatureDetector * detector = new cv::StarFeatureDetector();
cv::FeatureDetector * detector = new cv::SurfFeatureDetector(); // cv::FeatureDetector * detector = new cv::SURF();
cv::FeatureDetector * detector = new cv::SURF(600.0);
detector->detect(objectImg, objectKeypoints); detector->detect(objectImg, objectKeypoints);
printf("Object: %d keypoints detected in %d ms\n", (int)objectKeypoints.size(), time.restart()); printf("Object: %d keypoints detected in %d ms\n", (int)objectKeypoints.size(), time.restart());
detector->detect(sceneImg, sceneKeypoints); detector->detect(sceneImg, sceneKeypoints);
@ -75,10 +79,10 @@ int main(int argc, char * argv[])
//////////////////////////// ////////////////////////////
// The extractor can be any of (see OpenCV features2d.hpp): // The extractor can be any of (see OpenCV features2d.hpp):
// cv::DescriptorExtractor * extractor = new cv::BriefDescriptorExtractor(); // cv::DescriptorExtractor * extractor = new cv::BriefDescriptorExtractor();
// cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // cv::DescriptorExtractor * extractor = new cv::ORB();
// cv::DescriptorExtractor * extractor = new cv::SiftDescriptorExtractor(); // cv::DescriptorExtractor * extractor = new cv::SIFT();
// cv::DescriptorExtractor * extractor = new cv::SurfDescriptorExtractor(); // cv::DescriptorExtractor * extractor = new cv::SURF();
cv::DescriptorExtractor * extractor = new cv::SurfDescriptorExtractor(); cv::DescriptorExtractor * extractor = new cv::SURF(600.0);
extractor->compute(objectImg, objectKeypoints, objectDescriptors); extractor->compute(objectImg, objectKeypoints, objectDescriptors);
printf("Object: %d descriptors extracted in %d ms\n", objectDescriptors.rows, time.restart()); printf("Object: %d descriptors extracted in %d ms\n", objectDescriptors.rows, time.restart());
extractor->compute(sceneImg, sceneKeypoints, sceneDescriptors); extractor->compute(sceneImg, sceneKeypoints, sceneDescriptors);
@ -187,7 +191,7 @@ int main(int argc, char * argv[])
} }
QPen rectPen(color); QPen rectPen(color);
rectPen.setWidth(4); rectPen.setWidth(4);
QGraphicsRectItem * rectItem = new QGraphicsRectItem(objWidget.image().rect()); QGraphicsRectItem * rectItem = new QGraphicsRectItem(objWidget.pixmap().rect());
rectItem->setPen(rectPen); rectItem->setPen(rectPen);
rectItem->setTransform(hTransform); rectItem->setTransform(hTransform);
sceneWidget.addRect(rectItem); sceneWidget.addRect(rectItem);
@ -201,54 +205,44 @@ int main(int argc, char * argv[])
// Wait for gui // Wait for gui
objWidget.setGraphicsViewMode(false); objWidget.setGraphicsViewMode(false);
objWidget.setWindowTitle("Object"); objWidget.setWindowTitle("Object");
if(objWidget.image().width() <= 800) if(objWidget.pixmap().width() <= 800)
{ {
objWidget.setGeometry(0, 0, objWidget.image().width(), objWidget.image().height()); objWidget.setMinimumSize(objWidget.pixmap().width(), objWidget.pixmap().height());
} }
else else
{ {
objWidget.setGeometry(0, 0, 800, 600); objWidget.setMinimumSize(800, 600);
objWidget.setAutoScale(false); objWidget.setAutoScale(false);
} }
objWidget.show();
sceneWidget.setGraphicsViewMode(false); sceneWidget.setGraphicsViewMode(false);
sceneWidget.setWindowTitle("Scene"); sceneWidget.setWindowTitle("Scene");
if(sceneWidget.image().width() <= 800) if(sceneWidget.pixmap().width() <= 800)
{ {
sceneWidget.setGeometry(0, 0, sceneWidget.image().width(), sceneWidget.image().height()); sceneWidget.setMinimumSize(sceneWidget.pixmap().width(), sceneWidget.pixmap().height());
} }
else else
{ {
sceneWidget.setGeometry(0, 0, 800, 600); sceneWidget.setMinimumSize(800, 600);
sceneWidget.setAutoScale(false); sceneWidget.setAutoScale(false);
} }
sceneWidget.show(); sceneWidget.show();
objWidget.show();
int r = app.exec(); int r = app.exec();
printf("Closing...\n"); printf("Closing...\n");
//////////////////////////// ////////////////////////////
//Cleanup //Cleanup
//////////////////////////// ////////////////////////////
delete detector; delete detector;
delete extractor; delete extractor;
if(objectImg) {
cvReleaseImage(&objectImg);
}
if(sceneImg) {
cvReleaseImage(&sceneImg);
}
return r; return r;
} }
else else
{ {
if(objectImg) {
cvReleaseImage(&objectImg);
}
if(sceneImg) {
cvReleaseImage(&sceneImg);
}
printf("Images are not valid!\n"); printf("Images are not valid!\n");
showUsage(); showUsage();
} }

View File

@ -7,7 +7,7 @@
#include "ObjWidget.h" #include "ObjWidget.h"
#include "KeypointItem.h" #include "KeypointItem.h"
#include "Camera.h" #include "Camera.h"
#include "qtipl.h" #include "QtOpenCV.h"
#include "Settings.h" #include "Settings.h"
#include <stdio.h> #include <stdio.h>
@ -16,14 +16,13 @@
#include <QtGui/QGraphicsPixmapItem> #include <QtGui/QGraphicsPixmapItem>
#include <QtGui/QMessageBox> #include <QtGui/QMessageBox>
#include <opencv2/imgproc/imgproc_c.h> #include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui_c.h> #include <opencv2/highgui/highgui.hpp>
AddObjectDialog::AddObjectDialog(Camera * camera, const IplImage * image, bool mirrorView, QWidget * parent, Qt::WindowFlags f) : AddObjectDialog::AddObjectDialog(Camera * camera, const cv::Mat & image, bool mirrorView, QWidget * parent, Qt::WindowFlags f) :
QDialog(parent, f), QDialog(parent, f),
camera_(camera), camera_(camera),
object_(0), object_(0)
cvImage_(0)
{ {
ui_ = new Ui_addObjectDialog(); ui_ = new Ui_addObjectDialog();
ui_->setupUi(this); ui_->setupUi(this);
@ -38,24 +37,19 @@ AddObjectDialog::AddObjectDialog(Camera * camera, const IplImage * image, bool m
connect(ui_->cameraView, SIGNAL(roiChanged(const QRect &)), this, SLOT(updateNextButton(const QRect &))); connect(ui_->cameraView, SIGNAL(roiChanged(const QRect &)), this, SLOT(updateNextButton(const QRect &)));
ui_->cameraView->setMirrorView(mirrorView); ui_->cameraView->setMirrorView(mirrorView);
if((camera_ && camera_->isRunning()) || !image) if((camera_ && camera_->isRunning()) || image.empty())
{ {
this->setState(kTakePicture); this->setState(kTakePicture);
} }
else if(image) else if(!image.empty())
{ {
cv::Mat img(image); update(image);
update(img);
this->setState(kSelectFeatures); this->setState(kSelectFeatures);
} }
} }
AddObjectDialog::~AddObjectDialog() AddObjectDialog::~AddObjectDialog()
{ {
if(cvImage_)
{
cvReleaseImage(&cvImage_);
}
if(object_) if(object_)
{ {
delete object_; delete object_;
@ -214,11 +208,11 @@ void AddObjectDialog::setState(int state)
std::vector<cv::KeyPoint> selectedKeypoints = ui_->cameraView->selectedKeypoints(); std::vector<cv::KeyPoint> selectedKeypoints = ui_->cameraView->selectedKeypoints();
// Select keypoints // Select keypoints
if(cvImage_ && if(!cvImage_.empty() &&
((ui_->comboBox_selection->currentIndex() == 1 && selectedKeypoints.size()) || ((ui_->comboBox_selection->currentIndex() == 1 && selectedKeypoints.size()) ||
(ui_->comboBox_selection->currentIndex() == 0 && !roi_.isNull()))) (ui_->comboBox_selection->currentIndex() == 0 && !roi_.isNull())))
{ {
CvRect roi; cv::Rect roi;
if(ui_->comboBox_selection->currentIndex() == 1) if(ui_->comboBox_selection->currentIndex() == 1)
{ {
roi = computeROI(selectedKeypoints); roi = computeROI(selectedKeypoints);
@ -230,7 +224,7 @@ void AddObjectDialog::setState(int state)
roi.width = roi_.width(); roi.width = roi_.width();
roi.height = roi_.height(); roi.height = roi_.height();
} }
cvSetImageROI(cvImage_, roi); cv::Mat imgRoi(cvImage_, roi);
if(ui_->comboBox_selection->currentIndex() == 1) if(ui_->comboBox_selection->currentIndex() == 1)
{ {
@ -248,13 +242,12 @@ void AddObjectDialog::setState(int state)
// Extract keypoints // Extract keypoints
selectedKeypoints.clear(); selectedKeypoints.clear();
cv::FeatureDetector * detector = Settings::createFeaturesDetector(); cv::FeatureDetector * detector = Settings::createFeaturesDetector();
detector->detect(cvImage_, selectedKeypoints); detector->detect(imgRoi, selectedKeypoints);
delete detector; delete detector;
} }
ui_->objectView->setData(selectedKeypoints, cv::Mat(), cvImage_, Settings::currentDetectorType(), ""); ui_->objectView->setData(selectedKeypoints, cv::Mat(), imgRoi, Settings::currentDetectorType(), "");
ui_->objectView->setMinimumSize(roi.width, roi.height); ui_->objectView->setMinimumSize(roi.width, roi.height);
ui_->objectView->update(); ui_->objectView->update();
cvResetImageROI(cvImage_);
ui_->pushButton_next->setEnabled(true); ui_->pushButton_next->setEnabled(true);
} }
else else
@ -275,7 +268,7 @@ void AddObjectDialog::setState(int state)
{ {
// Extract descriptors // Extract descriptors
cv::DescriptorExtractor * extractor = Settings::createDescriptorsExtractor(); cv::DescriptorExtractor * extractor = Settings::createDescriptorsExtractor();
extractor->compute(ui_->objectView->iplImage(), keypoints, descriptors); extractor->compute(ui_->objectView->cvImage(), keypoints, descriptors);
delete extractor; delete extractor;
if(keypoints.size() != (unsigned int)descriptors.rows) if(keypoints.size() != (unsigned int)descriptors.rows)
@ -289,7 +282,7 @@ void AddObjectDialog::setState(int state)
delete object_; delete object_;
object_ = 0; object_ = 0;
} }
object_ = new ObjWidget(0, keypoints, descriptors, ui_->objectView->iplImage(), Settings::currentDetectorType(), Settings::currentDescriptorType()); object_ = new ObjWidget(0, keypoints, descriptors, ui_->objectView->cvImage(), Settings::currentDetectorType(), Settings::currentDescriptorType());
this->accept(); this->accept();
} }
@ -298,22 +291,17 @@ void AddObjectDialog::setState(int state)
void AddObjectDialog::update(const cv::Mat & image) void AddObjectDialog::update(const cv::Mat & image)
{ {
if(cvImage_) cvImage_ = cv::Mat();
{ if(!image.empty())
cvReleaseImage(&cvImage_);
cvImage_ = 0;
}
IplImage iplImage = image;
cvImage_ = cvCloneImage(& iplImage);
if(cvImage_)
{ {
// convert to grayscale // convert to grayscale
if(cvImage_->nChannels != 1 || cvImage_->depth != IPL_DEPTH_8U) if(image.channels() != 1 || image.depth() != CV_8U)
{ {
IplImage * imageGrayScale = cvCreateImage(cvSize(cvImage_->width, cvImage_->height), IPL_DEPTH_8U, 1); cv::cvtColor(image, cvImage_, CV_BGR2GRAY);
cvCvtColor(cvImage_, imageGrayScale, CV_BGR2GRAY); }
cvReleaseImage(&cvImage_); else
cvImage_ = imageGrayScale; {
cvImage_ = image;
} }
// Extract keypoints // Extract keypoints
@ -327,9 +315,9 @@ void AddObjectDialog::update(const cv::Mat & image)
} }
} }
CvRect AddObjectDialog::computeROI(const std::vector<cv::KeyPoint> & kpts) cv::Rect AddObjectDialog::computeROI(const std::vector<cv::KeyPoint> & kpts)
{ {
CvRect roi = cvRect(0,0,0,0); cv::Rect roi(0,0,0,0);
int x1=0,x2=0,h1=0,h2=0; int x1=0,x2=0,h1=0,h2=0;
for(unsigned int i=0; i<kpts.size(); ++i) for(unsigned int i=0; i<kpts.size(); ++i)
{ {

View File

@ -8,7 +8,7 @@
#include <QtGui/QDialog> #include <QtGui/QDialog>
#include <QtCore/QTimer> #include <QtCore/QTimer>
#include <opencv2/features2d/features2d.hpp> #include <opencv2/features2d/features2d.hpp>
#include <opencv2/core/core_c.h> #include <opencv2/core/core.hpp>
class Ui_addObjectDialog; class Ui_addObjectDialog;
class ObjWidget; class ObjWidget;
@ -20,7 +20,7 @@ class AddObjectDialog : public QDialog {
Q_OBJECT Q_OBJECT
public: public:
AddObjectDialog(Camera * camera, const IplImage * image, bool mirrorView, QWidget * parent = 0, Qt::WindowFlags f = 0); AddObjectDialog(Camera * camera, const cv::Mat & image, bool mirrorView, QWidget * parent = 0, Qt::WindowFlags f = 0);
virtual ~AddObjectDialog(); virtual ~AddObjectDialog();
// ownership transferred to caller // ownership transferred to caller
@ -41,12 +41,12 @@ protected:
private: private:
void setState(int state); void setState(int state);
CvRect computeROI(const std::vector<cv::KeyPoint> & kpts); cv::Rect computeROI(const std::vector<cv::KeyPoint> & kpts);
private: private:
Ui_addObjectDialog * ui_; Ui_addObjectDialog * ui_;
Camera * camera_; Camera * camera_;
ObjWidget * object_; ObjWidget * object_;
IplImage * cvImage_; cv::Mat cvImage_;
enum State{kTakePicture, kSelectFeatures, kVerifySelection, kClosing}; enum State{kTakePicture, kSelectFeatures, kVerifySelection, kClosing};
int state_; int state_;

View File

@ -5,7 +5,7 @@
#include "MainWindow.h" #include "MainWindow.h"
#include "AddObjectDialog.h" #include "AddObjectDialog.h"
#include "ui_mainWindow.h" #include "ui_mainWindow.h"
#include "qtipl.h" #include "QtOpenCV.h"
#include "KeypointItem.h" #include "KeypointItem.h"
#include "ObjWidget.h" #include "ObjWidget.h"
#include "Camera.h" #include "Camera.h"
@ -188,14 +188,14 @@ void MainWindow::saveObjects(const QString & dirPath)
{ {
for(int i=0; i<objects_.size(); ++i) for(int i=0; i<objects_.size(); ++i)
{ {
objects_.at(i)->image().save(QString("%1/%2.bmp").arg(dirPath).arg(objects_.at(i)->id())); objects_.at(i)->pixmap().save(QString("%1/%2.bmp").arg(dirPath).arg(objects_.at(i)->id()));
} }
} }
} }
void MainWindow::loadObjects() void MainWindow::loadObjects()
{ {
QString dirPath = QFileDialog::getExistingDirectory(this, tr("Load objects..."), Settings::workingDirectory()); QString dirPath = QFileDialog::getExistingDirectory(this, tr("Loading objects... Select a directory."), Settings::workingDirectory());
if(!dirPath.isEmpty()) if(!dirPath.isEmpty())
{ {
loadObjects(dirPath); loadObjects(dirPath);
@ -203,7 +203,7 @@ void MainWindow::loadObjects()
} }
bool MainWindow::saveObjects() bool MainWindow::saveObjects()
{ {
QString dirPath = QFileDialog::getExistingDirectory(this, tr("Save objects..."), Settings::workingDirectory()); QString dirPath = QFileDialog::getExistingDirectory(this, tr("Saving objects... Select a directory."), Settings::workingDirectory());
if(!dirPath.isEmpty()) if(!dirPath.isEmpty())
{ {
saveObjects(dirPath); saveObjects(dirPath);
@ -219,10 +219,9 @@ void MainWindow::removeObject(ObjWidget * object)
objects_.removeOne(object); objects_.removeOne(object);
object->deleteLater(); object->deleteLater();
this->updateData(); this->updateData();
if(!camera_->isRunning() && ui_->imageView_source->iplImage()) if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{ {
cv::Mat image(ui_->imageView_source->iplImage(), true); this->update(ui_->imageView_source->cvImage());
this->update(image);
} }
} }
} }
@ -235,10 +234,9 @@ void MainWindow::removeAllObjects()
} }
objects_.clear(); objects_.clear();
this->updateData(); this->updateData();
if(!camera_->isRunning() && ui_->imageView_source->iplImage()) if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{ {
cv::Mat image(ui_->imageView_source->iplImage(), true); this->update(ui_->imageView_source->cvImage());
this->update(image);
} }
} }
@ -247,13 +245,13 @@ void MainWindow::addObjectFromScene()
disconnect(camera_, SIGNAL(imageReceived(const cv::Mat &)), this, SLOT(update(const cv::Mat &))); disconnect(camera_, SIGNAL(imageReceived(const cv::Mat &)), this, SLOT(update(const cv::Mat &)));
AddObjectDialog * dialog; AddObjectDialog * dialog;
bool resumeCamera = camera_->isRunning(); bool resumeCamera = camera_->isRunning();
if(!ui_->actionStart_camera->isEnabled() || !ui_->imageView_source->iplImage()) if(!ui_->actionStart_camera->isEnabled() || ui_->imageView_source->cvImage().empty())
{ {
dialog = new AddObjectDialog(camera_, ui_->imageView_source->iplImage(), ui_->imageView_source->isMirrorView(), this); dialog = new AddObjectDialog(camera_, cv::Mat(), ui_->imageView_source->isMirrorView(), this);
} }
else else
{ {
dialog = new AddObjectDialog(0, ui_->imageView_source->iplImage(), ui_->imageView_source->isMirrorView(), this); dialog = new AddObjectDialog(0, ui_->imageView_source->cvImage(), ui_->imageView_source->isMirrorView(), this);
} }
if(dialog->exec() == QDialog::Accepted) if(dialog->exec() == QDialog::Accepted)
{ {
@ -266,15 +264,14 @@ void MainWindow::addObjectFromScene()
objectsModified_ = true; objectsModified_ = true;
} }
} }
if(resumeCamera || !ui_->imageView_source->iplImage()) if(resumeCamera || ui_->imageView_source->cvImage().empty())
{ {
this->startProcessing(); this->startProcessing();
} }
else else
{ {
connect(camera_, SIGNAL(imageReceived(const cv::Mat &)), this, SLOT(update(const cv::Mat &))); connect(camera_, SIGNAL(imageReceived(const cv::Mat &)), this, SLOT(update(const cv::Mat &)));
cv::Mat image(ui_->imageView_source->iplImage(), true); this->update(ui_->imageView_source->cvImage());
this->update(image);
} }
delete dialog; delete dialog;
} }
@ -298,8 +295,8 @@ void MainWindow::addObjectFromFile(const QString & filePath)
printf("Load file %s\n", filePath.toStdString().c_str()); printf("Load file %s\n", filePath.toStdString().c_str());
if(!filePath.isNull()) if(!filePath.isNull())
{ {
IplImage * img = cvLoadImage(filePath.toStdString().c_str(), CV_LOAD_IMAGE_GRAYSCALE); cv::Mat img = cv::imread(filePath.toStdString().c_str(), cv::IMREAD_GRAYSCALE);
if(img) if(!img.empty())
{ {
int id = 0; int id = 0;
QFileInfo file(filePath); QFileInfo file(filePath);
@ -334,7 +331,6 @@ void MainWindow::addObjectFromFile(const QString & filePath)
} }
objects_.append(new ObjWidget(id, std::vector<cv::KeyPoint>(), cv::Mat(), img, "", "")); objects_.append(new ObjWidget(id, std::vector<cv::KeyPoint>(), cv::Mat(), img, "", ""));
this->showObject(objects_.last()); this->showObject(objects_.last());
cvReleaseImage(&img);
} }
} }
} }
@ -344,12 +340,10 @@ void MainWindow::loadSceneFromFile()
QString fileName = QFileDialog::getOpenFileName(this, tr("Load scene..."), Settings::workingDirectory(), tr("Image Files (%1)").arg(Settings::getGeneral_imageFormats())); QString fileName = QFileDialog::getOpenFileName(this, tr("Load scene..."), Settings::workingDirectory(), tr("Image Files (%1)").arg(Settings::getGeneral_imageFormats()));
if(!fileName.isEmpty()) if(!fileName.isEmpty())
{ {
IplImage * img = cvLoadImage(fileName.toStdString().c_str()); cv::Mat img = cv::imread(fileName.toStdString().c_str());
if(img) if(!img.empty())
{ {
cv::Mat imageMat(img); this->update(img);
this->update(imageMat);
cvReleaseImage(&img);
ui_->label_timeRefreshRate->setVisible(false); ui_->label_timeRefreshRate->setVisible(false);
} }
} }
@ -387,7 +381,7 @@ void MainWindow::showObject(ObjWidget * obj)
obj->setMirrorView(ui_->imageView_source->isMirrorView()); obj->setMirrorView(ui_->imageView_source->isMirrorView());
QList<ObjWidget*> objs = ui_->objects_area->findChildren<ObjWidget*>(); QList<ObjWidget*> objs = ui_->objects_area->findChildren<ObjWidget*>();
QVBoxLayout * vLayout = new QVBoxLayout(); QVBoxLayout * vLayout = new QVBoxLayout();
obj->setMinimumSize(obj->image().width(), obj->image().height()); obj->setMinimumSize(obj->pixmap().width(), obj->pixmap().height());
int id = Settings::getGeneral_nextObjID(); int id = Settings::getGeneral_nextObjID();
if(obj->id() == 0) if(obj->id() == 0)
{ {
@ -430,7 +424,7 @@ void MainWindow::updateObjects()
{ {
for(int i=0; i<objects_.size(); ++i) for(int i=0; i<objects_.size(); ++i)
{ {
IplImage * img = cvCloneImage(objects_.at(i)->iplImage()); const cv::Mat & img = objects_.at(i)->cvImage();
cv::FeatureDetector * detector = Settings::createFeaturesDetector(); cv::FeatureDetector * detector = Settings::createFeaturesDetector();
std::vector<cv::KeyPoint> keypoints; std::vector<cv::KeyPoint> keypoints;
detector->detect(img, keypoints); detector->detect(img, keypoints);
@ -461,10 +455,9 @@ void MainWindow::updateObjects()
} }
updateData(); updateData();
} }
if(!camera_->isRunning() && ui_->imageView_source->iplImage()) if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{ {
cv::Mat image(ui_->imageView_source->iplImage(), true); this->update(ui_->imageView_source->cvImage());
this->update(image);
} }
this->statusBar()->clearMessage(); this->statusBar()->clearMessage();
} }
@ -799,7 +792,7 @@ void MainWindow::update(const cv::Mat & image)
H.at<double>(0,2), H.at<double>(1,2), H.at<double>(2,2)); H.at<double>(0,2), H.at<double>(1,2), H.at<double>(2,2));
// find center of object // find center of object
QRect rect = objects_.at(j)->image().rect(); QRect rect = objects_.at(j)->pixmap().rect();
objectsDetected.insert(objects_.at(j)->id(), QPair<QRect, QTransform>(rect, hTransform)); objectsDetected.insert(objects_.at(j)->id(), QPair<QRect, QTransform>(rect, hTransform));
// Example getting the center of the object in the scene using the homography // Example getting the center of the object in the scene using the homography
//QPoint pos(rect.width()/2, rect.height()/2); //QPoint pos(rect.width()/2, rect.height()/2);
@ -899,10 +892,9 @@ void MainWindow::notifyParametersChanged()
{ {
this->statusBar()->showMessage(tr("A parameter has changed... \"Update objects\" may be required.")); this->statusBar()->showMessage(tr("A parameter has changed... \"Update objects\" may be required."));
} }
if(!camera_->isRunning() && ui_->imageView_source->iplImage()) if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{ {
cv::Mat image(ui_->imageView_source->iplImage(), true); this->update(ui_->imageView_source->cvImage());
this->update(image);
ui_->label_timeRefreshRate->setVisible(false); ui_->label_timeRefreshRate->setVisible(false);
} }

View File

@ -4,7 +4,7 @@
#include "ObjWidget.h" #include "ObjWidget.h"
#include "KeypointItem.h" #include "KeypointItem.h"
#include "qtipl.h" #include "QtOpenCV.h"
#include "Settings.h" #include "Settings.h"
#include <opencv2/highgui/highgui.hpp> #include <opencv2/highgui/highgui.hpp>
@ -29,41 +29,35 @@
ObjWidget::ObjWidget(QWidget * parent) : ObjWidget::ObjWidget(QWidget * parent) :
QWidget(parent), QWidget(parent),
iplImage_(0),
graphicsView_(0), graphicsView_(0),
id_(0), id_(0),
detectorType_("NA"), detectorType_("NA"),
descriptorType_("NA"), descriptorType_("NA"),
graphicsViewInitialized_(false), graphicsViewInitialized_(false),
alpha_(50) alpha_(100)
{ {
setupUi(); setupUi();
} }
ObjWidget::ObjWidget(int id, ObjWidget::ObjWidget(int id,
const std::vector<cv::KeyPoint> & keypoints, const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors, const cv::Mat & descriptors,
const IplImage * iplImage, const cv::Mat & image,
const QString & detectorType, const QString & detectorType,
const QString & descriptorType, const QString & descriptorType,
QWidget * parent) : QWidget * parent) :
QWidget(parent), QWidget(parent),
iplImage_(0),
graphicsView_(0), graphicsView_(0),
id_(id), id_(id),
detectorType_("NA"), detectorType_("NA"),
descriptorType_("NA"), descriptorType_("NA"),
graphicsViewInitialized_(false), graphicsViewInitialized_(false),
alpha_(50) alpha_(100)
{ {
setupUi(); setupUi();
this->setData(keypoints, descriptors, iplImage, detectorType, descriptorType); this->setData(keypoints, descriptors, image, detectorType, descriptorType);
} }
ObjWidget::~ObjWidget() ObjWidget::~ObjWidget()
{ {
if(iplImage_)
{
cvReleaseImage(&iplImage_);
}
} }
void ObjWidget::setupUi() void ObjWidget::setupUi()
@ -247,7 +241,7 @@ void ObjWidget::setTextLabel(const QString & text)
void ObjWidget::setData(const std::vector<cv::KeyPoint> & keypoints, void ObjWidget::setData(const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors, const cv::Mat & descriptors,
const IplImage * image, const cv::Mat & image,
const QString & detectorType, const QString & detectorType,
const QString & descriptorType) const QString & descriptorType)
{ {
@ -261,30 +255,16 @@ void ObjWidget::setData(const std::vector<cv::KeyPoint> & keypoints,
detectorType_ = detectorType; detectorType_ = detectorType;
descriptorType_ = descriptorType; descriptorType_ = descriptorType;
mouseCurrentPos_ = mousePressedPos_; // this will reset roi selection mouseCurrentPos_ = mousePressedPos_; // this will reset roi selection
if(iplImage_)
{
cvReleaseImage(&iplImage_);
iplImage_ = 0;
}
if(image)
{
/* create destination image
Note that cvGetSize will return the width and the height of ROI */
iplImage_ = cvCreateImage(cvGetSize(image),
image->depth,
image->nChannels);
/* copy subimage */ cvImage_ = image.clone();
cvCopy(image, iplImage_, NULL); pixmap_ = QPixmap::fromImage(cvtCvMat2QImage(cvImage_));
image_ = QPixmap::fromImage(Ipl2QImage(iplImage_));
//this->setMinimumSize(image_.size()); //this->setMinimumSize(image_.size());
}
if(graphicsViewMode_->isChecked()) if(graphicsViewMode_->isChecked())
{ {
this->setupGraphicsView(); this->setupGraphicsView();
} }
label_->setVisible(!image); label_->setVisible(image.empty());
} }
void ObjWidget::resetKptsColor() void ObjWidget::resetKptsColor()
@ -385,7 +365,7 @@ void ObjWidget::save(QDataStream & streamPtr) const
descriptors_.type() << descriptors_.type() <<
dataSize; dataSize;
streamPtr << QByteArray((char*)descriptors_.data, dataSize); streamPtr << QByteArray((char*)descriptors_.data, dataSize);
streamPtr << image_; streamPtr << pixmap_;
} }
void ObjWidget::load(QDataStream & streamPtr) void ObjWidget::load(QDataStream & streamPtr)
@ -416,14 +396,9 @@ void ObjWidget::load(QDataStream & streamPtr)
QByteArray data; QByteArray data;
streamPtr >> data; streamPtr >> data;
descriptors = cv::Mat(rows, cols, type, data.data()).clone(); descriptors = cv::Mat(rows, cols, type, data.data()).clone();
streamPtr >> image_; streamPtr >> pixmap_;
this->setData(kpts, descriptors, 0, detectorType, descriptorType); this->setData(kpts, descriptors, cv::Mat(), detectorType, descriptorType);
if(iplImage_) cvImage_ = cvtQImage2CvMat(pixmap_.toImage());
{
cvReleaseImage(&iplImage_);
iplImage_ = 0;
}
iplImage_ = QImage2Ipl(image_.toImage());
//this->setMinimumSize(image_.size()); //this->setMinimumSize(image_.size());
} }
@ -433,10 +408,10 @@ void ObjWidget::computeScaleOffsets(float & scale, float & offsetX, float & offs
offsetX = 0.0f; offsetX = 0.0f;
offsetY = 0.0f; offsetY = 0.0f;
if(!image_.isNull()) if(!pixmap_.isNull())
{ {
float w = image_.width(); float w = pixmap_.width();
float h = image_.height(); float h = pixmap_.height();
float widthRatio = float(this->rect().width()) / w; float widthRatio = float(this->rect().width()) / w;
float heightRatio = float(this->rect().height()) / h; float heightRatio = float(this->rect().height()) / h;
@ -475,7 +450,7 @@ void ObjWidget::paintEvent(QPaintEvent *event)
} }
else else
{ {
if(!image_.isNull()) if(!pixmap_.isNull())
{ {
//Scale //Scale
float ratio, offsetX, offsetY; float ratio, offsetX, offsetY;
@ -484,7 +459,7 @@ void ObjWidget::paintEvent(QPaintEvent *event)
if(mirrorView_->isChecked()) if(mirrorView_->isChecked())
{ {
painter.translate(offsetX+image_.width()*ratio, offsetY); painter.translate(offsetX+pixmap_.width()*ratio, offsetY);
painter.scale(-ratio, ratio); painter.scale(-ratio, ratio);
} }
else else
@ -495,7 +470,7 @@ void ObjWidget::paintEvent(QPaintEvent *event)
if(showImage_->isChecked()) if(showImage_->isChecked())
{ {
painter.drawPixmap(QPoint(0,0), image_); painter.drawPixmap(QPoint(0,0), pixmap_);
} }
if(showFeatures_->isChecked()) if(showFeatures_->isChecked())
@ -523,15 +498,15 @@ void ObjWidget::paintEvent(QPaintEvent *event)
if(mirrorView_->isChecked()) if(mirrorView_->isChecked())
{ {
int l = left; int l = left;
left = qAbs(right - image_.width()); left = qAbs(right - pixmap_.width());
right = qAbs(l - image_.width()); right = qAbs(l - pixmap_.width());
} }
painter.setPen(Qt::NoPen); painter.setPen(Qt::NoPen);
painter.setBrush(QBrush(QColor(0,0,0,100))); painter.setBrush(QBrush(QColor(0,0,0,100)));
painter.drawRect(0, 0, image_.width(), top-1); painter.drawRect(0, 0, pixmap_.width(), top-1);
painter.drawRect(0, top, left, bottom-top); painter.drawRect(0, top, left, bottom-top);
painter.drawRect(right, top, image_.width()-right, bottom-top); painter.drawRect(right, top, pixmap_.width()-right, bottom-top);
painter.drawRect(0, bottom, image_.width(), image_.height()-bottom); painter.drawRect(0, bottom, pixmap_.width(), pixmap_.height()-bottom);
painter.restore(); painter.restore();
} }
} }
@ -570,7 +545,7 @@ void ObjWidget::mouseMoveEvent(QMouseEvent * event)
void ObjWidget::mouseReleaseEvent(QMouseEvent * event) void ObjWidget::mouseReleaseEvent(QMouseEvent * event)
{ {
if(!image_.isNull()) if(!pixmap_.isNull())
{ {
int left,top,bottom,right; int left,top,bottom,right;
@ -582,8 +557,8 @@ void ObjWidget::mouseReleaseEvent(QMouseEvent * event)
if(mirrorView_->isChecked()) if(mirrorView_->isChecked())
{ {
int l = left; int l = left;
left = qAbs(right - image_.width()); left = qAbs(right - pixmap_.width());
right = qAbs(l - image_.width()); right = qAbs(l - pixmap_.width());
} }
emit roiChanged(QRect(left, top, right-left, bottom-top)); emit roiChanged(QRect(left, top, right-left, bottom-top));
@ -754,15 +729,15 @@ std::vector<cv::KeyPoint> ObjWidget::selectedKeypoints() const
void ObjWidget::setupGraphicsView() void ObjWidget::setupGraphicsView()
{ {
if(!image_.isNull()) if(!pixmap_.isNull())
{ {
graphicsView_->scene()->setSceneRect(image_.rect()); graphicsView_->scene()->setSceneRect(pixmap_.rect());
QList<KeypointItem*> items; QList<KeypointItem*> items;
if(image_.width() > 0 && image_.height() > 0) if(pixmap_.width() > 0 && pixmap_.height() > 0)
{ {
QRectF sceneRect = graphicsView_->sceneRect(); QRectF sceneRect = graphicsView_->sceneRect();
QGraphicsPixmapItem * pixmapItem = graphicsView_->scene()->addPixmap(image_); QGraphicsPixmapItem * pixmapItem = graphicsView_->scene()->addPixmap(pixmap_);
pixmapItem->setVisible(this->isImageShown()); pixmapItem->setVisible(this->isImageShown());
this->drawKeypoints(); this->drawKeypoints();

View File

@ -27,7 +27,7 @@ public:
ObjWidget(int id, ObjWidget(int id,
const std::vector<cv::KeyPoint> & keypoints, const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors, const cv::Mat & descriptors,
const IplImage * image, const cv::Mat & image,
const QString & detectorType = "NA", const QString & detectorType = "NA",
const QString & descriptorType = "NA", const QString & descriptorType = "NA",
QWidget * parent = 0); QWidget * parent = 0);
@ -36,7 +36,7 @@ public:
void setId(int id); void setId(int id);
void setData(const std::vector<cv::KeyPoint> & keypoints, void setData(const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors, const cv::Mat & descriptors,
const IplImage * image, const cv::Mat & image,
const QString & detectorType, const QString & detectorType,
const QString & descriptorType); const QString & descriptorType);
void setTextLabel(const QString & text); void setTextLabel(const QString & text);
@ -53,8 +53,8 @@ public:
const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;} const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
const cv::Mat & descriptors() const {return descriptors_;} const cv::Mat & descriptors() const {return descriptors_;}
const QPixmap & image() const {return image_;} const QPixmap & pixmap() const {return pixmap_;}
const IplImage * iplImage() const {return iplImage_;} const cv::Mat & cvImage() const {return cvImage_;}
int id() const {return id_;} int id() const {return id_;}
QColor defaultColor() const; QColor defaultColor() const;
bool isImageShown() const; bool isImageShown() const;
@ -95,8 +95,8 @@ private:
private: private:
std::vector<cv::KeyPoint> keypoints_; std::vector<cv::KeyPoint> keypoints_;
cv::Mat descriptors_; cv::Mat descriptors_;
QPixmap image_; QPixmap pixmap_;
IplImage * iplImage_; cv::Mat cvImage_;
QList<KeypointItem*> keypointItems_; QList<KeypointItem*> keypointItems_;
QGraphicsView * graphicsView_; QGraphicsView * graphicsView_;
int id_; int id_;

112
src/QtOpenCV.cpp Normal file
View File

@ -0,0 +1,112 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#include "QtOpenCV.h"
#include <opencv2/core/core_c.h>
#include <stdio.h>
// Convert an 8-bit cv::Mat (BGR channel order, OpenCV default) to a QImage
// in Format_RGB32. Grayscale (1-channel) input is replicated into R, G and B.
// Returns a null QImage if the input is empty or not 8-bit.
QImage cvtCvMat2QImage(const cv::Mat & image)
{
	QImage qtemp;
	if(!image.empty() && image.depth() == CV_8U)
	{
		const unsigned char * data = image.data;
		qtemp = QImage(image.cols, image.rows, QImage::Format_RGB32);
		const int channels = image.channels();
		// Advance by image.step, not cols*elemSize(): rows may be padded or the
		// matrix may be a non-continuous ROI, in which case the two differ.
		for(int y = 0; y < image.rows; ++y, data += image.step)
		{
			for(int x = 0; x < image.cols; ++x)
			{
				QRgb * p = ((QRgb*)qtemp.scanLine (y)) + x;
				if(channels == 1)
				{
					// single-channel: replicate gray value into all components
					*p = qRgb(data[x], data[x], data[x]);
				}
				else
				{
					// 3+ channels: assume BGR order, qRgb() expects R,G,B
					*p = qRgb(data[x * channels+2], data[x * channels+1], data[x * channels]);
				}
			}
		}
	}
	else if(!image.empty() && image.depth() != CV_8U)
	{
		printf("Wrong image format, must be 8_bits\n");
	}
	return qtemp;
}
}
// Convert a QImage in Format_RGB32 to an 8-bit, 3-channel cv::Mat
// (BGR channel order, OpenCV default). Returns an empty cv::Mat and prints
// an error if the input is null or not in Format_RGB32.
cv::Mat cvtQImage2CvMat(const QImage & image)
{
	cv::Mat cvImage;
	if(!image.isNull() && image.depth() == 32 && image.format() == QImage::Format_RGB32)
	{
		// assume RGB (3 channels)
		int channels = 3;
		cvImage = cv::Mat(image.height(), image.width(), CV_8UC3);
		unsigned char * data = cvImage.data;
		// Advance by cvImage.step (actual row stride) rather than
		// cols*elemSize(), which is only valid for continuous matrices.
		for(int y = 0; y < image.height(); ++y, data += cvImage.step)
		{
			for(int x = 0; x < image.width(); ++x)
			{
				QRgb rgb = image.pixel(x, y);
				data[x * channels+2] = qRed(rgb); //r
				data[x * channels+1] = qGreen(rgb); //g
				data[x * channels] = qBlue(rgb); //b
			}
		}
	}
	else
	{
		printf("Failed to convert image : depth=%d(!=32) format=%d(!=%d)\n", image.depth(), image.format(), QImage::Format_RGB32);
	}
	return cvImage;
}
}
// Convert an 8-bit IplImage (BGR channel order assumed) to a QImage in
// Format_RGB32. Returns a null QImage if the input is null or not 8-bit.
QImage cvtIplImage2QImage(const IplImage * image)
{
	QImage converted;
	if (image && image->depth == IPL_DEPTH_8U && cvGetSize(image).width>0)
	{
		converted = QImage(image->width, image->height, QImage::Format_RGB32);
		const int nc = image->nChannels;
		for(int row = 0; row < image->height; ++row)
		{
			// widthStep is the IplImage row stride (may include padding)
			const char * src = image->imageData + row * image->widthStep;
			QRgb * dst = (QRgb*)converted.scanLine(row);
			for(int col = 0; col < image->width; ++col)
			{
				// BGR -> qRgb(R,G,B); qRgb masks each component with 0xff
				dst[col] = qRgb(src[col * nc + 2], src[col * nc + 1], src[col * nc]);
			}
		}
	}
	else if(image && image->depth != IPL_DEPTH_8U)
	{
		printf("Wrong iplImage format, must be 8_bits\n");
	}
	return converted;
}
}
// Convert a QImage in Format_RGB32 to an 8-bit, 3-channel IplImage
// (BGR channel order). Returns 0 and prints an error on unsupported input.
// Returned image must be released explicitly (using cvReleaseImage()).
IplImage * cvtQImage2IplImage(const QImage & image)
{
	IplImage * result = 0;
	if(!image.isNull() && image.depth() == 32 && image.format() == QImage::Format_RGB32)
	{
		// assume RGB (3 channels)
		const int channels = 3;
		result = cvCreateImage(cvSize(image.width(), image.height()), IPL_DEPTH_8U, channels);
		for(int row = 0; row < image.height(); ++row)
		{
			// widthStep is the IplImage row stride (may include padding)
			char * dst = result->imageData + row * result->widthStep;
			for(int col = 0; col < image.width(); ++col)
			{
				const QRgb rgb = image.pixel(col, row);
				dst[col * channels + 2] = qRed(rgb); //r
				dst[col * channels + 1] = qGreen(rgb); //g
				dst[col * channels] = qBlue(rgb); //b
			}
		}
	}
	else
	{
		printf("Failed to convert image : depth=%d(!=32) format=%d(!=%d)\n", image.depth(), image.format(), QImage::Format_RGB32);
	}
	return result;
}

23
src/QtOpenCV.h Normal file
View File

@ -0,0 +1,23 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#ifndef QTOPENCV_H
#define QTOPENCV_H
#include <QtGui/QImage>
#include <opencv2/core/core.hpp>
// Convert an 8-bit OpenCV matrix (BGR assumed) to a QImage (Format_RGB32).
QImage cvtCvMat2QImage(const cv::Mat & image);
// Convert a QImage (Format_RGB32) to an 8-bit, 3-channel OpenCV matrix (BGR).
cv::Mat cvtQImage2CvMat(const QImage & image);
// Convert an 8-bit IplImage (BGR assumed) to a QImage (Format_RGB32).
QImage cvtIplImage2QImage(const IplImage * image);
// Convert a QImage (Format_RGB32) to an IplImage; the caller owns the
// returned image and must free it with cvReleaseImage().
IplImage * cvtQImage2IplImage(const QImage & image);
#endif // QTOPENCV_H

View File

@ -8,6 +8,7 @@
#include <QtCore/QStringList> #include <QtCore/QStringList>
#include <QtCore/QDir> #include <QtCore/QDir>
#include <stdio.h> #include <stdio.h>
#include <opencv2/nonfree/features2d.hpp>
ParametersMap Settings::defaultParameters_; ParametersMap Settings::defaultParameters_;
ParametersMap Settings::parameters_; ParametersMap Settings::parameters_;
@ -108,15 +109,14 @@ cv::FeatureDetector * Settings::createFeaturesDetector()
case 0: case 0:
if(strategies.at(index).compare("Dense") == 0) if(strategies.at(index).compare("Dense") == 0)
{ {
cv::DenseFeatureDetector::Params params; detector = new cv::DenseFeatureDetector(
params.initFeatureScale = getDense_initFeatureScale(); getDense_initFeatureScale(),
params.featureScaleLevels = getDense_featureScaleLevels(); getDense_featureScaleLevels(),
params.featureScaleMul = getDense_featureScaleMul(); getDense_featureScaleMul(),
params.initXyStep = getDense_initXyStep(); getDense_initXyStep(),
params.initImgBound = getDense_initImgBound(); getDense_initImgBound(),
params.varyXyStepWithScale = getDense_varyXyStepWithScale(); getDense_varyXyStepWithScale(),
params.varyImgBoundWithScale = getDense_varyImgBoundWithScale(); getDense_varyImgBoundWithScale());
detector = new cv::DenseFeatureDetector(params);
} }
break; break;
case 1: case 1:
@ -128,83 +128,77 @@ cv::FeatureDetector * Settings::createFeaturesDetector()
} }
break; break;
case 2: case 2:
if(strategies.at(index).compare("GoodFeaturesToTrack") == 0) if(strategies.at(index).compare("GFTT") == 0)
{ {
cv::GoodFeaturesToTrackDetector::Params params; detector = new cv::GFTTDetector(
params.maxCorners = getGoodFeaturesToTrack_maxCorners(); getGFTT_maxCorners(),
params.qualityLevel = getGoodFeaturesToTrack_qualityLevel(); getGFTT_qualityLevel(),
params.minDistance = getGoodFeaturesToTrack_minDistance(); getGFTT_minDistance(),
params.blockSize = getGoodFeaturesToTrack_blockSize(); getGFTT_blockSize(),
params.useHarrisDetector = getGoodFeaturesToTrack_useHarrisDetector(); getGFTT_useHarrisDetector(),
params.k = getGoodFeaturesToTrack_k(); getGFTT_k());
detector = new cv::GoodFeaturesToTrackDetector(params);
} }
break; break;
case 3: case 3:
if(strategies.at(index).compare("Mser") == 0) if(strategies.at(index).compare("MSER") == 0)
{ {
CvMSERParams params = cvMSERParams(); detector = new cv::MSER(
params.delta = getMser_delta(); getMSER_delta(),
params.maxArea = getMser_maxArea(); getMSER_minArea(),
params.minArea = getMser_minArea(); getMSER_maxArea(),
params.maxVariation = getMser_maxVariation(); getMSER_maxVariation(),
params.minDiversity = getMser_minDiversity(); getMSER_minDiversity(),
params.maxEvolution = getMser_maxEvolution(); getMSER_maxEvolution(),
params.areaThreshold = getMser_areaThreshold(); getMSER_areaThreshold(),
params.minMargin = getMser_minMargin(); getMSER_minMargin(),
params.edgeBlurSize = getMser_edgeBlurSize(); getMSER_edgeBlurSize());
detector = new cv::MserFeatureDetector(params);
} }
break; break;
case 4: case 4:
if(strategies.at(index).compare("Orb") == 0) if(strategies.at(index).compare("ORB") == 0)
{ {
cv::ORB::CommonParams params; detector = new cv::ORB(
params.scale_factor_ = getOrb_scaleFactor(); getORB_nFeatures(),
params.n_levels_ = getOrb_nLevels(); getORB_scaleFactor(),
params.first_level_ = getOrb_firstLevel(); getORB_nLevels(),
params.edge_threshold_ = getOrb_edgeThreshold(); getORB_edgeThreshold(),
detector = new cv::OrbFeatureDetector( getORB_firstLevel(),
getOrb_nFeatures(), getORB_WTA_K(),
params); getORB_scoreType(),
getORB_patchSize());
} }
break; break;
case 5: case 5:
if(strategies.at(index).compare("Sift") == 0) if(strategies.at(index).compare("SIFT") == 0)
{ {
cv::SIFT::DetectorParams detectorParams; detector = new cv::SIFT(
detectorParams.edgeThreshold = getSift_edgeThreshold(); getSIFT_nfeatures(),
detectorParams.threshold = getSift_threshold(); getSIFT_nOctaveLayers(),
cv::SIFT::CommonParams commonParams; getSIFT_contrastThreshold(),
commonParams.angleMode = getSift_angleMode(); getSIFT_edgeThreshold(),
commonParams.firstOctave = getSift_firstOctave(); getSIFT_sigma());
commonParams.nOctaveLayers = getSift_nOctaveLayers();
commonParams.nOctaves = getSift_nOctaves();
detector = new cv::SiftFeatureDetector(
detectorParams,
commonParams);
} }
break; break;
case 6: case 6:
if(strategies.at(index).compare("Star") == 0) if(strategies.at(index).compare("Star") == 0)
{ {
CvStarDetectorParams params = cvStarDetectorParams(); detector = new cv::StarFeatureDetector(
params.lineThresholdBinarized = getStar_lineThresholdBinarized(); getStar_maxSize(),
params.lineThresholdProjected = getStar_lineThresholdProjected(); getStar_responseThreshold(),
params.maxSize = getStar_maxSize(); getStar_lineThresholdProjected(),
params.responseThreshold = getStar_responseThreshold(); getStar_lineThresholdBinarized(),
params.suppressNonmaxSize = getStar_suppressNonmaxSize(); getStar_suppressNonmaxSize());
detector = new cv::StarFeatureDetector(params);
} }
break; break;
case 7: case 7:
if(strategies.at(index).compare("Surf") == 0) if(strategies.at(index).compare("SURF") == 0)
{ {
detector = new cv::SurfFeatureDetector( detector = new cv::SURF(
getSurf_hessianThreshold(), getSURF_hessianThreshold(),
getSurf_octaves(), getSURF_nOctaves(),
getSurf_octaveLayers(), getSURF_nOctaveLayers(),
getSurf_upright()); getSURF_extended(),
getSURF_upright());
} }
break; break;
default: default:
@ -240,41 +234,39 @@ cv::DescriptorExtractor * Settings::createDescriptorsExtractor()
} }
break; break;
case 1: case 1:
if(strategies.at(index).compare("Orb") == 0) if(strategies.at(index).compare("ORB") == 0)
{ {
cv::ORB::CommonParams params; extractor = new cv::ORB(
params.scale_factor_ = getOrb_scaleFactor(); getORB_nFeatures(),
params.n_levels_ = getOrb_nLevels(); getORB_scaleFactor(),
params.first_level_ = getOrb_firstLevel(); getORB_nLevels(),
params.edge_threshold_ = getOrb_edgeThreshold(); getORB_edgeThreshold(),
extractor = new cv::OrbDescriptorExtractor(params); getORB_firstLevel(),
getORB_WTA_K(),
getORB_scoreType(),
getORB_patchSize());
} }
break; break;
case 2: case 2:
if(strategies.at(index).compare("Sift") == 0) if(strategies.at(index).compare("SIFT") == 0)
{ {
cv::SIFT::DescriptorParams descriptorParams; extractor = new cv::SIFT(
descriptorParams.isNormalize = getSift_isNormalize(); getSIFT_nfeatures(),
descriptorParams.magnification = getSift_magnification(); getSIFT_nOctaveLayers(),
descriptorParams.recalculateAngles = getSift_recalculateAngles(); getSIFT_contrastThreshold(),
cv::SIFT::CommonParams commonParams; getSIFT_edgeThreshold(),
commonParams.angleMode = getSift_angleMode(); getSIFT_sigma());
commonParams.firstOctave = getSift_firstOctave();
commonParams.nOctaveLayers = getSift_nOctaveLayers();
commonParams.nOctaves = getSift_nOctaves();
extractor = new cv::SiftDescriptorExtractor(
descriptorParams,
commonParams);
} }
break; break;
case 3: case 3:
if(strategies.at(index).compare("Surf") == 0) if(strategies.at(index).compare("SURF") == 0)
{ {
extractor = new cv::SurfDescriptorExtractor( extractor = new cv::SURF(
getSurf_octaves(), getSURF_hessianThreshold(),
getSurf_octaveLayers(), getSURF_nOctaves(),
getSurf_extended(), getSURF_nOctaveLayers(),
getSurf_upright()); getSURF_extended(),
getSURF_upright());
} }
break; break;
default: default:

View File

@ -59,66 +59,65 @@ class Settings
PARAMETER(Camera, videoFilePath, QString, ""); PARAMETER(Camera, videoFilePath, QString, "");
//List format : [Index:item0;item1;item3;...] //List format : [Index:item0;item1;item3;...]
PARAMETER(Detector, Type, QString, "7:Dense;Fast;GoodFeaturesToTrack;Mser;Orb;Sift;Star;Surf"); PARAMETER(Detector, Type, QString, "7:Dense;Fast;GFTT;MSER;ORB;SIFT;Star;SURF");
PARAMETER(Descriptor, Type, QString, "3:Brief;Orb;Sift;Surf"); PARAMETER(Descriptor, Type, QString, "3:Brief;ORB;SIFT;SURF");
PARAMETER(Brief, bytes, int, 32); PARAMETER(Brief, bytes, int, 32);
PARAMETER(Dense, initFeatureScale, float, cv::DenseFeatureDetector::Params().initFeatureScale); PARAMETER(Dense, initFeatureScale, float, 1.f);
PARAMETER(Dense, featureScaleLevels, int, cv::DenseFeatureDetector::Params().featureScaleLevels); PARAMETER(Dense, featureScaleLevels, int, 1);
PARAMETER(Dense, featureScaleMul, float, cv::DenseFeatureDetector::Params().featureScaleMul); PARAMETER(Dense, featureScaleMul, float, 0.1f);
PARAMETER(Dense, initXyStep, int, cv::DenseFeatureDetector::Params().initXyStep); PARAMETER(Dense, initXyStep, int, 6);
PARAMETER(Dense, initImgBound, int, cv::DenseFeatureDetector::Params().initImgBound); PARAMETER(Dense, initImgBound, int, 0);
PARAMETER(Dense, varyXyStepWithScale, bool, cv::DenseFeatureDetector::Params().varyXyStepWithScale); PARAMETER(Dense, varyXyStepWithScale, bool, true);
PARAMETER(Dense, varyImgBoundWithScale, bool, cv::DenseFeatureDetector::Params().varyImgBoundWithScale); PARAMETER(Dense, varyImgBoundWithScale, bool, false);
PARAMETER(Fast, threshold, int, 20); PARAMETER(Fast, threshold, int, 10);
PARAMETER(Fast, nonmaxSuppression, bool, true); PARAMETER(Fast, nonmaxSuppression, bool, true);
PARAMETER(GoodFeaturesToTrack, maxCorners, int, cv::GoodFeaturesToTrackDetector::Params().maxCorners); PARAMETER(GFTT, maxCorners, int, 1000);
PARAMETER(GoodFeaturesToTrack, qualityLevel, double, cv::GoodFeaturesToTrackDetector::Params().qualityLevel); PARAMETER(GFTT, qualityLevel, double, 0.01);
PARAMETER(GoodFeaturesToTrack, minDistance, double, cv::GoodFeaturesToTrackDetector::Params().minDistance); PARAMETER(GFTT, minDistance, double, 1);
PARAMETER(GoodFeaturesToTrack, blockSize, int, cv::GoodFeaturesToTrackDetector::Params().blockSize); PARAMETER(GFTT, blockSize, int, 3);
PARAMETER(GoodFeaturesToTrack, useHarrisDetector, bool, cv::GoodFeaturesToTrackDetector::Params().useHarrisDetector); PARAMETER(GFTT, useHarrisDetector, bool, false);
PARAMETER(GoodFeaturesToTrack, k, double, cv::GoodFeaturesToTrackDetector::Params().k); PARAMETER(GFTT, k, double, 0.04);
PARAMETER(Orb, nFeatures, uint, 700); PARAMETER(ORB, nFeatures, int, 500);
PARAMETER(Orb, scaleFactor, float, cv::ORB::CommonParams().scale_factor_); PARAMETER(ORB, scaleFactor, float, 1.2f);
PARAMETER(Orb, nLevels, uint, cv::ORB::CommonParams().n_levels_); PARAMETER(ORB, nLevels, int, 8);
PARAMETER(Orb, firstLevel, uint, cv::ORB::CommonParams().first_level_); PARAMETER(ORB, edgeThreshold, int, 31);
PARAMETER(Orb, edgeThreshold, uint, cv::ORB::CommonParams().edge_threshold_); PARAMETER(ORB, firstLevel, int, 0);
PARAMETER(ORB, WTA_K, int, 2);
PARAMETER(ORB, scoreType, int, 0);
PARAMETER(ORB, patchSize, int, 31);
PARAMETER(Mser, delta, int, cvMSERParams().delta); PARAMETER(MSER, delta, int, 5);
PARAMETER(Mser, minArea, int, cvMSERParams().minArea); PARAMETER(MSER, minArea, int, 60);
PARAMETER(Mser, maxArea, int, cvMSERParams().maxArea); PARAMETER(MSER, maxArea, int, 14400);
PARAMETER(Mser, maxVariation, float, cvMSERParams().maxVariation); PARAMETER(MSER, maxVariation, double, 0.25);
PARAMETER(Mser, minDiversity, float, cvMSERParams().minDiversity); PARAMETER(MSER, minDiversity, double, 0.2);
PARAMETER(Mser, maxEvolution, int, cvMSERParams().maxEvolution); PARAMETER(MSER, maxEvolution, int, 200);
PARAMETER(Mser, areaThreshold, double, cvMSERParams().areaThreshold); PARAMETER(MSER, areaThreshold, double, 1.01);
PARAMETER(Mser, minMargin, double, cvMSERParams().minMargin); PARAMETER(MSER, minMargin, double, 0.003);
PARAMETER(Mser, edgeBlurSize, int, cvMSERParams().edgeBlurSize); PARAMETER(MSER, edgeBlurSize, int, 5);
PARAMETER(Sift, threshold, double, cv::SIFT::DetectorParams::GET_DEFAULT_THRESHOLD()); PARAMETER(SIFT, nfeatures, int, 0);
PARAMETER(Sift, edgeThreshold, double, cv::SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD()); PARAMETER(SIFT, nOctaveLayers, int, 3);
PARAMETER(Sift, nOctaves, int, cv::SIFT::CommonParams::DEFAULT_NOCTAVES); PARAMETER(SIFT, contrastThreshold, double, 0.04);
PARAMETER(Sift, nOctaveLayers, int, cv::SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS); PARAMETER(SIFT, edgeThreshold, double, 10);
PARAMETER(Sift, firstOctave, int, cv::SIFT::CommonParams::DEFAULT_FIRST_OCTAVE); PARAMETER(SIFT, sigma, double, 1.6);
PARAMETER(Sift, angleMode, int, cv::SIFT::CommonParams::FIRST_ANGLE);
PARAMETER(Sift, magnification, double, cv::SIFT::DescriptorParams::GET_DEFAULT_MAGNIFICATION());
PARAMETER(Sift, isNormalize, bool, cv::SIFT::DescriptorParams::DEFAULT_IS_NORMALIZE);
PARAMETER(Sift, recalculateAngles, bool, true);
PARAMETER(Star, maxSize, int, cvStarDetectorParams().maxSize); PARAMETER(Star, maxSize, int, 45);
PARAMETER(Star, responseThreshold, int, cvStarDetectorParams().responseThreshold); PARAMETER(Star, responseThreshold, int, 30);
PARAMETER(Star, lineThresholdProjected, int, cvStarDetectorParams().lineThresholdProjected); PARAMETER(Star, lineThresholdProjected, int, 10);
PARAMETER(Star, lineThresholdBinarized, int, cvStarDetectorParams().lineThresholdBinarized); PARAMETER(Star, lineThresholdBinarized, int, 8);
PARAMETER(Star, suppressNonmaxSize, int, cvStarDetectorParams().suppressNonmaxSize); PARAMETER(Star, suppressNonmaxSize, int, 5);
PARAMETER(Surf, hessianThreshold, double, 600.0); PARAMETER(SURF, hessianThreshold, double, 600.0);
PARAMETER(Surf, octaves, int, 3); PARAMETER(SURF, nOctaves, int, 4);
PARAMETER(Surf, octaveLayers, int, 4); PARAMETER(SURF, nOctaveLayers, int, 2);
PARAMETER(Surf, upright, bool, false); PARAMETER(SURF, extended, bool, true);
PARAMETER(Surf, extended, bool, false); PARAMETER(SURF, upright, bool, false);
PARAMETER(NearestNeighbor, nndrRatioUsed, bool, true); PARAMETER(NearestNeighbor, nndrRatioUsed, bool, true);
PARAMETER(NearestNeighbor, nndrRatio, float, 0.8f); PARAMETER(NearestNeighbor, nndrRatio, float, 0.8f);
@ -126,7 +125,7 @@ class Settings
PARAMETER(NearestNeighbor, minDistance, float, 1.6f); PARAMETER(NearestNeighbor, minDistance, float, 1.6f);
PARAMETER(General, autoStartCamera, bool, false); PARAMETER(General, autoStartCamera, bool, false);
PARAMETER(General, autoUpdateObjects, bool, false); PARAMETER(General, autoUpdateObjects, bool, true);
PARAMETER(General, nextObjID, uint, 1); PARAMETER(General, nextObjID, uint, 1);
PARAMETER(General, imageFormats, QString, "*.png *.jpg *.bmp *.tiff") PARAMETER(General, imageFormats, QString, "*.png *.jpg *.bmp *.tiff")
PARAMETER(General, videoFormats, QString, "*.avi *.m4v") PARAMETER(General, videoFormats, QString, "*.avi *.m4v")

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#include "qtipl.h"
#include <opencv2/core/core_c.h>
#include <stdio.h>
// TODO : support only from gray 8bits ?
// Convert an 8-bit IplImage to a QImage in Format_RGB32.
// Pixels are read in BGR order (OpenCV convention) and repacked as RGB.
// Returns a null QImage if the input is null or not 8-bit.
// NOTE(review): indexes nChannels+2 without a channel check — assumes the
// input has at least 3 channels; grayscale input would read out of bounds.
QImage Ipl2QImage(const IplImage *newImage)
{
	QImage qtemp;
	if (newImage && newImage->depth == IPL_DEPTH_8U && cvGetSize(newImage).width>0)
	{
		int x;
		int y;
		// imageData rows are widthStep bytes apart (stride may include padding)
		char* data = newImage->imageData;
		qtemp= QImage(newImage->width, newImage->height,QImage::Format_RGB32);
		for( y = 0; y < newImage->height; ++y, data +=newImage->widthStep )
		{
			for( x = 0; x < newImage->width; ++x)
			{
				// BGR -> qRgb(R,G,B); qRgb masks components to 8 bits
				uint *p = (uint*)qtemp.scanLine (y) + x;
				*p = qRgb(data[x * newImage->nChannels+2], data[x * newImage->nChannels+1],data[x * newImage->nChannels]);
			}
		}
	}
	else
	{
		//Wrong IplImage format
	}
	return qtemp;
}
// Convert a QImage in Format_RGB32 to a newly allocated 8-bit, 3-channel
// IplImage (BGR order). Returns 0 and prints an error on unsupported input.
// The caller owns the returned image and must free it with cvReleaseImage().
IplImage * QImage2Ipl(const QImage & image)
{
	IplImage * iplTmp = 0;
	if(!image.isNull() && image.depth() == 32 && image.format() == QImage::Format_RGB32)
	{
		int x;
		int y;
		// assume RGB (3 channels)
		int channels = 3;
		iplTmp = cvCreateImage(cvSize(image.width(), image.height()), IPL_DEPTH_8U, channels);
		// rows are widthStep bytes apart (stride may include padding)
		char* data = iplTmp->imageData;
		for( y = 0; y < image.height(); ++y, data+=iplTmp->widthStep)
		{
			for( x = 0; x < image.width(); ++x)
			{
				QRgb rgb = image.pixel(x, y);
				data[x * channels+2] = qRed(rgb); //r
				data[x * channels+1] = qGreen(rgb); //g
				data[x * channels] = qBlue(rgb); //b
			}
		}
	}
	else
	{
		printf("failed to convert image : depth=%d(!=32) format=%d(!=%d)\n", image.depth(), image.format(), QImage::Format_RGB32);
	}
	return iplTmp;
}

View File

@ -1,17 +0,0 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#ifndef QTIPL_H
#define QTIPL_H
#include <QtGui/QImage>
#include <opencv2/core/core.hpp>
// IplImage to QImage
QImage Ipl2QImage(const IplImage *newImage);
// QImage to IplImage
IplImage * QImage2Ipl(const QImage & image);
#endif