Updated to OpenCV 2.4 (including all feature detector and descriptor parameters).

Updated to the OpenCV C++ interface (using cv::Mat instead of IplImage).
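
The gist of the port, as a minimal before/after sketch (the file name is illustrative; the calls match the diffs below):

// Before (OpenCV C API): manual allocation and explicit release.
// IplImage * img = cvLoadImage("object.png", CV_LOAD_IMAGE_GRAYSCALE);
// ... use img ...
// cvReleaseImage(&img);

// After (OpenCV 2.4 C++ API): cv::Mat is reference-counted, so no manual release.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

cv::Mat img = cv::imread("object.png", cv::IMREAD_GRAYSCALE);
if(!img.empty())
{
    // ... use img; the buffer is freed automatically when img goes out of scope.
}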


git-svn-id: http://find-object.googlecode.com/svn/trunk/find_object@97 620bd6b2-0a58-f614-fd9a-1bd335dccda9
matlabbe 2012-05-07 22:36:59 +00:00
parent 8e44f2bedf
commit 5c4625a300
16 changed files with 388 additions and 394 deletions

View File: CMakeLists.txt

@ -36,7 +36,7 @@ SET(SRC_FILES
../src/MainWindow.cpp
../src/AddObjectDialog.cpp
../src/KeypointItem.cpp
../src/qtipl.cpp
../src/QtOpenCV.cpp
../src/Camera.cpp
../src/ParametersToolBox.cpp
../src/Settings.cpp

BIN bin/box.png (new binary file, 50 KiB, not shown)

BIN bin/box_in_scene.png (new binary file, 120 KiB, not shown)

View File: CMakeLists.txt

@ -11,7 +11,7 @@ SET(SRC_FILES
main.cpp
../src/ObjWidget.cpp
../src/KeypointItem.cpp
../src/qtipl.cpp
../src/QtOpenCV.cpp
../src/Settings.cpp
../src/Camera.cpp
${moc_srcs}

View File: main.cpp

@ -16,10 +16,12 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp> // for homography
// From this project (see src folder)
#include "ObjWidget.h"
#include "QtOpenCV.h"
void showUsage()
{
@ -44,10 +46,10 @@ int main(int argc, char * argv[])
ObjWidget sceneWidget;
//Load as grayscale
IplImage * objectImg = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
IplImage * sceneImg = cvLoadImage(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
cv::Mat objectImg = cv::imread(argv[1], cv::IMREAD_GRAYSCALE);
cv::Mat sceneImg = cv::imread(argv[2], cv::IMREAD_GRAYSCALE);
if(objectImg && sceneImg)
if(!objectImg.empty() && !sceneImg.empty())
{
std::vector<cv::KeyPoint> objectKeypoints;
std::vector<cv::KeyPoint> sceneKeypoints;
@ -58,13 +60,15 @@ int main(int argc, char * argv[])
// EXTRACT KEYPOINTS
////////////////////////////
// The detector can be any of (see OpenCV features2d.hpp):
// cv::FeatureDetector * detector = new cv::OrbFeatureDetector();
// cv::FeatureDetector * detector = new cv::DenseFeatureDetector();
// cv::FeatureDetector * detector = new cv::FastFeatureDetector();
// cv::FeatureDetector * detector = new cv::MserFeatureDetector();
// cv::FeatureDetector * detector = new cv::SiftFeatureDetector();
// cv::FeatureDetector * detector = new cv::SurfFeatureDetector();
// cv::FeatureDetector * detector = new cv::GFTTDetector();
// cv::FeatureDetector * detector = new cv::MSER();
// cv::FeatureDetector * detector = new cv::ORB();
// cv::FeatureDetector * detector = new cv::SIFT();
// cv::FeatureDetector * detector = new cv::StarFeatureDetector();
cv::FeatureDetector * detector = new cv::SurfFeatureDetector();
// cv::FeatureDetector * detector = new cv::SURF();
cv::FeatureDetector * detector = new cv::SURF(600.0);
detector->detect(objectImg, objectKeypoints);
printf("Object: %d keypoints detected in %d ms\n", (int)objectKeypoints.size(), time.restart());
detector->detect(sceneImg, sceneKeypoints);
@ -75,10 +79,10 @@ int main(int argc, char * argv[])
////////////////////////////
// The extractor can be any of (see OpenCV features2d.hpp):
// cv::DescriptorExtractor * extractor = new cv::BriefDescriptorExtractor();
// cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor();
// cv::DescriptorExtractor * extractor = new cv::SiftDescriptorExtractor();
// cv::DescriptorExtractor * extractor = new cv::SurfDescriptorExtractor();
cv::DescriptorExtractor * extractor = new cv::SurfDescriptorExtractor();
// cv::DescriptorExtractor * extractor = new cv::ORB();
// cv::DescriptorExtractor * extractor = new cv::SIFT();
// cv::DescriptorExtractor * extractor = new cv::SURF();
cv::DescriptorExtractor * extractor = new cv::SURF(600.0);
extractor->compute(objectImg, objectKeypoints, objectDescriptors);
printf("Object: %d descriptors extracted in %d ms\n", objectDescriptors.rows, time.restart());
extractor->compute(sceneImg, sceneKeypoints, sceneDescriptors);
@ -187,7 +191,7 @@ int main(int argc, char * argv[])
}
QPen rectPen(color);
rectPen.setWidth(4);
QGraphicsRectItem * rectItem = new QGraphicsRectItem(objWidget.image().rect());
QGraphicsRectItem * rectItem = new QGraphicsRectItem(objWidget.pixmap().rect());
rectItem->setPen(rectPen);
rectItem->setTransform(hTransform);
sceneWidget.addRect(rectItem);
@ -201,54 +205,44 @@ int main(int argc, char * argv[])
// Wait for gui
objWidget.setGraphicsViewMode(false);
objWidget.setWindowTitle("Object");
if(objWidget.image().width() <= 800)
if(objWidget.pixmap().width() <= 800)
{
objWidget.setGeometry(0, 0, objWidget.image().width(), objWidget.image().height());
objWidget.setMinimumSize(objWidget.pixmap().width(), objWidget.pixmap().height());
}
else
{
objWidget.setGeometry(0, 0, 800, 600);
objWidget.setMinimumSize(800, 600);
objWidget.setAutoScale(false);
}
objWidget.show();
sceneWidget.setGraphicsViewMode(false);
sceneWidget.setWindowTitle("Scene");
if(sceneWidget.image().width() <= 800)
if(sceneWidget.pixmap().width() <= 800)
{
sceneWidget.setGeometry(0, 0, sceneWidget.image().width(), sceneWidget.image().height());
sceneWidget.setMinimumSize(sceneWidget.pixmap().width(), sceneWidget.pixmap().height());
}
else
{
sceneWidget.setGeometry(0, 0, 800, 600);
sceneWidget.setMinimumSize(800, 600);
sceneWidget.setAutoScale(false);
}
sceneWidget.show();
objWidget.show();
int r = app.exec();
printf("Closing...\n");
////////////////////////////
//Cleanup
////////////////////////////
delete detector;
delete extractor;
if(objectImg) {
cvReleaseImage(&objectImg);
}
if(sceneImg) {
cvReleaseImage(&sceneImg);
}
return r;
}
else
{
if(objectImg) {
cvReleaseImage(&objectImg);
}
if(sceneImg) {
cvReleaseImage(&sceneImg);
}
printf("Images are not valid!\n");
showUsage();
}
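
For reference, the new detection/extraction flow used above boils down to the following sketch (OpenCV 2.4 with the nonfree module; assume img is a grayscale cv::Mat as loaded with cv::imread above, and 600.0 is the Hessian threshold used in this example):

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp> // SIFT/SURF moved here in OpenCV 2.4

std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;

// cv::SURF derives from cv::Feature2D, so it can act as both
// a cv::FeatureDetector and a cv::DescriptorExtractor.
cv::FeatureDetector * detector = new cv::SURF(600.0);
cv::DescriptorExtractor * extractor = new cv::SURF(600.0);
detector->detect(img, keypoints);
extractor->compute(img, keypoints, descriptors);
delete detector;
delete extractor;
// No cvReleaseImage() needed: the cv::Mat images clean up after themselves.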

View File: src/AddObjectDialog.cpp

@ -7,7 +7,7 @@
#include "ObjWidget.h"
#include "KeypointItem.h"
#include "Camera.h"
#include "qtipl.h"
#include "QtOpenCV.h"
#include "Settings.h"
#include <stdio.h>
@ -16,14 +16,13 @@
#include <QtGui/QGraphicsPixmapItem>
#include <QtGui/QMessageBox>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
AddObjectDialog::AddObjectDialog(Camera * camera, const IplImage * image, bool mirrorView, QWidget * parent, Qt::WindowFlags f) :
AddObjectDialog::AddObjectDialog(Camera * camera, const cv::Mat & image, bool mirrorView, QWidget * parent, Qt::WindowFlags f) :
QDialog(parent, f),
camera_(camera),
object_(0),
cvImage_(0)
object_(0)
{
ui_ = new Ui_addObjectDialog();
ui_->setupUi(this);
@ -38,24 +37,19 @@ AddObjectDialog::AddObjectDialog(Camera * camera, const IplImage * image, bool m
connect(ui_->cameraView, SIGNAL(roiChanged(const QRect &)), this, SLOT(updateNextButton(const QRect &)));
ui_->cameraView->setMirrorView(mirrorView);
if((camera_ && camera_->isRunning()) || !image)
if((camera_ && camera_->isRunning()) || image.empty())
{
this->setState(kTakePicture);
}
else if(image)
else if(!image.empty())
{
cv::Mat img(image);
update(img);
update(image);
this->setState(kSelectFeatures);
}
}
AddObjectDialog::~AddObjectDialog()
{
if(cvImage_)
{
cvReleaseImage(&cvImage_);
}
if(object_)
{
delete object_;
@ -214,11 +208,11 @@ void AddObjectDialog::setState(int state)
std::vector<cv::KeyPoint> selectedKeypoints = ui_->cameraView->selectedKeypoints();
// Select keypoints
if(cvImage_ &&
if(!cvImage_.empty() &&
((ui_->comboBox_selection->currentIndex() == 1 && selectedKeypoints.size()) ||
(ui_->comboBox_selection->currentIndex() == 0 && !roi_.isNull())))
{
CvRect roi;
cv::Rect roi;
if(ui_->comboBox_selection->currentIndex() == 1)
{
roi = computeROI(selectedKeypoints);
@ -230,7 +224,7 @@ void AddObjectDialog::setState(int state)
roi.width = roi_.width();
roi.height = roi_.height();
}
cvSetImageROI(cvImage_, roi);
cv::Mat imgRoi(cvImage_, roi);
if(ui_->comboBox_selection->currentIndex() == 1)
{
@ -248,13 +242,12 @@ void AddObjectDialog::setState(int state)
// Extract keypoints
selectedKeypoints.clear();
cv::FeatureDetector * detector = Settings::createFeaturesDetector();
detector->detect(cvImage_, selectedKeypoints);
detector->detect(imgRoi, selectedKeypoints);
delete detector;
}
ui_->objectView->setData(selectedKeypoints, cv::Mat(), cvImage_, Settings::currentDetectorType(), "");
ui_->objectView->setData(selectedKeypoints, cv::Mat(), imgRoi, Settings::currentDetectorType(), "");
ui_->objectView->setMinimumSize(roi.width, roi.height);
ui_->objectView->update();
cvResetImageROI(cvImage_);
ui_->pushButton_next->setEnabled(true);
}
else
@ -275,7 +268,7 @@ void AddObjectDialog::setState(int state)
{
// Extract descriptors
cv::DescriptorExtractor * extractor = Settings::createDescriptorsExtractor();
extractor->compute(ui_->objectView->iplImage(), keypoints, descriptors);
extractor->compute(ui_->objectView->cvImage(), keypoints, descriptors);
delete extractor;
if(keypoints.size() != (unsigned int)descriptors.rows)
@ -289,7 +282,7 @@ void AddObjectDialog::setState(int state)
delete object_;
object_ = 0;
}
object_ = new ObjWidget(0, keypoints, descriptors, ui_->objectView->iplImage(), Settings::currentDetectorType(), Settings::currentDescriptorType());
object_ = new ObjWidget(0, keypoints, descriptors, ui_->objectView->cvImage(), Settings::currentDetectorType(), Settings::currentDescriptorType());
this->accept();
}
@ -298,22 +291,17 @@ void AddObjectDialog::setState(int state)
void AddObjectDialog::update(const cv::Mat & image)
{
if(cvImage_)
{
cvReleaseImage(&cvImage_);
cvImage_ = 0;
}
IplImage iplImage = image;
cvImage_ = cvCloneImage(& iplImage);
if(cvImage_)
cvImage_ = cv::Mat();
if(!image.empty())
{
// convert to grayscale
if(cvImage_->nChannels != 1 || cvImage_->depth != IPL_DEPTH_8U)
if(image.channels() != 1 || image.depth() != CV_8U)
{
IplImage * imageGrayScale = cvCreateImage(cvSize(cvImage_->width, cvImage_->height), IPL_DEPTH_8U, 1);
cvCvtColor(cvImage_, imageGrayScale, CV_BGR2GRAY);
cvReleaseImage(&cvImage_);
cvImage_ = imageGrayScale;
cv::cvtColor(image, cvImage_, CV_BGR2GRAY);
}
else
{
cvImage_ = image;
}
// Extract keypoints
@ -327,9 +315,9 @@ void AddObjectDialog::update(const cv::Mat & image)
}
}
CvRect AddObjectDialog::computeROI(const std::vector<cv::KeyPoint> & kpts)
cv::Rect AddObjectDialog::computeROI(const std::vector<cv::KeyPoint> & kpts)
{
CvRect roi = cvRect(0,0,0,0);
cv::Rect roi(0,0,0,0);
int x1=0,x2=0,h1=0,h2=0;
for(unsigned int i=0; i<kpts.size(); ++i)
{

View File: src/AddObjectDialog.h

@ -8,7 +8,7 @@
#include <QtGui/QDialog>
#include <QtCore/QTimer>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/core/core_c.h>
#include <opencv2/core/core.hpp>
class Ui_addObjectDialog;
class ObjWidget;
@ -20,7 +20,7 @@ class AddObjectDialog : public QDialog {
Q_OBJECT
public:
AddObjectDialog(Camera * camera, const IplImage * image, bool mirrorView, QWidget * parent = 0, Qt::WindowFlags f = 0);
AddObjectDialog(Camera * camera, const cv::Mat & image, bool mirrorView, QWidget * parent = 0, Qt::WindowFlags f = 0);
virtual ~AddObjectDialog();
// ownership transferred to caller
@ -41,12 +41,12 @@ protected:
private:
void setState(int state);
CvRect computeROI(const std::vector<cv::KeyPoint> & kpts);
cv::Rect computeROI(const std::vector<cv::KeyPoint> & kpts);
private:
Ui_addObjectDialog * ui_;
Camera * camera_;
ObjWidget * object_;
IplImage * cvImage_;
cv::Mat cvImage_;
enum State{kTakePicture, kSelectFeatures, kVerifySelection, kClosing};
int state_;

View File: src/MainWindow.cpp

@ -5,7 +5,7 @@
#include "MainWindow.h"
#include "AddObjectDialog.h"
#include "ui_mainWindow.h"
#include "qtipl.h"
#include "QtOpenCV.h"
#include "KeypointItem.h"
#include "ObjWidget.h"
#include "Camera.h"
@ -188,14 +188,14 @@ void MainWindow::saveObjects(const QString & dirPath)
{
for(int i=0; i<objects_.size(); ++i)
{
objects_.at(i)->image().save(QString("%1/%2.bmp").arg(dirPath).arg(objects_.at(i)->id()));
objects_.at(i)->pixmap().save(QString("%1/%2.bmp").arg(dirPath).arg(objects_.at(i)->id()));
}
}
}
void MainWindow::loadObjects()
{
QString dirPath = QFileDialog::getExistingDirectory(this, tr("Load objects..."), Settings::workingDirectory());
QString dirPath = QFileDialog::getExistingDirectory(this, tr("Loading objects... Select a directory."), Settings::workingDirectory());
if(!dirPath.isEmpty())
{
loadObjects(dirPath);
@ -203,7 +203,7 @@ void MainWindow::loadObjects()
}
bool MainWindow::saveObjects()
{
QString dirPath = QFileDialog::getExistingDirectory(this, tr("Save objects..."), Settings::workingDirectory());
QString dirPath = QFileDialog::getExistingDirectory(this, tr("Saving objects... Select a directory."), Settings::workingDirectory());
if(!dirPath.isEmpty())
{
saveObjects(dirPath);
@ -219,10 +219,9 @@ void MainWindow::removeObject(ObjWidget * object)
objects_.removeOne(object);
object->deleteLater();
this->updateData();
if(!camera_->isRunning() && ui_->imageView_source->iplImage())
if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{
cv::Mat image(ui_->imageView_source->iplImage(), true);
this->update(image);
this->update(ui_->imageView_source->cvImage());
}
}
}
@ -235,10 +234,9 @@ void MainWindow::removeAllObjects()
}
objects_.clear();
this->updateData();
if(!camera_->isRunning() && ui_->imageView_source->iplImage())
if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{
cv::Mat image(ui_->imageView_source->iplImage(), true);
this->update(image);
this->update(ui_->imageView_source->cvImage());
}
}
@ -247,13 +245,13 @@ void MainWindow::addObjectFromScene()
disconnect(camera_, SIGNAL(imageReceived(const cv::Mat &)), this, SLOT(update(const cv::Mat &)));
AddObjectDialog * dialog;
bool resumeCamera = camera_->isRunning();
if(!ui_->actionStart_camera->isEnabled() || !ui_->imageView_source->iplImage())
if(!ui_->actionStart_camera->isEnabled() || ui_->imageView_source->cvImage().empty())
{
dialog = new AddObjectDialog(camera_, ui_->imageView_source->iplImage(), ui_->imageView_source->isMirrorView(), this);
dialog = new AddObjectDialog(camera_, cv::Mat(), ui_->imageView_source->isMirrorView(), this);
}
else
{
dialog = new AddObjectDialog(0, ui_->imageView_source->iplImage(), ui_->imageView_source->isMirrorView(), this);
dialog = new AddObjectDialog(0, ui_->imageView_source->cvImage(), ui_->imageView_source->isMirrorView(), this);
}
if(dialog->exec() == QDialog::Accepted)
{
@ -266,15 +264,14 @@ void MainWindow::addObjectFromScene()
objectsModified_ = true;
}
}
if(resumeCamera || !ui_->imageView_source->iplImage())
if(resumeCamera || ui_->imageView_source->cvImage().empty())
{
this->startProcessing();
}
else
{
connect(camera_, SIGNAL(imageReceived(const cv::Mat &)), this, SLOT(update(const cv::Mat &)));
cv::Mat image(ui_->imageView_source->iplImage(), true);
this->update(image);
this->update(ui_->imageView_source->cvImage());
}
delete dialog;
}
@ -298,8 +295,8 @@ void MainWindow::addObjectFromFile(const QString & filePath)
printf("Load file %s\n", filePath.toStdString().c_str());
if(!filePath.isNull())
{
IplImage * img = cvLoadImage(filePath.toStdString().c_str(), CV_LOAD_IMAGE_GRAYSCALE);
if(img)
cv::Mat img = cv::imread(filePath.toStdString().c_str(), cv::IMREAD_GRAYSCALE);
if(!img.empty())
{
int id = 0;
QFileInfo file(filePath);
@ -334,7 +331,6 @@ void MainWindow::addObjectFromFile(const QString & filePath)
}
objects_.append(new ObjWidget(id, std::vector<cv::KeyPoint>(), cv::Mat(), img, "", ""));
this->showObject(objects_.last());
cvReleaseImage(&img);
}
}
}
@ -344,12 +340,10 @@ void MainWindow::loadSceneFromFile()
QString fileName = QFileDialog::getOpenFileName(this, tr("Load scene..."), Settings::workingDirectory(), tr("Image Files (%1)").arg(Settings::getGeneral_imageFormats()));
if(!fileName.isEmpty())
{
IplImage * img = cvLoadImage(fileName.toStdString().c_str());
if(img)
cv::Mat img = cv::imread(fileName.toStdString().c_str());
if(!img.empty())
{
cv::Mat imageMat(img);
this->update(imageMat);
cvReleaseImage(&img);
this->update(img);
ui_->label_timeRefreshRate->setVisible(false);
}
}
@ -387,7 +381,7 @@ void MainWindow::showObject(ObjWidget * obj)
obj->setMirrorView(ui_->imageView_source->isMirrorView());
QList<ObjWidget*> objs = ui_->objects_area->findChildren<ObjWidget*>();
QVBoxLayout * vLayout = new QVBoxLayout();
obj->setMinimumSize(obj->image().width(), obj->image().height());
obj->setMinimumSize(obj->pixmap().width(), obj->pixmap().height());
int id = Settings::getGeneral_nextObjID();
if(obj->id() == 0)
{
@ -430,7 +424,7 @@ void MainWindow::updateObjects()
{
for(int i=0; i<objects_.size(); ++i)
{
IplImage * img = cvCloneImage(objects_.at(i)->iplImage());
const cv::Mat & img = objects_.at(i)->cvImage();
cv::FeatureDetector * detector = Settings::createFeaturesDetector();
std::vector<cv::KeyPoint> keypoints;
detector->detect(img, keypoints);
@ -461,10 +455,9 @@ void MainWindow::updateObjects()
}
updateData();
}
if(!camera_->isRunning() && ui_->imageView_source->iplImage())
if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{
cv::Mat image(ui_->imageView_source->iplImage(), true);
this->update(image);
this->update(ui_->imageView_source->cvImage());
}
this->statusBar()->clearMessage();
}
@ -799,7 +792,7 @@ void MainWindow::update(const cv::Mat & image)
H.at<double>(0,2), H.at<double>(1,2), H.at<double>(2,2));
// find center of object
QRect rect = objects_.at(j)->image().rect();
QRect rect = objects_.at(j)->pixmap().rect();
objectsDetected.insert(objects_.at(j)->id(), QPair<QRect, QTransform>(rect, hTransform));
// Example getting the center of the object in the scene using the homography
//QPoint pos(rect.width()/2, rect.height()/2);
@ -899,10 +892,9 @@ void MainWindow::notifyParametersChanged()
{
this->statusBar()->showMessage(tr("A parameter has changed... \"Update objects\" may be required."));
}
if(!camera_->isRunning() && ui_->imageView_source->iplImage())
if(!camera_->isRunning() && !ui_->imageView_source->cvImage().empty())
{
cv::Mat image(ui_->imageView_source->iplImage(), true);
this->update(image);
this->update(ui_->imageView_source->cvImage());
ui_->label_timeRefreshRate->setVisible(false);
}

View File: src/ObjWidget.cpp

@ -4,7 +4,7 @@
#include "ObjWidget.h"
#include "KeypointItem.h"
#include "qtipl.h"
#include "QtOpenCV.h"
#include "Settings.h"
#include <opencv2/highgui/highgui.hpp>
@ -29,41 +29,35 @@
ObjWidget::ObjWidget(QWidget * parent) :
QWidget(parent),
iplImage_(0),
graphicsView_(0),
id_(0),
detectorType_("NA"),
descriptorType_("NA"),
graphicsViewInitialized_(false),
alpha_(50)
alpha_(100)
{
setupUi();
}
ObjWidget::ObjWidget(int id,
const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors,
const IplImage * iplImage,
const cv::Mat & image,
const QString & detectorType,
const QString & descriptorType,
QWidget * parent) :
QWidget(parent),
iplImage_(0),
graphicsView_(0),
id_(id),
detectorType_("NA"),
descriptorType_("NA"),
graphicsViewInitialized_(false),
alpha_(50)
alpha_(100)
{
setupUi();
this->setData(keypoints, descriptors, iplImage, detectorType, descriptorType);
this->setData(keypoints, descriptors, image, detectorType, descriptorType);
}
ObjWidget::~ObjWidget()
{
if(iplImage_)
{
cvReleaseImage(&iplImage_);
}
}
void ObjWidget::setupUi()
@ -247,7 +241,7 @@ void ObjWidget::setTextLabel(const QString & text)
void ObjWidget::setData(const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors,
const IplImage * image,
const cv::Mat & image,
const QString & detectorType,
const QString & descriptorType)
{
@ -261,30 +255,16 @@ void ObjWidget::setData(const std::vector<cv::KeyPoint> & keypoints,
detectorType_ = detectorType;
descriptorType_ = descriptorType;
mouseCurrentPos_ = mousePressedPos_; // this will reset roi selection
if(iplImage_)
{
cvReleaseImage(&iplImage_);
iplImage_ = 0;
}
if(image)
{
/* create destination image
Note that cvGetSize will return the width and the height of ROI */
iplImage_ = cvCreateImage(cvGetSize(image),
image->depth,
image->nChannels);
/* copy subimage */
cvCopy(image, iplImage_, NULL);
cvImage_ = image.clone();
pixmap_ = QPixmap::fromImage(cvtCvMat2QImage(cvImage_));
//this->setMinimumSize(image_.size());
image_ = QPixmap::fromImage(Ipl2QImage(iplImage_));
//this->setMinimumSize(image_.size());
}
if(graphicsViewMode_->isChecked())
{
this->setupGraphicsView();
}
label_->setVisible(!image);
label_->setVisible(image.empty());
}
void ObjWidget::resetKptsColor()
@ -385,7 +365,7 @@ void ObjWidget::save(QDataStream & streamPtr) const
descriptors_.type() <<
dataSize;
streamPtr << QByteArray((char*)descriptors_.data, dataSize);
streamPtr << image_;
streamPtr << pixmap_;
}
void ObjWidget::load(QDataStream & streamPtr)
@ -416,14 +396,9 @@ void ObjWidget::load(QDataStream & streamPtr)
QByteArray data;
streamPtr >> data;
descriptors = cv::Mat(rows, cols, type, data.data()).clone();
streamPtr >> image_;
this->setData(kpts, descriptors, 0, detectorType, descriptorType);
if(iplImage_)
{
cvReleaseImage(&iplImage_);
iplImage_ = 0;
}
iplImage_ = QImage2Ipl(image_.toImage());
streamPtr >> pixmap_;
this->setData(kpts, descriptors, cv::Mat(), detectorType, descriptorType);
cvImage_ = cvtQImage2CvMat(pixmap_.toImage());
//this->setMinimumSize(image_.size());
}
@ -433,10 +408,10 @@ void ObjWidget::computeScaleOffsets(float & scale, float & offsetX, float & offs
offsetX = 0.0f;
offsetY = 0.0f;
if(!image_.isNull())
if(!pixmap_.isNull())
{
float w = image_.width();
float h = image_.height();
float w = pixmap_.width();
float h = pixmap_.height();
float widthRatio = float(this->rect().width()) / w;
float heightRatio = float(this->rect().height()) / h;
@ -475,7 +450,7 @@ void ObjWidget::paintEvent(QPaintEvent *event)
}
else
{
if(!image_.isNull())
if(!pixmap_.isNull())
{
//Scale
float ratio, offsetX, offsetY;
@ -484,7 +459,7 @@ void ObjWidget::paintEvent(QPaintEvent *event)
if(mirrorView_->isChecked())
{
painter.translate(offsetX+image_.width()*ratio, offsetY);
painter.translate(offsetX+pixmap_.width()*ratio, offsetY);
painter.scale(-ratio, ratio);
}
else
@ -495,7 +470,7 @@ void ObjWidget::paintEvent(QPaintEvent *event)
if(showImage_->isChecked())
{
painter.drawPixmap(QPoint(0,0), image_);
painter.drawPixmap(QPoint(0,0), pixmap_);
}
if(showFeatures_->isChecked())
@ -523,15 +498,15 @@ void ObjWidget::paintEvent(QPaintEvent *event)
if(mirrorView_->isChecked())
{
int l = left;
left = qAbs(right - image_.width());
right = qAbs(l - image_.width());
left = qAbs(right - pixmap_.width());
right = qAbs(l - pixmap_.width());
}
painter.setPen(Qt::NoPen);
painter.setBrush(QBrush(QColor(0,0,0,100)));
painter.drawRect(0, 0, image_.width(), top-1);
painter.drawRect(0, 0, pixmap_.width(), top-1);
painter.drawRect(0, top, left, bottom-top);
painter.drawRect(right, top, image_.width()-right, bottom-top);
painter.drawRect(0, bottom, image_.width(), image_.height()-bottom);
painter.drawRect(right, top, pixmap_.width()-right, bottom-top);
painter.drawRect(0, bottom, pixmap_.width(), pixmap_.height()-bottom);
painter.restore();
}
}
@ -570,7 +545,7 @@ void ObjWidget::mouseMoveEvent(QMouseEvent * event)
void ObjWidget::mouseReleaseEvent(QMouseEvent * event)
{
if(!image_.isNull())
if(!pixmap_.isNull())
{
int left,top,bottom,right;
@ -582,8 +557,8 @@ void ObjWidget::mouseReleaseEvent(QMouseEvent * event)
if(mirrorView_->isChecked())
{
int l = left;
left = qAbs(right - image_.width());
right = qAbs(l - image_.width());
left = qAbs(right - pixmap_.width());
right = qAbs(l - pixmap_.width());
}
emit roiChanged(QRect(left, top, right-left, bottom-top));
@ -754,15 +729,15 @@ std::vector<cv::KeyPoint> ObjWidget::selectedKeypoints() const
void ObjWidget::setupGraphicsView()
{
if(!image_.isNull())
if(!pixmap_.isNull())
{
graphicsView_->scene()->setSceneRect(image_.rect());
graphicsView_->scene()->setSceneRect(pixmap_.rect());
QList<KeypointItem*> items;
if(image_.width() > 0 && image_.height() > 0)
if(pixmap_.width() > 0 && pixmap_.height() > 0)
{
QRectF sceneRect = graphicsView_->sceneRect();
QGraphicsPixmapItem * pixmapItem = graphicsView_->scene()->addPixmap(image_);
QGraphicsPixmapItem * pixmapItem = graphicsView_->scene()->addPixmap(pixmap_);
pixmapItem->setVisible(this->isImageShown());
this->drawKeypoints();

View File: src/ObjWidget.h

@ -27,7 +27,7 @@ public:
ObjWidget(int id,
const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors,
const IplImage * image,
const cv::Mat & image,
const QString & detectorType = "NA",
const QString & descriptorType = "NA",
QWidget * parent = 0);
@ -36,7 +36,7 @@ public:
void setId(int id);
void setData(const std::vector<cv::KeyPoint> & keypoints,
const cv::Mat & descriptors,
const IplImage * image,
const cv::Mat & image,
const QString & detectorType,
const QString & descriptorType);
void setTextLabel(const QString & text);
@ -53,8 +53,8 @@ public:
const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
const cv::Mat & descriptors() const {return descriptors_;}
const QPixmap & image() const {return image_;}
const IplImage * iplImage() const {return iplImage_;}
const QPixmap & pixmap() const {return pixmap_;}
const cv::Mat & cvImage() const {return cvImage_;}
int id() const {return id_;}
QColor defaultColor() const;
bool isImageShown() const;
@ -95,8 +95,8 @@ private:
private:
std::vector<cv::KeyPoint> keypoints_;
cv::Mat descriptors_;
QPixmap image_;
IplImage * iplImage_;
QPixmap pixmap_;
cv::Mat cvImage_;
QList<KeypointItem*> keypointItems_;
QGraphicsView * graphicsView_;
int id_;

src/QtOpenCV.cpp (new file, 112 lines)

@ -0,0 +1,112 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#include "QtOpenCV.h"
#include <opencv2/core/core_c.h>
#include <stdio.h>
QImage cvtCvMat2QImage(const cv::Mat & image)
{
QImage qtemp;
if(!image.empty() && image.depth() == CV_8U)
{
const unsigned char * data = image.data;
qtemp = QImage(image.cols, image.rows, QImage::Format_RGB32);
for(int y = 0; y < image.rows; ++y, data += image.cols*image.elemSize())
{
for(int x = 0; x < image.cols; ++x)
{
QRgb * p = ((QRgb*)qtemp.scanLine (y)) + x;
*p = qRgb(data[x * image.channels()+2], data[x * image.channels()+1], data[x * image.channels()]);
}
}
}
else if(!image.empty() && image.depth() != CV_8U)
{
printf("Wrong image format, must be 8_bits\n");
}
return qtemp;
}
cv::Mat cvtQImage2CvMat(const QImage & image)
{
cv::Mat cvImage;
if(!image.isNull() && image.depth() == 32 && image.format() == QImage::Format_RGB32)
{
// assume RGB (3 channels)
int channels = 3;
cvImage = cv::Mat(image.height(), image.width(), CV_8UC3);
unsigned char * data = cvImage.data;
const IplImage test = cvImage;
printf("%d vs %d\n", cvImage.cols*cvImage.elemSize(), test.widthStep);
for(int y = 0; y < image.height(); ++y, data+=cvImage.cols*cvImage.elemSize())
{
for(int x = 0; x < image.width(); ++x)
{
QRgb rgb = image.pixel(x, y);
data[x * channels+2] = qRed(rgb); //r
data[x * channels+1] = qGreen(rgb); //g
data[x * channels] = qBlue(rgb); //b
}
}
}
else
{
printf("Failed to convert image : depth=%d(!=32) format=%d(!=%d)\n", image.depth(), image.format(), QImage::Format_RGB32);
}
return cvImage;
}
QImage cvtIplImage2QImage(const IplImage * image)
{
QImage qtemp;
if (image && image->depth == IPL_DEPTH_8U && cvGetSize(image).width>0)
{
const char * data = image->imageData;
qtemp= QImage(image->width, image->height,QImage::Format_RGB32);
for(int y = 0; y < image->height; ++y, data +=image->widthStep )
{
for(int x = 0; x < image->width; ++x)
{
uint *p = (uint*)qtemp.scanLine (y) + x;
*p = qRgb(data[x * image->nChannels+2], data[x * image->nChannels+1],data[x * image->nChannels]);
}
}
}
else if(image && image->depth != IPL_DEPTH_8U)
{
printf("Wrong iplImage format, must be 8_bits\n");
}
return qtemp;
}
// Returned image must be released explicitly (using cvReleaseImage()).
IplImage * cvtQImage2IplImage(const QImage & image)
{
IplImage * iplTmp = 0;
if(!image.isNull() && image.depth() == 32 && image.format() == QImage::Format_RGB32)
{
// assume RGB (3 channels)
int channels = 3;
iplTmp = cvCreateImage(cvSize(image.width(), image.height()), IPL_DEPTH_8U, channels);
char * data = iplTmp->imageData;
for(int y = 0; y < image.height(); ++y, data+=iplTmp->widthStep)
{
for(int x = 0; x < image.width(); ++x)
{
QRgb rgb = image.pixel(x, y);
data[x * channels+2] = qRed(rgb); //r
data[x * channels+1] = qGreen(rgb); //g
data[x * channels] = qBlue(rgb); //b
}
}
}
else
{
printf("Failed to convert image : depth=%d(!=32) format=%d(!=%d)\n", image.depth(), image.format(), QImage::Format_RGB32);
}
return iplTmp;
}

src/QtOpenCV.h (new file, 23 lines)

@ -0,0 +1,23 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#ifndef QTOPENCV_H
#define QTOPENCV_H
#include <QtGui/QImage>
#include <opencv2/core/core.hpp>
// Convert OpenCV matrix to QImage
QImage cvtCvMat2QImage(const cv::Mat & image);
// Convert QImage to OpenCV matrix
cv::Mat cvtQImage2CvMat(const QImage & image);
// Convert IplImage to QImage
QImage cvtIplImage2QImage(const IplImage * image);
// Convert QImage to IplImage
IplImage * cvtQImage2IplImage(const QImage & image);
#endif // QTOPENCV_H
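
A hedged usage sketch for the converters declared above (the image path is illustrative; cv::imread yields the 8-bit BGR cv::Mat that cvtCvMat2QImage expects):

#include "QtOpenCV.h"
#include <opencv2/highgui/highgui.hpp>

cv::Mat bgr = cv::imread("photo.png");     // CV_8UC3, BGR channel order
QImage qimg = cvtCvMat2QImage(bgr);        // QImage::Format_RGB32
cv::Mat back = cvtQImage2CvMat(qimg);      // CV_8UC3 (BGR) again

IplImage * ipl = cvtQImage2IplImage(qimg); // caller owns the result
if(ipl)
{
    cvReleaseImage(&ipl); // required, per the comment in QtOpenCV.cpp
}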

View File: src/Settings.cpp

@ -8,6 +8,7 @@
#include <QtCore/QStringList>
#include <QtCore/QDir>
#include <stdio.h>
#include <opencv2/nonfree/features2d.hpp>
ParametersMap Settings::defaultParameters_;
ParametersMap Settings::parameters_;
@ -108,15 +109,14 @@ cv::FeatureDetector * Settings::createFeaturesDetector()
case 0:
if(strategies.at(index).compare("Dense") == 0)
{
cv::DenseFeatureDetector::Params params;
params.initFeatureScale = getDense_initFeatureScale();
params.featureScaleLevels = getDense_featureScaleLevels();
params.featureScaleMul = getDense_featureScaleMul();
params.initXyStep = getDense_initXyStep();
params.initImgBound = getDense_initImgBound();
params.varyXyStepWithScale = getDense_varyXyStepWithScale();
params.varyImgBoundWithScale = getDense_varyImgBoundWithScale();
detector = new cv::DenseFeatureDetector(params);
detector = new cv::DenseFeatureDetector(
getDense_initFeatureScale(),
getDense_featureScaleLevels(),
getDense_featureScaleMul(),
getDense_initXyStep(),
getDense_initImgBound(),
getDense_varyXyStepWithScale(),
getDense_varyImgBoundWithScale());
}
break;
case 1:
@ -128,83 +128,77 @@ cv::FeatureDetector * Settings::createFeaturesDetector()
}
break;
case 2:
if(strategies.at(index).compare("GoodFeaturesToTrack") == 0)
if(strategies.at(index).compare("GFTT") == 0)
{
cv::GoodFeaturesToTrackDetector::Params params;
params.maxCorners = getGoodFeaturesToTrack_maxCorners();
params.qualityLevel = getGoodFeaturesToTrack_qualityLevel();
params.minDistance = getGoodFeaturesToTrack_minDistance();
params.blockSize = getGoodFeaturesToTrack_blockSize();
params.useHarrisDetector = getGoodFeaturesToTrack_useHarrisDetector();
params.k = getGoodFeaturesToTrack_k();
detector = new cv::GoodFeaturesToTrackDetector(params);
detector = new cv::GFTTDetector(
getGFTT_maxCorners(),
getGFTT_qualityLevel(),
getGFTT_minDistance(),
getGFTT_blockSize(),
getGFTT_useHarrisDetector(),
getGFTT_k());
}
break;
case 3:
if(strategies.at(index).compare("Mser") == 0)
if(strategies.at(index).compare("MSER") == 0)
{
CvMSERParams params = cvMSERParams();
params.delta = getMser_delta();
params.maxArea = getMser_maxArea();
params.minArea = getMser_minArea();
params.maxVariation = getMser_maxVariation();
params.minDiversity = getMser_minDiversity();
params.maxEvolution = getMser_maxEvolution();
params.areaThreshold = getMser_areaThreshold();
params.minMargin = getMser_minMargin();
params.edgeBlurSize = getMser_edgeBlurSize();
detector = new cv::MserFeatureDetector(params);
detector = new cv::MSER(
getMSER_delta(),
getMSER_minArea(),
getMSER_maxArea(),
getMSER_maxVariation(),
getMSER_minDiversity(),
getMSER_maxEvolution(),
getMSER_areaThreshold(),
getMSER_minMargin(),
getMSER_edgeBlurSize());
}
break;
case 4:
if(strategies.at(index).compare("Orb") == 0)
if(strategies.at(index).compare("ORB") == 0)
{
cv::ORB::CommonParams params;
params.scale_factor_ = getOrb_scaleFactor();
params.n_levels_ = getOrb_nLevels();
params.first_level_ = getOrb_firstLevel();
params.edge_threshold_ = getOrb_edgeThreshold();
detector = new cv::OrbFeatureDetector(
getOrb_nFeatures(),
params);
detector = new cv::ORB(
getORB_nFeatures(),
getORB_scaleFactor(),
getORB_nLevels(),
getORB_edgeThreshold(),
getORB_firstLevel(),
getORB_WTA_K(),
getORB_scoreType(),
getORB_patchSize());
}
break;
case 5:
if(strategies.at(index).compare("Sift") == 0)
if(strategies.at(index).compare("SIFT") == 0)
{
cv::SIFT::DetectorParams detectorParams;
detectorParams.edgeThreshold = getSift_edgeThreshold();
detectorParams.threshold = getSift_threshold();
cv::SIFT::CommonParams commonParams;
commonParams.angleMode = getSift_angleMode();
commonParams.firstOctave = getSift_firstOctave();
commonParams.nOctaveLayers = getSift_nOctaveLayers();
commonParams.nOctaves = getSift_nOctaves();
detector = new cv::SiftFeatureDetector(
detectorParams,
commonParams);
detector = new cv::SIFT(
getSIFT_nfeatures(),
getSIFT_nOctaveLayers(),
getSIFT_contrastThreshold(),
getSIFT_edgeThreshold(),
getSIFT_sigma());
}
break;
case 6:
if(strategies.at(index).compare("Star") == 0)
{
CvStarDetectorParams params = cvStarDetectorParams();
params.lineThresholdBinarized = getStar_lineThresholdBinarized();
params.lineThresholdProjected = getStar_lineThresholdProjected();
params.maxSize = getStar_maxSize();
params.responseThreshold = getStar_responseThreshold();
params.suppressNonmaxSize = getStar_suppressNonmaxSize();
detector = new cv::StarFeatureDetector(params);
detector = new cv::StarFeatureDetector(
getStar_maxSize(),
getStar_responseThreshold(),
getStar_lineThresholdProjected(),
getStar_lineThresholdBinarized(),
getStar_suppressNonmaxSize());
}
break;
case 7:
if(strategies.at(index).compare("Surf") == 0)
if(strategies.at(index).compare("SURF") == 0)
{
detector = new cv::SurfFeatureDetector(
getSurf_hessianThreshold(),
getSurf_octaves(),
getSurf_octaveLayers(),
getSurf_upright());
detector = new cv::SURF(
getSURF_hessianThreshold(),
getSURF_nOctaves(),
getSURF_nOctaveLayers(),
getSURF_extended(),
getSURF_upright());
}
break;
default:
@ -240,41 +234,39 @@ cv::DescriptorExtractor * Settings::createDescriptorsExtractor()
}
break;
case 1:
if(strategies.at(index).compare("Orb") == 0)
if(strategies.at(index).compare("ORB") == 0)
{
cv::ORB::CommonParams params;
params.scale_factor_ = getOrb_scaleFactor();
params.n_levels_ = getOrb_nLevels();
params.first_level_ = getOrb_firstLevel();
params.edge_threshold_ = getOrb_edgeThreshold();
extractor = new cv::OrbDescriptorExtractor(params);
extractor = new cv::ORB(
getORB_nFeatures(),
getORB_scaleFactor(),
getORB_nLevels(),
getORB_edgeThreshold(),
getORB_firstLevel(),
getORB_WTA_K(),
getORB_scoreType(),
getORB_patchSize());
}
break;
case 2:
if(strategies.at(index).compare("Sift") == 0)
if(strategies.at(index).compare("SIFT") == 0)
{
cv::SIFT::DescriptorParams descriptorParams;
descriptorParams.isNormalize = getSift_isNormalize();
descriptorParams.magnification = getSift_magnification();
descriptorParams.recalculateAngles = getSift_recalculateAngles();
cv::SIFT::CommonParams commonParams;
commonParams.angleMode = getSift_angleMode();
commonParams.firstOctave = getSift_firstOctave();
commonParams.nOctaveLayers = getSift_nOctaveLayers();
commonParams.nOctaves = getSift_nOctaves();
extractor = new cv::SiftDescriptorExtractor(
descriptorParams,
commonParams);
extractor = new cv::SIFT(
getSIFT_nfeatures(),
getSIFT_nOctaveLayers(),
getSIFT_contrastThreshold(),
getSIFT_edgeThreshold(),
getSIFT_sigma());
}
break;
case 3:
if(strategies.at(index).compare("Surf") == 0)
if(strategies.at(index).compare("SURF") == 0)
{
extractor = new cv::SurfDescriptorExtractor(
getSurf_octaves(),
getSurf_octaveLayers(),
getSurf_extended(),
getSurf_upright());
extractor = new cv::SURF(
getSURF_hessianThreshold(),
getSURF_nOctaves(),
getSURF_nOctaveLayers(),
getSURF_extended(),
getSURF_upright());
}
break;
default:

View File: src/Settings.h

@ -59,66 +59,65 @@ class Settings
PARAMETER(Camera, videoFilePath, QString, "");
//List format : [Index:item0;item1;item3;...]
PARAMETER(Detector, Type, QString, "7:Dense;Fast;GoodFeaturesToTrack;Mser;Orb;Sift;Star;Surf");
PARAMETER(Descriptor, Type, QString, "3:Brief;Orb;Sift;Surf");
PARAMETER(Detector, Type, QString, "7:Dense;Fast;GFTT;MSER;ORB;SIFT;Star;SURF");
PARAMETER(Descriptor, Type, QString, "3:Brief;ORB;SIFT;SURF");
PARAMETER(Brief, bytes, int, 32);
PARAMETER(Dense, initFeatureScale, float, cv::DenseFeatureDetector::Params().initFeatureScale);
PARAMETER(Dense, featureScaleLevels, int, cv::DenseFeatureDetector::Params().featureScaleLevels);
PARAMETER(Dense, featureScaleMul, float, cv::DenseFeatureDetector::Params().featureScaleMul);
PARAMETER(Dense, initXyStep, int, cv::DenseFeatureDetector::Params().initXyStep);
PARAMETER(Dense, initImgBound, int, cv::DenseFeatureDetector::Params().initImgBound);
PARAMETER(Dense, varyXyStepWithScale, bool, cv::DenseFeatureDetector::Params().varyXyStepWithScale);
PARAMETER(Dense, varyImgBoundWithScale, bool, cv::DenseFeatureDetector::Params().varyImgBoundWithScale);
PARAMETER(Dense, initFeatureScale, float, 1.f);
PARAMETER(Dense, featureScaleLevels, int, 1);
PARAMETER(Dense, featureScaleMul, float, 0.1f);
PARAMETER(Dense, initXyStep, int, 6);
PARAMETER(Dense, initImgBound, int, 0);
PARAMETER(Dense, varyXyStepWithScale, bool, true);
PARAMETER(Dense, varyImgBoundWithScale, bool, false);
PARAMETER(Fast, threshold, int, 20);
PARAMETER(Fast, threshold, int, 10);
PARAMETER(Fast, nonmaxSuppression, bool, true);
PARAMETER(GoodFeaturesToTrack, maxCorners, int, cv::GoodFeaturesToTrackDetector::Params().maxCorners);
PARAMETER(GoodFeaturesToTrack, qualityLevel, double, cv::GoodFeaturesToTrackDetector::Params().qualityLevel);
PARAMETER(GoodFeaturesToTrack, minDistance, double, cv::GoodFeaturesToTrackDetector::Params().minDistance);
PARAMETER(GoodFeaturesToTrack, blockSize, int, cv::GoodFeaturesToTrackDetector::Params().blockSize);
PARAMETER(GoodFeaturesToTrack, useHarrisDetector, bool, cv::GoodFeaturesToTrackDetector::Params().useHarrisDetector);
PARAMETER(GoodFeaturesToTrack, k, double, cv::GoodFeaturesToTrackDetector::Params().k);
PARAMETER(GFTT, maxCorners, int, 1000);
PARAMETER(GFTT, qualityLevel, double, 0.01);
PARAMETER(GFTT, minDistance, double, 1);
PARAMETER(GFTT, blockSize, int, 3);
PARAMETER(GFTT, useHarrisDetector, bool, false);
PARAMETER(GFTT, k, double, 0.04);
PARAMETER(Orb, nFeatures, uint, 700);
PARAMETER(Orb, scaleFactor, float, cv::ORB::CommonParams().scale_factor_);
PARAMETER(Orb, nLevels, uint, cv::ORB::CommonParams().n_levels_);
PARAMETER(Orb, firstLevel, uint, cv::ORB::CommonParams().first_level_);
PARAMETER(Orb, edgeThreshold, uint, cv::ORB::CommonParams().edge_threshold_);
PARAMETER(ORB, nFeatures, int, 500);
PARAMETER(ORB, scaleFactor, float, 1.2f);
PARAMETER(ORB, nLevels, int, 8);
PARAMETER(ORB, edgeThreshold, int, 31);
PARAMETER(ORB, firstLevel, int, 0);
PARAMETER(ORB, WTA_K, int, 2);
PARAMETER(ORB, scoreType, int, 0);
PARAMETER(ORB, patchSize, int, 31);
PARAMETER(Mser, delta, int, cvMSERParams().delta);
PARAMETER(Mser, minArea, int, cvMSERParams().minArea);
PARAMETER(Mser, maxArea, int, cvMSERParams().maxArea);
PARAMETER(Mser, maxVariation, float, cvMSERParams().maxVariation);
PARAMETER(Mser, minDiversity, float, cvMSERParams().minDiversity);
PARAMETER(Mser, maxEvolution, int, cvMSERParams().maxEvolution);
PARAMETER(Mser, areaThreshold, double, cvMSERParams().areaThreshold);
PARAMETER(Mser, minMargin, double, cvMSERParams().minMargin);
PARAMETER(Mser, edgeBlurSize, int, cvMSERParams().edgeBlurSize);
PARAMETER(MSER, delta, int, 5);
PARAMETER(MSER, minArea, int, 60);
PARAMETER(MSER, maxArea, int, 14400);
PARAMETER(MSER, maxVariation, double, 0.25);
PARAMETER(MSER, minDiversity, double, 0.2);
PARAMETER(MSER, maxEvolution, int, 200);
PARAMETER(MSER, areaThreshold, double, 1.01);
PARAMETER(MSER, minMargin, double, 0.003);
PARAMETER(MSER, edgeBlurSize, int, 5);
PARAMETER(Sift, threshold, double, cv::SIFT::DetectorParams::GET_DEFAULT_THRESHOLD());
PARAMETER(Sift, edgeThreshold, double, cv::SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD());
PARAMETER(Sift, nOctaves, int, cv::SIFT::CommonParams::DEFAULT_NOCTAVES);
PARAMETER(Sift, nOctaveLayers, int, cv::SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS);
PARAMETER(Sift, firstOctave, int, cv::SIFT::CommonParams::DEFAULT_FIRST_OCTAVE);
PARAMETER(Sift, angleMode, int, cv::SIFT::CommonParams::FIRST_ANGLE);
PARAMETER(Sift, magnification, double, cv::SIFT::DescriptorParams::GET_DEFAULT_MAGNIFICATION());
PARAMETER(Sift, isNormalize, bool, cv::SIFT::DescriptorParams::DEFAULT_IS_NORMALIZE);
PARAMETER(Sift, recalculateAngles, bool, true);
PARAMETER(SIFT, nfeatures, int, 0);
PARAMETER(SIFT, nOctaveLayers, int, 3);
PARAMETER(SIFT, contrastThreshold, double, 0.04);
PARAMETER(SIFT, edgeThreshold, double, 10);
PARAMETER(SIFT, sigma, double, 1.6);
PARAMETER(Star, maxSize, int, cvStarDetectorParams().maxSize);
PARAMETER(Star, responseThreshold, int, cvStarDetectorParams().responseThreshold);
PARAMETER(Star, lineThresholdProjected, int, cvStarDetectorParams().lineThresholdProjected);
PARAMETER(Star, lineThresholdBinarized, int, cvStarDetectorParams().lineThresholdBinarized);
PARAMETER(Star, suppressNonmaxSize, int, cvStarDetectorParams().suppressNonmaxSize);
PARAMETER(Star, maxSize, int, 45);
PARAMETER(Star, responseThreshold, int, 30);
PARAMETER(Star, lineThresholdProjected, int, 10);
PARAMETER(Star, lineThresholdBinarized, int, 8);
PARAMETER(Star, suppressNonmaxSize, int, 5);
PARAMETER(Surf, hessianThreshold, double, 600.0);
PARAMETER(Surf, octaves, int, 3);
PARAMETER(Surf, octaveLayers, int, 4);
PARAMETER(Surf, upright, bool, false);
PARAMETER(Surf, extended, bool, false);
PARAMETER(SURF, hessianThreshold, double, 600.0);
PARAMETER(SURF, nOctaves, int, 4);
PARAMETER(SURF, nOctaveLayers, int, 2);
PARAMETER(SURF, extended, bool, true);
PARAMETER(SURF, upright, bool, false);
PARAMETER(NearestNeighbor, nndrRatioUsed, bool, true);
PARAMETER(NearestNeighbor, nndrRatio, float, 0.8f);
@ -126,7 +125,7 @@ class Settings
PARAMETER(NearestNeighbor, minDistance, float, 1.6f);
PARAMETER(General, autoStartCamera, bool, false);
PARAMETER(General, autoUpdateObjects, bool, false);
PARAMETER(General, autoUpdateObjects, bool, true);
PARAMETER(General, nextObjID, uint, 1);
PARAMETER(General, imageFormats, QString, "*.png *.jpg *.bmp *.tiff")
PARAMETER(General, videoFormats, QString, "*.avi *.m4v")
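
For orientation, each PARAMETER(Group, name, type, default) entry evidently pairs with a generated getGroup_name() accessor; the unqualified calls in Settings.cpp above suggest these are static members of Settings. A small sketch using the SURF entries (accessor names inferred from Settings.cpp, not shown in this diff):

double hessian = Settings::getSURF_hessianThreshold(); // 600.0 by default
bool extended = Settings::getSURF_extended();          // true by default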

View File: src/qtipl.cpp (deleted, replaced by src/QtOpenCV.cpp)

@ -1,64 +0,0 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#include "qtipl.h"
#include <opencv2/core/core_c.h>
#include <stdio.h>
// TODO : support only from gray 8bits ?
QImage Ipl2QImage(const IplImage *newImage)
{
QImage qtemp;
if (newImage && newImage->depth == IPL_DEPTH_8U && cvGetSize(newImage).width>0)
{
int x;
int y;
char* data = newImage->imageData;
qtemp= QImage(newImage->width, newImage->height,QImage::Format_RGB32);
for( y = 0; y < newImage->height; ++y, data +=newImage->widthStep )
{
for( x = 0; x < newImage->width; ++x)
{
uint *p = (uint*)qtemp.scanLine (y) + x;
*p = qRgb(data[x * newImage->nChannels+2], data[x * newImage->nChannels+1],data[x * newImage->nChannels]);
}
}
}
else
{
//Wrong IplImage format
}
return qtemp;
}
IplImage * QImage2Ipl(const QImage & image)
{
IplImage * iplTmp = 0;
if(!image.isNull() && image.depth() == 32 && image.format() == QImage::Format_RGB32)
{
int x;
int y;
// assume RGB (3 channels)
int channels = 3;
iplTmp = cvCreateImage(cvSize(image.width(), image.height()), IPL_DEPTH_8U, channels);
char* data = iplTmp->imageData;
for( y = 0; y < image.height(); ++y, data+=iplTmp->widthStep)
{
for( x = 0; x < image.width(); ++x)
{
QRgb rgb = image.pixel(x, y);
data[x * channels+2] = qRed(rgb); //r
data[x * channels+1] = qGreen(rgb); //g
data[x * channels] = qBlue(rgb); //b
}
}
}
else
{
printf("failed to convert image : depth=%d(!=32) format=%d(!=%d)\n", image.depth(), image.format(), QImage::Format_RGB32);
}
return iplTmp;
}

View File: src/qtipl.h (deleted, replaced by src/QtOpenCV.h)

@ -1,17 +0,0 @@
/*
* Copyright (C) 2011, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
*/
#ifndef QTIPL_H
#define QTIPL_H
#include <QtGui/QImage>
#include <opencv2/core/core.hpp>
// IplImage to QImage
QImage Ipl2QImage(const IplImage *newImage);
// QImage to IplImage
IplImage * QImage2Ipl(const QImage & image);
#endif