code for tracking lines

This commit is contained in:
Apoorva Gupta 2023-02-25 13:43:39 +05:30
parent 7fe3495541
commit 03881adbde
4 changed files with 118 additions and 20 deletions

View File

@@ -74,6 +74,7 @@ void CsvReader::readPLY(vector<Point3f> &list_vertex, vector<vector<int> > &list
tmp_triangle[0] = StringToInt(id0);
tmp_triangle[1] = StringToInt(id1);
tmp_triangle[2] = StringToInt(id2);
std::cout << "id0: " << tmp_triangle[0] << " id1: " << tmp_triangle[1] << " id2: " << tmp_triangle[2] << std::endl;
list_triangles.push_back(tmp_triangle);
count++;

View File

@@ -19,6 +19,9 @@
#include <opencv2/xfeatures2d.hpp>
#endif
#include <opencv2/line_descriptor.hpp>
using namespace cv::line_descriptor;
// For text
const int fontFace = cv::FONT_ITALIC;
const double fontScale = 0.75;

View File

@@ -47,7 +47,7 @@ int main(int argc, char *argv[])
"{inliers in |30 | minimum inliers for Kalman update }"
"{method pnp |0 | PnP method: (0) ITERATIVE - (1) EPNP - (2) P3P - (3) DLS - (5) AP3P}"
"{fast f |true | use of robust fast match }"
"{feature |ORB | feature name (ORB, KAZE, AKAZE, BRISK, SIFT, SURF, BINBOOST, VGG) }"
"{feature |ORB | feature name (Line, ORB, KAZE, AKAZE, BRISK, SIFT, SURF, BINBOOST, VGG) }"
"{FLANN |false | use FLANN library for descriptors matching }"
"{save | | path to the directory where to save the image results }"
"{displayFiltered |false | display filtered pose (from Kalman filter) }"
@@ -101,7 +101,7 @@ int main(int argc, char *argv[])
// PnP parameters
int pnpMethod = SOLVEPNP_ITERATIVE;
string featureName = "ORB";
string featureName = "Line";
bool useFLANN = false;
// Save results
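Note: this hunk only switches the default featureName to "Line"; the diff for the detection program does not itself show how keylines would be extracted and matched each frame. A minimal sketch of what such a branch might look like is given below. It is not part of this commit; it simply mirrors the BinaryDescriptor usage added to the registration code in the last file of this diff, and assumes the model's line descriptors have already been loaded from the *.yml file.

// Hypothetical keyline branch for the detection loop; not part of this commit.
#include <opencv2/core.hpp>
#include <opencv2/line_descriptor.hpp>
#include <vector>

static void matchFrameLines(const cv::Mat &frame,
                            const cv::Mat &descriptors_model,   // line descriptors stored in the model
                            std::vector<cv::line_descriptor::KeyLine> &keylines_frame,
                            cv::Mat &descriptors_frame,
                            std::vector<cv::DMatch> &matches)
{
    using cv::line_descriptor::BinaryDescriptor;
    using cv::line_descriptor::BinaryDescriptorMatcher;

    // Detect line segments in the current frame and compute their binary descriptors
    cv::Ptr<BinaryDescriptor> bd = BinaryDescriptor::createBinaryDescriptor();
    cv::Mat mask = cv::Mat::ones(frame.size(), CV_8UC1);
    bd->detect(frame, keylines_frame, mask);
    bd->compute(frame, keylines_frame, descriptors_frame);

    // Match the frame descriptors against the registered model descriptors
    cv::Ptr<BinaryDescriptorMatcher> matcher = BinaryDescriptorMatcher::createBinaryDescriptorMatcher();
    matcher->match(descriptors_frame, descriptors_model, matches);
}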

View File

@@ -13,9 +13,11 @@
#include "ModelRegistration.h"
#include "Utils.h"
#include <opencv2/line_descriptor.hpp>
using namespace cv;
using namespace std;
using namespace cv::line_descriptor;
/** GLOBAL VARIABLES **/
// Boolean to know if the registration is done
@@ -103,7 +105,7 @@ int main(int argc, char *argv[])
"{model | | path to output yml model }"
"{mesh | | path to ply mesh }"
"{keypoints k |2000 | number of keypoints to detect (only for ORB) }"
"{feature |ORB | feature name (ORB, KAZE, AKAZE, BRISK, SIFT, SURF, BINBOOST, VGG) }"
"{feature |ORB | feature name (Line, ORB, KAZE, AKAZE, BRISK, SIFT, SURF, BINBOOST, VGG) }"
;
CommandLineParser parser(argc, argv, keys);
@@ -143,7 +145,15 @@ int main(int argc, char *argv[])
//Instantiate robust matcher: detector, extractor, matcher
RobustMatcher rmatcher;
Ptr<Feature2D> detector, descriptor;
createFeatures(featureName, numKeyPoints, detector, descriptor);
Ptr<BinaryDescriptor> bd = BinaryDescriptor::createBinaryDescriptor();
if(featureName == "Line")
{
bd = BinaryDescriptor::createBinaryDescriptor();
}
else
{
createFeatures(featureName, numKeyPoints, detector, descriptor);
}
rmatcher.setFeatureDetector(detector);
rmatcher.setDescriptorExtractor(descriptor);
@@ -249,26 +259,110 @@ int main(int argc, char *argv[])
vector<KeyPoint> keypoints_model;
Mat descriptors;
// Compute keypoints and descriptors
rmatcher.computeKeyPoints(img_in, keypoints_model);
rmatcher.computeDescriptors(img_in, keypoints_model, descriptors);
/* compute lines */
std::vector<KeyLine> keylines;
// Check if keypoints are on the surface of the registration image and add to the model
for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
Point2f point2d(keypoints_model[i].pt);
Point3f point3d;
bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
if (on_surface)
cv::Rect bbox(78, 152, 430, 550); // define the bounding box
cv::Mat roi = img_in(bbox); // extract the region of interest (ROI) inside the bounding box
imshow( "roi", roi );
waitKey();
if(featureName == "Line")
{
/* create a binary mask */
cv::Mat mask = Mat::ones( roi.size(), CV_8UC1 );
bd->detect( roi, keylines, mask );
bd->compute( roi, keylines, descriptors);
cv::Mat output = img_in.clone();
/* draw lines extracted from octave 0 */
if( output.channels() == 1 )
cvtColor( output, output, COLOR_GRAY2BGR );
for ( size_t i = 0; i < keylines.size(); i++ )
{
model.add_correspondence(point2d, point3d);
model.add_descriptor(descriptors.row(i));
model.add_keypoint(keypoints_model[i]);
KeyLine kl = keylines[i];
std::cout << "keyline length: " << kl.lineLength << std::endl;
if( kl.octave == 0 && kl.lineLength > 100)
{
/* get a random color */
int R = ( rand() % (int) ( 255 + 1 ) );
int G = ( rand() % (int) ( 255 + 1 ) );
int B = ( rand() % (int) ( 255 + 1 ) );
/* get extremes of line */
Point pt1 = Point2f( kl.startPointX + bbox.x, kl.startPointY + bbox.y);
Point pt2 = Point2f( kl.endPointX + bbox.x, kl.endPointY + bbox.y);
/* draw line */
line( output, pt1, pt2, Scalar( B, G, R ), 3 );
}
}
else
{
model.add_outlier(point2d);
/* show lines on image */
imshow( "LSD lines", output );
imwrite( "lsd_lines.jpeg", output);
waitKey();
// Check if keypoints are on the surface of the registration image and add to the model
for (unsigned int i = 0; i < keylines.size(); ++i) {
KeyLine kl = keylines[i];
if( kl.octave == 0 && kl.lineLength > 100)
{
/* get extremes of line */
Point pt1 = Point2f( kl.startPointX + bbox.x, kl.startPointY + bbox.y);
Point pt2 = Point2f( kl.endPointX + bbox.x, kl.endPointY + bbox.y);
std::vector<Point2f> point2d;
// point2d.push_back(keylines[i].pt); // mid point of the line
point2d.push_back(pt1);
point2d.push_back(pt2);
for (unsigned int j = 0; j < point2d.size(); j++)
{
Point3f point3d;
bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d[j], point3d);
if (on_surface)
{
model.add_correspondence(point2d[j], point3d);
model.add_descriptor(descriptors.row(i));
KeyPoint mid_point;
mid_point.pt = point2d[j];
model.add_keypoint(mid_point);
}
else
{
model.add_outlier(point2d[j]);
}
}
}
}
}
else
{
// Compute keypoints and descriptors
rmatcher.computeKeyPoints(roi, keypoints_model);
rmatcher.computeDescriptors(roi, keypoints_model, descriptors);
// Check if keypoints are on the surface of the registration image and add to the model
for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
Point2f point2d(keypoints_model[i].pt);
Point3f point3d;
bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
if (on_surface)
{
model.add_correspondence(point2d, point3d);
model.add_descriptor(descriptors.row(i));
model.add_keypoint(keypoints_model[i]);
}
else
{
model.add_outlier(point2d);
}
}
}
model.set_trainingImagePath(img_path);
// save the model into a *.yaml file
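The registration changes above store, for each sufficiently long keyline, the two backprojected 3D endpoints together with the line's binary descriptor, so at detection time the matched endpoints can be fed to the usual point-based PnP solver. A minimal sketch of that final step is shown below; the function and parameter names are illustrative and not part of this commit.

// Hypothetical pose estimation from matched line endpoints; not part of this commit.
// Assumes points3d_model / points2d_frame were built from the correspondences
// registered by the code above (two endpoints per matched keyline).
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

static bool estimatePoseFromLineEndpoints(const std::vector<cv::Point3f> &points3d_model,
                                          const std::vector<cv::Point2f> &points2d_frame,
                                          const cv::Mat &camera_matrix,
                                          cv::Mat &rvec, cv::Mat &tvec,
                                          cv::Mat &inliers)
{
    // Each matched keyline contributes its two endpoints, so the point-based
    // RANSAC PnP used elsewhere in the tutorial can be reused unchanged.
    return cv::solvePnPRansac(points3d_model, points2d_frame, camera_matrix,
                              cv::noArray(),   // assume undistorted images
                              rvec, tvec,
                              false,           // no extrinsic guess
                              500,             // RANSAC iterations
                              2.0f,            // reprojection error threshold (px)
                              0.99,            // confidence
                              inliers,
                              cv::SOLVEPNP_ITERATIVE);
}

Reusing both endpoints per line keeps the existing PnPProblem/Kalman code untouched; a dedicated line-based pose solver would be an alternative design.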