two object pose estimation but failing
parent b130ae7c9a
commit 7a7f46052b
@@ -63,119 +63,121 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :
	// pubInfo_ = node->create_publisher<find_object_2d::msg::DetectionInfo>("info", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1));

	// this->connect(this, SIGNAL(objectsFound(const find_object::DetectionInfo &, const find_object::Header &, const cv::Mat &, float)), this, SLOT(publish(const find_object::DetectionInfo &, const find_object::Header &, const cv::Mat &, float)));
}

void FindObjectROS::estimate_pose()
{
	std::cout << "OpenCV version string " << cv::getVersionString() << std::endl;

	// Two objects, matching the two bounding boxes pushed below.
	num_objects_ = 2;
	objectHeight_.clear();
	objectHeight_.push_back(168);
	objectHeight_.push_back(116);
	objectWidth_.clear();
	objectWidth_.push_back(160);
	objectWidth_.push_back(155);

	// Read RGB image
-	cv::Mat rgb_image = cv::imread("/media/psf/freelancing/greenhouse/yolov3/runs/detect/exp8/stereo_image952.jpeg");
+	cv::Mat rgb_image = cv::imread("/media/psf/freelancing/greenhouse/stereo/stereo_5.jpg");
	// Read depth image
-	cv::Mat depth = cv::imread("/media/psf/freelancing/greenhouse/depth_try1/depth_img_952.jpeg");
+	cv::Mat depth_img = cv::imread("/media/psf/freelancing/greenhouse/depth/depth_4.jpg", cv::IMREAD_ANYDEPTH);
	// Convert the depth image to a single-channel 16-bit image
	cv::Mat depth;
	depth_img.convertTo(depth, CV_16UC1);
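	// Note: JPEG is an 8-bit format, so cv::IMREAD_ANYDEPTH cannot recover a
	// 16-bit depth map from a .jpg; convertTo() only widens the 0-255 values,
	// which getDepth() below will then interpret as millimeters.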

	std::vector<geometry_msgs::msg::TransformStamped> transforms;

	// Bounding box for the standalone PnP below (this should come from YOLO ideally).
	float objectWidth = 372;  // width of bounding box in pixels
	float objectHeight = 554; // height of bounding box in pixels

	// Per-object bounding-box corners, consumed by the loop further down.
	std::vector<std::vector<cv::Point2f>> imagePoints_;
	std::vector<cv::Point2f> imagePoints_oj(4);
	imagePoints_oj[0] = cv::Point2f(350,548);
	imagePoints_oj[1] = cv::Point2f(510,548);
	imagePoints_oj[2] = cv::Point2f(510,716);
	imagePoints_oj[3] = cv::Point2f(350,716);
	imagePoints_.push_back(imagePoints_oj);

	geometry_msgs::msg::TransformStamped transform;
	transform.transform.rotation.x = 0;
	transform.transform.rotation.y = 0;
	transform.transform.rotation.z = 0;
	transform.transform.rotation.w = 1;
	transform.transform.translation.x = 0;
	transform.transform.translation.y = 0;
	transform.transform.translation.z = 0;
	transform.child_frame_id = "l_trail";
	transform.header.frame_id = "camera_link";
	// transform.header.stamp.sec = ros::Time::now();

	tf2::Quaternion q;
	float depthConstant = 1.0f/641.587158203125; // 1/fy from the camera intrinsics

	std::vector<cv::Point3f> objectPoints(6);
	std::vector<cv::Point2f> imagePoints(6);
	cv::Vec3f point_1 = this->getDepth(depth, 246, 665,
			float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
			1.0f/depthConstant, 1.0f/depthConstant);
	cv::Vec3f point_2 = this->getDepth(depth, 618, 665,
			float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
			1.0f/depthConstant, 1.0f/depthConstant);
	cv::Vec3f point_3 = this->getDepth(depth, 618, 111,
			float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
			1.0f/depthConstant, 1.0f/depthConstant);
	cv::Vec3f point_4 = this->getDepth(depth, 246, 111,
			float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
			1.0f/depthConstant, 1.0f/depthConstant);
	cv::Vec3f point_5 = this->getDepth(depth, 432, 386,
			float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
			1.0f/depthConstant, 1.0f/depthConstant);
	cv::Vec3f point_6 = this->getDepth(depth, 537, 224,
			float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
			1.0f/depthConstant, 1.0f/depthConstant);
	objectPoints[0] = cv::Point3f(point_1.val[0], point_1.val[1], point_1.val[2]);
	objectPoints[1] = cv::Point3f(point_2.val[0], point_2.val[1], point_2.val[2]);
	objectPoints[2] = cv::Point3f(point_3.val[0], point_3.val[1], point_3.val[2]);
	objectPoints[3] = cv::Point3f(point_4.val[0], point_4.val[1], point_4.val[2]);
	objectPoints[4] = cv::Point3f(point_5.val[0], point_5.val[1], point_5.val[2]);
	objectPoints[5] = cv::Point3f(point_6.val[0], point_6.val[1], point_6.val[2]);

	// The same six pixels, as 2D image points for solvePnP.
	imagePoints[0] = cv::Point2f(246, 665);
	imagePoints[1] = cv::Point2f(618, 665);
	imagePoints[2] = cv::Point2f(618, 111);
	imagePoints[3] = cv::Point2f(246, 111);
	imagePoints[4] = cv::Point2f(432, 386);
	imagePoints[5] = cv::Point2f(537, 224);

	cv::Mat cameraMatrix = cv::Mat::eye(3,3,CV_64FC1);
	cameraMatrix.at<double>(0,0) = 1.0f/depthConstant;
	cameraMatrix.at<double>(1,1) = 1.0f/depthConstant;
	cameraMatrix.at<double>(0,2) = float(depth.cols/2)-0.5f;
	cameraMatrix.at<double>(1,2) = float(depth.rows/2)-0.5f;
	cv::Mat distCoeffs;
	std::cout << "depth img cols: " << depth.cols << " rows: " << depth.rows << std::endl;

	cv::Mat rvec(1,3, CV_64FC1);
	cv::Mat tvec(1,3, CV_64FC1);
	cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

	std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;

	cv::Mat R;
	cv::Rodrigues(rvec, R);
	tf2::Matrix3x3 rotationMatrix(
			R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2),
			R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2),
			R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2));
	rotationMatrix.getRotation(q);

	// Box center in image coordinates; (246, 111) is the top-left corner.
	QPointF center = QPointF(246 + objectWidth/2, 111 + objectHeight/2);
	cv::Vec3f center3D = this->getDepth(depth,
			center.x()+0.5f, center.y()+0.5f,
			float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
			1.0f/depthConstant, 1.0f/depthConstant);

	// Rescale the PnP translation so the box center sits at the measured depth.
	transform.transform.translation.x = tvec.at<double>(0)*(center3D.val[2]/tvec.at<double>(2));
	transform.transform.translation.y = tvec.at<double>(1)*(center3D.val[2]/tvec.at<double>(2));
	transform.transform.translation.z = tvec.at<double>(2)*(center3D.val[2]/tvec.at<double>(2));
	std::cout << "Pose x: " << transform.transform.translation.x << " y: " << transform.transform.translation.y << " z: " << transform.transform.translation.z << std::endl;

	// Set x axis going front of the object, with z up and y left.
	tf2::Quaternion q2;
	q2.setRPY(CV_PI/2.0, CV_PI/2.0, 0);
	q *= q2;
	transform.transform.rotation = tf2::toMsg(q.normalized());
	transforms.push_back(transform);

	if(transforms.size())
	{
		tfBroadcaster_->sendTransform(transforms);
	}

	// Second object's bounding-box corners; solvePnP in the loop below
	// expects exactly four points per object.
	std::vector<cv::Point2f> imagePoints_oj2(4);
	imagePoints_oj2[0] = cv::Point2f(819,600);
	imagePoints_oj2[1] = cv::Point2f(974,600);
	imagePoints_oj2[2] = cv::Point2f(974,716);
	imagePoints_oj2[3] = cv::Point2f(819,716);
	imagePoints_.push_back(imagePoints_oj2);

	for(unsigned int i = 0; i < num_objects_; i++)
	{
		std::vector<geometry_msgs::msg::TransformStamped> transforms;

		// Bounding-box dimensions (these should come from YOLO ideally).
		float objectWidth = objectWidth_[i];   // width of bounding box in pixels
		float objectHeight = objectHeight_[i]; // height of bounding box in pixels

		geometry_msgs::msg::TransformStamped transform;
		transform.transform.rotation.x = 0;
		transform.transform.rotation.y = 0;
		transform.transform.rotation.z = 0;
		transform.transform.rotation.w = 1;
		transform.transform.translation.x = 0;
		transform.transform.translation.y = 0;
		transform.transform.translation.z = 0;
		transform.child_frame_id = "trail_" + std::to_string(i);
		transform.header.frame_id = "camera_link";
		// transform.header.stamp.sec = ros::Time::now();

		tf2::Quaternion q;
		float depthConstant = 1.0f/640.8887939453125; // 0.00156033 // 1.0f/cameraInfoMsg->K[4]
		std::cout << "depthconstant: " << depthConstant << std::endl;

		std::vector<cv::Point3f> objectPoints(4);
		std::cout << "image_points: " << imagePoints_[i] << std::endl;
		std::vector<cv::Point2f> imagePoints = imagePoints_[i];
		std::cout << "Image points x: " << imagePoints[0].x << " y: " << imagePoints[0].y << std::endl;

		// Planar object model: 1 unit wide, aspect-ratio tall, centered at the origin.
		objectPoints[0] = cv::Point3f(-0.5f, -(objectHeight/objectWidth)/2.0f, 0);
		objectPoints[1] = cv::Point3f( 0.5f, -(objectHeight/objectWidth)/2.0f, 0);
		objectPoints[2] = cv::Point3f( 0.5f,  (objectHeight/objectWidth)/2.0f, 0);
		objectPoints[3] = cv::Point3f(-0.5f,  (objectHeight/objectWidth)/2.0f, 0);
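		// With a unit-width model, solvePnP's translation comes out in units
		// of object widths; it is converted to meters further down using the
		// depth sampled at the bounding-box center.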

		cv::Mat cameraMatrix = cv::Mat::eye(3,3,CV_64FC1);
		cameraMatrix.at<double>(0,0) = 1.0f/depthConstant;
		cameraMatrix.at<double>(1,1) = 1.0f/depthConstant;
		cameraMatrix.at<double>(0,2) = float(depth.cols/2)-0.5f;
		cameraMatrix.at<double>(1,2) = float(depth.rows/2)-0.5f;
		cv::Mat distCoeffs;
		cv::Mat rvec(1,3, CV_64FC1);
		cv::Mat tvec(1,3, CV_64FC1);
		cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

		std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;

		cv::Mat R;
		cv::Rodrigues(rvec, R);
		tf2::Matrix3x3 rotationMatrix(
				R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2),
				R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2),
				R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2));
		rotationMatrix.getRotation(q);

		// imagePoints[0] is the top-left corner of the box.
		QPointF center = QPointF(imagePoints[0].x + objectWidth/2, imagePoints[0].y + objectHeight/2);
		cv::Vec3f center3D = this->getDepth(depth,
				center.x()+0.5f, center.y()+0.5f,
				float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
				1.0f/depthConstant, 1.0f/depthConstant);
		std::cout << "center x: " << center.x() << " y: " << center.y() << std::endl;
		std::cout << "center 3D x: " << center3D[0] << " y: " << center3D[1] << " z: " << center3D[2] << std::endl;

		// Rescale the PnP translation so the box center sits at the measured depth.
		transform.transform.translation.x = tvec.at<double>(0)*(center3D.val[2]/tvec.at<double>(2));
		transform.transform.translation.y = tvec.at<double>(1)*(center3D.val[2]/tvec.at<double>(2));
		transform.transform.translation.z = tvec.at<double>(2)*(center3D.val[2]/tvec.at<double>(2));
		std::cout << "Pose x: " << transform.transform.translation.x << " y: " << transform.transform.translation.y << " z: " << transform.transform.translation.z << std::endl;

		// Set x axis going front of the object, with z up and y left.
		tf2::Quaternion q2;
		q2.setRPY(CV_PI/2.0, CV_PI/2.0, 0);
		q *= q2;
		transform.transform.rotation = tf2::toMsg(q.normalized());
		transforms.push_back(transform);

		if(transforms.size())
		{
			tfBroadcaster_->sendTransform(transforms);
		}
	}
}
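
A minimal sketch of the rescaling idea used above, under these assumptions: with the unit-width planar model, cv::solvePnP returns a translation in "object widths" rather than meters, and multiplying by measuredZ/tvec.z re-anchors it to the depth sampled at the box center. The helper below is hypothetical, not part of this commit:

	#include <opencv2/core.hpp>

	// Rescale a PnP translation so the object's center sits at the metric
	// depth read from the depth image (measuredZ, in meters).
	static cv::Point3d rescaleToMeasuredDepth(const cv::Mat & tvec, double measuredZ)
	{
		double s = measuredZ / tvec.at<double>(2);
		return cv::Point3d(tvec.at<double>(0) * s,
		                   tvec.at<double>(1) * s,
		                   tvec.at<double>(2) * s);
	}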
@@ -199,24 +201,24 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,

	cv::Vec3f pt;

	// Use correct principal point from calibration
-	float center_x = cx; //cameraInfo.K.at(2)
-	float center_y = cy; //cameraInfo.K.at(5)
+	float center_x = cx; //cameraInfo.K.at(2) // 656.1123046875
+	float center_y = cy; //cameraInfo.K.at(5) // 361.80828857421875

-	// std::cout << "cx: " << center_x << " cy: " << center_y << std::endl;
-	// std::cout << "fx: " << fx << " fy: " << fy << std::endl;
+	std::cout << "cx: " << center_x << " cy: " << center_y << std::endl;
+	std::cout << "fx: " << fx << " fy: " << fy << std::endl;

	bool isInMM = depthImage.type() == CV_16UC1; // is in mm?

	// Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
	float unit_scaling = isInMM?0.001f:1.0f;
-	float constant_x = unit_scaling / fx; //cameraInfo.K.at(0)
-	float constant_y = unit_scaling / fy; //cameraInfo.K.at(4)
+	float constant_x = unit_scaling / fx; //cameraInfo.K.at(0) // 642.437744140625
+	float constant_y = unit_scaling / fy; //cameraInfo.K.at(4) // 640.8887939453125
	float bad_point = std::numeric_limits<float>::quiet_NaN();

	float depth;
	bool isValid;
	if(isInMM)
	{
-		// std::cout << "depth is in mm!!" << std::endl;
+		std::cout << "depth is in mm!!" << std::endl;
		depth = (float)depthImage.at<uint16_t>(y,x);
		isValid = depth != 0.0f;
	}
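
For reference, a self-contained sketch of the back-projection getDepth() performs once a valid metric depth d at pixel (x, y) is found, using the pinhole model; the function name is illustrative, not part of this commit:

	#include <opencv2/core.hpp>

	// Pinhole back-projection: X = (x - cx) * d / fx, Y = (y - cy) * d / fy, Z = d.
	static cv::Vec3f backProject(float x, float y, float d,
	                             float cx, float cy, float fx, float fy)
	{
		return cv::Vec3f((x - cx) * d / fx,
		                 (y - cy) * d / fy,
		                 d);
	}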
@@ -225,14 +227,14 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
		depth = depthImage.at<float>(y,x);
		isValid = std::isfinite(depth) && depth > 0.0f;
-		// std::cout << "depth is NOT in mm!! " << depth << std::endl;
+		std::cout << "depth is NOT in mm!! " << depth << std::endl;
	}

	// Check for invalid measurements
	if (!isValid)
	{
		pt.val[0] = pt.val[1] = pt.val[2] = bad_point;
-		// std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
+		std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
	}
	else
	{