updated find pose to read from bag and estimate pose

This commit is contained in:
Apoorva Gupta 2023-03-14 10:08:47 +05:30
parent 524459ec04
commit 8fdea465cb
5 changed files with 106 additions and 26 deletions

View File

@ -57,9 +57,11 @@ find_package(pcl_ros REQUIRED)
find_package(rclcpp REQUIRED)
find_package(sensor_msgs REQUIRED)
find_package(pcl_msgs REQUIRED)
find_package(image_transport REQUIRED)
find_package(message_filters REQUIRED)
add_executable(convert_pointcloud_to_depth src/convert_pointcloud_to_depth.cpp)
ament_target_dependencies(convert_pointcloud_to_depth ${Libraries} rclcpp_components rclcpp sensor_msgs cv_bridge pcl_conversions)
ament_target_dependencies(convert_pointcloud_to_depth ${Libraries} rclcpp_components rclcpp sensor_msgs cv_bridge pcl_conversions image_transport message_filters)
target_link_libraries(convert_pointcloud_to_depth ${PCL_LIBRARIES} ${Boost_LIBRARIES})
include_directories(
@ -75,7 +77,7 @@ include_directories(${Qt5Core_INCLUDE_DIRS} ${Qt5Widgets_INCLUDE_DIRS})
#ADD_LIBRARY(find_object ${SRC_FILES})
add_executable(find_pose_node src/FindObjectROS.cpp src/find_pose_node.cpp)
set_target_properties(find_pose_node PROPERTIES OUTPUT_NAME find_pose_node)
ament_target_dependencies(find_pose_node rclcpp rclcpp_components cv_bridge tf2_msgs tf2_geometry_msgs ${AMENT_LIBRARIES})
ament_target_dependencies(find_pose_node rclcpp rclcpp_components cv_bridge sensor_msgs image_transport message_filters tf2_msgs tf2_geometry_msgs ${AMENT_LIBRARIES})
target_link_libraries(find_pose_node Qt5::Core Qt5::Widgets ${OpenCV_LIBRARIES} ${LIBRARIES})

View File

@ -42,6 +42,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// #include <QtCore/QPair>
// #include <QtCore/QRect>
// #include <QtGui/QTransform>
#include <image_transport/image_transport.h>
#include <image_transport/subscriber_filter.h>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/sync_policies/exact_time.h>
#include <sensor_msgs/msg/camera_info.hpp>
class FindObjectROS
{
@ -53,12 +60,16 @@ public:
// public Q_SLOTS:
// void publish(const find_object::DetectionInfo & info, const find_object::Header & header, const cv::Mat & depth, float depthConstant);
void estimate_pose(float objecWidth, float objectHeight, cv::Point2f point_1, cv::Point2f point_2, cv::Point2f point_3, cv::Point2f point_4);
geometry_msgs::msg::TransformStamped estimate_pose(cv::Mat rgb_image, cv::Mat depth, float objectWidth, float objectHeight, cv::Point2f point_1, cv::Point2f point_2, cv::Point2f point_3, cv::Point2f point_4);
private:
cv::Vec3f getDepth(const cv::Mat & depthImage,
int x, int y,
float cx, float cy,
float fx, float fy);
void estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSharedPtr rgbMsg,
const sensor_msgs::msg::Image::ConstSharedPtr depthMsg,
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg);
double distance(geometry_msgs::msg::TransformStamped point1, geometry_msgs::msg::TransformStamped point2);
private:
rclcpp::Node * node_;
@ -72,6 +83,14 @@ private:
unsigned int num_objects_;
std::vector<double> objectHeight_;
std::vector<double> objectWidth_;
image_transport::SubscriberFilter rgbSub_;
image_transport::SubscriberFilter depthSub_;
message_filters::Subscriber<sensor_msgs::msg::CameraInfo> cameraInfoSub_;
typedef message_filters::sync_policies::ApproximateTime<
sensor_msgs::msg::Image,
sensor_msgs::msg::Image,
sensor_msgs::msg::CameraInfo> MyApproxSyncPolicy;
message_filters::Synchronizer<MyApproxSyncPolicy> * approxSync_;
};

View File

@ -11,7 +11,8 @@
<build_depend>libpcl-all-dev</build_depend>
<depend>rclcpp</depend>
<depend>pcl</depend>
<depend>sensor_msgs</depend>
<build_depend>sensor_msgs</build_depend>
<exec_depend>sensor_msgs</exec_depend>
<build_depend>pcl_conversions</build_depend>
<exec_depend>pcl_conversions</exec_depend>
<build_depend>depth_image_proc</build_depend>
@ -21,7 +22,10 @@
<exec_depend>cv_bridge</exec_depend>
<build_depend>pcl_msgs</build_depend>
<exec_depend>pcl_msgs</exec_depend>
<build_depend>image_transport</build_depend>
<exec_depend>image_transport</exec_depend>
<build_depend>message_filters</build_depend>
<exec_depend>message_filters</exec_depend>
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>

View File

@ -51,32 +51,78 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :
objFramePrefix_("object"),
usePnP_(true)
{
int queueSize = 10;
tfBroadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(node);
objFramePrefix_ = node->declare_parameter("object_prefix", objFramePrefix_);
usePnP_ = node->declare_parameter("pnp", usePnP_);
RCLCPP_INFO(node->get_logger(), "object_prefix = %s", objFramePrefix_.c_str());
RCLCPP_INFO(node->get_logger(), "pnp = %s", usePnP_?"true":"false");
image_transport::TransportHints hints(node);
rgbSub_.subscribe(node, "/camera/color/image_raw", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
depthSub_.subscribe(node, "/camera/aligned_depth_to_color/image_raw", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
cameraInfoSub_.subscribe(node, "/camera/color/camera_info", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
// pub_ = node->create_publisher<std_msgs::msg::Float32MultiArray>("objects", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1));
// pubStamped_ = node->create_publisher<find_object_2d::msg::ObjectsStamped>("objectsStamped", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1));
// pubInfo_ = node->create_publisher<find_object_2d::msg::DetectionInfo>("info", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1));
approxSync_ = new message_filters::Synchronizer<MyApproxSyncPolicy>(MyApproxSyncPolicy(queueSize), rgbSub_, depthSub_, cameraInfoSub_);
approxSync_->registerCallback(std::bind(&FindObjectROS::estimate_pose_on_bag, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
// this->connect(this, SIGNAL(objectsFound(const find_object::DetectionInfo &, const find_object::Header &, const cv::Mat &, float)), this, SLOT(publish(const find_object::DetectionInfo &, const find_object::Header &, const cv::Mat &, float)));
num_objects_ = 2;
objectHeight_.resize(num_objects_);
}
void FindObjectROS::estimate_pose(float objectWidth, float objectHeight, cv::Point2f point_1, cv::Point2f point_2, cv::Point2f point_3, cv::Point2f point_4)
// This function takes a fixed bounding box currently. In future the bounding box will come from yolo.
// Synchronized callback for (RGB image, depth image, camera info) triples coming
// from the bag. Converts the RGB frame to mono8/bgr8, then estimates the pose of
// two hard-coded trail regions and prints the 3D distance between them.
void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSharedPtr rgbMsg,
const sensor_msgs::msg::Image::ConstSharedPtr depthMsg,
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg)
{
// Only 16-bit (mm) or 32-bit float (m) depth encodings are supported downstream.
if(depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1)!=0 &&
depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1)!=0)
{
RCLCPP_ERROR(node_->get_logger(), "find_object_ros: Depth image type must be 32FC1 or 16UC1");
return;
}
if(rgbMsg->data.size())
{
// NOTE(review): `ptr` is never used below (`imgPtr` re-shares the same message);
// consider removing one of the two toCvShare calls.
cv_bridge::CvImageConstPtr ptr = cv_bridge::toCvShare(rgbMsg);
cv_bridge::CvImageConstPtr ptrDepth = cv_bridge::toCvShare(depthMsg);
// Inverse of fy from the camera intrinsics (k[4]).
// NOTE(review): `depthConstant` is computed but not used here — estimate_pose()
// currently uses its own hard-coded value; verify which one is intended.
float depthConstant = 1.0f/cameraInfoMsg->k[4];
cv::Mat image;
cv_bridge::CvImageConstPtr imgPtr = cv_bridge::toCvShare(rgbMsg);
try
{
// Normalize the input to either mono8 (for grayscale sources) or bgr8.
if(rgbMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
rgbMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
{
image = cv_bridge::cvtColor(imgPtr, "mono8")->image;
}
else
{
image = cv_bridge::cvtColor(imgPtr, "bgr8")->image;
}
// Fixed bounding boxes (pixel corners) and object sizes for the two trails;
// these will eventually be supplied by a detector (yolo).
geometry_msgs::msg::TransformStamped right_trail = estimate_pose(image, ptrDepth->image, 115, 116, cv::Point2f(819,600), cv::Point2f(974,600), cv::Point2f(974,716), cv::Point2f(819,716));
geometry_msgs::msg::TransformStamped left_trail = estimate_pose(image, ptrDepth->image, 160, 168, cv::Point2f(350,548), cv::Point2f(510,548), cv::Point2f(510,716), cv::Point2f(350,716));
std::cout << "Distance in meters !!!!!!" << distance(left_trail, right_trail) << std::endl;
std::cout << std::endl;
}
catch(const cv_bridge::Exception & e)
{
RCLCPP_ERROR(node_->get_logger(), "find_object_ros: Could not convert input image to mono8 or bgr8 format, encoding detected is %s... cv_bridge exception: %s", rgbMsg->encoding.c_str(), e.what());
}
}
}
geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_image, cv::Mat depth, float objectWidth, float objectHeight, cv::Point2f point_1, cv::Point2f point_2, cv::Point2f point_3, cv::Point2f point_4)
{
// Read RGB Image
cv::Mat rgb_image = cv::imread("/media/psf/freelancing/greenhouse/stereo/stereo_5.jpg");
// cv::Mat rgb_image = cv::imread("/media/psf/freelancing/greenhouse/stereo/stereo_5.jpg");
// read depth Image
cv::Mat depth_img = cv::imread("/media/psf/freelancing/greenhouse/depth/depth_4.jpg", cv::IMREAD_ANYDEPTH);
// cv::Mat depth_img = cv::imread("/media/psf/freelancing/greenhouse/depth/depth_4.jpg", cv::IMREAD_ANYDEPTH);
// bounding box dimensions
// Convert the image to a single-channel 16-bit image
cv::Mat depth;
depth_img.convertTo(depth, CV_16UC1);
// cv::Mat depth;
// depth_img.convertTo(depth, CV_16UC1);
std::vector<geometry_msgs::msg::TransformStamped> transforms;
// store bounding boxes. (This should come from yolo ideally)
@ -99,7 +145,7 @@ void FindObjectROS::estimate_pose(float objectWidth, float objectHeight, cv::Poi
// transform.header.stamp.sec = ros::Time::now();
tf2::Quaternion q;
float depthConstant = 1.0f/640.8887939453125; // 0.00156033 // 1.0f/cameraInfoMsg->K[4];
std::cout << "depthconstant: " << depthConstant << std::endl;
// std::cout << "depthconstant: " << depthConstant << std::endl;
std::vector<cv::Point3f> objectPoints(4);
std::vector<cv::Point2f> imagePoints(4);
objectPoints[0] = cv::Point3f(-0.5, -(objectHeight/objectWidth)/2.0f,0);
@ -150,8 +196,8 @@ void FindObjectROS::estimate_pose(float objectWidth, float objectHeight, cv::Poi
center.x()+0.5f, center.y()+0.5f,
float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
1.0f/depthConstant, 1.0f/depthConstant);
std::cout << "center x: " << center.x() << " y: " << center.y() << std::endl;
std::cout << "center 3D x: " << center3D[0] << " y: " << center3D[1] <<" z: " << center3D[1] << std::endl;
// std::cout << "center x: " << center.x() << " y: " << center.y() << std::endl;
// std::cout << "center 3D x: " << center3D[0] << " y: " << center3D[1] <<" z: " << center3D[1] << std::endl;
transform.transform.translation.x = tvec.at<double>(0)*(center3D.val[2]/tvec.at<double>(2));
transform.transform.translation.y = tvec.at<double>(1)*(center3D.val[2]/tvec.at<double>(2));
@ -163,11 +209,11 @@ void FindObjectROS::estimate_pose(float objectWidth, float objectHeight, cv::Poi
q *= q2;
transform.transform.rotation = tf2::toMsg(q.normalized());
transforms.push_back(transform);
return transform;
// if(transforms.size())
// {
// tfBroadcaster_->sendTransform(transforms);
// }
std::cout << "Done!!!!!!!!!!!!" << std::endl;
}
cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
@ -193,8 +239,8 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
float center_x = cx; //cameraInfo.K.at(2) // 656.1123046875
float center_y = cy; //cameraInfo.K.at(5) // 361.80828857421875
std::cout << "cx: " << center_x << " cy: " << center_y << std::endl;
std::cout << "fx: " << fx << " fy: " << fy << std::endl;
// std::cout << "cx: " << center_x << " cy: " << center_y << std::endl;
// std::cout << "fx: " << fx << " fy: " << fy << std::endl;
bool isInMM = depthImage.type() == CV_16UC1; // is in mm?
// Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
@ -207,7 +253,7 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
bool isValid;
if(isInMM)
{
std::cout << "depth is in mm!!" << std::endl;
// std::cout << "depth is in mm!!" << std::endl;
depth = (float)depthImage.at<uint16_t>(y,x);
isValid = depth != 0.0f;
}
@ -216,7 +262,7 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
depth = depthImage.at<float>(y,x);
isValid = std::isfinite(depth) && depth > 0.0f;
std::cout << "depth is NOT in mm!! " << depth << std::endl;
// std::cout << "depth is NOT in mm!! " << depth << std::endl;
}
// Check for invalid measurements
@ -235,3 +281,12 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
}
return pt;
}
// Euclidean distance (in meters) between the translation components of two
// stamped transforms.
double FindObjectROS::distance(geometry_msgs::msg::TransformStamped point1, geometry_msgs::msg::TransformStamped point2)
{
const double dx = point2.transform.translation.x - point1.transform.translation.x;
const double dy = point2.transform.translation.y - point1.transform.translation.y;
const double dz = point2.transform.translation.z - point1.transform.translation.z;
return std::sqrt(dx*dx + dy*dy + dz*dz);
}

View File

@ -35,8 +35,8 @@ class FindPoseNode : public rclcpp::Node {
findObjectROS_(0)
{
findObjectROS_ = new FindObjectROS(this);
findObjectROS_->estimate_pose(115, 116, cv::Point2f(819,600), cv::Point2f(974,600), cv::Point2f(974,716), cv::Point2f(819,716));
findObjectROS_->estimate_pose(160, 168, cv::Point2f(350,548), cv::Point2f(510,548), cv::Point2f(510,716), cv::Point2f(350,716));
// findObjectROS_->estimate_pose(115, 116, cv::Point2f(819,600), cv::Point2f(974,600), cv::Point2f(974,716), cv::Point2f(819,716));
// findObjectROS_->estimate_pose(160, 168, cv::Point2f(350,548), cv::Point2f(510,548), cv::Point2f(510,716), cv::Point2f(350,716));
}
private:
@ -47,7 +47,7 @@ class FindPoseNode : public rclcpp::Node {
// Entry point: initialize ROS 2, spin the FindPoseNode (its constructor wires
// up the synchronized bag subscriptions), and shut down cleanly on exit.
// Fix: the previous text contained both the old and new copies of the node
// creation / spin lines (diff residue), redefining `node` and spinning twice.
int main(int argc, char** argv)
{
rclcpp::init(argc, argv);
auto node = std::make_shared<FindPoseNode>();
rclcpp::spin(node);
rclcpp::shutdown();
return 0;
}