Pipeline works but might be slow at runtime

This commit is contained in:
Apoorva Gupta 2023-03-21 14:08:41 +05:30
parent 987530bbfe
commit cf0fdc175b
7 changed files with 59 additions and 31 deletions

View File

@ -59,6 +59,7 @@ find_package(sensor_msgs REQUIRED)
find_package(pcl_msgs REQUIRED)
find_package(image_transport REQUIRED)
find_package(message_filters REQUIRED)
find_package(yolov3_msg REQUIRED)
add_executable(convert_pointcloud_to_depth src/convert_pointcloud_to_depth.cpp)
ament_target_dependencies(convert_pointcloud_to_depth ${Libraries} rclcpp_components rclcpp sensor_msgs cv_bridge pcl_conversions image_transport message_filters)
@ -77,7 +78,7 @@ include_directories(${Qt5Core_INCLUDE_DIRS} ${Qt5Widgets_INCLUDE_DIRS})
#ADD_LIBRARY(find_object ${SRC_FILES})
add_executable(find_pose_node src/FindObjectROS.cpp src/find_pose_node.cpp)
set_target_properties(find_pose_node PROPERTIES OUTPUT_NAME find_pose_node)
ament_target_dependencies(find_pose_node rclcpp rclcpp_components cv_bridge sensor_msgs image_transport message_filters tf2_msgs tf2_geometry_msgs ${AMENT_LIBRARIES})
ament_target_dependencies(find_pose_node rclcpp rclcpp_components yolov3_msg cv_bridge sensor_msgs image_transport message_filters tf2_msgs tf2_geometry_msgs ${AMENT_LIBRARIES})
target_link_libraries(find_pose_node Qt5::Core Qt5::Widgets ${OpenCV_LIBRARIES} ${LIBRARIES})

View File

@ -49,6 +49,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/sync_policies/exact_time.h>
#include <sensor_msgs/msg/camera_info.hpp>
#include "yolov3_msg/msg/bounding_boxes.hpp"
#include "yolov3_msg/msg/bounding_box.hpp"
class FindObjectROS
{
@ -68,7 +70,8 @@ private:
float fx, float fy);
void estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSharedPtr rgbMsg,
const sensor_msgs::msg::Image::ConstSharedPtr depthMsg,
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg);
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg,
const yolov3_msg::msg::BoundingBoxes::ConstSharedPtr bboxes);
double distance(geometry_msgs::msg::TransformStamped point1, geometry_msgs::msg::TransformStamped point2);
private:
@ -86,10 +89,13 @@ private:
image_transport::SubscriberFilter rgbSub_;
image_transport::SubscriberFilter depthSub_;
message_filters::Subscriber<sensor_msgs::msg::CameraInfo> cameraInfoSub_;
message_filters::Subscriber<yolov3_msg::msg::BoundingBoxes> bboxSub_;
typedef message_filters::sync_policies::ApproximateTime<
sensor_msgs::msg::Image,
sensor_msgs::msg::Image,
sensor_msgs::msg::CameraInfo> MyApproxSyncPolicy;
sensor_msgs::msg::CameraInfo,
yolov3_msg::msg::BoundingBoxes> MyApproxSyncPolicy;
message_filters::Synchronizer<MyApproxSyncPolicy> * approxSync_;

View File

@ -22,6 +22,8 @@
<exec_depend>cv_bridge</exec_depend>
<build_depend>pcl_msgs</build_depend>
<exec_depend>pcl_msgs</exec_depend>
<build_depend>yolov3_msg</build_depend>
<exec_depend>yolov3_msg</exec_depend>
<build_depend>image_transport</build_depend>
<exec_depend>image_transport</exec_depend>
<build_depend>message_filters</build_depend>

View File

@ -62,9 +62,10 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :
rgbSub_.subscribe(node, "/camera/color/image_raw", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
depthSub_.subscribe(node, "/camera/aligned_depth_to_color/image_raw", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
cameraInfoSub_.subscribe(node, "/camera/color/camera_info", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
bboxSub_.subscribe(node, "/bboxes", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
approxSync_ = new message_filters::Synchronizer<MyApproxSyncPolicy>(MyApproxSyncPolicy(queueSize), rgbSub_, depthSub_, cameraInfoSub_);
approxSync_->registerCallback(std::bind(&FindObjectROS::estimate_pose_on_bag, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
approxSync_ = new message_filters::Synchronizer<MyApproxSyncPolicy>(MyApproxSyncPolicy(queueSize), rgbSub_, depthSub_, cameraInfoSub_, bboxSub_);
approxSync_->registerCallback(std::bind(&FindObjectROS::estimate_pose_on_bag, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, std::placeholders::_4));
num_objects_ = 2;
objectHeight_.resize(num_objects_);
@ -74,7 +75,8 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :
// Bounding boxes now come from the synchronized yolo detection topic (bboxSub_); the previous fixed-box behavior is kept below, commented out.
void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSharedPtr rgbMsg,
const sensor_msgs::msg::Image::ConstSharedPtr depthMsg,
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg)
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg,
const yolov3_msg::msg::BoundingBoxes::ConstSharedPtr bboxes)
{
if(depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1)!=0 &&
depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1)!=0)
@ -101,9 +103,29 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
{
image = cv_bridge::cvtColor(imgPtr, "bgr8")->image;
}
geometry_msgs::msg::TransformStamped right_trail = estimate_pose(image, ptrDepth->image, 115, 116, cv::Point2f(819,600), cv::Point2f(974,600), cv::Point2f(974,716), cv::Point2f(819,716));
geometry_msgs::msg::TransformStamped left_trail = estimate_pose(image, ptrDepth->image, 160, 168, cv::Point2f(350,548), cv::Point2f(510,548), cv::Point2f(510,716), cv::Point2f(350,716));
std::cout << "Distance in meters !!!!!!" << distance(left_trail, right_trail) << std::endl;
size_t num_detections = bboxes->bounding_boxes.size();
std::cout << "num_detections: " << num_detections << std::endl;
std::vector<geometry_msgs::msg::TransformStamped> pose_vec;
for(size_t i = 0; i < num_detections ; i++)
{
yolov3_msg::msg::BoundingBox object_bbox = bboxes->bounding_boxes[i];
float object_width = object_bbox.xmax - object_bbox.xmin;
float object_height = object_bbox.ymax - object_bbox.ymin;
// top left is the (0,0) origin. NOTE(review): the two corners below look swapped —
// top_right is built from (xmin, ymax) and bottom_left from (xmax, ymin); with a
// top-left origin, top_right should be (xmax, ymin) and bottom_left (xmin, ymax).
// Also cv::Point2d is mixed with cv::Point2f — confirm and make consistent.
cv::Point2f top_left = cv::Point2f(float(object_bbox.xmin), float(object_bbox.ymin));
cv::Point2d top_right = cv::Point2f(float(object_bbox.xmin), float(object_bbox.ymax));
cv::Point2f bottom_right = cv::Point2f(float(object_bbox.xmax), float(object_bbox.ymax));
cv::Point2d bottom_left = cv::Point2f(float(object_bbox.xmax), float(object_bbox.ymin));
std::string object_class = object_bbox.class_id;
geometry_msgs::msg::TransformStamped pose = estimate_pose(image, ptrDepth->image, object_width, object_height, top_left, top_right, bottom_right, bottom_left);
pose.child_frame_id = object_class;
pose_vec.push_back(pose);
std::cout << "Pose for "<< object_class << " x: " << pose.transform.translation.x << " y: " << pose.transform.translation.y << " z: " << pose.transform.translation.z << std::endl;
}
// geometry_msgs::msg::TransformStamped right_trail = estimate_pose(image, ptrDepth->image, 115, 116, cv::Point2f(819,600), cv::Point2f(974,600), cv::Point2f(974,716), cv::Point2f(819,716));
// geometry_msgs::msg::TransformStamped left_trail = estimate_pose(image, ptrDepth->image, 160, 168, cv::Point2f(350,548), cv::Point2f(510,548), cv::Point2f(510,716), cv::Point2f(350,716));
// std::cout << "Distance in meters !!!!!!" << distance(left_trail, right_trail) << std::endl;
std::cout << std::endl;
}
catch(const cv_bridge::Exception & e)
@ -115,14 +137,7 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_image, cv::Mat depth, float objectWidth, float objectHeight, cv::Point2f point_1, cv::Point2f point_2, cv::Point2f point_3, cv::Point2f point_4)
{
// Read RGB Image
// cv::Mat rgb_image = cv::imread("/media/psf/freelancing/greenhouse/stereo/stereo_5.jpg");
// read depth Image
// cv::Mat depth_img = cv::imread("/media/psf/freelancing/greenhouse/depth/depth_4.jpg", cv::IMREAD_ANYDEPTH);
// bounding box dimensions
// Convert the image to a single-channel 16-bit image
// cv::Mat depth;
// depth_img.convertTo(depth, CV_16UC1);
std::vector<geometry_msgs::msg::TransformStamped> transforms;
// store bounding boxes. (This should come from yolo ideally)
@ -190,8 +205,8 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2));
rotationMatrix.getRotation(q);
// QPointF center = QPointF(imagePoints[0].x + objectWidth/2, imagePoints[0].y + objectHeight/2);
QPointF center = QPointF(442.252, 609.895);
QPointF center = QPointF(imagePoints[0].x + objectWidth/2, imagePoints[0].y + objectHeight/2);
// QPointF center = QPointF(442.252, 609.895);
cv::Vec3f center3D = this->getDepth(depth,
center.x()+0.5f, center.y()+0.5f,
float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
@ -202,7 +217,7 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
transform.transform.translation.x = tvec.at<double>(0)*(center3D.val[2]/tvec.at<double>(2));
transform.transform.translation.y = tvec.at<double>(1)*(center3D.val[2]/tvec.at<double>(2));
transform.transform.translation.z = tvec.at<double>(2)*(center3D.val[2]/tvec.at<double>(2));
std::cout << "Pose x: " << transform.transform.translation.x << " y: " << transform.transform.translation.y << " z: " << transform.transform.translation.z << std::endl;
// std::cout << "Pose x: " << transform.transform.translation.x << " y: " << transform.transform.translation.y << " z: " << transform.transform.translation.z << std::endl;
// set x axis going front of the object, with z up and y left
tf2::Quaternion q2;
q2.setRPY(CV_PI/2.0, CV_PI/2.0, 0);
@ -269,7 +284,7 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
if (!isValid)
{
pt.val[0] = pt.val[1] = pt.val[2] = bad_point;
std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
// std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
}
else
{
@ -277,7 +292,7 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
pt.val[0] = (float(x) - center_x) * depth * constant_x;
pt.val[1] = (float(y) - center_y) * depth * constant_y;
pt.val[2] = depth*unit_scaling;
std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
// std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
}
return pt;
}

View File

@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.5)
project(yolov4_msg)
project(yolov3_msg)
# Find dependencies
#find_package(ament_cmake REQUIRED)
@ -9,26 +9,26 @@ find_package(rclpy REQUIRED)
find_package(std_msgs REQUIRED)
# Create the executable
#add_executable(yolov4_msg yolov4_msg.py)
#add_executable(yolov3_msg yolov3_msg.py)
# Set the permissions of the Python script to be executable
find_package(rosidl_default_generators REQUIRED)
rosidl_generate_interfaces(yolov4_msg
rosidl_generate_interfaces(yolov3_msg
"msg/BoundingBox.msg"
"msg/BoundingBoxes.msg"
DEPENDENCIES builtin_interfaces std_msgs
)
# Link the executable with the necessary libraries
#ament_target_dependencies(yolov4_msg rclpy)
#ament_target_dependencies(yolov3_msg rclpy)
# Install the executable
#install(TARGETS yolov4_msg DESTINATION lib/yolov4_msg)
#install(TARGETS yolov3_msg DESTINATION lib/yolov3_msg)
# Install the script, setup.py, and package.xml files
#install(PROGRAMS yolov4_msg.py
#install(PROGRAMS yolov3_msg.py
# DESTINATION lib/yolov3_on_bag)
#install(FILES package.xml
# DESTINATION share/yolov3_on_bag)
@ -39,7 +39,7 @@ rosidl_generate_interfaces(yolov4_msg
# Package information
#ament_package()
#add_executable(yolov4_msg yolov4_msg.py)
#add_executable(yolov3_msg yolov3_msg.py)
ament_export_dependencies(rosidl_default_runtime)

View File

@ -41,7 +41,7 @@ from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox
import numpy as np
from yolov4_msg.msg import BoundingBox, BoundingBoxes
from yolov3_msg.msg import BoundingBox, BoundingBoxes
from rclpy.clock import Clock
@torch.no_grad()
@ -152,6 +152,8 @@ def run(weights=ROOT / 'yolov3.pt', # model.pt path(s)
LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
# Stream results
im0 = annotator.result()
if update:
strip_optimizer(weights) # update model (to fix SourceChangeWarning)
# cv2.imwrite(save_path, im0)
return im0, bounding_boxes_msg
@ -177,6 +179,8 @@ class DetectOnBag(Node):
detected_img, bboxes = run('src/yolov3/runs/train/exp14/weights/best.pt', cv_image)
self.detected_img_pub.publish(self.numpy_array_to_image_msg(detected_img))
bboxes.image_header = msg.header
bboxes.header.stamp = msg.header.stamp
bboxes.header.frame_id = 'detection'
self.bboxes_pub.publish(bboxes)
def numpy_array_to_image_msg(self, numpy_array):

View File

@ -1,6 +1,6 @@
<?xml version="1.0"?>
<package format="3">
<name>yolov4_msg</name>
<name>yolov3_msg</name>
<version>0.1.0</version>
<description>
This is a ROS2 package for running yolov3 on rosbag