Compare commits

..

No commits in common. "65415a835501f601f23121c7c6ca740596e388c6" and "46177f166aa0aef42360c4d36ab53c016763b927" have entirely different histories.

296 changed files with 429 additions and 15119 deletions

View File

@ -59,7 +59,7 @@ find_package(sensor_msgs REQUIRED)
find_package(pcl_msgs REQUIRED)
find_package(image_transport REQUIRED)
find_package(message_filters REQUIRED)
find_package(pipe_msgs REQUIRED)
find_package(yolov3_msg REQUIRED)
add_executable(convert_pointcloud_to_depth src/convert_pointcloud_to_depth.cpp)
ament_target_dependencies(convert_pointcloud_to_depth ${Libraries} rclcpp_components rclcpp sensor_msgs cv_bridge pcl_conversions image_transport message_filters)
@ -78,7 +78,7 @@ include_directories(${Qt5Core_INCLUDE_DIRS} ${Qt5Widgets_INCLUDE_DIRS})
#ADD_LIBRARY(find_object ${SRC_FILES})
add_executable(find_pose_node src/FindObjectROS.cpp src/find_pose_node.cpp)
set_target_properties(find_pose_node PROPERTIES OUTPUT_NAME find_pose_node)
ament_target_dependencies(find_pose_node rclcpp rclcpp_components pipe_msgs cv_bridge sensor_msgs image_transport message_filters tf2_msgs tf2_geometry_msgs ${AMENT_LIBRARIES})
ament_target_dependencies(find_pose_node rclcpp rclcpp_components yolov3_msg cv_bridge sensor_msgs image_transport message_filters tf2_msgs tf2_geometry_msgs ${AMENT_LIBRARIES})
target_link_libraries(find_pose_node Qt5::Core Qt5::Widgets ${OpenCV_LIBRARIES} ${LIBRARIES})
@ -89,13 +89,7 @@ install(TARGETS
DESTINATION lib/${PROJECT_NAME})
#############
## Install ##
#############
install(DIRECTORY
launch/.
DESTINATION share/${PROJECT_NAME}/launch
)

View File

@ -49,8 +49,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/sync_policies/exact_time.h>
#include <sensor_msgs/msg/camera_info.hpp>
#include "pipe_msgs/msg/bounding_boxes.hpp"
#include "pipe_msgs/msg/bounding_box.hpp"
#include "yolov3_msg/msg/bounding_boxes.hpp"
#include "yolov3_msg/msg/bounding_box.hpp"
class FindObjectROS
{
@ -71,28 +71,31 @@ private:
void estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSharedPtr rgbMsg,
const sensor_msgs::msg::Image::ConstSharedPtr depthMsg,
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg,
const pipe_msgs::msg::BoundingBoxes::ConstSharedPtr bboxes);
const yolov3_msg::msg::BoundingBoxes::ConstSharedPtr bboxes);
double distance(geometry_msgs::msg::TransformStamped point1, geometry_msgs::msg::TransformStamped point2);
double angular_distance(geometry_msgs::msg::TransformStamped point1, geometry_msgs::msg::TransformStamped point2);
private:
rclcpp::Node * node_;
rclcpp::Publisher<std_msgs::msg::Float32MultiArray>::SharedPtr pub_;
// rclcpp::Publisher<find_object_2d::msg::ObjectsStamped>::SharedPtr pubStamped_;
// rclcpp::Publisher<find_object_2d::msg::DetectionInfo>::SharedPtr pubInfo_;
std::string objFramePrefix_;
bool usePnP_;
std::shared_ptr<tf2_ros::TransformBroadcaster> tfBroadcaster_;
unsigned int num_objects_;
std::vector<double> objectHeight_;
std::vector<double> objectWidth_;
image_transport::SubscriberFilter rgbSub_;
image_transport::SubscriberFilter depthSub_;
message_filters::Subscriber<sensor_msgs::msg::CameraInfo> cameraInfoSub_;
message_filters::Subscriber<pipe_msgs::msg::BoundingBoxes> bboxSub_;
bool tf_broadcast_;
message_filters::Subscriber<yolov3_msg::msg::BoundingBoxes> bboxSub_;
typedef message_filters::sync_policies::ApproximateTime<
sensor_msgs::msg::Image,
sensor_msgs::msg::Image,
sensor_msgs::msg::CameraInfo,
pipe_msgs::msg::BoundingBoxes> MyApproxSyncPolicy;
yolov3_msg::msg::BoundingBoxes> MyApproxSyncPolicy;
message_filters::Synchronizer<MyApproxSyncPolicy> * approxSync_;

View File

@ -1,32 +0,0 @@
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, SetEnvironmentVariable
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node


def generate_launch_description():
    """Launch find_pose_node with configurable topic remappings.

    Returns a LaunchDescription that:
      * forces unbuffered stdout logging so node output appears immediately,
      * declares the configurable topic/prefix arguments,
      * starts the find_pose_node executable with those remappings applied.
    """
    return LaunchDescription([
        # Route rcutils logging to stdout, unbuffered, so logs show up live.
        SetEnvironmentVariable('RCUTILS_LOGGING_USE_STDOUT', '1'),
        SetEnvironmentVariable('RCUTILS_LOGGING_BUFFERED_STREAM', '0'),

        # Launch arguments
        DeclareLaunchArgument('object_prefix', default_value='testingggggg', description='TF prefix of objects.'),
        DeclareLaunchArgument('rgb_topic', default_value='rgb_img', description='Image topic.'),
        DeclareLaunchArgument('depth_topic', default_value='depth_img', description='Registered depth topic.'),
        DeclareLaunchArgument('camera_info_topic', default_value='camera_info', description='Camera info topic.'),
        # Fixed typo: "Bouding" -> "Bounding".
        DeclareLaunchArgument('bounding_box_topic', default_value='bboxes', description='Bounding box topic.'),

        # Nodes to launch
        Node(
            package='find-pose', executable='find_pose_node', output='screen',
            parameters=[{
                'object_prefix': LaunchConfiguration('object_prefix'),
            }],
            # NOTE(review): 'bounding_box_topic' is declared above but never
            # remapped here — confirm whether the node's bbox subscription
            # should also be remapped.
            remappings=[
                ('rgb_img', LaunchConfiguration('rgb_topic')),
                ('depth_img', LaunchConfiguration('depth_topic')),
                ('camera_info', LaunchConfiguration('camera_info_topic'))]),
    ])

View File

@ -22,15 +22,14 @@
<exec_depend>cv_bridge</exec_depend>
<build_depend>pcl_msgs</build_depend>
<exec_depend>pcl_msgs</exec_depend>
<build_depend>pipe_msgs</build_depend>
<exec_depend>pipe_msgs</exec_depend>
<build_depend>yolov3_msg</build_depend>
<exec_depend>yolov3_msg</exec_depend>
<build_depend>image_transport</build_depend>
<exec_depend>image_transport</exec_depend>
<build_depend>message_filters</build_depend>
<exec_depend>message_filters</exec_depend>
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>
<exec_depend>ros2launch</exec_depend>
<export>
<build_type>ament_cmake</build_type>

View File

@ -49,13 +49,15 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FindObjectROS::FindObjectROS(rclcpp::Node * node) :
node_(node),
objFramePrefix_("object"),
tf_broadcast_(true)
usePnP_(true)
{
int queueSize = 10;
tfBroadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(node);
objFramePrefix_ = node->declare_parameter("object_prefix", objFramePrefix_);
usePnP_ = node->declare_parameter("pnp", usePnP_);
RCLCPP_INFO(node->get_logger(), "object_prefix = %s", objFramePrefix_.c_str());
RCLCPP_INFO(node->get_logger(), "pnp = %s", usePnP_?"true":"false");
image_transport::TransportHints hints(node);
rgbSub_.subscribe(node, "rgb_img", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
depthSub_.subscribe(node, "/depth_img", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
@ -64,6 +66,9 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :
approxSync_ = new message_filters::Synchronizer<MyApproxSyncPolicy>(MyApproxSyncPolicy(queueSize), rgbSub_, depthSub_, cameraInfoSub_, bboxSub_);
approxSync_->registerCallback(std::bind(&FindObjectROS::estimate_pose_on_bag, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, std::placeholders::_4));
num_objects_ = 2;
objectHeight_.resize(num_objects_);
}
@ -71,8 +76,9 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :
void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSharedPtr rgbMsg,
const sensor_msgs::msg::Image::ConstSharedPtr depthMsg,
const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg,
const pipe_msgs::msg::BoundingBoxes::ConstSharedPtr bboxes)
const yolov3_msg::msg::BoundingBoxes::ConstSharedPtr bboxes)
{
std::cout << "callback!!!!!!!!" << std::endl;
if(depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1)!=0 &&
depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1)!=0)
{
@ -100,13 +106,12 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
}
size_t num_detections = bboxes->bounding_boxes.size();
std::cout << "Number of detections: " << num_detections << std::endl;
std::cout << "num_detections: " << num_detections << std::endl;
geometry_msgs::msg::TransformStamped left_trail;
geometry_msgs::msg::TransformStamped right_trail;
std::vector<geometry_msgs::msg::TransformStamped> transforms;
for(size_t i = 0; i < num_detections ; i++)
{
pipe_msgs::msg::BoundingBox object_bbox = bboxes->bounding_boxes[i];
yolov3_msg::msg::BoundingBox object_bbox = bboxes->bounding_boxes[i];
float object_width = object_bbox.xmax - object_bbox.xmin;
float object_height = object_bbox.ymax - object_bbox.ymin;
// top left is the 0,0
@ -122,15 +127,10 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
if (object_class == std::string("r_trail"))
right_trail = pose;
std::cout << "Pose for "<< object_class << " x: " << pose.transform.translation.x << " y: " << pose.transform.translation.y << " z: " << pose.transform.translation.z << std::endl;
transforms.push_back(pose);
}
if(tf_broadcast_ && transforms.size())
{
std::cout << "broadcast tf" << std::endl;
tfBroadcaster_->sendTransform(transforms);
}
std::cout << "Euclidean distance between two pipes in meters !!!!!!" << distance(left_trail, right_trail) << std::endl;
std::cout << "Angular distance between two pipes in radians !!!!!!" << angular_distance(left_trail, right_trail) << std::endl;
// geometry_msgs::msg::TransformStamped right_trail = estimate_pose(image, ptrDepth->image, 115, 116, cv::Point2f(819,600), cv::Point2f(974,600), cv::Point2f(974,716), cv::Point2f(819,716));
// geometry_msgs::msg::TransformStamped left_trail = estimate_pose(image, ptrDepth->image, 160, 168, cv::Point2f(350,548), cv::Point2f(510,548), cv::Point2f(510,716), cv::Point2f(350,716));
std::cout << "Distance in meters !!!!!!" << distance(left_trail, right_trail) << std::endl;
std::cout << std::endl;
}
catch(const cv_bridge::Exception & e)
@ -142,6 +142,16 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_image, cv::Mat depth, float objectWidth, float objectHeight, cv::Point2f point_1, cv::Point2f point_2, cv::Point2f point_3, cv::Point2f point_4)
{
std::vector<geometry_msgs::msg::TransformStamped> transforms;
// store bounding boxes. (This should come from yolo ideally)
// float objectWidth = 160; // width of bounding box in pixels
// float objectHeight = 168; // height of bounding box in pixels
// float objectWidth = 155; // width of bounding box in pixels
// float objectHeight = 116; // height of bounding box in pixels
geometry_msgs::msg::TransformStamped transform;
transform.transform.rotation.x=0;
transform.transform.rotation.y=0;
@ -150,10 +160,12 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
transform.transform.translation.x=0;
transform.transform.translation.y=0;
transform.transform.translation.z=0;
transform.child_frame_id = "";
transform.child_frame_id = "l_trail";
transform.header.frame_id = "camera_link";
// transform.header.stamp.sec = ros::Time::now();
tf2::Quaternion q;
float depthConstant = 1.0f/640.8887939453125; // 0.00156033 // 1.0f/cameraInfoMsg->K[4];
// std::cout << "depthconstant: " << depthConstant << std::endl;
std::vector<cv::Point3f> objectPoints(4);
std::vector<cv::Point2f> imagePoints(4);
objectPoints[0] = cv::Point3f(-0.5, -(objectHeight/objectWidth)/2.0f,0);
@ -161,6 +173,14 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
objectPoints[2] = cv::Point3f(0.5,(objectHeight/objectWidth)/2.0f,0);
objectPoints[3] = cv::Point3f(-0.5,(objectHeight/objectWidth)/2.0f,0);
// QPointF pt = iter->map(QPointF(0, 0));
// imagePoints[0] = cv::Point2f(350,548);
// // pt = iter->map(QPointF(objectWidth, 0));
// imagePoints[1] = cv::Point2f(510,548);
// // pt = iter->map(QPointF(objectWidth, objectHeight));
// imagePoints[2] = cv::Point2f(510,716);
// // pt = iter->map(QPointF(0, objectHeight));
// imagePoints[3] = cv::Point2f(350,716);
// QPointF pt = iter->map(QPointF(0, 0));
imagePoints[0] = point_1;
// pt = iter->map(QPointF(objectWidth, 0));
imagePoints[1] = point_2;
@ -180,7 +200,7 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
cv::Mat tvec(1,3, CV_64FC1);
cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
// std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;
// std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;
cv::Mat R;
cv::Rodrigues(rvec, R);
@ -196,8 +216,8 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
center.x()+0.5f, center.y()+0.5f,
float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
1.0f/depthConstant, 1.0f/depthConstant);
// std::cout << "center x: " << center.x() << " y: " << center.y() << std::endl;
// std::cout << "center 3D x: " << center3D[0] << " y: " << center3D[1] <<" z: " << center3D[1] << std::endl;
// std::cout << "center x: " << center.x() << " y: " << center.y() << std::endl;
// std::cout << "center 3D x: " << center3D[0] << " y: " << center3D[1] <<" z: " << center3D[1] << std::endl;
transform.transform.translation.x = tvec.at<double>(0)*(center3D.val[2]/tvec.at<double>(2));
transform.transform.translation.y = tvec.at<double>(1)*(center3D.val[2]/tvec.at<double>(2));
@ -208,7 +228,12 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
q2.setRPY(CV_PI/2.0, CV_PI/2.0, 0);
q *= q2;
transform.transform.rotation = tf2::toMsg(q.normalized());
transforms.push_back(transform);
return transform;
// if(transforms.size())
// {
// tfBroadcaster_->sendTransform(transforms);
// }
}
cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
@ -248,6 +273,7 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
bool isValid;
if(isInMM)
{
// std::cout << "depth is in mm!!" << std::endl;
depth = (float)depthImage.at<uint16_t>(y,x);
isValid = depth != 0.0f;
}
@ -256,12 +282,14 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
depth = depthImage.at<float>(y,x);
isValid = std::isfinite(depth) && depth > 0.0f;
// std::cout << "depth is NOT in mm!! " << depth << std::endl;
}
// Check for invalid measurements
if (!isValid)
{
pt.val[0] = pt.val[1] = pt.val[2] = bad_point;
// std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
}
else
{
@ -269,7 +297,7 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
pt.val[0] = (float(x) - center_x) * depth * constant_x;
pt.val[1] = (float(y) - center_y) * depth * constant_y;
pt.val[2] = depth*unit_scaling;
// std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
// td::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
}
return pt;
}
@ -282,21 +310,3 @@ double FindObjectROS::distance(geometry_msgs::msg::TransformStamped point1, geom
vector.z = point2.transform.translation.z - point1.transform.translation.z;
return std::sqrt(vector.x*vector.x + vector.y*vector.y + vector.z*vector.z);
}
double FindObjectROS::angular_distance(geometry_msgs::msg::TransformStamped point1, geometry_msgs::msg::TransformStamped point2)
{
tf2::Quaternion quat1(point1.transform.rotation.x, point1.transform.rotation.y, point1.transform.rotation.z, point1.transform.rotation.w);
tf2::Quaternion quat2(point2.transform.rotation.x, point2.transform.rotation.y, point2.transform.rotation.z, point2.transform.rotation.w);
// Assume that 'pos1' and 'pos2' are the positions of the two points
// and 'quat1' and 'quat2' are their corresponding quaternions
tf2::Vector3 diff;
diff[0] = point2.transform.translation.x - point1.transform.translation.x;
diff[1] = point2.transform.translation.y - point1.transform.translation.y;
diff[2] = point2.transform.translation.z - point1.transform.translation.z;
tf2::Quaternion rot = quat2 * quat1.inverse();
tf2::Quaternion diff_quat(0, diff.getX(), diff.getY(), diff.getZ());
tf2::Quaternion result = rot * diff_quat * rot.inverse();
double angle = 2 * std::acos(result.getW());
return angle;
}

View File

@ -1,36 +0,0 @@
# pipe_msgs: interface-only ament package generating the BoundingBox /
# BoundingBoxes message types used by the detector and pose-estimation nodes.
cmake_minimum_required(VERSION 3.8)
project(pipe_msgs)

# Enable strict warnings on GCC/Clang builds.
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  add_compile_options(-Wall -Wextra -Wpedantic)
endif()

# find dependencies
find_package(ament_cmake REQUIRED)
# uncomment the following section in order to fill in
# further dependencies manually.
# find_package(<dependency> REQUIRED)

if(BUILD_TESTING)
  find_package(ament_lint_auto REQUIRED)
  # the following line skips the linter which checks for copyrights
  # comment the line when a copyright and license is added to all source files
  set(ament_cmake_copyright_FOUND TRUE)
  # the following line skips cpplint (only works in a git repo)
  # comment the line when this package is in a git repo and when
  # a copyright and license is added to all source files
  set(ament_cmake_cpplint_FOUND TRUE)
  ament_lint_auto_find_test_dependencies()
endif()

# Message generation for the bounding-box interfaces.
find_package(std_msgs REQUIRED)
find_package(rosidl_default_generators REQUIRED)
rosidl_generate_interfaces(${PROJECT_NAME}
  "msg/BoundingBox.msg"
  "msg/BoundingBoxes.msg"
  DEPENDENCIES std_msgs # packages the above messages depend on (std_msgs only)
)
ament_package()

View File

@ -1,22 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<!-- Package manifest for pipe_msgs: an interface-only package providing the
     BoundingBox / BoundingBoxes message definitions. -->
<package format="3">
  <name>pipe_msgs</name>
  <version>0.0.0</version>
  <description>TODO: Package description</description>
  <maintainer email="eic.apoorva@gmail.com">parallels</maintainer>
  <license>TODO: License declaration</license>
  <buildtool_depend>ament_cmake</buildtool_depend>
  <test_depend>ament_lint_auto</test_depend>
  <test_depend>ament_lint_common</test_depend>
  <!-- Message fields use std_msgs types; rosidl generators build the
       language bindings at compile time, the runtime is needed by users. -->
  <depend>std_msgs</depend>
  <build_depend>rosidl_default_generators</build_depend>
  <exec_depend>rosidl_default_runtime</exec_depend>
  <!-- Marks this package as one that exports generated interfaces. -->
  <member_of_group>rosidl_interface_packages</member_of_group>
  <export>
    <build_type>ament_cmake</build_type>
  </export>
</package>

View File

Before

Width:  |  Height:  |  Size: 476 KiB

After

Width:  |  Height:  |  Size: 476 KiB

View File

Before

Width:  |  Height:  |  Size: 165 KiB

After

Width:  |  Height:  |  Size: 165 KiB

View File

@ -131,15 +131,11 @@ def run(weights=ROOT / 'yolov3.pt', # model.pt path(s)
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# Print results
number_of_uniq_det = {}
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
number_of_uniq_det[names[int(c)]] = int(n)
# Write results
append_count = 0
last_name = ""
for *xyxy, conf, cls in reversed(det):
bounding_box = BoundingBox()
c = int(cls) # integer class
@ -151,15 +147,7 @@ def run(weights=ROOT / 'yolov3.pt', # model.pt path(s)
bounding_box.xmax = int(xyxy[2])
bounding_box.ymax = int(xyxy[3])
bounding_box.id = c
if last_name != names[c]:
append_count = 0
if number_of_uniq_det[names[c]] > 1:
bounding_box.class_id = names[c] + "_" + str(append_count)
append_count+=1
last_name = names[c]
else:
bounding_box.class_id = names[c]
append_count = 0
bounding_box.class_id = names[c]
bounding_boxes_msg.bounding_boxes.append(bounding_box)
# Print time (inference-only)
LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')

Some files were not shown because too many files have changed in this diff Show More