Compare commits
2 Commits
cf0fdc175b ... 0e1db823a8

| Author | SHA1 | Date |
|---|---|---|
| | 0e1db823a8 | |
| | 61e7117511 | |
@@ -59,9 +59,9 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :
	RCLCPP_INFO(node->get_logger(), "object_prefix = %s", objFramePrefix_.c_str());
	RCLCPP_INFO(node->get_logger(), "pnp = %s", usePnP_?"true":"false");
	image_transport::TransportHints hints(node);
	rgbSub_.subscribe(node, "/camera/color/image_raw", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
	depthSub_.subscribe(node, "/camera/aligned_depth_to_color/image_raw", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
	cameraInfoSub_.subscribe(node, "/camera/color/camera_info", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
	rgbSub_.subscribe(node, "rgb_img", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
	depthSub_.subscribe(node, "/depth_img", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
	cameraInfoSub_.subscribe(node, "/camera_info", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
	bboxSub_.subscribe(node, "/bboxes", rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());

	approxSync_ = new message_filters::Synchronizer<MyApproxSyncPolicy>(MyApproxSyncPolicy(queueSize), rgbSub_, depthSub_, cameraInfoSub_, bboxSub_);
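Note: the MyApproxSyncPolicy type and the callback registration are not part of this hunk. A minimal sketch of what a four-topic ApproximateTime policy and the hookup to estimate_pose_on_bag could look like, with names assumed from the surrounding code, is:

```cpp
// Sketch only (assumed, not shown in this diff): a four-input ApproximateTime policy
// matching the rgb, depth, camera_info and bounding-box subscribers above.
typedef message_filters::sync_policies::ApproximateTime<
	sensor_msgs::msg::Image,
	sensor_msgs::msg::Image,
	sensor_msgs::msg::CameraInfo,
	yolov3_msg::msg::BoundingBoxes> MyApproxSyncPolicy;

// The synchronized callback would then be attached roughly like this:
approxSync_->registerCallback(std::bind(&FindObjectROS::estimate_pose_on_bag, this,
	std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, std::placeholders::_4));
```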
@@ -78,6 +78,7 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
		const sensor_msgs::msg::CameraInfo::ConstSharedPtr cameraInfoMsg,
		const yolov3_msg::msg::BoundingBoxes::ConstSharedPtr bboxes)
{
	std::cout << "callback!!!!!!!!" << std::endl;
	if(depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1)!=0 &&
	   depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1)!=0)
	{
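For context, the ptrDepth used further down is not defined in this hunk; the 16UC1/32FC1 check above typically guards a cv_bridge conversion along these lines (a sketch, not the repository's exact code):

```cpp
// Assumed conversion (not visible in this hunk): wrap the depth message for OpenCV access.
cv_bridge::CvImageConstPtr ptrDepth = cv_bridge::toCvShare(depthMsg);
cv::Mat depth = ptrDepth->image;  // 16UC1 depth in millimetres or 32FC1 depth in metres
```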
@@ -106,7 +107,8 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha

	size_t num_detections = bboxes->bounding_boxes.size();
	std::cout << "num_detections: " << num_detections << std::endl;
	std::vector<geometry_msgs::msg::TransformStamped> pose_vec;
	geometry_msgs::msg::TransformStamped left_trail;
	geometry_msgs::msg::TransformStamped right_trail;
	for(size_t i = 0; i < num_detections ; i++)
	{
		yolov3_msg::msg::BoundingBox object_bbox = bboxes->bounding_boxes[i];
@@ -120,12 +122,15 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
		std::string object_class = object_bbox.class_id;
		geometry_msgs::msg::TransformStamped pose = estimate_pose(image, ptrDepth->image, object_width, object_height, top_left, top_right, bottom_right, bottom_left);
		pose.child_frame_id = object_class;
		pose_vec.push_back(pose);
		if (object_class == std::string("l_trail"))
			left_trail = pose;
		if (object_class == std::string("r_trail"))
			right_trail = pose;
		std::cout << "Pose for "<< object_class << " x: " << pose.transform.translation.x << " y: " << pose.transform.translation.y << " z: " << pose.transform.translation.z << std::endl;
	}
	// geometry_msgs::msg::TransformStamped right_trail = estimate_pose(image, ptrDepth->image, 115, 116, cv::Point2f(819,600), cv::Point2f(974,600), cv::Point2f(974,716), cv::Point2f(819,716));
	// geometry_msgs::msg::TransformStamped left_trail = estimate_pose(image, ptrDepth->image, 160, 168, cv::Point2f(350,548), cv::Point2f(510,548), cv::Point2f(510,716), cv::Point2f(350,716));
	// std::cout << "Distance in meters !!!!!!" << distance(left_trail, right_trail) << std::endl;
	std::cout << "Distance in meters !!!!!!" << distance(left_trail, right_trail) << std::endl;
	std::cout << std::endl;
}
catch(const cv_bridge::Exception & e)
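The distance() helper called above is not shown in this diff; assuming it returns the Euclidean distance between the two transform translations, a minimal sketch would be:

```cpp
// Hypothetical helper (not part of this diff): Euclidean distance in metres
// between the translations of two TransformStamped poses.
#include <cmath>
#include <geometry_msgs/msg/transform_stamped.hpp>

double distance(const geometry_msgs::msg::TransformStamped & a,
                const geometry_msgs::msg::TransformStamped & b)
{
	const double dx = a.transform.translation.x - b.transform.translation.x;
	const double dy = a.transform.translation.y - b.transform.translation.y;
	const double dz = a.transform.translation.z - b.transform.translation.z;
	return std::sqrt(dx*dx + dy*dy + dz*dz);
}
```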
@@ -195,7 +200,7 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
	cv::Mat tvec(1,3, CV_64FC1);
	cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

	std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;
	// std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;

	cv::Mat R;
	cv::Rodrigues(rvec, R);

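The code after this hunk is not shown; one common way to turn the solvePnP result (rvec, tvec) into the geometry_msgs::msg::TransformStamped that estimate_pose returns is sketched below, using tf2 for the rotation conversion. This is illustrative only, not the repository's actual implementation.

```cpp
// Sketch only: pack a solvePnP pose into a TransformStamped.
#include <opencv2/calib3d.hpp>
#include <tf2/LinearMath/Matrix3x3.h>
#include <tf2/LinearMath/Quaternion.h>
#include <geometry_msgs/msg/transform_stamped.hpp>

geometry_msgs::msg::TransformStamped toTransform(const cv::Mat & rvec, const cv::Mat & tvec)
{
	cv::Mat R;
	cv::Rodrigues(rvec, R);  // rotation vector -> 3x3 rotation matrix

	tf2::Matrix3x3 m(
		R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2),
		R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2),
		R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2));
	tf2::Quaternion q;
	m.getRotation(q);  // rotation matrix -> quaternion

	geometry_msgs::msg::TransformStamped t;
	t.transform.translation.x = tvec.at<double>(0);
	t.transform.translation.y = tvec.at<double>(1);
	t.transform.translation.z = tvec.at<double>(2);
	t.transform.rotation.x = q.x();
	t.transform.rotation.y = q.y();
	t.transform.rotation.z = q.z();
	t.transform.rotation.w = q.w();
	return t;
}
```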
@@ -43,6 +43,7 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterb
import numpy as np
from yolov3_msg.msg import BoundingBox, BoundingBoxes
from rclpy.clock import Clock
from sensor_msgs.msg import CameraInfo

@torch.no_grad()
def run(weights=ROOT / 'yolov3.pt',  # model.pt path(s)
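As used further down, run() returns the annotated image together with a BoundingBoxes message. How that message is populated is not visible in this diff; a sketch under the assumption that each detection carries a class name, confidence, and pixel box could look like this (only class_id and bounding_boxes are confirmed by the diff; the other field names are assumptions):

```python
# Sketch only: build a BoundingBoxes message from raw detections.
# class_id and bounding_boxes appear in the diff; xmin/ymin/xmax/ymax/probability are assumed field names.
from yolov3_msg.msg import BoundingBox, BoundingBoxes

def detections_to_msg(detections):
    bboxes = BoundingBoxes()
    for cls_name, conf, (x1, y1, x2, y2) in detections:
        box = BoundingBox()
        box.class_id = cls_name
        box.probability = float(conf)
        box.xmin, box.ymin = int(x1), int(y1)
        box.xmax, box.ymax = int(x2), int(y2)
        bboxes.bounding_boxes.append(box)
    return bboxes
```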
@@ -161,27 +162,59 @@ class DetectOnBag(Node):

    def __init__(self):
        super().__init__('detect_on_bag')
        self.camera_info = None
        self.depth_img = None
        self.subscription = self.create_subscription(
            Image,
            '/camera/color/image_raw',
            self.image_callback,
            10)
        self.subscription  # prevent unused variable warning
        self.subscription = self.create_subscription(
            Image,
            '/camera/aligned_depth_to_color/image_raw',
            self.image_depth_callback,
            10)
        self.subscription
        self.subscription = self.create_subscription(
            CameraInfo,
            '/camera/color/camera_info',
            self.camera_info_callback,
            10)
        self.subscription
        self.detected_img_pub = self.create_publisher(Image, '/detection_image', 10)
        self.bboxes_pub = self.create_publisher(BoundingBoxes, '/bboxes', 10)
        self.depth_pub = self.create_publisher(Image, '/depth_img', 10)
        self.img_pub = self.create_publisher(Image, '/rgb_img', 10)
        self.info_pub = self.create_publisher(CameraInfo, '/camera_info', 10)

    def camera_info_callback(self, msg):
        print("camera info cb")
        # print(msg.header.stamp)
        self.camera_info = msg

    def image_depth_callback(self, msg):
        print("depth info cb")
        # print(msg.header.stamp)
        self.depth_img = msg

    def image_callback(self, msg):
        self.get_logger().info('Image')
        print(msg.header.stamp)
        print("rgb img cb")
        # print(msg.header.stamp)

        bridge = CvBridge()
        cv_image = bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
        detected_img, bboxes = run('src/yolov3/runs/train/exp14/weights/best.pt', cv_image)
        detected_img, bboxes = run('src/yolov3/runs/train/exp15/weights/best.pt', cv_image)
        self.detected_img_pub.publish(self.numpy_array_to_image_msg(detected_img))
        bboxes.image_header = msg.header
        bboxes.header.stamp = msg.header.stamp
        bboxes.header.frame_id = 'detection'
        self.bboxes_pub.publish(bboxes)
        if self.depth_img:
            self.depth_pub.publish(self.depth_img)
            self.img_pub.publish(msg)
        if self.camera_info:
            self.info_pub.publish(self.camera_info)

    def numpy_array_to_image_msg(self, numpy_array):
        # Create a CvBridge object

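For completeness, a typical entry point for spinning the DetectOnBag node (assumed, not part of this diff) would look like:

```python
# Sketch only: standard rclpy entry point for the node above.
import rclpy

def main(args=None):
    rclpy.init(args=args)
    node = DetectOnBag()
    try:
        rclpy.spin(node)
    finally:
        node.destroy_node()
        rclpy.shutdown()

if __name__ == '__main__':
    main()
```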