find pose cleanup and final launch file update
This commit is contained in:
parent 821e6cc0ba
commit d7ec4d8754
@@ -79,15 +79,9 @@ private:
private:
rclcpp::Node * node_;
rclcpp::Publisher<std_msgs::msg::Float32MultiArray>::SharedPtr pub_;
// rclcpp::Publisher<find_object_2d::msg::ObjectsStamped>::SharedPtr pubStamped_;
// rclcpp::Publisher<find_object_2d::msg::DetectionInfo>::SharedPtr pubInfo_;

std::string objFramePrefix_;
bool usePnP_;
std::shared_ptr<tf2_ros::TransformBroadcaster> tfBroadcaster_;
unsigned int num_objects_;
std::vector<double> objectHeight_;
std::vector<double> objectWidth_;
image_transport::SubscriberFilter rgbSub_;
image_transport::SubscriberFilter depthSub_;
message_filters::Subscriber<sensor_msgs::msg::CameraInfo> cameraInfoSub_;

@@ -12,11 +12,12 @@ def generate_launch_description():
SetEnvironmentVariable('RCUTILS_LOGGING_BUFFERED_STREAM', '0'),

# Launch arguments
DeclareLaunchArgument('object_prefix', default_value='object', description='TF prefix of objects.'),
DeclareLaunchArgument('object_prefix', default_value='testingggggg', description='TF prefix of objects.'),

DeclareLaunchArgument('rgb_topic', default_value='camera/rgb/image_rect_color', description='Image topic.'),
DeclareLaunchArgument('depth_topic', default_value='camera/depth_registered/image_raw', description='Registered depth topic.'),
DeclareLaunchArgument('camera_info_topic', default_value='camera/rgb/camera_info', description='Camera info topic.'),
DeclareLaunchArgument('rgb_topic', default_value='rgb_img', description='Image topic.'),
DeclareLaunchArgument('depth_topic', default_value='depth_img', description='Registered depth topic.'),
DeclareLaunchArgument('camera_info_topic', default_value='camera_info', description='Camera info topic.'),
DeclareLaunchArgument('bounding_box_topic', default_value='bboxes', description='Bounding box topic.'),

# Nodes to launch
Node(
@@ -25,7 +26,7 @@ def generate_launch_description():
'object_prefix':LaunchConfiguration('object_prefix'),
}],
remappings=[
('rgb/image_rect_color', LaunchConfiguration('rgb_topic')),
('depth_registered/image_raw', LaunchConfiguration('depth_topic')),
('depth_registered/camera_info', LaunchConfiguration('camera_info_topic'))]),
('rgb_img', LaunchConfiguration('rgb_topic')),
('depth_img', LaunchConfiguration('depth_topic')),
('camera_info', LaunchConfiguration('camera_info_topic'))]),
])

@@ -49,16 +49,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FindObjectROS::FindObjectROS(rclcpp::Node * node) :
node_(node),
objFramePrefix_("object"),
usePnP_(true),
tf_broadcast_(true)
{
int queueSize = 10;
tfBroadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(node);

objFramePrefix_ = node->declare_parameter("object_prefix", objFramePrefix_);
usePnP_ = node->declare_parameter("pnp", usePnP_);
RCLCPP_INFO(node->get_logger(), "object_prefix = %s", objFramePrefix_.c_str());
RCLCPP_INFO(node->get_logger(), "pnp = %s", usePnP_?"true":"false");
image_transport::TransportHints hints(node);
rgbSub_.subscribe(node, "rgb_img", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
depthSub_.subscribe(node, "/depth_img", hints.getTransport(), rclcpp::QoS(1).reliability((rmw_qos_reliability_policy_t)1).get_rmw_qos_profile());
@@ -67,9 +64,6 @@ FindObjectROS::FindObjectROS(rclcpp::Node * node) :

approxSync_ = new message_filters::Synchronizer<MyApproxSyncPolicy>(MyApproxSyncPolicy(queueSize), rgbSub_, depthSub_, cameraInfoSub_, bboxSub_);
approxSync_->registerCallback(std::bind(&FindObjectROS::estimate_pose_on_bag, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, std::placeholders::_4));

num_objects_ = 2;
objectHeight_.resize(num_objects_);
}

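
The approxSync_ created above relies on a MyApproxSyncPolicy type that this diff does not show. A hedged sketch of what such a policy typically looks like, assuming an ApproximateTime policy over the two images, the camera info, and a yolov3_msg bounding-box array message (the message name BoundingBoxes is a guess inferred from bboxes->bounding_boxes in the callback, not confirmed by this commit):

    #include <message_filters/synchronizer.h>
    #include <message_filters/sync_policies/approximate_time.h>
    #include <sensor_msgs/msg/image.hpp>
    #include <sensor_msgs/msg/camera_info.hpp>
    #include <yolov3_msg/msg/bounding_boxes.hpp>   // assumed header; the message package is not shown in this diff

    // Assumed definition: approximate-time sync over rgb_img, depth_img, camera_info and the
    // YOLO bounding boxes, matching the four callback arguments of estimate_pose_on_bag.
    typedef message_filters::sync_policies::ApproximateTime<
        sensor_msgs::msg::Image,
        sensor_msgs::msg::Image,
        sensor_msgs::msg::CameraInfo,
        yolov3_msg::msg::BoundingBoxes> MyApproxSyncPolicy;
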
@@ -110,7 +104,6 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
geometry_msgs::msg::TransformStamped left_trail;
geometry_msgs::msg::TransformStamped right_trail;
std::vector<geometry_msgs::msg::TransformStamped> transforms;
std::vector<std::string> detection_names;
for(size_t i = 0; i < num_detections ; i++)
{
yolov3_msg::msg::BoundingBox object_bbox = bboxes->bounding_boxes[i];
@@ -123,7 +116,6 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha
cv::Point2d bottom_left = cv::Point2f(float(object_bbox.xmax), float(object_bbox.ymin));
std::string object_class = object_bbox.class_id;
geometry_msgs::msg::TransformStamped pose = estimate_pose(image, ptrDepth->image, object_width, object_height, top_left, top_right, bottom_right, bottom_left);
detection_names.push_back(object_class);
pose.child_frame_id = object_class;
if (object_class == std::string("l_trail"))
left_trail = pose;
@@ -150,13 +142,6 @@ void FindObjectROS::estimate_pose_on_bag(const sensor_msgs::msg::Image::ConstSha

geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_image, cv::Mat depth, float objectWidth, float objectHeight, cv::Point2f point_1, cv::Point2f point_2, cv::Point2f point_3, cv::Point2f point_4)
{
// store bounding boxes. (This should come from yolo ideally)
// float objectWidth = 160; // width of bounding box in pixels
// float objectHeight = 168; // height of bounding box in pixels
// float objectWidth = 155; // width of bounding box in pixels
// float objectHeight = 116; // height of bounding box in pixels

geometry_msgs::msg::TransformStamped transform;
transform.transform.rotation.x=0;
transform.transform.rotation.y=0;
@@ -165,12 +150,10 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
transform.transform.translation.x=0;
transform.transform.translation.y=0;
transform.transform.translation.z=0;
transform.child_frame_id = "l_trail";
transform.child_frame_id = "";
transform.header.frame_id = "camera_link";
// transform.header.stamp.sec = ros::Time::now();
tf2::Quaternion q;
float depthConstant = 1.0f/640.8887939453125; // 0.00156033 // 1.0f/cameraInfoMsg->K[4];
// std::cout << "depthconstant: " << depthConstant << std::endl;
std::vector<cv::Point3f> objectPoints(4);
std::vector<cv::Point2f> imagePoints(4);
objectPoints[0] = cv::Point3f(-0.5, -(objectHeight/objectWidth)/2.0f,0);
@@ -178,14 +161,6 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
objectPoints[2] = cv::Point3f(0.5,(objectHeight/objectWidth)/2.0f,0);
objectPoints[3] = cv::Point3f(-0.5,(objectHeight/objectWidth)/2.0f,0);
// QPointF pt = iter->map(QPointF(0, 0));
// imagePoints[0] = cv::Point2f(350,548);
// // pt = iter->map(QPointF(objectWidth, 0));
// imagePoints[1] = cv::Point2f(510,548);
// // pt = iter->map(QPointF(objectWidth, objectHeight));
// imagePoints[2] = cv::Point2f(510,716);
// // pt = iter->map(QPointF(0, objectHeight));
// imagePoints[3] = cv::Point2f(350,716);
// QPointF pt = iter->map(QPointF(0, 0));
imagePoints[0] = point_1;
// pt = iter->map(QPointF(objectWidth, 0));
imagePoints[1] = point_2;
@@ -205,7 +180,7 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
cv::Mat tvec(1,3, CV_64FC1);
cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

// std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;
// std::cout << "Pose tvec x: " << tvec.at<double>(0) << " y: " << tvec.at<double>(1) << " z: " << tvec.at<double>(2) << std::endl;

cv::Mat R;
cv::Rodrigues(rvec, R);
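
For context, the PnP step above can be exercised in isolation. A minimal self-contained sketch, assuming OpenCV 4, the unit-width planar object model built from the bounding-box aspect ratio, and placeholder pixel values taken from the commented-out numbers in this diff (the camera matrix here is an assumption using fx = fy = 1/depthConstant and an arbitrary image centre, not the node's real CameraInfo):

    #include <opencv2/core.hpp>
    #include <opencv2/calib3d.hpp>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Unit-width planar model, scaled in y by the bounding-box aspect ratio
        // (mirrors the objectPoints built in estimate_pose above).
        float objectWidth = 160.f, objectHeight = 168.f;           // placeholder pixel sizes
        float a = (objectHeight / objectWidth) / 2.0f;
        std::vector<cv::Point3f> objectPoints = {
            {-0.5f, -a, 0.f}, {0.5f, -a, 0.f}, {0.5f, a, 0.f}, {-0.5f, a, 0.f}};

        // Bounding-box corners in the image (placeholder values from the commented code).
        std::vector<cv::Point2f> imagePoints = {
            {350.f, 548.f}, {510.f, 548.f}, {510.f, 716.f}, {350.f, 716.f}};

        // Pinhole intrinsics: fx = fy = 1/depthConstant; cx/cy assumed at the image centre.
        double fx = 640.8887939453125, fy = fx, cx = 320.0, cy = 240.0;
        cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);
        cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1);

        cv::Mat rvec(1, 3, CV_64FC1), tvec(1, 3, CV_64FC1);
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

        cv::Mat R;
        cv::Rodrigues(rvec, R);                                    // rotation vector -> 3x3 matrix

        // Because the model is 1 unit wide, tvec is expressed in object-width units here.
        std::cout << "tvec: " << tvec << std::endl;
        std::cout << "R: " << R << std::endl;
        return 0;
    }
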
@@ -221,8 +196,8 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
center.x()+0.5f, center.y()+0.5f,
float(depth.cols/2)-0.5f, float(depth.rows/2)-0.5f,
1.0f/depthConstant, 1.0f/depthConstant);
// std::cout << "center x: " << center.x() << " y: " << center.y() << std::endl;
// std::cout << "center 3D x: " << center3D[0] << " y: " << center3D[1] <<" z: " << center3D[1] << std::endl;
// std::cout << "center x: " << center.x() << " y: " << center.y() << std::endl;
// std::cout << "center 3D x: " << center3D[0] << " y: " << center3D[1] <<" z: " << center3D[1] << std::endl;

transform.transform.translation.x = tvec.at<double>(0)*(center3D.val[2]/tvec.at<double>(2));
transform.transform.translation.y = tvec.at<double>(1)*(center3D.val[2]/tvec.at<double>(2));
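
The translation above rescales the raw PnP result: since the object model handed to solvePnP is 1 unit wide, tvec comes back in object-width units, and multiplying by center3D.val[2]/tvec.at<double>(2) rescales it so the z component matches the metric depth measured at the bounding-box centre. A small sketch of just that rescaling (the helper name and signature are illustrative, not node API):

    #include <opencv2/core.hpp>

    // Rescale a PnP translation expressed in object-width units to metres, given the
    // metric depth measured at the object's centre pixel (illustrative helper only).
    cv::Vec3d rescaleToMetric(const cv::Mat & tvec, double measuredDepthZ)
    {
        double scale = measuredDepthZ / tvec.at<double>(2);   // metres per object-width unit
        return cv::Vec3d(tvec.at<double>(0) * scale,
                         tvec.at<double>(1) * scale,
                         tvec.at<double>(2) * scale);         // z becomes measuredDepthZ
    }
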
@@ -233,12 +208,7 @@ geometry_msgs::msg::TransformStamped FindObjectROS::estimate_pose(cv::Mat rgb_im
q2.setRPY(CV_PI/2.0, CV_PI/2.0, 0);
q *= q2;
transform.transform.rotation = tf2::toMsg(q.normalized());

return transform;
// if(transforms.size())
// {
// tfBroadcaster_->sendTransform(transforms);
// }
}

cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
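
The commented-out block above hints at how the collected poses would eventually be published. A minimal sketch of broadcasting a batch of transforms with tf2_ros::TransformBroadcaster, which accepts a vector of TransformStamped (the helper function is illustrative; the transforms vector is assumed to be the one built in estimate_pose_on_bag):

    #include <tf2_ros/transform_broadcaster.h>
    #include <geometry_msgs/msg/transform_stamped.hpp>
    #include <vector>

    // Illustrative helper: publish every detected object pose in one call.
    void broadcastDetections(tf2_ros::TransformBroadcaster & broadcaster,
                             const std::vector<geometry_msgs::msg::TransformStamped> & transforms)
    {
        if(!transforms.empty())
        {
            broadcaster.sendTransform(transforms);
        }
    }
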
@@ -278,7 +248,6 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
bool isValid;
if(isInMM)
{
// std::cout << "depth is in mm!!" << std::endl;
depth = (float)depthImage.at<uint16_t>(y,x);
isValid = depth != 0.0f;
}
@@ -287,14 +256,12 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,

depth = depthImage.at<float>(y,x);
isValid = std::isfinite(depth) && depth > 0.0f;
// std::cout << "depth is NOT in mm!! " << depth << std::endl;
}

// Check for invalid measurements
if (!isValid)
{
pt.val[0] = pt.val[1] = pt.val[2] = bad_point;
// std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
}
else
{
@@ -302,7 +269,7 @@ cv::Vec3f FindObjectROS::getDepth(const cv::Mat & depthImage,
pt.val[0] = (float(x) - center_x) * depth * constant_x;
pt.val[1] = (float(y) - center_y) * depth * constant_y;
pt.val[2] = depth*unit_scaling;
// td::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
// std::cout << "depth pt: " << pt.val[0] << " y: " << pt.val[1] << " z: " << pt.val[2] << std::endl;
}
return pt;
}

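
getDepth() above combines two steps: reading the depth pixel in either 16UC1 (millimetres) or 32FC1 (metres) encoding, and back-projecting it through the pinhole model. A standalone sketch of the same idea, under the assumption that constant_x/constant_y in the node correspond to (scaled) 1/fx and 1/fy; the helper name and signature are illustrative:

    #include <opencv2/core.hpp>
    #include <cmath>
    #include <limits>

    // Back-project pixel (x, y) of a depth image into camera coordinates.
    // Supports CV_16UC1 depth in millimetres and CV_32FC1 depth in metres,
    // mirroring the two branches of getDepth() above (illustrative helper).
    cv::Vec3f backProject(const cv::Mat & depthImage, int x, int y,
                          float cx, float cy, float fx, float fy)
    {
        const float bad_point = std::numeric_limits<float>::quiet_NaN();
        bool isInMM = depthImage.type() == CV_16UC1;

        float depth;
        bool isValid;
        if(isInMM)
        {
            depth = (float)depthImage.at<uint16_t>(y, x);
            isValid = depth != 0.0f;
        }
        else
        {
            depth = depthImage.at<float>(y, x);
            isValid = std::isfinite(depth) && depth > 0.0f;
        }

        if(!isValid)
        {
            return cv::Vec3f(bad_point, bad_point, bad_point);   // invalid measurement
        }

        float unit_scaling = isInMM ? 0.001f : 1.0f;              // mm -> m when needed
        float z = depth * unit_scaling;
        return cv::Vec3f((float(x) - cx) * z / fx,
                         (float(y) - cy) * z / fy,
                         z);
    }
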