initial commit
commit 717b044430
CMakeLists.txt (new file, 207 lines)
@@ -0,0 +1,207 @@
cmake_minimum_required(VERSION 3.0.2)
project(navigation_crop_row)

## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)

## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
  roscpp
  rospy
  sensor_msgs
  std_msgs
)

## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)

## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()

################################################
## Declare ROS messages, services and actions ##
################################################

## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
##   * add a build_depend tag for "message_generation"
##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
##     but can be declared for certainty nonetheless:
##     * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
##   * add "message_generation" and every package in MSG_DEP_SET to
##     find_package(catkin REQUIRED COMPONENTS ...)
##   * add "message_runtime" and every package in MSG_DEP_SET to
##     catkin_package(CATKIN_DEPENDS ...)
##   * uncomment the add_*_files sections below as needed
##     and list every .msg/.srv/.action file to be processed
##   * uncomment the generate_messages entry below
##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)

## Generate messages in the 'msg' folder
# add_message_files(
#   FILES
#   Message1.msg
#   Message2.msg
# )

## Generate services in the 'srv' folder
# add_service_files(
#   FILES
#   Service1.srv
#   Service2.srv
# )

## Generate actions in the 'action' folder
# add_action_files(
#   FILES
#   Action1.action
#   Action2.action
# )

## Generate added messages and services with any dependencies listed here
# generate_messages(
#   DEPENDENCIES
#   sensor_msgs#   std_msgs
# )

################################################
## Declare ROS dynamic reconfigure parameters ##
################################################

## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
##   * add "dynamic_reconfigure" to
##     find_package(catkin REQUIRED COMPONENTS ...)
##   * uncomment the "generate_dynamic_reconfigure_options" section below
##     and list every .cfg file to be processed

## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
#   cfg/DynReconf1.cfg
#   cfg/DynReconf2.cfg
# )

###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
#  INCLUDE_DIRS include
#  LIBRARIES navigation_crop_row
#  CATKIN_DEPENDS roscpp rospy sensor_msgs std_msgs
#  DEPENDS system_lib
)

###########
## Build ##
###########

## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
  ${catkin_INCLUDE_DIRS}
)

## Declare a C++ library
# add_library(${PROJECT_NAME}
#   src/${PROJECT_NAME}/navigation_crop_row.cpp
# )

## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})

## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/navigation_crop_row_node.cpp)

## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")

## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})

## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
#   ${catkin_LIBRARIES}
# )

#############
## Install ##
#############

# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html

## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# catkin_install_python(PROGRAMS
#   scripts/my_python_script
#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )

## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )

## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )

## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
#   FILES_MATCHING PATTERN "*.h"
#   PATTERN ".svn" EXCLUDE
# )

## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
#   # myfile1
#   # myfile2
#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )

#############
## Testing ##
#############

## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_navigation_crop_row.cpp)
# if(TARGET ${PROJECT_NAME}-test)
#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()

## Add folders to be run by python nosetests
# catkin_add_nosetests(test)
package.xml (new file, 71 lines)
@@ -0,0 +1,71 @@
<?xml version="1.0"?>
<package format="2">
  <name>navigation_crop_row</name>
  <version>0.0.0</version>
  <description>The navigation_crop_row package</description>

  <!-- One maintainer tag required, multiple allowed, one person per tag -->
  <!-- Example: -->
  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
  <maintainer email="caroline@todo.todo">caroline</maintainer>

  <!-- One license tag required, multiple allowed, one license per tag -->
  <!-- Commonly used license strings: -->
  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
  <license>TODO</license>

  <!-- Url tags are optional, but multiple are allowed, one per tag -->
  <!-- Optional attribute type can be: website, bugtracker, or repository -->
  <!-- Example: -->
  <!-- <url type="website">http://wiki.ros.org/navigation_crop_row</url> -->

  <!-- Author tags are optional, multiple are allowed, one per tag -->
  <!-- Authors do not have to be maintainers, but could be -->
  <!-- Example: -->
  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->

  <!-- The *depend tags are used to specify dependencies -->
  <!-- Dependencies can be catkin packages or system dependencies -->
  <!-- Examples: -->
  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
  <!--   <depend>roscpp</depend> -->
  <!--   Note that this is equivalent to the following: -->
  <!--     <build_depend>roscpp</build_depend> -->
  <!--     <exec_depend>roscpp</exec_depend> -->
  <!-- Use build_depend for packages you need at compile time: -->
  <!--   <build_depend>message_generation</build_depend> -->
  <!-- Use build_export_depend for packages you need in order to build against this package: -->
  <!--   <build_export_depend>message_generation</build_export_depend> -->
  <!-- Use buildtool_depend for build tool packages: -->
  <!--   <buildtool_depend>catkin</buildtool_depend> -->
  <!-- Use exec_depend for packages you need at runtime: -->
  <!--   <exec_depend>message_runtime</exec_depend> -->
  <!-- Use test_depend for packages you need only for testing: -->
  <!--   <test_depend>gtest</test_depend> -->
  <!-- Use doc_depend for packages you need only for building documentation: -->
  <!--   <doc_depend>doxygen</doc_depend> -->
  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>rospy</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_export_depend>roscpp</build_export_depend>
  <build_export_depend>rospy</build_export_depend>
  <build_export_depend>sensor_msgs</build_export_depend>
  <build_export_depend>std_msgs</build_export_depend>
  <exec_depend>roscpp</exec_depend>
  <exec_depend>rospy</exec_depend>
  <exec_depend>sensor_msgs</exec_depend>
  <exec_depend>std_msgs</exec_depend>

  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->
  </export>
</package>
scripts/__pycache__/hsv_toolbar_ros.cpython-38.pyc (new binary file)
Binary file not shown.
scripts/hsv_toolbar_ros.py (new executable file, 116 lines)
@@ -0,0 +1,116 @@
#!/usr/bin/env python3
# TBD: automatic thresholding, feature map (transformation), PLC library

import sys
import argparse
import time

import rospy
import cv2
import numpy as np

from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image

image_ros = True

max_value = 255
max_value_H = 360 // 2
low_H = 0
low_S = 0
low_V = 0
high_H = max_value_H
high_S = max_value
high_V = max_value
window_capture_name = 'Video Capture'
window_detection_name = 'Object Detection'
low_H_name = 'Low H'
low_S_name = 'Low S'
low_V_name = 'Low V'
high_H_name = 'High H'
high_S_name = 'High S'
high_V_name = 'High V'
bridge_object = CvBridge()


def on_low_H_thresh_trackbar(val):
    global low_H
    global high_H
    low_H = val
    low_H = min(high_H - 1, low_H)
    cv2.setTrackbarPos(low_H_name, window_detection_name, low_H)


def on_high_H_thresh_trackbar(val):
    global low_H
    global high_H
    high_H = val
    high_H = max(high_H, low_H + 1)
    cv2.setTrackbarPos(high_H_name, window_detection_name, high_H)


def on_low_S_thresh_trackbar(val):
    global low_S
    global high_S
    low_S = val
    low_S = min(high_S - 1, low_S)
    cv2.setTrackbarPos(low_S_name, window_detection_name, low_S)


def on_high_S_thresh_trackbar(val):
    global low_S
    global high_S
    high_S = val
    high_S = max(high_S, low_S + 1)
    cv2.setTrackbarPos(high_S_name, window_detection_name, high_S)


def on_low_V_thresh_trackbar(val):
    global low_V
    global high_V
    low_V = val
    low_V = min(high_V - 1, low_V)
    cv2.setTrackbarPos(low_V_name, window_detection_name, low_V)


def on_high_V_thresh_trackbar(val):
    global low_V
    global high_V
    high_V = val
    high_V = max(high_V, low_V + 1)
    cv2.setTrackbarPos(high_V_name, window_detection_name, high_V)


def camera_callback(data):
    rospy.loginfo("callback started")

    try:
        # "passthrough" keeps the incoming encoding (bgr8 is OpenCV's default)
        cv_image = bridge_object.imgmsg_to_cv2(data, desired_encoding="passthrough")
        #cv2.imshow("Original_image", cv_image)
    except CvBridgeError as e:
        print(e)
    cv2.waitKey(10)
    #ret1, cv_image = capture.read()

    frame_HSV = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    frame_threshold = cv2.inRange(frame_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V))

    cv2.imshow(window_capture_name, cv_image)
    cv2.imshow(window_detection_name, frame_threshold)


if not image_ros:
    capture = cv2.VideoCapture("/home/andrew/Desktop/webcam_data/webcam/harvest2.mp4")
    if not capture.isOpened():
        print("Error opening file. Please check path or name.")
else:
    bridge_object = CvBridge()
    image_sub = rospy.Subscriber("/zed/zed_node/left/image_rect_color", Image, camera_callback)  # rectified image from left camera
    # rate = rospy.Rate(10)  # 10 Hz

cv2.namedWindow(window_capture_name)
cv2.namedWindow(window_detection_name)
cv2.createTrackbar(low_H_name, window_detection_name, low_H, max_value_H, on_low_H_thresh_trackbar)
cv2.createTrackbar(high_H_name, window_detection_name, high_H, max_value_H, on_high_H_thresh_trackbar)
cv2.createTrackbar(low_S_name, window_detection_name, low_S, max_value, on_low_S_thresh_trackbar)
cv2.createTrackbar(high_S_name, window_detection_name, high_S, max_value, on_high_S_thresh_trackbar)
cv2.createTrackbar(low_V_name, window_detection_name, low_V, max_value, on_low_V_thresh_trackbar)
cv2.createTrackbar(high_V_name, window_detection_name, high_V, max_value, on_high_V_thresh_trackbar)
rospy.init_node('hsv_toolbar', anonymous=True)
rospy.loginfo("Node is starting ...")

#------------------#
while True:
    time.sleep(0.05)

cv2.destroyAllWindows()
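The "TBD: automatic thresholding" note at the top of hsv_toolbar_ros.py points at replacing the hand-tuned trackbar bounds. A minimal sketch of one way to do that with Otsu's method (already used in scripts/initial_test.py below) follows; it is not part of this commit, the node name is hypothetical, and only the subscribed topic is taken from the script above.

#!/usr/bin/env python3
# Illustrative sketch only: Otsu-based automatic threshold on the saturation channel,
# as an alternative to the manual HSV trackbars.
import cv2
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

bridge = CvBridge()

def callback(msg):
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # Otsu picks the threshold that best separates the saturation histogram,
    # so no manual low_S/high_S tuning is needed.
    thresh, mask = cv2.threshold(s, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    rospy.loginfo("Otsu threshold on S channel: %.1f", thresh)
    cv2.imshow("auto threshold", mask)
    cv2.waitKey(1)

if __name__ == "__main__":
    rospy.init_node("hsv_auto_threshold", anonymous=True)  # hypothetical node name
    rospy.Subscriber("/zed/zed_node/left/image_rect_color", Image, callback)
    rospy.spin()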
scripts/initial_opencv.py (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/usr/bin/python3

import cv2

camera = cv2.VideoCapture("/dev/video0")

while True:
    _, image = camera.read()
    cv2.imshow("image", image)
    if cv2.waitKey(1) & 0xFF == ord("s"):
        break

camera.release()
cv2.destroyAllWindows()
scripts/initial_row_detection.py (new executable file, 102 lines)
@@ -0,0 +1,102 @@
#!/usr/bin/python3
# tutorial: https://medium.com/@jamesthesken/crop-row-detection-using-python-and-opencv-93e456ddd974

import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread('/home/caroline/Pictures/trees1.png')
#cv2.imshow("img", img)
b, g, r = cv2.split(img)  # split image into blue, green and red channels
# get grayscale image by 'amplifying' the color green and getting rid of red & blue
gscale = 2*g - r - b

# Canny edge detection:
# 1. Noise reduction with a 5x5 Gaussian filter
# 2. Finding the intensity gradient: filtering with a Sobel kernel in horizontal
#    & vertical direction to get the 1st derivative in each direction,
#    then calculating the edge gradient & direction for each pixel
#    => gradient magnitude & direction
# 3. Non-maximum suppression: scan to remove unwanted pixels (not constituting an edge)
#    by checking if a pixel is the local maximum in its neighbourhood in the gradient direction
#    => binary image with thin edges
# 4. Hysteresis thresholding: threshold values maxVal & minVal determine edges & non-edges (discarded);
#    edges between the two values count only if connected to "sure edges";
#    this also removes small pixel groups, as edges are assumed to be long lines
gscale = cv2.Canny(gscale, 280, 290, apertureSize=3)  # (input image, minVal, maxVal, aperture_size = size of Sobel kernel used to find image gradients, default 3)
# default L2gradient=False => Edge_Gradient(G)=|G_x|+|G_y|; True => Edge_Gradient(G)=sqrt(G_x²+G_y²)
gscale2 = cv2.Canny(gscale, 100, 290, apertureSize=3)
gscale3 = cv2.Canny(gscale, 250, 290, apertureSize=3)
gscale4 = cv2.Canny(gscale, 280, 350, apertureSize=3)

""" plt.figure()
plt.plot(), plt.imshow(gscale)
plt.title('Canny Edge-Detection Results')
plt.xticks([]), plt.yticks([])
plt.show() """

##
plt.subplot(221)
plt.plot(), plt.imshow(gscale)
plt.title('Canny Edge-Detection 1'), plt.xticks([]), plt.yticks([])
plt.subplot(222)
plt.plot(), plt.imshow(gscale2)
plt.title('Canny Edge-Detection 2'), plt.xticks([]), plt.yticks([])
plt.subplot(223)
plt.plot(), plt.imshow(gscale3)
plt.title('Canny Edge-Detection 3'), plt.xticks([]), plt.yticks([])
plt.subplot(224)
plt.plot(), plt.imshow(gscale4)
plt.title('Canny Edge-Detection 4'), plt.xticks([]), plt.yticks([])
plt.show()
##

size = np.size(gscale)  # returns the product of the array dimensions
skel = np.zeros(gscale.shape, np.uint8)  # array of zeros
ret, gscale = cv2.threshold(gscale, 128, 255, 0)  # thresholding the image
# (grayscale source image, threshold value to classify pixel values, max value, type of thresholding)
# output: threshold that was used, thresholded image

element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))  # get kernel: (shape, size, anchor=(-1,-1))
# anchor position (-1,-1) is the center of the element; only the cross-shaped element depends on the anchor position
done = False
while not done:
    eroded = cv2.erode(gscale, element)   # erodes away boundaries of the foreground object & removes small noise
    temp = cv2.dilate(eroded, element)    # increase object area
    temp = cv2.subtract(gscale, temp)     # subtract images of same type & depth
    skel = cv2.bitwise_or(skel, temp)
    gscale = eroded.copy()
    zeros = size - cv2.countNonZero(gscale)
    if zeros == size:
        done = True

lines = cv2.HoughLines(skel, 1, np.pi/180, 130)  # lines = vector storing lines as (rho, theta)
# (output of edge detector, resolution of rho in pixels, resolution of theta in rad, threshold: min # of intersections, srn & stn)
a, b, c = lines.shape

# draw the lines
for i in range(a):
    rho = lines[i][0][0]
    theta = lines[i][0][1]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))

    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2, cv2.LINE_AA)
    # display lines: (image, point1, point2, color, thickness, line type, shift)

plt.subplot(121)  # OpenCV reads images as BGR; the conversion below corrects this so the image is displayed as RGB
plt.plot(), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.title('Row Detection'), plt.xticks([]), plt.yticks([])
plt.subplot(122)
plt.plot(), plt.imshow(skel, cmap='gray')
plt.title('Skeletal Image'), plt.xticks([]), plt.yticks([])
plt.show()
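The comment block in initial_row_detection.py walks through the four Canny stages. As a small illustration of stages 1 and 2 (the parts cv2.Canny hides), the gradient magnitude and direction can be computed by hand; this is a sketch for clarity only, not part of the commit, assuming the same grayscale input.

# Illustrative sketch only: the first two Canny stages done manually.
import cv2
import numpy as np

gray = cv2.imread('/home/caroline/Pictures/trees1.png', cv2.IMREAD_GRAYSCALE)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)          # stage 1: 5x5 Gaussian noise reduction
gx = cv2.Sobel(blurred, cv2.CV_64F, 1, 0, ksize=3)   # stage 2: horizontal derivative
gy = cv2.Sobel(blurred, cv2.CV_64F, 0, 1, ksize=3)   #          vertical derivative
magnitude = np.sqrt(gx**2 + gy**2)                   # edge gradient per pixel
direction = np.arctan2(gy, gx)                       # edge direction per pixel
# cv2.Canny then applies non-maximum suppression and hysteresis thresholding on top of this.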
scripts/initial_test.py (new executable file, 265 lines)
@@ -0,0 +1,265 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# run bagfile: rosbag play -l 'filename'
# run python script: python3 initial_test.py

import roslib
import sys
import argparse
import rospy
import cv2
import time
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import matplotlib.pyplot as plt
#import hsv_toolbar_ros as tb

LAB_boolean = True
HSV_boolean = True
RGB_boolean = True


def rescale_frame(frame, percent):
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)


#class Image_Segmentation():


class Image_Receiver():

    def __init__(self):
        #rospy.init_node('zed_image_left', anonymous=True)
        self.bridge = CvBridge()
        rospy.Subscriber("/zed/zed_node/left/image_rect_color", Image, self.callback)
        plt.ion()

    def region_selection(self, image):
        rows, columns = image.shape[:2]
        bottom_left = [0*columns / 6, 6*rows / 6]
        top_left = [1.5*columns / 6, 2*rows / 6]
        bottom_right = [6*columns / 6, 6*rows / 6]
        top_right = [4.5*columns / 6, 2*rows / 6]
        polygons = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)

        mask = np.zeros_like(image)
        cv2.fillPoly(mask, polygons, (255, 255, 255))
        masked_image = cv2.bitwise_and(image, mask)
        return masked_image

    def otsu(self, images):
        otsu_threshold = []
        image_result = []
        for i in range(len(images)):
            threshold, result = cv2.threshold(images[i], 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            otsu_threshold.append(threshold)
            image_result.append(result)
        return otsu_threshold, image_result

    def display_binary(self, images, color_space, rescale):
        color_spaces = ", ".join(color_space)
        images_rescaled = images
        if rescale == True:
            for i in range(len(images)):
                images_rescaled[i] = rescale_frame(images[i], 30)
        horizontal_concat = []
        for i in range(1, len(images), 3):
            horizontal_concat.append(np.concatenate((images_rescaled[i-1], images_rescaled[i], images_rescaled[i+1]), axis=1))
        cube_concat = horizontal_concat[0]
        for i in range(1, len(horizontal_concat)):
            cube_concat = np.concatenate((cube_concat, horizontal_concat[i]), axis=0)
        cv2.imshow('Binary Otsu Images for ' + color_spaces + ' color space', cube_concat)

    def hough(self, images, RGBimage):
        RGBimages = []
        hough_lines = []
        for i in range(len(images)):
            RGBimages.append(RGBimage)
        for i in range(len(images)):
            lines = cv2.HoughLines(images[i], 1, np.pi/180, 200)

            for rho, theta in lines[0]:
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a*rho
                y0 = b*rho
                x1 = int(x0 + 1000*(-b))
                y1 = int(y0 + 1000*(a))
                x2 = int(x0 - 1000*(-b))
                y2 = int(y0 - 1000*(a))

                cv2.line(RGBimages[i], (x1, y1), (x2, y2), (0, 0, 255), 2, cv2.LINE_AA)
            hough_lines.append(lines)
        return RGBimages, hough_lines

    def hough_linesP(self, image, binary_images):
        RGBimages = []
        hough_lines = []
        for i in range(len(binary_images)):
            RGBimages.append(image)
        for i in range(len(binary_images)):
            lines = cv2.HoughLinesP(binary_images[i], 2, np.pi/120, 10, minLineLength=250, maxLineGap=75)
            for line in lines:
                x1, y1, x2, y2 = line[0]
                cv2.line(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
            line_img = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)
            hough_lines.append(lines)
            #cv2.imshow("line_img", image)
        return image, hough_lines

    def hough_lineP(self, image, green_image):
        lines = cv2.HoughLinesP(green_image, 2, np.pi/120, 10, minLineLength=250, maxLineGap=75)
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
        line_img = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)
        #cv2.imshow("line_img", image)
        return image

    def Rgb_space(self, image):
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        R, G, B = cv2.split(rgb_image)
        hist = self.histogram(rgb_image, ["R", "G", "B"])

        return rgb_image, R, G, B

    def Lab_space(self, image):
        lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        L, A, B = cv2.split(lab_image)
        hist = self.histogram(lab_image, ["L", "A", "B"])

        return lab_image, L, A, B

    def Hsv_space(self, image):
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        H, S, V = cv2.split(hsv_image)
        hist = self.histogram(hsv_image, ["H", "S", "V"])

        """
        plt.clf()
        plt.subplot(131)
        plt.title("L")
        plt.imshow(L)
        plt.subplot(132)
        plt.title("a")
        plt.imshow(A)
        plt.subplot(133)
        plt.title("b")
        plt.imshow(B)
        plt.draw()
        plt.pause(0.005)
        """
        return hsv_image, H, S, V

    def callback(self, image_data):
        try:
            self.left_image = self.bridge.imgmsg_to_cv2(image_data, "bgr8")
        except CvBridgeError as e:
            print(e)
        rescaled = rescale_frame(self.left_image, 75)
        denoised_image = cv2.GaussianBlur(rescaled, (5, 5), 0)
        cv2.imshow("original_image", denoised_image)
        print('Image size: {width}x{height}'.format(width=rescaled.shape[1], height=rescaled.shape[0]))
        roi_image = self.region_selection(denoised_image)
        color_image = roi_image
        channels = []
        color_spaces = []
        if RGB_boolean == True:
            color_image, chR, chG, chB = self.Rgb_space(roi_image)
            channels.extend([chR, chG, chB])
            color_spaces.extend(["R", "G", "B"])
        if LAB_boolean == True:
            color_image, chL, chA, chB = self.Lab_space(roi_image)
            channels.extend([chL, chA, chB])
            color_spaces.extend(["L", "A", "B"])
        if HSV_boolean == True:
            color_image, chH, chS, chV = self.Hsv_space(roi_image)
            channels.extend([chH, chS, chV])
            color_spaces.extend(["H", "S", "V"])
        if channels and color_spaces:
            otsu_threshold, otsu_result = self.otsu(channels)
            print("Obtained thresholds: ", otsu_threshold)
            self.display_binary(otsu_result, color_spaces, True)
            #hough_images, lines = self.hough(otsu_result, roi_image)
            #hough_images, lines = self.hough_linesP(roi_image, otsu_result)
            #self.display_binary(hough_images, color_spaces, False)
            hough_lines = self.hough_lineP(otsu_result[0], otsu_result[0])
            cv2.imshow("Hough Line P Function", hough_lines)
        cv2.waitKey(3)

        #frame_threshold = cv2.inRange(color_image, (tb.low_H, tb.low_S, tb.low_V), (tb.high_H, tb.high_S, tb.high_V))

        #cv2.imshow(window_capture_name, cv_image)
        #cv2.imshow(window_detection_name, frame_threshold)

    def histogram(self, image, name):
        # create the histogram plot, with three lines, one for each color
        # plt.clf()
        plt.figure()
        plt.xlim([0, 256])
        for i in range(3):
            hist = cv2.calcHist([image], [i], None, [256], [0, 256])
            plt.plot(hist)
        plt.legend(name)
        plt.title("Color Histogram")  # of the color space named in the legend
        plt.xlabel("Color value")
        plt.ylabel("Pixel count")
        plt.show()
        return hist

        #plt.draw()
        plt.pause(0.005)
        cv2.waitKey(0)  # temporary
        plt.close('all')


def main():

    # Start the image-processing node
    rospy.init_node('zed_image_left', anonymous=True)
    # create the image receiver (sets up the subscriber and the OpenCV callback)
    detector = Image_Receiver()

    rate = rospy.Rate(10)  # 10 Hz
    # TODO: Publish the offset value obtained from the front subscriber

    # Keep the node alive until keyboard interrupt
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Detector node terminated.")
        print("Shutting down")
    cv2.destroyAllWindows()


# Execution guard for python files
if __name__ == '__main__':
    main()

"""

def imshow(img):
    cv2.imshow("..", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

img = cv2.imread('/home/andrew/Desktop/camera/crop_row_detection/images/behrImages/first.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
while True:
    resized_image = rescale_frame(img, 25)
    #imshow(resized_image)
    L, A, B = cv2.split(resized_image)
    cv2.imshow("L_Channel", L)  # For L Channel
    cv2.imshow("A_Channel", A)  # For A Channel (Here's what you need)
    cv2.imshow("B_Channel", B)  # For B Channel
    ch = cv2.waitKey(1)
    if ch == 27:
        break
    cv2.waitKey(5)

cv2.waitKey(0)
cv2.destroyAllWindows()

"""
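main() in initial_test.py creates a 10 Hz rate and leaves a TODO for publishing the lateral offset. A minimal sketch of how such a publisher could be wired in follows; it is not part of this commit, and the topic name and message type are illustrative assumptions, with the offset itself a placeholder.

#!/usr/bin/env python3
# Illustrative sketch only: publish a lateral offset value at 10 Hz.
# The topic "/navigation_crop_row/offset" and the Float32 type are assumptions.
import rospy
from std_msgs.msg import Float32

def main():
    rospy.init_node("offset_publisher", anonymous=True)  # hypothetical node name
    pub = rospy.Publisher("/navigation_crop_row/offset", Float32, queue_size=10)
    rate = rospy.Rate(10)  # 10 Hz, matching the rate created in initial_test.py
    offset = 0.0           # placeholder; would come from the crop-row detection callback
    while not rospy.is_shutdown():
        pub.publish(Float32(offset))
        rate.sleep()

if __name__ == "__main__":
    main()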
scripts/opencv_tutorial.py (new executable file, 91 lines)
@@ -0,0 +1,91 @@
#!/usr/bin/python3
# https://pyimagesearch.com/2018/07/19/opencv-tutorial-a-guide-to-learn-opencv/
# import the necessary packages
import cv2
import imutils
import numpy as np
import argparse


def rescale_frame(frame, percent):  # rescale image, preserving the aspect ratio
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
# alternative: resized = imutils.resize(image, width=300) => give goal height or width


def extract_channels(image, color_space):
    (ch1, ch2, ch3) = cv2.split(image)
    cv2.imshow("Channel " + color_space[0], ch1)
    cv2.imshow("Channel " + color_space[1], ch2)
    cv2.imshow("Channel " + color_space[2], ch3)


def rotate_bound(image, angle):
    (h, w) = image.shape[:2]  # image dimensions
    (cX, cY) = (w // 2, h // 2)  # center of image
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)  # negative angle to rotate clockwise
    cos = np.abs(M[0, 0])  # cos & sin values from the rotation matrix
    sin = np.abs(M[0, 1])
    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    # adjust the rotation matrix to take the translation into account
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH))


# load the input image and show its dimensions, keeping in mind that
# images are represented as a multi-dimensional NumPy array with
# shape no. rows (height) x no. columns (width) x no. channels (depth)
image = cv2.imread('/home/caroline/Pictures/first.jpg')  # numpy array

# resize image ignoring aspect ratio
resized_image = cv2.resize(image, (300, 300))
cv2.imshow("Fixed Resizing", resized_image)
cv2.waitKey(0)

image = rescale_frame(image, 20)

(h, w, d) = image.shape  # height=rows, width=columns, depth=channels => 3 for RGB
print("width={}, height={}, depth={}".format(w, h, d))
# display the image to our screen -- we will need to click the window
# opened by OpenCV and press a key on our keyboard to continue execution
cv2.imshow("Image", image)
cv2.waitKey(0)

# grayscale value 0-255
# (B, G, R) with range [0, 255]

# access the RGB pixel located at x=50, y=100, keeping in mind that
# OpenCV stores images in BGR order rather than RGB
(B, G, R) = image[100, 50]
print("R={}, G={}, B={}".format(R, G, B))

# array slicing and cropping
# extract a 100x100 pixel square ROI (Region of Interest) from the
# input image starting at x=320,y=60 and ending at x=420,y=160
""" roi = image[60:160, 320:420]
cv2.imshow("ROI", roi)
cv2.waitKey(0) """

# transform image into different color spaces and split channels
""" image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
extract_channels(image_lab, "LAB")  # lightness, red/green value, blue/yellow value => very vast color space
cv2.waitKey(0)
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)  # hue: color 0°-360°, saturation: amount of grey 0-1, value: brightness/intensity 0-1
extract_channels(image_hsv, "HSV")
cv2.waitKey(0) """

# rotate image
# loop over rotation angles without cutting the image
""" for angle in np.arange(0, 360, 30):  # range [0, 360) in 30° increments
    rotated = rotate_bound(image, angle)
    cv2.imshow("Rotated", rotated)
    cv2.waitKey(0) """

# blurring: reduce high-frequency noise => GaussianBlur function
blurred = cv2.GaussianBlur(image, (11, 11), 0)  # 11x11 kernel: larger kernels create blurrier images
cv2.imshow("Blurred", blurred)
cv2.waitKey(0)
scripts/rotation_problem.py (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/usr/bin/python3
# https://pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/

import numpy as np
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
""" ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to the image file")
args = vars(ap.parse_args()) """

image = cv2.imread('/home/caroline/Pictures/first.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
edged = cv2.Canny(gray, 90, 200)

resized_image = cv2.resize(edged, (800, 800))
cv2.imshow("Edged", resized_image)
cv2.waitKey(0)

# further tutorial on rotating and cutting off...
scripts/row_detection_tutorial.py (new executable file, 104 lines)
@@ -0,0 +1,104 @@
#!/usr/bin/python3
# Crop row navigation for autonomous field robot

import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys


def rescale_frame(frame, percent):  # rescale image, preserving the aspect ratio
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)


image = cv2.imread('/home/caroline/Pictures/first.jpg')
image = rescale_frame(image, 20)

### TBD: add Gaussian Blur

### Image Segmentation

## RGB
# apply mask that generates a grayscale image highlighting green ROIs
b, g, r = cv2.split(image)
gscale = 2*g - r - b

""" cv2.imshow("Image after ExG mask is applied", gscale)
cv2.waitKey(0) """


# obtain binary image by applying an intensity threshold
""" color = ('b', 'g', 'r')
for i, col in enumerate(color):
    hist = cv2.calcHist([image], [i], None, [256], [0, 256])  # image histogram
    plt.plot(hist, color=col)
plt.title('Histogram of initial image')
plt.show()
cv2.waitKey(0) """


""" hist2 = cv2.calcHist([gscale], [0], None, [256], [0, 256])  # image histogram
plt.plot(hist2, 'k')
plt.title('Histogram of grayscale image')
plt.show()
cv2.waitKey(0) """

ret2, gscale2 = cv2.threshold(gscale, 127, 225, 0)
ret, gscale = cv2.threshold(gscale, 180, 200, 0)
""" cv2.imshow("Binary image", gscale)
cv2.waitKey(0) """

while True:
    Hori = np.concatenate((gscale, gscale2), axis=1)  # left and right image
    cv2.imshow('Comparing Different Thresholds', Hori)
    cv2.waitKey(1)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()

## YUV
# get U and V layer
image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
(Y, U, V) = cv2.split(image_yuv)
task = 2  # 1: layers; 2: histogram; 3: binary image; 4: combined layers
if task == 1:
    while True:
        cv2.imshow("U-Layer", U)
        cv2.imshow("V-Layer", V)
        cv2.waitKey(1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

elif task == 2:
    for i in range(1, 3):  # channels 1 and 2 of the YUV image are U and V
        hist_yuv = cv2.calcHist([image_yuv], [i], None, [256], [0, 256])
        plt.plot(hist_yuv)
    plt.legend(['U', 'V'])
    plt.title('Histogram of U and V Layer')
    plt.show()
    cv2.waitKey(0)

elif task == 3:
    ret_u, binary_u = cv2.threshold(U, 120, 130, 0)
    ret_v, binary_v = cv2.threshold(V, 125, 135, 0)
    while True:
        binary_uv = np.concatenate((binary_u, binary_v), axis=1)
        cv2.imshow('U and V layer as binary image', binary_uv)
        cv2.waitKey(1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

elif task == 4:
    ret_u, binary_u = cv2.threshold(U, 120, 130, 0)  # same thresholds as in task 3
    ret_v, binary_v = cv2.threshold(V, 125, 135, 0)
    uv_combi = cv2.bitwise_and(binary_u, binary_v)
    cv2.imshow('U and V layer as binary image', uv_combi)
    cv2.waitKey(0)
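The ExG mask in row_detection_tutorial.py (gscale = 2*g - r - b, also used in initial_row_detection.py) is computed on uint8 arrays, where intermediate values wrap around at 255. A saturation-safe variant is sketched below; it is not part of this commit and assumes the same BGR input image.

# Illustrative sketch only: excess-green (ExG) index without uint8 wrap-around.
import cv2
import numpy as np

image = cv2.imread('/home/caroline/Pictures/first.jpg')
b, g, r = cv2.split(image.astype(np.int16))  # widen before arithmetic
exg = 2 * g - r - b                          # can now go below 0 and above 255
exg = np.clip(exg, 0, 255).astype(np.uint8)  # back to a displayable 8-bit image
cv2.imshow("ExG (saturation-safe)", exg)
cv2.waitKey(0)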