#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# run bag file:      rosbag play -l 'filename'
# run python script: python3 initial_test.py

import roslib
import sys
import argparse
import rospy
import cv2
import time
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import matplotlib.pyplot as plt
#import hsv_toolbar_ros as tb

# Toggle which color spaces are processed in the callback
LAB_boolean = True
HSV_boolean = True
RGB_boolean = True


def rescale_frame(frame, percent):
    """Resize a frame to the given percentage of its original size."""
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)


#class Image_Segmentation():


class Image_Receiver():

    def __init__(self):
        #rospy.init_node('zed_image_left', anonymous=True)
        self.bridge = CvBridge()
        rospy.Subscriber("/zed/zed_node/left/image_rect_color", Image, self.callback)
        plt.ion()

    def region_selection(self, image):
        """Mask everything outside a trapezoidal region of interest."""
        rows, columns = image.shape[:2]
        bottom_left  = [0 * columns / 6, 6 * rows / 6]
        top_left     = [1.5 * columns / 6, 2 * rows / 6]
        bottom_right = [6 * columns / 6, 6 * rows / 6]
        top_right    = [4.5 * columns / 6, 2 * rows / 6]
        polygons = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)

        mask = np.zeros_like(image)
        cv2.fillPoly(mask, polygons, (255, 255, 255))
        masked_image = cv2.bitwise_and(image, mask)
        return masked_image
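
    # ROI geometry note: the trapezoid spans the full image width at the bottom
    # edge and runs from 25% to 75% of the width at one third of the image
    # height, so roughly the lower two thirds of the frame are kept. The exact
    # fractions above are tuning values and may need adjusting per camera setup.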

    def otsu(self, images):
        """Apply Otsu thresholding to each single-channel image in the list."""
        otsu_threshold = []
        image_result = []
        for i in range(len(images)):
            threshold, result = cv2.threshold(images[i], 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            otsu_threshold.append(threshold)
            image_result.append(result)
        return otsu_threshold, image_result
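
    # cv2.threshold with THRESH_OTSU ignores the supplied threshold (0 here) and
    # returns the automatically determined value as its first result, which is
    # why both the chosen threshold and the binary image are collected per channel.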

    def display_binary(self, images, color_space, rescale):
        """Show the binary Otsu images in a grid, three channels per row."""
        color_spaces = ", ".join(color_space)
        images_rescaled = list(images)  # copy so the caller's list is not modified in place
        if rescale:
            for i in range(len(images)):
                images_rescaled[i] = rescale_frame(images[i], 30)
        horizontal_concat = []
        for i in range(1, len(images), 3):
            horizontal_concat.append(np.concatenate((images_rescaled[i-1], images_rescaled[i], images_rescaled[i+1]), axis=1))
        cube_concat = horizontal_concat[0]
        for i in range(1, len(horizontal_concat)):
            cube_concat = np.concatenate((cube_concat, horizontal_concat[i]), axis=0)
        cv2.imshow('Binary Otsu Images for ' + color_spaces + ' color space', cube_concat)

    def hough(self, images, RGBimage):
        """Run the standard Hough line transform on each binary image and draw the lines."""
        RGBimages = []
        hough_lines = []
        for i in range(len(images)):
            RGBimages.append(RGBimage.copy())  # copy so each channel gets its own drawing
        for i in range(len(images)):
            lines = cv2.HoughLines(images[i], 1, np.pi/180, 200)
            if lines is None:
                lines = []
            for line in lines:
                rho, theta = line[0]
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * rho
                y0 = b * rho
                x1 = int(x0 + 1000 * (-b))
                y1 = int(y0 + 1000 * (a))
                x2 = int(x0 - 1000 * (-b))
                y2 = int(y0 - 1000 * (a))

                cv2.line(RGBimages[i], (x1, y1), (x2, y2), (0, 0, 255), 2, cv2.LINE_AA)
            hough_lines.append(lines)
        return RGBimages, hough_lines
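
    # Note on the (rho, theta) drawing above: each Hough line is given in polar
    # form, so (x0, y0) = (rho*cos(theta), rho*sin(theta)) is the point on the
    # line closest to the origin, and the two endpoints are taken 1000 px along
    # the line in both directions, as in the common OpenCV examples.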

    def hough_linesP(self, image, binary_images):
        """Probabilistic Hough transform on each binary image; segments are drawn onto 'image'."""
        RGBimages = []
        hough_lines = []
        for i in range(len(binary_images)):
            RGBimages.append(image)
        for i in range(len(binary_images)):
            lines = cv2.HoughLinesP(binary_images[i], 2, np.pi/120, 10, minLineLength=250, maxLineGap=75)
            if lines is None:
                continue
            for line in lines:
                x1, y1, x2, y2 = line[0]
                cv2.line(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
            line_img = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)  # currently unused
            hough_lines.append(lines)
        #cv2.imshow("line_img",image)
        return image, hough_lines

    def hough_lineP(self, image, green_image):
        """Probabilistic Hough transform on a single binary image; segments are drawn onto 'image'."""
        lines = cv2.HoughLinesP(green_image, 2, np.pi/120, 10, minLineLength=250, maxLineGap=75)
        if lines is None:
            lines = []
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
        line_img = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)  # currently unused
        #cv2.imshow("line_img",image)
        return image
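
    # HoughLinesP parameters used above: 2 px distance resolution, pi/120 rad
    # (1.5 deg) angular resolution, accumulator threshold 10, and segments of at
    # least 250 px with gaps up to 75 px bridged. These values look tuned for
    # long crop rows in the rescaled frame and may need re-tuning for other inputs.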

    def Rgb_space(self, image):
        """Convert to RGB, split the channels, and plot their histogram."""
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        R, G, B = cv2.split(rgb_image)
        hist = self.histogram(rgb_image, ["R", "G", "B"])

        return rgb_image, R, G, B

    def Lab_space(self, image):
        """Convert to CIELAB, split the channels, and plot their histogram."""
        lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        L, A, B = cv2.split(lab_image)
        hist = self.histogram(lab_image, ["L", "A", "B"])

        return lab_image, L, A, B

    def Hsv_space(self, image):
        """Convert to HSV, split the channels, and plot their histogram."""
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        H, S, V = cv2.split(hsv_image)
        hist = self.histogram(hsv_image, ["H", "S", "V"])

        """
        plt.clf()
        plt.subplot(131)
        plt.title("L")
        plt.imshow(L)
        plt.subplot(132)
        plt.title("a")
        plt.imshow(A)
        plt.subplot(133)
        plt.title("b")
        plt.imshow(B)
        plt.draw()
        plt.pause(0.005)
        """

        return hsv_image, H, S, V

    def callback(self, image_data):
        try:
            self.left_image = self.bridge.imgmsg_to_cv2(image_data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return
        rescaled = rescale_frame(self.left_image, 75)
        denoised_image = cv2.GaussianBlur(rescaled, (5, 5), 0)
        cv2.imshow("original_image", denoised_image)
        print('Image size: {width}x{height}'.format(width=rescaled.shape[1], height=rescaled.shape[0]))
        roi_image = self.region_selection(denoised_image)
        color_image = roi_image
        channels = []
        color_spaces = []
        if RGB_boolean:
            color_image, chR, chG, chB = self.Rgb_space(roi_image)
            channels.extend([chR, chG, chB])
            color_spaces.extend(["R", "G", "B"])
        if LAB_boolean:
            color_image, chL, chA, chB = self.Lab_space(roi_image)
            channels.extend([chL, chA, chB])
            color_spaces.extend(["L", "A", "B"])
        if HSV_boolean:
            color_image, chH, chS, chV = self.Hsv_space(roi_image)
            channels.extend([chH, chS, chV])
            color_spaces.extend(["H", "S", "V"])
        if channels and color_spaces:
            otsu_threshold, otsu_result = self.otsu(channels)
            print("Obtained thresholds: ", otsu_threshold)
            self.display_binary(otsu_result, color_spaces, True)
            #hough_images, lines = self.hough(otsu_result,roi_image)
            #hough_images, lines = self.hough_linesP(roi_image, otsu_result)
            #self.display_binary(hough_images,color_spaces,False)
            hough_lines = self.hough_lineP(otsu_result[0], otsu_result[0])
            cv2.imshow("Hough Line P Function", hough_lines)
        cv2.waitKey(3)

        #frame_threshold = cv2.inRange(color_image, (tb.low_H, tb.low_S, tb.low_V), (tb.high_H, tb.high_S, tb.high_V))

        #cv2.imshow(window_capture_name, cv_image)
        #cv2.imshow(window_detection_name, frame_threshold)
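
    # Per-frame chain: rescale -> Gaussian blur -> trapezoidal ROI -> per-channel
    # Otsu binarization for the selected color spaces -> probabilistic Hough on
    # otsu_result[0], i.e. only the first binary channel; which channel separates
    # the crop rows best likely depends on the scene.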

    def histogram(self, image, name):
        # create the histogram plot, with three lines, one for each color channel
        # plt.clf()
        plt.figure()
        plt.xlim([0, 256])
        for i in range(3):
            hist = cv2.calcHist([image], [i], None, [256], [0, 256])
            plt.plot(hist)
        plt.legend(name)
        plt.title("Color Histogram")  # of legend color space
        plt.xlabel("Color value")
        plt.ylabel("Pixel count")
        plt.show()
        return hist

        # Note: the statements below come after the return and therefore never run.
        #plt.draw()
        plt.pause(0.005)
        cv2.waitKey(0)  # temporary
        plt.close('all')


def main():

    # Start the image processing node
    rospy.init_node('zed_image_left', anonymous=True)
    # Create the receiver, which registers the CvBridge and the image subscriber
    detector = Image_Receiver()

    rate = rospy.Rate(10)  # 10 Hz
    # TODO: Publish the offset value obtained from the front subscriber

    # Keep the node alive until keyboard interrupt
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Detector node terminated.")
        print("Shutting down")
    cv2.destroyAllWindows()


# Execution guard for python files
if __name__ == '__main__':
    main()

"""
|
|
|
|
def imshow(img):
|
|
cv2.imshow("..",img)
|
|
cv2.waitKey(0)
|
|
cv2.destroyAllWindows()
|
|
|
|
img = cv2.imread('/home/andrew/Desktop/camera/crop_row_detection/images/behrImages/first.jpg')
|
|
img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
|
|
while True:
|
|
resized_image = rescale_frame(img,25)
|
|
#imshow(resized_image)
|
|
L,A,B=cv2.split(resized_image)
|
|
cv2.imshow("L_Channel",L) # For L Channel
|
|
cv2.imshow("A_Channel",A) # For A Channel (Here's what You need)
|
|
cv2.imshow("B_Channel",B) # For B Channel
|
|
ch = cv2.waitKey(1)
|
|
if ch == 27:
|
|
break
|
|
cv2.waitKey(5)
|
|
|
|
cv2.waitKey(0)
|
|
cv2.destroyAllWindows()
|
|
|
|
|
|
"""
|