#!/usr/bin/env python3
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.compat.v1 import ConfigProto
from tensorflow.keras import __version__ as keras_version
import tensorflow as tf
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, CompressedImage
from geometry_msgs.msg import Twist
import rospy
import rospkg
try:
from queue import Queue
except ImportError:
from Queue import Queue
import threading
import numpy as np
import h5py
import time
# Input size (pixels) the CNN was trained on; camera frames are resized to this.
image_size = 24
# Initialize Tensorflow session with on-demand GPU memory allocation
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# Initialize ROS node and get CNN model path
rospy.init_node('line_follower')
rospack = rospkg.RosPack()
path = rospack.get_path('turtlebot3_mogi')
model_path = path + "/network_model/model.best.h5"
print("[INFO] Version:")
print("OpenCV version: %s" % cv2.__version__)
print("Tensorflow version: %s" % tf.__version__)
keras_version = str(keras_version)
print("Keras version: %s" % keras_version)
print("CNN model: %s" % model_path)
# FIX: use a context manager so the HDF5 handle is closed before
# load_model() reopens the same file (the original never closed it).
with h5py.File(model_path, mode='r') as f:
    model_version = f.attrs.get('keras_version')
# FIX: h5py may return the attribute as bytes; normalize both sides to str.
# The original compared bytes against str, which is never equal, so the
# version-mismatch warning printed unconditionally.
if isinstance(model_version, bytes):
    model_version = model_version.decode('utf8')
print("Model's Keras version: %s" % model_version)
if model_version != keras_version:
    print('You are using Keras version ', keras_version, ', but the model was built using ', model_version)
# Finally load model:
model = load_model(model_path)
class BufferQueue(Queue):
    """A bounded Queue whose put() never blocks.

    When the queue is already at maxsize, the oldest entry is evicted to
    make room for the new one, so the consumer always sees the freshest
    item instead of a stale backlog.
    """

    def put(self, item, *args, **kwargs):
        """Insert *item*, discarding the oldest element if the queue is full.

        Extra positional/keyword arguments (block/timeout) are accepted for
        signature compatibility with Queue.put but are ignored — this put
        never blocks.
        """
        with self.mutex:
            at_capacity = self.maxsize > 0 and self._qsize() == self.maxsize
            if at_capacity:
                # Drop the stalest entry rather than blocking the producer.
                self._get()
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
class cvThread(threading.Thread):
    """
    Thread that displays and processes the current image.

    All OpenCV GUI work happens in this single thread to overcome imshow
    threading limitations, see
    https://github.com/ros-perception/image_pipeline/issues/85
    """

    # CNN class index -> (angular.z, linear.x) velocity command.
    # Classes 0-2: blue line fwd/left/right, 3-5: green, 6-8: yellow.
    # Replaces the original nine-branch if/elif chain with one table.
    COMMANDS = {
        0: (0.0, 0.2),    # Forward blue
        1: (0.2, 0.05),   # Left blue
        2: (-0.2, 0.05),  # Right blue
        3: (0.0, 0.3),    # Forward green
        4: (0.2, 0.05),   # Left green
        5: (-0.2, 0.05),  # Right green
        6: (0.0, 0.1),    # Forward yellow
        7: (0.2, 0.05),   # Left yellow
        8: (-0.2, 0.05),  # Right yellow
    }
    # No line detected: rotate slowly in place to search for one.
    DEFAULT_COMMAND = (-0.1, 0.0)

    def __init__(self, queue):
        """Store the frame queue and initialize the published Twist message."""
        threading.Thread.__init__(self)
        self.queue = queue
        self.image = None
        # Initialize published Twist message
        self.cmd_vel = Twist()
        self.cmd_vel.linear.x = 0
        self.cmd_vel.angular.z = 0
        self.last_time = time.time()

    def run(self):
        """Main loop: fetch frames, classify, display, and handle quit keys."""
        # Create a single OpenCV window
        cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("frame", 800, 600)
        while True:
            self.image = self.queue.get()
            # Process the current image
            mask = self.processImage(self.image)
            # Add processed images as small images on top of main image
            result = self.addSmallPictures(self.image, [mask])
            cv2.imshow("frame", result)
            # Check for 'q' or ESC key to exit
            k = cv2.waitKey(1) & 0xFF
            if k in [27, ord('q')]:
                # Stop every motion before shutting down
                self.cmd_vel.linear.x = 0
                self.cmd_vel.angular.z = 0
                pub.publish(self.cmd_vel)
                # Quit
                rospy.signal_shutdown('Quit')
                # FIX: leave the loop — the original kept processing frames
                # after shutdown had been requested.
                break

    def processImage(self, img):
        """Run the CNN on *img*, publish the resulting cmd_vel, and return
        the small (image_size x image_size) frame used for the overlay."""
        # FIX: resize once and reuse; the original resized the same frame
        # a second time just to build the return value.
        small = cv2.resize(img, (image_size, image_size))
        image = img_to_array(small)
        image = np.array(image, dtype="float") / 255.0
        image = image.reshape(-1, image_size, image_size, 3)
        with tf.device('/gpu:0'):
            prediction = np.argmax(model(image, training=False))
        now = time.time()
        print("Prediction %d, elapsed time %.3f" % (prediction, now - self.last_time))
        self.last_time = now
        # Look up the velocity command for the predicted class.
        angular, linear = self.COMMANDS.get(int(prediction), self.DEFAULT_COMMAND)
        self.cmd_vel.angular.z = angular
        self.cmd_vel.linear.x = linear
        # Publish cmd_vel
        pub.publish(self.cmd_vel)
        # Return processed frame
        return small

    def addSmallPictures(self, img, small_images, size=(160, 120)):
        """Paste *small_images* along the top row of *img* and return it.

        Note: img is modified in place.
        """
        x_base_offset = 40
        y_base_offset = 10
        x_offset = x_base_offset
        y_offset = y_base_offset
        for small in small_images:
            small = cv2.resize(small, size)
            if len(small.shape) == 2:
                # Grayscale -> 3 channels so it can be pasted into a BGR image.
                small = np.dstack((small, small, small))
            img[y_offset: y_offset + size[1], x_offset: x_offset + size[0]] = small
            x_offset += size[0] + x_base_offset
        return img
def queueMonocular(msg):
    """ROS callback: decode the compressed camera frame and queue it."""
    try:
        # Convert the ROS CompressedImage message to an OpenCV BGR image.
        # (For a non-compressed Image stream use bridge.imgmsg_to_cv2 instead.)
        frame = bridge.compressed_imgmsg_to_cv2(msg, desired_encoding="bgr8")
    except CvBridgeError as err:
        print(err)
    else:
        qMono.put(frame)
# Wiring: camera subscription feeds the queue consumed by the display thread.
queueSize = 1
qMono = BufferQueue(queueSize)
bridge = CvBridge()
# Define your image topic
image_topic = "/camera/image/compressed"
# Set up your subscriber and define its callback
rospy.Subscriber(image_topic, CompressedImage, queueMonocular)
pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
# Start image processing thread
cvThreadHandle = cvThread(qMono)
# FIX: Thread.setDaemon() is deprecated (removed in Python 3.13);
# assign the daemon attribute instead.
cvThreadHandle.daemon = True
cvThreadHandle.start()
# Spin until Ctrl+C
rospy.spin()