-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathpredict_area.py
121 lines (99 loc) · 3.9 KB
/
predict_area.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
import cv2
import pandas
import numpy as np
import keras
import glob
import os
import json
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, Callback
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras import backend as keras
from skimage.draw import line
from tqdm import tqdm_notebook as tqdm
from keras_tqdm import TQDMNotebookCallback
from models import *
# Load the drivable-area segmentation network with its pretrained weights.
# NOTE(review): `unet_mid` comes from the project-local `models` module —
# presumably a U-Net variant whose input is 128x256x3 (see predict_model
# below); confirm against models.py.
model = unet_mid("weights/area-mid.hdf5")
#model = unet_small("weights/lane-small.hdf5")
def predict_model(image):
    """Run the segmentation model on a single frame and return its mask.

    Parameters
    ----------
    image : np.ndarray
        Input frame, shape (H, W, 3), any size; it is normalized to
        [0, 1] and resized to the network's 256x128 input.

    Returns
    -------
    np.ndarray
        The model's predicted region mask for this frame (first — and
        only — element of the batch; float output of the network).
    """
    # Scale pixel values into [0.0, 1.0]; the network was trained on
    # normalized inputs.
    image = cv2.normalize(image.astype('float'), None, 0.0, 1.0,
                          cv2.NORM_MINMAX)
    # cv2.resize takes (width, height): model input is 256 wide, 128 tall.
    image = cv2.resize(image, (256, 128))
    # Add the batch axis. The resized image is (128, 256, 3), so the
    # batch is already (1, 128, 256, 3) — the original's explicit
    # `reshape(len(test), 128, 256, 3)` was a no-op and is dropped.
    batch = image[np.newaxis, ...]
    regions = model.predict(batch, verbose=0)
    return regions[0]
# --- Main inference loop -----------------------------------------------------
# Read frames from a dash-cam video, run the segmentation model on each one,
# split the predicted mask into a drivable-area (green) channel and an
# alternative-area (red) channel, overlay both on the frame, and derive a
# simple steering hint from the centroid of the drivable region.
# (Duplicate `import numpy`/`import cv2` lines removed — already imported at
# the top of the file.)

FRAME_W, FRAME_H = 1280, 720
CENTER_X = 640                       # horizontal image center for steering offset
LEFT_LIMIT, RIGHT_LIMIT = 520, 760   # dead-band: centroid inside => "Straight"

cap = cv2.VideoCapture("drive-1.mp4")
while True:
    # Capture frame-by-frame.
    ret, frame = cap.read()
    if not ret or frame is None:
        # BUGFIX: the original did `continue` here, which spins forever in a
        # busy loop once the video ends (cap.read() keeps returning None).
        break
    frame = cv2.resize(frame, (FRAME_W, FRAME_H))

    # Binarize the model's float mask, close small holes, upscale to frame
    # size, and convert to a displayable 8-bit image.
    regions_raw = predict_model(frame)
    regions = cv2.cvtColor(regions_raw, cv2.COLOR_BGR2RGB)
    regions[regions > 0.5] = 1.0
    regions[regions < 0.5] = 0
    kernel = np.ones((5, 5), np.uint8)
    regions = cv2.morphologyEx(regions, cv2.MORPH_CLOSE, kernel)
    regions = cv2.resize(regions, (FRAME_W, FRAME_H))
    regions *= 255
    regions = cv2.normalize(regions.astype('uint8'), None, 0, 255,
                            cv2.NORM_MINMAX)

    # Drivable area: keep only the green channel of the mask.
    driveable_mask = np.copy(regions)
    driveable_mask[:, :, 0] = 0
    driveable_mask[:, :, 2] = 0
    driveable_mask = cv2.cvtColor(driveable_mask, cv2.COLOR_RGB2GRAY)

    display = np.zeros((FRAME_H, FRAME_W, 3), np.uint8)
    # ROBUSTNESS: findContours returns 3 values on OpenCV 3.x and 2 on 4.x;
    # the contour list is always the second-to-last element, so [-2] works
    # on both (the original 3-tuple unpack raised on OpenCV 4).
    contours = cv2.findContours(driveable_mask, cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    for c in contours:
        cv2.drawContours(display, [c], -1, [0, 200, 0, 20], -1)  # debug

    # Centroid of the drivable mask drives the steering hint.
    M = cv2.moments(driveable_mask)
    try:
        # calculate x,y coordinate of center
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        # put text and highlight the center
        cv2.circle(display, (cX, cY), 5, (255, 255, 255), -1)
        offset = abs(CENTER_X - cX)
        if cX < LEFT_LIMIT:
            cv2.putText(display, "Turn Left: " + str(offset), (100, 200),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
        elif cX > RIGHT_LIMIT:
            cv2.putText(display, "Turn Right: " + str(offset), (100, 200),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
        else:
            cv2.putText(display, "Straight: " + str(offset), (100, 200),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    except ZeroDivisionError:
        # BUGFIX: was a bare `except:` — only an all-zero mask (m00 == 0)
        # is an expected failure here; anything else should surface.
        pass

    # Alternative region: keep only the red channel of the mask.
    alt_mask = np.copy(regions)
    alt_mask[:, :, 0] = 0
    alt_mask[:, :, 1] = 0
    alt_mask = cv2.cvtColor(alt_mask, cv2.COLOR_RGB2GRAY)
    contours = cv2.findContours(alt_mask, cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    for c in contours:
        cv2.drawContours(display, [c], -1, [0, 0, 255, 0.5], -1)  # debug

    # Blend the overlays onto the frame and stack the raw mask underneath.
    dst = cv2.addWeighted(frame, 1.0, display, 0.3, 0)
    merged = np.concatenate((dst, regions), axis=0)
    merged = cv2.resize(merged, (960, 960))
    cv2.imshow("Result", merged)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture.
cap.release()
cv2.destroyAllWindows()