diff --git a/cnn_keras.py b/cnn_keras.py
new file mode 100644
index 0000000..0b311ca
--- /dev/null
+++ b/cnn_keras.py
@@ -0,0 +1,105 @@
+import numpy as np
+import pickle
+import cv2, os
+from glob import glob
+from keras import optimizers
+from keras.models import Sequential
+from keras.layers import Dense
+from keras.layers import Dropout
+from keras.layers import Flatten
+from keras.layers import BatchNormalization
+from keras.layers.convolutional import Conv2D
+from keras.layers.convolutional import MaxPooling2D
+from keras.utils import np_utils
+from keras.callbacks import ModelCheckpoint
+from keras import backend as K
+import matplotlib.pyplot as plt
+
+# use the documented Keras 2 API for channels-last image ordering
+K.set_image_data_format('channels_last')
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+
+def get_image_size():
+    # all gesture images share the same size, so probe one of them
+    img = cv2.imread('gestures/1/100.jpg', 0)
+    return img.shape
+
+
+def get_num_of_classes():
+    return len(glob('gestures/*'))
+
+
+image_x, image_y = get_image_size()
+
+
+# hyperparameters
+INIT_LR = 3e-3
+EPOCHS = 50
+BS = 250
+
+
+def cnn_model():
+    num_of_classes = get_num_of_classes()
+    model = Sequential()
+    model.add(Conv2D(16, (2, 2), input_shape=(image_x, image_y, 1), activation='relu'))
+    model.add(BatchNormalization())
+    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
+    model.add(Conv2D(32, (3, 3), activation='relu'))
+    model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), padding='same'))
+    model.add(Conv2D(64, (5, 5), activation='relu'))
+    model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
+    model.add(Flatten())
+    model.add(Dense(128, activation='relu'))
+    model.add(Dropout(0.5))
+    model.add(Dense(num_of_classes, activation='softmax'))
+    sgd = optimizers.SGD(lr=INIT_LR)
+    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
+    filepath = "cnn_model_keras2.h5"
+    # monitor the same key that model.fit() reports ('val_accuracy'; older Keras releases call it 'val_acc')
+    checkpoint1 = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
+    callbacks_list = [checkpoint1]
+    from keras.utils import plot_model
+    plot_model(model, to_file='model.png', show_shapes=True)
+    return model, callbacks_list
+
+
+def train():
+    with open("train_images", "rb") as f:
+        train_images = np.array(pickle.load(f))
+    with open("train_labels", "rb") as f:
+        train_labels = np.array(pickle.load(f), dtype=np.int32)
+
+    with open("val_images", "rb") as f:
+        val_images = np.array(pickle.load(f))
+    with open("val_labels", "rb") as f:
+        val_labels = np.array(pickle.load(f), dtype=np.int32)
+
+    train_images = np.reshape(train_images, (train_images.shape[0], image_x, image_y, 1))
+    val_images = np.reshape(val_images, (val_images.shape[0], image_x, image_y, 1))
+    train_labels = np_utils.to_categorical(train_labels)
+    val_labels = np_utils.to_categorical(val_labels)
+
+    print(val_labels.shape)
+
+    model, callbacks_list = cnn_model()
+    model.summary()
+    history = model.fit(train_images, train_labels, validation_data=(val_images, val_labels),
+                        epochs=EPOCHS, batch_size=BS, callbacks=callbacks_list)
+    scores = model.evaluate(val_images, val_labels, verbose=0)
+    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
+    model.save('cnn_model_keras2.h5')
+
+    # plot the training loss and accuracy
+    n = EPOCHS
+    plt.style.use("ggplot")
+    plt.figure()
+    plt.plot(np.arange(0, n), history.history["loss"], label="train_loss")
+    plt.plot(np.arange(0, n), history.history["val_loss"], label="val_loss")
+    plt.plot(np.arange(0, n), history.history["accuracy"], label="train_accuracy")
+    plt.plot(np.arange(0, n), history.history["val_accuracy"], label="val_accuracy")
+    plt.title("Training Loss and Accuracy")
+    plt.xlabel("Epoch #")
+    plt.ylabel("Loss/Accuracy")
+    plt.legend(loc="upper right")
+    plt.savefig("plot.png")
+
+
+train()
+K.clear_session()
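Reviewer note (sketch, not part of the patch): once training has written cnn_model_keras2.h5, the checkpoint can be sanity-checked by reloading it and classifying a single gesture image. The snippet assumes gestures/1/100.jpg exists (the same file the size probe above reads) and mirrors the preprocessing used in recognize_gesture.py further down.

import cv2
import numpy as np
from keras.models import load_model

model = load_model('cnn_model_keras2.h5')
img = cv2.imread('gestures/1/100.jpg', 0)                      # 50x50 grayscale sample
img = np.reshape(img, (1, img.shape[0], img.shape[1], 1)).astype(np.float32)
probs = model.predict(img)[0]
print("predicted class:", int(np.argmax(probs)), "confidence:", float(np.max(probs)))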
Accuracy") + plt.xlabel("Epoch #") + plt.ylabel("Loss/Accuracy") + plt.legend(loc="upper right") + plt.savefig("plot.png") + + +train() +K.clear_session() diff --git a/create_gestures.py b/create_gestures.py new file mode 100644 index 0000000..c368942 --- /dev/null +++ b/create_gestures.py @@ -0,0 +1,116 @@ +import cv2 +import numpy as np +import pickle, os, sqlite3, random +from PIL import Image + +image_x, image_y = 50, 50 + + +def get_hand_hist(): + with open("hist", "rb") as f: + hist = pickle.load(f) + return hist + + +def init_create_folder_database(): + # create the folder and database if not exist + if not os.path.exists("gestures"): + os.mkdir("gestures") + if not os.path.exists("gesture_db.db"): + conn = sqlite3.connect("gesture_db.db") + create_table_cmd = "CREATE TABLE gesture ( g_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, g_name TEXT NOT NULL )" + conn.execute(create_table_cmd) + conn.commit() + + +def create_folder(folder_name): + if not os.path.exists(folder_name): + os.mkdir(folder_name) + + +def store_in_db(g_id, g_name): + conn = sqlite3.connect("gesture_db.db") + cmd = "INSERT INTO gesture (g_id, g_name) VALUES (%s, \'%s\')" % (g_id, g_name) + try: + conn.execute(cmd) + except sqlite3.IntegrityError: + choice = input("g_id already exists. Want to change the record? (y/n): ") + if choice.lower() == 'y': + cmd = "UPDATE gesture SET g_name = \'%s\' WHERE g_id = %s" % (g_name, g_id) + conn.execute(cmd) + else: + print("Doing nothing...") + return + conn.commit() + + +def store_images(g_id): + total_pics = 1200 + hist = get_hand_hist() + cam = cv2.VideoCapture(0) + if not cam.read()[0]: + cam = cv2.VideoCapture(0) + x, y, w, h = 300, 100, 300, 300 + + create_folder("gestures/" + str(g_id)) + pic_no = 0 + flag_start_capturing = False + frames = 0 + + while True: + img = cam.read()[1] + img = cv2.flip(img, 1) + imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1) + disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10)) + cv2.filter2D(dst, -1, disc, dst) + blur = cv2.GaussianBlur(dst, (11, 11), 0) + blur = cv2.medianBlur(blur, 15) + # thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1] + thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 4) + thresh = cv2.merge((thresh, thresh, thresh)) + thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY) + thresh = thresh[y:y + h, x:x + w] + contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1] + + if len(contours) > 0: + contour = max(contours, key=cv2.contourArea) + if cv2.contourArea(contour) > 1000 and frames > 50: + save_img = thresh + pic_no += 1 + rand = random.randint(0, 10) + if rand % 2 == 0: + save_img = cv2.flip(save_img, 1) + cv2.putText(img, "Capturing...", (30, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (127, 255, 255)) + + f = "gestures/temp.jpg" + cv2.imwrite(f, save_img) + im = Image.open(f) + im = im.resize((50, 50), Image.ANTIALIAS) + im.save(f, 'JPEG', quality=90) + sim = cv2.imread(f) + cv2.imwrite("gestures/" + str(g_id) + "/" + str(pic_no) + ".jpg", sim) + os.remove(f) + + cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2) + cv2.putText(img, str(pic_no), (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (127, 127, 255)) + cv2.imshow("Capturing gesture", img) + cv2.imshow("thresh", thresh) + keypress = cv2.waitKey(1) + if keypress == ord('c'): + if not flag_start_capturing: + flag_start_capturing = True + else: + flag_start_capturing = False + 
diff --git a/display_all_gestures.py b/display_all_gestures.py
new file mode 100644
index 0000000..3a0cc0e
--- /dev/null
+++ b/display_all_gestures.py
@@ -0,0 +1,44 @@
+import cv2, os, random
+import numpy as np
+
+
+def get_image_size():
+    img = cv2.imread('gestures/0/100.jpg', 0)
+    return img.shape
+
+
+gestures = os.listdir('gestures/')
+gestures.sort(key=int)
+begin_index = 0
+end_index = 5
+image_x, image_y = get_image_size()
+
+# five gesture samples per row
+if len(gestures) % 5 != 0:
+    rows = int(len(gestures) / 5) + 1
+else:
+    rows = int(len(gestures) / 5)
+
+full_img = None
+for i in range(rows):
+    col_img = None
+    for j in range(begin_index, end_index):
+        img_path = "gestures/%s/%d.jpg" % (j, random.randint(1, 1200))
+        img = cv2.imread(img_path, 0)
+        if img is None:
+            # pad missing cells with a black tile
+            img = np.zeros((image_y, image_x), dtype=np.uint8)
+        if col_img is None:
+            col_img = img
+        else:
+            col_img = np.hstack((col_img, img))
+
+    begin_index += 5
+    end_index += 5
+    if full_img is None:
+        full_img = col_img
+    else:
+        full_img = np.vstack((full_img, col_img))
+
+
+cv2.imshow("gestures", full_img)
+cv2.imwrite('full_img.jpg', full_img)
+cv2.waitKey(0)
\ No newline at end of file
diff --git a/flip_images.py b/flip_images.py
new file mode 100644
index 0000000..82cc0cc
--- /dev/null
+++ b/flip_images.py
@@ -0,0 +1,17 @@
+import cv2, os
+
+
+def flip_images():
+    gest_folder = "gestures"
+    for g_id in os.listdir(gest_folder):
+        # mirror pictures 1..1200 and store them as 1201..2400
+        for i in range(1200):
+            path = gest_folder + "/" + g_id + "/" + str(i + 1) + ".jpg"
+            new_path = gest_folder + "/" + g_id + "/" + str(i + 1 + 1200) + ".jpg"
+            print(path)
+            img = cv2.imread(path, 0)
+            if img is None:
+                continue
+            img = cv2.flip(img, 1)
+            cv2.imwrite(new_path, img)
+
+
+flip_images()
diff --git a/get_model_reports.py b/get_model_reports.py
new file mode 100644
index 0000000..fa154bc
--- /dev/null
+++ b/get_model_reports.py
@@ -0,0 +1,112 @@
+from keras.models import load_model
+from sklearn.metrics import classification_report, confusion_matrix
+import pickle
+import numpy as np
+import time
+import matplotlib.pyplot as plt
+
+
+def plot_confusion_matrix(cm,
+                          target_names,
+                          title='Confusion matrix',
+                          cmap=None,
+                          normalize=True):
+    """
+    Given a sklearn confusion matrix (cm), make a nice plot.
+
+    Arguments
+    ---------
+    cm:           confusion matrix from sklearn.metrics.confusion_matrix
+
+    target_names: given classification classes such as [0, 1, 2],
+                  the class names, for example: ['high', 'medium', 'low']
+
+    title:        the text to display at the top of the matrix
+
+    cmap:         the gradient of the values displayed, from matplotlib.pyplot.cm;
+                  see http://matplotlib.org/examples/color/colormaps_reference.html
+                  e.g. plt.get_cmap('jet') or plt.cm.Blues
+
+    normalize:    if False, plot the raw numbers;
+                  if True, plot the proportions
+
+    Usage
+    -----
+    plot_confusion_matrix(cm           = cm,                  # confusion matrix created by
+                                                              # sklearn.metrics.confusion_matrix
+                          normalize    = True,                # show proportions
+                          target_names = y_labels_vals,       # list of names of the classes
+                          title        = best_estimator_name) # title of graph
+
+    Citation
+    --------
+    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
+    """
+    import itertools
+
+    accuracy = np.trace(cm) / float(np.sum(cm))
+    misclass = 1 - accuracy
+
+    if cmap is None:
+        cmap = plt.get_cmap('Blues')
+
+    # normalize before plotting so the colours and the cell text agree
+    if normalize:
+        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
+
+    plt.figure(figsize=(20, 20))
+    plt.imshow(cm, interpolation='nearest', cmap=cmap)
+    plt.title(title)
+    plt.colorbar()
+
+    if target_names is not None:
+        tick_marks = np.arange(len(target_names))
+        plt.xticks(tick_marks, target_names, rotation=45)
+        plt.yticks(tick_marks, target_names)
+
+    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
+    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
+        if normalize:
+            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
+                     horizontalalignment="center",
+                     color="white" if cm[i, j] > thresh else "black")
+        else:
+            plt.text(j, i, "{:,}".format(cm[i, j]),
+                     horizontalalignment="center",
+                     color="white" if cm[i, j] > thresh else "black")
+
+    plt.tight_layout()
+    plt.ylabel('True label')
+    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
+    plt.savefig('confusion_matrix.png')
+
+
+image_x, image_y = 50, 50
+with open("test_images", "rb") as f:
+    test_images = np.array(pickle.load(f))
+with open("test_labels", "rb") as f:
+    test_labels = np.array(pickle.load(f), dtype=np.int32)
+test_images = np.reshape(test_images, (test_images.shape[0], image_x, image_y, 1))
+
+
+model = load_model('cnn_model_keras2.h5')
+
+start_time = time.time()
+pred_probabs = model.predict(test_images)
+end_time = time.time()
+pred_time = end_time - start_time
+avg_pred_time = pred_time / test_images.shape[0]
+print("Time taken to predict %d test images is %.2fs" % (test_images.shape[0], pred_time))
+print('Average prediction time: %fs' % (avg_pred_time))
+
+pred_labels = np.argmax(pred_probabs, axis=1)
+
+cm = confusion_matrix(test_labels, pred_labels)
+# do not shadow the imported classification_report function
+report = classification_report(test_labels, pred_labels)
+print('\n\nClassification Report')
+print('---------------------------')
+print(report)
+plot_confusion_matrix(cm, list(range(cm.shape[0])), normalize=False)
\ No newline at end of file
diff --git a/load_images.py b/load_images.py
new file mode 100644
index 0000000..347b478
--- /dev/null
+++ b/load_images.py
@@ -0,0 +1,61 @@
+import cv2
+from glob import glob
+import numpy as np
+from sklearn.utils import shuffle
+import pickle
+import os
+
+
+def pickle_images_labels():
+    img_labels = []
+    imgs = glob("gestures/*/*.jpg")
+    imgs.sort()
+    for image in imgs:
+        print(image)
+        # the label is the name of the folder the image sits in
+        label = os.path.basename(os.path.dirname(image))
+        img = cv2.imread(image, 0)
+        img_labels.append((np.array(img, dtype=np.uint8), int(label)))
+    return img_labels
+
+
+images_labels = pickle_images_labels()
+# one shuffle is enough to randomise the order
+images_labels = shuffle(images_labels)
+images, labels = zip(*images_labels)
+print("Length of images_labels", len(images_labels))
+
+# 5/6 train, 1/12 test, 1/12 validation
+train_images = images[:int(5 / 6 * len(images))]
+print("Length of train_images", len(train_images))
+with open("train_images", "wb") as f:
+    pickle.dump(train_images, f)
+del train_images
+
+train_labels = labels[:int(5 / 6 * len(labels))]
+print("Length of train_labels", len(train_labels))
+with open("train_labels", "wb") as f:
+    pickle.dump(train_labels, f)
+del train_labels
+
+test_images = images[int(5 / 6 * len(images)):int(11 / 12 * len(images))]
+print("Length of test_images", len(test_images))
+with open("test_images", "wb") as f:
+    pickle.dump(test_images, f)
+del test_images
+
+test_labels = labels[int(5 / 6 * len(labels)):int(11 / 12 * len(labels))]
+print("Length of test_labels", len(test_labels))
+with open("test_labels", "wb") as f:
+    pickle.dump(test_labels, f)
+del test_labels
+
+val_images = images[int(11 / 12 * len(images)):]
+print("Length of val_images", len(val_images))
+with open("val_images", "wb") as f:
+    pickle.dump(val_images, f)
+del val_images
+
+val_labels = labels[int(11 / 12 * len(labels)):]
+print("Length of val_labels", len(val_labels))
+with open("val_labels", "wb") as f:
+    pickle.dump(val_labels, f)
+del val_labels
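Reviewer note (sketch, not part of the patch): the manual slicing above produces a 5/6 train, 1/12 test, 1/12 validation split. sklearn's train_test_split gives the same proportions while keeping images and labels paired, and stratify=labels would additionally balance the classes across splits; this is only an illustrative alternative.

from sklearn.model_selection import train_test_split

# first carve off 1/6 of the data, then halve that part into test and validation
train_images, rest_images, train_labels, rest_labels = train_test_split(
    images, labels, test_size=1 / 6, random_state=42)
test_images, val_images, test_labels, val_labels = train_test_split(
    rest_images, rest_labels, test_size=0.5, random_state=42)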
diff --git a/recognize_gesture.py b/recognize_gesture.py
new file mode 100644
index 0000000..111a9d1
--- /dev/null
+++ b/recognize_gesture.py
@@ -0,0 +1,151 @@
+import cv2, pickle
+import numpy as np
+import logging
+import tensorflow as tf
+from cnn_tf import cnn_model_fn
+import os
+import sqlite3
+from keras.models import load_model
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+# tf.logging.set_verbosity(tf.logging.ERROR)
+logging.getLogger("tensorflow").setLevel(logging.ERROR)
+classifier = tf.estimator.Estimator(model_dir="tmp/cnn_model2", model_fn=cnn_model_fn)
+prediction = None
+model = load_model('cnn_model_keras2.h5')
+
+
+def get_image_size():
+    img = cv2.imread('gestures/0/100.jpg', 0)
+    return img.shape
+
+
+image_x, image_y = get_image_size()
+
+
+def tf_process_image(img):
+    img = cv2.resize(img, (image_x, image_y))
+    return np.array(img, dtype=np.float32)
+
+
+def tf_predict(classifier, image):
+    # unfinished TensorFlow estimator path (kept for reference); recognize() uses the Keras model below
+    global prediction
+    processed_array = tf_process_image(image)
+    pred_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": processed_array}, shuffle=False)
+    pred = classifier.predict(input_fn=pred_input_fn)
+    prediction = next(pred)
+    print(prediction)
+
+
+def keras_process_image(img):
+    img = cv2.resize(img, (image_x, image_y))
+    img = np.array(img, dtype=np.float32)
+    img = np.reshape(img, (1, image_x, image_y, 1))
+    return img
+
+
+def keras_predict(model, image):
+    processed = keras_process_image(image)
+    pred_probab = model.predict(processed)[0]
+    pred_class = list(pred_probab).index(max(pred_probab))
+    return max(pred_probab), pred_class
+
+
+def get_pred_text_from_db(pred_class):
+    conn = sqlite3.connect("gesture_db.db")
+    cursor = conn.execute("SELECT g_name FROM gesture WHERE g_id=?", (pred_class,))
+    for row in cursor:
+        return row[0]
+
+
+def split_sentence(text, num_of_words):
+    # split a text into groups of num_of_words words
+    list_words = text.split(" ")
+    length = len(list_words)
+    _sentence = []
+    b_index = 0
+    e_index = num_of_words
+    while length > 0:
+        _sentence.append(" ".join(list_words[b_index:e_index]))
+        b_index += num_of_words
+        e_index += num_of_words
+        length -= num_of_words
+    return _sentence
+
+
+def put_text_in_blackboard(blackboard, _text):
+    y = 200
+    for text in _text:
+        cv2.putText(blackboard, text, (4, y), cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 255, 255))
+        y += 50
+
+
+def get_hand_hist():
+    with open("hist", "rb") as f:
+        hist = pickle.load(f)
+    return hist
+
+
+def recognize():
+    global prediction
+    cam = cv2.VideoCapture(0)
+    if not cam.read()[0]:
+        cam = cv2.VideoCapture(0)
+    hist = get_hand_hist()
+    x, y, w, h = 300, 100, 300, 300
+    while True:
+        text = ""
+        img = cam.read()[1]
+        img = cv2.flip(img, 1)
+        img = cv2.resize(img, (640, 480))
+        imgCrop = img[y:y + h, x:x + w]
+        imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
+        dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
+        disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
+        cv2.filter2D(dst, -1, disc, dst)
+        blur = cv2.GaussianBlur(dst, (11, 11), 0)
+        blur = cv2.medianBlur(blur, 15)
+        # thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
+        thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 4)
+        thresh = cv2.merge((thresh, thresh, thresh))
+        thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
+        thresh = thresh[y:y + h, x:x + w]
+        openCV_ver = cv2.__version__.split(".")[0]
+        if openCV_ver == '3':
+            contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
+        else:
+            contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[0]
+        if len(contours) > 0:
+            contour = max(contours, key=cv2.contourArea)
+            # print(cv2.contourArea(contour))
+            if cv2.contourArea(contour) > 1000:
+                save_img = thresh
+                pred_probab, pred_class = keras_predict(model, save_img)
+                if pred_probab * 100 > 90:
+                    text = get_pred_text_from_db(pred_class)
+                    print(text)
+
+        blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
+        _text = split_sentence(text, 2)
+        put_text_in_blackboard(blackboard, _text)
+        # cv2.putText(blackboard, text, (30, 200), cv2.FONT_HERSHEY_TRIPLEX, 1.3, (255, 255, 255))
+        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
+        res = np.hstack((img, blackboard))
+        cv2.imshow("Recognizing gesture", res)
+        cv2.imshow("thresh", thresh)
+        if cv2.waitKey(1) == ord('q'):
+            break
+
+
+# one warm-up prediction so the first real frame is not slowed by model initialisation
+keras_predict(model, np.zeros((50, 50), dtype=np.uint8))
+recognize()
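Reviewer note (sketch, not part of the patch): the back-projection / blur / adaptive-threshold block in recognize() above is repeated almost verbatim in create_gestures.py and set_hand_hist.py; a shared helper along these lines (hypothetical name segment_hand, not in the patch) would keep the three copies in sync.

import cv2

def segment_hand(frame_bgr, hist):
    # back-project the stored hue/saturation histogram and return a binary mask
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    cv2.filter2D(dst, -1, disc, dst)
    blur = cv2.medianBlur(cv2.GaussianBlur(dst, (11, 11), 0), 15)
    return cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY_INV, 11, 4)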
diff --git a/set_hand_hist.py b/set_hand_hist.py
new file mode 100644
index 0000000..2226c85
--- /dev/null
+++ b/set_hand_hist.py
@@ -0,0 +1,77 @@
+import cv2
+import numpy as np
+import pickle
+
+
+def build_squares(img):
+    # draw a grid of small squares and return the pixels inside them,
+    # stacked into one patch that the hand histogram is computed from
+    x, y, w, h = 420, 140, 10, 10
+    d = 10
+    imgCrop = None
+    crop = None
+    for i in range(10):
+        for j in range(5):
+            if imgCrop is None:
+                imgCrop = img[y:y + h, x:x + w]
+            else:
+                imgCrop = np.hstack((imgCrop, img[y:y + h, x:x + w]))
+            # print(imgCrop.shape)
+            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
+            x += w + d
+        if crop is None:
+            crop = imgCrop
+        else:
+            crop = np.vstack((crop, imgCrop))
+        imgCrop = None
+        x = 420
+        y += h + d
+    return crop
+
+
+def get_hand_hist():
+    cam = cv2.VideoCapture(0)
+    if not cam.read()[0]:
+        cam = cv2.VideoCapture(0)
+    x, y, w, h = 300, 100, 300, 300
+    flagPressedC, flagPressedS = False, False
+    imgCrop = None
+    while True:
+        img = cam.read()[1]
+        img = cv2.flip(img, 1)
+        img = cv2.resize(img, (640, 480))
+        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
+
+        keypress = cv2.waitKey(1)
+        if keypress == ord('c'):
+            # 'c' samples the squares and builds the hue/saturation histogram
+            hsvCrop = cv2.cvtColor(imgCrop, cv2.COLOR_BGR2HSV)
+            flagPressedC = True
+            hist = cv2.calcHist([hsvCrop], [0, 1], None, [180, 256], [0, 180, 0, 256])
+            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
+        elif keypress == ord('s'):
+            # 's' stops capturing and saves the histogram
+            flagPressedS = True
+            break
+        if flagPressedC:
+            dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
+            dst1 = dst.copy()
+            disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
+            cv2.filter2D(dst, -1, disc, dst)
+            blur = cv2.GaussianBlur(dst, (11, 11), 0)
+            blur = cv2.medianBlur(blur, 15)
+            '''
+            ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
+            Should it be res instead of ret?
+            '''
+            thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 4)
+            thresh = cv2.merge((thresh, thresh, thresh))
+            # cv2.imshow("res", res)
+            cv2.imshow("Thresh", thresh)
+        if not flagPressedS:
+            imgCrop = build_squares(img)
+        # cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
+        cv2.imshow("Set hand histogram", img)
+    cam.release()
+    cv2.destroyAllWindows()
+    with open("hist", "wb") as f:
+        pickle.dump(hist, f)
+
+
+get_hand_hist()
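Reviewer note (sketch, not part of the patch): a quick way to confirm the pickled histogram written above is usable before collecting gestures is to reload it and check its shape; calcHist with [180, 256] bins should give a (180, 256) float32 array normalised to a maximum of 255.

import pickle

with open("hist", "rb") as f:
    hist = pickle.load(f)
print(hist.shape, hist.dtype, float(hist.max()))   # expected: (180, 256) float32 255.0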