music_player_webcam.py
# Coded by: Kushal Bhavsra
# From: Techmicra IT solution
import time
import cv2
import label_image
import os, random
import subprocess

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow info/warning logs

size = 4  # Downscale factor used to speed up face detection
# Load the Haar cascade for frontal face detection
classifier = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

text = ''  # Holds the most recent emotion label returned by the classifier
webcam = cv2.VideoCapture(0)  # Use the default webcam connected to the PC

now = time.time()  # Start time of the recognition session
future = now + 60  # Run emotion recognition for 60 seconds; adjust as needed
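# Main loop: read a frame, detect faces, classify each face crop with
# label_image.main() (expected to return one of 'Angry', 'Smile', 'Fear'
# or 'Sad'), and draw the label on the frame. Once `future` is reached,
# a song matching the last detected emotion is played and the loop ends.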
while True:
    (rval, im) = webcam.read()
    if not rval:  # Stop if the webcam frame could not be read
        break
    im = cv2.flip(im, 1, 0)  # Flip horizontally so the view acts as a mirror
    # Downscale the frame to speed up detection
    mini = cv2.resize(im, (int(im.shape[1] / size), int(im.shape[0] / size)))
    # Detect faces in the downscaled frame
    faces = classifier.detectMultiScale(mini)
    # Draw a labelled rectangle around each detected face
    for f in faces:
        (x, y, w, h) = [v * size for v in f]  # Scale coordinates back to full size
        sub_face = im[y:y + h, x:x + w]
        FaceFileName = "test.jpg"  # Save the current face crop for classification
        cv2.imwrite(FaceFileName, sub_face)
        text = label_image.main(FaceFileName)  # Classification result from label_image
        text = text.title()  # Title case for display
        font = cv2.FONT_HERSHEY_TRIPLEX
        if text == 'Angry':
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7)
            cv2.putText(im, text, (x + h, y), font, 1, (0, 25, 255), 2)
        if text == 'Smile':
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 7)
            cv2.putText(im, text, (x + h, y), font, 1, (0, 255, 0), 2)
        if text == 'Fear':
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 255), 7)
            cv2.putText(im, text, (x + h, y), font, 1, (0, 255, 255), 2)
        if text == 'Sad':
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 191, 255), 7)
            cv2.putText(im, text, (x + h, y), font, 1, (0, 191, 255), 2)
    # Show the annotated frame
    cv2.imshow('Music player with Emotion recognition', im)
    key = cv2.waitKey(30) & 0xff
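    # Timed playback step: once the 60-second window has elapsed, pick a random
    # file from the songs folder matching the last detected emotion, open it in
    # Windows Media Player, and exit.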
    if time.time() > future:  # After 60 seconds, play music based on the last detected emotion
        try:
            cv2.destroyAllWindows()
            mp = 'C:/Program Files (x86)/Windows Media Player/wmplayer.exe'
            if text == 'Angry':
                randomfile = random.choice(os.listdir("C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Angry/"))
                print('You are angry!!! Please calm down :) I will play a song for you: ' + randomfile)
                file = ('C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Angry/' + randomfile)
                subprocess.call([mp, file])
            if text == 'Smile':
                randomfile = random.choice(os.listdir("C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Smile/"))
                print('You are smiling :) I am playing a special song for you: ' + randomfile)
                file = ('C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Smile/' + randomfile)
                subprocess.call([mp, file])
            if text == 'Fear':
                randomfile = random.choice(os.listdir("C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Fear/"))
                print('You seem afraid of something, I am playing a song for you: ' + randomfile)
                file = ('C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Fear/' + randomfile)
                subprocess.call([mp, file])
            if text == 'Sad':
                randomfile = random.choice(os.listdir("C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Sad/"))
                print("You are sad, don't worry :) I am playing a song for you: " + randomfile)
                file = ('C:/Users/kusha/PycharmProjects/Music_player_with_Emotions_recognition/songs/Sad/' + randomfile)
                subprocess.call([mp, file])
            break
        except Exception:
            print('Please stay focused in the camera frame for at least 60 seconds and run this program again :)')
            break
    if key == 27:  # The Esc key
        break
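
# Note: this script expects haarcascade_frontalface_alt.xml in the working
# directory, a label_image module importable on the Python path, and the
# absolute paths to wmplayer.exe and the songs/Angry, songs/Smile, songs/Fear
# and songs/Sad folders above to exist; adjust those paths for your own machine.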