velocity.py (forked from SW-Pond/sound-based-gesture-recognition)
import numpy as np


class VelocityAnalyzer:
    """Estimates a 2-D velocity vector from Doppler shifts of two speaker tones."""

    def __init__(self, f_domain_q, v_plot_q, f_bin_res, peak_freqs, action_q):
        self.C = 343  # Speed of sound in air (m/s)

        # Thresholds for defining what constitutes detected movement
        self.MIN_FREQ_SHIFT = 30
        self.AMP_CUTOFF_FACTOR = 0.3

        # Angles (degrees) from mic
        self.L_SPKR_ANGLE = -45
        self.R_SPKR_ANGLE = 45

        self.f_bin_res = f_bin_res

        # The lower tone belongs to the left speaker, the higher tone to the right
        self.l_f = np.min(peak_freqs)
        self.r_f = np.max(peak_freqs)
        self.l_f_idx = int(np.round(self.l_f / self.f_bin_res))
        self.r_f_idx = int(np.round(self.r_f / self.f_bin_res))

        self.f_domain_q = f_domain_q
        self.v_plot_q = v_plot_q
        self.action_q = action_q
    def get_v(self):
        """Continuously pull spectra off the frequency-domain queue and push
        velocity vectors onto the plot queue."""
        while True:
            if not self.f_domain_q.empty():
                data = self.f_domain_q.get()
                freqs = data[0]  # bin center frequencies (currently unused)
                amps = data[1]

                v_vec = self.scan(amps)
                self.v_plot_q.put(v_vec)
    def scan(self, amps):
        """Combine the per-speaker Doppler velocities into a single x/y vector."""
        l_v_vec, l_shift = self.peak_scan(amps, 'L')
        r_v_vec, r_shift = self.peak_scan(amps, 'R')

        # For Mapper, 1 ==> new movement/update direction
        self.action_q.put([1, l_shift, r_shift])

        v_x = l_v_vec[0] + r_v_vec[0]
        v_y = l_v_vec[1] + r_v_vec[1]
        v_vec = [v_x, v_y]

        return v_vec
    def peak_scan(self, amps, peak):
        """Measure the Doppler shift around one speaker's tone and convert it
        to a velocity component along that speaker's direction."""
        if peak == 'L':
            speaker_angle = self.L_SPKR_ANGLE
            speaker_freq = self.l_f
            peak_idx = self.l_f_idx
        else:
            speaker_angle = self.R_SPKR_ANGLE
            speaker_freq = self.r_f
            peak_idx = self.r_f_idx

        amp_cutoff = self.AMP_CUTOFF_FACTOR * amps[peak_idx]
        freq_shift = 0
        low_shift = 0
        high_shift = 0
        low_idx = peak_idx - 1
        high_idx = peak_idx + 1

        # Scan bins left of peak (at most 22 bins) while they stay above the cutoff
        while amps[low_idx] > amp_cutoff and low_idx >= peak_idx - 22:
            low_shift -= self.f_bin_res
            low_idx -= 1

        # Scan bins right of peak (at most 22 bins) while they stay above the cutoff
        while amps[high_idx] > amp_cutoff and high_idx <= peak_idx + 22:
            high_shift += self.f_bin_res
            high_idx += 1

        # Keep whichever side spread farther from the peak
        if np.abs(low_shift) > high_shift:
            freq_shift = low_shift
        if high_shift > np.abs(low_shift):
            freq_shift = high_shift

        # Ignore shifts too small to count as deliberate movement
        if np.abs(freq_shift) < self.MIN_FREQ_SHIFT:
            freq_shift = 0

        mic_freq = speaker_freq + freq_shift

        # Doppler relation: v = C * (f_mic - f_spk) / (f_mic + f_spk),
        # guarding against division by zero
        v = self.C * (mic_freq - speaker_freq) / (mic_freq + speaker_freq) \
            if (mic_freq + speaker_freq) != 0 else 0

        # Project the speed onto x/y using the speaker's angle from the mic
        v_x = v * np.sin(speaker_angle * (np.pi / 180))
        v_y = v * np.cos(speaker_angle * (np.pi / 180))
        v_vec = [v_x, v_y]

        return v_vec, freq_shift
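

# --- Usage sketch (not part of the original module) ---
# A minimal example of driving VelocityAnalyzer.scan() directly with one
# synthetic spectrum. The tone frequencies, FFT bin resolution, and the plain
# queue.Queue objects below are illustrative assumptions; the original project
# wires these queues up elsewhere. For scale, a +50 Hz shift on an 18 kHz tone
# gives v = 343 * 50 / 36050, roughly 0.48 m/s.
if __name__ == "__main__":
    import queue

    F_BIN_RES = 44100 / 4096      # assumed FFT bin width (Hz)
    PEAK_FREQS = [18000, 19000]   # assumed speaker tone frequencies (Hz)

    f_domain_q = queue.Queue()
    v_plot_q = queue.Queue()
    action_q = queue.Queue()
    analyzer = VelocityAnalyzer(f_domain_q, v_plot_q, F_BIN_RES,
                                PEAK_FREQS, action_q)

    # Flat, near-silent spectrum with strong peaks at the two tone bins
    amps = np.full(2048, 1e-6)
    amps[analyzer.l_f_idx] = 1.0
    amps[analyzer.r_f_idx] = 1.0

    v_vec = analyzer.scan(amps)   # [v_x, v_y]; ~[0, 0] with no Doppler shift
    print(v_vec, action_q.get())  # action entry: [1, l_shift, r_shift]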