client_debug.py
import os
import pathlib
import sys

import cv2
import torch
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QLabel, QVBoxLayout

from client_ui import RobotControlUI
from models.experimental import attempt_load
from utils.general import non_max_suppression, scale_boxes
from utils.plots import Annotator

# Fix for Windows path issue: checkpoints saved on Linux contain PosixPath objects
pathlib.PosixPath = pathlib.WindowsPath


class DebugRobotControlUI(RobotControlUI):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Robotic Arm Control Interface")

        # Load the YOLOv5 model from next to this script
        current_dir = os.path.dirname(os.path.abspath(__file__))
        model_path = os.path.join(current_dir, "best.pt")
        self.model = attempt_load(model_path, device='cpu')
        self.model.conf = 0.2  # confidence threshold

        # Use the local debug address
        self.SERVER_IP = "127.0.0.1"
        self.SERVER_PORT = 11113
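        # Assumption: self.detecting, self.video_thread, self.video_label,
        # self.label_counts, self.frame_count and send_detection_command()
        # are presumably provided by RobotControlUI in client_ui.py; they are
        # used below but not defined in this subclass.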

    def process_frame(self, frame):
        """Run YOLOv5 gesture detection on a frame and render the result."""
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = torch.from_numpy(img).to('cpu').float() / 255.0
        img = img.unsqueeze(0) if img.ndimension() == 3 else img  # add batch dim
        img = img.permute(0, 3, 1, 2)  # NHWC -> NCHW
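        # Note (assumption): YOLOv5 expects input dimensions divisible by the
        # model stride (typically 32). This debug path feeds the raw frame size
        # without letterboxing, which suits common camera resolutions such as
        # 640x480 but may need resizing for others.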

        # Run inference
        pred = self.model(img)[0]
        pred = non_max_suppression(pred, self.model.conf, 0.45)

        # Parse detections; temp holds the last detected class id (-1 = none)
        temp = -1
        for det in pred:
            if len(det):
                # Rescale boxes from model input size back to the original frame
                det[:, :4] = scale_boxes(img.shape[2:], det[:, :4], frame.shape).round()
                for *xyxy, conf, cls in det:
                    # Record the detected label
                    label_id = int(cls)
                    temp = label_id
                    self.label_counts[label_id] = self.label_counts.get(label_id, 0) + 1
                    if self.frame_count % 10 == 0:
                        # Every 10th frame, draw the most frequent label's box
                        if self.label_counts:
                            most_common_label = max(self.label_counts, key=self.label_counts.get)
                            annotator = Annotator(frame, line_width=2, example=str(self.model.names))
                            # conf belongs to the current detection, while the
                            # name shown is the most frequent label seen so far
                            label = f'{self.model.names[most_common_label]} {conf:.2f}'
                            annotator.box_label(xyxy, label, color=(255, 0, 0))
                            frame = annotator.result()
                            self.label_counts.clear()
        return frame, temp

    def update_frame(self, frame):
        """Update the video display, running gesture detection when enabled."""
        if self.detecting:
            # Process the frame and detect gestures
            processed_frame, gesture_class = self.process_frame(frame)
            # If a valid gesture was detected, send the corresponding command
            if gesture_class >= 0 and self.video_thread and self.video_thread.isRunning():
                self.send_detection_command(str(gesture_class))
            # Display the processed frame (BGR -> RGB via rgbSwapped)
            height, width, channel = processed_frame.shape
            bytes_per_line = 3 * width
            q_image = QImage(processed_frame.data, width, height, bytes_per_line,
                             QImage.Format_RGB888).rgbSwapped()
            self.video_label.setPixmap(QPixmap.fromImage(q_image).scaled(
                self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        else:
            # Display the raw frame
            height, width, channel = frame.shape
            bytes_per_line = 3 * width
            q_image = QImage(frame.data, width, height, bytes_per_line,
                             QImage.Format_RGB888).rgbSwapped()
            self.video_label.setPixmap(QPixmap.fromImage(q_image).scaled(
                self.video_label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
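

# Minimal local debug run, assuming best.pt sits beside this script and a
# command server is listening on 127.0.0.1:11113 (see SERVER_IP/SERVER_PORT above).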
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = DebugRobotControlUI()
    window.show()
    sys.exit(app.exec_())