forked from congdewu/robotic-grasping
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtouch_pick_roll.py
92 lines (72 loc) · 3.15 KB
/
touch_pick_roll.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
#!/usr/bin/env python
# Click-and-drag grasping script: on the OpenCV "color" window, select an
# object by dragging a line segment across it; the gripper opens to a suitable
# width, moves above the object, rotates to the segment's angle, and grasps.
import matplotlib.pyplot as plt
import numpy as np
import time
import cv2
from real.realsenseD415 import Camera
from UR_Robot import UR_Robot
# User options (change me)
# --------------- Setup options ---------------
tcp_host_ip = '192.168.56.101' # IP and port to robot arm as TCP client (UR5)
tcp_port = 30003
# tool_orientation = [-np.pi,0,0]
# ---------------------------------------------
# Move robot to home pose
robot = UR_Robot(tcp_host_ip,tcp_port)
grasp_home = [(25 / 360.0) * 2 * np.pi, (-70 / 360.0) * 2 * np.pi, # grasp start pose (6 joint angles, radians)
(-70 / 360.0) * 2 * np.pi, (-125 / 360.0) * 2 * np.pi,
(90 / 360.0) * 2 * np.pi, (0 / 360.0) * 2 * np.pi]
robot.move_j(grasp_home)
robot.open_gripper()
# Slow down robot
robot.joint_acc = 1.4
robot.joint_vel = 1.05
# Callback function for clicking on OpenCV window
# NOTE(review): `global` at module level is a no-op; these tuples are meant to
# hold the last pressed / dragged pixel coordinates for the mouse callback.
global click_point_pix1, click_point_pix2
click_point_pix1 = ()
click_point_pix2 = ()
# Grab one frame up front so the mouse callback has a depth image to read
# before the display loop below starts refreshing it.
camera_color_img, camera_depth_img = robot.get_camera_data()
def mouseclick_callback(event, x, y, flags, param):
    """Mouse callback for the 'color' window.

    A left-button press records the start of a line segment; dragging with
    the left button held records the end point and triggers a planar grasp
    at the segment's midpoint, with the gripper rolled to the segment angle.

    Args:
        event, x, y, flags, param: standard OpenCV mouse-callback arguments.
    """
    global click_x1, click_y1, click_x2, click_y2
    # Declared global so the module-level pixel-coordinate tuples are
    # actually updated (the original rebound them as locals).
    global click_point_pix1, click_point_pix2
    global camera, robot
    if event == cv2.EVENT_LBUTTONDOWN:
        click_x1, click_y1 = x, y
        click_point_pix1 = (click_x1, click_y1)
    if event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:
        click_point_pix2 = (x, y)
        # Grasp target: midpoint of the dragged segment, in pixel coords.
        x0 = int((click_x1 + x) / 2)
        y0 = int((click_y1 + y) / 2)
        # Segment angle. Guard the vertical-drag case explicitly: the
        # original arctan(dy / dx) raised ZeroDivisionError when dx == 0.
        dx = x - click_x1
        dy = y - click_y1
        deg = np.pi / 2 if dx == 0 else np.arctan(dy / dx)
        # Back-project the midpoint pixel to camera coordinates with the
        # pinhole model; bail out early on an invalid (zero) depth reading.
        click_z = camera_depth_img[y0][x0] * robot.cam_depth_scale
        if click_z == 0:
            return
        click_x = np.multiply(x0 - robot.cam_intrinsics[0][2], click_z / robot.cam_intrinsics[0][0])
        click_y = np.multiply(y0 - robot.cam_intrinsics[1][2], click_z / robot.cam_intrinsics[1][1])
        click_point = np.asarray([click_x, click_y, click_z])
        click_point.shape = (3, 1)
        # Convert camera to robot coordinates (cam_pose maps camera -> robot,
        # so no inverse is needed here).
        camera2robot = robot.cam_pose
        target_position = np.dot(camera2robot[0:3, 0:3], click_point) + camera2robot[0:3, 3:]
        target_position = target_position[0:3, 0]
        print(target_position)
        print(target_position.shape)
        print(deg)
        # Roll offset of 0.785 rad (~pi/4) aligns the tool frame with the
        # image axis -- presumably tool-mount specific; TODO confirm against
        # UR_Robot.plane_grasp.
        robot.plane_grasp([target_position[0], target_position[1], target_position[2]], rpy=[-np.pi, 0, deg - 0.785])
# Live display loop: stream color and depth frames until the user quits.
cv2.namedWindow('color')
cv2.setMouseCallback('color', mouseclick_callback)
cv2.namedWindow('depth')
while True:
    # Refresh the module-level frames; the mouse callback reads the depth
    # image from these globals.
    camera_color_img, camera_depth_img = robot.get_camera_data()
    # The camera delivers RGB, but OpenCV's imshow expects BGR ordering.
    display_frame = cv2.cvtColor(camera_color_img, cv2.COLOR_RGB2BGR)
    cv2.imshow('color', display_frame)
    cv2.imshow('depth', camera_depth_img)
    # Press 'c' to close the windows and end the session.
    if cv2.waitKey(1) == ord('c'):
        break
cv2.destroyAllWindows()