-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path: SAM_PredictObjectsFromBoxes_example.py
99 lines (76 loc) · 2.68 KB
/
SAM_PredictObjectsFromBoxes_example.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import numpy as np
import time
import matplotlib.pyplot as plt
import cv2
import sys
import io
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator, SamPredictor
import torch
# --- parameters ---
model_size = "medium"  # one of: small, medium, large
device = "cuda:0"      # e.g. "cuda:0" or "cpu"
image_name = "cartagena.png"
# load image (OpenCV reads BGR; convert to RGB for SAM / matplotlib)
image = cv2.imread('image_examples/' + image_name)
if image is None:
    # cv2.imread returns None (no exception) on a missing/unreadable file,
    # which would otherwise crash later in cvtColor with a cryptic error
    raise FileNotFoundError('Could not read image: image_examples/' + image_name)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plotting methods
def show_mask(mask, ax, random_color=False):
    """Overlay a single-channel mask on *ax* as a translucent RGBA image.

    mask is reshaped to (H, W, 1) and multiplied by an RGBA color — either a
    random color (alpha 0.6) or the fixed dodger-blue default.
    """
    if random_color:
        rgba = np.append(np.random.random(3), 0.6)
    else:
        rgba = np.array([30 / 255, 144 / 255, 255 / 255, 0.6])
    height, width = mask.shape[-2:]
    overlay = mask.reshape(height, width, 1) * rgba.reshape(1, 1, -1)
    ax.imshow(overlay)
def show_points(coords, labels, ax, marker_size=375):
    """Scatter prompt points on *ax*: green stars where labels==1, red where labels==0."""
    positives = coords[labels == 1]
    negatives = coords[labels == 0]
    # same two scatter calls as before, positives first, identical styling
    for points, point_color in ((positives, 'green'), (negatives, 'red')):
        ax.scatter(points[:, 0], points[:, 1], color=point_color, marker='*',
                   s=marker_size, edgecolor='white', linewidth=1.25)
def show_box(box, ax):
    """Draw an unfilled green rectangle for box = [x0, y0, x1, y1] on *ax*."""
    x_min, y_min = box[0], box[1]
    width, height = box[2] - box[0], box[3] - box[1]
    rect = plt.Rectangle((x_min, y_min), width, height,
                         edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)
    ax.add_patch(rect)
# prompt boxes in (x0, y0, x1, y1) pixel coordinates, one per object
input_boxes = torch.tensor([
    [15, 400, 269, 525],
    [300, 40, 435, 395],
], device=device)
sys.path.append("..")
# select model checkpoint (https://github.com/facebookresearch/segment-anything#model-checkpoints)
# BUG FIX: the original used three independent `if`s with no fallback, so an
# unrecognized model_size left sam_checkpoint/model_type undefined and failed
# later with a confusing NameError. Use elif/else and fail fast with a clear error.
if model_size == "small":
    sam_checkpoint = "model_checkpoint/sam_vit_b_01ec64.pth"
    model_type = "vit_b"
elif model_size == "medium":
    sam_checkpoint = "model_checkpoint/sam_vit_l_0b3195.pth"
    model_type = "vit_l"
elif model_size == "large":
    sam_checkpoint = "model_checkpoint/sam_vit_h_4b8939.pth"
    model_type = "vit_h"
else:
    raise ValueError("Unknown model_size '" + str(model_size) + "'; expected 'small', 'medium' or 'large'")
# load model and send to device
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
# load predictor and precompute the image embedding
predictor = SamPredictor(sam)
predictor.set_image(image)
# time the mask prediction only
start = time.time()
# map box coordinates into the model's input frame, then predict one mask per box
transformed_boxes = predictor.transform.apply_boxes_torch(input_boxes, image.shape[:2])
masks, _, _ = predictor.predict_torch(
    point_coords=None,
    point_labels=None,
    boxes=transformed_boxes,
    multimask_output=False,
)
# BUG FIX: stop the timer BEFORE plt.show() — plt.show() blocks until the window
# is closed, so the original elapsed time included user interaction, not inference.
end = time.time()
print('Elapsed time = ' + str((end - start) * 1000) + ' ms')
# visualize the predicted masks and the prompt boxes
plt.figure(figsize=(10, 10))
plt.imshow(image)
for mask in masks:
    show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
for box in input_boxes:
    show_box(box.cpu().numpy(), plt.gca())
plt.axis('off')
plt.show()