Commit 3201296 (0 parents): 79 changed files with 9,090 additions and 0 deletions.
@@ -0,0 +1,13 @@
# Folders
__pycache__/
build/
*.egg-info

# Files
*.weights
*.t7
*.mp4
*.avi
*.so
*.txt
@@ -0,0 +1 @@
# deep_count
@@ -0,0 +1,11 @@
DEEPSORT:
  REID_CKPT: "./deep_sort/deep/checkpoint/ckpt.t7"
  REID_CKPT_Car: "./deep_sort/deep/checkpoint/ckpt_car.t7"
  MAX_DIST: 0.2
  MIN_CONFIDENCE: 0.3
  NMS_MAX_OVERLAP: 1.0
  MAX_IOU_DISTANCE: 0.7
  MAX_AGE: 70
  N_INIT: 3
  NN_BUDGET: 100
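For orientation, here is a minimal sketch of how a YAML block like this can be loaded so the tracker factory below can read values as cfg.DEEPSORT.MAX_DIST and so on. It assumes PyYAML and an attribute-style wrapper; the repository may ship its own parser (for example easydict or yacs), which is not shown in this commit excerpt, and the file path is hypothetical.

```python
import yaml
from types import SimpleNamespace

def load_cfg(path):
    # Read the YAML file and wrap each section so keys are attribute-accessible.
    with open(path) as f:
        raw = yaml.safe_load(f)
    return SimpleNamespace(**{k: SimpleNamespace(**v) for k, v in raw.items()})

cfg = load_cfg("./configs/deep_sort.yaml")  # hypothetical path for this config
print(cfg.DEEPSORT.MAX_AGE)  # 70
```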
@@ -0,0 +1,7 @@
YOLOV3:
  CFG: "./detector/YOLOv3/cfg/yolov4.cfg"
  WEIGHT: "./detector/YOLOv3/weight/yolov4.weights"
  CLASS_NAMES: "./detector/YOLOv3/cfg/coco.names"

  SCORE_THRESH: 0.1
  NMS_THRESH: 0.4
@@ -0,0 +1,7 @@
YOLOV3:
  CFG: "./detector/YOLOv3/cfg/yolov3-tiny.cfg"
  WEIGHT: "./detector/YOLOv3/weight/yolov3-tiny.weights"
  CLASS_NAMES: "./detector/YOLOv3/cfg/coco.names"

  SCORE_THRESH: 0.5
  NMS_THRESH: 0.4
@@ -0,0 +1,7 @@
YOLOV4:
  CFG: "./detector/YOLOv3/cfg/yolov4.cfg"
  WEIGHT: "./detector/YOLOv3/weight/yolov4_1_3_416_416_static.onnx"
  CLASS_NAMES: "./detector/YOLOv3/cfg/coco.names"

  SCORE_THRESH: 0.1
  NMS_THRESH: 0.4
@@ -0,0 +1,7 @@
YOLOV4:
  CFG: "./detector/YOLOv3/cfg/yolov4.cfg"
  WEIGHT: "./detector/YOLOv3/weight/yolov4.engine"
  CLASS_NAMES: "./detector/YOLOv3/cfg/coco.names"

  SCORE_THRESH: 0.4
  NMS_THRESH: 0.4
@@ -0,0 +1,111 @@
import cv2
import numpy as np
from threading import Thread
import time
import os


class LoadStreams:  # multiple IP or RTSP cameras
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size
        sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=(i, cap), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        return self.sources, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years


class datasets:  # multiple IP or RTSP cameras; sources may be listed in a text file
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=(i, cap), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 1:  # read every frame (LoadStreams above keeps only every 4th)
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        return self.sources, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
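A brief usage sketch for the loaders above. The consumer script is not part of this excerpt, so the letterbox/resize step that would normally follow each frame is omitted:

```python
# Hypothetical driver loop: each iteration yields the latest frame per source.
loader = datasets(sources='streams.txt', img_size=640)
for sources, frames, _ in loader:
    for src, frame in zip(sources, frames):
        if frame is None:
            continue
        cv2.imshow(src, frame)  # __next__ already calls cv2.waitKey(1); press 'q' to stop
```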
@@ -0,0 +1,3 @@
# Deep Sort

This is the implementation of Deep SORT with PyTorch.
@@ -0,0 +1,24 @@
from deep_sort.deep_sort import DeepSort  # keep the full package path: imports do not change the working directory, even when a submodule file is executed


__all__ = ['DeepSort', 'build_tracker', 'build_tracker_car']  # restrict what `import *` exposes


def build_tracker(cfg, use_cuda):
    return DeepSort(cfg.DEEPSORT.REID_CKPT,
                    max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                    nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                    max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET, use_cuda=use_cuda)


def build_tracker_car(cfg, use_cuda):
    return DeepSort(cfg.DEEPSORT.REID_CKPT_Car,
                    max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                    nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                    max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET, use_cuda=use_cuda, num_class=685)
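A hedged usage sketch for the two factories. The DeepSort class itself is not included in this excerpt; the update signature shown below follows the common deep_sort_pytorch convention and is an assumption:

```python
# Hypothetical wiring; `cfg` is the parsed YAML config from earlier.
from deep_sort import build_tracker, build_tracker_car

person_tracker = build_tracker(cfg, use_cuda=True)    # person re-ID checkpoint (ckpt.t7)
car_tracker = build_tracker_car(cfg, use_cuda=True)   # vehicle re-ID checkpoint (ckpt_car.t7, 685 classes)

# Assumed per-frame call; output rows would be [x1, y1, x2, y2, track_id]:
# outputs = person_tracker.update(bbox_xywh, confidences, frame)
```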
Empty file.
Empty file.
@@ -0,0 +1,15 @@
import torch

# Load query/gallery features produced by the re-ID network.
features = torch.load("features.pth")
qf = features["qf"]  # query features
ql = features["ql"]  # query labels
gf = features["gf"]  # gallery features
gl = features["gl"]  # gallery labels

# Similarity is the inner product between query and gallery features.
scores = qf.mm(gf.t())
res = scores.topk(5, dim=1)[1][:, 0]  # index of the best-matching gallery entry per query
top1correct = gl[res].eq(ql).sum().item()

print("Acc top1:{:.3f}".format(top1correct / ql.size(0)))
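If a fuller rank-k summary is useful, the same score matrix supports it directly; a short sketch under the same features.pth layout (assuming the gallery has at least 10 entries):

```python
# Hypothetical extension: rank-k accuracy for several cutoffs.
topk_idx = scores.topk(10, dim=1)[1]          # (num_query, 10) gallery indices
hits = gl[topk_idx].eq(ql.unsqueeze(1))       # True where the gallery label matches the query
for k in (1, 5, 10):
    acc = hits[:, :k].any(dim=1).float().mean().item()
    print("Acc top{}: {:.3f}".format(k, acc))
```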
@@ -0,0 +1,55 @@
import torch
import torchvision.transforms as transforms
import numpy as np
import cv2
import logging

from deep_sort.deep.model import Net


class Extractor(object):
    def __init__(self, model_path, use_cuda=True, num_class=751):
        self.net = Net(reid=True, num_classes=num_class)
        self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
        state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']
        self.net.load_state_dict(state_dict)
        logger = logging.getLogger("root.tracker")
        logger.info("Loading weights from {}... Done!".format(model_path))
        self.net.to(self.device)
        self.size = (64, 128)
        self.norm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def _preprocess(self, im_crops):
        """
        1. to float with scale from 0 to 1
        2. resize to (64, 128) as the Market-1501 dataset did
        3. concatenate to a numpy array
        4. to torch Tensor
        5. normalize
        """
        def _resize(im, size):
            return cv2.resize(im.astype(np.float32) / 255., size)

        im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
        return im_batch

    def __call__(self, im_crops):
        im_batch = self._preprocess(im_crops)
        with torch.no_grad():
            im_batch = im_batch.to(self.device)
            features = self.net(im_batch)
        return features.cpu().numpy()


if __name__ == '__main__':
    img = cv2.imread("demo.jpg")[:, :, (2, 1, 0)]  # BGR -> RGB
    extr = Extractor("checkpoint/ckpt.t7")
    feature = extr([img])  # __call__ expects a list of crops
    print(feature.shape)
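Because Net returns L2-normalized embeddings when reid=True (see model.py below), a plain dot product between two extracted features is already a cosine similarity. A small sketch with two hypothetical crops crop_a and crop_b (H x W x 3 RGB arrays):

```python
# Hypothetical comparison of two person crops.
feats = extr([crop_a, crop_b])               # shape (2, 512); rows are unit-norm
cos_sim = float(np.dot(feats[0], feats[1]))
print("cosine similarity:", cos_sim)         # DeepSort's MAX_DIST bounds 1 - cos_sim
```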
@@ -0,0 +1,104 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
    def __init__(self, c_in, c_out, is_downsample=False):
        super(BasicBlock, self).__init__()
        self.is_downsample = is_downsample
        if is_downsample:
            self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=2, padding=1, bias=False)
        else:
            self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(c_out)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(c_out)
        if is_downsample:
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
                nn.BatchNorm2d(c_out)
            )
        elif c_in != c_out:
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
                nn.BatchNorm2d(c_out)
            )
            self.is_downsample = True

    def forward(self, x):
        y = self.conv1(x)
        y = self.bn1(y)
        y = self.relu(y)
        y = self.conv2(y)
        y = self.bn2(y)
        if self.is_downsample:
            x = self.downsample(x)
        return F.relu(x.add(y), True)  # residual connection is applied here


def make_layers(c_in, c_out, repeat_times, is_downsample=False):
    blocks = []
    for i in range(repeat_times):
        if i == 0:
            blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample)]
        else:
            blocks += [BasicBlock(c_out, c_out)]
    return nn.Sequential(*blocks)


class Net(nn.Module):
    def __init__(self, num_classes=751, reid=False):
        super(Net, self).__init__()
        # input: 3 x 128 x 64
        self.conv = nn.Sequential(
            nn.Conv2d(3, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            # nn.Conv2d(32,32,3,stride=1,padding=1),
            # nn.BatchNorm2d(32),
            # nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, padding=1),
        )
        # 64 x 64 x 32
        self.layer1 = make_layers(64, 64, 2, False)
        # 64 x 64 x 32
        self.layer2 = make_layers(64, 128, 2, True)
        # 128 x 32 x 16
        self.layer3 = make_layers(128, 256, 2, True)
        # 256 x 16 x 8
        self.layer4 = make_layers(256, 512, 2, True)
        # 512 x 8 x 4
        self.avgpool = nn.AvgPool2d((8, 4), 1)
        # 512 x 1 x 1
        self.reid = reid
        self.classifier = nn.Sequential(
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        x = self.conv(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # B x 512
        if self.reid:
            # L2-normalize the embedding for the re-ID / tracking branch
            x = x.div(x.norm(p=2, dim=1, keepdim=True))
            return x
        # classifier
        x = self.classifier(x)
        return x


if __name__ == '__main__':
    net = Net()
    x = torch.randn(4, 3, 128, 64)
    y = net(x)
    # import ipdb; ipdb.set_trace()
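For reference, a quick sketch of how the reid flag changes the output (shapes assume the default 128x64 crops used by the Extractor above):

```python
# Classifier head vs. re-ID embedding head.
net_cls = Net(num_classes=751, reid=False)
net_reid = Net(num_classes=751, reid=True)
x = torch.randn(4, 3, 128, 64)
print(net_cls(x).shape)    # torch.Size([4, 751]) -- class logits used for training
print(net_reid(x).shape)   # torch.Size([4, 512]) -- L2-normalized features used for tracking
```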