Fixed type errors
CaptainTrojan committed Dec 6, 2024
1 parent c5c4f16 commit c360f15
Showing 4 changed files with 31 additions and 70 deletions.
3 changes: 3 additions & 0 deletions luxonis_train/attached_modules/losses/pml_loss.py
@@ -108,6 +108,9 @@ def __init__(
     def prepare(self, inputs, labels):
         embeddings = inputs["features"][0]

+        assert (
+            labels is not None and "id" in labels
+        ), "ID labels are required for metric learning losses"
         IDs = labels["id"][0][:, 0]
         return embeddings, IDs

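The assert above is what actually fixes the type error: `labels` may be `None` at this point, so indexing `labels["id"]` directly fails static type checking. A minimal standalone sketch of the narrowing pattern, with the `Labels` type simplified to a plain dict for illustration:

    from typing import Optional

    def prepare(labels: Optional[dict]):
        # Before the assert, a checker rejects labels["id"]: the value
        # may still be None at this point.
        assert (
            labels is not None and "id" in labels
        ), "ID labels are required for metric learning losses"
        # After the assert, labels is narrowed to dict, so indexing checks.
        return labels["id"]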
8 changes: 8 additions & 0 deletions luxonis_train/attached_modules/metrics/pml_metrics.py
@@ -79,6 +79,10 @@ def __init__(self, cross_batch_memory_size=0, **kwargs):

     def prepare(self, inputs, labels):
         embeddings = inputs["features"][0]
+
+        assert (
+            labels is not None and "id" in labels
+        ), "ID labels are required for metric learning losses"
         IDs = labels["id"][0][:, 0]
         return embeddings, IDs

@@ -158,6 +162,10 @@ def __init__(self, cross_batch_memory_size=0, **kwargs):

     def prepare(self, inputs, labels):
         embeddings = inputs["features"][0]
+
+        assert (
+            labels is not None and "id" in labels
+        ), "ID labels are required for metric learning losses"
         IDs = labels["id"][0][:, 0]
         return embeddings, IDs

25 changes: 18 additions & 7 deletions luxonis_train/attached_modules/visualizers/embeddings_visualizer.py
@@ -29,6 +29,10 @@ def prepare(
         self, inputs: Packet[Tensor], labels: Labels | None
     ) -> tuple[Tensor, Tensor]:
         embeddings = inputs["features"][0]
+
+        assert (
+            labels is not None and "id" in labels
+        ), "ID labels are required for metric learning losses"
         IDs = labels["id"][0]
         return embeddings, IDs

@@ -69,13 +73,20 @@ def forward(

         # Plot the embeddings
         fig, ax = plt.subplots(figsize=(10, 10))
-        scatter = ax.scatter(
-            embeddings_2d[:, 0],
-            embeddings_2d[:, 1],
-            c=IDs.detach().cpu().numpy(),
-            cmap="viridis",
-            s=5,
-        )
+        if IDs is not None:
+            scatter = ax.scatter(
+                embeddings_2d[:, 0],
+                embeddings_2d[:, 1],
+                c=IDs.detach().cpu().numpy(),
+                cmap="viridis",
+                s=5,
+            )
+        else:
+            scatter = ax.scatter(
+                embeddings_2d[:, 0],
+                embeddings_2d[:, 1],
+                s=5,
+            )
         fig.colorbar(scatter, ax=ax)
         ax.set_title("Embeddings Visualization")
         ax.set_xlabel("Dimension 1")
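The visualizer hunk above pairs the assert with a runtime fallback: when IDs are present the scatter is colored per identity, otherwise the embeddings are still plotted, just uncolored. A condensed, self-contained sketch of that fallback (the function name and input shapes are illustrative, not the module's API; unlike the diff, it draws a colorbar only when there is something to color):

    import numpy as np
    import matplotlib.pyplot as plt

    def plot_embeddings(embeddings_2d, ids=None):
        fig, ax = plt.subplots(figsize=(10, 10))
        if ids is not None:
            # Color each point by its identity label.
            scatter = ax.scatter(embeddings_2d[:, 0], embeddings_2d[:, 1],
                                 c=ids, cmap="viridis", s=5)
            fig.colorbar(scatter, ax=ax)
        else:
            # No labels available: plot plain, uncolored points.
            ax.scatter(embeddings_2d[:, 0], embeddings_2d[:, 1], s=5)
        ax.set_title("Embeddings Visualization")
        return fig

    fig = plot_embeddings(np.random.rand(200, 2),
                          ids=np.random.randint(0, 10, 200))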
65 changes: 2 additions & 63 deletions luxonis_train/nodes/backbones/ghostfacenet.py
@@ -263,6 +263,8 @@ def __init__(
         has_se = se_ratio is not None and se_ratio > 0.0
         self.stride = stride

+        assert layer_id is not None, "Layer ID must be explicitly provided"
+
         # Point-wise expansion
         if layer_id <= 1:
             self.ghost1 = GhostModuleV2(
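Here the narrowed value is an `int | None` rather than a mapping: without the assert, `layer_id <= 1` is flagged by a type checker and would raise `TypeError` at runtime if `None` slipped through. A hedged illustration of just that narrowing (the function and return values are made up; only the comparison mirrors the real code):

    def describe_block(layer_id: "int | None") -> str:
        assert layer_id is not None, "Layer ID must be explicitly provided"
        # layer_id is narrowed to int, so the comparison type-checks.
        return "early block" if layer_id <= 1 else "later block"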
@@ -507,66 +509,3 @@ def forward(self, inps):
         x = self.pointwise_conv(x)
         x = self.classifier(x)
         return x
-
-    # @property
-    # def task(self) -> str:
-    #     return "label"
-
-    # @property
-    # def tasks(self) -> dict:
-    #     return [TaskType.LABEL]
-
-
-if __name__ == "__main__":
-    W, H = 256, 256
-    model = GhostFaceNetsV2(image_size=W)
-    model.eval()  # Set the model to evaluation mode
-
-    # Create a dummy input tensor of the appropriate size
-    x = torch.randn(1, 3, H, W)
-
-    # Export the model
-    onnx_path = "ghostfacenet.onnx"
-    torch.onnx.export(
-        model,  # model being run
-        x,  # model input (or a tuple for multiple inputs)
-        onnx_path,  # where to save the model (can be a file or file-like object)
-        export_params=True,  # store the trained parameter weights inside the model file
-        opset_version=12,  # the ONNX version to export the model to
-        do_constant_folding=True,  # whether to execute constant folding for optimization
-        input_names=["input"],  # the model's input names
-        output_names=["output"],  # the model's output names
-        # dynamic_axes={'input' : {0 : 'batch_size'},  # variable length axes
-        #               'output' : {0 : 'batch_size'}}
-    )
-    import os
-
-    import numpy as np
-    import onnx
-    import onnxsim
-
-    # logger.info("Simplifying ONNX model...")
-    model_onnx = onnx.load(onnx_path)
-    onnx_model, check = onnxsim.simplify(model_onnx)
-    if not check:
-        raise RuntimeError("Onnx simplify failed.")
-    onnx.save(onnx_model, onnx_path)
-
-    # Add calibration data
-    dir = "shared_with_container/calibration_data/"
-    for file in os.listdir(dir):
-        os.remove(dir + file)
-    for i in range(20):
-        np_array = np.random.rand(1, 3, H, W).astype(np.float32)
-        np.save(f"{dir}{i:02d}.npy", np_array)
-        np_array.tofile(f"{dir}{i:02d}.raw")
-
-    # Test backpropagation on the model
-    # Create a dummy target tensor of the appropriate size
-    Y = model(x)
-    target = torch.randn(1, 512)
-    loss_fn = torch.nn.MSELoss()
-    loss = loss_fn(Y, target)
-    model.zero_grad()
-    loss.backward()
-    print("Backpropagation test successful")
