Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Release v0.3.0 #166

Merged
merged 11 commits into from
Feb 19, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 2 additions & 7 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,12 @@ name: CI

on:
pull_request:
branches: [ dev, main ]
branches: [ main ]
paths:
- 'luxonis_train/**'
- 'tests/**'
- .github/workflows/ci.yaml
- '!**/*.md'
- '!luxonis_train/__main__.py'

permissions:
pull-requests: write
Expand Down Expand Up @@ -107,12 +106,8 @@ jobs:
tests:
needs:
- config-test
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest]

runs-on: ${{ matrix.os }}
runs-on: ubuntu-t4-4core

steps:
- name: Checkout
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/semgrep.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ jobs:
# or use multiple flags to specify particular rules, such as
# --config r/all --config custom/rules
run: semgrep scan -q --sarif --config auto --config "p/secrets" . > semgrep-results.sarif

- name: Pretty-Print SARIF Output
run: |
jq . semgrep-results.sarif > formatted-semgrep-results.sarif || echo "{}"
Expand Down Expand Up @@ -62,4 +62,4 @@ jobs:
if [[ -n "$file" && -n "$line" && -n "$message" ]]; then
echo "::error file=$file,line=$line,title=Semgrep Issue::${message}"
fi
done
done
25 changes: 25 additions & 0 deletions configs/anomaly_detection_model.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
model:
name: AnomalyDetecion
predefined_model:
name: AnomalyDetectionModel
params:
variant: light

loader:
name: LuxonisLoaderPerlinNoise
params:
dataset_name: mvtec_v2
anomaly_source_path: "../data/dtd/images/"

trainer:
preprocessing:
train_image_size: [256, 256]
keep_aspect_ratio: False
normalize:
active: True

batch_size: 4
epochs: 300
num_workers: 4
validation_interval: 10
num_log_images: 8
2 changes: 1 addition & 1 deletion configs/detection_light_model.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ trainer:
std: [1, 1, 1]

batch_size: 8
epochs: &epochs 300
epochs: 300
accumulate_grad_batches: 8 # For best results, always accumulate gradients to effectively use 64 batch size
n_workers: 8
validation_interval: 10
Expand Down
33 changes: 33 additions & 0 deletions configs/fomo_heavy_model.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
model:
name: fomo_detection_heavy
predefined_model:
name: FOMOModel
params:
variant: heavy

loader:
params:
dataset_name: coco_test

trainer:
precision: "16-mixed"
preprocessing:
train_image_size: [384, 512]
keep_aspect_ratio: true
normalize:
active: true
params:
mean: [0., 0., 0.]
std: [1, 1, 1]

batch_size: 8
epochs: 300
n_workers: 4
validation_interval: 10
n_log_images: 8
gradient_clip_val: 10

callbacks:
- name: ExportOnTrainEnd
- name: TestOnTrainEnd

33 changes: 33 additions & 0 deletions configs/fomo_light_model.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
model:
name: fomo_detection_light
predefined_model:
name: FOMOModel
params:
variant: light

loader:
params:
dataset_name: coco_test

trainer:
precision: "16-mixed"
preprocessing:
train_image_size: [384, 512]
keep_aspect_ratio: true
normalize:
active: true
params:
mean: [0., 0., 0.]
std: [1, 1, 1]

batch_size: 8
epochs: 300
n_workers: 4
validation_interval: 10
n_log_images: 8
gradient_clip_val: 10

callbacks:
- name: ExportOnTrainEnd
- name: TestOnTrainEnd

12 changes: 6 additions & 6 deletions configs/instance_segmentation_heavy_model.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ trainer:
std: [1, 1, 1]

batch_size: 8
epochs: &epochs 300
epochs: 300
n_workers: 4
validation_interval: 10
n_log_images: 8
Expand All @@ -36,8 +36,8 @@ trainer:
callbacks:
- name: EMACallback
params:
decay: 0.9999
use_dynamic_decay: True
decay: 0.9999
use_dynamic_decay: True
decay_tau: 2000
- name: ExportOnTrainEnd
- name: TestOnTrainEnd
Expand All @@ -50,13 +50,13 @@ trainer:

training_strategy:
name: "TripleLRSGDStrategy"
params:
params:
warmup_epochs: 3
warmup_bias_lr: 0.0
warmup_momentum: 0.8
lr: 0.01
lre: 0.0001
momentum: 0.937
momentum: 0.937
weight_decay: 0.0005
nesterov: True
cosine_annealing: False
cosine_annealing: False
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import torch
import torch.nn.functional as F
from loguru import logger
from torch import Tensor

from luxonis_train.attached_modules.losses import AdaptiveDetectionLoss
Expand Down Expand Up @@ -130,7 +131,8 @@ def forward(
target_keypoints, batch_size, self.gt_kpts_scale
)
assigned_gt_idx_expanded = assigned_gt_idx.unsqueeze(-1).unsqueeze(-1)
if batched_kpts.size(1) == 0:
if batched_kpts.numel() == 0:
logger.debug("No instances found in the batch")
selected_keypoints = batched_kpts.new_zeros(
(
batched_kpts.size(0),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,16 @@ def forward(
[xi.view(batch_size, self.node.no, -1) for xi in features], 2
).split((self.node.reg_max * 4, self.n_classes), 1)
img_idx = target_boundingbox[:, 0].unsqueeze(-1)
if tuple(target_instance_segmentation.shape[-2:]) != (mask_h, mask_w):
if target_instance_segmentation.numel() == 0:
target_instance_segmentation = torch.empty(
(0, mask_h, mask_w),
device=target_instance_segmentation.device,
dtype=target_instance_segmentation.dtype,
)
elif tuple(target_instance_segmentation.shape[-2:]) != (
mask_h,
mask_w,
):
target_instance_segmentation = F.interpolate(
target_instance_segmentation.unsqueeze(0),
(mask_h, mask_w),
Expand Down
48 changes: 22 additions & 26 deletions tests/integration/test_detection.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,6 @@
from typing import Any

import pytest
from luxonis_ml.data import LuxonisDataset

from luxonis_train.core import LuxonisModel
from luxonis_train.nodes.backbones import __all__ as BACKBONES


def get_opts_backbone(backbone: str) -> dict[str, Any]:
Expand Down Expand Up @@ -105,25 +101,25 @@ def train_and_test(
assert value > 0.8, f"{name} = {value} (expected > 0.8)"


@pytest.mark.parametrize("backbone", BACKBONES)
def test_backbones(
backbone: str,
config: dict[str, Any],
parking_lot_dataset: LuxonisDataset,
):
opts = get_opts_backbone(backbone)
opts["loader.params.dataset_name"] = parking_lot_dataset.identifier
opts["trainer.epochs"] = 1
train_and_test(config, opts)


@pytest.mark.parametrize("variant", ["n", "s", "m", "l"])
def test_variants(
variant: str,
config: dict[str, Any],
parking_lot_dataset: LuxonisDataset,
):
opts = get_opts_variant(variant)
opts["loader.params.dataset_name"] = parking_lot_dataset.identifier
opts["trainer.epochs"] = 1
train_and_test(config, opts)
# @pytest.mark.parametrize("backbone", BACKBONES)
# def test_backbones(
# backbone: str,
# config: dict[str, Any],
# parking_lot_dataset: LuxonisDataset,
# ):
# opts = get_opts_backbone(backbone)
# opts["loader.params.dataset_name"] = parking_lot_dataset.identifier
# opts["trainer.epochs"] = 1
# train_and_test(config, opts)
#
#
# @pytest.mark.parametrize("variant", ["n", "s", "m", "l"])
# def test_variants(
# variant: str,
# config: dict[str, Any],
# parking_lot_dataset: LuxonisDataset,
# ):
# opts = get_opts_variant(variant)
# opts["loader.params.dataset_name"] = parking_lot_dataset.identifier
# opts["trainer.epochs"] = 1
# train_and_test(config, opts)
Loading
Loading