fixed some mistakes in configs
klemen1999 committed Oct 1, 2024
1 parent a94c8d7 commit 263fe43
Showing 11 changed files with 32 additions and 46 deletions.
1 change: 0 additions & 1 deletion configs/README.md
@@ -148,7 +148,6 @@ Here you can change everything related to actual training of the model.
| use_weighted_sampler | bool | False | bool if use WeightedRandomSampler for training, only works with classification tasks |
| epochs | int | 100 | number of training epochs |
| n_workers | int | 2 | number of workers for data loading |
- | train_metrics_interval | int | -1 | frequency of computing metrics on train data, -1 if don't perform |
| validation_interval | int | 1 | frequency of computing metrics on validation data |
| n_log_images | int | 4 | maximum number of images to visualize and log |
| skip_last_batch | bool | True | whether to skip last batch while training |
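For reference, the trainer options still documented in the table above can be combined into a minimal trainer block. This is only a sketch using the documented defaults, not a config shipped in this commit:

```yaml
# Minimal trainer section built from the documented options above;
# values are the documented defaults, shown here only for illustration.
trainer:
  use_weighted_sampler: False  # WeightedRandomSampler (classification tasks only)
  epochs: 100                  # number of training epochs
  n_workers: 2                 # data-loading workers
  validation_interval: 1       # compute validation metrics every N epochs
  n_log_images: 4              # maximum number of images to visualize and log
  skip_last_batch: True        # skip the last training batch
```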
4 changes: 2 additions & 2 deletions configs/classification_heavy_model.yaml
@@ -1,11 +1,11 @@
# Example configuration for training a predefined heavy classification model

model:
-  name: segmentation_light
+  name: classification_light
  predefined_model:
    name: ClassificationModel
    params:
-      variant: "heavy"
+      variant: heavy

loader:
  params:
4 changes: 2 additions & 2 deletions configs/classification_light_model.yaml
@@ -1,11 +1,11 @@
# Example configuration for training a predefined light classification model

model:
-  name: segmentation_light
+  name: classification_light
  predefined_model:
    name: ClassificationModel
    params:
-      variant: "light"
+      variant: light

loader:
  params:
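After this fix, the model block of both predefined classification configs reads roughly as follows (a sketch assembled from the diffs above; surrounding sections such as loader are unchanged and omitted):

```yaml
# Sketch of the corrected model block from the light classification config;
# per the diff, the heavy config differs only in `variant: heavy`.
model:
  name: classification_light
  predefined_model:
    name: ClassificationModel
    params:
      variant: light
```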
20 changes: 5 additions & 15 deletions configs/coco_model.yaml → configs/complex_model.yaml
@@ -2,37 +2,28 @@


model:
-  name: coco_test
+  name: complex_model
  nodes:
    - name: EfficientRep
      params:
        variant: "n"

    - name: RepPANNeck
      inputs:
        - EfficientRep

-    - name: ImplicitKeypointBBoxHead
+    - name: EfficientKeypointBBoxHead
      inputs:
        - RepPANNeck
-      params:
-        conf_thres: 0.25
-        iou_thres: 0.45

  losses:
-    name: ImplicitKeypointBBoxLoss
-    params:
-      keypoint_regression_loss_weight: 0.5
-      keypoint_visibility_loss_weight: 0.7
-      bbox_loss_weight: 0.05
-      objectness_loss_weight: 0.2
+    name: EfficientKeypointBboxLoss

  metrics:
    - name: ObjectKeypointSimilarity
      is_main_metric: true
    - name: MeanAveragePrecisionKeypoints

  visualizers:
    name: MultiVisualizer
    attached_to: ImplicitKeypointBBoxHead
    params:
      visualizers:
        - name: KeypointVisualizer
@@ -78,7 +69,6 @@ tracker:
  save_directory: output
  is_tensorboard: True
  is_wandb: False
-  wandb_entity: luxonis
  is_mlflow: False

loader:
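The renamed complex_model config wires its nodes into a graph by listing each node's inputs. A trimmed sketch of that topology, with losses, metrics, and visualizers omitted and indentation assumed:

```yaml
# Backbone -> neck -> head graph from the renamed complex_model config;
# each downstream node names the node(s) it consumes via `inputs`.
model:
  name: complex_model
  nodes:
    - name: EfficientRep               # backbone
      params:
        variant: "n"

    - name: RepPANNeck                 # neck, fed by the backbone
      inputs:
        - EfficientRep

    - name: EfficientKeypointBBoxHead  # head, fed by the neck
      inputs:
        - RepPANNeck
```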
6 changes: 3 additions & 3 deletions configs/example_export.yaml
@@ -1,8 +1,8 @@
-# Example configuration for exporting a predefined segmentation model
+# Example configuration for exporting a predefined light detection model

model:
-  name: coco_segmentation
-  weights: null # specify a path to the weights here
+  name: detection_light
+  weights: null # TODO: Specify a path to the weights here
  predefined_model:
    name: DetectionModel
    params:
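The export example now targets the light detection model and leaves weights as a TODO. In practice the field would point at a trained checkpoint; the path below is purely a placeholder, not part of the repository:

```yaml
# Illustrative only: the checkpoint path is a placeholder.
model:
  name: detection_light
  weights: path/to/checkpoint.ckpt  # the shipped config keeps `weights: null` with a TODO
  predefined_model:
    name: DetectionModel
```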
13 changes: 9 additions & 4 deletions configs/example_tuning.yaml
File mode changed: 100644 → 100755
@@ -1,12 +1,12 @@
-# Example configuration for tuning a predefined segmentation model
+# Example configuration for tuning a predefined light segmentation model


model:
-  name: coco_segmentation
+  name: segmentation_light
  predefined_model:
    name: SegmentationModel
    params:
-      backbone: MicroNet
-      task: binary
+      variant: light

loader:
  params:
@@ -38,7 +38,12 @@ trainer:
      T_max: *epochs
      eta_min: 0


tuner:
  study_name: seg_study
  n_trials: 10
+  storage:
+    storage_type: local
  params:
    trainer.optimizer.name_categorical: ["Adam", "SGD"]
    trainer.optimizer.params.lr_float: [0.0001, 0.001]
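The tuner block gains a local storage section, and the search space is declared with dotted config paths plus a type suffix (`_categorical`, `_float`), as seen above. A sketch following that pattern; the weight_decay entry is a hypothetical extra search dimension, not part of the shipped example:

```yaml
tuner:
  study_name: seg_study
  n_trials: 10
  storage:
    storage_type: local
  params:
    trainer.optimizer.name_categorical: ["Adam", "SGD"]
    trainer.optimizer.params.lr_float: [0.0001, 0.001]
    trainer.optimizer.params.weight_decay_float: [0.0, 0.001]  # hypothetical extra entry
```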
3 changes: 1 addition & 2 deletions configs/segmentation_heavy_model.yaml
@@ -5,8 +5,7 @@ model:
  predefined_model:
    name: SegmentationModel
    params:
-      variant: "heavy"
-
+      variant: heavy

loader:
  params:
2 changes: 1 addition & 1 deletion configs/segmentation_light_model.yaml
@@ -5,7 +5,7 @@ model:
  predefined_model:
    name: SegmentationModel
    params:
-      variant: "light"
+      variant: light

loader:
  params:
1 change: 0 additions & 1 deletion luxonis_train/config/config.py
@@ -365,7 +365,6 @@ class TrainerConfig(BaseModelExtraForbid):
        NonNegativeInt,
        Field(validation_alias=AliasChoices("n_workers", "num_workers")),
    ] = 4
-    train_metrics_interval: Literal[-1] | PositiveInt = -1
    validation_interval: Literal[-1] | PositiveInt = 5
    n_log_images: Annotated[
        NonNegativeInt,
23 changes: 9 additions & 14 deletions luxonis_train/models/luxonis_lightning.py
@@ -298,10 +298,15 @@ def _initiate_nodes(
            for source_name, shape in shapes.items()
        }

-        for node_name, (
-            Node,
-            node_kwargs,
-        ), node_input_names, _ in traverse_graph(self.graph, nodes):
+        for (
+            node_name,
+            (
+                Node,
+                node_kwargs,
+            ),
+            node_input_names,
+            _,
+        ) in traverse_graph(self.graph, nodes):
            node_dummy_inputs: list[Packet[Tensor]] = []
            """List of dummy input packets for the node.
@@ -913,16 +918,6 @@ def _print_results(
f"{stage} main metric ({self.main_metric}): {main_metric:.4f}"
)

def _is_train_eval_epoch(self) -> bool:
"""Checks if train eval should be performed on current epoch
based on configured train_metrics_interval."""
train_metrics_interval = self.cfg.trainer.train_metrics_interval
# add +1 to current_epoch because starting epoch is at 0
return (
train_metrics_interval != -1
and (self.current_epoch + 1) % train_metrics_interval == 0
)

def _average_losses(
self, step_outputs: list[Mapping[str, Tensor | float | int]]
) -> dict[str, float]:
1 change: 0 additions & 1 deletion tests/configs/parking_lot_config.yaml
@@ -145,7 +145,6 @@ trainer:
  accumulate_grad_batches: 1
  epochs: 200
  n_workers: 8
-  train_metrics_interval: -1
  validation_interval: 10
  n_log_images: 8
  skip_last_batch: True
