Skip to content

Commit

Permalink
Lock Down Sample Configs (#85)
Browse files Browse the repository at this point in the history
Co-authored-by: klemen1999 <[email protected]>
Co-authored-by: Martin Kozlovsky <[email protected]>
  • Loading branch information
3 people committed Oct 9, 2024
1 parent 762833f commit 809eeeb
Show file tree
Hide file tree
Showing 31 changed files with 774 additions and 624 deletions.
1 change: 0 additions & 1 deletion configs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,6 @@ Here you can change everything related to actual training of the model.
| use_weighted_sampler | bool | False | bool if use WeightedRandomSampler for training, only works with classification tasks |
| epochs | int | 100 | number of training epochs |
| n_workers | int | 2 | number of workers for data loading |
| train_metrics_interval | int | -1 | frequency of computing metrics on train data, -1 if don't perform |
| validation_interval | int | 1 | frequency of computing metrics on validation data |
| n_log_images | int | 4 | maximum number of images to visualize and log |
| skip_last_batch | bool | True | whether to skip last batch while training |
Expand Down
Original file line number Diff line number Diff line change
@@ -1,25 +1,25 @@
# Example configuration for training a predefined detection model
# Example configuration for training a predefined heavy classification model

model:
name: coco_detection
name: classification_light
predefined_model:
name: DetectionModel
name: ClassificationModel
params:
use_neck: True
variant: heavy

loader:
params:
dataset_name: coco_test
dataset_name: cifar10_test

trainer:
preprocessing:
train_image_size: [&height 256, &width 320]
keep_aspect_ratio: False
train_image_size: [384, 512]
keep_aspect_ratio: True
normalize:
active: True

batch_size: 4
epochs: &epochs 200
batch_size: 8
epochs: 200
n_workers: 4
validation_interval: 10
n_log_images: 8
Expand All @@ -29,9 +29,9 @@ trainer:
- name: TestOnTrainEnd

optimizer:
name: SGD
name: Adam
params:
lr: 0.02
lr: 0.001

scheduler:
name: ConstantLR
37 changes: 37 additions & 0 deletions configs/classification_light_model.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
# Example configuration for training a predefined light classification model
# NOTE(review): indentation was lost in extraction; restored to conventional
# 2-space YAML nesting inferred from the sibling configs in this commit.

model:
  name: classification_light
  predefined_model:
    name: ClassificationModel
    params:
      variant: light  # size variant of the predefined model

loader:
  params:
    dataset_name: cifar10_test

trainer:
  preprocessing:
    # [height, width] fed to the training pipeline
    train_image_size: [384, 512]
    keep_aspect_ratio: true
    normalize:
      active: true

  batch_size: 8
  epochs: 200
  n_workers: 4
  validation_interval: 10  # compute validation metrics every 10 epochs
  n_log_images: 8          # max images visualized/logged per validation

  callbacks:
    - name: ExportOnTrainEnd
    - name: TestOnTrainEnd

  optimizer:
    name: Adam
    params:
      lr: 0.003

  scheduler:
    name: ConstantLR  # keeps the learning rate fixed for the whole run
42 changes: 0 additions & 42 deletions configs/classification_model.yaml

This file was deleted.

35 changes: 8 additions & 27 deletions configs/coco_model.yaml → configs/complex_model.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,45 +2,28 @@


model:
name: coco_test
name: complex_model
nodes:
- name: EfficientRep
params:
channels_list: [64, 128, 256, 512, 1024]
n_repeats: [1, 6, 12, 18, 6]
depth_mul: 0.33
width_mul: 0.33

- name: RepPANNeck
inputs:
- EfficientRep
params:
channels_list: [256, 128, 128, 256, 256, 512]
n_repeats: [12, 12, 12, 12]
depth_mul: 0.33
width_mul: 0.33

- name: ImplicitKeypointBBoxHead
- name: EfficientKeypointBBoxHead
inputs:
- RepPANNeck
params:
conf_thres: 0.25
iou_thres: 0.45

losses:
name: ImplicitKeypointBBoxLoss
params:
keypoint_regression_loss_weight: 0.5
keypoint_visibility_loss_weight: 0.7
bbox_loss_weight: 0.05
objectness_loss_weight: 0.2
name: EfficientKeypointBboxLoss

metrics:
- name: ObjectKeypointSimilarity
is_main_metric: true
- name: MeanAveragePrecisionKeypoints

visualizers:
name: MultiVisualizer
attached_to: ImplicitKeypointBBoxHead
params:
visualizers:
- name: KeypointVisualizer
Expand Down Expand Up @@ -86,7 +69,6 @@ tracker:
save_directory: output
is_tensorboard: True
is_wandb: False
wandb_entity: luxonis
is_mlflow: False

loader:
Expand All @@ -105,20 +87,19 @@ trainer:
n_sanity_val_steps: 1
profiler: null
verbose: True
batch_size: 4
batch_size: 8
accumulate_grad_batches: 1
epochs: &epochs 200
n_workers: 8
train_metrics_interval: -1
validation_interval: 10
n_log_images: 8
skip_last_batch: True
log_sub_losses: True
save_top_k: 3

preprocessing:
train_image_size: [&height 256, &width 320]
keep_aspect_ratio: False
train_image_size: [&height 384, &width 384]
keep_aspect_ratio: True
train_rgb: True
normalize:
active: True
Expand Down
45 changes: 0 additions & 45 deletions configs/ddrnet_segmentation_model.yaml

This file was deleted.

45 changes: 45 additions & 0 deletions configs/detection_heavy_model.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# Example configuration for training a predefined heavy detection model
# NOTE(review): indentation was lost in extraction; restored to conventional
# 2-space YAML nesting inferred from the sibling configs in this commit.

model:
  name: detection_heavy
  predefined_model:
    name: DetectionModel
    params:
      variant: heavy  # size variant of the predefined model

loader:
  params:
    dataset_name: coco_test

trainer:
  preprocessing:
    # [height, width] fed to the training pipeline
    train_image_size: [384, 512]
    keep_aspect_ratio: true
    normalize:
      active: true

  batch_size: 8
  # anchor so the scheduler's T_max below stays in sync with epoch count
  epochs: &epochs 200
  n_workers: 4
  validation_interval: 10  # compute validation metrics every 10 epochs
  n_log_images: 8          # max images visualized/logged per validation

  callbacks:
    - name: ExportOnTrainEnd
    - name: TestOnTrainEnd

  optimizer:
    name: SGD
    params:
      lr: 0.01
      momentum: 0.937
      weight_decay: 0.0005
      dampening: 0.0
      nesterov: true

  scheduler:
    name: CosineAnnealingLR
    params:
      T_max: *epochs     # anneal over the full training run
      eta_min: 0.0001    # floor learning rate at end of cosine cycle
      last_epoch: -1
45 changes: 45 additions & 0 deletions configs/detection_light_model.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# Example configuration for training a predefined light detection model
# NOTE(review): indentation was lost in extraction; restored to conventional
# 2-space YAML nesting inferred from the sibling configs in this commit.

model:
  name: detection_light
  predefined_model:
    name: DetectionModel
    params:
      variant: light  # size variant of the predefined model

loader:
  params:
    dataset_name: coco_test

trainer:
  preprocessing:
    # [height, width] fed to the training pipeline
    train_image_size: [384, 512]
    keep_aspect_ratio: true
    normalize:
      active: true

  batch_size: 8
  # anchor so the scheduler's T_max below stays in sync with epoch count
  epochs: &epochs 200
  n_workers: 8
  validation_interval: 10  # compute validation metrics every 10 epochs
  n_log_images: 8          # max images visualized/logged per validation

  callbacks:
    - name: ExportOnTrainEnd
    - name: TestOnTrainEnd

  optimizer:
    name: SGD
    params:
      lr: 0.02
      momentum: 0.937
      weight_decay: 0.0005
      dampening: 0.0
      nesterov: true

  scheduler:
    name: CosineAnnealingLR
    params:
      T_max: *epochs     # anneal over the full training run
      eta_min: 0.0002    # floor learning rate at end of cosine cycle
      last_epoch: -1
Loading

0 comments on commit 809eeeb

Please sign in to comment.