
Stricter Config Types #144

GitHub Actions / Test Results failed Aug 19, 2024 in 0s

2 fail, 49 pass in 31m 20s

  3 files    3 suites   31m 20s ⏱️
 51 tests  49 ✅ 0 💤 2 ❌
132 runs  128 ✅ 1 💤 3 ❌

Results for commit f00a9b4.

Annotations

Check warning on line 0 in tests.integration.test_sanity


github-actions / Test Results

All 2 runs failed: test_simple_models[configs/coco_model.yaml] (tests.integration.test_sanity)

artifacts/Test Results [ubuntu-latest] (Python 3.10)/pytest.xml [took 7s]
artifacts/Test Results [ubuntu-latest] (Python 3.11)/pytest.xml [took 2s]
Raw output
RuntimeError: The size of tensor a (32) must match the size of tensor b (40) at non-singleton dimension 3
config_file = 'configs/coco_model.yaml'

    @pytest.mark.parametrize(
        "config_file", [str(path) for path in Path("configs").glob("*model*")]
    )
    def test_simple_models(config_file: str):
        model = LuxonisModel(config_file, opts=OPTS)
>       model.train()

tests/integration/test_sanity.py:42: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
luxonis_train/core/core.py:228: in train
    self._train(
luxonis_train/core/core.py:186: in _train
    raise e
luxonis_train/core/core.py:182: in _train
    self.pl_trainer.fit(*args, ckpt_path=resume, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py:538: in fit
    call._call_and_handle_interrupt(
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py:47: in _call_and_handle_interrupt
    return trainer_fn(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py:574: in _fit_impl
    self._run(model, ckpt_path=ckpt_path)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py:981: in _run
    results = self._run_stage()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py:1025: in _run_stage
    self.fit_loop.run()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py:205: in run
    self.advance()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py:363: in advance
    self.epoch_loop.run(self._data_fetcher)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py:140: in run
    self.advance(data_fetcher)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py:250: in advance
    batch_output = self.automatic_optimization.run(trainer.optimizers[0], batch_idx, kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py:190: in run
    self._optimizer_step(batch_idx, closure)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py:268: in _optimizer_step
    call._call_lightning_module_hook(
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py:167: in _call_lightning_module_hook
    output = fn(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/core/module.py:1306: in optimizer_step
    optimizer.step(closure=optimizer_closure)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/core/optimizer.py:153: in step
    step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py:238: in optimizer_step
    return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/plugins/precision/precision.py:122: in optimizer_step
    return optimizer.step(closure=closure, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/torch/optim/lr_scheduler.py:130: in wrapper
    return func.__get__(opt, opt.__class__)(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/torch/optim/optimizer.py:484: in wrapper
    out = func(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/torch/optim/optimizer.py:89: in _use_grad
    ret = func(self, *args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/torch/optim/sgd.py:112: in step
    loss = closure()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/plugins/precision/precision.py:108: in _wrap_closure
    closure_result = closure()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py:144: in __call__
    self._result = self.closure(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/torch/utils/_contextlib.py:116: in decorate_context
    return func(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py:129: in closure
    step_output = self._step_fn()
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/automatic.py:317: in _training_step
    training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py:319: in _call_strategy_hook
    output = fn(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py:390: in training_step
    return self.lightning_module.training_step(*args, **kwargs)
luxonis_train/models/luxonis_lightning.py:561: in training_step
    outputs = self.forward(*train_batch)
luxonis_train/models/luxonis_lightning.py:350: in forward
    outputs = node.run(node_inputs)
luxonis_train/nodes/base_node.py:535: in run
    outputs = self(unwrapped)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/torch/nn/modules/module.py:1553: in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.14/x64/lib/python3.10/site-packages/torch/nn/modules/module.py:1562: in _call_impl
    return forward_call(*args, **kwargs)
luxonis_train/nodes/implicit_keypoint_bbox_head.py:136: in forward
    self._build_predictions(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = ImplicitKeypointBBoxHead(
  (learnable_mul_add_conv): ModuleList(
    (0): LearnableMulAddConv(
      (add): Learnable...)
          (2): SiLU()
        )
        (11): Conv2d(176, 153, kernel_size=(1, 1), stride=(1, 1))
      )
    )
  )
)
feat = tensor([[[[[-1.8983e+00,  1.1824e+00, -2.8854e+00,  ...,  2.8355e-01,
             1.9151e-02,  1.1362e-01],
         ...02, -3.6672e-01, -5.6311e-02,  ..., -1.6936e-01,
             2.4974e-01, -5.5677e-01]]]]], grad_fn=<PermuteBackward0>)
anchor_grid = tensor([[[[[ 10.6684,   9.9449]]],


         [[[163.4604,   5.5861]]],


         [[[ 29.8479,  37.1238]]]]])
grid = tensor([[[[[ 0.,  0.],
           [ 1.,  0.],
           [ 2.,  0.],
           ...,
           [37.,  0.],
          ..., 31.],
           [ 2., 31.],
           ...,
           [37., 31.],
           [38., 31.],
           [39., 31.]]]]])
stride = tensor(8, dtype=torch.int32)

    def _build_predictions(
        self, feat: Tensor, anchor_grid: Tensor, grid: Tensor, stride: Tensor
    ) -> Tensor:
        batch_size = feat.shape[0]
        x_bbox = feat[..., : self.box_offset + self.n_classes]
        x_keypoints = feat[..., self.box_offset + self.n_classes :]
    
        box_cxcy, box_wh, box_tail = process_bbox_predictions(x_bbox, anchor_grid)
        grid = grid.to(box_cxcy.device)
        stride = stride.to(box_cxcy.device)
>       box_cxcy = (box_cxcy + grid) * stride
E       RuntimeError: The size of tensor a (32) must match the size of tensor b (40) at non-singleton dimension 3

luxonis_train/nodes/implicit_keypoint_bbox_head.py:179: RuntimeError
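
Both runs fail on the same broadcast at luxonis_train/nodes/implicit_keypoint_bbox_head.py:179: box_cxcy carries 32 elements at dimension 3 while grid carries 40 (its printed coordinates run to x=39, y=31, i.e. a 40x32 cell grid at stride 8). Below is a minimal sketch that reproduces the error outside the model; the [batch, anchors, H, W, xy] layout and the concrete shapes are assumptions inferred from the printed tensors, not the head's actual internals:

    import torch

    # Shapes are illustrative: a head output whose spatial width is 32,
    # added to a grid that was cached for a 40-wide feature map.
    box_cxcy = torch.randn(1, 3, 32, 32, 2)  # [batch, anchors, H, W, xy]
    grid = torch.zeros(1, 1, 32, 40, 2)      # grid built for a 40-wide map
    stride = torch.tensor(8, dtype=torch.int32)

    try:
        (box_cxcy + grid) * stride
    except RuntimeError as e:
        print(e)
        # The size of tensor a (32) must match the size of tensor b (40)
        # at non-singleton dimension 3

This is consistent with a cached grid whose spatial size no longer matches the feature map the head actually receives, plausibly because the stricter config types in this PR changed the effective input resolution.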

Check warning on line 0 in tests.integration.test_sanity


github-actions / Test Results

test_simple_models[configs\\coco_model.yaml] (tests.integration.test_sanity) failed

artifacts/Test Results [windows-latest] (Python 3.11)/pytest.xml [took 6s]
Raw output
RuntimeError: The size of tensor a (32) must match the size of tensor b (40) at non-singleton dimension 3
config_file = 'configs\\coco_model.yaml'

    @pytest.mark.parametrize(
        "config_file", [str(path) for path in Path("configs").glob("*model*")]
    )
    def test_simple_models(config_file: str):
        model = LuxonisModel(config_file, opts=OPTS)
>       model.train()

tests\integration\test_sanity.py:42: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
luxonis_train\core\core.py:228: in train
    self._train(
luxonis_train\core\core.py:186: in _train
    raise e
luxonis_train\core\core.py:182: in _train
    self.pl_trainer.fit(*args, ckpt_path=resume, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\trainer\trainer.py:538: in fit
    call._call_and_handle_interrupt(
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\trainer\call.py:47: in _call_and_handle_interrupt
    return trainer_fn(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\trainer\trainer.py:574: in _fit_impl
    self._run(model, ckpt_path=ckpt_path)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\trainer\trainer.py:981: in _run
    results = self._run_stage()
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\trainer\trainer.py:1025: in _run_stage
    self.fit_loop.run()
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\fit_loop.py:205: in run
    self.advance()
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\fit_loop.py:363: in advance
    self.epoch_loop.run(self._data_fetcher)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\training_epoch_loop.py:140: in run
    self.advance(data_fetcher)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\training_epoch_loop.py:250: in advance
    batch_output = self.automatic_optimization.run(trainer.optimizers[0], batch_idx, kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:190: in run
    self._optimizer_step(batch_idx, closure)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:268: in _optimizer_step
    call._call_lightning_module_hook(
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\trainer\call.py:167: in _call_lightning_module_hook
    output = fn(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\core\module.py:1306: in optimizer_step
    optimizer.step(closure=optimizer_closure)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\core\optimizer.py:153: in step
    step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\strategies\strategy.py:238: in optimizer_step
    return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\plugins\precision\precision.py:122: in optimizer_step
    return optimizer.step(closure=closure, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\torch\optim\lr_scheduler.py:130: in wrapper
    return func.__get__(opt, opt.__class__)(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\torch\optim\optimizer.py:484: in wrapper
    out = func(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\torch\optim\optimizer.py:89: in _use_grad
    ret = func(self, *args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\torch\optim\sgd.py:112: in step
    loss = closure()
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\plugins\precision\precision.py:108: in _wrap_closure
    closure_result = closure()
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:144: in __call__
    self._result = self.closure(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\torch\utils\_contextlib.py:116: in decorate_context
    return func(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:129: in closure
    step_output = self._step_fn()
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\loops\optimization\automatic.py:317: in _training_step
    training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\trainer\call.py:319: in _call_strategy_hook
    output = fn(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\lightning\pytorch\strategies\strategy.py:390: in training_step
    return self.lightning_module.training_step(*args, **kwargs)
luxonis_train\models\luxonis_lightning.py:561: in training_step
    outputs = self.forward(*train_batch)
luxonis_train\models\luxonis_lightning.py:350: in forward
    outputs = node.run(node_inputs)
luxonis_train\nodes\base_node.py:535: in run
    outputs = self(unwrapped)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\torch\nn\modules\module.py:1553: in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
C:\hostedtoolcache\windows\Python\3.11.9\x64\Lib\site-packages\torch\nn\modules\module.py:1562: in _call_impl
    return forward_call(*args, **kwargs)
luxonis_train\nodes\implicit_keypoint_bbox_head.py:136: in forward
    self._build_predictions(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = ImplicitKeypointBBoxHead(
  (learnable_mul_add_conv): ModuleList(
    (0): LearnableMulAddConv(
      (add): Learnable...)
          (2): SiLU()
        )
        (11): Conv2d(176, 153, kernel_size=(1, 1), stride=(1, 1))
      )
    )
  )
)
feat = tensor([[[[[-9.6153e-02, -1.1326e+00, -2.3687e+00,  ..., -7.5704e-02,
             1.4100e-02, -3.2021e-01],
         ...02, -1.0153e-01, -9.7741e-02,  ..., -2.7145e-02,
             3.0507e-02,  8.0027e-03]]]]], grad_fn=<PermuteBackward0>)
anchor_grid = tensor([[[[[17.4153, 14.9089]]],


         [[[61.1151, 34.0772]]],


         [[[32.7635, 96.6576]]]]])
grid = tensor([[[[[ 0.,  0.],
           [ 1.,  0.],
           [ 2.,  0.],
           ...,
           [37.,  0.],
          ..., 31.],
           [ 2., 31.],
           ...,
           [37., 31.],
           [38., 31.],
           [39., 31.]]]]])
stride = tensor(8, dtype=torch.int32)

    def _build_predictions(
        self, feat: Tensor, anchor_grid: Tensor, grid: Tensor, stride: Tensor
    ) -> Tensor:
        batch_size = feat.shape[0]
        x_bbox = feat[..., : self.box_offset + self.n_classes]
        x_keypoints = feat[..., self.box_offset + self.n_classes :]
    
        box_cxcy, box_wh, box_tail = process_bbox_predictions(x_bbox, anchor_grid)
        grid = grid.to(box_cxcy.device)
        stride = stride.to(box_cxcy.device)
>       box_cxcy = (box_cxcy + grid) * stride
E       RuntimeError: The size of tensor a (32) must match the size of tensor b (40) at non-singleton dimension 3

luxonis_train\nodes\implicit_keypoint_bbox_head.py:179: RuntimeError
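
If the mismatch is indeed a stale cached grid, one common remedy in YOLO-style heads is to rebuild the grid whenever the incoming feature map's spatial size differs from the cached one. A hypothetical sketch follows; _make_grid, the rebuild check, and build_predictions are illustrative, not the actual luxonis_train API, and the decode line is a placeholder for process_bbox_predictions:

    import torch
    from torch import Tensor

    def _make_grid(ny: int, nx: int) -> Tensor:
        # Build a [1, 1, ny, nx, 2] grid of (x, y) cell offsets.
        yv, xv = torch.meshgrid(
            torch.arange(ny), torch.arange(nx), indexing="ij"
        )
        return torch.stack((xv, yv), dim=-1).view(1, 1, ny, nx, 2).float()

    def build_predictions(feat: Tensor, grid: Tensor, stride: Tensor) -> Tensor:
        # feat: [batch, anchors, ny, nx, outputs]. Rebuild the grid when
        # the feature map's spatial size has drifted from the cached one.
        ny, nx = int(feat.shape[2]), int(feat.shape[3])
        if tuple(grid.shape[2:4]) != (ny, nx):
            grid = _make_grid(ny, nx).to(feat.device)
        box_cxcy = feat[..., :2].sigmoid() * 2.0 - 0.5  # placeholder decode
        return (box_cxcy + grid) * stride

With a guard like this, a resolution change coming from the config would trigger a grid rebuild instead of the broadcast error above.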