diff --git a/nnunetv2/inference/predict_from_raw_data.py b/nnunetv2/inference/predict_from_raw_data.py
index 14056d139..0710c44b4 100644
--- a/nnunetv2/inference/predict_from_raw_data.py
+++ b/nnunetv2/inference/predict_from_raw_data.py
@@ -469,7 +469,7 @@ def predict_logits_from_preprocessed_data(self, data: torch.Tensor) -> torch.Ten
                     self.network._orig_mod.load_state_dict(params)

                 # why not leave prediction on device if perform_everything_on_device? Because this may cause the
-                # second iteration to crash due to OOM. Grabbing tha twith try except cause way more bloated code than
+                # second iteration to crash due to OOM. Grabbing that with try except cause way more bloated code than
                 # this actually saves computation time
                 if prediction is None:
                     prediction = self.predict_sliding_window_return_logits(data).to('cpu')
diff --git a/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py b/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py
index 45948daac..97abdde0a 100644
--- a/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py
+++ b/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py
@@ -1241,7 +1241,7 @@ def perform_actual_validation(self, save_probabilities: bool = False):
                                              self.dataset_json),
                                         )
                                     ))

-                # if we don't barrier from time to time we will get nccl timeouts for large datsets. Yuck.
+                # if we don't barrier from time to time we will get nccl timeouts for large datasets. Yuck.
                 if self.is_ddp and i < last_barrier_at_idx and (i + 1) % 20 == 0:
                     dist.barrier()
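
Context for the first hunk: the comment it touches explains why the ensembling loop moves each fold's logits to CPU right away rather than keeping the running sum on the GPU. Below is a minimal sketch of that pattern, not nnU-Net's actual implementation; `parameter_sets`, `network`, and `predict_fn` are hypothetical stand-ins (in nnU-Net, `predict_fn` corresponds to predict_sliding_window_return_logits).

import torch

def ensemble_logits_on_cpu(data, parameter_sets, network, predict_fn):
    # Accumulate ensemble logits on the CPU so the GPU only ever holds one
    # fold's prediction at a time. Keeping the running sum on the GPU could
    # OOM on the second iteration, and catching that with try/except would
    # add more code than the device transfer costs in time.
    prediction = None
    with torch.no_grad():
        for params in parameter_sets:
            network.load_state_dict(params)
            fold_logits = predict_fn(data).to('cpu')  # offload immediately
            prediction = fold_logits if prediction is None else prediction + fold_logits
    return prediction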
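
Context for the second hunk: the fixed comment describes periodic synchronization in a DDP validation loop, where ranks that drift too far apart can trip NCCL's collective timeout. A sketch of the pattern, under the assumption that `process_case` and `last_barrier_at_idx` are placeholders (in nnU-Net the latter guards against barriers near the end of the loop, where some ranks may have no cases left and would otherwise deadlock):

import torch.distributed as dist

def ddp_validation_loop(cases, process_case, is_ddp, last_barrier_at_idx):
    for i, case in enumerate(cases):
        process_case(case)
        # Re-synchronize every 20 cases, mirroring the (i + 1) % 20 == 0
        # check in the hunk above. Without this, a fast rank can wait in a
        # collective long enough for NCCL to time out on large datasets.
        if is_ddp and i < last_barrier_at_idx and (i + 1) % 20 == 0:
            dist.barrier()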