diff --git a/CHANGELOG.md b/CHANGELOG.md
index cb283378..122eea12 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,12 @@
 # Changelog
 All notable changes to this project will be documented in this file.
 
+### [2.1.12]
+
+#### Fixed
+
+- Fix wrong dtype used when evaluating finetuning or anomaly models trained in fp16 precision
+
 ### [2.1.11]
 
 #### Fixed
diff --git a/pyproject.toml b/pyproject.toml
index 32769a62..0fd7fe3b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "quadra"
-version = "2.1.11"
+version = "2.1.12"
 description = "Deep Learning experiment orchestration library"
 authors = [
   "Federico Belotti ",
diff --git a/quadra/__init__.py b/quadra/__init__.py
index f39f08a9..67acf36f 100644
--- a/quadra/__init__.py
+++ b/quadra/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.1.11"
+__version__ = "2.1.12"
 
 
 def get_version():
diff --git a/quadra/tasks/anomaly.py b/quadra/tasks/anomaly.py
index ec617cd9..97f3431a 100644
--- a/quadra/tasks/anomaly.py
+++ b/quadra/tasks/anomaly.py
@@ -386,10 +386,11 @@ def test(self) -> None:
             batch_labels = batch_item["label"]
             image_labels.extend(batch_labels.tolist())
             image_paths.extend(batch_item["image_path"])
+            batch_images = batch_images.to(device=self.device, dtype=self.deployment_model.model_dtype)
             if self.model_data.get("anomaly_method") == "efficientad":
-                model_output = self.deployment_model(batch_images.to(self.device), None)
+                model_output = self.deployment_model(batch_images, None)
             else:
-                model_output = self.deployment_model(batch_images.to(self.device))
+                model_output = self.deployment_model(batch_images)
             anomaly_map, anomaly_score = model_output[0], model_output[1]
             anomaly_map = anomaly_map.cpu()
             anomaly_score = anomaly_score.cpu()
diff --git a/quadra/tasks/classification.py b/quadra/tasks/classification.py
index eb05fb52..e885c2c6 100644
--- a/quadra/tasks/classification.py
+++ b/quadra/tasks/classification.py
@@ -1169,7 +1169,7 @@ def test(self) -> None:
         with torch.set_grad_enabled(self.gradcam):
             for batch_item in tqdm(test_dataloader):
                 im, target = batch_item
-                im = im.to(self.device).detach()
+                im = im.to(device=self.device, dtype=self.deployment_model.model_dtype).detach()
 
                 if self.gradcam:
                     # When gradcam is used we need to remove gradients
diff --git a/quadra/tasks/segmentation.py b/quadra/tasks/segmentation.py
index 327ba620..04e68352 100644
--- a/quadra/tasks/segmentation.py
+++ b/quadra/tasks/segmentation.py
@@ -346,10 +346,7 @@ def test(self) -> None:
         image_list, mask_list, mask_pred_list, label_list = [], [], [], []
         for batch in dataloader:
             images, masks, labels = batch
-            images = images.to(self.device)
-            # TODO: This can be problematic for the future considering bfloat16 or float16-true.
-            if "16" in str(self.deployment_model.model_dtype):
-                images = images.half()
+            images = images.to(device=self.device, dtype=self.deployment_model.model_dtype)
             if len(masks.shape) == 3:  # BxHxW -> Bx1xHxW
                 masks = masks.unsqueeze(1)
             with torch.no_grad():