From 01374e57d5f1c03e1f5fcf6e87bc63ad6ecce66b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 15 Nov 2024 09:00:56 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 generation/maisi/README.md            | 2 +-
 generation/maisi/scripts/inference.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/generation/maisi/README.md b/generation/maisi/README.md
index 3ff8954c5..57be561b4 100644
--- a/generation/maisi/README.md
+++ b/generation/maisi/README.md
@@ -50,7 +50,7 @@ We retrained several state-of-the-art diffusion model-based methods using our da
 | 512x512x768 | [80,80,112], 8 patches | 4 | 55G | 904s | 48s |

-The experiment was tested on A100 80G GPU. 
+The experiment was tested on A100 80G GPU.

 During inference, the peak GPU memory usage occurs while the autoencoder decodes the latent features.
 To reduce GPU memory usage, we can either increase `autoencoder_tp_num_splits` or reduce `autoencoder_sliding_window_infer_size`.

diff --git a/generation/maisi/scripts/inference.py b/generation/maisi/scripts/inference.py
index 3ef2b70e5..8220f200c 100644
--- a/generation/maisi/scripts/inference.py
+++ b/generation/maisi/scripts/inference.py
@@ -231,5 +231,5 @@ def main():
     )
     torch.cuda.reset_peak_memory_stats()
     main()
-    peak_memory_gb = torch.cuda.max_memory_allocated() / (1024 ** 3)  # Convert to GB
+    peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)  # Convert to GB
     print(f"Peak GPU memory usage: {peak_memory_gb:.2f} GB")
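
For context, the lines touched in `scripts/inference.py` measure peak GPU memory around the MAISI inference entry point. Below is a minimal, self-contained sketch of the same measurement pattern, assuming a CUDA-capable device; `run_inference` is a hypothetical stand-in for the script's actual `main()`:

```python
import torch


def run_inference() -> torch.Tensor:
    # Hypothetical stand-in for scripts/inference.py::main(); allocates a
    # large tensor on the GPU so the peak-memory counter has something to record.
    latent = torch.zeros((4, 128, 128, 128), device="cuda")
    return latent.sum()


if __name__ == "__main__":
    # Clear the recorded high-water mark before the workload runs.
    torch.cuda.reset_peak_memory_stats()
    run_inference()
    # max_memory_allocated() returns the peak in bytes; dividing by 1024**3
    # converts to GB, matching the expression reformatted by this patch.
    peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)
    print(f"Peak GPU memory usage: {peak_memory_gb:.2f} GB")
```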