diff --git a/generation/maisi/README.md b/generation/maisi/README.md
index f249449d7..f7d7155b9 100644
--- a/generation/maisi/README.md
+++ b/generation/maisi/README.md
@@ -50,7 +50,6 @@ We retrained several state-of-the-art diffusion model-based methods using our da
 | 512x512x768 | [80,80,112], 8 patches | 4 | 55G | 904s | 48s |
-
 The experiment was tested on A100 80G GPU.
 During inference, the peak GPU memory usage happens during the autoencoder decoding latent features.
diff --git a/generation/maisi/scripts/inference.py b/generation/maisi/scripts/inference.py
index 3ef2b70e5..8220f200c 100644
--- a/generation/maisi/scripts/inference.py
+++ b/generation/maisi/scripts/inference.py
@@ -231,5 +231,5 @@ def main():
     )
     torch.cuda.reset_peak_memory_stats()
    main()
-    peak_memory_gb = torch.cuda.max_memory_allocated() / (1024 ** 3)  # Convert to GB
+    peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)  # Convert to GB
     print(f"Peak GPU memory usage: {peak_memory_gb:.2f} GB")
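
The `inference.py` hunk above is the standard PyTorch pattern for measuring peak GPU memory: reset the allocator's high-water mark before the workload, run it, then read `torch.cuda.max_memory_allocated()`. Below is a minimal, self-contained sketch of that pattern; `run_inference` is a hypothetical stand-in for the actual `main()` pipeline in `scripts/inference.py`, and a CUDA device is assumed to be available.

```python
import torch


def run_inference() -> None:
    # Hypothetical stand-in for the MAISI inference pipeline (main() in
    # scripts/inference.py); here it just does a GPU matmul to allocate memory.
    x = torch.randn(1024, 1024, device="cuda")
    _ = x @ x


if torch.cuda.is_available():
    # Clear the allocator's peak-memory counter so the measurement
    # reflects only the work that follows.
    torch.cuda.reset_peak_memory_stats()
    run_inference()
    # max_memory_allocated() returns the high-water mark in bytes since
    # the last reset; dividing by 1024**3 converts bytes to GiB.
    peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3)
    print(f"Peak GPU memory usage: {peak_memory_gb:.2f} GB")
```

The `(1024 ** 3)` to `(1024**3)` change itself is purely cosmetic: formatters such as Black write the power operator without surrounding spaces when both operands are simple, so the edit brings the line in line with that convention without changing behavior.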