From 069e0252bc2264a3f48e13ed64d8009ffe866dba Mon Sep 17 00:00:00 2001
From: loribonna <loribonna@gmail.com>
Date: Fri, 26 Jul 2024 11:15:48 +0200
Subject: [PATCH 1/2] Update docs. Fix docs build

---
 .github/workflows/deploy_pages.yml   |   3 +-
 README.md                            | 178 +++++++++++++++------------
 docs/getting_started/checkpoints.rst |   5 +-
 docs/getting_started/index.rst       |   2 +-
 docs/getting_started/scripts.rst     |   2 +-
 docs/getting_started/validation.rst  |   4 +-
 docs/models/index.rst                |   6 +-
 docs/readme.rst                      |  17 +--
 docs/utils/args.rst                  |   2 +-
 models/utils/continual_model.py      |  13 +-
 scripts/local_launcher.py            |   6 +-
 11 files changed, 133 insertions(+), 105 deletions(-)

diff --git a/.github/workflows/deploy_pages.yml b/.github/workflows/deploy_pages.yml
index d311f831..db560798 100644
--- a/.github/workflows/deploy_pages.yml
+++ b/.github/workflows/deploy_pages.yml
@@ -40,10 +40,9 @@ jobs:
       - name: Install dependencies
         run: |
           pip install -r docs/requirements.txt -r requirements.txt -r requirements-optional.txt
-          pip install quadprog==0.1.11
       - name: Sphinx build
         run: |
-          sphinx-build -j auto docs _build
+          python ../utils/args.py && sphinx-build -j auto docs _build
       - name: Setup Pages
         uses: actions/configure-pages@v5
       - name: Upload artifact
diff --git a/README.md b/README.md
index affe4d7c..5e3ab4ea 100644
--- a/README.md
+++ b/README.md
@@ -2,19 +2,27 @@
   <img width="230" height="230" src="logo.png" alt="logo">
 </p>
 
+<p align="center">
+  <img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/aimagelab/mammoth">
+  <a href="https://aimagelab.github.io/mammoth/index.html"><img alt="Static Badge" src="https://img.shields.io/badge/wiki-gray?style=flat&logo=readthedocs&link=https%3A%2F%2Faimagelab.github.io%2Fmammoth%2Findex.html"></a>
+  <img alt="Discord" src="https://img.shields.io/discord/1164956257392799860">
+</p>
+
 # Mammoth - An Extendible (General) Continual Learning Framework for Pytorch
 
-Official repository of [Class-Incremental Continual Learning into the eXtended DER-verse](https://arxiv.org/abs/2201.00766), [Dark Experience for General Continual Learning: a Strong, Simple Baseline](https://papers.nips.cc/paper/2020/hash/b704ea2c39778f07c617f6b7ce480e9e-Abstract.html), and [Semantic Residual Prompts for Continual Learning](https://arxiv.org/abs/2403.06870)
+Official repository of:
+- [Class-Incremental Continual Learning into the eXtended DER-verse](https://arxiv.org/abs/2201.00766)
+- [Dark Experience for General Continual Learning: a Strong, Simple Baseline](https://papers.nips.cc/paper/2020/hash/b704ea2c39778f07c617f6b7ce480e9e-Abstract.html)
+- [Semantic Residual Prompts for Continual Learning](https://arxiv.org/abs/2403.06870)
+- [CLIP with Generative Latent Replay: a Strong Baseline for Incremental Learning](https://arxiv.org/abs/2407.15793)
 
-Mammoth is a framework for continual learning research. With **40 methods and 21 datasets**, it includes the most complete list competitors and benchmarks for research purposes.
+Mammoth is a framework for continual learning research. With **more than 40 methods and 20 datasets**, it includes the most complete list of competitors and benchmarks for research purposes.
 
 The core idea of Mammoth is that it is designed to be modular, easy to extend, and - most importantly - _easy to debug_.
 Ideally, all the code necessary to run the experiments is included _in the repository_, without needing to check out other repositories or install additional packages.
 
 With Mammoth, nothing is set in stone. You can easily add new models, datasets, training strategies, or functionalities.
 
-Join our Discord Server for all your Mammoth-related questions → ![Discord Shield](https://discordapp.com/api/guilds/1164956257392799860/widget.png?style=shield)
-
 ## Documentation
 
 ### Check out the official [DOCUMENTATION](https://aimagelab.github.io/mammoth/) for more information on how to use Mammoth!
@@ -37,7 +45,7 @@ Join our Discord Server for all your Mammoth-related questions → ![Discord Shi
 
 ## Models
 
-Mammoth currently supports **42** models, with new releases covering the main competitors in literature.
+Mammoth currently supports **more than 40** models, with new releases covering the main competitors in literature.
 
 - Efficient Lifelong Learning with A-GEM (A-GEM, A-GEM-R - A-GEM with reservoir buffer): `agem`, `agem_r`
 - Bias Correction (BiC): `bic`.
@@ -74,10 +82,10 @@ Mammoth currently supports **42** models, with new releases covering the main co
 ## Datasets
 
 **NOTE**: Datasets are automatically downloaded in `data/`.
-- This can be changes by changing the `base_path` function in `utils/conf.py` or using the `--base_path` argument.
-- The `data/` folder should not tracked by git and is craeted automatically if missing.
+- This can be changed by changing the `base_path` function in `utils/conf.py` or using the `--base_path` argument.
+- The `data/` folder should not be tracked by git and is created automatically if missing.
 
-Mammoth includes **21** datasets, covering *toy classification problems* (different versions of MNIST), *standard domains* (CIFAR, Imagenet-R, TinyImagenet, MIT-67), *fine-grained classification domains* (Cars-196, CUB-200), *aerial domains* (EuroSAT-RGB, Resisc45), *medical domains* (CropDisease, ISIC, ChestX).
+Mammoth currently includes **21** datasets, covering *toy classification problems* (different versions of MNIST), *standard domains* (CIFAR, Imagenet-R, TinyImagenet, MIT-67), *fine-grained classification domains* (Cars-196, CUB-200), *aerial domains* (EuroSAT-RGB, Resisc45), *medical domains* (CropDisease, ISIC, ChestX).
 
 - Sequential MNIST (_Class-Il / Task-IL_): `seq-mnist`.
 - Permuted MNIST (_Domain-IL_): `perm-mnist`.
@@ -135,69 +143,56 @@ Mammoth includes **21** datasets, covering *toy classification problems* (differ
 
 ### Our Papers
 
+Expand to see the BibTex!
+
 <ul>
-<li><details><summary>Dark Experience for General Continual Learning: a Strong, Simple Baseline (<b>NeurIPS 2020</b>) <a href=https://arxiv.org/abs/2004.07211>paper</a></summary>
+<li><details><summary>CLIP with Generative Latent Replay: a Strong Baseline for Incremental Learning (<b>BMVC 2024</b>) <a href=https://arxiv.org/abs/2407.15793>paper</a></summary>
 
-<pre><code>@inproceedings{buzzega2020dark,
- author = {Buzzega, Pietro and Boschini, Matteo and Porrello, Angelo and Abati, Davide and Calderara, Simone},
- booktitle = {Advances in Neural Information Processing Systems},
- editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
- pages = {15920--15930},
- publisher = {Curran Associates, Inc.},
- title = {Dark Experience for General Continual Learning: a Strong, Simple Baseline},
- volume = {33},
- year = {2020}
+<pre><code>@inproceedings{frascaroli2024clip,
+  title={CLIP with Generative Latent Replay: a Strong Baseline for Incremental Learning},
+  author={Frascaroli, Emanuele and Panariello, Aniello and Buzzega, Pietro and Bonicelli, Lorenzo and Porrello, Angelo and Calderara, Simone},
+  booktitle={35th British Machine Vision Conference},
+  year={2024}
 }</code></pre>
 
-</details>
 </li>
-<li><details><summary>Rethinking Experience Replay: a Bag of Tricks for Continual Learning (<b>ICPR 2020</b>) <a href=https://arxiv.org/abs/2010.05595>paper</a> <a href=https://github.com/hastings24/rethinking_er>code</a></summary>
-
-<pre><code>@inproceedings{buzzega2021rethinking,
-  title={Rethinking experience replay: a bag of tricks for continual learning},
-  author={Buzzega, Pietro and Boschini, Matteo and Porrello, Angelo and Calderara, Simone},
-  booktitle={25th International Conference on Pattern Recognition},
-  pages={2180--2187},
-  year={2021},
-  organization={IEEE}
-}</code></pre>
 
-</li>
-<li><details><summary>Class-Incremental Continual Learning into the eXtended DER-verse (<b>TPAMI 2022</b>) <a href=https://arxiv.org/abs/2201.00766>paper</a></summary>
+<li><details><summary>Semantic Residual Prompts for Continual Learning (<b>ECCV 2024</b>) <a href=https://arxiv.org/abs/2403.06870>paper</a></summary>
 
-<pre><code>@article{boschini2022class,
-  title={Class-Incremental Continual Learning into the eXtended DER-verse},
-  author={Boschini, Matteo and Bonicelli, Lorenzo and Buzzega, Pietro and Porrello, Angelo and Calderara, Simone},
-  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
-  year={2022},
-  publisher={IEEE}
+<pre><code>@inproceedings{menabue2024semantic,
+  title={Semantic Residual Prompts for Continual Learning},
+  author={Menabue, Martin and Frascaroli, Emanuele and Boschini, Matteo and Sangineto, Enver and Bonicelli, Lorenzo and Porrello, Angelo and Calderara, Simone},
+  booktitle={18th European Conference on Computer Vision},
+  year={2024},
+  organization={Springer}
 }</code></pre>
 
 </li>
-<li><details><summary>Effects of Auxiliary Knowledge on Continual Learning (<b>ICPR 2022</b>) <a href=https://arxiv.org/abs/2206.02577>paper</a></summary>
 
-<pre><code>@inproceedings{bellitto2022effects,
-  title={Effects of auxiliary knowledge on continual learning},
-  author={Bellitto, Giovanni and Pennisi, Matteo and Palazzo, Simone and Bonicelli, Lorenzo and Boschini, Matteo and Calderara, Simone},
-  booktitle={26th International Conference on Pattern Recognition},
-  pages={1357--1363},
-  year={2022},
-  organization={IEEE}
+<li><details><summary>Mask and Compress: Efficient Skeleton-based Action Recognition in Continual Learning (<b>ICPR 2024</b>) <a href=https://arxiv.org/pdf/2407.01397>paper</a> <a href=https://github.com/Sperimental3/CHARON>code</a></summary>
+
+<pre><code>@inproceedings{mosconi2024mask,
+  title={Mask and Compress: Efficient Skeleton-based Action Recognition in Continual Learning},
+  author={Mosconi, Matteo and Sorokin, Andriy and Panariello, Aniello and Porrello, Angelo and Bonato, Jacopo and Cotogni, Marco and Sabetta, Luigi and Calderara, Simone and Cucchiara, Rita},
+  booktitle={International Conference on Pattern Recognition},
+  year={2024}
 }</code></pre>
 
 </li>
-<li><details><summary>Transfer without Forgetting (<b>ECCV 2022</b>) <a href=https://arxiv.org/abs/2206.00388>paper</a> <a href=https://github.com/mbosc/twf>code</a> (Also available here)</summary>
 
-<pre><code>@inproceedings{boschini2022transfer,
-  title={Transfer without forgetting},
-  author={Boschini, Matteo and Bonicelli, Lorenzo and Porrello, Angelo and Bellitto, Giovanni and Pennisi, Matteo and Palazzo, Simone and Spampinato, Concetto and Calderara, Simone},
-  booktitle={17th European Conference on Computer Vision},
-  pages={692--709},
-  year={2022},
-  organization={Springer}
+<li><details><summary>On the Effectiveness of Lipschitz-Driven Rehearsal in Continual Learning (<b>NeurIPS 2022</b>) <a href=https://arxiv.org/abs/2210.06443>paper</a> <a href=https://github.com/aimagelab/lider>code</a> (Also available here)</summary>
+
+<pre><code>@article{bonicelli2022effectiveness,
+  title={On the effectiveness of lipschitz-driven rehearsal in continual learning},
+  author={Bonicelli, Lorenzo and Boschini, Matteo and Porrello, Angelo and Spampinato, Concetto and Calderara, Simone},
+  journal={Advances in Neural Information Processing Systems},
+  volume={35},
+  pages={31886--31901},
+  year={2022}
 }</code></pre>
 
 </li>
+
 <li><details><summary>Continual semi-supervised learning through contrastive interpolation consistency (<b>PRL 2022</b>) <a href=https://arxiv.org/abs/2108.06552>paper</a> <a href=https://github.com/aimagelab/CSSL>code</a> (Also available here)</summary>
 
 <pre><code>@article{boschini2022continual,
@@ -211,50 +206,73 @@ Mammoth includes **21** datasets, covering *toy classification problems* (differ
 }</code></pre>
 
 </li>
-<li><details><summary>On the Effectiveness of Lipschitz-Driven Rehearsal in Continual Learning (<b>NeurIPS 2022</b>) <a href=https://arxiv.org/abs/2210.06443>paper</a> <a href=https://github.com/aimagelab/lider>code</a> (Also available here)</summary>
 
-<pre><code>@article{bonicelli2022effectiveness,
-  title={On the effectiveness of lipschitz-driven rehearsal in continual learning},
-  author={Bonicelli, Lorenzo and Boschini, Matteo and Porrello, Angelo and Spampinato, Concetto and Calderara, Simone},
-  journal={Advances in Neural Information Processing Systems},
-  volume={35},
-  pages={31886--31901},
-  year={2022}
+<li><details><summary>Transfer without Forgetting (<b>ECCV 2022</b>) <a href=https://arxiv.org/abs/2206.00388>paper</a> <a href=https://github.com/mbosc/twf>code</a> (Also available here)</summary>
+
+<pre><code>@inproceedings{boschini2022transfer,
+  title={Transfer without forgetting},
+  author={Boschini, Matteo and Bonicelli, Lorenzo and Porrello, Angelo and Bellitto, Giovanni and Pennisi, Matteo and Palazzo, Simone and Spampinato, Concetto and Calderara, Simone},
+  booktitle={17th European Conference on Computer Vision},
+  pages={692--709},
+  year={2022},
+  organization={Springer}
 }</code></pre>
 
 </li>
-<li><details><summary>Mask and Compress: Efficient Skeleton-based Action Recognition in Continual Learning (<b>ICPR 2024</b>) <a href=https://arxiv.org/pdf/2407.01397>paper</a> <a href=https://github.com/Sperimental3/CHARON>code</a></summary>
 
-<pre><code>@inproceedings{mosconi2024mask,
-  title={Mask and Compress: Efficient Skeleton-based Action Recognition in Continual Learning},
-  author={Mosconi, Matteo and Sorokin, Andriy and Panariello, Aniello and Porrello, Angelo and Bonato, Jacopo and Cotogni, Marco and Sabetta, Luigi and Calderara, Simone and Cucchiara, Rita},
-  booktitle={International Conference on Pattern Recognition},
-  year={2024}
+<li><details><summary>Effects of Auxiliary Knowledge on Continual Learning (<b>ICPR 2022</b>) <a href=https://arxiv.org/abs/2206.02577>paper</a></summary>
+
+<pre><code>@inproceedings{bellitto2022effects,
+  title={Effects of auxiliary knowledge on continual learning},
+  author={Bellitto, Giovanni and Pennisi, Matteo and Palazzo, Simone and Bonicelli, Lorenzo and Boschini, Matteo and Calderara, Simone},
+  booktitle={26th International Conference on Pattern Recognition},
+  pages={1357--1363},
+  year={2022},
+  organization={IEEE}
 }</code></pre>
 
 </li>
-<li><details><summary>Semantic Residual Prompts for Continual Learning (<b>ECCV 2024</b>) <a href=https://arxiv.org/abs/2403.06870>paper</a></summary>
 
-<pre><code>@inproceedings{menabue2024semantic,
-  title={Semantic Residual Prompts for Continual Learning},
-  author={Menabue, Martin and Frascaroli, Emanuele and Boschini, Matteo and Sangineto, Enver and Bonicelli, Lorenzo and Porrello, Angelo and Calderara, Simone},
-  booktitle={18th European Conference on Computer Vision},
-  year={202},
-  organization={Springer}
+<li><details><summary>Class-Incremental Continual Learning into the eXtended DER-verse (<b>TPAMI 2022</b>) <a href=https://arxiv.org/abs/2201.00766>paper</a></summary>
+
+<pre><code>@article{boschini2022class,
+  title={Class-Incremental Continual Learning into the eXtended DER-verse},
+  author={Boschini, Matteo and Bonicelli, Lorenzo and Buzzega, Pietro and Porrello, Angelo and Calderara, Simone},
+  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+  year={2022},
+  publisher={IEEE}
 }</code></pre>
 
 </li>
-<li><details><summary>CLIP with Generative Latent Replay: a Strong Baseline for Incremental Learning (<b>BMVC 2024</b>) <a href=https://arxiv.org/abs/2407.15793>paper</a></summary>
 
-<pre><code>@inproceedings{heng2022enhancing,
-  title={CLIP with Generative Latent Replay: a Strong Baseline for Incremental Learning},
-  author={Frascaroli, Emanuele and Panariello, Aniello and Buzzega, Pietro and Bonicelli, Lorenzo and Porrello, Angelo and Calderara, Simone},
-  booktitle={35th British Machine Vision Conference},
-  year={2024}
+<li><details><summary>Rethinking Experience Replay: a Bag of Tricks for Continual Learning (<b>ICPR 2020</b>) <a href=https://arxiv.org/abs/2010.05595>paper</a> <a href=https://github.com/hastings24/rethinking_er>code</a></summary>
+
+<pre><code>@inproceedings{buzzega2021rethinking,
+  title={Rethinking experience replay: a bag of tricks for continual learning},
+  author={Buzzega, Pietro and Boschini, Matteo and Porrello, Angelo and Calderara, Simone},
+  booktitle={25th International Conference on Pattern Recognition},
+  pages={2180--2187},
+  year={2021},
+  organization={IEEE}
 }</code></pre>
 
 </li>
 
+<li><details><summary>Dark Experience for General Continual Learning: a Strong, Simple Baseline (<b>NeurIPS 2020</b>) <a href=https://arxiv.org/abs/2004.07211>paper</a></summary>
+
+<pre><code>@inproceedings{buzzega2020dark,
+ author = {Buzzega, Pietro and Boschini, Matteo and Porrello, Angelo and Abati, Davide and Calderara, Simone},
+ booktitle = {Advances in Neural Information Processing Systems},
+ editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
+ pages = {15920--15930},
+ publisher = {Curran Associates, Inc.},
+ title = {Dark Experience for General Continual Learning: a Strong, Simple Baseline},
+ volume = {33},
+ year = {2020}
+}</code></pre>
+
+</details>
+</li>
 </ul>
 
 ### Other Awesome CL works using Mammoth
diff --git a/docs/getting_started/checkpoints.rst b/docs/getting_started/checkpoints.rst
index bdca76b6..b6dd21ca 100644
--- a/docs/getting_started/checkpoints.rst
+++ b/docs/getting_started/checkpoints.rst
@@ -1,14 +1,15 @@
 Load and save checkpoints
 =========================
 
-Loading and saving checkpoints is handeled automatically in :ref:`module-training` by supplying the ``--savecheck`` and ``--loadcheck`` arguments. 
+Loading and saving checkpoints is handled automatically in :ref:`module-training` by supplying the ``--savecheck`` and ``--loadcheck`` arguments. 
 
 For example, to save a checkpoint after the end of the last task, simply run the following command:
+
 .. code-block:: python 
     
         python utils/main.py --savecheck=last --model=sgd --dataset=seq-cifar10 --lr=0.1
 
-Other options for ``--savecheck`` are:
+The available options for ``--savecheck`` are:
 
 - ``last``: save the checkpoint after **the last task**.
 - ``task``: save the checkpoint after **each task**.
diff --git a/docs/getting_started/index.rst b/docs/getting_started/index.rst
index ce9fc110..e02aaf18 100644
--- a/docs/getting_started/index.rst
+++ b/docs/getting_started/index.rst
@@ -22,7 +22,7 @@ WandB
 For advanced logging, including loss values, metrics, and hyperparameters, you can use `WandB <https://wandb.ai/>`_ by providing both ``--wandb_project`` and ``--wandb_entity`` arguments. If you don't want to use WandB, you can simply omit these arguments.
 
 .. tip::
-    By default, all arguments, loss values, and metrics are logged. Thanks to the **autolog_wandb** (:ref:`module-models`), all the variables created in the **observe** that contain *loss* or start with *_wandb_* will be logged. Thus, in order to loss all the separate loss values, you can simply add ``loss = loss + loss1 + loss2`` to the **observe** function.
+    By default, all arguments, loss values, and metrics are logged. Thanks to the **autolog_wandb** (:ref:`module-models`), all the variables created in the **observe** that contain *loss* or start with *_wandb_* will be logged. Thus, in order to log all the separate loss values, you can simply add ``loss = loss + loss1 + loss2`` to the **observe** function.
 
 Metrics are logged on WandB both in a raw form, separated for each task and class. This allows further analysis (e.g., with the Mammoth :ref:`Parseval <module-parseval>`). To differentiate between raw metrics logged on WandB and other aggregated metrics that may have been logged, all the raw metrics are prefixed with **RESULTS_**. This behavior can be changed by changing the prefix in the **log_accs** function in :ref:`module-loggers`.
 
diff --git a/docs/getting_started/scripts.rst b/docs/getting_started/scripts.rst
index 294edde3..2724740c 100644
--- a/docs/getting_started/scripts.rst
+++ b/docs/getting_started/scripts.rst
@@ -8,7 +8,7 @@ Mammoth includes a couple of scripts (under the ``scripts`` folder) to help you
 - ``scripts/local_launcher.py``: this script will launch all the experiments in the ``data/jobs/<experiment_name>.txt`` file in paralel on your local machine. Logs for each experiment will be stored in the ``logs`` folder. It accepts the following arguments:
     - ``--file``: path to the file containing the experiments to run (default: ``data/jobs/<experiment_name>.txt``)
 
-    - ``--redundancy``: number of times each experiment should be repeated (default: 1)
+    - ``--cycles``: number of times each experiment should be repeated (default: 1)
 
     - ``--at_a_time``: number of experiments to run in parallel (default: 1)
 
diff --git a/docs/getting_started/validation.rst b/docs/getting_started/validation.rst
index 22f0bdaf..c26cc665 100644
--- a/docs/getting_started/validation.rst
+++ b/docs/getting_started/validation.rst
@@ -6,11 +6,11 @@ Training, Validation, and Testing
 During each task, Mammoth trains on the current data until some stopping criterion is met. 
 Currently, Mammoth supports 3 types of stopping criteria, which can be chosen using the ``--fitting_mode`` command line argument. The three types are ``epochs``, ``iters``, and ``early_stopping``. The default is ``epochs``.
 
-.. rubric:: Criterion by epochs (``--fitting_modeepochs``)
+.. rubric:: Criterion by epochs (``--fitting_mode=epochs``)
 
 This is the default option, for which training stops after a fixed number of **epochs**. The number of epochs can be set using the ``--n_epochs`` command line argument. Note that most datasets indicate the default number of epochs via the `set_default_from_args` decorator (see :ref:`module-datasets` for more information).
 
-.. rubric:: Criterion by iterations (``--fitting_modeiters``)
+.. rubric:: Criterion by iterations (``--fitting_mode=iters``)
 
 This option stops training after a fixed number of **iterations**. The number of iterations can be set using the ``--n_iters`` command line argument. In addition, a default value for each dataset can be set using the `set_default_from_args` decorator. For example, to set the default number of iterations to 1000 for a particular dataset you can use the following code, adding it to the dataset class definition:
 
diff --git a/docs/models/index.rst b/docs/models/index.rst
index 7a8dd413..301c0251 100644
--- a/docs/models/index.rst
+++ b/docs/models/index.rst
@@ -3,8 +3,8 @@
 Models
 ========
 
-A **model** is a class that contains a few requires methods and attributes to be used in the continual learning framework.
-To be compatible with the auto-detection mechanism (the **get_model** function below), a model must:
+A **model** is defined as a Python class that defines a few methods and attributes to be used in the continual learning framework.
+To be compatible with the *auto-detection* mechanism (the **get_model** function below), a model must:
 
 * extend the base class **ContinualModel** in :ref:`module-continual_model`, which implements most of the required methods, leaving to the user the definition of the **observe** method (see in :ref:`training and testing`). In addition, the model must define the **NAME** and **COMPATIBILITY** attributes (see below).
 
@@ -87,7 +87,7 @@ The base class **ContinualModel** provides a few properties that are automatical
 
     - **task_iteration**: the number of iterations performed during the current task. This attribute is automatically updated *after* each **observe** call and is reset at the beginning of each task (*before* the **begin_task**). Can be used to implement a virtual batch size (see :ref:`module-twf`).
 
-    - **cpt**: the *raw* amount of classes for each task. This could be either an integer (i.e., the number of classes for each task is the same) or a list of integers (i.e., the number of classes for each task is different).
+    - **classes_per_task** (alias **cpt**): the *raw* amount of classes for each task. This could be either an integer (i.e., the number of classes for each task is the same) or a list of integers (i.e., the number of classes for each task is different).
 
 .. admonition:: Transforms and dataset-related Attributes
 
diff --git a/docs/readme.rst b/docs/readme.rst
index 8773ac8d..396b8c8b 100644
--- a/docs/readme.rst
+++ b/docs/readme.rst
@@ -9,19 +9,20 @@ Welcome to Mammoth's documentation!
 Mammoth - An Extendible (General) Continual Learning Framework for Pytorch
 ==========================================================================
 
-Official repository of `Class-Incremental Continual Learning into the eXtended DER-verse <https://arxiv.org/abs/2201.00766>`_, `Dark Experience for General Continual Learning: a Strong, Simple Baseline <https://papers.nips.cc/paper/2020/hash/b704ea2c39778f07c617f6b7ce480e9e-Abstract.html>`_, and `Semantic Residual Prompts for Continual Learning <https://arxiv.org/abs/2403.06870>`_.
+Official repository of:
 
-Mammoth is a framework for continual learning research. With **40 methods and 21 datasets**, it includes the most complete list competitors and benchmarks for research purposes.
+- `Class-Incremental Continual Learning into the eXtended DER-verse <https://arxiv.org/abs/2201.00766>`_
+- `Dark Experience for General Continual Learning: a Strong, Simple Baseline <https://papers.nips.cc/paper/2020/hash/b704ea2c39778f07c617f6b7ce480e9e-Abstract.html>`_
+- `Semantic Residual Prompts for Continual Learning <https://arxiv.org/abs/2403.06870>`_
+- `CLIP with Generative Latent Replay: a Strong Baseline for Incremental Learning <https://arxiv.org/abs/2407.15793>`_
+
+Mammoth is a framework for continual learning research. With **more than 40 methods and 20 datasets**, it includes the most complete list of competitors and benchmarks for research purposes.
 
 The core idea of Mammoth is that it is designed to be modular, easy to extend, and - most importantly - *easy to debug*.
 Ideally, all the code necessary to run the experiments is included *in the repository*, without needing to check out other repositories or install additional packages.
 
 With Mammoth, nothing is set in stone. You can easily add new models, datasets, training strategies, or functionalities.
 
-**NEW**: Join our Discord Server for all your Mammoth-related questions!
-
-.. image:: https://discordapp.com/api/guilds/1164956257392799860/widget.png?style=shield
-
 .. list-table::
    :widths: 15 15 15 15 15 15
    :class: centered
@@ -77,8 +78,8 @@ Datasets
 --------
 
 **NOTE**: Datasets are automatically downloaded in ``data/``.
-- This can be changes by changing the ``base_path`` function in ``utils/conf.py`` or using the ``--base_path`` argument.
-- The ``data/`` folder should not tracked by git and is craeted automatically if missing.
+- This can be changed by changing the ``base_path`` function in ``utils/conf.py`` or using the ``--base_path`` argument.
+- The ``data/`` folder should not be tracked by git and is created automatically if missing.
 
 Mammoth includes **21** datasets, covering *toy classification problems* (different versions of MNIST), *standard domains* (CIFAR, Imagenet-R, TinyImagenet, MIT-67), *fine-grained classification domains* (Cars-196, CUB-200), *aerial domains* (EuroSAT-RGB, Resisc45), *medical domains* (CropDisease, ISIC, ChestX).
 
diff --git a/docs/utils/args.rst b/docs/utils/args.rst
index 30c9d7e3..95a88f38 100644
--- a/docs/utils/args.rst
+++ b/docs/utils/args.rst
@@ -13,7 +13,7 @@ Arguments
 	*Help*: Which dataset to perform experiments on.
 
 	- *Default*: ``None``
-	- *Choices*: ``seq-tinyimg, seq-mit67, seq-cars196, seq-cifar100-224-rs, seq-cifar100-224, seq-chestx, seq-cifar10-224-rs, mnist-360, seq-cropdisease, seq-eurosat-rgb, seq-imagenet-r, seq-cifar100, seq-cifar10-224, perm-mnist, seq-cub200, seq-cifar10, rot-mnist, seq-resisc45, seq-mnist, seq-isic, seq-tinyimg-r``
+	- *Choices*: ``seq-tinyimg, seq-mit67, seq-cars196, seq-cifar100-224-rs, seq-cifar100-224, seq-chestx, seq-cifar10-224-rs, mnist-360, seq-cropdisease, seq-eurosat-rgb, seq-imagenet-r, seq-cifar100, seq-cifar10-224, perm-mnist, seq-cub200, seq-cifar10, rot-mnist, seq-resisc45, seq-mnist, seq-cub200-rs, seq-isic, seq-tinyimg-r``
 **\-\-model** : custom_str_underscore
 	*Help*: Model name.
 
diff --git a/models/utils/continual_model.py b/models/utils/continual_model.py
index a1791bd2..a71dbbce 100644
--- a/models/utils/continual_model.py
+++ b/models/utils/continual_model.py
@@ -37,7 +37,7 @@
 from datasets import get_dataset
 from datasets.utils.continual_dataset import ContinualDataset
 
-from utils.conf import get_device
+from utils.conf import get_device, warn_once
 from utils.kornia_utils import to_kornia_transform
 from utils.magic import persistent_locals
 from torchvision import transforms
@@ -149,9 +149,17 @@ def n_past_classes(self):
 
     @property
     def cpt(self):
+        """
+        Alias of `classes_per_task`: returns the raw number of classes per task.
+        Warning: return value might be either an integer or a list of integers depending on the dataset.
+        """
+        return self._cpt
+
+    @property
+    def classes_per_task(self):
         """
         Returns the raw number of classes per task.
-        Warning: return value might be either an integer or a list of integers.
+        Warning: return value might be either an integer or a list of integers depending on the dataset.
         """
         return self._cpt
 
@@ -160,6 +168,7 @@ def cpt(self, value):
         """
         Sets the number of classes per task.
         """
+        warn_once("Setting the number of classes per task is not recommended.")
         self._cpt = value
 
     def __init__(self, backbone: nn.Module, loss: nn.Module,
diff --git a/scripts/local_launcher.py b/scripts/local_launcher.py
index cc4f9524..bcc04743 100644
--- a/scripts/local_launcher.py
+++ b/scripts/local_launcher.py
@@ -29,17 +29,17 @@
 def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--file", type=str, help="file containing jobs")
-    parser.add_argument("--redundancy", type=int, default=1, help="number of times to run each job")
+    parser.add_argument("--cycles", type=int, default=1, help="number of times to run each job")
     parser.add_argument("--at_a_time", type=int, default=1, help="number of jobs to run at a time")
     parser.add_argument("--start_from", type=int, default=0, help="start from job number")
     parser.add_argument("--reverse", action="store_true", help="reverse job order")
     args = parser.parse_args()
 
     assert args.at_a_time >= 1, "at_a_time must be at least 1"
-    assert args.redundancy >= 1, "redundancy must be at least 1"
+    assert args.cycles >= 1, "cycles must be at least 1"
     assert args.start_from >= 0, "start_from must be at least 0"
 
-    jobs_list = [l for l in open(args.file, "r").read().splitlines() if l.strip() != "" and not l.strip().startswith("#")][args.start_from:] * args.redundancy
+    jobs_list = [l for l in open(args.file, "r").read().splitlines() if l.strip() != "" and not l.strip().startswith("#")][args.start_from:] * args.cycles
     if args.reverse:
         jobs_list = list(reversed(jobs_list))
     jobname = args.file.strip().split("/")[-1].split("\\")[-1].split(".")[0]

From ce871d530351cf35385e59bdec3fc1af530a4ddb Mon Sep 17 00:00:00 2001
From: Lorenzo Bonicelli <loribonna@gmail.com>
Date: Fri, 26 Jul 2024 11:24:12 +0200
Subject: [PATCH 2/2] Update deploy_pages.yml

---
 .github/workflows/deploy_pages.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/deploy_pages.yml b/.github/workflows/deploy_pages.yml
index db560798..e849a7c2 100644
--- a/.github/workflows/deploy_pages.yml
+++ b/.github/workflows/deploy_pages.yml
@@ -42,7 +42,7 @@ jobs:
           pip install -r docs/requirements.txt -r requirements.txt -r requirements-optional.txt
       - name: Sphinx build
         run: |
-          python ../utils/args.py && sphinx-build -j auto docs _build
+          python ./utils/args.py && sphinx-build -j auto docs _build
       - name: Setup Pages
         uses: actions/configure-pages@v5
       - name: Upload artifact
@@ -52,4 +52,4 @@ jobs:
           path: '_build'
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v4
\ No newline at end of file
+        uses: actions/deploy-pages@v4