Fix flake8 error #1618

Merged: 11 commits, Jan 19, 2024
Changes from 3 commits
@@ -230,7 +230,6 @@
"pygments_lexer": "ipython3",
"version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
2d_classification/monai_101.ipynb: 3 changes (1 addition, 2 deletions)
@@ -345,8 +345,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
- },
- "orig_nbformat": 4
+ }
},
"nbformat": 4,
"nbformat_minor": 2
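Note: the hunks above (and the first hunk in this view) delete the deprecated "orig_nbformat" key from the notebooks' metadata. As an illustrative sketch, not part of this PR, a cleanup of this kind can be scripted with the standard library:

    import json
    from pathlib import Path

    def strip_orig_nbformat(path: Path) -> None:
        """Drop the deprecated 'orig_nbformat' key from a notebook's metadata."""
        nb = json.loads(path.read_text())
        if nb.get("metadata", {}).pop("orig_nbformat", None) is not None:
            path.write_text(json.dumps(nb, indent=1) + "\n")

    strip_orig_nbformat(Path("2d_classification/monai_101.ipynb"))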
3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb: 6 changes (4 additions, 2 deletions)
@@ -523,7 +523,7 @@
" val_outputs_list = decollate_batch(val_outputs)\n",
" val_output_convert = [post_pred(val_pred_tensor) for val_pred_tensor in val_outputs_list]\n",
" dice_metric(y_pred=val_output_convert, y=val_labels_convert)\n",
" epoch_iterator_val.set_description(\"Validate (%d / %d Steps)\" % (global_step, 10.0))\n",
" epoch_iterator_val.set_description(\"Validate (%d / %d Steps)\" % (global_step, 10.0)) # noqa: B038\n",
" mean_dice_val = dice_metric.aggregate().item()\n",
" dice_metric.reset()\n",
" return mean_dice_val\n",
@@ -546,7 +546,9 @@
" scaler.step(optimizer)\n",
" scaler.update()\n",
" optimizer.zero_grad()\n",
" epoch_iterator.set_description(f\"Training ({global_step} / {max_iterations} Steps) (loss={loss:2.5f})\")\n",
" epoch_iterator.set_description( # noqa: B038\n",
" f\"Training ({global_step} / {max_iterations} Steps) (loss={loss:2.5f})\"\n",
" )\n",
" if (global_step % eval_num == 0 and global_step != 0) or global_step == max_iterations:\n",
" epoch_iterator_val = tqdm(val_loader, desc=\"Validate (X / X Steps) (dice=X.X)\", dynamic_ncols=True)\n",
" dice_val = validation(epoch_iterator_val)\n",
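Note on the noqa comments in this file and the next: flake8-bugbear's B038 check flags method calls on an object while it is being iterated, since such calls can mutate the iterable mid-loop. tqdm's set_description only updates the progress-bar text, so the warning is a false positive here and is suppressed inline rather than restructured. A minimal sketch of the flagged pattern:

    from tqdm import tqdm

    pbar = tqdm(range(100), dynamic_ncols=True)
    for step in pbar:
        ...  # training or validation work for this step
        # bugbear cannot tell this call is harmless, hence the suppression
        pbar.set_description(f"Training ({step} / 100 Steps)")  # noqa: B038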
3d_segmentation/unetr_btcv_segmentation_3d.ipynb: 6 changes (4 additions, 2 deletions)
@@ -621,7 +621,7 @@
" val_outputs_list = decollate_batch(val_outputs)\n",
" val_output_convert = [post_pred(val_pred_tensor) for val_pred_tensor in val_outputs_list]\n",
" dice_metric(y_pred=val_output_convert, y=val_labels_convert)\n",
" epoch_iterator_val.set_description(\"Validate (%d / %d Steps)\" % (global_step, 10.0))\n",
" epoch_iterator_val.set_description(\"Validate (%d / %d Steps)\" % (global_step, 10.0)) # noqa: B038\n",
" mean_dice_val = dice_metric.aggregate().item()\n",
" dice_metric.reset()\n",
" return mean_dice_val\n",
@@ -641,7 +641,9 @@
" epoch_loss += loss.item()\n",
" optimizer.step()\n",
" optimizer.zero_grad()\n",
" epoch_iterator.set_description(\"Training (%d / %d Steps) (loss=%2.5f)\" % (global_step, max_iterations, loss))\n",
" epoch_iterator.set_description( # noqa: B038\n",
" \"Training (%d / %d Steps) (loss=%2.5f)\" % (global_step, max_iterations, loss)\n",
" )\n",
" if (global_step % eval_num == 0 and global_step != 0) or global_step == max_iterations:\n",
" epoch_iterator_val = tqdm(val_loader, desc=\"Validate (X / X Steps) (dice=X.X)\", dynamic_ncols=True)\n",
" dice_val = validation(epoch_iterator_val)\n",
bundle/01_bundle_intro.ipynb: 6 changes (3 additions, 3 deletions)
@@ -402,9 +402,9 @@
"datadicts: '$[{i: (i * i)} for i in range(10)]' # create a fake dataset as a list of dicts\n",
"\n",
"test_dataset: # creates an instance of an object because _target_ is present\n",
" _target_: Dataset # name of type to create is monai.data.Dataset (loaded implicitly from MONAI)\n",
" data: '@datadicts' # argument data provided by a definition\n",
" transform: '$None' # argument transform provided by a Python expression\n",
" _target_: Dataset # name of type to create is monai.data.Dataset (loaded implicitly from MONAI)\n",
KumoLiu marked this conversation as resolved.
Show resolved Hide resolved
" data: '@datadicts' # argument data provided by a definition\n",
" transform: '$None' # argument transform provided by a Python expression\n",
"\n",
"test:\n",
"- '$print(\"Dataset\", @test_dataset)'\n",
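Note: the hunk above only re-indents the test_dataset block; its meaning is unchanged. In MONAI bundle configs, _target_ names the class to instantiate, '@' references another definition, and '$' marks a Python expression. A sketch of how such a config is consumed, assuming the YAML above is saved to a file (the file name here is illustrative):

    from monai.bundle import ConfigParser

    parser = ConfigParser()
    parser.read_config("bundle_intro.yaml")  # the config shown above

    # instantiates monai.data.Dataset(data=<the fake datadicts>, transform=None)
    test_dataset = parser.get_parsed_content("test_dataset")

    # evaluates each '$...' expression under "test", printing the dataset
    parser.get_parsed_content("test")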
bundle/02_mednist_classification.ipynb: 86 changes (43 additions, 43 deletions)
@@ -265,7 +265,9 @@
"cell_type": "code",
"execution_count": 3,
"id": "d11681af-3210-4b2b-b7bd-8ad8dedfe230",
"metadata": {},
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [
{
"name": "stdout",
@@ -292,26 +294,25 @@
"\n",
"# define the network separately, don't need to refer to MONAI types by name or import MONAI\n",
"network_def:\n",
" _target_: densenet121\n",
" spatial_dims: 2\n",
" in_channels: 1\n",
" out_channels: 6\n",
" _target_: densenet121\n",
" spatial_dims: 2\n",
" in_channels: 1\n",
" out_channels: 6\n",
"\n",
"# define the network to be the given definition moved to the device\n",
"net: '$@network_def.to(@device)'\n",
"\n",
"# define a transform sequence by instantiating a Compose instance with a transform sequence\n",
"transform:\n",
" _target_: Compose\n",
" transforms:\n",
" - _target_: LoadImaged\n",
" _target_: Compose\n",
" transforms:\n",
" - _target_: LoadImaged\n",
" keys: 'image'\n",
" image_only: true\n",
" - _target_: EnsureChannelFirstd\n",
" keys: 'image'\n",
" - _target_: ScaleIntensityd\n",
" - _target_: EnsureChannelFirstd\n",
" keys: 'image'\n",
" "
" - _target_: ScaleIntensityd\n",
" keys: 'image'"
]
},
{
@@ -355,18 +356,18 @@
"max_epochs: 25\n",
"\n",
"dataset:\n",
" _target_: MedNISTDataset\n",
" root_dir: '@root_dir'\n",
" transform: '@transform'\n",
" section: training\n",
" download: true\n",
" _target_: MedNISTDataset\n",
" root_dir: '@root_dir'\n",
" transform: '@transform'\n",
" section: training\n",
" download: true\n",
"\n",
"train_dl:\n",
" _target_: DataLoader\n",
" dataset: '@dataset'\n",
" batch_size: 512\n",
" shuffle: true\n",
" num_workers: 4\n",
" _target_: DataLoader\n",
" dataset: '@dataset'\n",
" batch_size: 512\n",
" shuffle: true\n",
" num_workers: 4\n",
"\n",
"trainer:\n",
" _target_: SupervisedTrainer\n",
@@ -375,13 +376,13 @@
" train_data_loader: '@train_dl'\n",
" network: '@net'\n",
" optimizer: \n",
" _target_: torch.optim.Adam\n",
" params: '[email protected]()'\n",
" lr: 0.00001 # learning rate set slow so that you can see network improvement over epochs\n",
" _target_: torch.optim.Adam\n",
" params: '[email protected]()'\n",
" lr: 0.00001 # learning rate set slow so that you can see network improvement over epochs\n",
" loss_function: \n",
" _target_: torch.nn.CrossEntropyLoss\n",
" _target_: torch.nn.CrossEntropyLoss\n",
" inferer: \n",
" _target_: SimpleInferer\n",
" _target_: SimpleInferer\n",
"\n",
"train:\n",
"- '[email protected]()'\n",
@@ -518,7 +519,6 @@
"source": [
"%%writefile MedNISTClassifier/scripts/__init__.py\n",
"\n",
"from monai.networks.utils import eval_mode\n",
"\n",
"def evaluate(net, dataloader, class_names, device):\n",
" with eval_mode(net):\n",
@@ -527,8 +527,7 @@
" prob = result.detach().to(\"cpu\")[0]\n",
" pred = class_names[prob.argmax()]\n",
" gt = item[\"class_name\"][0]\n",
" print(f\"Prediction: {pred}. Ground-truth: {gt}\")\n",
" "
" print(f\"Prediction: {pred}. Ground-truth: {gt}\")"
]
},
{
@@ -543,7 +542,9 @@
"cell_type": "code",
"execution_count": 9,
"id": "b4e1f99a-a68b-4aeb-bcf2-842f26609b52",
"metadata": {},
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [
{
"name": "stdout",
@@ -555,7 +556,6 @@
],
"source": [
"%%writefile MedNISTClassifier/configs/evaluate.yaml\n",
"\n",
"imports: \n",
"- $import scripts\n",
"\n",
@@ -564,23 +564,23 @@
"ckpt_file: \"\"\n",
"\n",
"testdata:\n",
" _target_: MedNISTDataset\n",
" root_dir: '@root_dir'\n",
" transform: '@transform'\n",
" section: test\n",
" download: false\n",
" runtime_cache: true\n",
" _target_: MedNISTDataset\n",
" root_dir: '@root_dir'\n",
" transform: '@transform'\n",
" section: test\n",
" download: false\n",
" runtime_cache: true\n",
"\n",
"eval_dl:\n",
" _target_: DataLoader\n",
" dataset: '$@testdata[:@max_items_to_print]'\n",
" batch_size: 1\n",
" num_workers: 0\n",
" _target_: DataLoader\n",
" dataset: '$@testdata[:@max_items_to_print]'\n",
" batch_size: 1\n",
" num_workers: 0\n",
"\n",
"# loads the weights from the given file (which needs to be set on the command line) then calls \"evaluate\"\n",
"evaluate:\n",
"- '[email protected]_state_dict(torch.load(@ckpt_file))'\n",
"- '$scripts.evaluate(@net, @eval_dl, @class_names, @device)'\n"
"- '$scripts.evaluate(@net, @eval_dl, @class_names, @device)'"
]
},
{
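Note: the evaluate program above expects ckpt_file to be supplied at run time (its default is the empty string). A sketch of one way to drive it from Python, assuming the shared definitions (net, transform, class_names, device) live in a common config built earlier in the notebook; file and checkpoint paths are illustrative:

    from monai.bundle import ConfigParser

    parser = ConfigParser()
    parser.read_config(["MedNISTClassifier/configs/common.yaml",
                        "MedNISTClassifier/configs/evaluate.yaml"])
    parser["ckpt_file"] = "model.pt"  # override the empty default

    # runs the two '$...' steps: load the weights, then call scripts.evaluate
    parser.get_parsed_content("evaluate")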