From d9dac455a1afeb60f57bc602d26dceb88d3282db Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 1 May 2024 14:56:47 -0400 Subject: [PATCH 001/199] Implemented Lora+ GUI parameter support --- kohya_gui/lora_gui.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index a7b2816c2..41ffdff47 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -232,6 +232,9 @@ def save_configuration( metadata_license, metadata_tags, metadata_title, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -437,6 +440,9 @@ def open_configuration( metadata_license, metadata_tags, metadata_title, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, training_preset, ): # Get list of function parameters and values @@ -672,6 +678,9 @@ def train_model( metadata_license, metadata_tags, metadata_title, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1126,6 +1135,9 @@ def train_model( "logging_dir": logging_dir, "log_tracker_name": log_tracker_name, "log_tracker_config": log_tracker_config, + "loraplus_lr_ratio": loraplus_lr_ratio if not 0 else None, + "loraplus_text_encoder_lr_ratio": loraplus_text_encoder_lr_ratio if not 0 else None, + "loraplus_unet_lr_ratio": loraplus_unet_lr_ratio if not 0 else None, "loss_type": loss_type, "lr_scheduler": lr_scheduler, "lr_scheduler_args": str(lr_scheduler_args).replace('"', "").split(), @@ -1468,6 +1480,31 @@ def list_presets(path): maximum=1, ) + with gr.Row(): + loraplus_lr_ratio = gr.Number( + label="LoRA+ learning rate ratio", + value=0, + info="(Optional) starting with 16 is suggested", + minimum=0, + maximum=128, + ) + + loraplus_unet_lr_ratio = gr.Number( + label="LoRA+ Unet learning rate ratio", + value=0, + info="(Optional) starting with 16 is suggested", + minimum=0, + maximum=128, + ) + + loraplus_text_encoder_lr_ratio = gr.Number( + label="LoRA+ Text Encoder learning rate ratio", + value=0, + info="(Optional) starting with 16 is suggested", + minimum=0, + maximum=128, + ) + # Add SDXL Parameters sdxl_params = SDXLParameters( source_model.sdxl_checkbox, config=config @@ -2318,6 +2355,9 @@ def update_LoRA_settings( metadata.metadata_license, metadata.metadata_tags, metadata.metadata_title, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, ] configuration.button_open_config.click( From 05610f88dd5b43bb2f56c87456c07510f6347e64 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 8 May 2024 20:08:02 -0400 Subject: [PATCH 002/199] Update lora+ code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index bfb352bc4..3fd8cdc55 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit bfb352bc433326a77aca3124248331eb60c49e8c +Subproject commit 3fd8cdc55d7d87ceca2dc1127a807a7ddafb15ae From fd6658f78cf448005e424eb5ca456d3855fa6f64 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 12 May 2024 11:04:10 -0400 Subject: [PATCH 003/199] Update sd-scripts submodule to the latest on dev --- .../pull kohya_ss sd-scripts updates in.md | 41 ++++++++----------- sd-scripts | 2 +- 2 files changed, 19 insertions(+), 24 deletions(-) diff --git a/examples/pull kohya_ss sd-scripts updates in.md b/examples/pull kohya_ss sd-scripts updates in.md 
index 47b1c79ad..6c94fb18b 100644 --- a/examples/pull kohya_ss sd-scripts updates in.md +++ b/examples/pull kohya_ss sd-scripts updates in.md @@ -1,32 +1,27 @@ -## Updating a Local Branch with the Latest sd-scripts Changes +## Updating a Local Submodule with the Latest sd-scripts Changes To update your local branch with the most recent changes from kohya/sd-scripts, follow these steps: -1. Add sd-scripts as an alternative remote by executing the following command: +1. When you wish to perform an update of the dev branch, execute the following commands: - ``` - git remote add sd-scripts https://github.com/kohya-ss/sd-scripts.git - ``` - -2. When you wish to perform an update, execute the following commands: - - ``` - git checkout dev - git pull sd-scripts main - ``` - - Alternatively, if you want to obtain the latest code, even if it may be unstable: - - ``` + ```bash + cd sd-scripts + git fetch git checkout dev - git pull sd-scripts dev + git pull origin dev + cd .. + git add sd-scripts + git commit -m "Update sd-scripts submodule to the latest on dev" ``` -3. If you encounter a conflict with the Readme file, you can resolve it by taking the following steps: + Alternatively, if you want to obtain the latest code from main: + ```bash + cd sd-scripts + git fetch + git checkout main + git pull origin main + cd .. + git add sd-scripts + git commit -m "Update sd-scripts submodule to the latest on main" ``` - git add README.md - git merge --continue - ``` - - This may open a text editor for a commit message, but you can simply save and close it to proceed. Following these steps should resolve the conflict. If you encounter additional merge conflicts, consider them as valuable learning opportunities for personal growth. \ No newline at end of file diff --git a/sd-scripts b/sd-scripts index bfb352bc4..16677da0d 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit bfb352bc433326a77aca3124248331eb60c49e8c +Subproject commit 16677da0d90ad9094a0301990b831a8dd6c0e957 From 0d999a354a011a0f44a7f4a50ae05d6a1cb8819d Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 12 May 2024 19:55:05 -0400 Subject: [PATCH 004/199] Implement Fused backward pass and fused optimiser groups --- config example.toml | 2 ++ kohya_gui/class_sdxl_parameters.py | 31 ++++++++++++++++++++++++++++++ kohya_gui/dreambooth_gui.py | 30 +++++++++++++++++++++++++++++ kohya_gui/finetune_gui.py | 12 +++++++++++- 4 files changed, 74 insertions(+), 1 deletion(-) diff --git a/config example.toml b/config example.toml index c137d4387..442dbf678 100644 --- a/config example.toml +++ b/config example.toml @@ -150,6 +150,8 @@ sample_prompts = "" # Sample prompts sample_sampler = "euler_a" # Sampler to use for image sampling [sdxl] +fused_backward_pass = false # Fused backward pass +fused_optimizer_groups = 0 # Fused optimizer groups sdxl_cache_text_encoder_outputs = false # Cache text encoder outputs sdxl_no_half_vae = true # No half VAE diff --git a/kohya_gui/class_sdxl_parameters.py b/kohya_gui/class_sdxl_parameters.py index b0098d2a3..f9a8cf188 100644 --- a/kohya_gui/class_sdxl_parameters.py +++ b/kohya_gui/class_sdxl_parameters.py @@ -7,10 +7,12 @@ def __init__( sdxl_checkbox: gr.Checkbox, show_sdxl_cache_text_encoder_outputs: bool = True, config: KohyaSSGUIConfig = {}, + trainer: str = "", ): self.sdxl_checkbox = sdxl_checkbox self.show_sdxl_cache_text_encoder_outputs = show_sdxl_cache_text_encoder_outputs self.config = config + self.trainer = trainer self.initialize_accordion() @@ -30,6 +32,35 @@ def 
initialize_accordion(self): info="Disable the half-precision (mixed-precision) VAE. VAE for SDXL seems to produce NaNs in some cases. This option is useful to avoid the NaNs.", value=self.config.get("sdxl.sdxl_no_half_vae", False), ) + self.fused_backward_pass = gr.Checkbox( + label="Fused backward pass", + info="Enable fused backward pass. This option is useful to reduce the GPU memory usage. Can't be used if Fused optimizer groups is > 0. Only AdaFactor is supported", + value=self.config.get("sdxl.fused_backward_pass", False), + visible=self.trainer == "finetune" or self.trainer == "dreambooth", + ) + self.fused_optimizer_groups = gr.Number( + label="Fused optimizer groups", + info="Number of optimizer groups to fuse. This option is useful to reduce the GPU memory usage. Can't be used if Fused backward pass is enabled. Since the effect is limited to a certain number, it is recommended to specify 4-10.", + value=self.config.get("sdxl.fused_optimizer_groups", 0), + minimum=0, + step=1, + visible=self.trainer == "finetune" or self.trainer == "dreambooth", + ) + self.fused_backward_pass.change( + lambda fused_backward_pass: gr.Number( + interactive=not fused_backward_pass + ), + inputs=[self.fused_backward_pass], + outputs=[self.fused_optimizer_groups], + ) + self.fused_optimizer_groups.change( + lambda fused_optimizer_groups: gr.Checkbox( + interactive=fused_optimizer_groups == 0 + ), + inputs=[self.fused_optimizer_groups], + outputs=[self.fused_backward_pass], + ) + self.sdxl_checkbox.change( lambda sdxl_checkbox: gr.Accordion(visible=sdxl_checkbox), diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index a38230a21..1d2bbb8f5 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -31,6 +31,7 @@ from .class_command_executor import CommandExecutor from .class_huggingface import HuggingFace from .class_metadata import MetaData +from .class_sdxl_parameters import SDXLParameters from .dreambooth_folder_creation_gui import ( gradio_dreambooth_folder_creation_tab, @@ -162,6 +163,10 @@ def save_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + fused_backward_pass, + fused_optimizer_groups, + sdxl_cache_text_encoder_outputs, + sdxl_no_half_vae, min_timestep, max_timestep, debiased_estimation_loss, @@ -320,6 +325,10 @@ def open_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + fused_backward_pass, + fused_optimizer_groups, + sdxl_cache_text_encoder_outputs, + sdxl_no_half_vae, min_timestep, max_timestep, debiased_estimation_loss, @@ -473,6 +482,10 @@ def train_model( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + fused_backward_pass, + fused_optimizer_groups, + sdxl_cache_text_encoder_outputs, + sdxl_no_half_vae, min_timestep, max_timestep, debiased_estimation_loss, @@ -544,6 +557,7 @@ def train_model( if not validate_model_path(vae): return TRAIN_BUTTON_VISIBLE + # # End of path validation # @@ -704,6 +718,9 @@ def train_model( run_cmd.append(rf'{scriptdir}/sd-scripts/sdxl_train.py') else: run_cmd.append(rf"{scriptdir}/sd-scripts/train_db.py") + + cache_text_encoder_outputs = sdxl and sdxl_cache_text_encoder_outputs + no_half_vae = sdxl and sdxl_no_half_vae if max_data_loader_n_workers == "" or None: max_data_loader_n_workers = 0 @@ -724,6 +741,7 @@ def train_model( "bucket_reso_steps": bucket_reso_steps, "cache_latents": cache_latents, "cache_latents_to_disk": cache_latents_to_disk, + "cache_text_encoder_outputs": cache_text_encoder_outputs, 
"caption_dropout_every_n_epochs": int(caption_dropout_every_n_epochs), "caption_dropout_rate": caption_dropout_rate, "caption_extension": caption_extension, @@ -737,6 +755,8 @@ def train_model( "flip_aug": flip_aug, "full_bf16": full_bf16, "full_fp16": full_fp16, + "fused_backward_pass": fused_backward_pass, + "fused_optimizer_groups": int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None, "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, "huber_c": huber_c, @@ -789,6 +809,7 @@ def train_model( "mixed_precision": mixed_precision, "multires_noise_discount": multires_noise_discount, "multires_noise_iterations": multires_noise_iterations if not 0 else None, + "no_half_vae": no_half_vae, "no_token_padding": no_token_padding, "noise_offset": noise_offset if not 0 else None, "noise_offset_random_strength": noise_offset_random_strength, @@ -980,6 +1001,11 @@ def dreambooth_tab( sdxl_checkbox=source_model.sdxl_checkbox, config=config, ) + + # Add SDXL Parameters + sdxl_params = SDXLParameters( + source_model.sdxl_checkbox, config=config, trainer="finetune", + ) with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): advanced_training = AdvancedTraining(headless=headless, config=config) @@ -1112,6 +1138,10 @@ def dreambooth_tab( advanced_training.log_tracker_name, advanced_training.log_tracker_config, advanced_training.scale_v_pred_loss_like_noise_pred, + sdxl_params.fused_backward_pass, + sdxl_params.fused_optimizer_groups, + sdxl_params.sdxl_cache_text_encoder_outputs, + sdxl_params.sdxl_no_half_vae, advanced_training.min_timestep, advanced_training.max_timestep, advanced_training.debiased_estimation_loss, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index c84922c0e..81c2b64b9 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -170,6 +170,8 @@ def save_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + fused_backward_pass, + fused_optimizer_groups, sdxl_cache_text_encoder_outputs, sdxl_no_half_vae, min_timestep, @@ -336,6 +338,8 @@ def open_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + fused_backward_pass, + fused_optimizer_groups, sdxl_cache_text_encoder_outputs, sdxl_no_half_vae, min_timestep, @@ -508,6 +512,8 @@ def train_model( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + fused_backward_pass, + fused_optimizer_groups, sdxl_cache_text_encoder_outputs, sdxl_no_half_vae, min_timestep, @@ -804,6 +810,8 @@ def train_model( "flip_aug": flip_aug, "full_bf16": full_bf16, "full_fp16": full_fp16, + "fused_backward_pass": fused_backward_pass, + "fused_optimizer_groups": int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None, "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, "huber_c": huber_c, @@ -1090,7 +1098,7 @@ def list_presets(path): # Add SDXL Parameters sdxl_params = SDXLParameters( - source_model.sdxl_checkbox, config=config + source_model.sdxl_checkbox, config=config, trainer="finetune", ) with gr.Row(): @@ -1250,6 +1258,8 @@ def list_presets(path): advanced_training.log_tracker_name, advanced_training.log_tracker_config, advanced_training.scale_v_pred_loss_like_noise_pred, + sdxl_params.fused_backward_pass, + sdxl_params.fused_optimizer_groups, sdxl_params.sdxl_cache_text_encoder_outputs, sdxl_params.sdxl_no_half_vae, advanced_training.min_timestep, From 
0b32f9cf050f70398a2992e2f3df59db142d1533 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 12 May 2024 20:09:00 -0400 Subject: [PATCH 005/199] Implement support for disable_mmap_load_safetensors --- config example.toml | 1 + kohya_gui/class_sdxl_parameters.py | 6 ++++++ kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 5 +++++ kohya_gui/textual_inversion_gui.py | 5 +++++ 5 files changed, 22 insertions(+) diff --git a/config example.toml b/config example.toml index 442dbf678..55b3f29e0 100644 --- a/config example.toml +++ b/config example.toml @@ -150,6 +150,7 @@ sample_prompts = "" # Sample prompts sample_sampler = "euler_a" # Sampler to use for image sampling [sdxl] +disable_mmap_load_safetensors = false # Disable mmap load safe tensors fused_backward_pass = false # Fused backward pass fused_optimizer_groups = 0 # Fused optimizer groups sdxl_cache_text_encoder_outputs = false # Cache text encoder outputs diff --git a/kohya_gui/class_sdxl_parameters.py b/kohya_gui/class_sdxl_parameters.py index f9a8cf188..e1141668c 100644 --- a/kohya_gui/class_sdxl_parameters.py +++ b/kohya_gui/class_sdxl_parameters.py @@ -46,6 +46,12 @@ def initialize_accordion(self): step=1, visible=self.trainer == "finetune" or self.trainer == "dreambooth", ) + self.disable_mmap_load_safetensors = gr.Checkbox( + label="Disable mmap load safe tensors", + info="Disable memory mapping when loading the model's .safetensors in SDXL.", + value=self.config.get("sdxl.disable_mmap_load_safetensors", False), + ) + self.fused_backward_pass.change( lambda fused_backward_pass: gr.Number( interactive=not fused_backward_pass diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 1d2bbb8f5..b71ca714c 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -163,6 +163,7 @@ def save_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, fused_backward_pass, fused_optimizer_groups, sdxl_cache_text_encoder_outputs, @@ -325,6 +326,7 @@ def open_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, fused_backward_pass, fused_optimizer_groups, sdxl_cache_text_encoder_outputs, @@ -482,6 +484,7 @@ def train_model( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, fused_backward_pass, fused_optimizer_groups, sdxl_cache_text_encoder_outputs, @@ -749,6 +752,7 @@ def train_model( "color_aug": color_aug, "dataset_config": dataset_config, "debiased_estimation_loss": debiased_estimation_loss, + "disable_mmap_load_safetensors": disable_mmap_load_safetensors, "dynamo_backend": dynamo_backend, "enable_bucket": enable_bucket, "epoch": int(epoch), @@ -1138,6 +1142,7 @@ def dreambooth_tab( advanced_training.log_tracker_name, advanced_training.log_tracker_config, advanced_training.scale_v_pred_loss_like_noise_pred, + sdxl_params.disable_mmap_load_safetensors, sdxl_params.fused_backward_pass, sdxl_params.fused_optimizer_groups, sdxl_params.sdxl_cache_text_encoder_outputs, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 81c2b64b9..3dfff0660 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -170,6 +170,7 @@ def save_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, fused_backward_pass, fused_optimizer_groups, sdxl_cache_text_encoder_outputs, @@ -338,6 +339,7 @@ def open_configuration( 
log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, fused_backward_pass, fused_optimizer_groups, sdxl_cache_text_encoder_outputs, @@ -512,6 +514,7 @@ def train_model( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, fused_backward_pass, fused_optimizer_groups, sdxl_cache_text_encoder_outputs, @@ -805,6 +808,7 @@ def train_model( "dataset_config": dataset_config, "dataset_repeats": int(dataset_repeats), "debiased_estimation_loss": debiased_estimation_loss, + "disable_mmap_load_safetensors": disable_mmap_load_safetensors, "dynamo_backend": dynamo_backend, "enable_bucket": True, "flip_aug": flip_aug, @@ -1258,6 +1262,7 @@ def list_presets(path): advanced_training.log_tracker_name, advanced_training.log_tracker_config, advanced_training.scale_v_pred_loss_like_noise_pred, + sdxl_params.disable_mmap_load_safetensors, sdxl_params.fused_backward_pass, sdxl_params.fused_optimizer_groups, sdxl_params.sdxl_cache_text_encoder_outputs, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index e85b47fd0..c557042b9 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -162,6 +162,7 @@ def save_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, min_timestep, max_timestep, sdxl_no_half_vae, @@ -321,6 +322,7 @@ def open_configuration( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, min_timestep, max_timestep, sdxl_no_half_vae, @@ -473,6 +475,7 @@ def train_model( log_tracker_name, log_tracker_config, scale_v_pred_loss_like_noise_pred, + disable_mmap_load_safetensors, min_timestep, max_timestep, sdxl_no_half_vae, @@ -757,6 +760,7 @@ def train_model( "clip_skip": clip_skip if clip_skip != 0 else None, "color_aug": color_aug, "dataset_config": dataset_config, + "disable_mmap_load_safetensors": disable_mmap_load_safetensors, "dynamo_backend": dynamo_backend, "enable_bucket": enable_bucket, "epoch": int(epoch), @@ -1221,6 +1225,7 @@ def list_embedding_files(path): advanced_training.log_tracker_name, advanced_training.log_tracker_config, advanced_training.scale_v_pred_loss_like_noise_pred, + sdxl_params.disable_mmap_load_safetensors, advanced_training.min_timestep, advanced_training.max_timestep, sdxl_params.sdxl_no_half_vae, From 31e5f758544a93376ea4969142f343e2bcb6f5ce Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 23 May 2024 08:42:39 -0400 Subject: [PATCH 006/199] Skip validation on --help --- gui.bat | 5 +++++ gui.ps1 | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/gui.bat b/gui.bat index e5e206db0..1f4c85c9d 100644 --- a/gui.bat +++ b/gui.bat @@ -9,10 +9,15 @@ call .\venv\Scripts\deactivate.bat call .\venv\Scripts\activate.bat set PATH=%PATH%;%~dp0venv\Lib\site-packages\torch\lib +:: If the first argument is --help, skip the validation step +if "%~1" equ "--help" goto :skip_validation + :: Validate requirements python.exe .\setup\validate_requirements.py if %errorlevel% neq 0 exit /b %errorlevel% +:skip_validation + :: If the exit code is 0, run the kohya_gui.py script with the command-line arguments if %errorlevel% equ 0 ( REM Check if the batch was started via double-click diff --git a/gui.ps1 b/gui.ps1 index 9e9a441de..24a433791 100644 --- a/gui.ps1 +++ b/gui.ps1 @@ -12,6 +12,13 @@ $env:PATH += ";$($MyInvocation.MyCommand.Path)\venv\Lib\site-packages\torch\lib" # Debug info about 
system # python.exe .\setup\debug_info.py +# If the --help parameter is passed, skip the validation step +if ($args -contains "--help") { + # Run the kohya_gui.py script with the command-line arguments + python.exe kohya_gui.py $args + exit 0 +} + # Validate the requirements and store the exit code python.exe .\setup\validate_requirements.py From 59bdd96ec040ecac66c13ee87e1a8c6e6421af09 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 24 May 2024 19:59:18 -0400 Subject: [PATCH 007/199] Update version of sd-scripts and gradio --- requirements.txt | 2 +- sd-scripts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 5f153411e..7be02e49e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ easygui==0.98.3 einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 -gradio==4.26.0 +gradio==4.29.0 huggingface-hub==0.20.1 imagesize==1.4.1 invisible-watermark==0.2.0 diff --git a/sd-scripts b/sd-scripts index 16677da0d..febc5c59f 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 16677da0d90ad9094a0301990b831a8dd6c0e957 +Subproject commit febc5c59fad74dfcead9064033171a9c674e4870 From ec5397f4d96dba4af7dc929e86b6de304f10d771 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 24 May 2024 20:07:33 -0400 Subject: [PATCH 008/199] Add --log_config support to GUI --- kohya_gui/class_advanced_training.py | 5 +++++ kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 5 +++++ kohya_gui/lora_gui.py | 5 +++++ kohya_gui/textual_inversion_gui.py | 5 +++++ 5 files changed, 25 insertions(+) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index c9784c304..2b2bf9ee7 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -534,6 +534,11 @@ def list_log_tracker_config_files(path): self.current_log_tracker_config_dir = path if not path == "" else "." 
return list(list_files(path, exts=[".json"], all=True)) + self.log_config = gr.Checkbox( + label="Log config", + value=self.config.get("advanced.log_config", False), + info="Log training parameter to WANDB", + ) self.log_tracker_name = gr.Textbox( label="Log tracker name", value=self.config.get("advanced.log_tracker_name", ""), diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index b71ca714c..d7496471e 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -162,6 +162,7 @@ def save_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, fused_backward_pass, @@ -325,6 +326,7 @@ def open_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, fused_backward_pass, @@ -483,6 +485,7 @@ def train_model( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, fused_backward_pass, @@ -784,6 +787,7 @@ def train_model( learning_rate_te2 if sdxl and not 0 else None ), # only for sdxl and not 0 "logging_dir": logging_dir, + "log_config": log_config, "log_tracker_config": log_tracker_config, "log_tracker_name": log_tracker_name, "log_with": log_with, @@ -1141,6 +1145,7 @@ def dreambooth_tab( advanced_training.wandb_run_name, advanced_training.log_tracker_name, advanced_training.log_tracker_config, + advanced_training.log_config, advanced_training.scale_v_pred_loss_like_noise_pred, sdxl_params.disable_mmap_load_safetensors, sdxl_params.fused_backward_pass, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 3dfff0660..0e86bc8bf 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -169,6 +169,7 @@ def save_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, fused_backward_pass, @@ -338,6 +339,7 @@ def open_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, fused_backward_pass, @@ -513,6 +515,7 @@ def train_model( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, fused_backward_pass, @@ -840,6 +843,7 @@ def train_model( learning_rate_te2 if sdxl_checkbox else None ), # only for sdxl "logging_dir": logging_dir, + "log_config": log_config, "log_tracker_name": log_tracker_name, "log_tracker_config": log_tracker_config, "loss_type": loss_type, @@ -1261,6 +1265,7 @@ def list_presets(path): advanced_training.wandb_run_name, advanced_training.log_tracker_name, advanced_training.log_tracker_config, + advanced_training.log_config, advanced_training.scale_v_pred_loss_like_noise_pred, sdxl_params.disable_mmap_load_safetensors, sdxl_params.fused_backward_pass, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 1f9d08e37..c164415af 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -201,6 +201,7 @@ def save_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, @@ -409,6 +410,7 @@ def open_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, @@ -647,6 +649,7 @@ def 
train_model( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, @@ -1138,6 +1141,7 @@ def train_model( "keep_tokens": int(keep_tokens), "learning_rate": learning_rate, "logging_dir": logging_dir, + "log_config": log_config, "log_tracker_name": log_tracker_name, "log_tracker_config": log_tracker_config, "loraplus_lr_ratio": loraplus_lr_ratio if not 0 else None, @@ -2325,6 +2329,7 @@ def update_LoRA_settings( advanced_training.wandb_run_name, advanced_training.log_tracker_name, advanced_training.log_tracker_config, + advanced_training.log_config, advanced_training.scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index c557042b9..16b8e68af 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -161,6 +161,7 @@ def save_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, min_timestep, @@ -321,6 +322,7 @@ def open_configuration( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, min_timestep, @@ -474,6 +476,7 @@ def train_model( wandb_run_name, log_tracker_name, log_tracker_config, + log_config, scale_v_pred_loss_like_noise_pred, disable_mmap_load_safetensors, min_timestep, @@ -781,6 +784,7 @@ def train_model( "keep_tokens": int(keep_tokens), "learning_rate": learning_rate, "logging_dir": logging_dir, + "log_config": log_config, "log_tracker_name": log_tracker_name, "log_tracker_config": log_tracker_config, "loss_type": loss_type, @@ -1224,6 +1228,7 @@ def list_embedding_files(path): advanced_training.wandb_run_name, advanced_training.log_tracker_name, advanced_training.log_tracker_config, + advanced_training.log_config, advanced_training.scale_v_pred_loss_like_noise_pred, sdxl_params.disable_mmap_load_safetensors, advanced_training.min_timestep, From 24b86c30ce6e8bf8b4442fa56e9505d0a25f0425 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 24 May 2024 20:18:33 -0400 Subject: [PATCH 009/199] Allow negative learning rates --- kohya_gui/class_basic_training.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index ee22d552b..dd24043a7 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -240,7 +240,7 @@ def init_learning_rate_controls(self) -> None: self.learning_rate = gr.Number( label=lr_label, value=self.config.get("basic.learning_rate", self.learning_rate_value), - minimum=0, + minimum=-1, maximum=1, info="Set to 0 to not train the Unet", ) @@ -251,7 +251,7 @@ def init_learning_rate_controls(self) -> None: "basic.learning_rate_te", self.learning_rate_value ), visible=self.finetuning or self.dreambooth, - minimum=0, + minimum=-1, maximum=1, info="Set to 0 to not train the Text Encoder", ) @@ -262,7 +262,7 @@ def init_learning_rate_controls(self) -> None: "basic.learning_rate_te1", self.learning_rate_value ), visible=False, - minimum=0, + minimum=-1, maximum=1, info="Set to 0 to not train the Text Encoder 1", ) @@ -273,7 +273,7 @@ def init_learning_rate_controls(self) -> None: "basic.learning_rate_te2", self.learning_rate_value ), visible=False, - minimum=0, + minimum=-1, maximum=1, info="Set to 0 to not train the Text Encoder 2", ) From 
848f4d45a851af78feadd7800c068d2066edb256 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 25 May 2024 08:21:25 -0400 Subject: [PATCH 010/199] Add support for custom learning rate scheduler type to the GUI --- config example.toml | 1 + kohya_gui/class_basic_training.py | 12 ++++- kohya_gui/dreambooth_gui.py | 5 ++ kohya_gui/finetune_gui.py | 5 ++ kohya_gui/lora_gui.py | 5 ++ kohya_gui/textual_inversion_gui.py | 5 ++ test/config/dreambooth-AdamW8bit-toml.json | 54 +++++++++++++++++----- 7 files changed, 74 insertions(+), 13 deletions(-) diff --git a/config example.toml b/config example.toml index 55b3f29e0..fff12555b 100644 --- a/config example.toml +++ b/config example.toml @@ -48,6 +48,7 @@ learning_rate_te1 = 0.0001 # Learning rate text encoder 1 learning_rate_te2 = 0.0001 # Learning rate text encoder 2 lr_scheduler = "cosine" # LR Scheduler lr_scheduler_args = "" # LR Scheduler args +lr_scheduler_type = "" # LR Scheduler type lr_warmup = 0 # LR Warmup (% of total steps) lr_scheduler_num_cycles = 1 # LR Scheduler num cycles lr_scheduler_power = 1.0 # LR Scheduler power diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index dd24043a7..331646b3e 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -167,7 +167,17 @@ def init_lr_and_optimizer_controls(self) -> None: value=self.config.get("basic.lr_scheduler", self.lr_scheduler_value), ) - + # Initialize the learning rate scheduler type dropdown + self.lr_scheduler_type = gr.Dropdown( + label="LR Scheduler type", + info="(Optional) custom scheduler module name", + choices=[ + "", + "CosineAnnealingLR", + ], + value=self.config.get("basic.lr_scheduler_type", ""), + allow_custom_value=True, + ) # Initialize the optimizer dropdown self.optimizer = gr.Dropdown( diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index d7496471e..e02c62fbc 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -135,6 +135,7 @@ def save_configuration( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -299,6 +300,7 @@ def open_configuration( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -458,6 +460,7 @@ def train_model( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -798,6 +801,7 @@ def train_model( int(lr_scheduler_num_cycles) if lr_scheduler_num_cycles != "" else int(epoch) ), "lr_scheduler_power": lr_scheduler_power, + "lr_scheduler_type": lr_scheduler_type if lr_scheduler_type != "" else None, "lr_warmup_steps": lr_warmup_steps, "masked_loss": masked_loss, "max_bucket_reso": max_bucket_reso, @@ -1118,6 +1122,7 @@ def dreambooth_tab( basic_training.optimizer, basic_training.optimizer_args, basic_training.lr_scheduler_args, + basic_training.lr_scheduler_type, advanced_training.noise_offset_type, advanced_training.noise_offset, advanced_training.noise_offset_random_strength, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 0e86bc8bf..7530917d5 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -142,6 +142,7 @@ def save_configuration( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -312,6 +313,7 @@ def open_configuration( optimizer, optimizer_args, lr_scheduler_args, + 
lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -488,6 +490,7 @@ def train_model( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -849,6 +852,7 @@ def train_model( "loss_type": loss_type, "lr_scheduler": lr_scheduler, "lr_scheduler_args": str(lr_scheduler_args).replace('"', "").split(), + "lr_scheduler_type": lr_scheduler_type if lr_scheduler_type != "" else None, "lr_warmup_steps": lr_warmup_steps, "masked_loss": masked_loss, "max_bucket_reso": int(max_bucket_reso), @@ -1238,6 +1242,7 @@ def list_presets(path): basic_training.optimizer, basic_training.optimizer_args, basic_training.lr_scheduler_args, + basic_training.lr_scheduler_type, advanced_training.noise_offset_type, advanced_training.noise_offset, advanced_training.noise_offset_random_strength, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index c164415af..06af9a33e 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -149,6 +149,7 @@ def save_configuration( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, max_grad_norm, noise_offset_type, noise_offset, @@ -358,6 +359,7 @@ def open_configuration( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, max_grad_norm, noise_offset_type, noise_offset, @@ -597,6 +599,7 @@ def train_model( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, max_grad_norm, noise_offset_type, noise_offset, @@ -1156,6 +1159,7 @@ def train_model( else int(epoch) ), "lr_scheduler_power": lr_scheduler_power, + "lr_scheduler_type": lr_scheduler_type if lr_scheduler_type != "" else None, "lr_warmup_steps": lr_warmup_steps, "masked_loss": masked_loss, "max_bucket_reso": max_bucket_reso, @@ -2277,6 +2281,7 @@ def update_LoRA_settings( basic_training.optimizer, basic_training.optimizer_args, basic_training.lr_scheduler_args, + basic_training.lr_scheduler_type, basic_training.max_grad_norm, advanced_training.noise_offset_type, advanced_training.noise_offset, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 16b8e68af..10529b9c4 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -135,6 +135,7 @@ def save_configuration( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -296,6 +297,7 @@ def open_configuration( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -450,6 +452,7 @@ def train_model( optimizer, optimizer_args, lr_scheduler_args, + lr_scheduler_type, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -794,6 +797,7 @@ def train_model( int(lr_scheduler_num_cycles) if lr_scheduler_num_cycles != "" else int(epoch) ), "lr_scheduler_power": lr_scheduler_power, + "lr_scheduler_type": lr_scheduler_type if lr_scheduler_type != "" else None, "lr_warmup_steps": lr_warmup_steps, "max_bucket_reso": max_bucket_reso, "max_timestep": max_timestep if max_timestep != 0 else None, @@ -1202,6 +1206,7 @@ def list_embedding_files(path): basic_training.optimizer, basic_training.optimizer_args, basic_training.lr_scheduler_args, + basic_training.lr_scheduler_type, advanced_training.noise_offset_type, advanced_training.noise_offset, advanced_training.noise_offset_random_strength, diff --git a/test/config/dreambooth-AdamW8bit-toml.json b/test/config/dreambooth-AdamW8bit-toml.json index 
82344dee7..69c658666 100644 --- a/test/config/dreambooth-AdamW8bit-toml.json +++ b/test/config/dreambooth-AdamW8bit-toml.json @@ -1,49 +1,75 @@ { "adaptive_noise_scale": 0, "additional_parameters": "", + "async_upload": false, "bucket_no_upscale": true, "bucket_reso_steps": 64, "cache_latents": true, "cache_latents_to_disk": false, - "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_every_n_epochs": 0, "caption_dropout_rate": 0.05, "caption_extension": "", "clip_skip": 2, "color_aug": false, "dataset_config": "./test/config/dataset.toml", + "debiased_estimation_loss": false, + "disable_mmap_load_safetensors": false, + "dynamo_backend": "no", + "dynamo_mode": "default", + "dynamo_use_dynamic": false, + "dynamo_use_fullgraph": false, "enable_bucket": true, "epoch": 1, + "extra_accelerate_launch_args": "", "flip_aug": false, "full_bf16": false, "full_fp16": false, + "fused_backward_pass": false, + "fused_optimizer_groups": 0, "gpu_ids": "", "gradient_accumulation_steps": 1, "gradient_checkpointing": false, + "huber_c": 0.1, + "huber_schedule": "snr", + "huggingface_path_in_repo": "", + "huggingface_repo_id": "", + "huggingface_repo_type": "", + "huggingface_repo_visibility": "", + "huggingface_token": "", "ip_noise_gamma": 0, "ip_noise_gamma_random_strength": false, - "keep_tokens": "0", + "keep_tokens": 0, "learning_rate": 5e-05, "learning_rate_te": 1e-05, "learning_rate_te1": 1e-05, "learning_rate_te2": 1e-05, + "log_config": false, "log_tracker_config": "", "log_tracker_name": "", + "log_with": "", "logging_dir": "./test/logs", + "loss_type": "l2", "lr_scheduler": "constant", - "lr_scheduler_args": "", - "lr_scheduler_num_cycles": "", - "lr_scheduler_power": "", + "lr_scheduler_args": "T_max=100", + "lr_scheduler_num_cycles": 1, + "lr_scheduler_power": 1, + "lr_scheduler_type": "CosineAnnealingLR", "lr_warmup": 0, "main_process_port": 12345, "masked_loss": false, "max_bucket_reso": 2048, - "max_data_loader_n_workers": "0", + "max_data_loader_n_workers": 0, "max_resolution": "512,512", "max_timestep": 1000, - "max_token_length": "75", - "max_train_epochs": "", - "max_train_steps": "", + "max_token_length": 75, + "max_train_epochs": 0, + "max_train_steps": 0, "mem_eff_attn": false, + "metadata_author": "", + "metadata_description": "", + "metadata_license": "", + "metadata_tags": "", + "metadata_title": "", "min_bucket_reso": 256, "min_snr_gamma": 0, "min_timestep": 0, @@ -65,14 +91,16 @@ "output_name": "db-AdamW8bit-toml", "persistent_data_loader_workers": false, "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", - "prior_loss_weight": 1.0, + "prior_loss_weight": 1, "random_crop": false, "reg_data_dir": "", "resume": "", + "resume_from_huggingface": "", "sample_every_n_epochs": 0, "sample_every_n_steps": 25, "sample_prompts": "a painting of a gas mask , by darius kawasaki", "sample_sampler": "euler_a", + "save_as_bool": false, "save_every_n_epochs": 1, "save_every_n_steps": 0, "save_last_n_steps": 0, @@ -81,14 +109,16 @@ "save_precision": "fp16", "save_state": false, "save_state_on_train_end": false, + "save_state_to_huggingface": false, "scale_v_pred_loss_like_noise_pred": false, "sdxl": false, - "seed": "1234", + "sdxl_cache_text_encoder_outputs": false, + "sdxl_no_half_vae": false, + "seed": 1234, "shuffle_caption": false, "stop_text_encoder_training": 0, "train_batch_size": 4, "train_data_dir": "", - "use_wandb": false, "v2": false, "v_parameterization": false, "v_pred_like_loss": 0, From 85029895a96fcee9ff991a402f5913a0a055b973 Mon Sep 17 00:00:00 2001 From: 
bmaltais Date: Sat, 25 May 2024 08:35:16 -0400 Subject: [PATCH 011/199] Add .webp image extension support to BLIP2 captioning. --- kohya_gui/blip2_caption_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/blip2_caption_gui.py b/kohya_gui/blip2_caption_gui.py index 5429db0b6..b3263227d 100644 --- a/kohya_gui/blip2_caption_gui.py +++ b/kohya_gui/blip2_caption_gui.py @@ -42,7 +42,7 @@ def get_images_in_directory(directory_path): import os # List of common image file extensions to look for - image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif"] + image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".webp"] # Generate a list of image file paths in the directory image_files = [ From b81dd0f270d3517e2dc9a432252363e111eb6bcb Mon Sep 17 00:00:00 2001 From: b-fission Date: Sat, 25 May 2024 17:52:37 -0500 Subject: [PATCH 012/199] Check for --debug flag for gui command-line args at startup --- kohya_gui.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kohya_gui.py b/kohya_gui.py index f586f0a29..485824b3b 100644 --- a/kohya_gui.py +++ b/kohya_gui.py @@ -106,6 +106,7 @@ def UI(**kwargs): do_not_share = kwargs.get("do_not_share", False) server_name = kwargs.get("listen") root_path = kwargs.get("root_path", None) + debug = kwargs.get("debug", False) launch_kwargs["server_name"] = server_name if username and password: @@ -121,7 +122,8 @@ def UI(**kwargs): launch_kwargs["share"] = share if root_path: launch_kwargs["root_path"] = root_path - launch_kwargs["debug"] = True + if debug: + launch_kwargs["debug"] = True interface.launch(**launch_kwargs) From f30a4c3c38e4081f480a10b2bd5deb44e104e6e2 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 26 May 2024 10:58:46 -0400 Subject: [PATCH 013/199] Validate GPU ID accelerate input and return error when needed --- kohya_gui/class_accelerate_launch.py | 41 ++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/kohya_gui/class_accelerate_launch.py b/kohya_gui/class_accelerate_launch.py index 912337bdd..6cf70fb92 100644 --- a/kohya_gui/class_accelerate_launch.py +++ b/kohya_gui/class_accelerate_launch.py @@ -3,6 +3,10 @@ import shlex from .class_gui_config import KohyaSSGUIConfig +from .custom_logging import setup_logging + +# Set up logging +log = setup_logging() class AccelerateLaunch: @@ -79,12 +83,16 @@ def __init__( ) self.dynamo_use_fullgraph = gr.Checkbox( label="Dynamo use fullgraph", - value=self.config.get("accelerate_launch.dynamo_use_fullgraph", False), + value=self.config.get( + "accelerate_launch.dynamo_use_fullgraph", False + ), info="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs", ) self.dynamo_use_dynamic = gr.Checkbox( label="Dynamo use dynamic", - value=self.config.get("accelerate_launch.dynamo_use_dynamic", False), + value=self.config.get( + "accelerate_launch.dynamo_use_dynamic", False + ), info="Whether to enable dynamic shape tracing.", ) @@ -103,6 +111,24 @@ def __init__( placeholder="example: 0,1", info=" What GPUs (by id) should be used for training on this machine as a comma-separated list", ) + + def validate_gpu_ids(value): + if value == "": + return + if not ( + value.isdigit() and int(value) >= 0 and int(value) <= 128 + ): + log.error("GPU IDs must be an integer between 0 and 128") + return + else: + for id in value.split(","): + if not id.isdigit() or int(id) < 0 or int(id) > 128: + log.error( + "GPU IDs must be an integer between 0 and 128" + ) + + 
self.gpu_ids.blur(fn=validate_gpu_ids, inputs=self.gpu_ids) + self.main_process_port = gr.Number( label="Main process port", value=self.config.get("accelerate_launch.main_process_port", 0), @@ -136,9 +162,14 @@ def run_cmd(run_cmd: list, **kwargs): if "dynamo_use_dynamic" in kwargs and kwargs.get("dynamo_use_dynamic"): run_cmd.append("--dynamo_use_dynamic") - - if "extra_accelerate_launch_args" in kwargs and kwargs["extra_accelerate_launch_args"] != "": - extra_accelerate_launch_args = kwargs["extra_accelerate_launch_args"].replace('"', "") + + if ( + "extra_accelerate_launch_args" in kwargs + and kwargs["extra_accelerate_launch_args"] != "" + ): + extra_accelerate_launch_args = kwargs[ + "extra_accelerate_launch_args" + ].replace('"', "") for arg in extra_accelerate_launch_args.split(): run_cmd.append(shlex.quote(arg)) From aeedc1642a1460d365795c2f098dbedc2bc99cf8 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 26 May 2024 11:00:20 -0400 Subject: [PATCH 014/199] Update to latest sd-scripts dev commit --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index febc5c59f..fb12b6d8e 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit febc5c59fad74dfcead9064033171a9c674e4870 +Subproject commit fb12b6d8e56c9ee6b6580c83d107cc1531028b20 From e5c788e7669c14a268163833f4a8505a38e00004 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 26 May 2024 14:04:47 -0400 Subject: [PATCH 015/199] Fix issue with pip upgrade --- setup/setup_common.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/setup/setup_common.py b/setup/setup_common.py index 8e35b74f2..79eaef64f 100644 --- a/setup/setup_common.py +++ b/setup/setup_common.py @@ -436,8 +436,8 @@ def git(arg: str, folder: str = None, ignore: bool = False): This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master """ - git_cmd = os.environ.get('GIT', "git") - result = subprocess.run(f'"{git_cmd}" {arg}', check=False, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=folder or '.') + # git_cmd = os.environ.get('GIT', "git") + result = subprocess.run(["git", arg], check=False, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=folder or '.') txt = result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stderr) > 0: txt += ('\n' if len(txt) > 0 else '') + result.stderr.decode(encoding="utf8", errors="ignore") @@ -476,11 +476,12 @@ def pip(arg: str, ignore: bool = False, quiet: bool = False, show_stdout: bool = # arg = arg.replace('>=', '==') if not quiet: log.info(f'Installing package: {arg.replace("install", "").replace("--upgrade", "").replace("--no-deps", "").replace("--force", "").replace(" ", " ").strip()}') - log.debug(f"Running pip: {arg}") + pip_cmd = [fr"{sys.executable}", "-m", "pip"] + arg.split(" ") + log.debug(f"Running pip: {pip_cmd}") if show_stdout: - subprocess.run(f'"{sys.executable}" -m pip {arg}', shell=True, check=False, env=os.environ) + subprocess.run(pip_cmd, shell=False, check=False, env=os.environ) else: - result = subprocess.run(f'"{sys.executable}" -m pip {arg}', shell=True, check=False, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + result = subprocess.run(pip_cmd, shell=False, check=False, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) txt = result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stderr) > 0: txt += ('\n' if len(txt) > 0 else '') + 
result.stderr.decode(encoding="utf8", errors="ignore") @@ -489,7 +490,7 @@ def pip(arg: str, ignore: bool = False, quiet: bool = False, show_stdout: bool = global errors # pylint: disable=global-statement errors += 1 log.error(f'Error running pip: {arg}') - log.debug(f'Pip output: {txt}') + log.error(f'Pip output: {txt}') return txt def installed(package, friendly: str = None): From ed17faf4b47b960ac7a3f3b0b42181c28df4b936 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 27 May 2024 09:55:06 -0400 Subject: [PATCH 016/199] Remove confusing log after command execution. --- kohya_gui/class_command_executor.py | 2 +- test/config/TI-AdamW8bit-SDXL.json | 125 ++++++++++++++++++++++++++++ 2 files changed, 126 insertions(+), 1 deletion(-) create mode 100644 test/config/TI-AdamW8bit-SDXL.json diff --git a/kohya_gui/class_command_executor.py b/kohya_gui/class_command_executor.py index f18e97a32..ba6f9c8e1 100644 --- a/kohya_gui/class_command_executor.py +++ b/kohya_gui/class_command_executor.py @@ -48,7 +48,7 @@ def execute_command(self, run_cmd: str, **kwargs): # Execute the command securely self.process = subprocess.Popen(run_cmd, **kwargs) - log.info("Command executed.") + log.debug("Command executed.") def kill_command(self): """ diff --git a/test/config/TI-AdamW8bit-SDXL.json b/test/config/TI-AdamW8bit-SDXL.json new file mode 100644 index 000000000..cdcb1099f --- /dev/null +++ b/test/config/TI-AdamW8bit-SDXL.json @@ -0,0 +1,125 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "async_upload": false, + "bucket_no_upscale": true, + "bucket_reso_steps": 1, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0, + "caption_dropout_rate": 0.05, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "dataset_config": "", + "dynamo_backend": "no", + "dynamo_mode": "default", + "dynamo_use_dynamic": false, + "dynamo_use_fullgraph": false, + "enable_bucket": true, + "epoch": 8, + "extra_accelerate_launch_args": "", + "flip_aug": false, + "full_fp16": false, + "gpu_ids": "", + "gradient_accumulation_steps": 1, + "gradient_checkpointing": false, + "huber_c": 0.1, + "huber_schedule": "snr", + "huggingface_path_in_repo": "", + "huggingface_repo_id": "False", + "huggingface_repo_type": "", + "huggingface_repo_visibility": "", + "huggingface_token": "", + "init_word": "*", + "ip_noise_gamma": 0.1, + "ip_noise_gamma_random_strength": true, + "keep_tokens": 0, + "learning_rate": 0.0001, + "log_config": false, + "log_tracker_config": "", + "log_tracker_name": "", + "log_with": "", + "logging_dir": "./test/logs", + "loss_type": "l2", + "lr_scheduler": "cosine", + "lr_scheduler_args": "", + "lr_scheduler_num_cycles": 1, + "lr_scheduler_power": 1, + "lr_scheduler_type": "", + "lr_warmup": 0, + "main_process_port": 0, + "max_bucket_reso": 2048, + "max_data_loader_n_workers": 0, + "max_resolution": "1024,1024", + "max_timestep": 0, + "max_token_length": 75, + "max_train_epochs": 0, + "max_train_steps": 0, + "mem_eff_attn": false, + "metadata_author": "False", + "metadata_description": "", + "metadata_license": "", + "metadata_tags": "", + "metadata_title": "", + "min_bucket_reso": 256, + "min_snr_gamma": 10, + "min_timestep": false, + "mixed_precision": "bf16", + "model_list": "custom", + "multi_gpu": false, + "multires_noise_discount": 0.2, + "multires_noise_iterations": 8, + "no_token_padding": false, + "noise_offset": 0.05, + "noise_offset_random_strength": true, + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + 
"num_machines": 1, + "num_processes": 1, + "num_vectors_per_token": 8, + "optimizer": "AdamW8bit", + "optimizer_args": "", + "output_dir": "./test/output", + "output_name": "TI-Adamw8bit-SDXL", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0", + "prior_loss_weight": 1, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "resume_from_huggingface": "False", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 20, + "sample_prompts": "a painting of man wearing a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_as_bool": false, + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "save_state_on_train_end": false, + "save_state_to_huggingface": false, + "scale_v_pred_loss_like_noise_pred": false, + "sdxl": true, + "sdxl_no_half_vae": true, + "seed": 1234, + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "template": "style template", + "token_string": "zxc", + "train_batch_size": 4, + "train_data_dir": "./test/img", + "v2": false, + "v_parameterization": false, + "v_pred_like_loss": 0, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "wandb_run_name": "", + "weights": "", + "xformers": "xformers" +} \ No newline at end of file From df36a26b006150291eef8fb5513e8eed5a8599a7 Mon Sep 17 00:00:00 2001 From: DevArqSangoi Date: Mon, 3 Jun 2024 00:00:17 -0300 Subject: [PATCH 017/199] piecewise_constant scheduler --- kohya_gui/class_basic_training.py | 1 + 1 file changed, 1 insertion(+) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index 331646b3e..c3537eeb4 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -162,6 +162,7 @@ def init_lr_and_optimizer_controls(self) -> None: "cosine", "cosine_with_restarts", "linear", + "piecewise_constant", "polynomial", ], value=self.config.get("basic.lr_scheduler", self.lr_scheduler_value), From 41ea1b0f4f3461755ec70103d49e78f7144039a4 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 12 Jun 2024 13:21:24 -0400 Subject: [PATCH 018/199] Update to latest sd-scripts dev commit --- .release | 2 +- requirements.txt | 2 +- sd-scripts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release b/.release index 7eb9e9132..4bbfbca25 100644 --- a/.release +++ b/.release @@ -1 +1 @@ -v24.1.4 \ No newline at end of file +v24.2.0 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 7be02e49e..27e6ae1c3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ easygui==0.98.3 einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 -gradio==4.29.0 +gradio==4.36.1 huggingface-hub==0.20.1 imagesize==1.4.1 invisible-watermark==0.2.0 diff --git a/sd-scripts b/sd-scripts index fb12b6d8e..56bb81c9e 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit fb12b6d8e56c9ee6b6580c83d107cc1531028b20 +Subproject commit 56bb81c9e6483b8b4d5b83639548855b8359f4b4 From cf45079b133ca6476f2bd075791151a68f6301e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=9C=D0=BE=D1=81?= =?UTF-8?q?=D0=BA=D0=B2=D0=B8=D0=BD?= Date: Sun, 16 Jun 2024 14:16:55 +0300 Subject: [PATCH 019/199] fix: fixed docker-compose for passing models via volumes --- docker-compose.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/docker-compose.yaml b/docker-compose.yaml index 
4932bcee2..77deba553 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -25,6 +25,7 @@ services: - ./dataset/logs:/app/logs - ./dataset/outputs:/app/outputs - ./dataset/regularization:/app/regularization + - ./models:/app/models - ./.cache/config:/app/config - ./.cache/user:/home/1000/.cache - ./.cache/triton:/home/1000/.triton From 30862ad957d47f74b0f70019c84766010485f977 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 19 Jun 2024 20:50:36 -0400 Subject: [PATCH 020/199] PRevent providing the legacy learning_rate if unet or te learning rate is provided --- kohya_gui/lora_gui.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 06af9a33e..8787febc0 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1102,7 +1102,10 @@ def train_model( network_train_text_encoder_only = text_encoder_lr_float != 0 and unet_lr_float == 0 # Flag to train unet only if its learning rate is non-zero and text encoder's is zero. network_train_unet_only = text_encoder_lr_float == 0 and unet_lr_float != 0 - + + if text_encoder_lr_float != 0 or unet_lr_float != 0: + do_not_set_learning_rate = True + config_toml_data = { "adaptive_noise_scale": ( adaptive_noise_scale if adaptive_noise_scale != 0 else None @@ -1142,7 +1145,7 @@ def train_model( "ip_noise_gamma": ip_noise_gamma if ip_noise_gamma != 0 else None, "ip_noise_gamma_random_strength": ip_noise_gamma_random_strength, "keep_tokens": int(keep_tokens), - "learning_rate": learning_rate, + "learning_rate": None if do_not_set_learning_rate else learning_rate, "logging_dir": logging_dir, "log_config": log_config, "log_tracker_name": log_tracker_name, From 8abab01ebb9cb1e3109ca9d83d07b6911b7e36f9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 22 Jun 2024 08:32:34 -0400 Subject: [PATCH 021/199] Fix toml noise offset parameters based on selected type --- kohya_gui/lora_gui.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 8787febc0..aedbfe5df 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1108,7 +1108,7 @@ def train_model( config_toml_data = { "adaptive_noise_scale": ( - adaptive_noise_scale if adaptive_noise_scale != 0 else None + adaptive_noise_scale if (adaptive_noise_scale != 0 and noise_offset_type == "Original") else None ), "async_upload": async_upload, "bucket_no_upscale": bucket_no_upscale, @@ -1183,9 +1183,9 @@ def train_model( "min_snr_gamma": min_snr_gamma if min_snr_gamma != 0 else None, "min_timestep": min_timestep if min_timestep != 0 else None, "mixed_precision": mixed_precision, - "multires_noise_discount": multires_noise_discount, + "multires_noise_discount": multires_noise_discount if noise_offset_type == "Multires" else None, "multires_noise_iterations": ( - multires_noise_iterations if multires_noise_iterations != 0 else None + multires_noise_iterations if (multires_noise_iterations != 0 and noise_offset_type == "Multires") else None ), "network_alpha": network_alpha, "network_args": str(network_args).replace('"', "").split(), @@ -1196,8 +1196,8 @@ def train_model( "network_train_text_encoder_only": network_train_text_encoder_only, "network_weights": network_weights, "no_half_vae": True if sdxl and sdxl_no_half_vae else None, - "noise_offset": noise_offset if noise_offset != 0 else None, - "noise_offset_random_strength": noise_offset_random_strength, + "noise_offset": noise_offset if (noise_offset != 0 and noise_offset_type == "Original") else 
None, + "noise_offset_random_strength": noise_offset_random_strength if noise_offset_type == "Original" else None, "noise_offset_type": noise_offset_type, "optimizer_type": optimizer, "optimizer_args": str(optimizer_args).replace('"', "").split(), From a9f4f30c3cddf408341bb3b310da48caf41c5c30 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 22 Jun 2024 08:38:25 -0400 Subject: [PATCH 022/199] Fix typo --- _typos.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/_typos.toml b/_typos.toml index d73875a92..28ddf851f 100644 --- a/_typos.toml +++ b/_typos.toml @@ -9,6 +9,7 @@ parms="parms" nin="nin" extention="extention" # Intentionally left nd="nd" +pn="pn" shs="shs" sts="sts" scs="scs" From 018ce04852e8f8a05609436ecbcd066c5a1d9e2f Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 22 Jun 2024 08:51:12 -0400 Subject: [PATCH 023/199] Fix adaptive_noise_scale value not properly loading from json config --- kohya_gui/common_gui.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py index 8fe216bd5..8732ff8f1 100644 --- a/kohya_gui/common_gui.py +++ b/kohya_gui/common_gui.py @@ -327,7 +327,6 @@ def update_my_data(my_data): # Convert values to int if they are strings for key in [ - "adaptive_noise_scale", "clip_skip", "epoch", "gradient_accumulation_steps", @@ -378,7 +377,13 @@ def update_my_data(my_data): my_data[key] = int(75) # Convert values to float if they are strings, correctly handling float representations - for key in ["noise_offset", "learning_rate", "text_encoder_lr", "unet_lr"]: + for key in [ + "adaptive_noise_scale", + "noise_offset", + "learning_rate", + "text_encoder_lr", + "unet_lr", + ]: value = my_data.get(key) if value is not None: try: @@ -1368,7 +1373,11 @@ def validate_file_path(file_path: str) -> bool: return True -def validate_folder_path(folder_path: str, can_be_written_to: bool = False, create_if_not_exists: bool = False) -> bool: +def validate_folder_path( + folder_path: str, + can_be_written_to: bool = False, + create_if_not_exists: bool = False, +) -> bool: if folder_path == "": return True msg = f"Validating {folder_path} existence{' and writability' if can_be_written_to else ''}..." @@ -1386,6 +1395,7 @@ def validate_folder_path(folder_path: str, can_be_written_to: bool = False, crea log.info(f"{msg} SUCCESS") return True + def validate_toml_file(file_path: str) -> bool: if file_path == "": return True @@ -1393,7 +1403,7 @@ def validate_toml_file(file_path: str) -> bool: if not os.path.isfile(file_path): log.error(f"{msg} FAILED: does not exist") return False - + try: toml.load(file_path) except: @@ -1424,11 +1434,14 @@ def validate_model_path(pretrained_model_name_or_path: str) -> bool: log.info(f"{msg} SUCCESS") else: # If not one of the default models, check if it's a valid local path - if not validate_file_path(pretrained_model_name_or_path) and not validate_folder_path(pretrained_model_name_or_path): + if not validate_file_path( + pretrained_model_name_or_path + ) and not validate_folder_path(pretrained_model_name_or_path): log.info(f"{msg} FAILURE: not a valid file or folder") return False return True + def is_file_writable(file_path: str) -> bool: """ Checks if a file is writable. 
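A minimal sketch (not the project's code; the helper name is hypothetical) of why this patch moves `adaptive_noise_scale` from the int-coercion list into the float-coercion list of `update_my_data()`: values loaded back from a saved JSON config can arrive as strings such as `"0.005"`, which `int()` rejects but `float()` accepts.

```python
# Hypothetical, pared-down version of the numeric coercion in update_my_data().
def coerce_numeric_fields(my_data: dict) -> dict:
    int_keys = ["clip_skip", "epoch", "gradient_accumulation_steps"]
    float_keys = ["adaptive_noise_scale", "noise_offset", "learning_rate",
                  "text_encoder_lr", "unet_lr"]

    for key in int_keys:
        value = my_data.get(key)
        if value is not None:
            my_data[key] = int(value)    # int("0.005") would raise ValueError

    for key in float_keys:
        value = my_data.get(key)
        if value is not None:
            my_data[key] = float(value)  # accepts "0.005", 0.005 and 0 alike

    return my_data


print(coerce_numeric_fields({"adaptive_noise_scale": "0.005", "epoch": "10"}))
# -> {'adaptive_noise_scale': 0.005, 'epoch': 10}
```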
@@ -1488,10 +1501,11 @@ def validate_args_setting(input_string): ) return False + def setup_environment(): env = os.environ.copy() env["PYTHONPATH"] = ( - fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" From a8c18604c83682ed44303b43f64df80db8f0df82 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 23 Jun 2024 13:46:54 -0400 Subject: [PATCH 024/199] Fix prompt.txt location --- README.md | 30 ++++++++++++++++++++++++++++++ kohya_gui/class_sample_images.py | 5 ++++- requirements.txt | 1 + 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 45f4aed92..9b6c220eb 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,11 @@ The GUI allows you to set the training parameters and generate and run the requi - [Potential Solutions](#potential-solutions) - [SDXL training](#sdxl-training) - [Masked loss](#masked-loss) + - [Guides](#guides) + - [Using Accelerate Lora Tab to Select GPU ID](#using-accelerate-lora-tab-to-select-gpu-id) + - [Starting Accelerate in GUI](#starting-accelerate-in-gui) + - [Running Multiple Instances (linux)](#running-multiple-instances-linux) + - [Monitoring Processes](#monitoring-processes) - [Change History](#change-history) ## 🦒 Colab @@ -438,6 +443,31 @@ The feature is not fully tested, so there may be bugs. If you find any issues, p ControlNet dataset is used to specify the mask. The mask images should be the RGB images. The pixel value 255 in R channel is treated as the mask (the loss is calculated only for the pixels with the mask), and 0 is treated as the non-mask. The pixel values 0-255 are converted to 0-1 (i.e., the pixel value 128 is treated as the half weight of the loss). See details for the dataset specification in the [LLLite documentation](./docs/train_lllite_README.md#preparing-the-dataset). +## Guides + +The following are guides extracted from issues discussions + +### Using Accelerate Lora Tab to Select GPU ID + +#### Starting Accelerate in GUI + +- Open the kohya GUI on your desired port. +- Open the `Accelerate launch` tab +- Ensure the Multi-GPU checkbox is unchecked. +- Set GPU IDs to the desired GPU (like 1). + +#### Running Multiple Instances (linux) + +- For tracking multiple processes, use separate kohya GUI instances on different ports (e.g., 7860, 7861). +- Start instances using `nohup ./gui.sh --listen 0.0.0.0 --server_port --headless > log.log 2>&1 &`. + +#### Monitoring Processes + +- Open each GUI in a separate browser tab. +- For terminal access, use SSH and tools like `tmux` or `screen`. + +For more details, visit the [GitHub issue](https://github.com/bmaltais/kohya_ss/issues/2577). + ## Change History See release information. diff --git a/kohya_gui/class_sample_images.py b/kohya_gui/class_sample_images.py index 8f69a2ec6..807c8b449 100644 --- a/kohya_gui/class_sample_images.py +++ b/kohya_gui/class_sample_images.py @@ -28,7 +28,10 @@ def create_prompt_file(sample_prompts, output_dir): Returns: str: The path to the prompt file. 
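This hunk relocates the generated sample prompt file into a `sample/` subfolder of the output directory and creates that folder when it is missing. A standalone sketch of the same pattern, using only the standard library (the function name is illustrative, not the project's):

```python
import os


def write_sample_prompts(sample_prompts: str, output_dir: str) -> str:
    """Write prompts to <output_dir>/sample/prompt.txt, creating the folder first."""
    prompt_path = os.path.join(output_dir, "sample", "prompt.txt")
    os.makedirs(os.path.dirname(prompt_path), exist_ok=True)
    with open(prompt_path, "w", encoding="utf-8") as f:
        f.write(sample_prompts)
    return prompt_path
```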
""" - sample_prompts_path = os.path.join(output_dir, "prompt.txt") + sample_prompts_path = os.path.join(output_dir, "sample/prompt.txt") + + if not os.path.exists(os.path.dirname(sample_prompts_path)): + os.makedirs(os.path.dirname(sample_prompts_path)) with open(sample_prompts_path, "w", encoding="utf-8") as f: f.write(sample_prompts) diff --git a/requirements.txt b/requirements.txt index 27e6ae1c3..a2245ac9f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,6 +13,7 @@ imagesize==1.4.1 invisible-watermark==0.2.0 lion-pytorch==0.0.6 lycoris_lora==2.2.0.post3 +# lycoris_lora==3.0.0.dev11 omegaconf==2.3.0 onnx==1.15.0 prodigyopt==1.0 From ff61f3a4be6699346f49942bcf97262bb3509d89 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 23 Jun 2024 13:52:21 -0400 Subject: [PATCH 025/199] Improve "print command" output format --- kohya_gui/common_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py index 8732ff8f1..3c733aa9e 100644 --- a/kohya_gui/common_gui.py +++ b/kohya_gui/common_gui.py @@ -1474,7 +1474,7 @@ def print_command_and_toml(run_cmd, tmpfilename): # Reconstruct the safe command string for display command_to_run = " ".join(run_cmd) - log.info(command_to_run) + print(command_to_run) print("") log.info(f"Showing toml config file: {tmpfilename}") From 441721ab8768964735d197ec44aab0740294e957 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 24 Jun 2024 10:26:54 -0400 Subject: [PATCH 026/199] Use output model name as wandb run name if not provided --- kohya_gui/common_gui.py | 3 ++- kohya_gui/dreambooth_gui.py | 2 +- kohya_gui/finetune_gui.py | 2 +- kohya_gui/lora_gui.py | 6 +++--- kohya_gui/textual_inversion_gui.py | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py index 3c733aa9e..b95ac913d 100644 --- a/kohya_gui/common_gui.py +++ b/kohya_gui/common_gui.py @@ -1462,8 +1462,9 @@ def is_file_writable(file_path: str) -> bool: pass # If the file can be opened, it is considered writable return True - except IOError: + except IOError as e: # If an IOError occurs, the file cannot be written to + log.info(f"Error: {e}. 
File '{file_path}' is not writable.") return False diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index e02c62fbc..61b520146 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -878,7 +878,7 @@ def train_model( "vae": vae, "vae_batch_size": vae_batch_size if vae_batch_size != 0 else None, "wandb_api_key": wandb_api_key, - "wandb_run_name": wandb_run_name, + "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, } diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 7530917d5..0c480b15f 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -924,7 +924,7 @@ def train_model( "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None, "vae_batch_size": vae_batch_size if vae_batch_size != 0 else None, "wandb_api_key": wandb_api_key, - "wandb_run_name": wandb_run_name, + "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, } diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index aedbfe5df..b335921e1 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -449,10 +449,10 @@ def open_configuration( loraplus_unet_lr_ratio, training_preset, ): - # Get list of function parameters and values + # Get list of function parameters and their values parameters = list(locals().items()) - # Determines if a preset configuration is being applied + # Determine if a preset configuration is being applied if apply_preset: if training_preset != "none": log.info(f"Applying preset {training_preset}...") @@ -1252,7 +1252,7 @@ def train_model( "vae": vae, "vae_batch_size": vae_batch_size if vae_batch_size != 0 else None, "wandb_api_key": wandb_api_key, - "wandb_run_name": wandb_run_name, + "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, } diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 10529b9c4..8c9f804aa 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -874,7 +874,7 @@ def train_model( "vae": vae, "vae_batch_size": vae_batch_size if vae_batch_size != 0 else None, "wandb_api_key": wandb_api_key, - "wandb_run_name": wandb_run_name, + "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weigts": weights, "use_object_template": True if template == "object template" else None, "use_style_template": True if template == "style template" else None, From da2336a48d7e0d17b1b6f6626d424ea178b73bc7 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 29 Jun 2024 22:05:27 -0400 Subject: [PATCH 027/199] Update sd-scripts dev release --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 56bb81c9e..0b3e4f7ab 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 56bb81c9e6483b8b4d5b83639548855b8359f4b4 +Subproject commit 0b3e4f7ab62b7c93e66972b7bd2774b8fe679792 From 46aba39ee93a154470231fc66ff1c153868a4b9b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 05:04:09 +0000 Subject: [PATCH 028/199] Bump crate-ci/typos from 1.21.0 to 1.22.9 Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.21.0 to 1.22.9. 
- [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.21.0...v1.22.9) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/typos.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/typos.yaml b/.github/workflows/typos.yaml index c81ff3210..ae14a3c14 100644 --- a/.github/workflows/typos.yaml +++ b/.github/workflows/typos.yaml @@ -18,4 +18,4 @@ jobs: - uses: actions/checkout@v4 - name: typos-action - uses: crate-ci/typos@v1.21.0 + uses: crate-ci/typos@v1.22.9 From 4b43f47305fab6b9ba8815859dd1ae816f472c20 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 05:04:12 +0000 Subject: [PATCH 029/199] Bump docker/build-push-action from 5 to 6 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/docker_publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker_publish.yml b/.github/workflows/docker_publish.yml index 520045d86..ac198d1cb 100644 --- a/.github/workflows/docker_publish.yml +++ b/.github/workflows/docker_publish.yml @@ -71,7 +71,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 id: publish with: context: . From 89f45ba81780b04e5cd608bb80a1fdc6a71b6893 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 7 Jul 2024 10:14:46 -0400 Subject: [PATCH 030/199] Get latest sd3 code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 0b3e4f7ab..ea18d5ba6 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 0b3e4f7ab62b7c93e66972b7bd2774b8fe679792 +Subproject commit ea18d5ba6d856995d5c44be4b449b63ac66fe5db From 65ac2f163a0db6d0fc65b249ddc290e502992379 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 7 Jul 2024 15:14:53 -0400 Subject: [PATCH 031/199] Adding SD3 GUI elements --- kohya_gui/class_sd3.py | 714 ++++++++++++++++++++++++++++++++ kohya_gui/class_source_model.py | 57 ++- kohya_gui/common_gui.py | 10 + kohya_gui/dreambooth_gui.py | 4 + kohya_gui/finetune_gui.py | 4 + 5 files changed, 788 insertions(+), 1 deletion(-) create mode 100644 kohya_gui/class_sd3.py diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py new file mode 100644 index 000000000..06d109a89 --- /dev/null +++ b/kohya_gui/class_sd3.py @@ -0,0 +1,714 @@ +import gradio as gr +from typing import Tuple +from .common_gui import ( + get_folder_path, + get_any_file_path, + list_files, + list_dirs, + create_refresh_button, + document_symbol, +) + + +class sd3Training: + """ + This class configures and initializes the advanced training settings for a machine learning model, + including options for headless operation, fine-tuning, training type selection, and default directory paths. + + Attributes: + headless (bool): If True, run without the Gradio interface. 
+ finetuning (bool): If True, enables fine-tuning of the model. + training_type (str): Specifies the type of training to perform. + no_token_padding (gr.Checkbox): Checkbox to disable token padding. + gradient_accumulation_steps (gr.Slider): Slider to set the number of gradient accumulation steps. + weighted_captions (gr.Checkbox): Checkbox to enable weighted captions. + """ + + def __init__( + self, + headless: bool = False, + finetuning: bool = False, + training_type: str = "", + config: dict = {}, + sd3_checkbox: gr.Checkbox = False, + ) -> None: + """ + Initializes the AdvancedTraining class with given settings. + + Parameters: + headless (bool): Run in headless mode without GUI. + finetuning (bool): Enable model fine-tuning. + training_type (str): The type of training to be performed. + config (dict): Configuration options for the training process. + """ + self.headless = headless + self.finetuning = finetuning + self.training_type = training_type + self.config = config + self.sd3_checkbox = sd3_checkbox + + # Determine the current directories for VAE and output, falling back to defaults if not specified. + # self.current_vae_dir = self.config.get("advanced.vae_dir", "./models/vae") + # self.current_state_dir = self.config.get("advanced.state_dir", "./outputs") + # self.current_log_tracker_config_dir = self.config.get( + # "advanced.log_tracker_config_dir", "./logs" + # ) + + # Define the behavior for changing noise offset type. + def noise_offset_type_change( + noise_offset_type: str, + ) -> Tuple[gr.Group, gr.Group]: + """ + Returns a tuple of Gradio Groups with visibility set based on the noise offset type. + + Parameters: + noise_offset_type (str): The selected noise offset type. + + Returns: + Tuple[gr.Group, gr.Group]: A tuple containing two Gradio Group elements with their visibility set. 
+ """ + if noise_offset_type == "Original": + return (gr.Group(visible=True), gr.Group(visible=False)) + else: + return (gr.Group(visible=False), gr.Group(visible=True)) + + with gr.Accordion("SD3", open=False, elem_id="sd3_tab", visible=False) as sd3_accordion: + with gr.Group(): + gr.Markdown("### SD3 Specific Parameters") + with gr.Row(): + self.weighting_scheme = gr.Dropdown( + label="Weighting Scheme", + choices=["logit_normal", "sigma_sqrt", "mode", "cosmap"], + value=self.config.get("sd3.weighting_scheme", "logit_normal"), + ) + self.logit_mean = gr.Number( + label="Logit Mean", + value=self.config.get("sd3.logit_mean", 0.0), + ) + self.logit_std = gr.Number( + label="Logit Std", + value=self.config.get("sd3.logit_std", 1.0), + ) + self.mode_scale = gr.Number( + label="Mode Scale", + value=self.config.get("sd3.mode_scale", 1.29), + ) + + with gr.Row(): + self.clip_l = gr.Textbox( + label="CLIP-L Path", + placeholder="Path to CLIP-L model", + value=self.config.get("sd3.clip_l", ""), + ) + self.clip_l_button = gr.Button( + document_symbol, elem_id="open_folder_small", visible=(not headless) + ) + self.clip_l_button.click( + get_any_file_path, + outputs=self.clip_l, + show_progress=False, + ) + + + + + # self.log_tracker_config = gr.Dropdown( + # label="Log tracker config", + # choices=[self.config.get("log_tracker_config_dir", "")] + # + list_log_tracker_config_files(self.current_log_tracker_config_dir), + # value=self.config.get("log_tracker_config_dir", ""), + # info="Path to tracker config file to use for logging", + # interactive=True, + # allow_custom_value=True, + # ) + # create_refresh_button( + # self.log_tracker_config, + # lambda: None, + # lambda: { + # "choices": [self.config.get("log_tracker_config_dir", "")] + # + list_log_tracker_config_files(self.current_log_tracker_config_dir) + # }, + # "open_folder_small", + # ) + # self.log_tracker_config_button = gr.Button( + # document_symbol, elem_id="open_folder_small", visible=(not headless) + # ) + # self.log_tracker_config_button.click( + # get_any_file_path, + # outputs=self.log_tracker_config, + # show_progress=False, + # ) + # self.log_tracker_config.change( + # fn=lambda path: gr.Dropdown( + # choices=[self.config.get("log_tracker_config_dir", "")] + # + list_log_tracker_config_files(path) + # ), + # inputs=self.log_tracker_config, + # outputs=self.log_tracker_config, + # show_progress=False, + # ) + self.clip_g = gr.Textbox( + label="CLIP-G Path", + placeholder="Path to CLIP-G model", + value=self.config.get("sd3.clip_g", ""), + ) + self.t5xxl = gr.Textbox( + label="T5-XXL Path", + placeholder="Path to T5-XXL model", + value=self.config.get("sd3.t5xxl", ""), + ) + + with gr.Row(): + self.save_clip = gr.Checkbox( + label="Save CLIP models", + value=self.config.get("sd3.save_clip", False), + ) + self.save_t5xxl = gr.Checkbox( + label="Save T5-XXL model", + value=self.config.get("sd3.save_t5xxl", False), + ) + + with gr.Row(): + self.t5xxl_device = gr.Textbox( + label="T5-XXL Device", + placeholder="Device for T5-XXL (e.g., cuda:0)", + value=self.config.get("sd3.t5xxl_device", ""), + ) + self.t5xxl_dtype = gr.Dropdown( + label="T5-XXL Dtype", + choices=["float32", "fp16", "bf16"], + value=self.config.get("sd3.t5xxl_dtype", "bf16"), + ) + self.text_encoder_batch_size = gr.Number( + label="Text Encoder Batch Size", + value=self.config.get("sd3.text_encoder_batch_size", 1), + minimum=1, + ) + self.cache_text_encoder_outputs = gr.Checkbox( + label="Cache Text Encoder Outputs", + 
value=self.config.get("sd3.cache_text_encoder_outputs", False), + info="Cache text encoder outputs to speed up inference", + ) + self.cache_text_encoder_outputs_to_disk = gr.Checkbox( + label="Cache Text Encoder Outputs to Disk", + value=self.config.get("sd3.cache_text_encoder_outputs_to_disk", False), + info="Cache text encoder outputs to disk to speed up inference", + ) + + self.sd3_checkbox.change( + lambda sd3_checkbox: gr.Accordion(visible=sd3_checkbox), + inputs=[self.sd3_checkbox], + outputs=[sd3_accordion], + ) + + + # # GUI elements are only visible when not fine-tuning. + # with gr.Row(visible=not finetuning): + # # Exclude token padding option for LoRA training type. + # if training_type != "lora": + # self.no_token_padding = gr.Checkbox( + # label="No token padding", + # value=self.config.get("advanced.no_token_padding", False), + # ) + # self.gradient_accumulation_steps = gr.Slider( + # label="Gradient accumulate steps", + # info="Number of updates steps to accumulate before performing a backward/update pass", + # value=self.config.get("advanced.gradient_accumulation_steps", 1), + # minimum=1, + # maximum=120, + # step=1, + # ) + # self.weighted_captions = gr.Checkbox( + # label="Weighted captions", + # value=self.config.get("advanced.weighted_captions", False), + # ) + # with gr.Group(), gr.Row(visible=not finetuning): + # self.prior_loss_weight = gr.Number( + # label="Prior loss weight", + # value=self.config.get("advanced.prior_loss_weight", 1.0), + # ) + + # def list_vae_files(path): + # self.current_vae_dir = path if not path == "" else "." + # return list(list_files(path, exts=[".ckpt", ".safetensors"], all=True)) + + # self.vae = gr.Dropdown( + # label="VAE (Optional: Path to checkpoint of vae for training)", + # interactive=True, + # choices=[self.config.get("advanced.vae_dir", "")] + # + list_vae_files(self.current_vae_dir), + # value=self.config.get("advanced.vae_dir", ""), + # allow_custom_value=True, + # ) + # create_refresh_button( + # self.vae, + # lambda: None, + # lambda: { + # "choices": [self.config.get("advanced.vae_dir", "")] + # + list_vae_files(self.current_vae_dir) + # }, + # "open_folder_small", + # ) + # self.vae_button = gr.Button( + # "📂", elem_id="open_folder_small", visible=(not headless) + # ) + # self.vae_button.click( + # get_any_file_path, + # outputs=self.vae, + # show_progress=False, + # ) + + # self.vae.change( + # fn=lambda path: gr.Dropdown( + # choices=[self.config.get("advanced.vae_dir", "")] + # + list_vae_files(path) + # ), + # inputs=self.vae, + # outputs=self.vae, + # show_progress=False, + # ) + + # with gr.Row(): + # self.additional_parameters = gr.Textbox( + # label="Additional parameters", + # placeholder='(Optional) Use to provide additional parameters not handled by the GUI. 
Eg: --some_parameters "value"', + # value=self.config.get("advanced.additional_parameters", ""), + # ) + # with gr.Accordion("Scheduled Huber Loss", open=False): + # with gr.Row(): + # self.loss_type = gr.Dropdown( + # label="Loss type", + # choices=["huber", "smooth_l1", "l2"], + # value=self.config.get("advanced.loss_type", "l2"), + # info="The type of loss to use and whether it's scheduled based on the timestep", + # ) + # self.huber_schedule = gr.Dropdown( + # label="Huber schedule", + # choices=[ + # "constant", + # "exponential", + # "snr", + # ], + # value=self.config.get("advanced.huber_schedule", "snr"), + # info="The type of loss to use and whether it's scheduled based on the timestep", + # ) + # self.huber_c = gr.Number( + # label="Huber C", + # value=self.config.get("advanced.huber_c", 0.1), + # minimum=0.0, + # maximum=1.0, + # step=0.01, + # info="The huber loss parameter. Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type", + # ) + + # with gr.Row(): + # self.save_every_n_steps = gr.Number( + # label="Save every N steps", + # value=self.config.get("advanced.save_every_n_steps", 0), + # precision=0, + # info="(Optional) The model is saved every specified steps", + # ) + # self.save_last_n_steps = gr.Number( + # label="Save last N steps", + # value=self.config.get("advanced.save_last_n_steps", 0), + # precision=0, + # info="(Optional) Save only the specified number of models (old models will be deleted)", + # ) + # self.save_last_n_steps_state = gr.Number( + # label="Save last N steps state", + # value=self.config.get("advanced.save_last_n_steps_state", 0), + # precision=0, + # info="(Optional) Save only the specified number of states (old models will be deleted)", + # ) + # with gr.Row(): + + # def full_options_update(full_fp16, full_bf16): + # full_fp16_active = True + # full_bf16_active = True + + # if full_fp16: + # full_bf16_active = False + # if full_bf16: + # full_fp16_active = False + # return gr.Checkbox( + # interactive=full_fp16_active, + # ), gr.Checkbox(interactive=full_bf16_active) + + # self.keep_tokens = gr.Slider( + # label="Keep n tokens", + # value=self.config.get("advanced.keep_tokens", 0), + # minimum=0, + # maximum=32, + # step=1, + # ) + # self.clip_skip = gr.Slider( + # label="Clip skip", + # value=self.config.get("advanced.clip_skip", 1), + # minimum=0, + # maximum=12, + # step=1, + # ) + # self.max_token_length = gr.Dropdown( + # label="Max Token Length", + # choices=[ + # 75, + # 150, + # 225, + # ], + # info="max token length of text encoder", + # value=self.config.get("advanced.max_token_length", 75), + # ) + + # with gr.Row(): + # if training_type == "lora": + # self.fp8_base = gr.Checkbox( + # label="fp8 base training (experimental)", + # info="U-Net and Text Encoder can be trained with fp8 (experimental)", + # value=self.config.get("advanced.fp8_base", False), + # ) + # self.full_fp16 = gr.Checkbox( + # label="Full fp16 training (experimental)", + # value=self.config.get("advanced.full_fp16", False), + # ) + # self.full_bf16 = gr.Checkbox( + # label="Full bf16 training (experimental)", + # value=self.config.get("advanced.full_bf16", False), + # info="Required bitsandbytes >= 0.36.0", + # ) + + # self.full_fp16.change( + # full_options_update, + # inputs=[self.full_fp16, self.full_bf16], + # outputs=[self.full_fp16, self.full_bf16], + # ) + # self.full_bf16.change( + # full_options_update, + # inputs=[self.full_fp16, self.full_bf16], + # outputs=[self.full_fp16, self.full_bf16], + # ) + + # with gr.Row(): + # 
self.gradient_checkpointing = gr.Checkbox( + # label="Gradient checkpointing", + # value=self.config.get("advanced.gradient_checkpointing", False), + # ) + # self.shuffle_caption = gr.Checkbox( + # label="Shuffle caption", + # value=self.config.get("advanced.shuffle_caption", False), + # ) + # self.persistent_data_loader_workers = gr.Checkbox( + # label="Persistent data loader", + # value=self.config.get("advanced.persistent_data_loader_workers", False), + # ) + # self.mem_eff_attn = gr.Checkbox( + # label="Memory efficient attention", + # value=self.config.get("advanced.mem_eff_attn", False), + # ) + # with gr.Row(): + # self.xformers = gr.Dropdown( + # label="CrossAttention", + # choices=["none", "sdpa", "xformers"], + # value=self.config.get("advanced.xformers", "xformers"), + # ) + # self.color_aug = gr.Checkbox( + # label="Color augmentation", + # value=self.config.get("advanced.color_aug", False), + # info="Enable weak color augmentation", + # ) + # self.flip_aug = gr.Checkbox( + # label="Flip augmentation", + # value=getattr(self.config, "advanced.flip_aug", False), + # info="Enable horizontal flip augmentation", + # ) + # self.masked_loss = gr.Checkbox( + # label="Masked loss", + # value=self.config.get("advanced.masked_loss", False), + # info="Apply mask for calculating loss. conditioning_data_dir is required for dataset", + # ) + # with gr.Row(): + # self.scale_v_pred_loss_like_noise_pred = gr.Checkbox( + # label="Scale v prediction loss", + # value=self.config.get( + # "advanced.scale_v_pred_loss_like_noise_pred", False + # ), + # info="Only for SD v2 models. By scaling the loss according to the time step, the weights of global noise prediction and local noise prediction become the same, and the improvement of details may be expected.", + # ) + # self.min_snr_gamma = gr.Slider( + # label="Min SNR gamma", + # value=self.config.get("advanced.min_snr_gamma", 0), + # minimum=0, + # maximum=20, + # step=1, + # info="Recommended value of 5 when used", + # ) + # self.debiased_estimation_loss = gr.Checkbox( + # label="Debiased Estimation loss", + # value=self.config.get("advanced.debiased_estimation_loss", False), + # info="Automates the processing of noise, allowing for faster model fitting, as well as balancing out color issues. Do not use if Min SNR gamma is specified.", + # ) + # with gr.Row(): + # # self.sdpa = gr.Checkbox(label='Use sdpa', value=False, info='Use sdpa for CrossAttention') + # self.bucket_no_upscale = gr.Checkbox( + # label="Don't upscale bucket resolution", + # value=self.config.get("advanced.bucket_no_upscale", True), + # ) + # self.bucket_reso_steps = gr.Slider( + # label="Bucket resolution steps", + # value=self.config.get("advanced.bucket_reso_steps", 64), + # minimum=1, + # maximum=128, + # ) + # self.random_crop = gr.Checkbox( + # label="Random crop instead of center crop", + # value=self.config.get("advanced.random_crop", False), + # ) + # self.v_pred_like_loss = gr.Slider( + # label="V Pred like loss", + # value=self.config.get("advanced.v_pred_like_loss", 0), + # minimum=0, + # maximum=1, + # step=0.01, + # info="Recommended value of 0.5 when used", + # ) + + # with gr.Row(): + # self.min_timestep = gr.Slider( + # label="Min Timestep", + # value=self.config.get("advanced.min_timestep", 0), + # step=1, + # minimum=0, + # maximum=1000, + # info="Values greater than 0 will make the model more img2img focussed. 
0 = image only", + # ) + # self.max_timestep = gr.Slider( + # label="Max Timestep", + # value=self.config.get("advanced.max_timestep", 1000), + # step=1, + # minimum=0, + # maximum=1000, + # info="Values lower than 1000 will make the model more img2img focussed. 1000 = noise only", + # ) + + # with gr.Row(): + # self.noise_offset_type = gr.Dropdown( + # label="Noise offset type", + # choices=[ + # "Original", + # "Multires", + # ], + # value=self.config.get("advanced.noise_offset_type", "Original"), + # scale=1, + # ) + # with gr.Row(visible=True) as self.noise_offset_original: + # self.noise_offset = gr.Slider( + # label="Noise offset", + # value=self.config.get("advanced.noise_offset", 0), + # minimum=0, + # maximum=1, + # step=0.01, + # info="Recommended values are 0.05 - 0.15", + # ) + # self.noise_offset_random_strength = gr.Checkbox( + # label="Noise offset random strength", + # value=self.config.get( + # "advanced.noise_offset_random_strength", False + # ), + # info="Use random strength between 0~noise_offset for noise offset", + # ) + # self.adaptive_noise_scale = gr.Slider( + # label="Adaptive noise scale", + # value=self.config.get("advanced.adaptive_noise_scale", 0), + # minimum=-1, + # maximum=1, + # step=0.001, + # info="Add `latent mean absolute value * this value` to noise_offset", + # ) + # with gr.Row(visible=False) as self.noise_offset_multires: + # self.multires_noise_iterations = gr.Slider( + # label="Multires noise iterations", + # value=self.config.get("advanced.multires_noise_iterations", 0), + # minimum=0, + # maximum=64, + # step=1, + # info="Enable multires noise (recommended values are 6-10)", + # ) + # self.multires_noise_discount = gr.Slider( + # label="Multires noise discount", + # value=self.config.get("advanced.multires_noise_discount", 0.3), + # minimum=0, + # maximum=1, + # step=0.01, + # info="Recommended values are 0.8. For LoRAs with small datasets, 0.1-0.3", + # ) + # with gr.Row(visible=True): + # self.ip_noise_gamma = gr.Slider( + # label="IP noise gamma", + # value=self.config.get("advanced.ip_noise_gamma", 0), + # minimum=0, + # maximum=1, + # step=0.01, + # info="enable input perturbation noise. used for regularization. recommended value: around 0.1", + # ) + # self.ip_noise_gamma_random_strength = gr.Checkbox( + # label="IP noise gamma random strength", + # value=self.config.get( + # "advanced.ip_noise_gamma_random_strength", False + # ), + # info="Use random strength between 0~ip_noise_gamma for input perturbation noise", + # ) + # self.noise_offset_type.change( + # noise_offset_type_change, + # inputs=[self.noise_offset_type], + # outputs=[ + # self.noise_offset_original, + # self.noise_offset_multires, + # ], + # ) + # with gr.Row(): + # self.caption_dropout_every_n_epochs = gr.Number( + # label="Dropout caption every n epochs", + # value=self.config.get("advanced.caption_dropout_every_n_epochs", 0), + # ) + # self.caption_dropout_rate = gr.Slider( + # label="Rate of caption dropout", + # value=self.config.get("advanced.caption_dropout_rate", 0), + # minimum=0, + # maximum=1, + # ) + # self.vae_batch_size = gr.Slider( + # label="VAE batch size", + # minimum=0, + # maximum=32, + # value=self.config.get("advanced.vae_batch_size", 0), + # step=1, + # ) + # with gr.Group(), gr.Row(): + # self.save_state = gr.Checkbox( + # label="Save training state", + # value=self.config.get("advanced.save_state", False), + # info="Save training state (including optimizer states etc.) 
when saving models" + # ) + + # self.save_state_on_train_end = gr.Checkbox( + # label="Save training state at end of training", + # value=self.config.get("advanced.save_state_on_train_end", False), + # info="Save training state (including optimizer states etc.) on train end" + # ) + + # def list_state_dirs(path): + # self.current_state_dir = path if not path == "" else "." + # return list(list_dirs(path)) + + # self.resume = gr.Dropdown( + # label='Resume from saved training state (path to "last-state" state folder)', + # choices=[self.config.get("advanced.state_dir", "")] + # + list_state_dirs(self.current_state_dir), + # value=self.config.get("advanced.state_dir", ""), + # interactive=True, + # allow_custom_value=True, + # info="Saved state to resume training from" + # ) + # create_refresh_button( + # self.resume, + # lambda: None, + # lambda: { + # "choices": [self.config.get("advanced.state_dir", "")] + # + list_state_dirs(self.current_state_dir) + # }, + # "open_folder_small", + # ) + # self.resume_button = gr.Button( + # "📂", elem_id="open_folder_small", visible=(not headless) + # ) + # self.resume_button.click( + # get_folder_path, + # outputs=self.resume, + # show_progress=False, + # ) + # self.resume.change( + # fn=lambda path: gr.Dropdown( + # choices=[self.config.get("advanced.state_dir", "")] + # + list_state_dirs(path) + # ), + # inputs=self.resume, + # outputs=self.resume, + # show_progress=False, + # ) + # self.max_data_loader_n_workers = gr.Number( + # label="Max num workers for DataLoader", + # info="Override number of epoch. Default: 0", + # step=1, + # minimum=0, + # value=self.config.get("advanced.max_data_loader_n_workers", 0), + # ) + # with gr.Row(): + # self.log_with = gr.Dropdown( + # label="Logging", + # choices=["","wandb", "tensorboard","all"], + # value="", + # info="Loggers to use, tensorboard will be used as the default.", + # ) + # self.wandb_api_key = gr.Textbox( + # label="WANDB API Key", + # value=self.config.get("advanced.wandb_api_key", ""), + # placeholder="(Optional)", + # info="Users can obtain and/or generate an api key in the their user settings on the website: https://wandb.ai/login", + # ) + # self.wandb_run_name = gr.Textbox( + # label="WANDB run name", + # value=self.config.get("advanced.wandb_run_name", ""), + # placeholder="(Optional)", + # info="The name of the specific wandb session", + # ) + # with gr.Group(), gr.Row(): + + # def list_log_tracker_config_files(path): + # self.current_log_tracker_config_dir = path if not path == "" else "." 
+ # return list(list_files(path, exts=[".json"], all=True)) + + # self.log_config = gr.Checkbox( + # label="Log config", + # value=self.config.get("advanced.log_config", False), + # info="Log training parameter to WANDB", + # ) + # self.log_tracker_name = gr.Textbox( + # label="Log tracker name", + # value=self.config.get("advanced.log_tracker_name", ""), + # placeholder="(Optional)", + # info="Name of tracker to use for logging, default is script-specific default name", + # ) + # self.log_tracker_config = gr.Dropdown( + # label="Log tracker config", + # choices=[self.config.get("log_tracker_config_dir", "")] + # + list_log_tracker_config_files(self.current_log_tracker_config_dir), + # value=self.config.get("log_tracker_config_dir", ""), + # info="Path to tracker config file to use for logging", + # interactive=True, + # allow_custom_value=True, + # ) + # create_refresh_button( + # self.log_tracker_config, + # lambda: None, + # lambda: { + # "choices": [self.config.get("log_tracker_config_dir", "")] + # + list_log_tracker_config_files(self.current_log_tracker_config_dir) + # }, + # "open_folder_small", + # ) + # self.log_tracker_config_button = gr.Button( + # document_symbol, elem_id="open_folder_small", visible=(not headless) + # ) + # self.log_tracker_config_button.click( + # get_any_file_path, + # outputs=self.log_tracker_config, + # show_progress=False, + # ) + # self.log_tracker_config.change( + # fn=lambda path: gr.Dropdown( + # choices=[self.config.get("log_tracker_config_dir", "")] + # + list_log_tracker_config_files(path) + # ), + # inputs=self.log_tracker_config, + # outputs=self.log_tracker_config, + # show_progress=False, + # ) diff --git a/kohya_gui/class_source_model.py b/kohya_gui/class_source_model.py index 4b081f677..5ce4ff5ab 100644 --- a/kohya_gui/class_source_model.py +++ b/kohya_gui/class_source_model.py @@ -245,19 +245,73 @@ def list_dataset_config_dirs(path: str) -> list: with gr.Column(): with gr.Row(): self.v2 = gr.Checkbox( - label="v2", value=False, visible=False, min_width=60 + label="v2", value=False, visible=False, min_width=60, + interactive=True, ) self.v_parameterization = gr.Checkbox( label="v_parameterization", value=False, visible=False, min_width=130, + interactive=True, ) self.sdxl_checkbox = gr.Checkbox( label="SDXL", value=False, visible=False, min_width=60, + interactive=True, + ) + self.sd3_checkbox = gr.Checkbox( + label="SD3", + value=False, + visible=False, + min_width=60, + interactive=True, + ) + + def toggle_checkboxes(v2, v_parameterization, sdxl_checkbox, sd3_checkbox): + # Check if all checkboxes are unchecked + if not v2 and not v_parameterization and not sdxl_checkbox and not sd3_checkbox: + # If all unchecked, return new interactive checkboxes + return ( + gr.Checkbox(interactive=True), # v2 checkbox + gr.Checkbox(interactive=True), # v_parameterization checkbox + gr.Checkbox(interactive=True), # sdxl_checkbox + gr.Checkbox(interactive=True), # sd3_checkbox + ) + else: + # If any checkbox is checked, return checkboxes with current interactive state + return ( + gr.Checkbox(interactive=v2), # v2 checkbox + gr.Checkbox(interactive=v_parameterization), # v_parameterization checkbox + gr.Checkbox(interactive=sdxl_checkbox), # sdxl_checkbox + gr.Checkbox(interactive=sd3_checkbox), # sd3_checkbox + ) + + self.v2.change( + fn=toggle_checkboxes, + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + show_progress=False, + ) + 
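The `toggle_checkboxes()` handler wired to `v2.change(...)` above, and to the other three model-type checkboxes just below, enforces a simple rule: while nothing is ticked every box stays interactive, and once any box is ticked only the ticked boxes remain interactive. A pared-down restatement with plain booleans in place of `gr.Checkbox` updates (the function name is illustrative):

```python
# Plain-boolean restatement of the interactivity rule in toggle_checkboxes().
def toggle_interactivity(v2: bool, v_param: bool, sdxl: bool, sd3: bool) -> tuple:
    if not (v2 or v_param or sdxl or sd3):
        return (True, True, True, True)   # nothing selected: all four stay interactive
    return (v2, v_param, sdxl, sd3)       # otherwise only the selected ones do


assert toggle_interactivity(False, False, False, False) == (True, True, True, True)
assert toggle_interactivity(False, False, False, True) == (False, False, False, True)
```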
self.v_parameterization.change( + fn=toggle_checkboxes, + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + show_progress=False, + ) + self.sd3_checkbox.change( + fn=toggle_checkboxes, + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + show_progress=False, + ) + self.sdxl_checkbox.change( + fn=toggle_checkboxes, + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + show_progress=False, ) with gr.Column(): gr.Group(visible=False) @@ -294,6 +348,7 @@ def list_dataset_config_dirs(path: str) -> list: self.v2, self.v_parameterization, self.sdxl_checkbox, + self.sd3_checkbox, ], show_progress=False, ) diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py index b95ac913d..5f0eb20dd 100644 --- a/kohya_gui/common_gui.py +++ b/kohya_gui/common_gui.py @@ -960,11 +960,13 @@ def set_pretrained_model_name_or_path_input( v2 = gr.Checkbox(value=False, visible=False) v_parameterization = gr.Checkbox(value=False, visible=False) sdxl = gr.Checkbox(value=True, visible=False) + sd3 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, + sd3, ) # Check if the given pretrained_model_name_or_path is in the list of V2 base models @@ -973,11 +975,13 @@ def set_pretrained_model_name_or_path_input( v2 = gr.Checkbox(value=True, visible=False) v_parameterization = gr.Checkbox(value=False, visible=False) sdxl = gr.Checkbox(value=False, visible=False) + sd3 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, + sd3, ) # Check if the given pretrained_model_name_or_path is in the list of V parameterization models @@ -988,11 +992,13 @@ def set_pretrained_model_name_or_path_input( v2 = gr.Checkbox(value=True, visible=False) v_parameterization = gr.Checkbox(value=True, visible=False) sdxl = gr.Checkbox(value=False, visible=False) + sd3 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, + sd3, ) # Check if the given pretrained_model_name_or_path is in the list of V1 models @@ -1001,17 +1007,20 @@ def set_pretrained_model_name_or_path_input( v2 = gr.Checkbox(value=False, visible=False) v_parameterization = gr.Checkbox(value=False, visible=False) sdxl = gr.Checkbox(value=False, visible=False) + sd3 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, + sd3, ) # Check if the model_list is set to 'custom' v2 = gr.Checkbox(visible=True) v_parameterization = gr.Checkbox(visible=True) sdxl = gr.Checkbox(visible=True) + sd3 = gr.Checkbox(visible=True) # If a refresh method is provided, use it to update the choices for the Dropdown widget if refresh_method is not None: @@ -1025,6 +1034,7 @@ def set_pretrained_model_name_or_path_input( v2, v_parameterization, sdxl, + sd3, ) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 61b520146..e9f41cd44 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -27,6 +27,7 @@ from .class_source_model import SourceModel from .class_basic_training import BasicTraining from .class_advanced_training import AdvancedTraining +from .class_sd3 import sd3Training from .class_folders import Folders from .class_command_executor import 
CommandExecutor from .class_huggingface import HuggingFace @@ -1019,6 +1020,9 @@ def dreambooth_tab( source_model.sdxl_checkbox, config=config, trainer="finetune", ) + # Add SD3 Parameters + sd3_training = sd3Training(headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox) + with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): advanced_training = AdvancedTraining(headless=headless, config=config) advanced_training.color_aug.change( diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 0c480b15f..af3a2f138 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -26,6 +26,7 @@ from .class_source_model import SourceModel from .class_basic_training import BasicTraining from .class_advanced_training import AdvancedTraining +from .class_sd3 import sd3Training from .class_folders import Folders from .class_sdxl_parameters import SDXLParameters from .class_command_executor import CommandExecutor @@ -1118,6 +1119,9 @@ def list_presets(path): train_text_encoder = gr.Checkbox( label="Train text encoder", value=True ) + + # Add SD3 Parameters + sd3_training = sd3Training(headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox) with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): with gr.Row(): From adc054a9f6d1f8ea3b7e96a76b3a3c1e9798f13d Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 7 Jul 2024 15:20:57 -0400 Subject: [PATCH 032/199] Fix interactivity --- kohya_gui/class_sd3.py | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py index 06d109a89..b2304e28b 100644 --- a/kohya_gui/class_sd3.py +++ b/kohya_gui/class_sd3.py @@ -80,18 +80,22 @@ def noise_offset_type_change( label="Weighting Scheme", choices=["logit_normal", "sigma_sqrt", "mode", "cosmap"], value=self.config.get("sd3.weighting_scheme", "logit_normal"), + interactive=True, ) self.logit_mean = gr.Number( label="Logit Mean", value=self.config.get("sd3.logit_mean", 0.0), + interactive=True, ) self.logit_std = gr.Number( label="Logit Std", value=self.config.get("sd3.logit_std", 1.0), + interactive=True, ) self.mode_scale = gr.Number( label="Mode Scale", value=self.config.get("sd3.mode_scale", 1.29), + interactive=True, ) with gr.Row(): @@ -99,9 +103,11 @@ def noise_offset_type_change( label="CLIP-L Path", placeholder="Path to CLIP-L model", value=self.config.get("sd3.clip_l", ""), + interactive=True, ) self.clip_l_button = gr.Button( - document_symbol, elem_id="open_folder_small", visible=(not headless) + document_symbol, elem_id="open_folder_small", visible=(not headless), + interactive=True, ) self.clip_l_button.click( get_any_file_path, @@ -151,21 +157,44 @@ def noise_offset_type_change( label="CLIP-G Path", placeholder="Path to CLIP-G model", value=self.config.get("sd3.clip_g", ""), + interactive=True, ) + self.clip_g_button = gr.Button( + document_symbol, elem_id="open_folder_small", visible=(not headless), + interactive=True, + ) + self.clip_g_button.click( + get_any_file_path, + outputs=self.clip_g, + show_progress=False, + ) + self.t5xxl = gr.Textbox( label="T5-XXL Path", placeholder="Path to T5-XXL model", value=self.config.get("sd3.t5xxl", ""), + interactive=True, + ) + self.t5xxl_button = gr.Button( + document_symbol, elem_id="open_folder_small", visible=(not headless), + interactive=True, + ) + self.t5xxl_button.click( + get_any_file_path, + outputs=self.t5xxl, + show_progress=False, ) with gr.Row(): self.save_clip = gr.Checkbox( 
label="Save CLIP models", value=self.config.get("sd3.save_clip", False), + interactive=True, ) self.save_t5xxl = gr.Checkbox( label="Save T5-XXL model", value=self.config.get("sd3.save_t5xxl", False), + interactive=True, ) with gr.Row(): @@ -173,26 +202,31 @@ def noise_offset_type_change( label="T5-XXL Device", placeholder="Device for T5-XXL (e.g., cuda:0)", value=self.config.get("sd3.t5xxl_device", ""), + interactive=True, ) self.t5xxl_dtype = gr.Dropdown( label="T5-XXL Dtype", choices=["float32", "fp16", "bf16"], value=self.config.get("sd3.t5xxl_dtype", "bf16"), + interactive=True, ) self.text_encoder_batch_size = gr.Number( label="Text Encoder Batch Size", value=self.config.get("sd3.text_encoder_batch_size", 1), minimum=1, + interactive=True, ) self.cache_text_encoder_outputs = gr.Checkbox( label="Cache Text Encoder Outputs", value=self.config.get("sd3.cache_text_encoder_outputs", False), info="Cache text encoder outputs to speed up inference", + interactive=True, ) self.cache_text_encoder_outputs_to_disk = gr.Checkbox( label="Cache Text Encoder Outputs to Disk", value=self.config.get("sd3.cache_text_encoder_outputs_to_disk", False), info="Cache text encoder outputs to disk to speed up inference", + interactive=True, ) self.sd3_checkbox.change( From 37523a9f3806d6a6ebdbdfa3ae9abfc716760208 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 7 Jul 2024 15:45:49 -0400 Subject: [PATCH 033/199] MVP GUI for SD3 --- kohya_gui/class_sd3.py | 587 ++---------------------------------- kohya_gui/dreambooth_gui.py | 86 ++++++ kohya_gui/finetune_gui.py | 87 ++++++ 3 files changed, 193 insertions(+), 567 deletions(-) diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py index b2304e28b..b8ca446e3 100644 --- a/kohya_gui/class_sd3.py +++ b/kohya_gui/class_sd3.py @@ -47,13 +47,6 @@ def __init__( self.config = config self.sd3_checkbox = sd3_checkbox - # Determine the current directories for VAE and output, falling back to defaults if not specified. - # self.current_vae_dir = self.config.get("advanced.vae_dir", "./models/vae") - # self.current_state_dir = self.config.get("advanced.state_dir", "./outputs") - # self.current_log_tracker_config_dir = self.config.get( - # "advanced.log_tracker_config_dir", "./logs" - # ) - # Define the behavior for changing noise offset type. 
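Throughout `class_sd3.py` the SD3 accordion is only revealed when the SD3 checkbox on the source-model tab is ticked, via `self.sd3_checkbox.change(lambda sd3_checkbox: gr.Accordion(visible=sd3_checkbox), ...)`. A minimal, self-contained sketch of that pattern (not the project's code; the widget labels are placeholders), assuming Gradio 4.x, where returning a component from an event handler updates its properties:

```python
import gradio as gr

with gr.Blocks() as demo:
    sd3_checkbox = gr.Checkbox(label="SD3", value=False)

    # Hidden until the checkbox above is ticked.
    with gr.Accordion("SD3 parameters", open=False, visible=False) as sd3_accordion:
        gr.Number(label="Logit Mean", value=0.0)

    sd3_checkbox.change(
        lambda checked: gr.Accordion(visible=checked),
        inputs=[sd3_checkbox],
        outputs=[sd3_accordion],
    )

if __name__ == "__main__":
    demo.launch()
```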
def noise_offset_type_change( noise_offset_type: str, @@ -71,8 +64,10 @@ def noise_offset_type_change( return (gr.Group(visible=True), gr.Group(visible=False)) else: return (gr.Group(visible=False), gr.Group(visible=True)) - - with gr.Accordion("SD3", open=False, elem_id="sd3_tab", visible=False) as sd3_accordion: + + with gr.Accordion( + "SD3", open=False, elem_id="sd3_tab", visible=False + ) as sd3_accordion: with gr.Group(): gr.Markdown("### SD3 Specific Parameters") with gr.Row(): @@ -97,7 +92,7 @@ def noise_offset_type_change( value=self.config.get("sd3.mode_scale", 1.29), interactive=True, ) - + with gr.Row(): self.clip_l = gr.Textbox( label="CLIP-L Path", @@ -106,7 +101,9 @@ def noise_offset_type_change( interactive=True, ) self.clip_l_button = gr.Button( - document_symbol, elem_id="open_folder_small", visible=(not headless), + document_symbol, + elem_id="open_folder_small", + visible=(not headless), interactive=True, ) self.clip_l_button.click( @@ -115,44 +112,6 @@ def noise_offset_type_change( show_progress=False, ) - - - - # self.log_tracker_config = gr.Dropdown( - # label="Log tracker config", - # choices=[self.config.get("log_tracker_config_dir", "")] - # + list_log_tracker_config_files(self.current_log_tracker_config_dir), - # value=self.config.get("log_tracker_config_dir", ""), - # info="Path to tracker config file to use for logging", - # interactive=True, - # allow_custom_value=True, - # ) - # create_refresh_button( - # self.log_tracker_config, - # lambda: None, - # lambda: { - # "choices": [self.config.get("log_tracker_config_dir", "")] - # + list_log_tracker_config_files(self.current_log_tracker_config_dir) - # }, - # "open_folder_small", - # ) - # self.log_tracker_config_button = gr.Button( - # document_symbol, elem_id="open_folder_small", visible=(not headless) - # ) - # self.log_tracker_config_button.click( - # get_any_file_path, - # outputs=self.log_tracker_config, - # show_progress=False, - # ) - # self.log_tracker_config.change( - # fn=lambda path: gr.Dropdown( - # choices=[self.config.get("log_tracker_config_dir", "")] - # + list_log_tracker_config_files(path) - # ), - # inputs=self.log_tracker_config, - # outputs=self.log_tracker_config, - # show_progress=False, - # ) self.clip_g = gr.Textbox( label="CLIP-G Path", placeholder="Path to CLIP-G model", @@ -160,7 +119,9 @@ def noise_offset_type_change( interactive=True, ) self.clip_g_button = gr.Button( - document_symbol, elem_id="open_folder_small", visible=(not headless), + document_symbol, + elem_id="open_folder_small", + visible=(not headless), interactive=True, ) self.clip_g_button.click( @@ -168,7 +129,7 @@ def noise_offset_type_change( outputs=self.clip_g, show_progress=False, ) - + self.t5xxl = gr.Textbox( label="T5-XXL Path", placeholder="Path to T5-XXL model", @@ -176,7 +137,9 @@ def noise_offset_type_change( interactive=True, ) self.t5xxl_button = gr.Button( - document_symbol, elem_id="open_folder_small", visible=(not headless), + document_symbol, + elem_id="open_folder_small", + visible=(not headless), interactive=True, ) self.t5xxl_button.click( @@ -184,7 +147,7 @@ def noise_offset_type_change( outputs=self.t5xxl, show_progress=False, ) - + with gr.Row(): self.save_clip = gr.Checkbox( label="Save CLIP models", @@ -224,525 +187,15 @@ def noise_offset_type_change( ) self.cache_text_encoder_outputs_to_disk = gr.Checkbox( label="Cache Text Encoder Outputs to Disk", - value=self.config.get("sd3.cache_text_encoder_outputs_to_disk", False), + value=self.config.get( + "sd3.cache_text_encoder_outputs_to_disk", 
False + ), info="Cache text encoder outputs to disk to speed up inference", interactive=True, ) - + self.sd3_checkbox.change( lambda sd3_checkbox: gr.Accordion(visible=sd3_checkbox), inputs=[self.sd3_checkbox], outputs=[sd3_accordion], ) - - - # # GUI elements are only visible when not fine-tuning. - # with gr.Row(visible=not finetuning): - # # Exclude token padding option for LoRA training type. - # if training_type != "lora": - # self.no_token_padding = gr.Checkbox( - # label="No token padding", - # value=self.config.get("advanced.no_token_padding", False), - # ) - # self.gradient_accumulation_steps = gr.Slider( - # label="Gradient accumulate steps", - # info="Number of updates steps to accumulate before performing a backward/update pass", - # value=self.config.get("advanced.gradient_accumulation_steps", 1), - # minimum=1, - # maximum=120, - # step=1, - # ) - # self.weighted_captions = gr.Checkbox( - # label="Weighted captions", - # value=self.config.get("advanced.weighted_captions", False), - # ) - # with gr.Group(), gr.Row(visible=not finetuning): - # self.prior_loss_weight = gr.Number( - # label="Prior loss weight", - # value=self.config.get("advanced.prior_loss_weight", 1.0), - # ) - - # def list_vae_files(path): - # self.current_vae_dir = path if not path == "" else "." - # return list(list_files(path, exts=[".ckpt", ".safetensors"], all=True)) - - # self.vae = gr.Dropdown( - # label="VAE (Optional: Path to checkpoint of vae for training)", - # interactive=True, - # choices=[self.config.get("advanced.vae_dir", "")] - # + list_vae_files(self.current_vae_dir), - # value=self.config.get("advanced.vae_dir", ""), - # allow_custom_value=True, - # ) - # create_refresh_button( - # self.vae, - # lambda: None, - # lambda: { - # "choices": [self.config.get("advanced.vae_dir", "")] - # + list_vae_files(self.current_vae_dir) - # }, - # "open_folder_small", - # ) - # self.vae_button = gr.Button( - # "📂", elem_id="open_folder_small", visible=(not headless) - # ) - # self.vae_button.click( - # get_any_file_path, - # outputs=self.vae, - # show_progress=False, - # ) - - # self.vae.change( - # fn=lambda path: gr.Dropdown( - # choices=[self.config.get("advanced.vae_dir", "")] - # + list_vae_files(path) - # ), - # inputs=self.vae, - # outputs=self.vae, - # show_progress=False, - # ) - - # with gr.Row(): - # self.additional_parameters = gr.Textbox( - # label="Additional parameters", - # placeholder='(Optional) Use to provide additional parameters not handled by the GUI. Eg: --some_parameters "value"', - # value=self.config.get("advanced.additional_parameters", ""), - # ) - # with gr.Accordion("Scheduled Huber Loss", open=False): - # with gr.Row(): - # self.loss_type = gr.Dropdown( - # label="Loss type", - # choices=["huber", "smooth_l1", "l2"], - # value=self.config.get("advanced.loss_type", "l2"), - # info="The type of loss to use and whether it's scheduled based on the timestep", - # ) - # self.huber_schedule = gr.Dropdown( - # label="Huber schedule", - # choices=[ - # "constant", - # "exponential", - # "snr", - # ], - # value=self.config.get("advanced.huber_schedule", "snr"), - # info="The type of loss to use and whether it's scheduled based on the timestep", - # ) - # self.huber_c = gr.Number( - # label="Huber C", - # value=self.config.get("advanced.huber_c", 0.1), - # minimum=0.0, - # maximum=1.0, - # step=0.01, - # info="The huber loss parameter. 
Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type", - # ) - - # with gr.Row(): - # self.save_every_n_steps = gr.Number( - # label="Save every N steps", - # value=self.config.get("advanced.save_every_n_steps", 0), - # precision=0, - # info="(Optional) The model is saved every specified steps", - # ) - # self.save_last_n_steps = gr.Number( - # label="Save last N steps", - # value=self.config.get("advanced.save_last_n_steps", 0), - # precision=0, - # info="(Optional) Save only the specified number of models (old models will be deleted)", - # ) - # self.save_last_n_steps_state = gr.Number( - # label="Save last N steps state", - # value=self.config.get("advanced.save_last_n_steps_state", 0), - # precision=0, - # info="(Optional) Save only the specified number of states (old models will be deleted)", - # ) - # with gr.Row(): - - # def full_options_update(full_fp16, full_bf16): - # full_fp16_active = True - # full_bf16_active = True - - # if full_fp16: - # full_bf16_active = False - # if full_bf16: - # full_fp16_active = False - # return gr.Checkbox( - # interactive=full_fp16_active, - # ), gr.Checkbox(interactive=full_bf16_active) - - # self.keep_tokens = gr.Slider( - # label="Keep n tokens", - # value=self.config.get("advanced.keep_tokens", 0), - # minimum=0, - # maximum=32, - # step=1, - # ) - # self.clip_skip = gr.Slider( - # label="Clip skip", - # value=self.config.get("advanced.clip_skip", 1), - # minimum=0, - # maximum=12, - # step=1, - # ) - # self.max_token_length = gr.Dropdown( - # label="Max Token Length", - # choices=[ - # 75, - # 150, - # 225, - # ], - # info="max token length of text encoder", - # value=self.config.get("advanced.max_token_length", 75), - # ) - - # with gr.Row(): - # if training_type == "lora": - # self.fp8_base = gr.Checkbox( - # label="fp8 base training (experimental)", - # info="U-Net and Text Encoder can be trained with fp8 (experimental)", - # value=self.config.get("advanced.fp8_base", False), - # ) - # self.full_fp16 = gr.Checkbox( - # label="Full fp16 training (experimental)", - # value=self.config.get("advanced.full_fp16", False), - # ) - # self.full_bf16 = gr.Checkbox( - # label="Full bf16 training (experimental)", - # value=self.config.get("advanced.full_bf16", False), - # info="Required bitsandbytes >= 0.36.0", - # ) - - # self.full_fp16.change( - # full_options_update, - # inputs=[self.full_fp16, self.full_bf16], - # outputs=[self.full_fp16, self.full_bf16], - # ) - # self.full_bf16.change( - # full_options_update, - # inputs=[self.full_fp16, self.full_bf16], - # outputs=[self.full_fp16, self.full_bf16], - # ) - - # with gr.Row(): - # self.gradient_checkpointing = gr.Checkbox( - # label="Gradient checkpointing", - # value=self.config.get("advanced.gradient_checkpointing", False), - # ) - # self.shuffle_caption = gr.Checkbox( - # label="Shuffle caption", - # value=self.config.get("advanced.shuffle_caption", False), - # ) - # self.persistent_data_loader_workers = gr.Checkbox( - # label="Persistent data loader", - # value=self.config.get("advanced.persistent_data_loader_workers", False), - # ) - # self.mem_eff_attn = gr.Checkbox( - # label="Memory efficient attention", - # value=self.config.get("advanced.mem_eff_attn", False), - # ) - # with gr.Row(): - # self.xformers = gr.Dropdown( - # label="CrossAttention", - # choices=["none", "sdpa", "xformers"], - # value=self.config.get("advanced.xformers", "xformers"), - # ) - # self.color_aug = gr.Checkbox( - # label="Color augmentation", - # 
value=self.config.get("advanced.color_aug", False), - # info="Enable weak color augmentation", - # ) - # self.flip_aug = gr.Checkbox( - # label="Flip augmentation", - # value=getattr(self.config, "advanced.flip_aug", False), - # info="Enable horizontal flip augmentation", - # ) - # self.masked_loss = gr.Checkbox( - # label="Masked loss", - # value=self.config.get("advanced.masked_loss", False), - # info="Apply mask for calculating loss. conditioning_data_dir is required for dataset", - # ) - # with gr.Row(): - # self.scale_v_pred_loss_like_noise_pred = gr.Checkbox( - # label="Scale v prediction loss", - # value=self.config.get( - # "advanced.scale_v_pred_loss_like_noise_pred", False - # ), - # info="Only for SD v2 models. By scaling the loss according to the time step, the weights of global noise prediction and local noise prediction become the same, and the improvement of details may be expected.", - # ) - # self.min_snr_gamma = gr.Slider( - # label="Min SNR gamma", - # value=self.config.get("advanced.min_snr_gamma", 0), - # minimum=0, - # maximum=20, - # step=1, - # info="Recommended value of 5 when used", - # ) - # self.debiased_estimation_loss = gr.Checkbox( - # label="Debiased Estimation loss", - # value=self.config.get("advanced.debiased_estimation_loss", False), - # info="Automates the processing of noise, allowing for faster model fitting, as well as balancing out color issues. Do not use if Min SNR gamma is specified.", - # ) - # with gr.Row(): - # # self.sdpa = gr.Checkbox(label='Use sdpa', value=False, info='Use sdpa for CrossAttention') - # self.bucket_no_upscale = gr.Checkbox( - # label="Don't upscale bucket resolution", - # value=self.config.get("advanced.bucket_no_upscale", True), - # ) - # self.bucket_reso_steps = gr.Slider( - # label="Bucket resolution steps", - # value=self.config.get("advanced.bucket_reso_steps", 64), - # minimum=1, - # maximum=128, - # ) - # self.random_crop = gr.Checkbox( - # label="Random crop instead of center crop", - # value=self.config.get("advanced.random_crop", False), - # ) - # self.v_pred_like_loss = gr.Slider( - # label="V Pred like loss", - # value=self.config.get("advanced.v_pred_like_loss", 0), - # minimum=0, - # maximum=1, - # step=0.01, - # info="Recommended value of 0.5 when used", - # ) - - # with gr.Row(): - # self.min_timestep = gr.Slider( - # label="Min Timestep", - # value=self.config.get("advanced.min_timestep", 0), - # step=1, - # minimum=0, - # maximum=1000, - # info="Values greater than 0 will make the model more img2img focussed. 0 = image only", - # ) - # self.max_timestep = gr.Slider( - # label="Max Timestep", - # value=self.config.get("advanced.max_timestep", 1000), - # step=1, - # minimum=0, - # maximum=1000, - # info="Values lower than 1000 will make the model more img2img focussed. 
1000 = noise only", - # ) - - # with gr.Row(): - # self.noise_offset_type = gr.Dropdown( - # label="Noise offset type", - # choices=[ - # "Original", - # "Multires", - # ], - # value=self.config.get("advanced.noise_offset_type", "Original"), - # scale=1, - # ) - # with gr.Row(visible=True) as self.noise_offset_original: - # self.noise_offset = gr.Slider( - # label="Noise offset", - # value=self.config.get("advanced.noise_offset", 0), - # minimum=0, - # maximum=1, - # step=0.01, - # info="Recommended values are 0.05 - 0.15", - # ) - # self.noise_offset_random_strength = gr.Checkbox( - # label="Noise offset random strength", - # value=self.config.get( - # "advanced.noise_offset_random_strength", False - # ), - # info="Use random strength between 0~noise_offset for noise offset", - # ) - # self.adaptive_noise_scale = gr.Slider( - # label="Adaptive noise scale", - # value=self.config.get("advanced.adaptive_noise_scale", 0), - # minimum=-1, - # maximum=1, - # step=0.001, - # info="Add `latent mean absolute value * this value` to noise_offset", - # ) - # with gr.Row(visible=False) as self.noise_offset_multires: - # self.multires_noise_iterations = gr.Slider( - # label="Multires noise iterations", - # value=self.config.get("advanced.multires_noise_iterations", 0), - # minimum=0, - # maximum=64, - # step=1, - # info="Enable multires noise (recommended values are 6-10)", - # ) - # self.multires_noise_discount = gr.Slider( - # label="Multires noise discount", - # value=self.config.get("advanced.multires_noise_discount", 0.3), - # minimum=0, - # maximum=1, - # step=0.01, - # info="Recommended values are 0.8. For LoRAs with small datasets, 0.1-0.3", - # ) - # with gr.Row(visible=True): - # self.ip_noise_gamma = gr.Slider( - # label="IP noise gamma", - # value=self.config.get("advanced.ip_noise_gamma", 0), - # minimum=0, - # maximum=1, - # step=0.01, - # info="enable input perturbation noise. used for regularization. recommended value: around 0.1", - # ) - # self.ip_noise_gamma_random_strength = gr.Checkbox( - # label="IP noise gamma random strength", - # value=self.config.get( - # "advanced.ip_noise_gamma_random_strength", False - # ), - # info="Use random strength between 0~ip_noise_gamma for input perturbation noise", - # ) - # self.noise_offset_type.change( - # noise_offset_type_change, - # inputs=[self.noise_offset_type], - # outputs=[ - # self.noise_offset_original, - # self.noise_offset_multires, - # ], - # ) - # with gr.Row(): - # self.caption_dropout_every_n_epochs = gr.Number( - # label="Dropout caption every n epochs", - # value=self.config.get("advanced.caption_dropout_every_n_epochs", 0), - # ) - # self.caption_dropout_rate = gr.Slider( - # label="Rate of caption dropout", - # value=self.config.get("advanced.caption_dropout_rate", 0), - # minimum=0, - # maximum=1, - # ) - # self.vae_batch_size = gr.Slider( - # label="VAE batch size", - # minimum=0, - # maximum=32, - # value=self.config.get("advanced.vae_batch_size", 0), - # step=1, - # ) - # with gr.Group(), gr.Row(): - # self.save_state = gr.Checkbox( - # label="Save training state", - # value=self.config.get("advanced.save_state", False), - # info="Save training state (including optimizer states etc.) when saving models" - # ) - - # self.save_state_on_train_end = gr.Checkbox( - # label="Save training state at end of training", - # value=self.config.get("advanced.save_state_on_train_end", False), - # info="Save training state (including optimizer states etc.) 
on train end" - # ) - - # def list_state_dirs(path): - # self.current_state_dir = path if not path == "" else "." - # return list(list_dirs(path)) - - # self.resume = gr.Dropdown( - # label='Resume from saved training state (path to "last-state" state folder)', - # choices=[self.config.get("advanced.state_dir", "")] - # + list_state_dirs(self.current_state_dir), - # value=self.config.get("advanced.state_dir", ""), - # interactive=True, - # allow_custom_value=True, - # info="Saved state to resume training from" - # ) - # create_refresh_button( - # self.resume, - # lambda: None, - # lambda: { - # "choices": [self.config.get("advanced.state_dir", "")] - # + list_state_dirs(self.current_state_dir) - # }, - # "open_folder_small", - # ) - # self.resume_button = gr.Button( - # "📂", elem_id="open_folder_small", visible=(not headless) - # ) - # self.resume_button.click( - # get_folder_path, - # outputs=self.resume, - # show_progress=False, - # ) - # self.resume.change( - # fn=lambda path: gr.Dropdown( - # choices=[self.config.get("advanced.state_dir", "")] - # + list_state_dirs(path) - # ), - # inputs=self.resume, - # outputs=self.resume, - # show_progress=False, - # ) - # self.max_data_loader_n_workers = gr.Number( - # label="Max num workers for DataLoader", - # info="Override number of epoch. Default: 0", - # step=1, - # minimum=0, - # value=self.config.get("advanced.max_data_loader_n_workers", 0), - # ) - # with gr.Row(): - # self.log_with = gr.Dropdown( - # label="Logging", - # choices=["","wandb", "tensorboard","all"], - # value="", - # info="Loggers to use, tensorboard will be used as the default.", - # ) - # self.wandb_api_key = gr.Textbox( - # label="WANDB API Key", - # value=self.config.get("advanced.wandb_api_key", ""), - # placeholder="(Optional)", - # info="Users can obtain and/or generate an api key in the their user settings on the website: https://wandb.ai/login", - # ) - # self.wandb_run_name = gr.Textbox( - # label="WANDB run name", - # value=self.config.get("advanced.wandb_run_name", ""), - # placeholder="(Optional)", - # info="The name of the specific wandb session", - # ) - # with gr.Group(), gr.Row(): - - # def list_log_tracker_config_files(path): - # self.current_log_tracker_config_dir = path if not path == "" else "." 
- # return list(list_files(path, exts=[".json"], all=True)) - - # self.log_config = gr.Checkbox( - # label="Log config", - # value=self.config.get("advanced.log_config", False), - # info="Log training parameter to WANDB", - # ) - # self.log_tracker_name = gr.Textbox( - # label="Log tracker name", - # value=self.config.get("advanced.log_tracker_name", ""), - # placeholder="(Optional)", - # info="Name of tracker to use for logging, default is script-specific default name", - # ) - # self.log_tracker_config = gr.Dropdown( - # label="Log tracker config", - # choices=[self.config.get("log_tracker_config_dir", "")] - # + list_log_tracker_config_files(self.current_log_tracker_config_dir), - # value=self.config.get("log_tracker_config_dir", ""), - # info="Path to tracker config file to use for logging", - # interactive=True, - # allow_custom_value=True, - # ) - # create_refresh_button( - # self.log_tracker_config, - # lambda: None, - # lambda: { - # "choices": [self.config.get("log_tracker_config_dir", "")] - # + list_log_tracker_config_files(self.current_log_tracker_config_dir) - # }, - # "open_folder_small", - # ) - # self.log_tracker_config_button = gr.Button( - # document_symbol, elem_id="open_folder_small", visible=(not headless) - # ) - # self.log_tracker_config_button.click( - # get_any_file_path, - # outputs=self.log_tracker_config, - # show_progress=False, - # ) - # self.log_tracker_config.change( - # fn=lambda path: gr.Dropdown( - # choices=[self.config.get("log_tracker_config_dir", "")] - # + list_log_tracker_config_files(path) - # ), - # inputs=self.log_tracker_config, - # outputs=self.log_tracker_config, - # show_progress=False, - # ) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index e9f41cd44..ca81beeca 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -187,6 +187,23 @@ def save_configuration( metadata_license, metadata_tags, metadata_title, + + # SD3 parameters + cache_text_encoder_outputs, + cache_text_encoder_outputs_to_disk, + clip_g, + clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + t5xxl, + t5xxl_device, + t5xxl_dtype, + text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -352,6 +369,23 @@ def open_configuration( metadata_license, metadata_tags, metadata_title, + + # SD3 parameters + cache_text_encoder_outputs, + cache_text_encoder_outputs_to_disk, + clip_g, + clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + t5xxl, + t5xxl_device, + t5xxl_dtype, + text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -512,6 +546,23 @@ def train_model( metadata_license, metadata_tags, metadata_title, + + # SD3 parameters + cache_text_encoder_outputs, + cache_text_encoder_outputs_to_disk, + clip_g, + clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + t5xxl, + t5xxl_device, + t5xxl_dtype, + text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -726,6 +777,8 @@ def train_model( if sdxl: run_cmd.append(rf'{scriptdir}/sd-scripts/sdxl_train.py') + elif sd3_checkbox: + run_cmd.append(rf"{scriptdir}/sd-scripts/sd3_train.py") else: run_cmd.append(rf"{scriptdir}/sd-scripts/train_db.py") @@ -882,6 +935,22 @@ def train_model( "wandb_run_name": wandb_run_name if wandb_run_name != "" else 
output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, + + # SD3 only Parameters + "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, + "cache_text_encoder_outputs_to_disk": cache_text_encoder_outputs_to_disk if sd3_checkbox else None, + "clip_g": clip_g if sd3_checkbox else None, + "clip_l": clip_l if sd3_checkbox else None, + "logit_mean": logit_mean if sd3_checkbox else None, + "logit_std": logit_std if sd3_checkbox else None, + "mode_scale": mode_scale if sd3_checkbox else None, + "save_clip": save_clip if sd3_checkbox else None, + "save_t5xxl": save_t5xxl if sd3_checkbox else None, + "t5xxl": t5xxl if sd3_checkbox else None, + "t5xxl_device": t5xxl_device if sd3_checkbox else None, + "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, + "text_encoder_batch_size": text_encoder_batch_size if sd3_checkbox else None, + "weighting_scheme": weighting_scheme if sd3_checkbox else None, } # Given dictionary `config_toml_data` @@ -1177,6 +1246,23 @@ def dreambooth_tab( metadata.metadata_license, metadata.metadata_tags, metadata.metadata_title, + + # SD3 Parameters + sd3_training.cache_text_encoder_outputs, + sd3_training.cache_text_encoder_outputs_to_disk, + sd3_training.clip_g, + sd3_training.clip_l, + sd3_training.logit_mean, + sd3_training.logit_std, + sd3_training.mode_scale, + sd3_training.save_clip, + sd3_training.save_t5xxl, + sd3_training.t5xxl, + sd3_training.t5xxl_device, + sd3_training.t5xxl_dtype, + sd3_training.text_encoder_batch_size, + sd3_training.weighting_scheme, + source_model.sd3_checkbox, ] configuration.button_open_config.click( diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index af3a2f138..a3d3ac01b 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -194,6 +194,23 @@ def save_configuration( metadata_license, metadata_tags, metadata_title, + + # SD3 parameters + cache_text_encoder_outputs, + cache_text_encoder_outputs_to_disk, + clip_g, + clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + t5xxl, + t5xxl_device, + t5xxl_dtype, + text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -365,6 +382,24 @@ def open_configuration( metadata_license, metadata_tags, metadata_title, + + # SD3 parameters + cache_text_encoder_outputs, + cache_text_encoder_outputs_to_disk, + clip_g, + clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + t5xxl, + t5xxl_device, + t5xxl_dtype, + text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, + training_preset, ): # Get list of function parameters and values @@ -542,6 +577,23 @@ def train_model( metadata_license, metadata_tags, metadata_title, + + # SD3 parameters + cache_text_encoder_outputs, + cache_text_encoder_outputs_to_disk, + clip_g, + clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + t5xxl, + t5xxl_device, + t5xxl_dtype, + text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -774,6 +826,8 @@ def train_model( if sdxl_checkbox: run_cmd.append(rf"{scriptdir}/sd-scripts/sdxl_train.py") + elif sd3_checkbox: + run_cmd.append(rf"{scriptdir}/sd-scripts/sd3_train.py") else: run_cmd.append(rf"{scriptdir}/sd-scripts/fine_tune.py") @@ -928,6 +982,22 @@ def train_model( "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, 
"weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, + + # SD3 only Parameters + "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, + "cache_text_encoder_outputs_to_disk": cache_text_encoder_outputs_to_disk if sd3_checkbox else None, + "clip_g": clip_g if sd3_checkbox else None, + "clip_l": clip_l if sd3_checkbox else None, + "logit_mean": logit_mean if sd3_checkbox else None, + "logit_std": logit_std if sd3_checkbox else None, + "mode_scale": mode_scale if sd3_checkbox else None, + "save_clip": save_clip if sd3_checkbox else None, + "save_t5xxl": save_t5xxl if sd3_checkbox else None, + "t5xxl": t5xxl if sd3_checkbox else None, + "t5xxl_device": t5xxl_device if sd3_checkbox else None, + "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, + "text_encoder_batch_size": text_encoder_batch_size if sd3_checkbox else None, + "weighting_scheme": weighting_scheme if sd3_checkbox else None, } # Given dictionary `config_toml_data` @@ -1297,6 +1367,23 @@ def list_presets(path): metadata.metadata_license, metadata.metadata_tags, metadata.metadata_title, + + # SD3 Parameters + sd3_training.cache_text_encoder_outputs, + sd3_training.cache_text_encoder_outputs_to_disk, + sd3_training.clip_g, + sd3_training.clip_l, + sd3_training.logit_mean, + sd3_training.logit_std, + sd3_training.mode_scale, + sd3_training.save_clip, + sd3_training.save_t5xxl, + sd3_training.t5xxl, + sd3_training.t5xxl_device, + sd3_training.t5xxl_dtype, + sd3_training.text_encoder_batch_size, + sd3_training.weighting_scheme, + source_model.sd3_checkbox, ] configuration.button_open_config.click( From 2f8749c40e4797ca8653570616a049e957e29acb Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 8 Jul 2024 20:34:14 -0400 Subject: [PATCH 034/199] Fix text encoder issue --- kohya_gui/class_sd3.py | 8 +- kohya_gui/dreambooth_gui.py | 32 +++--- kohya_gui/finetune_gui.py | 32 +++--- presets/dreambooth/sd3_bdsqlsz_v1.json | 146 +++++++++++++++++++++++++ 4 files changed, 183 insertions(+), 35 deletions(-) create mode 100644 presets/dreambooth/sd3_bdsqlsz_v1.json diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py index b8ca446e3..d5dae715f 100644 --- a/kohya_gui/class_sd3.py +++ b/kohya_gui/class_sd3.py @@ -173,19 +173,21 @@ def noise_offset_type_change( value=self.config.get("sd3.t5xxl_dtype", "bf16"), interactive=True, ) - self.text_encoder_batch_size = gr.Number( + self.sd3_text_encoder_batch_size = gr.Number( label="Text Encoder Batch Size", value=self.config.get("sd3.text_encoder_batch_size", 1), minimum=1, + maximum=1024, + step=1, interactive=True, ) - self.cache_text_encoder_outputs = gr.Checkbox( + self.sd3_cache_text_encoder_outputs = gr.Checkbox( label="Cache Text Encoder Outputs", value=self.config.get("sd3.cache_text_encoder_outputs", False), info="Cache text encoder outputs to speed up inference", interactive=True, ) - self.cache_text_encoder_outputs_to_disk = gr.Checkbox( + self.sd3_cache_text_encoder_outputs_to_disk = gr.Checkbox( label="Cache Text Encoder Outputs to Disk", value=self.config.get( "sd3.cache_text_encoder_outputs_to_disk", False diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index ca81beeca..23f32c992 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -189,8 +189,8 @@ def save_configuration( metadata_title, # SD3 parameters - cache_text_encoder_outputs, - cache_text_encoder_outputs_to_disk, + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, clip_g, clip_l, 
logit_mean, @@ -201,7 +201,7 @@ def save_configuration( t5xxl, t5xxl_device, t5xxl_dtype, - text_encoder_batch_size, + sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, ): @@ -371,8 +371,8 @@ def open_configuration( metadata_title, # SD3 parameters - cache_text_encoder_outputs, - cache_text_encoder_outputs_to_disk, + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, clip_g, clip_l, logit_mean, @@ -383,7 +383,7 @@ def open_configuration( t5xxl, t5xxl_device, t5xxl_dtype, - text_encoder_batch_size, + sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, ): @@ -548,8 +548,8 @@ def train_model( metadata_title, # SD3 parameters - cache_text_encoder_outputs, - cache_text_encoder_outputs_to_disk, + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, clip_g, clip_l, logit_mean, @@ -560,7 +560,7 @@ def train_model( t5xxl, t5xxl_device, t5xxl_dtype, - text_encoder_batch_size, + sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, ): @@ -782,7 +782,7 @@ def train_model( else: run_cmd.append(rf"{scriptdir}/sd-scripts/train_db.py") - cache_text_encoder_outputs = sdxl and sdxl_cache_text_encoder_outputs + cache_text_encoder_outputs = (sdxl and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) no_half_vae = sdxl and sdxl_no_half_vae if max_data_loader_n_workers == "" or None: @@ -937,8 +937,8 @@ def train_model( "xformers": True if xformers == "xformers" else None, # SD3 only Parameters - "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, - "cache_text_encoder_outputs_to_disk": cache_text_encoder_outputs_to_disk if sd3_checkbox else None, + # "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, + "cache_text_encoder_outputs_to_disk": sd3_cache_text_encoder_outputs_to_disk if sd3_checkbox else None, "clip_g": clip_g if sd3_checkbox else None, "clip_l": clip_l if sd3_checkbox else None, "logit_mean": logit_mean if sd3_checkbox else None, @@ -949,7 +949,7 @@ def train_model( "t5xxl": t5xxl if sd3_checkbox else None, "t5xxl_device": t5xxl_device if sd3_checkbox else None, "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, - "text_encoder_batch_size": text_encoder_batch_size if sd3_checkbox else None, + "text_encoder_batch_size": sd3_text_encoder_batch_size if sd3_checkbox else None, "weighting_scheme": weighting_scheme if sd3_checkbox else None, } @@ -1248,8 +1248,8 @@ def dreambooth_tab( metadata.metadata_title, # SD3 Parameters - sd3_training.cache_text_encoder_outputs, - sd3_training.cache_text_encoder_outputs_to_disk, + sd3_training.sd3_cache_text_encoder_outputs, + sd3_training.sd3_cache_text_encoder_outputs_to_disk, sd3_training.clip_g, sd3_training.clip_l, sd3_training.logit_mean, @@ -1260,7 +1260,7 @@ def dreambooth_tab( sd3_training.t5xxl, sd3_training.t5xxl_device, sd3_training.t5xxl_dtype, - sd3_training.text_encoder_batch_size, + sd3_training.sd3_text_encoder_batch_size, sd3_training.weighting_scheme, source_model.sd3_checkbox, ] diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index a3d3ac01b..824c7c6f0 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -196,8 +196,8 @@ def save_configuration( metadata_title, # SD3 parameters - cache_text_encoder_outputs, - cache_text_encoder_outputs_to_disk, + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, clip_g, clip_l, logit_mean, @@ -208,7 +208,7 @@ def save_configuration( t5xxl, t5xxl_device, t5xxl_dtype, - 
text_encoder_batch_size, + sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, ): @@ -384,8 +384,8 @@ def open_configuration( metadata_title, # SD3 parameters - cache_text_encoder_outputs, - cache_text_encoder_outputs_to_disk, + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, clip_g, clip_l, logit_mean, @@ -396,7 +396,7 @@ def open_configuration( t5xxl, t5xxl_device, t5xxl_dtype, - text_encoder_batch_size, + sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, @@ -579,8 +579,8 @@ def train_model( metadata_title, # SD3 parameters - cache_text_encoder_outputs, - cache_text_encoder_outputs_to_disk, + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, clip_g, clip_l, logit_mean, @@ -591,7 +591,7 @@ def train_model( t5xxl, t5xxl_device, t5xxl_dtype, - text_encoder_batch_size, + sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, ): @@ -836,7 +836,7 @@ def train_model( if use_latent_files == "Yes" else f"{train_dir}/{caption_metadata_filename}" ) - cache_text_encoder_outputs = sdxl_checkbox and sdxl_cache_text_encoder_outputs + cache_text_encoder_outputs = (sdxl and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) no_half_vae = sdxl_checkbox and sdxl_no_half_vae if max_data_loader_n_workers == "" or None: @@ -984,8 +984,8 @@ def train_model( "xformers": True if xformers == "xformers" else None, # SD3 only Parameters - "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, - "cache_text_encoder_outputs_to_disk": cache_text_encoder_outputs_to_disk if sd3_checkbox else None, + # "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, + "cache_text_encoder_outputs_to_disk": sd3_cache_text_encoder_outputs_to_disk if sd3_checkbox else None, "clip_g": clip_g if sd3_checkbox else None, "clip_l": clip_l if sd3_checkbox else None, "logit_mean": logit_mean if sd3_checkbox else None, @@ -996,7 +996,7 @@ def train_model( "t5xxl": t5xxl if sd3_checkbox else None, "t5xxl_device": t5xxl_device if sd3_checkbox else None, "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, - "text_encoder_batch_size": text_encoder_batch_size if sd3_checkbox else None, + "text_encoder_batch_size": sd3_text_encoder_batch_size if sd3_checkbox else None, "weighting_scheme": weighting_scheme if sd3_checkbox else None, } @@ -1369,8 +1369,8 @@ def list_presets(path): metadata.metadata_title, # SD3 Parameters - sd3_training.cache_text_encoder_outputs, - sd3_training.cache_text_encoder_outputs_to_disk, + sd3_training.sd3_cache_text_encoder_outputs, + sd3_training.sd3_cache_text_encoder_outputs_to_disk, sd3_training.clip_g, sd3_training.clip_l, sd3_training.logit_mean, @@ -1381,7 +1381,7 @@ def list_presets(path): sd3_training.t5xxl, sd3_training.t5xxl_device, sd3_training.t5xxl_dtype, - sd3_training.text_encoder_batch_size, + sd3_training.sd3_text_encoder_batch_size, sd3_training.weighting_scheme, source_model.sd3_checkbox, ] diff --git a/presets/dreambooth/sd3_bdsqlsz_v1.json b/presets/dreambooth/sd3_bdsqlsz_v1.json new file mode 100644 index 000000000..22ce46e5f --- /dev/null +++ b/presets/dreambooth/sd3_bdsqlsz_v1.json @@ -0,0 +1,146 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "async_upload": false, + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": true, + "cache_latents_to_disk": true, + "caption_dropout_every_n_epochs": 0, + "caption_dropout_rate": 0, + "caption_extension": ".txt", + "clip_g": 
"H:/ComfyUI2/models/clip/clip_g.safetensors", + "clip_l": "H:/ComfyUI2/models/clip/clip_l.safetensors", + "clip_skip": 1, + "color_aug": false, + "dataset_config": "", + "debiased_estimation_loss": false, + "disable_mmap_load_safetensors": false, + "dynamo_backend": "no", + "dynamo_mode": "default", + "dynamo_use_dynamic": false, + "dynamo_use_fullgraph": false, + "enable_bucket": true, + "epoch": 8, + "extra_accelerate_launch_args": "", + "flip_aug": false, + "full_bf16": false, + "full_fp16": false, + "fused_backward_pass": false, + "fused_optimizer_groups": 0, + "gpu_ids": "", + "gradient_accumulation_steps": 1, + "gradient_checkpointing": true, + "huber_c": 0.1, + "huber_schedule": "snr", + "huggingface_path_in_repo": "", + "huggingface_repo_id": "", + "huggingface_repo_type": "", + "huggingface_repo_visibility": "", + "huggingface_token": "", + "ip_noise_gamma": 0, + "ip_noise_gamma_random_strength": false, + "keep_tokens": 0, + "learning_rate": 5e-06, + "learning_rate_te": 0, + "learning_rate_te1": 1e-05, + "learning_rate_te2": 1e-05, + "log_config": false, + "log_tracker_config": "", + "log_tracker_name": "", + "log_with": "", + "logging_dir": "C:/Users/berna/Downloads/martini/logs/sd3", + "logit_mean": 0, + "logit_std": 1, + "loss_type": "l2", + "lr_scheduler": "cosine", + "lr_scheduler_args": "", + "lr_scheduler_num_cycles": 1, + "lr_scheduler_power": 1, + "lr_scheduler_type": "", + "lr_warmup": 10, + "main_process_port": 0, + "masked_loss": false, + "max_bucket_reso": 1536, + "max_data_loader_n_workers": 0, + "max_resolution": "512,512", + "max_timestep": 1000, + "max_token_length": 225, + "max_train_epochs": 8, + "max_train_steps": 1600, + "mem_eff_attn": false, + "metadata_author": "", + "metadata_description": "", + "metadata_license": "", + "metadata_tags": "", + "metadata_title": "", + "min_bucket_reso": 256, + "min_snr_gamma": 0, + "min_timestep": 0, + "mixed_precision": "bf16", + "mode_scale": 1.29, + "model_list": "custom", + "multi_gpu": false, + "multires_noise_discount": 0.3, + "multires_noise_iterations": 0, + "no_token_padding": false, + "noise_offset": 0, + "noise_offset_random_strength": false, + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "num_machines": 1, + "num_processes": 1, + "optimizer": "PagedAdamW8bit", + "optimizer_args": "weight_decay=0.1 betas=.9,.95", + "output_dir": "E:/models/sd3", + "output_name": "sd3", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "E:/models/sd3/sd3_medium.safetensors", + "prior_loss_weight": 1, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "resume_from_huggingface": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 0, + "sample_prompts": "", + "sample_sampler": "euler_a", + "save_as_bool": false, + "save_clip": false, + "save_every_n_epochs": 0, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "save_state_on_train_end": false, + "save_state_to_huggingface": false, + "save_t5xxl": false, + "scale_v_pred_loss_like_noise_pred": false, + "sd3_cache_text_encoder_outputs": true, + "sd3_cache_text_encoder_outputs_to_disk": true, + "sd3_checkbox": true, + "sd3_text_encoder_batch_size": 1, + "sdxl": false, + "sdxl_cache_text_encoder_outputs": false, + "sdxl_no_half_vae": false, + "seed": 1026, + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "t5xxl": "H:/ComfyUI2/models/clip/t5xxl_fp8_e4m3fn.safetensors", + 
"t5xxl_device": "", + "t5xxl_dtype": "bf16", + "train_batch_size": 1, + "train_data_dir": "C:/Users/berna/Downloads/martini/img2", + "v2": false, + "v_parameterization": false, + "v_pred_like_loss": 0, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "wandb_run_name": "", + "weighted_captions": false, + "weighting_scheme": "logit_normal", + "xformers": "sdpa" +} \ No newline at end of file From fd40273a6ff1cd806933afa07032d13d08762586 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 10 Jul 2024 07:50:17 -0400 Subject: [PATCH 035/199] Add fork section to readme --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 9b6c220eb..7320e8d0c 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,7 @@ The GUI allows you to set the training parameters and generate and run the requi - [Starting Accelerate in GUI](#starting-accelerate-in-gui) - [Running Multiple Instances (linux)](#running-multiple-instances-linux) - [Monitoring Processes](#monitoring-processes) + - [Interesting Forks](#interesting-forks) - [Change History](#change-history) ## 🦒 Colab @@ -468,6 +469,10 @@ The following are guides extracted from issues discussions For more details, visit the [GitHub issue](https://github.com/bmaltais/kohya_ss/issues/2577). +## Interesting Forks + +To finetune HunyuanDiT models or create LoRAs, visit this [fork](https://github.com/Tencent/HunyuanDiT/tree/main/kohya_ss-hydit) + ## Change History See release information. From ae5a7b9f6a39342d9539b9f8deb535cc5bcabb61 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 11 Jul 2024 13:12:30 -0400 Subject: [PATCH 036/199] Update sd3 commit --- presets/dreambooth/sd3_bdsqlsz_v2.json | 146 +++++++++++++++++++++++++ sd-scripts | 2 +- 2 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 presets/dreambooth/sd3_bdsqlsz_v2.json diff --git a/presets/dreambooth/sd3_bdsqlsz_v2.json b/presets/dreambooth/sd3_bdsqlsz_v2.json new file mode 100644 index 000000000..0b50c4533 --- /dev/null +++ b/presets/dreambooth/sd3_bdsqlsz_v2.json @@ -0,0 +1,146 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "async_upload": false, + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": true, + "cache_latents_to_disk": true, + "caption_dropout_every_n_epochs": 0, + "caption_dropout_rate": 0, + "caption_extension": ".txt", + "clip_g": "H:/ComfyUI2/models/clip/clip_g.safetensors", + "clip_l": "H:/ComfyUI2/models/clip/clip_l.safetensors", + "clip_skip": 1, + "color_aug": false, + "dataset_config": "", + "debiased_estimation_loss": false, + "disable_mmap_load_safetensors": false, + "dynamo_backend": "no", + "dynamo_mode": "default", + "dynamo_use_dynamic": false, + "dynamo_use_fullgraph": false, + "enable_bucket": true, + "epoch": 8, + "extra_accelerate_launch_args": "", + "flip_aug": false, + "full_bf16": false, + "full_fp16": false, + "fused_backward_pass": false, + "fused_optimizer_groups": 0, + "gpu_ids": "", + "gradient_accumulation_steps": 1, + "gradient_checkpointing": true, + "huber_c": 0.1, + "huber_schedule": "snr", + "huggingface_path_in_repo": "", + "huggingface_repo_id": "", + "huggingface_repo_type": "", + "huggingface_repo_visibility": "", + "huggingface_token": "", + "ip_noise_gamma": 0, + "ip_noise_gamma_random_strength": false, + "keep_tokens": 0, + "learning_rate": 5e-06, + "learning_rate_te": 0, + "learning_rate_te1": 1e-05, + "learning_rate_te2": 1e-05, + "log_config": false, + "log_tracker_config": "", + "log_tracker_name": "", + "log_with": "", + "logging_dir": 
"C:/Users/berna/Downloads/martini/logs/sd3", + "logit_mean": 0, + "logit_std": 1, + "loss_type": "l2", + "lr_scheduler": "cosine", + "lr_scheduler_args": "", + "lr_scheduler_num_cycles": 1, + "lr_scheduler_power": 1, + "lr_scheduler_type": "", + "lr_warmup": 10, + "main_process_port": 0, + "masked_loss": false, + "max_bucket_reso": 1536, + "max_data_loader_n_workers": 0, + "max_resolution": "512,512", + "max_timestep": 1000, + "max_token_length": 150, + "max_train_epochs": 8, + "max_train_steps": 1600, + "mem_eff_attn": false, + "metadata_author": "", + "metadata_description": "", + "metadata_license": "", + "metadata_tags": "", + "metadata_title": "", + "min_bucket_reso": 256, + "min_snr_gamma": 0, + "min_timestep": 0, + "mixed_precision": "bf16", + "mode_scale": 1.29, + "model_list": "custom", + "multi_gpu": false, + "multires_noise_discount": 0.3, + "multires_noise_iterations": 0, + "no_token_padding": false, + "noise_offset": 0, + "noise_offset_random_strength": false, + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "num_machines": 1, + "num_processes": 1, + "optimizer": "PagedAdamW8bit", + "optimizer_args": "weight_decay=0.1 betas=.9,.95", + "output_dir": "E:/models/sd3", + "output_name": "sd3_v2", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "E:/models/sd3/sd3_medium.safetensors", + "prior_loss_weight": 1, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "resume_from_huggingface": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 0, + "sample_prompts": "", + "sample_sampler": "euler_a", + "save_as_bool": false, + "save_clip": false, + "save_every_n_epochs": 0, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "save_state_on_train_end": false, + "save_state_to_huggingface": false, + "save_t5xxl": false, + "scale_v_pred_loss_like_noise_pred": false, + "sd3_cache_text_encoder_outputs": true, + "sd3_cache_text_encoder_outputs_to_disk": true, + "sd3_checkbox": true, + "sd3_text_encoder_batch_size": 1, + "sdxl": false, + "sdxl_cache_text_encoder_outputs": false, + "sdxl_no_half_vae": false, + "seed": 1026, + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "t5xxl": "H:/ComfyUI2/models/clip/t5xxl_fp8_e4m3fn.safetensors", + "t5xxl_device": "", + "t5xxl_dtype": "bf16", + "train_batch_size": 1, + "train_data_dir": "C:/Users/berna/Downloads/martini/img", + "v2": false, + "v_parameterization": false, + "v_pred_like_loss": 0, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "wandb_run_name": "", + "weighted_captions": false, + "weighting_scheme": "logit_normal", + "xformers": "sdpa" +} \ No newline at end of file diff --git a/sd-scripts b/sd-scripts index ea18d5ba6..b8896aad4 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit ea18d5ba6d856995d5c44be4b449b63ac66fe5db +Subproject commit b8896aad400222c8c4441b217fda0f9bb0807ffd From 092138b192cda40a85ed0d99cbcb9cf483fba794 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 24 Jul 2024 18:53:13 -0400 Subject: [PATCH 037/199] Merge security-fix --- .release | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.release b/.release index 4bbfbca25..dc864a052 100644 --- a/.release +++ b/.release @@ -1 +1 @@ -v24.2.0 \ No newline at end of file +v24.2.0 diff --git a/requirements.txt b/requirements.txt index a2245ac9f..40e62f24f 100644 --- a/requirements.txt +++ 
b/requirements.txt @@ -15,7 +15,7 @@ lion-pytorch==0.0.6 lycoris_lora==2.2.0.post3 # lycoris_lora==3.0.0.dev11 omegaconf==2.3.0 -onnx==1.15.0 +onnx==1.16.1 prodigyopt==1.0 protobuf==3.20.3 open-clip-torch==2.20.0 From dae7441b1b9acdf38e37597ba7afc4dfe2995ced Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 24 Jul 2024 18:55:40 -0400 Subject: [PATCH 038/199] Update sc-script to latest code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index b8896aad4..082f13658 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit b8896aad400222c8c4441b217fda0f9bb0807ffd +Subproject commit 082f13658bdbaed872ede6c0a7a75ab1a5f3712d From 5ccf90e7faf4d33912d1fed988165b2154a33ca8 Mon Sep 17 00:00:00 2001 From: b-fission Date: Mon, 5 Aug 2024 19:27:24 -0500 Subject: [PATCH 039/199] Auto-detect model type for safetensors files Automatically tick the checkboxes for v2 and SDXL on the common training UI and LoRA extract/merge utilities. --- kohya_gui/common_gui.py | 8 ++++++ kohya_gui/extract_lora_gui.py | 8 ++++++ kohya_gui/merge_lora_gui.py | 8 ++++++ kohya_gui/sd_modeltype.py | 47 +++++++++++++++++++++++++++++++++++ 4 files changed, 71 insertions(+) create mode 100755 kohya_gui/sd_modeltype.py diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py index 0ca334eb6..7763f65f8 100644 --- a/kohya_gui/common_gui.py +++ b/kohya_gui/common_gui.py @@ -5,6 +5,7 @@ from easygui import msgbox, ynbox from typing import Optional from .custom_logging import setup_logging +from .sd_modeltype import SDModelType import os import re @@ -1009,6 +1010,13 @@ def set_pretrained_model_name_or_path_input( v_parameterization = gr.Checkbox(visible=True) sdxl = gr.Checkbox(visible=True) + # Auto-detect model type if safetensors file path is given + if pretrained_model_name_or_path.lower().endswith(".safetensors"): + detect = SDModelType(pretrained_model_name_or_path) + v2 = gr.Checkbox(value=detect.Is_SD2(), visible=True) + sdxl = gr.Checkbox(value=detect.Is_SDXL(), visible=True) + #TODO: v_parameterization + # If a refresh method is provided, use it to update the choices for the Dropdown widget if refresh_method is not None: args = dict( diff --git a/kohya_gui/extract_lora_gui.py b/kohya_gui/extract_lora_gui.py index 62b12fd9f..54fd33389 100644 --- a/kohya_gui/extract_lora_gui.py +++ b/kohya_gui/extract_lora_gui.py @@ -12,6 +12,7 @@ ) from .custom_logging import setup_logging +from .sd_modeltype import SDModelType # Set up logging log = setup_logging() @@ -337,6 +338,13 @@ def change_sdxl(sdxl): outputs=[load_tuned_model_to, load_original_model_to], ) + #secondary event on model_tuned for auto-detection of SDXL + model_tuned.change( + lambda sdxl, path: gr.Checkbox(value=SDModelType(path).Is_SDXL()), + inputs=[sdxl, model_tuned], + outputs=sdxl + ) + extract_button = gr.Button("Extract LoRA model") extract_button.click( diff --git a/kohya_gui/merge_lora_gui.py b/kohya_gui/merge_lora_gui.py index a3337c4cf..92659362c 100644 --- a/kohya_gui/merge_lora_gui.py +++ b/kohya_gui/merge_lora_gui.py @@ -16,6 +16,7 @@ create_refresh_button, setup_environment ) from .custom_logging import setup_logging +from .sd_modeltype import SDModelType # Set up logging log = setup_logging() @@ -145,6 +146,13 @@ def list_save_to(path): show_progress=False, ) + #secondary event on sd_model for auto-detection of SDXL + sd_model.change( + lambda sdxl, path: gr.Checkbox(value=SDModelType(path).Is_SDXL()), + inputs=[sdxl_model, sd_model], + outputs=sdxl_model + ) + with gr.Group(), 
gr.Row(): lora_a_model = gr.Dropdown( label='LoRA model "A" (path to the LoRA A model)', diff --git a/kohya_gui/sd_modeltype.py b/kohya_gui/sd_modeltype.py new file mode 100755 index 000000000..11891bf8e --- /dev/null +++ b/kohya_gui/sd_modeltype.py @@ -0,0 +1,47 @@ +from os.path import isfile +from safetensors import safe_open +import enum + +# methodology is based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/82a973c04367123ae98bd9abdf80d9eda9b910e2/modules/sd_models.py#L379-L403 + +class ModelType(enum.Enum): + UNKNOWN = 0 + SD1 = 1 + SD2 = 2 + SDXL = 3 + SD3 = 4 + +class SDModelType: + def __init__(self, safetensors_path): + self.model_type = ModelType.UNKNOWN + + if not isfile(safetensors_path): + return + + try: + st = safe_open(filename=safetensors_path, framework="numpy", device="cpu") + def hasKeyPrefix(pfx): + return any(k.startswith(pfx) for k in st.keys()) + + if "model.diffusion_model.x_embedder.proj.weight" in st.keys(): + self.model_type = ModelType.SD3 + elif hasKeyPrefix("conditioner."): + self.model_type = ModelType.SDXL + elif hasKeyPrefix("cond_stage_model.model."): + self.model_type = ModelType.SD2 + elif hasKeyPrefix("model."): + self.model_type = ModelType.SD1 + except: + pass + + def Is_SD1(self): + return self.model_type == ModelType.SD1 + + def Is_SD2(self): + return self.model_type == ModelType.SD2 + + def Is_SDXL(self): + return self.model_type == ModelType.SDXL + + def Is_SD3(self): + return self.model_type == ModelType.SD3 From ed2255685f56674130ba641858c18fbfefd5360f Mon Sep 17 00:00:00 2001 From: b-fission Date: Mon, 5 Aug 2024 19:53:59 -0500 Subject: [PATCH 040/199] autodetect-modeltype: remove unused lambda inputs --- kohya_gui/extract_lora_gui.py | 4 ++-- kohya_gui/merge_lora_gui.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/kohya_gui/extract_lora_gui.py b/kohya_gui/extract_lora_gui.py index 54fd33389..ec3c689ba 100644 --- a/kohya_gui/extract_lora_gui.py +++ b/kohya_gui/extract_lora_gui.py @@ -340,8 +340,8 @@ def change_sdxl(sdxl): #secondary event on model_tuned for auto-detection of SDXL model_tuned.change( - lambda sdxl, path: gr.Checkbox(value=SDModelType(path).Is_SDXL()), - inputs=[sdxl, model_tuned], + lambda path: gr.Checkbox(value=SDModelType(path).Is_SDXL()), + inputs=model_tuned, outputs=sdxl ) diff --git a/kohya_gui/merge_lora_gui.py b/kohya_gui/merge_lora_gui.py index 92659362c..72e632124 100644 --- a/kohya_gui/merge_lora_gui.py +++ b/kohya_gui/merge_lora_gui.py @@ -148,8 +148,8 @@ def list_save_to(path): #secondary event on sd_model for auto-detection of SDXL sd_model.change( - lambda sdxl, path: gr.Checkbox(value=SDModelType(path).Is_SDXL()), - inputs=[sdxl_model, sd_model], + lambda path: gr.Checkbox(value=SDModelType(path).Is_SDXL()), + inputs=sd_model, outputs=sdxl_model ) From 203b835ee49613166a73e0d19fddf26c43afde45 Mon Sep 17 00:00:00 2001 From: b-fission Date: Tue, 6 Aug 2024 04:35:22 -0500 Subject: [PATCH 041/199] rework TE1/TE2 learning rate handling for SDXL dreambooth SDXL dreambooth apparently trains without the text encoders by default, requiring the `--train_text_encoder` flag to be passed so that the learning rates for TE1/TE2 are recognized. The toml handling now permits 0 to be passed as a learning rate in order to disable training of one or both text encoders. This behavior aligns with the description given on the GUI. TE1/TE2 learning rate parameters can be left blank on the GUI to not pass a value to the training script. 
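
Concretely, the flag ends up being derived from the two GUI values roughly like this. This is only a minimal sketch of the behaviour described above — the helper name is invented for the example, the real logic sits inline in train_model, and it assumes a blank GUI field arrives as None, as the inline check suggests:

```python
def wants_text_encoder_training(learning_rate_te1, learning_rate_te2):
    # A blank field is not forwarded at all; an explicit 0 is forwarded
    # but disables training for that text encoder.
    return (
        (learning_rate_te1 is not None and learning_rate_te1 > 0)
        or (learning_rate_te2 is not None and learning_rate_te2 > 0)
    )

print(wants_text_encoder_training(1e-05, 1e-05))  # True  -> --train_text_encoder is passed
print(wants_text_encoder_training(0, 1e-05))      # True  -> 0 disables TE1, TE2 still trains
print(wants_text_encoder_training(0, 0))          # False -> both text encoders stay untrained
print(wants_text_encoder_training(None, None))    # False -> nothing passed, script defaults apply
```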
--- kohya_gui/dreambooth_gui.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index a38230a21..bc7522f62 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -715,6 +715,12 @@ def train_model( else: max_train_steps = int(max_train_steps) + if sdxl: + train_text_encoder = ( + (learning_rate_te1 != None and learning_rate_te1 > 0) or + (learning_rate_te2 != None and learning_rate_te2 > 0) + ) + # def save_huggingface_to_toml(self, toml_file_path: str): config_toml_data = { # Update the values in the TOML data @@ -750,15 +756,9 @@ def train_model( "ip_noise_gamma_random_strength": ip_noise_gamma_random_strength, "keep_tokens": int(keep_tokens), "learning_rate": learning_rate, # both for sd1.5 and sdxl - "learning_rate_te": ( - learning_rate_te if not sdxl and not 0 else None - ), # only for sd1.5 and not 0 - "learning_rate_te1": ( - learning_rate_te1 if sdxl and not 0 else None - ), # only for sdxl and not 0 - "learning_rate_te2": ( - learning_rate_te2 if sdxl and not 0 else None - ), # only for sdxl and not 0 + "learning_rate_te": learning_rate_te if not sdxl else None, # only for sd1.5 + "learning_rate_te1": learning_rate_te1 if sdxl else None, # only for sdxl + "learning_rate_te2": learning_rate_te2 if sdxl else None, # only for sdxl "logging_dir": logging_dir, "log_tracker_config": log_tracker_config, "log_tracker_name": log_tracker_name, @@ -839,6 +839,7 @@ def train_model( ), "train_batch_size": train_batch_size, "train_data_dir": train_data_dir, + "train_text_encoder": train_text_encoder if sdxl else None, "v2": v2, "v_parameterization": v_parameterization, "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None, From f1d7c025aa3a2581ce229bb6a29091d1cb6ae1b3 Mon Sep 17 00:00:00 2001 From: b-fission Date: Tue, 6 Aug 2024 04:49:34 -0500 Subject: [PATCH 042/199] dreambooth_gui: fix toml value filtering condition In python3, `0 == False` will evaluate True. That can cause arg values of 0 to be wrongly eliminated from the toml output. The conditional must check the type when comparing for False. 
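
A minimal, self-contained illustration of why the old check misbehaves (the dictionary is just a toy stand-in for config_toml_data):

```python
# `in` compares with ==, and in Python 3 `0 == False` is True,
# so a legitimate value of 0 matches the False entry and gets filtered out.
config = {"min_snr_gamma": 0, "vae": "", "save_state": False, "seed": 1026}

old = {k: v for k, v in config.items() if v not in ["", False, None]}
# {'seed': 1026}                      -> min_snr_gamma (0) was wrongly removed

new = {k: v for k, v in config.items()
       if not any([v == "", v is False, v is None])}
# {'min_snr_gamma': 0, 'seed': 1026}  -> 0 survives; "" and False are still dropped
```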
--- kohya_gui/dreambooth_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index bc7522f62..b63eeb5ea 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -856,7 +856,7 @@ def train_model( config_toml_data = { key: value for key, value in config_toml_data.items() - if value not in ["", False, None] + if not any([value == "", value is False, value is None]) } config_toml_data["max_data_loader_n_workers"] = int(max_data_loader_n_workers) From c0966bcc3b954087b5dc8e314c559b0824a99dd4 Mon Sep 17 00:00:00 2001 From: b-fission Date: Tue, 6 Aug 2024 12:43:22 -0500 Subject: [PATCH 043/199] autodetect-modeltype: also do the v2 checkbox in extract_lora --- kohya_gui/extract_lora_gui.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/kohya_gui/extract_lora_gui.py b/kohya_gui/extract_lora_gui.py index ec3c689ba..f1650e7f6 100644 --- a/kohya_gui/extract_lora_gui.py +++ b/kohya_gui/extract_lora_gui.py @@ -338,11 +338,17 @@ def change_sdxl(sdxl): outputs=[load_tuned_model_to, load_original_model_to], ) - #secondary event on model_tuned for auto-detection of SDXL + #secondary event on model_tuned for auto-detection of v2/SDXL + def change_modeltype_model_tuned(path): + detect = SDModelType(path) + v2 = gr.Checkbox(value=detect.Is_SD2()) + sdxl = gr.Checkbox(value=detect.Is_SDXL()) + return v2, sdxl + model_tuned.change( - lambda path: gr.Checkbox(value=SDModelType(path).Is_SDXL()), + change_modeltype_model_tuned, inputs=model_tuned, - outputs=sdxl + outputs=[v2, sdxl] ) extract_button = gr.Button("Extract LoRA model") From 778642ac5cae7ddb712362f203ef98134f71ef58 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 8 Aug 2024 20:30:28 -0400 Subject: [PATCH 044/199] Update to latest dev branch code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index b8896aad4..0b3e4f7ab 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit b8896aad400222c8c4441b217fda0f9bb0807ffd +Subproject commit 0b3e4f7ab62b7c93e66972b7bd2774b8fe679792 From d71469119b65ebbfe8c4054cf366e0dba9cbe416 Mon Sep 17 00:00:00 2001 From: b-fission <131207849+b-fission@users.noreply.github.com> Date: Thu, 8 Aug 2024 19:44:22 -0500 Subject: [PATCH 045/199] bring back SDXLConfig accordion for dreambooth gui (#2694) b-fission --- kohya_gui/dreambooth_gui.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index b09aa33d1..a3793a878 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -731,6 +731,9 @@ def train_model( cache_text_encoder_outputs = sdxl and sdxl_cache_text_encoder_outputs no_half_vae = sdxl and sdxl_no_half_vae + cache_text_encoder_outputs = sdxl and sdxl_cache_text_encoder_outputs + no_half_vae = sdxl and sdxl_no_half_vae + if max_data_loader_n_workers == "" or None: max_data_loader_n_workers = 0 else: @@ -1020,6 +1023,11 @@ def dreambooth_tab( source_model.sdxl_checkbox, config=config, trainer="finetune", ) + # Add SDXL Parameters + sdxl_params = SDXLParameters( + source_model.sdxl_checkbox, config=config + ) + with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): advanced_training = AdvancedTraining(headless=headless, config=config) advanced_training.color_aug.change( From 0adba42ff7d665545f735a9eaf747cdb2b2f9a23 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 8 Aug 2024 20:53:45 -0400 Subject: [PATCH 046/199] Update to 
latest sd3 branch commit --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 082f13658..da4d0fe01 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 082f13658bdbaed872ede6c0a7a75ab1a5f3712d +Subproject commit da4d0fe0165b3e0143c237de8cf307d53a9de45a From 41d473a64a093a1f0c3841d01bf11c5383aa97cd Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 8 Aug 2024 21:00:23 -0400 Subject: [PATCH 047/199] Fix merge issue --- kohya_gui/dreambooth_gui.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index a3793a878..b09aa33d1 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -731,9 +731,6 @@ def train_model( cache_text_encoder_outputs = sdxl and sdxl_cache_text_encoder_outputs no_half_vae = sdxl and sdxl_no_half_vae - cache_text_encoder_outputs = sdxl and sdxl_cache_text_encoder_outputs - no_half_vae = sdxl and sdxl_no_half_vae - if max_data_loader_n_workers == "" or None: max_data_loader_n_workers = 0 else: @@ -1023,11 +1020,6 @@ def dreambooth_tab( source_model.sdxl_checkbox, config=config, trainer="finetune", ) - # Add SDXL Parameters - sdxl_params = SDXLParameters( - source_model.sdxl_checkbox, config=config - ) - with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): advanced_training = AdvancedTraining(headless=headless, config=config) advanced_training.color_aug.change( From fad8634ca339f91c0050f44807488bafcef82af4 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 8 Aug 2024 21:17:20 -0400 Subject: [PATCH 048/199] Update gradio version --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 40e62f24f..891402e9c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,13 +7,13 @@ easygui==0.98.3 einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 -gradio==4.36.1 +gradio==4.41.0 huggingface-hub==0.20.1 imagesize==1.4.1 invisible-watermark==0.2.0 lion-pytorch==0.0.6 lycoris_lora==2.2.0.post3 -# lycoris_lora==3.0.0.dev11 +# lycoris_lora==3.0.0.post1 omegaconf==2.3.0 onnx==1.16.1 prodigyopt==1.0 From a3fe369e2a19a2fc464a1cfc5be54e1a200e0c4f Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 10 Aug 2024 13:38:01 -0400 Subject: [PATCH 049/199] Update to latest flux.1 code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index da4d0fe01..8a0f12dde 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit da4d0fe0165b3e0143c237de8cf307d53a9de45a +Subproject commit 8a0f12dde812994ec3facdcdb7c08b362dbceb0f From 2ce56583029e1b5f524e561fced8241d4df263b3 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 10 Aug 2024 14:55:39 -0400 Subject: [PATCH 050/199] Add Flux.1 Model checkbox and detection --- kohya_gui/class_source_model.py | 38 +++++++++++++++++++++++---------- kohya_gui/common_gui.py | 12 +++++++++++ kohya_gui/sd_modeltype.py | 16 ++++++++++++++ 3 files changed, 55 insertions(+), 11 deletions(-) diff --git a/kohya_gui/class_source_model.py b/kohya_gui/class_source_model.py index 5ce4ff5ab..6f0628fef 100644 --- a/kohya_gui/class_source_model.py +++ b/kohya_gui/class_source_model.py @@ -269,16 +269,24 @@ def list_dataset_config_dirs(path: str) -> list: min_width=60, interactive=True, ) + self.flux1_checkbox = gr.Checkbox( + label="Flux.1", + value=False, + visible=False, + min_width=60, + interactive=True, + ) - def toggle_checkboxes(v2, v_parameterization, sdxl_checkbox, 
sd3_checkbox): + def toggle_checkboxes(v2, v_parameterization, sdxl_checkbox, sd3_checkbox, flux1_checkbox): # Check if all checkboxes are unchecked - if not v2 and not v_parameterization and not sdxl_checkbox and not sd3_checkbox: + if not v2 and not v_parameterization and not sdxl_checkbox and not sd3_checkbox and not flux1_checkbox: # If all unchecked, return new interactive checkboxes return ( gr.Checkbox(interactive=True), # v2 checkbox gr.Checkbox(interactive=True), # v_parameterization checkbox gr.Checkbox(interactive=True), # sdxl_checkbox gr.Checkbox(interactive=True), # sd3_checkbox + gr.Checkbox(interactive=True), # sd3_checkbox ) else: # If any checkbox is checked, return checkboxes with current interactive state @@ -287,30 +295,37 @@ def toggle_checkboxes(v2, v_parameterization, sdxl_checkbox, sd3_checkbox): gr.Checkbox(interactive=v_parameterization), # v_parameterization checkbox gr.Checkbox(interactive=sdxl_checkbox), # sdxl_checkbox gr.Checkbox(interactive=sd3_checkbox), # sd3_checkbox + gr.Checkbox(interactive=flux1_checkbox), # flux1_checkbox ) self.v2.change( fn=toggle_checkboxes, - inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], - outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], show_progress=False, ) self.v_parameterization.change( fn=toggle_checkboxes, - inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], - outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], + show_progress=False, + ) + self.sdxl_checkbox.change( + fn=toggle_checkboxes, + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], show_progress=False, ) self.sd3_checkbox.change( fn=toggle_checkboxes, - inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], - outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], show_progress=False, ) - self.sdxl_checkbox.change( + self.flux1_checkbox.change( fn=toggle_checkboxes, - inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], - outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox], + inputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], + outputs=[self.v2, self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, self.flux1_checkbox], show_progress=False, ) with gr.Column(): @@ -349,6 +364,7 @@ def toggle_checkboxes(v2, v_parameterization, sdxl_checkbox, sd3_checkbox): self.v_parameterization, self.sdxl_checkbox, self.sd3_checkbox, + self.flux1_checkbox, ], show_progress=False, ) diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py index e356437fb..8823cb78f 100644 --- 
a/kohya_gui/common_gui.py +++ b/kohya_gui/common_gui.py @@ -963,12 +963,14 @@ def set_pretrained_model_name_or_path_input( v_parameterization = gr.Checkbox(value=False, visible=False) sdxl = gr.Checkbox(value=True, visible=False) sd3 = gr.Checkbox(value=False, visible=False) + flux1 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, sd3, + flux1, ) # Check if the given pretrained_model_name_or_path is in the list of V2 base models @@ -978,12 +980,14 @@ def set_pretrained_model_name_or_path_input( v_parameterization = gr.Checkbox(value=False, visible=False) sdxl = gr.Checkbox(value=False, visible=False) sd3 = gr.Checkbox(value=False, visible=False) + flux1 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, sd3, + flux1, ) # Check if the given pretrained_model_name_or_path is in the list of V parameterization models @@ -995,12 +999,14 @@ def set_pretrained_model_name_or_path_input( v_parameterization = gr.Checkbox(value=True, visible=False) sdxl = gr.Checkbox(value=False, visible=False) sd3 = gr.Checkbox(value=False, visible=False) + flux1 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, sd3, + flux1, ) # Check if the given pretrained_model_name_or_path is in the list of V1 models @@ -1010,12 +1016,14 @@ def set_pretrained_model_name_or_path_input( v_parameterization = gr.Checkbox(value=False, visible=False) sdxl = gr.Checkbox(value=False, visible=False) sd3 = gr.Checkbox(value=False, visible=False) + flux1 = gr.Checkbox(value=False, visible=False) return ( gr.Dropdown(), v2, v_parameterization, sdxl, sd3, + flux1, ) # Check if the model_list is set to 'custom' @@ -1023,12 +1031,15 @@ def set_pretrained_model_name_or_path_input( v_parameterization = gr.Checkbox(visible=True) sdxl = gr.Checkbox(visible=True) sd3 = gr.Checkbox(visible=True) + flux1 = gr.Checkbox(visible=True) # Auto-detect model type if safetensors file path is given if pretrained_model_name_or_path.lower().endswith(".safetensors"): detect = SDModelType(pretrained_model_name_or_path) v2 = gr.Checkbox(value=detect.Is_SD2(), visible=True) sdxl = gr.Checkbox(value=detect.Is_SDXL(), visible=True) + sd3 = gr.Checkbox(value=detect.Is_SD3(), visible=True) + flux1 = gr.Checkbox(value=detect.Is_FLUX1(), visible=True) #TODO: v_parameterization # If a refresh method is provided, use it to update the choices for the Dropdown widget @@ -1044,6 +1055,7 @@ def set_pretrained_model_name_or_path_input( v_parameterization, sdxl, sd3, + flux1, ) diff --git a/kohya_gui/sd_modeltype.py b/kohya_gui/sd_modeltype.py index 11891bf8e..bb70150a0 100755 --- a/kohya_gui/sd_modeltype.py +++ b/kohya_gui/sd_modeltype.py @@ -4,12 +4,15 @@ # methodology is based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/82a973c04367123ae98bd9abdf80d9eda9b910e2/modules/sd_models.py#L379-L403 + class ModelType(enum.Enum): UNKNOWN = 0 SD1 = 1 SD2 = 2 SDXL = 3 SD3 = 4 + FLUX1 = 5 + class SDModelType: def __init__(self, safetensors_path): @@ -20,11 +23,21 @@ def __init__(self, safetensors_path): try: st = safe_open(filename=safetensors_path, framework="numpy", device="cpu") + + # print(st.keys()) + def hasKeyPrefix(pfx): return any(k.startswith(pfx) for k in st.keys()) if "model.diffusion_model.x_embedder.proj.weight" in st.keys(): self.model_type = ModelType.SD3 + elif ( + "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" + in st.keys() + or "double_blocks.0.img_attn.norm.key_norm.scale" in st.keys() + ): 
+ # print("flux1 model detected...") + self.model_type = ModelType.FLUX1 elif hasKeyPrefix("conditioner."): self.model_type = ModelType.SDXL elif hasKeyPrefix("cond_stage_model.model."): @@ -45,3 +58,6 @@ def Is_SDXL(self): def Is_SD3(self): return self.model_type == ModelType.SD3 + + def Is_FLUX1(self): + return self.model_type == ModelType.FLUX1 From 03532bb7851c52a162b70f9c7b703727e1f4d4f2 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 10 Aug 2024 15:13:41 -0400 Subject: [PATCH 051/199] Adding LoRA type "Flux1" to dropdown --- kohya_gui/lora_gui.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index b335921e1..1f619d36a 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -77,6 +77,7 @@ def save_configuration( v2, v_parameterization, sdxl, + flux1_checkbox, logging_dir, train_data_dir, reg_data_dir, @@ -287,6 +288,7 @@ def open_configuration( v2, v_parameterization, sdxl, + flux1_checkbox, logging_dir, train_data_dir, reg_data_dir, @@ -502,6 +504,7 @@ def open_configuration( # Display LoCon parameters based on the 'LoRA_type' from the loaded data # This section dynamically adjusts visibility of certain parameters in the UI if my_data.get("LoRA_type", "Standard") in { + "Flux1", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -527,6 +530,7 @@ def train_model( v2, v_parameterization, sdxl, + flux1_checkbox, logging_dir, train_data_dir, reg_data_dir, @@ -960,6 +964,8 @@ def train_model( if sdxl: run_cmd.append(rf"{scriptdir}/sd-scripts/sdxl_train_network.py") + elif flux1_checkbox: + run_cmd.append(rf"{scriptdir}/sd-scripts/flux_train_network.py") else: run_cmd.append(rf"{scriptdir}/sd-scripts/train_network.py") @@ -1001,7 +1007,7 @@ def train_model( network_module = "lycoris.kohya" network_args = f" preset={LyCORIS_preset} rank_dropout={rank_dropout} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=full train_norm={train_norm}" - if LoRA_type in ["Kohya LoCon", "Standard"]: + if LoRA_type in ["Flux1", "Kohya LoCon", "Standard"]: kohya_lora_var_list = [ "down_lr_weight", "mid_lr_weight", @@ -1020,7 +1026,9 @@ def train_model( for key, value in vars().items() if key in kohya_lora_var_list and value } - if LoRA_type == "Kohya LoCon": + + # Not sure if Flux1 is Standard... or LoCon style... flip a coin... going for LoCon style... 
+ if LoRA_type in ["Flux1", "Kohya LoCon"]: network_args += f' conv_dim="{conv_dim}" conv_alpha="{conv_alpha}"' for key, value in kohya_lora_vars.items(): @@ -1418,6 +1426,7 @@ def list_presets(path): LoRA_type = gr.Dropdown( label="LoRA type", choices=[ + "Flux1", "Kohya DyLoRA", "Kohya LoCon", "LoRA-FA", @@ -1692,6 +1701,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "Kohya DyLoRA", "Kohya LoCon", "LoRA-FA", @@ -1711,6 +1721,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -1730,6 +1741,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "Standard", "Kohya DyLoRA", "Kohya LoCon", @@ -1742,6 +1754,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "Standard", "LoCon", "Kohya DyLoRA", @@ -1762,6 +1775,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "Standard", "LoCon", "Kohya DyLoRA", @@ -1782,6 +1796,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "Standard", "LoCon", "Kohya DyLoRA", @@ -1967,6 +1982,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -1985,6 +2001,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -2006,6 +2023,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "LoCon", "Kohya DyLoRA", "LyCORIS/BOFT", @@ -2026,6 +2044,7 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { + "Flux1", "LoCon", "LyCORIS/BOFT", "LyCORIS/Diag-OFT", @@ -2213,6 +2232,7 @@ def update_LoRA_settings( source_model.v2, source_model.v_parameterization, source_model.sdxl_checkbox, + source_model.flux1_checkbox, folders.logging_dir, source_model.train_data_dir, folders.reg_data_dir, From 5c53db42e9eb2453f9e44910cc7564a051d2cdec Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 10 Aug 2024 16:18:19 -0400 Subject: [PATCH 052/199] Added Flux.1 parameters to GUI --- kohya_gui/class_flux1.py | 221 +++++++++++++++++++++++++++++++++++++++ kohya_gui/lora_gui.py | 67 +++++++++++- 2 files changed, 285 insertions(+), 3 deletions(-) create mode 100644 kohya_gui/class_flux1.py diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py new file mode 100644 index 000000000..5d2e50a77 --- /dev/null +++ b/kohya_gui/class_flux1.py @@ -0,0 +1,221 @@ +import gradio as gr +from typing import Tuple +from .common_gui import ( + get_folder_path, + get_any_file_path, + list_files, + list_dirs, + create_refresh_button, + document_symbol, +) + + +class flux1Training: + """ + This class configures and initializes the advanced training settings for a machine learning model, + including options for headless operation, fine-tuning, training type selection, and default directory paths. + + Attributes: + headless (bool): If True, run without the Gradio interface. + finetuning (bool): If True, enables fine-tuning of the model. + training_type (str): Specifies the type of training to perform. + no_token_padding (gr.Checkbox): Checkbox to disable token padding. + gradient_accumulation_steps (gr.Slider): Slider to set the number of gradient accumulation steps. + weighted_captions (gr.Checkbox): Checkbox to enable weighted captions. 
+ """ + + def __init__( + self, + headless: bool = False, + finetuning: bool = False, + training_type: str = "", + config: dict = {}, + flux1_checkbox: gr.Checkbox = False, + ) -> None: + """ + Initializes the AdvancedTraining class with given settings. + + Parameters: + headless (bool): Run in headless mode without GUI. + finetuning (bool): Enable model fine-tuning. + training_type (str): The type of training to be performed. + config (dict): Configuration options for the training process. + """ + self.headless = headless + self.finetuning = finetuning + self.training_type = training_type + self.config = config + self.flux1_checkbox = flux1_checkbox + + # Define the behavior for changing noise offset type. + def noise_offset_type_change( + noise_offset_type: str, + ) -> Tuple[gr.Group, gr.Group]: + """ + Returns a tuple of Gradio Groups with visibility set based on the noise offset type. + + Parameters: + noise_offset_type (str): The selected noise offset type. + + Returns: + Tuple[gr.Group, gr.Group]: A tuple containing two Gradio Group elements with their visibility set. + """ + if noise_offset_type == "Original": + return (gr.Group(visible=True), gr.Group(visible=False)) + else: + return (gr.Group(visible=False), gr.Group(visible=True)) + + with gr.Accordion( + "Flux.1", open=True, elem_id="flux1_tab", visible=False + ) as flux1_accordion: + with gr.Group(): + # gr.Markdown("### Flux.1 Specific Parameters") + # with gr.Row(): + # self.weighting_scheme = gr.Dropdown( + # label="Weighting Scheme", + # choices=["logit_normal", "sigma_sqrt", "mode", "cosmap"], + # value=self.config.get("flux1.weighting_scheme", "logit_normal"), + # interactive=True, + # ) + # self.logit_mean = gr.Number( + # label="Logit Mean", + # value=self.config.get("flux1.logit_mean", 0.0), + # interactive=True, + # ) + # self.logit_std = gr.Number( + # label="Logit Std", + # value=self.config.get("flux1.logit_std", 1.0), + # interactive=True, + # ) + # self.mode_scale = gr.Number( + # label="Mode Scale", + # value=self.config.get("flux1.mode_scale", 1.29), + # interactive=True, + # ) + + with gr.Row(): + self.ae = gr.Textbox( + label="VAE Path", + placeholder="Path to VAE model", + value=self.config.get("flux1.ae", ""), + interactive=True, + ) + self.ae_button = gr.Button( + document_symbol, + elem_id="open_folder_small", + visible=(not headless), + interactive=True, + ) + self.ae_button.click( + get_any_file_path, + outputs=self.ae, + show_progress=False, + ) + + self.clip_l = gr.Textbox( + label="CLIP-L Path", + placeholder="Path to CLIP-L model", + value=self.config.get("flux1.clip_l", ""), + interactive=True, + ) + self.clip_l_button = gr.Button( + document_symbol, + elem_id="open_folder_small", + visible=(not headless), + interactive=True, + ) + self.clip_l_button.click( + get_any_file_path, + outputs=self.clip_l, + show_progress=False, + ) + + # self.clip_g = gr.Textbox( + # label="CLIP-G Path", + # placeholder="Path to CLIP-G model", + # value=self.config.get("flux1.clip_g", ""), + # interactive=True, + # ) + # self.clip_g_button = gr.Button( + # document_symbol, + # elem_id="open_folder_small", + # visible=(not headless), + # interactive=True, + # ) + # self.clip_g_button.click( + # get_any_file_path, + # outputs=self.clip_g, + # show_progress=False, + # ) + + self.t5xxl = gr.Textbox( + label="T5-XXL Path", + placeholder="Path to T5-XXL model", + value=self.config.get("flux1.t5xxl", ""), + interactive=True, + ) + self.t5xxl_button = gr.Button( + document_symbol, + elem_id="open_folder_small", + visible=(not 
headless), + interactive=True, + ) + self.t5xxl_button.click( + get_any_file_path, + outputs=self.t5xxl, + show_progress=False, + ) + + # with gr.Row(): + # self.save_clip = gr.Checkbox( + # label="Save CLIP models", + # value=self.config.get("flux1.save_clip", False), + # interactive=True, + # ) + # self.save_t5xxl = gr.Checkbox( + # label="Save T5-XXL model", + # value=self.config.get("flux1.save_t5xxl", False), + # interactive=True, + # ) + + with gr.Row(): + # self.t5xxl_device = gr.Textbox( + # label="T5-XXL Device", + # placeholder="Device for T5-XXL (e.g., cuda:0)", + # value=self.config.get("flux1.t5xxl_device", ""), + # interactive=True, + # ) + # self.t5xxl_dtype = gr.Dropdown( + # label="T5-XXL Dtype", + # choices=["float32", "fp16", "bf16"], + # value=self.config.get("flux1.t5xxl_dtype", "bf16"), + # interactive=True, + # ) + # self.flux1_text_encoder_batch_size = gr.Number( + # label="Text Encoder Batch Size", + # value=self.config.get("flux1.text_encoder_batch_size", 1), + # minimum=1, + # maximum=1024, + # step=1, + # interactive=True, + # ) + self.flux1_cache_text_encoder_outputs = gr.Checkbox( + label="Cache Text Encoder Outputs", + value=self.config.get("flux1.cache_text_encoder_outputs", False), + info="Cache text encoder outputs to speed up inference", + interactive=True, + ) + self.flux1_cache_text_encoder_outputs_to_disk = gr.Checkbox( + label="Cache Text Encoder Outputs to Disk", + value=self.config.get( + "flux1.cache_text_encoder_outputs_to_disk", False + ), + info="Cache text encoder outputs to disk to speed up inference", + interactive=True, + ) + + self.flux1_checkbox.change( + lambda flux1_checkbox: gr.Accordion(visible=flux1_checkbox), + inputs=[self.flux1_checkbox], + outputs=[flux1_accordion], + ) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 1f619d36a..49ff236f2 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -36,6 +36,7 @@ from .class_huggingface import HuggingFace from .class_metadata import MetaData from .class_gui_config import KohyaSSGUIConfig +from .class_flux1 import flux1Training from .dreambooth_folder_creation_gui import ( gradio_dreambooth_folder_creation_tab, @@ -238,6 +239,11 @@ def save_configuration( loraplus_lr_ratio, loraplus_text_encoder_lr_ratio, loraplus_unet_lr_ratio, + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + clip_l, + t5xxl, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -449,6 +455,11 @@ def open_configuration( loraplus_lr_ratio, loraplus_text_encoder_lr_ratio, loraplus_unet_lr_ratio, + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + clip_l, + t5xxl, training_preset, ): # Get list of function parameters and their values @@ -691,6 +702,11 @@ def train_model( loraplus_lr_ratio, loraplus_text_encoder_lr_ratio, loraplus_unet_lr_ratio, + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + clip_l, + t5xxl, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1007,7 +1023,35 @@ def train_model( network_module = "lycoris.kohya" network_args = f" preset={LyCORIS_preset} rank_dropout={rank_dropout} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=full train_norm={train_norm}" - if LoRA_type in ["Flux1", "Kohya LoCon", "Standard"]: + if LoRA_type in ["Flux1"]: + kohya_lora_var_list = [ + "down_lr_weight", + "mid_lr_weight", + "up_lr_weight", + 
"block_lr_zero_threshold", + "block_dims", + "block_alphas", + "conv_block_dims", + "conv_block_alphas", + "rank_dropout", + "module_dropout", + ] + network_module = "networks.lora_flux" + kohya_lora_vars = { + key: value + for key, value in vars().items() + if key in kohya_lora_var_list and value + } + + # Not sure if Flux1 is Standard... or LoCon style... flip a coin... going for LoCon style... + if LoRA_type in ["Flux1"]: + network_args += f' conv_dim="{conv_dim}" conv_alpha="{conv_alpha}"' + + for key, value in kohya_lora_vars.items(): + if value: + network_args += f" {key}={value}" + + if LoRA_type in ["Kohya LoCon", "Standard"]: kohya_lora_var_list = [ "down_lr_weight", "mid_lr_weight", @@ -1028,7 +1072,7 @@ def train_model( } # Not sure if Flux1 is Standard... or LoCon style... flip a coin... going for LoCon style... - if LoRA_type in ["Flux1", "Kohya LoCon"]: + if LoRA_type in ["Kohya LoCon"]: network_args += f' conv_dim="{conv_dim}" conv_alpha="{conv_alpha}"' for key, value in kohya_lora_vars.items(): @@ -1208,7 +1252,7 @@ def train_model( "noise_offset_random_strength": noise_offset_random_strength if noise_offset_type == "Original" else None, "noise_offset_type": noise_offset_type, "optimizer_type": optimizer, - "optimizer_args": str(optimizer_args).replace('"', "").split(), + "optimizer_args": str(optimizer_args).replace('"', "").split() if optimizer_args != [] else None, "output_dir": output_dir, "output_name": output_name, "persistent_data_loader_workers": int(persistent_data_loader_workers), @@ -1263,6 +1307,14 @@ def train_model( "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, + + # Flux.1 specific parameters + "flux1_cache_text_encoder_outputs": flux1_cache_text_encoder_outputs if flux1_checkbox else None, + "flux1_cache_text_encoder_outputs_to_disk": flux1_cache_text_encoder_outputs_to_disk if flux1_checkbox else None, + "ae": ae if flux1_checkbox else None, + "clip_l": clip_l if flux1_checkbox else None, + "t5xxl": t5xxl if flux1_checkbox else None, + } # Given dictionary `config_toml_data` @@ -1530,6 +1582,9 @@ def list_presets(path): sdxl_params = SDXLParameters( source_model.sdxl_checkbox, config=config ) + + # Add FLUX1 Parameters + flux1_training = flux1Training(headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox) # LyCORIS Specific parameters with gr.Accordion("LyCORIS", visible=False) as lycoris_accordion: @@ -2392,6 +2447,12 @@ def update_LoRA_settings( loraplus_lr_ratio, loraplus_text_encoder_lr_ratio, loraplus_unet_lr_ratio, + # Flux1 parameters + flux1_training.flux1_cache_text_encoder_outputs, + flux1_training.flux1_cache_text_encoder_outputs_to_disk, + flux1_training.ae, + flux1_training.clip_l, + flux1_training.t5xxl, ] configuration.button_open_config.click( From 6f0a837f1e635e4f6a49fbfe81a84d3f419df9c3 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 13 Aug 2024 08:58:33 -0400 Subject: [PATCH 053/199] Update sd-scripts and requirements --- kohya_gui/lora_gui.py | 6 +----- requirements.txt | 8 +++++--- requirements_linux.txt | 2 +- requirements_linux_docker.txt | 2 +- requirements_macos_amd64.txt | 2 +- requirements_macos_arm64.txt | 2 +- requirements_runpod.txt | 2 +- requirements_windows.txt | 2 +- sd-scripts | 2 +- 9 files changed, 13 insertions(+), 15 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 49ff236f2..2e76ed40a 100644 --- a/kohya_gui/lora_gui.py +++ 
b/kohya_gui/lora_gui.py @@ -1023,7 +1023,7 @@ def train_model( network_module = "lycoris.kohya" network_args = f" preset={LyCORIS_preset} rank_dropout={rank_dropout} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=full train_norm={train_norm}" - if LoRA_type in ["Flux1"]: + if LoRA_type == "Flux1": kohya_lora_var_list = [ "down_lr_weight", "mid_lr_weight", @@ -1042,10 +1042,6 @@ def train_model( for key, value in vars().items() if key in kohya_lora_var_list and value } - - # Not sure if Flux1 is Standard... or LoCon style... flip a coin... going for LoCon style... - if LoRA_type in ["Flux1"]: - network_args += f' conv_dim="{conv_dim}" conv_alpha="{conv_alpha}"' for key, value in kohya_lora_vars.items(): if value: diff --git a/requirements.txt b/requirements.txt index 891402e9c..aed190b44 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -accelerate==0.25.0 +accelerate==0.33.0 aiofiles==23.2.1 altair==4.2.2 dadaptation==3.1 @@ -8,7 +8,7 @@ einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 gradio==4.41.0 -huggingface-hub==0.20.1 +huggingface-hub==0.24.5 imagesize==1.4.1 invisible-watermark==0.2.0 lion-pytorch==0.0.6 @@ -25,10 +25,12 @@ pytorch-lightning==1.9.0 rich>=13.7.1 safetensors==0.4.2 scipy==1.11.4 +# for T5XXL tokenizer (SD3/FLUX) +sentencepiece==0.2.0 timm==0.6.12 tk==0.1.0 toml==0.10.2 -transformers==4.38.0 +transformers==4.44.0 voluptuous==0.13.1 wandb==0.15.11 scipy==1.11.4 diff --git a/requirements_linux.txt b/requirements_linux.txt index 41275f63a..261b7e271 100644 --- a/requirements_linux.txt +++ b/requirements_linux.txt @@ -1,5 +1,5 @@ torch==2.1.2+cu118 torchvision==0.16.2+cu118 xformers==0.0.23.post1+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 -bitsandbytes==0.43.0 +bitsandbytes==0.43.3 tensorboard==2.15.2 tensorflow==2.15.0.post1 onnxruntime-gpu==1.17.1 -r requirements.txt diff --git a/requirements_linux_docker.txt b/requirements_linux_docker.txt index 779ed6d8b..f5272a73e 100644 --- a/requirements_linux_docker.txt +++ b/requirements_linux_docker.txt @@ -1,4 +1,4 @@ xformers>=0.0.20 -bitsandbytes==0.43.0 +bitsandbytes==0.43.3 accelerate==0.25.0 tensorboard \ No newline at end of file diff --git a/requirements_macos_amd64.txt b/requirements_macos_amd64.txt index 571d9b6ef..5d65837ef 100644 --- a/requirements_macos_amd64.txt +++ b/requirements_macos_amd64.txt @@ -1,5 +1,5 @@ torch==2.0.0 torchvision==0.15.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html -xformers bitsandbytes==0.41.1 +xformers bitsandbytes==0.43.3 tensorflow-macos tensorboard==2.14.1 onnxruntime==1.17.1 -r requirements.txt diff --git a/requirements_macos_arm64.txt b/requirements_macos_arm64.txt index 96acb97c3..364c44ad5 100644 --- a/requirements_macos_arm64.txt +++ b/requirements_macos_arm64.txt @@ -1,5 +1,5 @@ torch==2.0.0 torchvision==0.15.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html -xformers bitsandbytes==0.41.1 +xformers bitsandbytes==0.43.3 tensorflow-macos tensorflow-metal tensorboard==2.14.1 onnxruntime==1.17.1 -r requirements.txt diff --git a/requirements_runpod.txt b/requirements_runpod.txt index 481da43d4..af6649949 100644 --- a/requirements_runpod.txt +++ b/requirements_runpod.txt @@ -1,5 +1,5 @@ torch==2.1.2+cu118 torchvision==0.16.2+cu118 xformers==0.0.23.post1+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 # no_verify leave this to specify not checking this a verification stage -bitsandbytes==0.43.0 +bitsandbytes==0.43.3 tensorboard==2.14.1 
tensorflow==2.14.0 wheel tensorrt onnxruntime-gpu==1.17.1 diff --git a/requirements_windows.txt b/requirements_windows.txt index a9300090c..243b24df0 100644 --- a/requirements_windows.txt +++ b/requirements_windows.txt @@ -1,4 +1,4 @@ -bitsandbytes==0.43.0 +bitsandbytes==0.43.3 tensorboard tensorflow>=2.16.1 onnxruntime-gpu==1.17.1 diff --git a/sd-scripts b/sd-scripts index 8a0f12dde..9711c96f9 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 8a0f12dde812994ec3facdcdb7c08b362dbceb0f +Subproject commit 9711c96f96038df5fa1a15d073244198b93ef0a2 From c036fdab37b0485b65da7e5d9bdcc752cd515985 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 13 Aug 2024 11:12:02 -0400 Subject: [PATCH 054/199] Add missing Flux.1 GUI parameters --- kohya_gui/class_advanced_training.py | 27 +++++++++++++++++------ kohya_gui/class_flux1.py | 31 +++++++++++++++++++------- kohya_gui/lora_gui.py | 33 ++++++++++++++++++++++------ 3 files changed, 69 insertions(+), 22 deletions(-) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index 2b2bf9ee7..16912c5af 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -146,7 +146,7 @@ def list_vae_files(path): with gr.Row(): self.loss_type = gr.Dropdown( label="Loss type", - choices=["huber", "smooth_l1", "l2"], + choices=["huber", "smooth_l1", "l1", "l2"], value=self.config.get("advanced.loss_type", "l2"), info="The type of loss to use and whether it's scheduled based on the timestep", ) @@ -228,12 +228,11 @@ def full_options_update(full_fp16, full_bf16): ) with gr.Row(): - if training_type == "lora": - self.fp8_base = gr.Checkbox( - label="fp8 base training (experimental)", - info="U-Net and Text Encoder can be trained with fp8 (experimental)", - value=self.config.get("advanced.fp8_base", False), - ) + self.fp8_base = gr.Checkbox( + label="fp8 base", + info="Use fp8 for base model", + value=self.config.get("advanced.fp8_base", False), + ) self.full_fp16 = gr.Checkbox( label="Full fp16 training (experimental)", value=self.config.get("advanced.full_fp16", False), @@ -254,6 +253,20 @@ def full_options_update(full_fp16, full_bf16): inputs=[self.full_fp16, self.full_bf16], outputs=[self.full_fp16, self.full_bf16], ) + + with gr.Row(): + self.highvram = gr.Checkbox( + label="highvram", + value=self.config.get("advanced.highvram", False), + info="Disable low VRAM optimization. e.g. do not clear CUDA cache after each latent caching (for machines which have bigger VRAM)", + interactive=True, + ) + self.lowvram = gr.Checkbox( + label="lowvram", + value=self.config.get("advanced.lowvram", False), + info="Enable low RAM optimization. e.g. 
load models to VRAM instead of RAM (for machines which have bigger VRAM than RAM such as Colab and Kaggle)", + interactive=True, + ) with gr.Row(): self.gradient_checkpointing = gr.Checkbox( diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 5d2e50a77..e03dcd990 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -191,14 +191,29 @@ def noise_offset_type_change( # value=self.config.get("flux1.t5xxl_dtype", "bf16"), # interactive=True, # ) - # self.flux1_text_encoder_batch_size = gr.Number( - # label="Text Encoder Batch Size", - # value=self.config.get("flux1.text_encoder_batch_size", 1), - # minimum=1, - # maximum=1024, - # step=1, - # interactive=True, - # ) + + self.discrete_flow_shift = gr.Number( + label="Discrete Flow Shift", + value=self.config.get("flux1.discrete_flow_shift", 3.0), + info="Discrete flow shift for the Euler Discrete Scheduler, default is 3.0", + minimum=0, + maximum=1024, + step=.01, + interactive=True, + ) + self.model_prediction_type = gr.Dropdown( + label="Model Prediction Type", + choices=["raw", "additive", "sigma_scaled"], + value=self.config.get("flux1.timestep_sampling", "sigma_scaled"), + interactive=True, + ) + self.timestep_sampling = gr.Dropdown( + label="Timestep Sampling", + choices=["sigma", "uniform", "sigmoid"], + value=self.config.get("flux1.timestep_sampling", "sigma"), + interactive=True, + ) + self.flux1_cache_text_encoder_outputs = gr.Checkbox( label="Cache Text Encoder Outputs", value=self.config.get("flux1.cache_text_encoder_outputs", False), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 2e76ed40a..375733588 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -102,11 +102,11 @@ def save_configuration( gradient_checkpointing, fp8_base, full_fp16, - # no_token_padding, + highvram, + lowvram, stop_text_encoder_training, min_bucket_reso, max_bucket_reso, - # use_8bit_adam, xformers, save_model_as, shuffle_caption, @@ -239,11 +239,15 @@ def save_configuration( loraplus_lr_ratio, loraplus_text_encoder_lr_ratio, loraplus_unet_lr_ratio, + #Flux1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, ae, clip_l, t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -318,11 +322,11 @@ def open_configuration( gradient_checkpointing, fp8_base, full_fp16, - # no_token_padding, + highvram, + lowvram, stop_text_encoder_training, min_bucket_reso, max_bucket_reso, - # use_8bit_adam, xformers, save_model_as, shuffle_caption, @@ -460,6 +464,9 @@ def open_configuration( ae, clip_l, t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, training_preset, ): # Get list of function parameters and their values @@ -565,11 +572,11 @@ def train_model( gradient_checkpointing, fp8_base, full_fp16, - # no_token_padding, + highvram, + lowvram, stop_text_encoder_training_pct, min_bucket_reso, max_bucket_reso, - # use_8bit_adam, xformers, save_model_as, shuffle_caption, @@ -707,6 +714,9 @@ def train_model( ae, clip_l, t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1183,6 +1193,7 @@ def train_model( "full_fp16": full_fp16, "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, + "highvram": highvram, "huber_c": huber_c, "huber_schedule": huber_schedule, "huggingface_repo_id": 
huggingface_repo_id, @@ -1202,6 +1213,7 @@ def train_model( "loraplus_text_encoder_lr_ratio": loraplus_text_encoder_lr_ratio if not 0 else None, "loraplus_unet_lr_ratio": loraplus_unet_lr_ratio if not 0 else None, "loss_type": loss_type, + "lowvram": lowvram, "lr_scheduler": lr_scheduler, "lr_scheduler_args": str(lr_scheduler_args).replace('"', "").split(), "lr_scheduler_num_cycles": ( @@ -1310,7 +1322,9 @@ def train_model( "ae": ae if flux1_checkbox else None, "clip_l": clip_l if flux1_checkbox else None, "t5xxl": t5xxl if flux1_checkbox else None, - + "discrete_flow_shift": discrete_flow_shift if flux1_checkbox else None, + "model_prediction_type": model_prediction_type if flux1_checkbox else None, + "timestep_sampling": timestep_sampling if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -2307,6 +2321,8 @@ def update_LoRA_settings( advanced_training.gradient_checkpointing, advanced_training.fp8_base, advanced_training.full_fp16, + advanced_training.highvram, + advanced_training.lowvram, # advanced_training.no_token_padding, basic_training.stop_text_encoder_training, basic_training.min_bucket_reso, @@ -2449,6 +2465,9 @@ def update_LoRA_settings( flux1_training.ae, flux1_training.clip_l, flux1_training.t5xxl, + flux1_training.discrete_flow_shift, + flux1_training.model_prediction_type, + flux1_training.timestep_sampling, ] configuration.button_open_config.click( From bbae48993449a8e4793713e3af3be64a60729619 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 15 Aug 2024 17:43:56 -0400 Subject: [PATCH 055/199] Update to latest sd-scripts sd3 code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 9711c96f9..35b6cb0cd 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 9711c96f96038df5fa1a15d073244198b93ef0a2 +Subproject commit 35b6cb0cd1b319d5f34b44a8c24c81c42895fa2e From 2eb677d652547b196955a4c6f87a1b3e96ca9291 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 15 Aug 2024 18:02:45 -0400 Subject: [PATCH 056/199] Fix issue with cache_text_encoder_outputs --- kohya_gui/lora_gui.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 375733588..166e5721e 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1174,7 +1174,7 @@ def train_model( "cache_latents": cache_latents, "cache_latents_to_disk": cache_latents_to_disk, "cache_text_encoder_outputs": ( - True if sdxl and sdxl_cache_text_encoder_outputs else None + True if (sdxl and sdxl_cache_text_encoder_outputs) or (flux1_checkbox and flux1_cache_text_encoder_outputs) else None ), "caption_dropout_every_n_epochs": int(caption_dropout_every_n_epochs), "caption_dropout_rate": caption_dropout_rate, @@ -1317,8 +1317,8 @@ def train_model( "xformers": True if xformers == "xformers" else None, # Flux.1 specific parameters - "flux1_cache_text_encoder_outputs": flux1_cache_text_encoder_outputs if flux1_checkbox else None, - "flux1_cache_text_encoder_outputs_to_disk": flux1_cache_text_encoder_outputs_to_disk if flux1_checkbox else None, + # "cache_text_encoder_outputs": see previous assignment above for code + "cache_text_encoder_outputs_to_disk": flux1_cache_text_encoder_outputs_to_disk if flux1_checkbox else None, "ae": ae if flux1_checkbox else None, "clip_l": clip_l if flux1_checkbox else None, "t5xxl": t5xxl if flux1_checkbox else None, From d42793408b08654f93fc864bf548f461db41ef07 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 16 Aug 2024 15:20:07 -0400 
Subject: [PATCH 057/199] Update to latest sd-scripts flux1 code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 35b6cb0cd..e45d3f863 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 35b6cb0cd1b319d5f34b44a8c24c81c42895fa2e +Subproject commit e45d3f8634c6dd4e358a8c7972f7c851f18f94d3 From 0a49908f17a8169a75e2ff63f7f429c094bafb0b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 16 Aug 2024 15:29:30 -0400 Subject: [PATCH 058/199] Adding new flux.1 options to GUI --- kohya_gui/class_flux1.py | 22 ++++++++++++++++++++++ kohya_gui/lora_gui.py | 17 ++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index e03dcd990..940bd6743 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -228,6 +228,28 @@ def noise_offset_type_change( info="Cache text encoder outputs to disk to speed up inference", interactive=True, ) + with gr.Row(): + self.split_mode = gr.Checkbox( + label="Split Mode", + value=self.config.get("flux1.split_mode", False), + info="Split mode for Flux1", + interactive=True, + ) + self.train_blocks = gr.Dropdown( + label="Train Blocks", + choices=["all", "double", "single"], + value=self.config.get("flux1.train_blocks", "all"), + interactive=True, + ) + self.t5xxl_max_token_length = gr.Number( + label="T5-XXL Max Token Length", + value=self.config.get("flux1.t5xxl_max_token_length", 512), + info="Max token length for T5-XXL", + minimum=0, + maximum=4096, + step=1, + interactive=True, + ) self.flux1_checkbox.change( lambda flux1_checkbox: gr.Accordion(visible=flux1_checkbox), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 166e5721e..6a46e4c65 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -248,6 +248,9 @@ def save_configuration( discrete_flow_shift, model_prediction_type, timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -467,6 +470,9 @@ def open_configuration( discrete_flow_shift, model_prediction_type, timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, training_preset, ): # Get list of function parameters and their values @@ -717,6 +723,9 @@ def train_model( discrete_flow_shift, model_prediction_type, timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1228,7 +1237,7 @@ def train_model( "max_bucket_reso": max_bucket_reso, "max_grad_norm": max_grad_norm, "max_timestep": max_timestep if max_timestep != 0 else None, - "max_token_length": int(max_token_length), + "max_token_length": int(max_token_length) if not flux1_checkbox else None, "max_train_epochs": ( int(max_train_epochs) if int(max_train_epochs) != 0 else None ), @@ -1325,6 +1334,9 @@ def train_model( "discrete_flow_shift": discrete_flow_shift if flux1_checkbox else None, "model_prediction_type": model_prediction_type if flux1_checkbox else None, "timestep_sampling": timestep_sampling if flux1_checkbox else None, + "split_mode": split_mode if flux1_checkbox else None, + "train_blocks": train_blocks if flux1_checkbox else None, + "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -2468,6 +2480,9 @@ def update_LoRA_settings( flux1_training.discrete_flow_shift, flux1_training.model_prediction_type, 
flux1_training.timestep_sampling, + flux1_training.split_mode, + flux1_training.train_blocks, + flux1_training.t5xxl_max_token_length, ] configuration.button_open_config.click( From d3fe25c9e0fc3ca2dfd1d49a84e31203f6bbfe08 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 17 Aug 2024 06:27:45 -0400 Subject: [PATCH 059/199] Update to latest sd-scripts version of flux.1 --- sd-scripts | 2 +- test/img/10_darius kawasaki person/Dariusz_Zawadzki.cap | 1 - test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.cap | 1 - test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.cap | 1 - test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.cap | 1 - test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.cap | 1 - test/img/10_darius kawasaki person/Dariusz_Zawadzki_6.cap | 1 - test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.cap | 1 - test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.cap | 1 - 9 files changed, 1 insertion(+), 9 deletions(-) delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki.cap delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.cap delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.cap delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.cap delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.cap delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki_6.cap delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.cap delete mode 100644 test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.cap diff --git a/sd-scripts b/sd-scripts index e45d3f863..25f77f6ef 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit e45d3f8634c6dd4e358a8c7972f7c851f18f94d3 +Subproject commit 25f77f6ef04ee760506338e7e7f9835c28657c59 diff --git a/test/img/10_darius kawasaki person/Dariusz_Zawadzki.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki.cap deleted file mode 100644 index 5a5dfda1e..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki.cap +++ /dev/null @@ -1 +0,0 @@ -solo,simple background,teeth,grey background,from side,no humans,mask,1other,science fiction,cable,gas mask,tube,steampunk,machine diff --git a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.cap deleted file mode 100644 index 25472ac97..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_2.cap +++ /dev/null @@ -1 +0,0 @@ -no humans,what diff --git a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.cap deleted file mode 100644 index 4ff2864c0..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_3.cap +++ /dev/null @@ -1 +0,0 @@ -1girl,solo,nude,colored skin,monster,blue skin diff --git a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.cap deleted file mode 100644 index 0dcbb2813..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_4.cap +++ /dev/null @@ -1 +0,0 @@ -solo,upper body,horns,from side,no humans,blood,1other diff --git a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.cap deleted file mode 100644 index 21cb7ea5c..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_5.cap +++ /dev/null @@ -1 +0,0 @@ -solo,1boy,male focus,mask,instrument,science fiction,realistic,music,gas mask diff --git a/test/img/10_darius kawasaki 
person/Dariusz_Zawadzki_6.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki_6.cap deleted file mode 100644 index caa9c38ab..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_6.cap +++ /dev/null @@ -1 +0,0 @@ -solo,no humans,mask,helmet,robot,mecha,1other,science fiction,damaged,gas mask,steampunk diff --git a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.cap deleted file mode 100644 index 6984985fc..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_7.cap +++ /dev/null @@ -1 +0,0 @@ -solo,from side,no humans,mask,moon,helmet,portrait,1other,ambiguous gender,gas mask diff --git a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.cap b/test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.cap deleted file mode 100644 index 515665b66..000000000 --- a/test/img/10_darius kawasaki person/Dariusz_Zawadzki_8.cap +++ /dev/null @@ -1 +0,0 @@ -outdoors,sky,cloud,no humans,monster,realistic,desert From 1e5ce98ee94040dddbe389a5a5f01dd41b4d5352 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 17 Aug 2024 07:05:10 -0400 Subject: [PATCH 060/199] Adding guidance_scale option --- kohya_gui/class_flux1.py | 9 +++++++++ kohya_gui/lora_gui.py | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 940bd6743..47c2d8ce5 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -241,6 +241,15 @@ def noise_offset_type_change( value=self.config.get("flux1.train_blocks", "all"), interactive=True, ) + self.guidance_scale = gr.Number( + label="Guidance Scale", + value=self.config.get("flux1.guidance_scale", 3.5), + info="Guidance scale for Flux1", + minimum=0, + maximum=1024, + step=.1, + interactive=True, + ) self.t5xxl_max_token_length = gr.Number( label="T5-XXL Max Token Length", value=self.config.get("flux1.t5xxl_max_token_length", 512), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 6a46e4c65..983568628 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -251,6 +251,7 @@ def save_configuration( split_mode, train_blocks, t5xxl_max_token_length, + guidance_scale, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -473,6 +474,7 @@ def open_configuration( split_mode, train_blocks, t5xxl_max_token_length, + guidance_scale, training_preset, ): # Get list of function parameters and their values @@ -726,6 +728,7 @@ def train_model( split_mode, train_blocks, t5xxl_max_token_length, + guidance_scale, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1337,6 +1340,7 @@ def train_model( "split_mode": split_mode if flux1_checkbox else None, "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, + "guidance_scale": guidance_scale if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -2483,6 +2487,7 @@ def update_LoRA_settings( flux1_training.split_mode, flux1_training.train_blocks, flux1_training.t5xxl_max_token_length, + flux1_training.guidance_scale, ] configuration.button_open_config.click( From 7a3d8fe3a1d5c1c8b7089201a049621da347144b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 18 Aug 2024 09:57:32 -0400 Subject: [PATCH 061/199] Update to latest sd3 flux.1 sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 25f77f6ef..a45048892 160000 --- a/sd-scripts +++ 
b/sd-scripts @@ -1 +1 @@ -Subproject commit 25f77f6ef04ee760506338e7e7f9835c28657c59 +Subproject commit a45048892802dce43e86a7e377ba84e89b51fdf5 From 647295aa8812c4a5bac412562d263108c13825e8 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 18 Aug 2024 11:29:15 -0400 Subject: [PATCH 062/199] Add dreambooth and finetuning support for flux.1 --- kohya_gui/class_flux1.py | 132 ++++------------- kohya_gui/dreambooth_gui.py | 113 +++++++++++++- kohya_gui/finetune_gui.py | 114 +++++++++++++- kohya_gui/lora_gui.py | 8 +- presets/lora/flux1D - adamw8bit fp8.json | 180 +++++++++++++++++++++++ 5 files changed, 432 insertions(+), 115 deletions(-) create mode 100644 presets/lora/flux1D - adamw8bit fp8.json diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 47c2d8ce5..dce268622 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -1,29 +1,12 @@ import gradio as gr from typing import Tuple from .common_gui import ( - get_folder_path, get_any_file_path, - list_files, - list_dirs, - create_refresh_button, document_symbol, ) class flux1Training: - """ - This class configures and initializes the advanced training settings for a machine learning model, - including options for headless operation, fine-tuning, training type selection, and default directory paths. - - Attributes: - headless (bool): If True, run without the Gradio interface. - finetuning (bool): If True, enables fine-tuning of the model. - training_type (str): Specifies the type of training to perform. - no_token_padding (gr.Checkbox): Checkbox to disable token padding. - gradient_accumulation_steps (gr.Slider): Slider to set the number of gradient accumulation steps. - weighted_captions (gr.Checkbox): Checkbox to enable weighted captions. - """ - def __init__( self, headless: bool = False, @@ -32,15 +15,6 @@ def __init__( config: dict = {}, flux1_checkbox: gr.Checkbox = False, ) -> None: - """ - Initializes the AdvancedTraining class with given settings. - - Parameters: - headless (bool): Run in headless mode without GUI. - finetuning (bool): Enable model fine-tuning. - training_type (str): The type of training to be performed. - config (dict): Configuration options for the training process. - """ self.headless = headless self.finetuning = finetuning self.training_type = training_type @@ -51,15 +25,6 @@ def __init__( def noise_offset_type_change( noise_offset_type: str, ) -> Tuple[gr.Group, gr.Group]: - """ - Returns a tuple of Gradio Groups with visibility set based on the noise offset type. - - Parameters: - noise_offset_type (str): The selected noise offset type. - - Returns: - Tuple[gr.Group, gr.Group]: A tuple containing two Gradio Group elements with their visibility set. 
- """ if noise_offset_type == "Original": return (gr.Group(visible=True), gr.Group(visible=False)) else: @@ -69,30 +34,6 @@ def noise_offset_type_change( "Flux.1", open=True, elem_id="flux1_tab", visible=False ) as flux1_accordion: with gr.Group(): - # gr.Markdown("### Flux.1 Specific Parameters") - # with gr.Row(): - # self.weighting_scheme = gr.Dropdown( - # label="Weighting Scheme", - # choices=["logit_normal", "sigma_sqrt", "mode", "cosmap"], - # value=self.config.get("flux1.weighting_scheme", "logit_normal"), - # interactive=True, - # ) - # self.logit_mean = gr.Number( - # label="Logit Mean", - # value=self.config.get("flux1.logit_mean", 0.0), - # interactive=True, - # ) - # self.logit_std = gr.Number( - # label="Logit Std", - # value=self.config.get("flux1.logit_std", 1.0), - # interactive=True, - # ) - # self.mode_scale = gr.Number( - # label="Mode Scale", - # value=self.config.get("flux1.mode_scale", 1.29), - # interactive=True, - # ) - with gr.Row(): self.ae = gr.Textbox( label="VAE Path", @@ -130,24 +71,6 @@ def noise_offset_type_change( show_progress=False, ) - # self.clip_g = gr.Textbox( - # label="CLIP-G Path", - # placeholder="Path to CLIP-G model", - # value=self.config.get("flux1.clip_g", ""), - # interactive=True, - # ) - # self.clip_g_button = gr.Button( - # document_symbol, - # elem_id="open_folder_small", - # visible=(not headless), - # interactive=True, - # ) - # self.clip_g_button.click( - # get_any_file_path, - # outputs=self.clip_g, - # show_progress=False, - # ) - self.t5xxl = gr.Textbox( label="T5-XXL Path", placeholder="Path to T5-XXL model", @@ -166,31 +89,7 @@ def noise_offset_type_change( show_progress=False, ) - # with gr.Row(): - # self.save_clip = gr.Checkbox( - # label="Save CLIP models", - # value=self.config.get("flux1.save_clip", False), - # interactive=True, - # ) - # self.save_t5xxl = gr.Checkbox( - # label="Save T5-XXL model", - # value=self.config.get("flux1.save_t5xxl", False), - # interactive=True, - # ) - with gr.Row(): - # self.t5xxl_device = gr.Textbox( - # label="T5-XXL Device", - # placeholder="Device for T5-XXL (e.g., cuda:0)", - # value=self.config.get("flux1.t5xxl_device", ""), - # interactive=True, - # ) - # self.t5xxl_dtype = gr.Dropdown( - # label="T5-XXL Dtype", - # choices=["float32", "fp16", "bf16"], - # value=self.config.get("flux1.t5xxl_dtype", "bf16"), - # interactive=True, - # ) self.discrete_flow_shift = gr.Number( label="Discrete Flow Shift", @@ -259,6 +158,37 @@ def noise_offset_type_change( step=1, interactive=True, ) + with gr.Row(visible=True if finetuning else False): + self.blockwise_fused_optimizer = gr.Checkbox( + label="Blockwise Fused Optimizer", + value=self.config.get("flux1.blockwise_fused_optimizer", False), + info="Enable blockwise optimizers for fused backward pass and optimizer step", + interactive=True, + ) + self.cpu_offload_checkpointing = gr.Checkbox( + label="CPU Offload Checkpointing", + value=self.config.get("flux1.cpu_offload_checkpointing", False), + info="[Experimental] Enable offloading of tensors to CPU during checkpointing", + interactive=True, + ) + self.single_blocks_to_swap = gr.Slider( + label="Single Blocks to swap", + value=self.config.get("flux1.single_blocks_to_swap", 0), + info="[Experimental] Sets the number of 'single_blocks' (~320MB) to swap during the forward and backward passes.", + minimum=0, + maximum=19, + step=1, + interactive=True, + ) + self.double_blocks_to_swap = gr.Slider( + label="Double Blocks to swap", + value=self.config.get("flux1.double_blocks_to_swap", 0), + 
info="[Experimental] Sets the number of 'double_blocks' (~640MB) to swap during the forward and backward passes.", + minimum=0, + maximum=38, + step=1, + interactive=True, + ) self.flux1_checkbox.change( lambda flux1_checkbox: gr.Accordion(visible=flux1_checkbox), diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 7cbd3d82b..6227d10cb 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -33,6 +33,7 @@ from .class_huggingface import HuggingFace from .class_metadata import MetaData from .class_sdxl_parameters import SDXLParameters +from .class_flux1 import flux1Training from .dreambooth_folder_creation_gui import ( gradio_dreambooth_folder_creation_tab, @@ -62,6 +63,7 @@ def save_configuration( v2, v_parameterization, sdxl, + flux1_checkbox, logging_dir, train_data_dir, reg_data_dir, @@ -204,6 +206,24 @@ def save_configuration( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, + + # Flux.1 + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + flux1_clip_l, + flux1_t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, + guidance_scale, + blockwise_fused_optimizer, + cpu_offload_checkpointing, + single_blocks_to_swap, + double_blocks_to_swap, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -244,6 +264,7 @@ def open_configuration( v2, v_parameterization, sdxl, + flux1_checkbox, logging_dir, train_data_dir, reg_data_dir, @@ -386,6 +407,24 @@ def open_configuration( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, + + # Flux.1 + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + flux1_clip_l, + flux1_t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, + guidance_scale, + blockwise_fused_optimizer, + cpu_offload_checkpointing, + single_blocks_to_swap, + double_blocks_to_swap, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -421,6 +460,7 @@ def train_model( v2, v_parameterization, sdxl, + flux1_checkbox, logging_dir, train_data_dir, reg_data_dir, @@ -563,6 +603,24 @@ def train_model( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, + + # Flux.1 + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + flux1_clip_l, + flux1_t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, + guidance_scale, + blockwise_fused_optimizer, + cpu_offload_checkpointing, + single_blocks_to_swap, + double_blocks_to_swap, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -779,12 +837,14 @@ def train_model( run_cmd.append(rf'{scriptdir}/sd-scripts/sdxl_train.py') elif sd3_checkbox: run_cmd.append(rf"{scriptdir}/sd-scripts/sd3_train.py") + elif flux1_checkbox: + run_cmd.append(rf"{scriptdir}/sd-scripts/flux_train.py") else: run_cmd.append(rf"{scriptdir}/sd-scripts/train_db.py") - cache_text_encoder_outputs = (sdxl and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) + cache_text_encoder_outputs = (sdxl and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) or (flux1_checkbox and flux1_cache_text_encoder_outputs) + cache_text_encoder_outputs_to_disk = (sd3_checkbox and sd3_cache_text_encoder_outputs_to_disk) or (flux1_checkbox 
and flux1_cache_text_encoder_outputs_to_disk) no_half_vae = sdxl and sdxl_no_half_vae - if max_data_loader_n_workers == "" or None: max_data_loader_n_workers = 0 else: @@ -811,9 +871,11 @@ def train_model( "cache_latents": cache_latents, "cache_latents_to_disk": cache_latents_to_disk, "cache_text_encoder_outputs": cache_text_encoder_outputs, + "cache_text_encoder_outputs_to_disk": cache_text_encoder_outputs_to_disk, "caption_dropout_every_n_epochs": int(caption_dropout_every_n_epochs), "caption_dropout_rate": caption_dropout_rate, "caption_extension": caption_extension, + "clip_l": flux1_clip_l if flux1_checkbox else clip_l if sd3_checkbox else None, "clip_skip": clip_skip if clip_skip != 0 else None, "color_aug": color_aug, "dataset_config": dataset_config, @@ -924,6 +986,7 @@ def train_model( "stop_text_encoder_training": ( stop_text_encoder_training if stop_text_encoder_training != 0 else None ), + "t5xxl": t5xxl if sd3_checkbox else flux1_t5xxl if flux1_checkbox else None, "train_batch_size": train_batch_size, "train_data_dir": train_data_dir, "train_text_encoder": train_text_encoder if sdxl else None, @@ -938,10 +1001,10 @@ def train_model( "xformers": True if xformers == "xformers" else None, # SD3 only Parameters - # "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, - "cache_text_encoder_outputs_to_disk": sd3_cache_text_encoder_outputs_to_disk if sd3_checkbox else None, + # "cache_text_encoder_outputs": see previous assignment above for code + # "cache_text_encoder_outputs_to_disk": see previous assignment above for code "clip_g": clip_g if sd3_checkbox else None, - "clip_l": clip_l if sd3_checkbox else None, + # "clip_l": see previous assignment above for code "logit_mean": logit_mean if sd3_checkbox else None, "logit_std": logit_std if sd3_checkbox else None, "mode_scale": mode_scale if sd3_checkbox else None, @@ -952,6 +1015,24 @@ def train_model( "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, "text_encoder_batch_size": sd3_text_encoder_batch_size if sd3_checkbox else None, "weighting_scheme": weighting_scheme if sd3_checkbox else None, + + # Flux.1 specific parameters + # "cache_text_encoder_outputs": see previous assignment above for code + # "cache_text_encoder_outputs_to_disk": see previous assignment above for code + "ae": ae if flux1_checkbox else None, + # "clip_l": see previous assignment above for code + # "t5xxl": see previous assignment above for code + "discrete_flow_shift": discrete_flow_shift if flux1_checkbox else None, + "model_prediction_type": model_prediction_type if flux1_checkbox else None, + "timestep_sampling": timestep_sampling if flux1_checkbox else None, + "split_mode": split_mode if flux1_checkbox else None, + "train_blocks": train_blocks if flux1_checkbox else None, + "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, + "guidance_scale": guidance_scale if flux1_checkbox else None, + "blockwise_fused_optimizer": blockwise_fused_optimizer if flux1_checkbox else None, + "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, + "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, + "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -1089,6 +1170,9 @@ def dreambooth_tab( sdxl_params = SDXLParameters( source_model.sdxl_checkbox, config=config, trainer="finetune", ) + + # Add FLUX1 Parameters + flux1_training = flux1Training(headless=headless, config=config, 
flux1_checkbox=source_model.flux1_checkbox, finetuning=True) # Add SD3 Parameters sd3_training = sd3Training(headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox) @@ -1123,6 +1207,7 @@ def dreambooth_tab( source_model.v2, source_model.v_parameterization, source_model.sdxl_checkbox, + source_model.flux1_checkbox, folders.logging_dir, source_model.train_data_dir, folders.reg_data_dir, @@ -1264,6 +1349,24 @@ def dreambooth_tab( sd3_training.sd3_text_encoder_batch_size, sd3_training.weighting_scheme, source_model.sd3_checkbox, + + # Flux1 parameters + flux1_training.flux1_cache_text_encoder_outputs, + flux1_training.flux1_cache_text_encoder_outputs_to_disk, + flux1_training.ae, + flux1_training.clip_l, + flux1_training.t5xxl, + flux1_training.discrete_flow_shift, + flux1_training.model_prediction_type, + flux1_training.timestep_sampling, + flux1_training.split_mode, + flux1_training.train_blocks, + flux1_training.t5xxl_max_token_length, + flux1_training.guidance_scale, + flux1_training.blockwise_fused_optimizer, + flux1_training.cpu_offload_checkpointing, + flux1_training.single_blocks_to_swap, + flux1_training.double_blocks_to_swap, ] configuration.button_open_config.click( diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 824c7c6f0..6a142e489 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -35,6 +35,7 @@ from .class_huggingface import HuggingFace from .class_metadata import MetaData from .class_gui_config import KohyaSSGUIConfig +from .class_flux1 import flux1Training from .custom_logging import setup_logging @@ -66,6 +67,7 @@ def save_configuration( v2, v_parameterization, sdxl_checkbox, + flux1_checkbox, train_dir, image_folder, output_dir, @@ -211,6 +213,24 @@ def save_configuration( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, + + # Flux.1 + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + flux1_clip_l, + flux1_t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, + guidance_scale, + blockwise_fused_optimizer, + cpu_offload_checkpointing, + single_blocks_to_swap, + double_blocks_to_swap, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -254,6 +274,7 @@ def open_configuration( v2, v_parameterization, sdxl_checkbox, + flux1_checkbox, train_dir, image_folder, output_dir, @@ -400,6 +421,24 @@ def open_configuration( weighting_scheme, sd3_checkbox, + # Flux.1 + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + flux1_clip_l, + flux1_t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, + guidance_scale, + blockwise_fused_optimizer, + cpu_offload_checkpointing, + single_blocks_to_swap, + double_blocks_to_swap, + training_preset, ): # Get list of function parameters and values @@ -449,6 +488,7 @@ def train_model( v2, v_parameterization, sdxl_checkbox, + flux1_checkbox, train_dir, image_folder, output_dir, @@ -594,6 +634,24 @@ def train_model( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, + + # Flux.1 + flux1_cache_text_encoder_outputs, + flux1_cache_text_encoder_outputs_to_disk, + ae, + flux1_clip_l, + flux1_t5xxl, + discrete_flow_shift, + model_prediction_type, + timestep_sampling, + split_mode, + train_blocks, + t5xxl_max_token_length, + guidance_scale, + blockwise_fused_optimizer, + cpu_offload_checkpointing, + 
single_blocks_to_swap, + double_blocks_to_swap, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -828,6 +886,8 @@ def train_model( run_cmd.append(rf"{scriptdir}/sd-scripts/sdxl_train.py") elif sd3_checkbox: run_cmd.append(rf"{scriptdir}/sd-scripts/sd3_train.py") + elif flux1_checkbox: + run_cmd.append(rf"{scriptdir}/sd-scripts/flux_train.py") else: run_cmd.append(rf"{scriptdir}/sd-scripts/fine_tune.py") @@ -836,7 +896,8 @@ def train_model( if use_latent_files == "Yes" else f"{train_dir}/{caption_metadata_filename}" ) - cache_text_encoder_outputs = (sdxl and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) + cache_text_encoder_outputs = (sdxl_checkbox and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) or (flux1_checkbox and flux1_cache_text_encoder_outputs) + cache_text_encoder_outputs_to_disk = (sd3_checkbox and sd3_cache_text_encoder_outputs_to_disk) or (flux1_checkbox and flux1_cache_text_encoder_outputs_to_disk) no_half_vae = sdxl_checkbox and sdxl_no_half_vae if max_data_loader_n_workers == "" or None: @@ -861,9 +922,11 @@ def train_model( "cache_latents": cache_latents, "cache_latents_to_disk": cache_latents_to_disk, "cache_text_encoder_outputs": cache_text_encoder_outputs, + "cache_text_encoder_outputs_to_disk": cache_text_encoder_outputs_to_disk, "caption_dropout_every_n_epochs": int(caption_dropout_every_n_epochs), "caption_dropout_rate": caption_dropout_rate, "caption_extension": caption_extension, + "clip_l": flux1_clip_l if flux1_checkbox else clip_l if sd3_checkbox else None, "clip_skip": clip_skip if clip_skip != 0 else None, "color_aug": color_aug, "dataset_config": dataset_config, @@ -970,6 +1033,7 @@ def train_model( "sdpa": True if xformers == "sdpa" else None, "seed": int(seed) if int(seed) != 0 else None, "shuffle_caption": shuffle_caption, + "t5xxl": t5xxl if sd3_checkbox else flux1_t5xxl if flux1_checkbox else None, "train_batch_size": train_batch_size, "train_data_dir": image_folder, "train_text_encoder": train_text_encoder, @@ -984,20 +1048,38 @@ def train_model( "xformers": True if xformers == "xformers" else None, # SD3 only Parameters - # "cache_text_encoder_outputs": cache_text_encoder_outputs if sd3_checkbox else None, - "cache_text_encoder_outputs_to_disk": sd3_cache_text_encoder_outputs_to_disk if sd3_checkbox else None, + # "cache_text_encoder_outputs": see previous assignment above for code + # "cache_text_encoder_outputs_to_disk": see previous assignment above for code "clip_g": clip_g if sd3_checkbox else None, - "clip_l": clip_l if sd3_checkbox else None, + # "clip_l": see previous assignment above for code "logit_mean": logit_mean if sd3_checkbox else None, "logit_std": logit_std if sd3_checkbox else None, "mode_scale": mode_scale if sd3_checkbox else None, "save_clip": save_clip if sd3_checkbox else None, "save_t5xxl": save_t5xxl if sd3_checkbox else None, - "t5xxl": t5xxl if sd3_checkbox else None, + # "t5xxl": see previous assignment above for code "t5xxl_device": t5xxl_device if sd3_checkbox else None, "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, "text_encoder_batch_size": sd3_text_encoder_batch_size if sd3_checkbox else None, "weighting_scheme": weighting_scheme if sd3_checkbox else None, + + # Flux.1 specific parameters + # "cache_text_encoder_outputs": see previous assignment above for code + # "cache_text_encoder_outputs_to_disk": see previous assignment above for code + "ae": ae if flux1_checkbox else None, + # "clip_l": see 
previous assignment above for code + # "t5xxl": see previous assignment above for code + "discrete_flow_shift": discrete_flow_shift if flux1_checkbox else None, + "model_prediction_type": model_prediction_type if flux1_checkbox else None, + "timestep_sampling": timestep_sampling if flux1_checkbox else None, + "split_mode": split_mode if flux1_checkbox else None, + "train_blocks": train_blocks if flux1_checkbox else None, + "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, + "guidance_scale": guidance_scale if flux1_checkbox else None, + "blockwise_fused_optimizer": blockwise_fused_optimizer if flux1_checkbox else None, + "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, + "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, + "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -1189,6 +1271,9 @@ def list_presets(path): train_text_encoder = gr.Checkbox( label="Train text encoder", value=True ) + + # Add FLUX1 Parameters + flux1_training = flux1Training(headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox, finetuning=True) # Add SD3 Parameters sd3_training = sd3Training(headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox) @@ -1240,6 +1325,7 @@ def list_presets(path): source_model.v2, source_model.v_parameterization, source_model.sdxl_checkbox, + source_model.flux1_checkbox, train_dir, image_folder, output_dir, @@ -1384,6 +1470,24 @@ def list_presets(path): sd3_training.sd3_text_encoder_batch_size, sd3_training.weighting_scheme, source_model.sd3_checkbox, + + # Flux1 parameters + flux1_training.flux1_cache_text_encoder_outputs, + flux1_training.flux1_cache_text_encoder_outputs_to_disk, + flux1_training.ae, + flux1_training.clip_l, + flux1_training.t5xxl, + flux1_training.discrete_flow_shift, + flux1_training.model_prediction_type, + flux1_training.timestep_sampling, + flux1_training.split_mode, + flux1_training.train_blocks, + flux1_training.t5xxl_max_token_length, + flux1_training.guidance_scale, + flux1_training.blockwise_fused_optimizer, + flux1_training.cpu_offload_checkpointing, + flux1_training.single_blocks_to_swap, + flux1_training.double_blocks_to_swap, ] configuration.button_open_config.click( diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 983568628..a3a0aee3c 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1603,14 +1603,10 @@ def list_presets(path): minimum=0, maximum=128, ) - # Add SDXL Parameters sdxl_params = SDXLParameters( source_model.sdxl_checkbox, config=config ) - - # Add FLUX1 Parameters - flux1_training = flux1Training(headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox) # LyCORIS Specific parameters with gr.Accordion("LyCORIS", visible=False) as lycoris_accordion: @@ -2194,6 +2190,10 @@ def update_LoRA_settings( results.append(settings["gr_type"](**update_params)) return tuple(results) + + with gr.Group(): + # Add FLUX1 Parameters + flux1_training = flux1Training(headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox) with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): # with gr.Accordion('Advanced Configuration', open=False): diff --git a/presets/lora/flux1D - adamw8bit fp8.json b/presets/lora/flux1D - adamw8bit fp8.json new file mode 100644 index 000000000..4928137aa --- /dev/null +++ b/presets/lora/flux1D - adamw8bit fp8.json @@ -0,0 +1,180 @@ +{ + "LoRA_type": "Flux1", + 
"LyCORIS_preset": "full", + "adaptive_noise_scale": 0, + "additional_parameters": "", + "ae": "H:/ComfyUI2/models/vae/ae.sft", + "async_upload": false, + "block_alphas": "", + "block_dims": "", + "block_lr_zero_threshold": "", + "bucket_no_upscale": false, + "bucket_reso_steps": 32, + "bypass_mode": false, + "cache_latents": true, + "cache_latents_to_disk": true, + "caption_dropout_every_n_epochs": 0, + "caption_dropout_rate": 0, + "caption_extension": ".txt", + "clip_l": "H:/ComfyUI2/models/clip/clip_l.safetensors", + "clip_skip": 1, + "color_aug": false, + "constrain": 0, + "conv_alpha": 64, + "conv_block_alphas": "", + "conv_block_dims": "", + "conv_dim": 64, + "dataset_config": "", + "debiased_estimation_loss": false, + "decompose_both": false, + "dim_from_weights": false, + "discrete_flow_shift": 1, + "dora_wd": false, + "down_lr_weight": "", + "dynamo_backend": "no", + "dynamo_mode": "default", + "dynamo_use_dynamic": false, + "dynamo_use_fullgraph": false, + "enable_bucket": true, + "epoch": 25, + "extra_accelerate_launch_args": "", + "factor": -1, + "flip_aug": false, + "flux1_cache_text_encoder_outputs": true, + "flux1_cache_text_encoder_outputs_to_disk": true, + "flux1_checkbox": true, + "fp8_base": true, + "full_bf16": false, + "full_fp16": false, + "gpu_ids": "", + "gradient_accumulation_steps": 1, + "gradient_checkpointing": true, + "guidance_scale": 3.5, + "highvram": true, + "huber_c": 0.1, + "huber_schedule": "snr", + "huggingface_path_in_repo": "", + "huggingface_repo_id": "", + "huggingface_repo_type": "", + "huggingface_repo_visibility": "", + "huggingface_token": "", + "ip_noise_gamma": 0, + "ip_noise_gamma_random_strength": false, + "keep_tokens": 0, + "learning_rate": 0.0001, + "log_config": false, + "log_tracker_config": "", + "log_tracker_name": "", + "log_with": "", + "logging_dir": "./test/logs", + "loraplus_lr_ratio": 0, + "loraplus_text_encoder_lr_ratio": 0, + "loraplus_unet_lr_ratio": 0, + "loss_type": "l2", + "lowvram": false, + "lr_scheduler": "cosine_with_restarts", + "lr_scheduler_args": "", + "lr_scheduler_num_cycles": 5, + "lr_scheduler_power": 1, + "lr_scheduler_type": "", + "lr_warmup": 0, + "main_process_port": 0, + "masked_loss": false, + "max_bucket_reso": 2048, + "max_data_loader_n_workers": 0, + "max_grad_norm": 1, + "max_resolution": "512,512", + "max_timestep": 1000, + "max_token_length": 75, + "max_train_epochs": 25, + "max_train_steps": 4000, + "mem_eff_attn": false, + "metadata_author": "", + "metadata_description": "", + "metadata_license": "", + "metadata_tags": "", + "metadata_title": "", + "mid_lr_weight": "", + "min_bucket_reso": 256, + "min_snr_gamma": 10, + "min_timestep": 0, + "mixed_precision": "bf16", + "model_list": "custom", + "model_prediction_type": "raw", + "module_dropout": 0, + "multi_gpu": false, + "multires_noise_discount": 0.2, + "multires_noise_iterations": 8, + "network_alpha": 8, + "network_dim": 8, + "network_dropout": 0, + "network_weights": "", + "noise_offset": 0, + "noise_offset_random_strength": false, + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "num_machines": 1, + "num_processes": 1, + "optimizer": "AdamW8bit", + "optimizer_args": "", + "output_dir": "./test/output", + "output_name": "Flux.1-dev-test-v3.1", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "E:\\models\\flux1\\flux1-dev.safetensors", + "prior_loss_weight": 1, + "random_crop": false, + "rank_dropout": 0, + "rank_dropout_scale": false, + "reg_data_dir": "", + "rescaled": false, + "resume": "", 
+ "resume_from_huggingface": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 200, + "sample_prompts": "a painting of a man wearing a funny hat, by darius kawasaki --w 768 --h 768 --s 20 --l 3", + "sample_sampler": "euler", + "save_as_bool": false, + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "bf16", + "save_state": false, + "save_state_on_train_end": false, + "save_state_to_huggingface": false, + "scale_v_pred_loss_like_noise_pred": false, + "scale_weight_norms": 0, + "sdxl": false, + "sdxl_cache_text_encoder_outputs": true, + "sdxl_no_half_vae": true, + "seed": 42, + "shuffle_caption": false, + "split_mode": false, + "stop_text_encoder_training": 0, + "t5xxl": "H:/ComfyUI2/models/clip/t5xxl_fp16.safetensors", + "t5xxl_max_token_length": 512, + "text_encoder_lr": 0, + "timestep_sampling": "sigma", + "train_batch_size": 1, + "train_blocks": "all", + "train_data_dir": "./test/img", + "train_norm": false, + "train_on_input": true, + "training_comment": "", + "unet_lr": 0.0001, + "unit": 1, + "up_lr_weight": "", + "use_cp": false, + "use_scalar": false, + "use_tucker": false, + "v2": false, + "v_parameterization": false, + "v_pred_like_loss": 0, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "wandb_run_name": "", + "weighted_captions": false, + "xformers": "sdpa" +} \ No newline at end of file From 6d153624d5eb7edce99447ecf7664f6d99906bf7 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 18 Aug 2024 11:32:33 -0400 Subject: [PATCH 063/199] Update README --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7320e8d0c..7a1d20de8 100644 --- a/README.md +++ b/README.md @@ -475,4 +475,6 @@ To finetune HunyuanDiT models or create LoRAs, visit this [fork](https://github. ## Change History -See release information. +Added support for SD3 (Dreambooth and Finetuning) and Flux.1 (Dreambooth, LoRA and Finetuning). + +See for more details. 
From 8df754f7616d44a24f7672184466c064118c8152 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 18 Aug 2024 11:47:25 -0400 Subject: [PATCH 064/199] Fix t5xxl path issue in DB --- kohya_gui/dreambooth_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 6227d10cb..e56f15780 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -1010,7 +1010,7 @@ def train_model( "mode_scale": mode_scale if sd3_checkbox else None, "save_clip": save_clip if sd3_checkbox else None, "save_t5xxl": save_t5xxl if sd3_checkbox else None, - "t5xxl": t5xxl if sd3_checkbox else None, + # "t5xxl": see previous assignment above for code "t5xxl_device": t5xxl_device if sd3_checkbox else None, "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, "text_encoder_batch_size": sd3_text_encoder_batch_size if sd3_checkbox else None, From 2611de717b689ffa14f0ef65991305cb3b7b8012 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 18 Aug 2024 11:58:08 -0400 Subject: [PATCH 065/199] add missing fp8_base parameter --- kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index e56f15780..d1bab8a45 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -88,6 +88,7 @@ def save_configuration( caption_extension, enable_bucket, gradient_checkpointing, + fp8_base, full_fp16, full_bf16, no_token_padding, @@ -289,6 +290,7 @@ def open_configuration( caption_extension, enable_bucket, gradient_checkpointing, + fp8_base, full_fp16, full_bf16, no_token_padding, @@ -485,6 +487,7 @@ def train_model( caption_extension, enable_bucket, gradient_checkpointing, + fp8_base, full_fp16, full_bf16, no_token_padding, @@ -885,6 +888,7 @@ def train_model( "enable_bucket": enable_bucket, "epoch": int(epoch), "flip_aug": flip_aug, + "fp8_base": fp8_base, "full_bf16": full_bf16, "full_fp16": full_fp16, "fused_backward_pass": fused_backward_pass, @@ -1232,6 +1236,7 @@ def dreambooth_tab( basic_training.caption_extension, basic_training.enable_bucket, advanced_training.gradient_checkpointing, + advanced_training.fp8_base, advanced_training.full_fp16, advanced_training.full_bf16, advanced_training.no_token_padding, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 6a142e489..7aa0a09af 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -119,6 +119,7 @@ def save_configuration( save_state_on_train_end, resume, gradient_checkpointing, + fp8_base, gradient_accumulation_steps, block_lr, mem_eff_attn, @@ -326,6 +327,7 @@ def open_configuration( save_state_on_train_end, resume, gradient_checkpointing, + fp8_base, gradient_accumulation_steps, block_lr, mem_eff_attn, @@ -540,6 +542,7 @@ def train_model( save_state_on_train_end, resume, gradient_checkpointing, + fp8_base, gradient_accumulation_steps, block_lr, mem_eff_attn, @@ -936,6 +939,7 @@ def train_model( "dynamo_backend": dynamo_backend, "enable_bucket": True, "flip_aug": flip_aug, + "fp8_base": fp8_base, "full_bf16": full_bf16, "full_fp16": full_fp16, "fused_backward_pass": fused_backward_pass, @@ -1376,6 +1380,7 @@ def list_presets(path): advanced_training.save_state_on_train_end, advanced_training.resume, advanced_training.gradient_checkpointing, + advanced_training.fp8_base, gradient_accumulation_steps, block_lr, advanced_training.mem_eff_attn, From 3b2c62291290496f7b471b8d2dbd41245dcdd97a Mon Sep 17 00:00:00 2001 
From: bmaltais Date: Sun, 18 Aug 2024 13:47:17 -0400 Subject: [PATCH 066/199] Fix issue with guidance scale not being passed as float for values like 1 --- kohya_gui/lora_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index a3a0aee3c..41a1bbd9b 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1340,7 +1340,7 @@ def train_model( "split_mode": split_mode if flux1_checkbox else None, "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, - "guidance_scale": guidance_scale if flux1_checkbox else None, + "guidance_scale": float(guidance_scale) if flux1_checkbox else None, } # Given dictionary `config_toml_data` From 8aa658f7f08e2207a4060076096c7a52c0769b6f Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 18 Aug 2024 17:45:10 -0400 Subject: [PATCH 067/199] Temporary fir for blockwise_fused_optimizers --- kohya_gui/dreambooth_gui.py | 1 + kohya_gui/finetune_gui.py | 1 + kohya_gui/lora_gui.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index d1bab8a45..e50fe1450 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -1034,6 +1034,7 @@ def train_model( "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": guidance_scale if flux1_checkbox else None, "blockwise_fused_optimizer": blockwise_fused_optimizer if flux1_checkbox else None, + "blockwise_fused_optimizers": blockwise_fused_optimizer if flux1_checkbox else None, # temporary fix for flux1 sd-scripts bug "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 7aa0a09af..626987e4a 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -1081,6 +1081,7 @@ def train_model( "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": guidance_scale if flux1_checkbox else None, "blockwise_fused_optimizer": blockwise_fused_optimizer if flux1_checkbox else None, + "blockwise_fused_optimizers": blockwise_fused_optimizer if flux1_checkbox else None, # temporary fix for flux1 sd-scripts bug "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 41a1bbd9b..2dc7892a8 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1334,7 +1334,7 @@ def train_model( "ae": ae if flux1_checkbox else None, "clip_l": clip_l if flux1_checkbox else None, "t5xxl": t5xxl if flux1_checkbox else None, - "discrete_flow_shift": discrete_flow_shift if flux1_checkbox else None, + "discrete_flow_shift": float(discrete_flow_shift) if flux1_checkbox else None, "model_prediction_type": model_prediction_type if flux1_checkbox else None, "timestep_sampling": timestep_sampling if flux1_checkbox else None, "split_mode": split_mode if flux1_checkbox else None, From 67ab8f4d159692e30b4344c2635650861f66c937 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 19 Aug 2024 10:12:16 -0400 Subject: [PATCH 068/199] Update to latest 
sd-scripts Flux.1 code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index a45048892..486fe8f70 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit a45048892802dce43e86a7e377ba84e89b51fdf5 +Subproject commit 486fe8f70a53166f21f08b1c896bd9ba1e31d7e7 From 215a05388b992e4b0c9ba5ac6d7b18e3dcec400d Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 19 Aug 2024 10:16:22 -0400 Subject: [PATCH 069/199] Fix blockwise_fused_optimizers typo --- kohya_gui/class_flux1.py | 4 ++-- kohya_gui/dreambooth_gui.py | 11 +++++------ kohya_gui/finetune_gui.py | 11 +++++------ 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index dce268622..4abeed8e8 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -159,9 +159,9 @@ def noise_offset_type_change( interactive=True, ) with gr.Row(visible=True if finetuning else False): - self.blockwise_fused_optimizer = gr.Checkbox( + self.blockwise_fused_optimizers = gr.Checkbox( label="Blockwise Fused Optimizer", - value=self.config.get("flux1.blockwise_fused_optimizer", False), + value=self.config.get("flux1.blockwise_fused_optimizers", False), info="Enable blockwise optimizers for fused backward pass and optimizer step", interactive=True, ) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index e50fe1450..7f9fce55d 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -221,7 +221,7 @@ def save_configuration( train_blocks, t5xxl_max_token_length, guidance_scale, - blockwise_fused_optimizer, + blockwise_fused_optimizers, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -423,7 +423,7 @@ def open_configuration( train_blocks, t5xxl_max_token_length, guidance_scale, - blockwise_fused_optimizer, + blockwise_fused_optimizers, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -620,7 +620,7 @@ def train_model( train_blocks, t5xxl_max_token_length, guidance_scale, - blockwise_fused_optimizer, + blockwise_fused_optimizers, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -1033,8 +1033,7 @@ def train_model( "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": guidance_scale if flux1_checkbox else None, - "blockwise_fused_optimizer": blockwise_fused_optimizer if flux1_checkbox else None, - "blockwise_fused_optimizers": blockwise_fused_optimizer if flux1_checkbox else None, # temporary fix for flux1 sd-scripts bug + "blockwise_fused_optimizers": blockwise_fused_optimizers if flux1_checkbox else None, "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, @@ -1369,7 +1368,7 @@ def dreambooth_tab( flux1_training.train_blocks, flux1_training.t5xxl_max_token_length, flux1_training.guidance_scale, - flux1_training.blockwise_fused_optimizer, + flux1_training.blockwise_fused_optimizers, flux1_training.cpu_offload_checkpointing, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 626987e4a..7da2a076f 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -228,7 +228,7 @@ def save_configuration( train_blocks, 
t5xxl_max_token_length, guidance_scale, - blockwise_fused_optimizer, + blockwise_fused_optimizers, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -436,7 +436,7 @@ def open_configuration( train_blocks, t5xxl_max_token_length, guidance_scale, - blockwise_fused_optimizer, + blockwise_fused_optimizers, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -651,7 +651,7 @@ def train_model( train_blocks, t5xxl_max_token_length, guidance_scale, - blockwise_fused_optimizer, + blockwise_fused_optimizers, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -1080,8 +1080,7 @@ def train_model( "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": guidance_scale if flux1_checkbox else None, - "blockwise_fused_optimizer": blockwise_fused_optimizer if flux1_checkbox else None, - "blockwise_fused_optimizers": blockwise_fused_optimizer if flux1_checkbox else None, # temporary fix for flux1 sd-scripts bug + "blockwise_fused_optimizers": blockwise_fused_optimizers if flux1_checkbox else None, "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, @@ -1490,7 +1489,7 @@ def list_presets(path): flux1_training.train_blocks, flux1_training.t5xxl_max_token_length, flux1_training.guidance_scale, - flux1_training.blockwise_fused_optimizer, + flux1_training.blockwise_fused_optimizers, flux1_training.cpu_offload_checkpointing, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, From f57973fbc560606717f36632104d2b2c8573a922 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 19 Aug 2024 10:35:25 -0400 Subject: [PATCH 070/199] Add mem_eff_save option to GUI for Flux.1 --- kohya_gui/class_flux1.py | 6 ++++++ kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 6 +++++- kohya_gui/lora_gui.py | 5 +++++ 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 4abeed8e8..db08fe808 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -158,6 +158,12 @@ def noise_offset_type_change( step=1, interactive=True, ) + self.mem_eff_save = gr.Checkbox( + label="Memory Efficient Save", + value=self.config.get("flux1.mem_eff_save", False), + info="[Experimentsl] Enable memory efficient save. 
We do not recommend using it unless you are familiar with the code.", + interactive=True, + ) with gr.Row(visible=True if finetuning else False): self.blockwise_fused_optimizers = gr.Checkbox( label="Blockwise Fused Optimizer", diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 7f9fce55d..0e8ca86e1 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -225,6 +225,7 @@ def save_configuration( cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, + mem_eff_save, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -427,6 +428,7 @@ def open_configuration( cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, + mem_eff_save, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -624,6 +626,7 @@ def train_model( cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, + mem_eff_save, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1037,6 +1040,7 @@ def train_model( "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, + "mem_eff_save": mem_eff_save if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -1372,6 +1376,7 @@ def dreambooth_tab( flux1_training.cpu_offload_checkpointing, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, + flux1_training.mem_eff_save, ] configuration.button_open_config.click( diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 7da2a076f..182196274 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -232,6 +232,7 @@ def save_configuration( cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, + mem_eff_save, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -440,7 +441,7 @@ def open_configuration( cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, - + mem_eff_save, training_preset, ): # Get list of function parameters and values @@ -655,6 +656,7 @@ def train_model( cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, + mem_eff_save, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1084,6 +1086,7 @@ def train_model( "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, + "mem_eff_save": mem_eff_save if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -1493,6 +1496,7 @@ def list_presets(path): flux1_training.cpu_offload_checkpointing, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, + flux1_training.mem_eff_save, ] configuration.button_open_config.click( diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 2dc7892a8..01db63da2 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -252,6 +252,7 @@ def save_configuration( train_blocks, t5xxl_max_token_length, guidance_scale, + mem_eff_save, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -475,6 +476,7 @@ def open_configuration( train_blocks, t5xxl_max_token_length, guidance_scale, + mem_eff_save, training_preset, ): # Get list of 
function parameters and their values @@ -729,6 +731,7 @@ def train_model( train_blocks, t5xxl_max_token_length, guidance_scale, + mem_eff_save, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1341,6 +1344,7 @@ def train_model( "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": float(guidance_scale) if flux1_checkbox else None, + "mem_eff_save": mem_eff_save if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -2488,6 +2492,7 @@ def update_LoRA_settings( flux1_training.train_blocks, flux1_training.t5xxl_max_token_length, flux1_training.guidance_scale, + flux1_training.mem_eff_save, ] configuration.button_open_config.click( From 6e03055787ce120cace7bb63ccc06b2f7116a48f Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 19 Aug 2024 11:25:20 -0400 Subject: [PATCH 071/199] Added support for Flux.1 LoRA Merge --- kohya_gui/class_lora_tab.py | 2 + kohya_gui/flux_merge_lora_gui.py | 465 +++++++++++++++++++++++++++++++ 2 files changed, 467 insertions(+) create mode 100644 kohya_gui/flux_merge_lora_gui.py diff --git a/kohya_gui/class_lora_tab.py b/kohya_gui/class_lora_tab.py index 487ff5cbc..ec1e53693 100644 --- a/kohya_gui/class_lora_tab.py +++ b/kohya_gui/class_lora_tab.py @@ -8,6 +8,7 @@ from .extract_lycoris_locon_gui import gradio_extract_lycoris_locon_tab from .extract_lora_from_dylora_gui import gradio_extract_dylora_tab from .merge_lycoris_gui import gradio_merge_lycoris_tab +from .flux_merge_lora_gui import GradioFluxMergeLoRaTab class LoRATools: @@ -25,3 +26,4 @@ def __init__( gradio_svd_merge_lora_tab(headless=headless) gradio_resize_lora_tab(headless=headless) gradio_verify_lora_tab(headless=headless) + GradioFluxMergeLoRaTab(headless=headless) diff --git a/kohya_gui/flux_merge_lora_gui.py b/kohya_gui/flux_merge_lora_gui.py new file mode 100644 index 000000000..59b4c8053 --- /dev/null +++ b/kohya_gui/flux_merge_lora_gui.py @@ -0,0 +1,465 @@ +# Standard library imports +import os +import subprocess +import sys +import json + +# Third-party imports +import gradio as gr + +# Local module imports +from .common_gui import ( + get_saveasfilename_path, + get_file_path, + scriptdir, + list_files, + create_refresh_button, + setup_environment, +) +from .custom_logging import setup_logging + +# Set up logging +log = setup_logging() + +folder_symbol = "\U0001f4c2" # 📂 +refresh_symbol = "\U0001f504" # 🔄 +save_style_symbol = "\U0001f4be" # 💾 +document_symbol = "\U0001F4C4" # 📄 + +PYTHON = sys.executable + + +def check_model(model): + if not model: + return True + if not os.path.isfile(model): + log.info(f"The provided {model} is not a file") + return False + return True + + +def verify_conditions(flux_model, lora_models): + lora_models_count = sum(1 for model in lora_models if model) + if flux_model and lora_models_count >= 1: + return True + elif not flux_model and lora_models_count >= 2: + return True + return False + + +class GradioFluxMergeLoRaTab: + def __init__(self, headless=False): + self.headless = headless + self.build_tab() + + def save_inputs_to_json(self, file_path, inputs): + with open(file_path, "w", encoding="utf-8") as file: + json.dump(inputs, file) + log.info(f"Saved inputs to {file_path}") + + def load_inputs_from_json(self, file_path): + with open(file_path, "r", encoding="utf-8") as file: + inputs = json.load(file) + log.info(f"Loaded inputs from {file_path}") + return inputs + + def build_tab(self): + 
current_flux_model_dir = os.path.join(scriptdir, "outputs") + current_save_dir = os.path.join(scriptdir, "outputs") + current_lora_model_dir = current_flux_model_dir + + def list_flux_models(path): + nonlocal current_flux_model_dir + current_flux_model_dir = path + return list(list_files(path, exts=[".safetensors"], all=True)) + + def list_lora_models(path): + nonlocal current_lora_model_dir + current_lora_model_dir = path + return list(list_files(path, exts=[".safetensors"], all=True)) + + def list_save_to(path): + nonlocal current_save_dir + current_save_dir = path + return list(list_files(path, exts=[".safetensors"], all=True)) + + with gr.Tab("Merge FLUX LoRA"): + gr.Markdown( + "This utility can merge up to 4 LoRA into a FLUX model or alternatively merge up to 4 LoRA together." + ) + + lora_ext = gr.Textbox(value="*.safetensors", visible=False) + lora_ext_name = gr.Textbox(value="LoRA model types", visible=False) + flux_ext = gr.Textbox(value="*.safetensors", visible=False) + flux_ext_name = gr.Textbox(value="FLUX model types", visible=False) + + with gr.Group(), gr.Row(): + flux_model = gr.Dropdown( + label="FLUX Model (Optional. FLUX model path, if you want to merge it with LoRA files via the 'concat' method)", + interactive=True, + choices=[""] + list_flux_models(current_flux_model_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + flux_model, + lambda: None, + lambda: {"choices": list_flux_models(current_flux_model_dir)}, + "open_folder_small", + ) + flux_model_file = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not self.headless), + ) + flux_model_file.click( + get_file_path, + inputs=[flux_model, flux_ext, flux_ext_name], + outputs=flux_model, + show_progress=False, + ) + + flux_model.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_flux_models(path)), + inputs=flux_model, + outputs=flux_model, + show_progress=False, + ) + + with gr.Group(), gr.Row(): + lora_a_model = gr.Dropdown( + label='LoRA model "A" (path to the LoRA A model)', + interactive=True, + choices=[""] + list_lora_models(current_lora_model_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + lora_a_model, + lambda: None, + lambda: {"choices": list_lora_models(current_lora_model_dir)}, + "open_folder_small", + ) + button_lora_a_model_file = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not self.headless), + ) + button_lora_a_model_file.click( + get_file_path, + inputs=[lora_a_model, lora_ext, lora_ext_name], + outputs=lora_a_model, + show_progress=False, + ) + + lora_b_model = gr.Dropdown( + label='LoRA model "B" (path to the LoRA B model)', + interactive=True, + choices=[""] + list_lora_models(current_lora_model_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + lora_b_model, + lambda: None, + lambda: {"choices": list_lora_models(current_lora_model_dir)}, + "open_folder_small", + ) + button_lora_b_model_file = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not self.headless), + ) + button_lora_b_model_file.click( + get_file_path, + inputs=[lora_b_model, lora_ext, lora_ext_name], + outputs=lora_b_model, + show_progress=False, + ) + + lora_a_model.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_lora_models(path)), + inputs=lora_a_model, + outputs=lora_a_model, + show_progress=False, + ) + lora_b_model.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_lora_models(path)), + 
inputs=lora_b_model, + outputs=lora_b_model, + show_progress=False, + ) + + with gr.Row(): + ratio_a = gr.Slider( + label="Model A merge ratio (eg: 0.5 mean 50%)", + minimum=0, + maximum=1, + step=0.01, + value=0.0, + interactive=True, + ) + + ratio_b = gr.Slider( + label="Model B merge ratio (eg: 0.5 mean 50%)", + minimum=0, + maximum=1, + step=0.01, + value=0.0, + interactive=True, + ) + + with gr.Group(), gr.Row(): + lora_c_model = gr.Dropdown( + label='LoRA model "C" (path to the LoRA C model)', + interactive=True, + choices=[""] + list_lora_models(current_lora_model_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + lora_c_model, + lambda: None, + lambda: {"choices": list_lora_models(current_lora_model_dir)}, + "open_folder_small", + ) + button_lora_c_model_file = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not self.headless), + ) + button_lora_c_model_file.click( + get_file_path, + inputs=[lora_c_model, lora_ext, lora_ext_name], + outputs=lora_c_model, + show_progress=False, + ) + + lora_d_model = gr.Dropdown( + label='LoRA model "D" (path to the LoRA D model)', + interactive=True, + choices=[""] + list_lora_models(current_lora_model_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + lora_d_model, + lambda: None, + lambda: {"choices": list_lora_models(current_lora_model_dir)}, + "open_folder_small", + ) + button_lora_d_model_file = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not self.headless), + ) + button_lora_d_model_file.click( + get_file_path, + inputs=[lora_d_model, lora_ext, lora_ext_name], + outputs=lora_d_model, + show_progress=False, + ) + lora_c_model.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_lora_models(path)), + inputs=lora_c_model, + outputs=lora_c_model, + show_progress=False, + ) + lora_d_model.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_lora_models(path)), + inputs=lora_d_model, + outputs=lora_d_model, + show_progress=False, + ) + + with gr.Row(): + ratio_c = gr.Slider( + label="Model C merge ratio (eg: 0.5 mean 50%)", + minimum=0, + maximum=1, + step=0.01, + value=0.0, + interactive=True, + ) + + ratio_d = gr.Slider( + label="Model D merge ratio (eg: 0.5 mean 50%)", + minimum=0, + maximum=1, + step=0.01, + value=0.0, + interactive=True, + ) + + with gr.Group(), gr.Row(): + save_to = gr.Dropdown( + label="Save to (path for the file to save...)", + interactive=True, + choices=[""] + list_save_to(current_save_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + save_to, + lambda: None, + lambda: {"choices": list_save_to(current_save_dir)}, + "open_folder_small", + ) + button_save_to = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not self.headless), + ) + button_save_to.click( + get_saveasfilename_path, + inputs=[save_to, lora_ext, lora_ext_name], + outputs=save_to, + show_progress=False, + ) + precision = gr.Radio( + label="Merge precision", + choices=["float", "fp16", "bf16"], + value="float", + interactive=True, + ) + save_precision = gr.Radio( + label="Save precision", + choices=["float", "fp16", "bf16"], + value="fp16", + interactive=True, + ) + + save_to.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_save_to(path)), + inputs=save_to, + outputs=save_to, + show_progress=False, + ) + + with gr.Row(): + loading_device = gr.Dropdown( + label="Loading device", + choices=["cpu", "cuda"], + value="cpu", + 
interactive=True, + ) + working_device = gr.Dropdown( + label="Working device", + choices=["cpu", "cuda"], + value="cpu", + interactive=True, + ) + + with gr.Row(): + concat = gr.Checkbox(label="Concat LoRA", value=False) + shuffle = gr.Checkbox(label="Shuffle LoRA weights", value=False) + no_metadata = gr.Checkbox(label="Don't save metadata", value=False) + + merge_button = gr.Button("Merge model") + + merge_button.click( + self.merge_flux_lora, + inputs=[ + flux_model, + lora_a_model, + lora_b_model, + lora_c_model, + lora_d_model, + ratio_a, + ratio_b, + ratio_c, + ratio_d, + save_to, + precision, + save_precision, + loading_device, + working_device, + concat, + shuffle, + no_metadata, + ], + show_progress=False, + ) + + def merge_flux_lora( + self, + flux_model, + lora_a_model, + lora_b_model, + lora_c_model, + lora_d_model, + ratio_a, + ratio_b, + ratio_c, + ratio_d, + save_to, + precision, + save_precision, + loading_device, + working_device, + concat, + shuffle, + no_metadata, + ): + log.info("Merge FLUX LoRA...") + models = [ + lora_a_model, + lora_b_model, + lora_c_model, + lora_d_model, + ] + lora_models = [model for model in models if model] + ratios = [ratio for model, ratio in zip(models, [ratio_a, ratio_b, ratio_c, ratio_d]) if model] + + if not verify_conditions(flux_model, lora_models): + log.info( + "Warning: Either provide at least one LoRA model along with the FLUX model or at least two LoRA models if no FLUX model is provided." + ) + return + + for model in [flux_model] + lora_models: + if not check_model(model): + return + + run_cmd = [rf"{PYTHON}", rf"{scriptdir}/sd-scripts/networks/flux_merge_lora.py"] + + if flux_model: + run_cmd.extend(["--flux_model", rf"{flux_model}"]) + + run_cmd.extend([ + "--save_precision", save_precision, + "--precision", precision, + "--save_to", rf"{save_to}", + "--loading_device", loading_device, + "--working_device", working_device, + ]) + + if lora_models: + run_cmd.append("--models") + run_cmd.extend(lora_models) + run_cmd.append("--ratios") + run_cmd.extend(map(str, ratios)) + + if concat: + run_cmd.append("--concat") + if shuffle: + run_cmd.append("--shuffle") + if no_metadata: + run_cmd.append("--no_metadata") + + env = setup_environment() + + # Reconstruct the safe command string for display + command_to_run = " ".join(run_cmd) + log.info(f"Executing command: {command_to_run}") + + # Run the command in the sd-scripts folder context + subprocess.run(run_cmd, env=env) + + log.info("Done merging...") \ No newline at end of file From da2d629c77a66da2535d1b19a4a3bea6bcd5e4a0 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 20 Aug 2024 17:21:40 -0400 Subject: [PATCH 072/199] Update to latest sd-scripts sd3 branch code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 486fe8f70..6ab48b09d 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 486fe8f70a53166f21f08b1c896bd9ba1e31d7e7 +Subproject commit 6ab48b09d8e46973d5e5fa47baeae3a464d06d04 From 0ee4053ec94fd31feaeb2bcc0d4585e2b531afd4 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 20 Aug 2024 17:24:56 -0400 Subject: [PATCH 073/199] Add diffusers option to flux.1 merge LoRA utility --- kohya_gui/flux_merge_lora_gui.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kohya_gui/flux_merge_lora_gui.py b/kohya_gui/flux_merge_lora_gui.py index 59b4c8053..4f97fc869 100644 --- a/kohya_gui/flux_merge_lora_gui.py +++ b/kohya_gui/flux_merge_lora_gui.py @@ -360,6 +360,7 @@ def list_save_to(path): concat = 
gr.Checkbox(label="Concat LoRA", value=False) shuffle = gr.Checkbox(label="Shuffle LoRA weights", value=False) no_metadata = gr.Checkbox(label="Don't save metadata", value=False) + diffusers = gr.Checkbox(label="Diffusers LoRA", value=False) merge_button = gr.Button("Merge model") @@ -383,6 +384,7 @@ def list_save_to(path): concat, shuffle, no_metadata, + diffusers, ], show_progress=False, ) @@ -406,6 +408,7 @@ def merge_flux_lora( concat, shuffle, no_metadata, + difffusers, ): log.info("Merge FLUX LoRA...") models = [ @@ -452,6 +455,8 @@ def merge_flux_lora( run_cmd.append("--shuffle") if no_metadata: run_cmd.append("--no_metadata") + if difffusers: + run_cmd.append("--diffusers") env = setup_environment() From 33937c22846bc5ae5978b67e7b7da584ac1f3f47 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 21 Aug 2024 07:52:46 -0400 Subject: [PATCH 074/199] Fix issue with split_mode and train_blocks --- kohya_gui/class_flux1.py | 34 +++++--- kohya_gui/dreambooth_gui.py | 127 ++++++++++++++++++------------ kohya_gui/finetune_gui.py | 99 ++++++++++++++--------- kohya_gui/lora_gui.py | 120 +++++++++++++++++++--------- sd-scripts | 2 +- test/config/dataset-multires.toml | 40 ++++++++++ 6 files changed, 283 insertions(+), 139 deletions(-) create mode 100644 test/config/dataset-multires.toml diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index db08fe808..e9ec461df 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -112,19 +112,10 @@ def noise_offset_type_change( value=self.config.get("flux1.timestep_sampling", "sigma"), interactive=True, ) - - self.flux1_cache_text_encoder_outputs = gr.Checkbox( - label="Cache Text Encoder Outputs", - value=self.config.get("flux1.cache_text_encoder_outputs", False), - info="Cache text encoder outputs to speed up inference", - interactive=True, - ) - self.flux1_cache_text_encoder_outputs_to_disk = gr.Checkbox( - label="Cache Text Encoder Outputs to Disk", - value=self.config.get( - "flux1.cache_text_encoder_outputs_to_disk", False - ), - info="Cache text encoder outputs to disk to speed up inference", + self.apply_t5_attn_mask = gr.Checkbox( + label="Apply T5 Attention Mask", + value=self.config.get("flux1.apply_t5_attn_mask", False), + info="Apply attention mask to T5-XXL encode and FLUX double blocks ", interactive=True, ) with gr.Row(): @@ -158,12 +149,29 @@ def noise_offset_type_change( step=1, interactive=True, ) + + with gr.Row(): + self.flux1_cache_text_encoder_outputs = gr.Checkbox( + label="Cache Text Encoder Outputs", + value=self.config.get("flux1.cache_text_encoder_outputs", False), + info="Cache text encoder outputs to speed up inference", + interactive=True, + ) + self.flux1_cache_text_encoder_outputs_to_disk = gr.Checkbox( + label="Cache Text Encoder Outputs to Disk", + value=self.config.get( + "flux1.cache_text_encoder_outputs_to_disk", False + ), + info="Cache text encoder outputs to disk to speed up inference", + interactive=True, + ) self.mem_eff_save = gr.Checkbox( label="Memory Efficient Save", value=self.config.get("flux1.mem_eff_save", False), info="[Experimentsl] Enable memory efficient save. 
We do not recommend using it unless you are familiar with the code.", interactive=True, ) + with gr.Row(visible=True if finetuning else False): self.blockwise_fused_optimizers = gr.Checkbox( label="Blockwise Fused Optimizer", diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 0e8ca86e1..09681cc60 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -17,7 +17,9 @@ SaveConfigFile, scriptdir, update_my_data, - validate_file_path, validate_folder_path, validate_model_path, + validate_file_path, + validate_folder_path, + validate_model_path, validate_args_setting, setup_environment, ) @@ -190,7 +192,6 @@ def save_configuration( metadata_license, metadata_tags, metadata_title, - # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, @@ -207,7 +208,6 @@ def save_configuration( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, - # Flux.1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, @@ -226,6 +226,7 @@ def save_configuration( single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -393,7 +394,6 @@ def open_configuration( metadata_license, metadata_tags, metadata_title, - # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, @@ -410,7 +410,6 @@ def open_configuration( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, - # Flux.1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, @@ -429,6 +428,7 @@ def open_configuration( single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -591,7 +591,6 @@ def train_model( metadata_license, metadata_tags, metadata_title, - # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, @@ -608,7 +607,6 @@ def train_model( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, - # Flux.1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, @@ -627,6 +625,7 @@ def train_model( single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -647,42 +646,46 @@ def train_model( log.info(f"Validating lr scheduler arguments...") if not validate_args_setting(lr_scheduler_args): return - + log.info(f"Validating optimizer arguments...") if not validate_args_setting(optimizer_args): return TRAIN_BUTTON_VISIBLE # # Validate paths - # - + # + if not validate_file_path(dataset_config): return TRAIN_BUTTON_VISIBLE - + if not validate_file_path(log_tracker_config): return TRAIN_BUTTON_VISIBLE - - if not validate_folder_path(logging_dir, can_be_written_to=True, create_if_not_exists=True): + + if not validate_folder_path( + logging_dir, can_be_written_to=True, create_if_not_exists=True + ): return TRAIN_BUTTON_VISIBLE - - if not validate_folder_path(output_dir, can_be_written_to=True, create_if_not_exists=True): + + if not validate_folder_path( + output_dir, can_be_written_to=True, create_if_not_exists=True + ): return TRAIN_BUTTON_VISIBLE - + if not validate_model_path(pretrained_model_name_or_path): return TRAIN_BUTTON_VISIBLE - + if not validate_folder_path(reg_data_dir): return TRAIN_BUTTON_VISIBLE - + if not validate_folder_path(resume): return TRAIN_BUTTON_VISIBLE - + if not 
validate_folder_path(train_data_dir): return TRAIN_BUTTON_VISIBLE - + if not validate_model_path(vae): return TRAIN_BUTTON_VISIBLE - + # # End of path validation # @@ -821,7 +824,7 @@ def train_model( log.error("accelerate not found") return TRAIN_BUTTON_VISIBLE - run_cmd = [rf'{accelerate_path}', "launch"] + run_cmd = [rf"{accelerate_path}", "launch"] run_cmd = AccelerateLaunch.run_cmd( run_cmd=run_cmd, @@ -840,16 +843,22 @@ def train_model( ) if sdxl: - run_cmd.append(rf'{scriptdir}/sd-scripts/sdxl_train.py') + run_cmd.append(rf"{scriptdir}/sd-scripts/sdxl_train.py") elif sd3_checkbox: run_cmd.append(rf"{scriptdir}/sd-scripts/sd3_train.py") elif flux1_checkbox: run_cmd.append(rf"{scriptdir}/sd-scripts/flux_train.py") else: run_cmd.append(rf"{scriptdir}/sd-scripts/train_db.py") - - cache_text_encoder_outputs = (sdxl and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) or (flux1_checkbox and flux1_cache_text_encoder_outputs) - cache_text_encoder_outputs_to_disk = (sd3_checkbox and sd3_cache_text_encoder_outputs_to_disk) or (flux1_checkbox and flux1_cache_text_encoder_outputs_to_disk) + + cache_text_encoder_outputs = ( + (sdxl and sdxl_cache_text_encoder_outputs) + or (sd3_checkbox and sd3_cache_text_encoder_outputs) + or (flux1_checkbox and flux1_cache_text_encoder_outputs) + ) + cache_text_encoder_outputs_to_disk = ( + sd3_checkbox and sd3_cache_text_encoder_outputs_to_disk + ) or (flux1_checkbox and flux1_cache_text_encoder_outputs_to_disk) no_half_vae = sdxl and sdxl_no_half_vae if max_data_loader_n_workers == "" or None: max_data_loader_n_workers = 0 @@ -862,9 +871,8 @@ def train_model( max_train_steps = int(max_train_steps) if sdxl: - train_text_encoder = ( - (learning_rate_te1 != None and learning_rate_te1 > 0) or - (learning_rate_te2 != None and learning_rate_te2 > 0) + train_text_encoder = (learning_rate_te1 != None and learning_rate_te1 > 0) or ( + learning_rate_te2 != None and learning_rate_te2 > 0 ) # def save_huggingface_to_toml(self, toml_file_path: str): @@ -895,7 +903,9 @@ def train_model( "full_bf16": full_bf16, "full_fp16": full_fp16, "fused_backward_pass": fused_backward_pass, - "fused_optimizer_groups": int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None, + "fused_optimizer_groups": ( + int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None + ), "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, "huber_c": huber_c, @@ -909,9 +919,9 @@ def train_model( "ip_noise_gamma_random_strength": ip_noise_gamma_random_strength, "keep_tokens": int(keep_tokens), "learning_rate": learning_rate, # both for sd1.5 and sdxl - "learning_rate_te": learning_rate_te if not sdxl else None, # only for sd1.5 - "learning_rate_te1": learning_rate_te1 if sdxl else None, # only for sdxl - "learning_rate_te2": learning_rate_te2 if sdxl else None, # only for sdxl + "learning_rate_te": learning_rate_te if not sdxl else None, # only for sd1.5 + "learning_rate_te1": learning_rate_te1 if sdxl else None, # only for sdxl + "learning_rate_te2": learning_rate_te2 if sdxl else None, # only for sdxl "logging_dir": logging_dir, "log_config": log_config, "log_tracker_config": log_tracker_config, @@ -921,7 +931,9 @@ def train_model( "lr_scheduler": lr_scheduler, "lr_scheduler_args": str(lr_scheduler_args).replace('"', "").split(), "lr_scheduler_num_cycles": ( - int(lr_scheduler_num_cycles) if lr_scheduler_num_cycles != "" else int(epoch) + int(lr_scheduler_num_cycles) + if 
lr_scheduler_num_cycles != "" + else int(epoch) ), "lr_scheduler_power": lr_scheduler_power, "lr_scheduler_type": lr_scheduler_type if lr_scheduler_type != "" else None, @@ -930,7 +942,9 @@ def train_model( "max_bucket_reso": max_bucket_reso, "max_timestep": max_timestep if max_timestep != 0 else None, "max_token_length": int(max_token_length), - "max_train_epochs": int(max_train_epochs) if int(max_train_epochs) != 0 else None, + "max_train_epochs": ( + int(max_train_epochs) if int(max_train_epochs) != 0 else None + ), "max_train_steps": int(max_train_steps) if int(max_train_steps) != 0 else None, "mem_eff_attn": mem_eff_attn, "metadata_author": metadata_author, @@ -1006,7 +1020,6 @@ def train_model( "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, - # SD3 only Parameters # "cache_text_encoder_outputs": see previous assignment above for code # "cache_text_encoder_outputs_to_disk": see previous assignment above for code @@ -1020,9 +1033,10 @@ def train_model( # "t5xxl": see previous assignment above for code "t5xxl_device": t5xxl_device if sd3_checkbox else None, "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, - "text_encoder_batch_size": sd3_text_encoder_batch_size if sd3_checkbox else None, + "text_encoder_batch_size": ( + sd3_text_encoder_batch_size if sd3_checkbox else None + ), "weighting_scheme": weighting_scheme if sd3_checkbox else None, - # Flux.1 specific parameters # "cache_text_encoder_outputs": see previous assignment above for code # "cache_text_encoder_outputs_to_disk": see previous assignment above for code @@ -1036,11 +1050,16 @@ def train_model( "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": guidance_scale if flux1_checkbox else None, - "blockwise_fused_optimizers": blockwise_fused_optimizers if flux1_checkbox else None, - "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, + "blockwise_fused_optimizers": ( + blockwise_fused_optimizers if flux1_checkbox else None + ), + "cpu_offload_checkpointing": ( + cpu_offload_checkpointing if flux1_checkbox else None + ), "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, "mem_eff_save": mem_eff_save if flux1_checkbox else None, + "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -1058,8 +1077,8 @@ def train_model( current_datetime = datetime.now() formatted_datetime = current_datetime.strftime("%Y%m%d-%H%M%S") - tmpfilename = fr"{output_dir}/config_dreambooth-{formatted_datetime}.toml" - + tmpfilename = rf"{output_dir}/config_dreambooth-{formatted_datetime}.toml" + # Save the updated TOML data back to the file with open(tmpfilename, "w", encoding="utf-8") as toml_file: toml.dump(config_toml_data, toml_file) @@ -1068,7 +1087,7 @@ def train_model( log.error(f"Failed to write TOML file: {toml_file.name}") run_cmd.append(f"--config_file") - run_cmd.append(rf'{tmpfilename}') + run_cmd.append(rf"{tmpfilename}") # Initialize a dictionary with always-included keyword arguments kwargs_for_training = { @@ -1173,17 +1192,26 @@ def dreambooth_tab( sdxl_checkbox=source_model.sdxl_checkbox, config=config, ) - + # Add SDXL Parameters sdxl_params = SDXLParameters( - source_model.sdxl_checkbox, config=config, trainer="finetune", 
+ source_model.sdxl_checkbox, + config=config, + trainer="finetune", ) - + # Add FLUX1 Parameters - flux1_training = flux1Training(headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox, finetuning=True) + flux1_training = flux1Training( + headless=headless, + config=config, + flux1_checkbox=source_model.flux1_checkbox, + finetuning=True, + ) # Add SD3 Parameters - sd3_training = sd3Training(headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox) + sd3_training = sd3Training( + headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox + ) with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): advanced_training = AdvancedTraining(headless=headless, config=config) @@ -1341,7 +1369,6 @@ def dreambooth_tab( metadata.metadata_license, metadata.metadata_tags, metadata.metadata_title, - # SD3 Parameters sd3_training.sd3_cache_text_encoder_outputs, sd3_training.sd3_cache_text_encoder_outputs_to_disk, @@ -1358,7 +1385,6 @@ def dreambooth_tab( sd3_training.sd3_text_encoder_batch_size, sd3_training.weighting_scheme, source_model.sd3_checkbox, - # Flux1 parameters flux1_training.flux1_cache_text_encoder_outputs, flux1_training.flux1_cache_text_encoder_outputs_to_disk, @@ -1377,6 +1403,7 @@ def dreambooth_tab( flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, flux1_training.mem_eff_save, + flux1_training.apply_t5_attn_mask, ] configuration.button_open_config.click( diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 182196274..0b51186a9 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -18,8 +18,11 @@ SaveConfigFile, scriptdir, update_my_data, - validate_file_path, validate_folder_path, validate_model_path, - validate_args_setting, setup_environment, + validate_file_path, + validate_folder_path, + validate_model_path, + validate_args_setting, + setup_environment, ) from .class_accelerate_launch import AccelerateLaunch from .class_configuration_file import ConfigurationFile @@ -197,7 +200,6 @@ def save_configuration( metadata_license, metadata_tags, metadata_title, - # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, @@ -214,7 +216,6 @@ def save_configuration( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, - # Flux.1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, @@ -233,6 +234,7 @@ def save_configuration( single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -406,7 +408,6 @@ def open_configuration( metadata_license, metadata_tags, metadata_title, - # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, @@ -423,7 +424,6 @@ def open_configuration( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, - # Flux.1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, @@ -443,6 +443,7 @@ def open_configuration( double_blocks_to_swap, mem_eff_save, training_preset, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -621,7 +622,6 @@ def train_model( metadata_license, metadata_tags, metadata_title, - # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, @@ -638,7 +638,6 @@ def train_model( sd3_text_encoder_batch_size, weighting_scheme, sd3_checkbox, - # Flux.1 flux1_cache_text_encoder_outputs, 
flux1_cache_text_encoder_outputs_to_disk, @@ -657,6 +656,7 @@ def train_model( single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -689,33 +689,37 @@ def train_model( # # Validate paths - # - + # + if not validate_file_path(dataset_config): return TRAIN_BUTTON_VISIBLE - + if not validate_folder_path(image_folder): return TRAIN_BUTTON_VISIBLE - + if not validate_file_path(log_tracker_config): return TRAIN_BUTTON_VISIBLE - - if not validate_folder_path(logging_dir, can_be_written_to=True, create_if_not_exists=True): + + if not validate_folder_path( + logging_dir, can_be_written_to=True, create_if_not_exists=True + ): return TRAIN_BUTTON_VISIBLE - - if not validate_folder_path(output_dir, can_be_written_to=True, create_if_not_exists=True): + + if not validate_folder_path( + output_dir, can_be_written_to=True, create_if_not_exists=True + ): return TRAIN_BUTTON_VISIBLE - + if not validate_model_path(pretrained_model_name_or_path): return TRAIN_BUTTON_VISIBLE - + if not validate_folder_path(resume): return TRAIN_BUTTON_VISIBLE - + # # End of path validation # - + # if not validate_paths( # dataset_config=dataset_config, # finetune_image_folder=image_folder, @@ -869,7 +873,7 @@ def train_model( log.error("accelerate not found") return TRAIN_BUTTON_VISIBLE - run_cmd = [rf'{accelerate_path}', "launch"] + run_cmd = [rf"{accelerate_path}", "launch"] run_cmd = AccelerateLaunch.run_cmd( run_cmd=run_cmd, @@ -901,8 +905,14 @@ def train_model( if use_latent_files == "Yes" else f"{train_dir}/{caption_metadata_filename}" ) - cache_text_encoder_outputs = (sdxl_checkbox and sdxl_cache_text_encoder_outputs) or (sd3_checkbox and sd3_cache_text_encoder_outputs) or (flux1_checkbox and flux1_cache_text_encoder_outputs) - cache_text_encoder_outputs_to_disk = (sd3_checkbox and sd3_cache_text_encoder_outputs_to_disk) or (flux1_checkbox and flux1_cache_text_encoder_outputs_to_disk) + cache_text_encoder_outputs = ( + (sdxl_checkbox and sdxl_cache_text_encoder_outputs) + or (sd3_checkbox and sd3_cache_text_encoder_outputs) + or (flux1_checkbox and flux1_cache_text_encoder_outputs) + ) + cache_text_encoder_outputs_to_disk = ( + sd3_checkbox and sd3_cache_text_encoder_outputs_to_disk + ) or (flux1_checkbox and flux1_cache_text_encoder_outputs_to_disk) no_half_vae = sdxl_checkbox and sdxl_no_half_vae if max_data_loader_n_workers == "" or None: @@ -945,7 +955,9 @@ def train_model( "full_bf16": full_bf16, "full_fp16": full_fp16, "fused_backward_pass": fused_backward_pass, - "fused_optimizer_groups": int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None, + "fused_optimizer_groups": ( + int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None + ), "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, "huber_c": huber_c, @@ -1052,7 +1064,6 @@ def train_model( "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, - # SD3 only Parameters # "cache_text_encoder_outputs": see previous assignment above for code # "cache_text_encoder_outputs_to_disk": see previous assignment above for code @@ -1066,9 +1077,10 @@ def train_model( # "t5xxl": see previous assignment above for code "t5xxl_device": t5xxl_device if sd3_checkbox else None, "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, - "text_encoder_batch_size": 
sd3_text_encoder_batch_size if sd3_checkbox else None, + "text_encoder_batch_size": ( + sd3_text_encoder_batch_size if sd3_checkbox else None + ), "weighting_scheme": weighting_scheme if sd3_checkbox else None, - # Flux.1 specific parameters # "cache_text_encoder_outputs": see previous assignment above for code # "cache_text_encoder_outputs_to_disk": see previous assignment above for code @@ -1082,11 +1094,16 @@ def train_model( "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": guidance_scale if flux1_checkbox else None, - "blockwise_fused_optimizers": blockwise_fused_optimizers if flux1_checkbox else None, - "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, + "blockwise_fused_optimizers": ( + blockwise_fused_optimizers if flux1_checkbox else None + ), + "cpu_offload_checkpointing": ( + cpu_offload_checkpointing if flux1_checkbox else None + ), "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, "mem_eff_save": mem_eff_save if flux1_checkbox else None, + "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -1104,7 +1121,7 @@ def train_model( current_datetime = datetime.now() formatted_datetime = current_datetime.strftime("%Y%m%d-%H%M%S") - tmpfilename = fr"{output_dir}/config_finetune-{formatted_datetime}.toml" + tmpfilename = rf"{output_dir}/config_finetune-{formatted_datetime}.toml" # Save the updated TOML data back to the file with open(tmpfilename, "w", encoding="utf-8") as toml_file: toml.dump(config_toml_data, toml_file) @@ -1270,7 +1287,9 @@ def list_presets(path): # Add SDXL Parameters sdxl_params = SDXLParameters( - source_model.sdxl_checkbox, config=config, trainer="finetune", + source_model.sdxl_checkbox, + config=config, + trainer="finetune", ) with gr.Row(): @@ -1278,12 +1297,19 @@ def list_presets(path): train_text_encoder = gr.Checkbox( label="Train text encoder", value=True ) - + # Add FLUX1 Parameters - flux1_training = flux1Training(headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox, finetuning=True) - + flux1_training = flux1Training( + headless=headless, + config=config, + flux1_checkbox=source_model.flux1_checkbox, + finetuning=True, + ) + # Add SD3 Parameters - sd3_training = sd3Training(headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox) + sd3_training = sd3Training( + headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox + ) with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): with gr.Row(): @@ -1461,7 +1487,6 @@ def list_presets(path): metadata.metadata_license, metadata.metadata_tags, metadata.metadata_title, - # SD3 Parameters sd3_training.sd3_cache_text_encoder_outputs, sd3_training.sd3_cache_text_encoder_outputs_to_disk, @@ -1478,7 +1503,6 @@ def list_presets(path): sd3_training.sd3_text_encoder_batch_size, sd3_training.weighting_scheme, source_model.sd3_checkbox, - # Flux1 parameters flux1_training.flux1_cache_text_encoder_outputs, flux1_training.flux1_cache_text_encoder_outputs_to_disk, @@ -1497,6 +1521,7 @@ def list_presets(path): flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, flux1_training.mem_eff_save, + flux1_training.apply_t5_attn_mask, ] configuration.button_open_config.click( diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 
01db63da2..ccd530998 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -19,8 +19,12 @@ SaveConfigFile, scriptdir, update_my_data, - validate_file_path, validate_folder_path, validate_model_path, validate_toml_file, - validate_args_setting, setup_environment, + validate_file_path, + validate_folder_path, + validate_model_path, + validate_toml_file, + validate_args_setting, + setup_environment, ) from .class_accelerate_launch import AccelerateLaunch from .class_configuration_file import ConfigurationFile @@ -239,7 +243,7 @@ def save_configuration( loraplus_lr_ratio, loraplus_text_encoder_lr_ratio, loraplus_unet_lr_ratio, - #Flux1 + # Flux1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, ae, @@ -253,6 +257,7 @@ def save_configuration( t5xxl_max_token_length, guidance_scale, mem_eff_save, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -477,6 +482,7 @@ def open_configuration( t5xxl_max_token_length, guidance_scale, mem_eff_save, + apply_t5_attn_mask, training_preset, ): # Get list of function parameters and their values @@ -732,6 +738,7 @@ def train_model( t5xxl_max_token_length, guidance_scale, mem_eff_save, + apply_t5_attn_mask, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -759,42 +766,46 @@ def train_model( # # Validate paths - # - + # + if not validate_file_path(dataset_config): return TRAIN_BUTTON_VISIBLE - + if not validate_file_path(log_tracker_config): return TRAIN_BUTTON_VISIBLE - - if not validate_folder_path(logging_dir, can_be_written_to=True, create_if_not_exists=True): + + if not validate_folder_path( + logging_dir, can_be_written_to=True, create_if_not_exists=True + ): return TRAIN_BUTTON_VISIBLE - + if LyCORIS_preset not in LYCORIS_PRESETS_CHOICES: if not validate_toml_file(LyCORIS_preset): return TRAIN_BUTTON_VISIBLE - + if not validate_file_path(network_weights): return TRAIN_BUTTON_VISIBLE - - if not validate_folder_path(output_dir, can_be_written_to=True, create_if_not_exists=True): + + if not validate_folder_path( + output_dir, can_be_written_to=True, create_if_not_exists=True + ): return TRAIN_BUTTON_VISIBLE - + if not validate_model_path(pretrained_model_name_or_path): return TRAIN_BUTTON_VISIBLE - + if not validate_folder_path(reg_data_dir): return TRAIN_BUTTON_VISIBLE - + if not validate_folder_path(resume): return TRAIN_BUTTON_VISIBLE - + if not validate_folder_path(train_data_dir): return TRAIN_BUTTON_VISIBLE - + if not validate_model_path(vae): return TRAIN_BUTTON_VISIBLE - + # # End of path validation # @@ -985,7 +996,7 @@ def train_model( log.error("accelerate not found") return TRAIN_BUTTON_VISIBLE - run_cmd = [rf'{accelerate_path}', "launch"] + run_cmd = [rf"{accelerate_path}", "launch"] run_cmd = AccelerateLaunch.run_cmd( run_cmd=run_cmd, @@ -1060,6 +1071,7 @@ def train_model( "conv_block_alphas", "rank_dropout", "module_dropout", + "train_blocks", ] network_module = "networks.lora_flux" kohya_lora_vars = { @@ -1067,11 +1079,16 @@ def train_model( for key, value in vars().items() if key in kohya_lora_var_list and value } - + if split_mode: + if train_blocks != "single": + log.warning( + f"train_blocks is currently set to '{train_blocks}'. split_mode is enabled, forcing train_blocks to 'single'." 
+ ) + kohya_lora_vars["train_blocks"] = "single" for key, value in kohya_lora_vars.items(): if value: network_args += f" {key}={value}" - + if LoRA_type in ["Kohya LoCon", "Standard"]: kohya_lora_var_list = [ "down_lr_weight", @@ -1091,7 +1108,7 @@ def train_model( for key, value in vars().items() if key in kohya_lora_var_list and value } - + # Not sure if Flux1 is Standard... or LoCon style... flip a coin... going for LoCon style... if LoRA_type in ["Kohya LoCon"]: network_args += f' conv_dim="{conv_dim}" conv_alpha="{conv_alpha}"' @@ -1175,13 +1192,15 @@ def train_model( network_train_text_encoder_only = text_encoder_lr_float != 0 and unet_lr_float == 0 # Flag to train unet only if its learning rate is non-zero and text encoder's is zero. network_train_unet_only = text_encoder_lr_float == 0 and unet_lr_float != 0 - + if text_encoder_lr_float != 0 or unet_lr_float != 0: do_not_set_learning_rate = True - + config_toml_data = { "adaptive_noise_scale": ( - adaptive_noise_scale if (adaptive_noise_scale != 0 and noise_offset_type == "Original") else None + adaptive_noise_scale + if (adaptive_noise_scale != 0 and noise_offset_type == "Original") + else None ), "async_upload": async_upload, "bucket_no_upscale": bucket_no_upscale, @@ -1189,7 +1208,10 @@ def train_model( "cache_latents": cache_latents, "cache_latents_to_disk": cache_latents_to_disk, "cache_text_encoder_outputs": ( - True if (sdxl and sdxl_cache_text_encoder_outputs) or (flux1_checkbox and flux1_cache_text_encoder_outputs) else None + True + if (sdxl and sdxl_cache_text_encoder_outputs) + or (flux1_checkbox and flux1_cache_text_encoder_outputs) + else None ), "caption_dropout_every_n_epochs": int(caption_dropout_every_n_epochs), "caption_dropout_rate": caption_dropout_rate, @@ -1225,7 +1247,9 @@ def train_model( "log_tracker_name": log_tracker_name, "log_tracker_config": log_tracker_config, "loraplus_lr_ratio": loraplus_lr_ratio if not 0 else None, - "loraplus_text_encoder_lr_ratio": loraplus_text_encoder_lr_ratio if not 0 else None, + "loraplus_text_encoder_lr_ratio": ( + loraplus_text_encoder_lr_ratio if not 0 else None + ), "loraplus_unet_lr_ratio": loraplus_unet_lr_ratio if not 0 else None, "loss_type": loss_type, "lowvram": lowvram, @@ -1258,9 +1282,13 @@ def train_model( "min_snr_gamma": min_snr_gamma if min_snr_gamma != 0 else None, "min_timestep": min_timestep if min_timestep != 0 else None, "mixed_precision": mixed_precision, - "multires_noise_discount": multires_noise_discount if noise_offset_type == "Multires" else None, + "multires_noise_discount": ( + multires_noise_discount if noise_offset_type == "Multires" else None + ), "multires_noise_iterations": ( - multires_noise_iterations if (multires_noise_iterations != 0 and noise_offset_type == "Multires") else None + multires_noise_iterations + if (multires_noise_iterations != 0 and noise_offset_type == "Multires") + else None ), "network_alpha": network_alpha, "network_args": str(network_args).replace('"', "").split(), @@ -1271,11 +1299,21 @@ def train_model( "network_train_text_encoder_only": network_train_text_encoder_only, "network_weights": network_weights, "no_half_vae": True if sdxl and sdxl_no_half_vae else None, - "noise_offset": noise_offset if (noise_offset != 0 and noise_offset_type == "Original") else None, - "noise_offset_random_strength": noise_offset_random_strength if noise_offset_type == "Original" else None, + "noise_offset": ( + noise_offset + if (noise_offset != 0 and noise_offset_type == "Original") + else None + ), + "noise_offset_random_strength": 
( + noise_offset_random_strength if noise_offset_type == "Original" else None + ), "noise_offset_type": noise_offset_type, "optimizer_type": optimizer, - "optimizer_args": str(optimizer_args).replace('"', "").split() if optimizer_args != [] else None, + "optimizer_args": ( + str(optimizer_args).replace('"', "").split() + if optimizer_args != [] + else None + ), "output_dir": output_dir, "output_name": output_name, "persistent_data_loader_workers": int(persistent_data_loader_workers), @@ -1330,10 +1368,11 @@ def train_model( "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, "weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, - # Flux.1 specific parameters # "cache_text_encoder_outputs": see previous assignment above for code - "cache_text_encoder_outputs_to_disk": flux1_cache_text_encoder_outputs_to_disk if flux1_checkbox else None, + "cache_text_encoder_outputs_to_disk": ( + flux1_cache_text_encoder_outputs_to_disk if flux1_checkbox else None + ), "ae": ae if flux1_checkbox else None, "clip_l": clip_l if flux1_checkbox else None, "t5xxl": t5xxl if flux1_checkbox else None, @@ -1341,10 +1380,10 @@ def train_model( "model_prediction_type": model_prediction_type if flux1_checkbox else None, "timestep_sampling": timestep_sampling if flux1_checkbox else None, "split_mode": split_mode if flux1_checkbox else None, - "train_blocks": train_blocks if flux1_checkbox else None, "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, "guidance_scale": float(guidance_scale) if flux1_checkbox else None, "mem_eff_save": mem_eff_save if flux1_checkbox else None, + "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -1362,7 +1401,7 @@ def train_model( current_datetime = datetime.now() formatted_datetime = current_datetime.strftime("%Y%m%d-%H%M%S") - tmpfilename = fr"{output_dir}/config_lora-{formatted_datetime}.toml" + tmpfilename = rf"{output_dir}/config_lora-{formatted_datetime}.toml" # Save the updated TOML data back to the file with open(tmpfilename, "w", encoding="utf-8") as toml_file: @@ -2194,10 +2233,14 @@ def update_LoRA_settings( results.append(settings["gr_type"](**update_params)) return tuple(results) - + with gr.Group(): # Add FLUX1 Parameters - flux1_training = flux1Training(headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox) + flux1_training = flux1Training( + headless=headless, + config=config, + flux1_checkbox=source_model.flux1_checkbox, + ) with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): # with gr.Accordion('Advanced Configuration', open=False): @@ -2493,6 +2536,7 @@ def update_LoRA_settings( flux1_training.t5xxl_max_token_length, flux1_training.guidance_scale, flux1_training.mem_eff_save, + flux1_training.apply_t5_attn_mask, ] configuration.button_open_config.click( diff --git a/sd-scripts b/sd-scripts index 6ab48b09d..2b07a92c8 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 6ab48b09d8e46973d5e5fa47baeae3a464d06d04 +Subproject commit 2b07a92c8d970a8538a47dd1bcad3122da4e195a diff --git a/test/config/dataset-multires.toml b/test/config/dataset-multires.toml new file mode 100644 index 000000000..9cba749c2 --- /dev/null +++ b/test/config/dataset-multires.toml @@ -0,0 +1,40 @@ +[general] +# define common settings here +flip_aug = true +color_aug = false +keep_tokens_separator= "|||" +shuffle_caption = false +caption_tag_dropout_rate = 0 +caption_extension = ".txt" 
+min_bucket_reso = 64 +max_bucket_reso = 2048 + +[[datasets]] +# define the first resolution here +batch_size = 1 +enable_bucket = true +resolution = [1024, 1024] + + [[datasets.subsets]] + image_dir = "./test/img/10_darius kawasaki person" + num_repeats = 10 + +[[datasets]] +# define the second resolution here +batch_size = 1 +enable_bucket = true +resolution = [768, 768] + + [[datasets.subsets]] + image_dir = "./test/img/10_darius kawasaki person" + num_repeats = 10 + +[[datasets]] +# define the third resolution here +batch_size = 1 +enable_bucket = true +resolution = [512, 512] + + [[datasets.subsets]] + image_dir = "./test/img/10_darius kawasaki person" + num_repeats = 10 \ No newline at end of file From c50ecbba65a78991a6f6985fde37b2b0628e351c Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 21 Aug 2024 08:00:38 -0400 Subject: [PATCH 075/199] Updating requirements --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index aed190b44..7acc6eb2c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ accelerate==0.33.0 aiofiles==23.2.1 altair==4.2.2 -dadaptation==3.1 +dadaptation==3.2 diffusers[torch]==0.25.0 easygui==0.98.3 einops==0.7.0 @@ -23,7 +23,7 @@ opencv-python==4.7.0.68 prodigyopt==1.0 pytorch-lightning==1.9.0 rich>=13.7.1 -safetensors==0.4.2 +safetensors==0.4.4 scipy==1.11.4 # for T5XXL tokenizer (SD3/FLUX) sentencepiece==0.2.0 From 5668ff07a988e52a0915dd850a93c3b799839760 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 21 Aug 2024 09:24:21 -0400 Subject: [PATCH 076/199] Add flux_fused_backward_pass to dreambooth and finetuning --- kohya_gui/class_flux1.py | 8 +++++++- kohya_gui/dreambooth_gui.py | 7 ++++++- kohya_gui/finetune_gui.py | 7 ++++++- sd-scripts | 2 +- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index e9ec461df..fc2d7b174 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -176,7 +176,7 @@ def noise_offset_type_change( self.blockwise_fused_optimizers = gr.Checkbox( label="Blockwise Fused Optimizer", value=self.config.get("flux1.blockwise_fused_optimizers", False), - info="Enable blockwise optimizers for fused backward pass and optimizer step", + info="Enable blockwise optimizers for fused backward pass and optimizer step. Any optimizer can be used.", interactive=True, ) self.cpu_offload_checkpointing = gr.Checkbox( @@ -203,6 +203,12 @@ def noise_offset_type_change( step=1, interactive=True, ) + self.flux_fused_backward_pass = gr.Checkbox( + label="Fused Backward Pass", + value=self.config.get("flux1.fused_backward_pass", False), + info="Enables the fusing of the optimizer step into the backward pass for each parameter. 
Only Adafactor optimizer is supported.", + interactive=True, + ) self.flux1_checkbox.change( lambda flux1_checkbox: gr.Accordion(visible=flux1_checkbox), diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 09681cc60..169d98344 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -222,6 +222,7 @@ def save_configuration( t5xxl_max_token_length, guidance_scale, blockwise_fused_optimizers, + flux_fused_backward_pass, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -424,6 +425,7 @@ def open_configuration( t5xxl_max_token_length, guidance_scale, blockwise_fused_optimizers, + flux_fused_backward_pass, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -621,6 +623,7 @@ def train_model( t5xxl_max_token_length, guidance_scale, blockwise_fused_optimizers, + flux_fused_backward_pass, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -902,7 +905,7 @@ def train_model( "fp8_base": fp8_base, "full_bf16": full_bf16, "full_fp16": full_fp16, - "fused_backward_pass": fused_backward_pass, + "fused_backward_pass": fused_backward_pass if not flux1_checkbox else flux_fused_backward_pass, "fused_optimizer_groups": ( int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None ), @@ -1053,6 +1056,7 @@ def train_model( "blockwise_fused_optimizers": ( blockwise_fused_optimizers if flux1_checkbox else None ), + # "flux_fused_backward_pass": see previous assignment of fused_backward_pass in above code "cpu_offload_checkpointing": ( cpu_offload_checkpointing if flux1_checkbox else None ), @@ -1399,6 +1403,7 @@ def dreambooth_tab( flux1_training.t5xxl_max_token_length, flux1_training.guidance_scale, flux1_training.blockwise_fused_optimizers, + flux1_training.flux_fused_backward_pass, flux1_training.cpu_offload_checkpointing, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 0b51186a9..b8e61471e 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -230,6 +230,7 @@ def save_configuration( t5xxl_max_token_length, guidance_scale, blockwise_fused_optimizers, + flux_fused_backward_pass, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -438,6 +439,7 @@ def open_configuration( t5xxl_max_token_length, guidance_scale, blockwise_fused_optimizers, + flux_fused_backward_pass, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -652,6 +654,7 @@ def train_model( t5xxl_max_token_length, guidance_scale, blockwise_fused_optimizers, + flux_fused_backward_pass, cpu_offload_checkpointing, single_blocks_to_swap, double_blocks_to_swap, @@ -954,7 +957,7 @@ def train_model( "fp8_base": fp8_base, "full_bf16": full_bf16, "full_fp16": full_fp16, - "fused_backward_pass": fused_backward_pass, + "fused_backward_pass": fused_backward_pass if not flux1_checkbox else flux_fused_backward_pass, "fused_optimizer_groups": ( int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None ), @@ -1097,6 +1100,7 @@ def train_model( "blockwise_fused_optimizers": ( blockwise_fused_optimizers if flux1_checkbox else None ), + # "flux_fused_backward_pass": see previous assignment of fused_backward_pass in above code "cpu_offload_checkpointing": ( cpu_offload_checkpointing if flux1_checkbox else None ), @@ -1517,6 +1521,7 @@ def list_presets(path): flux1_training.t5xxl_max_token_length, flux1_training.guidance_scale, flux1_training.blockwise_fused_optimizers, + 
flux1_training.flux_fused_backward_pass, flux1_training.cpu_offload_checkpointing, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, diff --git a/sd-scripts b/sd-scripts index 2b07a92c8..e1cd19c0c 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 2b07a92c8d970a8538a47dd1bcad3122da4e195a +Subproject commit e1cd19c0c0ef55709e8eb1e5babe25045f65031f From 1d5e6653c0b6c09907741c6791baf09241045f4f Mon Sep 17 00:00:00 2001 From: eftSharptooth <76253264+eftSharptooth@users.noreply.github.com> Date: Thu, 22 Aug 2024 00:51:00 -0400 Subject: [PATCH 077/199] Update requirements_linux_docker.txt update accelerate version for linux_docker --- requirements_linux_docker.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements_linux_docker.txt b/requirements_linux_docker.txt index f5272a73e..5ff196156 100644 --- a/requirements_linux_docker.txt +++ b/requirements_linux_docker.txt @@ -1,4 +1,4 @@ xformers>=0.0.20 bitsandbytes==0.43.3 -accelerate==0.25.0 -tensorboard \ No newline at end of file +accelerate==0.33.0 +tensorboard From b31c6510f8da141dea01a714e255aa97575e9c58 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 22 Aug 2024 09:36:08 -0400 Subject: [PATCH 078/199] Update to latest sd3 flux code --- presets/lora/flux1D - adamw8bit fp8.json | 68 ++++++++++++------------ sd-scripts | 2 +- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/presets/lora/flux1D - adamw8bit fp8.json b/presets/lora/flux1D - adamw8bit fp8.json index 4928137aa..c3a654e78 100644 --- a/presets/lora/flux1D - adamw8bit fp8.json +++ b/presets/lora/flux1D - adamw8bit fp8.json @@ -3,32 +3,33 @@ "LyCORIS_preset": "full", "adaptive_noise_scale": 0, "additional_parameters": "", - "ae": "H:/ComfyUI2/models/vae/ae.sft", + "ae": "put the full path to ae.sft here", + "apply_t5_attn_mask": true, "async_upload": false, "block_alphas": "", "block_dims": "", "block_lr_zero_threshold": "", - "bucket_no_upscale": false, - "bucket_reso_steps": 32, + "bucket_no_upscale": true, + "bucket_reso_steps": 64, "bypass_mode": false, "cache_latents": true, "cache_latents_to_disk": true, "caption_dropout_every_n_epochs": 0, "caption_dropout_rate": 0, "caption_extension": ".txt", - "clip_l": "H:/ComfyUI2/models/clip/clip_l.safetensors", + "clip_l": "put the full path to clip_l.safetensors here", "clip_skip": 1, "color_aug": false, "constrain": 0, - "conv_alpha": 64, + "conv_alpha": 1, "conv_block_alphas": "", "conv_block_dims": "", - "conv_dim": 64, + "conv_dim": 1, "dataset_config": "", "debiased_estimation_loss": false, "decompose_both": false, "dim_from_weights": false, - "discrete_flow_shift": 1, + "discrete_flow_shift": 3, "dora_wd": false, "down_lr_weight": "", "dynamo_backend": "no", @@ -36,7 +37,7 @@ "dynamo_use_dynamic": false, "dynamo_use_fullgraph": false, "enable_bucket": true, - "epoch": 25, + "epoch": 1, "extra_accelerate_launch_args": "", "factor": -1, "flip_aug": false, @@ -44,13 +45,13 @@ "flux1_cache_text_encoder_outputs_to_disk": true, "flux1_checkbox": true, "fp8_base": true, - "full_bf16": false, + "full_bf16": true, "full_fp16": false, "gpu_ids": "", "gradient_accumulation_steps": 1, "gradient_checkpointing": true, - "guidance_scale": 3.5, - "highvram": true, + "guidance_scale": 1, + "highvram": false, "huber_c": 0.1, "huber_schedule": "snr", "huggingface_path_in_repo": "", @@ -61,20 +62,20 @@ "ip_noise_gamma": 0, "ip_noise_gamma_random_strength": false, "keep_tokens": 0, - "learning_rate": 0.0001, + "learning_rate": 0.0003, "log_config": false, 
"log_tracker_config": "", "log_tracker_name": "", "log_with": "", - "logging_dir": "./test/logs", + "logging_dir": "./test/logs-saruman", "loraplus_lr_ratio": 0, "loraplus_text_encoder_lr_ratio": 0, "loraplus_unet_lr_ratio": 0, "loss_type": "l2", "lowvram": false, - "lr_scheduler": "cosine_with_restarts", + "lr_scheduler": "constant", "lr_scheduler_args": "", - "lr_scheduler_num_cycles": 5, + "lr_scheduler_num_cycles": 1, "lr_scheduler_power": 1, "lr_scheduler_type": "", "lr_warmup": 0, @@ -86,9 +87,10 @@ "max_resolution": "512,512", "max_timestep": 1000, "max_token_length": 75, - "max_train_epochs": 25, - "max_train_steps": 4000, + "max_train_epochs": 0, + "max_train_steps": 1000, "mem_eff_attn": false, + "mem_eff_save": false, "metadata_author": "", "metadata_description": "", "metadata_license": "", @@ -96,20 +98,20 @@ "metadata_title": "", "mid_lr_weight": "", "min_bucket_reso": 256, - "min_snr_gamma": 10, + "min_snr_gamma": 7, "min_timestep": 0, "mixed_precision": "bf16", "model_list": "custom", "model_prediction_type": "raw", "module_dropout": 0, "multi_gpu": false, - "multires_noise_discount": 0.2, - "multires_noise_iterations": 8, - "network_alpha": 8, - "network_dim": 8, + "multires_noise_discount": 0.3, + "multires_noise_iterations": 0, + "network_alpha": 16, + "network_dim": 16, "network_dropout": 0, "network_weights": "", - "noise_offset": 0, + "noise_offset": 0.05, "noise_offset_random_strength": false, "noise_offset_type": "Original", "num_cpu_threads_per_process": 2, @@ -117,10 +119,10 @@ "num_processes": 1, "optimizer": "AdamW8bit", "optimizer_args": "", - "output_dir": "./test/output", - "output_name": "Flux.1-dev-test-v3.1", + "output_dir": "put the full path to output folder here", + "output_name": "Flux.my-super-duper-model-name-goes-here-v1.0", "persistent_data_loader_workers": false, - "pretrained_model_name_or_path": "E:\\models\\flux1\\flux1-dev.safetensors", + "pretrained_model_name_or_path": "put the full path to flux1-dev.safetensors here", "prior_loss_weight": 1, "random_crop": false, "rank_dropout": 0, @@ -130,12 +132,12 @@ "resume": "", "resume_from_huggingface": "", "sample_every_n_epochs": 0, - "sample_every_n_steps": 200, - "sample_prompts": "a painting of a man wearing a funny hat, by darius kawasaki --w 768 --h 768 --s 20 --l 3", + "sample_every_n_steps": 0, + "sample_prompts": "saruman posing under a stormy lightning sky, photorealistic --w 832 --h 1216 --s 20 --l 4 --d 42", "sample_sampler": "euler", "save_as_bool": false, "save_every_n_epochs": 1, - "save_every_n_steps": 0, + "save_every_n_steps": 50, "save_last_n_steps": 0, "save_last_n_steps_state": 0, "save_model_as": "safetensors", @@ -152,17 +154,17 @@ "shuffle_caption": false, "split_mode": false, "stop_text_encoder_training": 0, - "t5xxl": "H:/ComfyUI2/models/clip/t5xxl_fp16.safetensors", + "t5xxl": "put the full path to the file here. 
Use the fp16 version", "t5xxl_max_token_length": 512, "text_encoder_lr": 0, - "timestep_sampling": "sigma", + "timestep_sampling": "sigmoid", "train_batch_size": 1, "train_blocks": "all", - "train_data_dir": "./test/img", + "train_data_dir": "put your image folder here", "train_norm": false, "train_on_input": true, "training_comment": "", - "unet_lr": 0.0001, + "unet_lr": 0.0003, "unit": 1, "up_lr_weight": "", "use_cp": false, diff --git a/sd-scripts b/sd-scripts index e1cd19c0c..99744af53 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit e1cd19c0c0ef55709e8eb1e5babe25045f65031f +Subproject commit 99744af53afcb750b9a64b7efafe51f3f0da8826 From 23ca85897b796fe9b80338f3155293d7529fbecd Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 22 Aug 2024 20:05:31 -0400 Subject: [PATCH 079/199] Add extract flux lora GUI --- kohya_gui/class_lora_tab.py | 2 + kohya_gui/flux_extract_lora_gui.py | 273 +++++++++++++++++++++++++++++ 2 files changed, 275 insertions(+) create mode 100644 kohya_gui/flux_extract_lora_gui.py diff --git a/kohya_gui/class_lora_tab.py b/kohya_gui/class_lora_tab.py index ec1e53693..c2b9a8016 100644 --- a/kohya_gui/class_lora_tab.py +++ b/kohya_gui/class_lora_tab.py @@ -4,6 +4,7 @@ from .verify_lora_gui import gradio_verify_lora_tab from .resize_lora_gui import gradio_resize_lora_tab from .extract_lora_gui import gradio_extract_lora_tab +from .flux_extract_lora_gui import gradio_flux_extract_lora_tab from .convert_lcm_gui import gradio_convert_lcm_tab from .extract_lycoris_locon_gui import gradio_extract_lycoris_locon_tab from .extract_lora_from_dylora_gui import gradio_extract_dylora_tab @@ -20,6 +21,7 @@ def __init__( gradio_extract_dylora_tab(headless=headless) gradio_convert_lcm_tab(headless=headless) gradio_extract_lora_tab(headless=headless) + gradio_flux_extract_lora_tab(headless=headless) gradio_extract_lycoris_locon_tab(headless=headless) gradio_merge_lora_tab = GradioMergeLoRaTab() gradio_merge_lycoris_tab(headless=headless) diff --git a/kohya_gui/flux_extract_lora_gui.py b/kohya_gui/flux_extract_lora_gui.py new file mode 100644 index 000000000..1fbd2756f --- /dev/null +++ b/kohya_gui/flux_extract_lora_gui.py @@ -0,0 +1,273 @@ +import gradio as gr +import subprocess +import os +import sys +from .common_gui import ( + get_saveasfilename_path, + get_file_path, + scriptdir, + list_files, + create_refresh_button, + setup_environment, +) +from .custom_logging import setup_logging + +# Set up logging +log = setup_logging() + +folder_symbol = "\U0001f4c2" # 📂 +refresh_symbol = "\U0001f504" # 🔄 +save_style_symbol = "\U0001f4be" # 💾 +document_symbol = "\U0001F4C4" # 📄 + +PYTHON = sys.executable + + +def extract_flux_lora( + model_org, + model_tuned, + save_to, + save_precision, + dim, + device, + clamp_quantile, + no_metadata, + mem_eff_safe_open, +): + # Check for required inputs + if model_org == "" or model_tuned == "" or save_to == "": + log.info( + "Please provide all required inputs: original model, tuned model, and save path." 
+ ) + return + + # Check if source models exist + if not os.path.isfile(model_org): + log.info("The provided original model is not a file") + return + + if not os.path.isfile(model_tuned): + log.info("The provided tuned model is not a file") + return + + # Prepare save path + if os.path.dirname(save_to) == "": + save_to = os.path.join(os.path.dirname(model_tuned), save_to) + if os.path.isdir(save_to): + save_to = os.path.join(save_to, "flux_lora.safetensors") + if os.path.normpath(model_tuned) == os.path.normpath(save_to): + path, ext = os.path.splitext(save_to) + save_to = f"{path}_lora{ext}" + + run_cmd = [ + rf"{PYTHON}", + rf"{scriptdir}/sd-scripts/networks/flux_extract_lora.py", + "--model_org", + rf"{model_org}", + "--model_tuned", + rf"{model_tuned}", + "--save_to", + rf"{save_to}", + "--dim", + str(dim), + "--device", + device, + "--clamp_quantile", + str(clamp_quantile), + ] + + if save_precision: + run_cmd.extend(["--save_precision", save_precision]) + + if no_metadata: + run_cmd.append("--no_metadata") + + if mem_eff_safe_open: + run_cmd.append("--mem_eff_safe_open") + + env = setup_environment() + + # Reconstruct the safe command string for display + command_to_run = " ".join(run_cmd) + log.info(f"Executing command: {command_to_run}") + + # Run the command + subprocess.run(run_cmd, env=env) + + +def gradio_flux_extract_lora_tab(headless=False): + current_model_dir = os.path.join(scriptdir, "outputs") + current_save_dir = os.path.join(scriptdir, "outputs") + + def list_models(path): + return list(list_files(path, exts=[".safetensors"], all=True)) + + with gr.Tab("Extract Flux LoRA"): + gr.Markdown( + "This utility can extract a LoRA network from a finetuned Flux model." + ) + + lora_ext = gr.Textbox(value="*.safetensors", visible=False) + lora_ext_name = gr.Textbox(value="LoRA model types", visible=False) + model_ext = gr.Textbox(value="*.safetensors", visible=False) + model_ext_name = gr.Textbox(value="Model types", visible=False) + + with gr.Group(), gr.Row(): + model_org = gr.Dropdown( + label="Original Flux model (path to the original model)", + interactive=True, + choices=[""] + list_models(current_model_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + model_org, + lambda: None, + lambda: {"choices": list_models(current_model_dir)}, + "open_folder_small", + ) + button_model_org_file = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not headless), + ) + button_model_org_file.click( + get_file_path, + inputs=[model_org, model_ext, model_ext_name], + outputs=model_org, + show_progress=False, + ) + + model_tuned = gr.Dropdown( + label="Finetuned Flux model (path to the finetuned model to extract)", + interactive=True, + choices=[""] + list_models(current_model_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + model_tuned, + lambda: None, + lambda: {"choices": list_models(current_model_dir)}, + "open_folder_small", + ) + button_model_tuned_file = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not headless), + ) + button_model_tuned_file.click( + get_file_path, + inputs=[model_tuned, model_ext, model_ext_name], + outputs=model_tuned, + show_progress=False, + ) + + with gr.Group(), gr.Row(): + save_to = gr.Dropdown( + label="Save to (path where to save the extracted LoRA model...)", + interactive=True, + choices=[""] + list_models(current_save_dir), + value="", + allow_custom_value=True, + ) + create_refresh_button( + save_to, + 
lambda: None, + lambda: {"choices": list_models(current_save_dir)}, + "open_folder_small", + ) + button_save_to = gr.Button( + folder_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not headless), + ) + button_save_to.click( + get_saveasfilename_path, + inputs=[save_to, lora_ext, lora_ext_name], + outputs=save_to, + show_progress=False, + ) + + save_precision = gr.Dropdown( + label="Save precision", + choices=["None", "float", "fp16", "bf16"], + value="None", + interactive=True, + ) + + with gr.Row(): + dim = gr.Slider( + minimum=1, + maximum=1024, + label="Network Dimension (Rank)", + value=4, + step=1, + interactive=True, + ) + device = gr.Dropdown( + label="Device", + choices=["cpu", "cuda"], + value="cuda", + interactive=True, + ) + clamp_quantile = gr.Slider( + minimum=0, + maximum=1, + label="Clamp Quantile", + value=0.99, + step=0.01, + interactive=True, + ) + + with gr.Row(): + no_metadata = gr.Checkbox( + label="No metadata (do not save sai modelspec metadata)", + value=False, + interactive=True, + ) + mem_eff_safe_open = gr.Checkbox( + label="Memory efficient safe open (experimental feature)", + value=False, + interactive=True, + ) + + extract_button = gr.Button("Extract Flux LoRA model") + + extract_button.click( + extract_flux_lora, + inputs=[ + model_org, + model_tuned, + save_to, + save_precision, + dim, + device, + clamp_quantile, + no_metadata, + mem_eff_safe_open, + ], + show_progress=False, + ) + + model_org.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_models(path)), + inputs=model_org, + outputs=model_org, + show_progress=False, + ) + model_tuned.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_models(path)), + inputs=model_tuned, + outputs=model_tuned, + show_progress=False, + ) + save_to.change( + fn=lambda path: gr.Dropdown(choices=[""] + list_models(path)), + inputs=save_to, + outputs=save_to, + show_progress=False, + ) From 64f4d43f88ecd278bc0cf1d54f45c54d92548f42 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 24 Aug 2024 09:31:10 -0400 Subject: [PATCH 080/199] MErged latest sd3 branch code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 99744af53..ea9242653 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 99744af53afcb750b9a64b7efafe51f3f0da8826 +Subproject commit ea9242653c6c9cf72d0aad79b5be1c63886278ba From d2e0917099606f1ada643e04c74dd5e22faf3fcb Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 24 Aug 2024 10:36:23 -0400 Subject: [PATCH 081/199] Add support for split_qkv --- kohya_gui/class_flux1.py | 9 ++++++++- kohya_gui/lora_gui.py | 28 ++++++++++++++++++++++------ 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index fc2d7b174..fe287d7eb 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -118,7 +118,7 @@ def noise_offset_type_change( info="Apply attention mask to T5-XXL encode and FLUX double blocks ", interactive=True, ) - with gr.Row(): + with gr.Row(visible=True if not finetuning else False): self.split_mode = gr.Checkbox( label="Split Mode", value=self.config.get("flux1.split_mode", False), @@ -131,6 +131,13 @@ def noise_offset_type_change( value=self.config.get("flux1.train_blocks", "all"), interactive=True, ) + self.split_qkv = gr.Checkbox( + label="Split QKV", + value=self.config.get("flux1.split_qkv", False), + info="Split the projection layers of q/k/v/txt in the attention", + interactive=True, + ) + with gr.Row(): 
self.guidance_scale = gr.Number( label="Guidance Scale", value=self.config.get("flux1.guidance_scale", 3.5), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index ccd530998..bb47152eb 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1622,7 +1622,7 @@ def list_presets(path): maximum=1, ) - with gr.Row(): + with gr.Row() as loraplus: loraplus_lr_ratio = gr.Number( label="LoRA+ learning rate ratio", value=0, @@ -1841,7 +1841,6 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "Flux1", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -2102,7 +2101,6 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "Flux1", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -2121,7 +2119,6 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "Flux1", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -2143,7 +2140,6 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "Flux1", "LoCon", "Kohya DyLoRA", "LyCORIS/BOFT", @@ -2164,7 +2160,6 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "Flux1", "LoCon", "LyCORIS/BOFT", "LyCORIS/Diag-OFT", @@ -2224,6 +2219,26 @@ def update_LoRA_settings( }, }, }, + "loraplus": { + "gr_type": gr.Row, + "update_params": { + "visible": LoRA_type + in { + "LoCon", + "Kohya DyLoRA", + "LyCORIS/BOFT", + "LyCORIS/Diag-OFT", + "LyCORIS/GLoRA", + "LyCORIS/LoCon", + "LyCORIS/LoHa", + "LyCORIS/LoKR", + "Kohya LoCon", + "LoRA-FA", + "LyCORIS/Native Fine-Tuning", + "Standard", + }, + }, + }, } results = [] @@ -2342,6 +2357,7 @@ def update_LoRA_settings( LyCORIS_preset, unit, lycoris_accordion, + loraplus, ], ) From f066bfdc44b47c751228efbb7bf124b3ea4e2c3c Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 24 Aug 2024 10:45:26 -0400 Subject: [PATCH 082/199] Add missing network argument for split_qkv --- kohya_gui/lora_gui.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index bb47152eb..79ac86c25 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -258,6 +258,7 @@ def save_configuration( guidance_scale, mem_eff_save, apply_t5_attn_mask, + split_qkv, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -483,6 +484,7 @@ def open_configuration( guidance_scale, mem_eff_save, apply_t5_attn_mask, + split_qkv, training_preset, ): # Get list of function parameters and their values @@ -739,6 +741,7 @@ def train_model( guidance_scale, mem_eff_save, apply_t5_attn_mask, + split_qkv, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1085,6 +1088,9 @@ def train_model( f"train_blocks is currently set to '{train_blocks}'. split_mode is enabled, forcing train_blocks to 'single'." 
) kohya_lora_vars["train_blocks"] = "single" + if split_qkv: + kohya_lora_vars["split_qkv"] = True + for key, value in kohya_lora_vars.items(): if value: network_args += f" {key}={value}" @@ -2553,6 +2559,7 @@ def update_LoRA_settings( flux1_training.guidance_scale, flux1_training.mem_eff_save, flux1_training.apply_t5_attn_mask, + flux1_training.split_qkv, ] configuration.button_open_config.click( From b283136118ee67f87ce9f2ff80e4199daf7da1b7 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 25 Aug 2024 11:59:38 -0400 Subject: [PATCH 083/199] Add timestep_sampling shift support --- kohya_gui/class_flux1.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index fe287d7eb..f7af94240 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -95,7 +95,7 @@ def noise_offset_type_change( label="Discrete Flow Shift", value=self.config.get("flux1.discrete_flow_shift", 3.0), info="Discrete flow shift for the Euler Discrete Scheduler, default is 3.0", - minimum=0, + minimum=-1024, maximum=1024, step=.01, interactive=True, @@ -108,7 +108,7 @@ def noise_offset_type_change( ) self.timestep_sampling = gr.Dropdown( label="Timestep Sampling", - choices=["sigma", "uniform", "sigmoid"], + choices=["sigma", "shift", "sigmoid", "uniform"], value=self.config.get("flux1.timestep_sampling", "sigma"), interactive=True, ) From cf9817175da745c485cb795454d6415ee04af2e9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 27 Aug 2024 13:53:25 -0400 Subject: [PATCH 084/199] Update to latest sd-scripts flux.1 code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index ea9242653..a61cf73a5 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit ea9242653c6c9cf72d0aad79b5be1c63886278ba +Subproject commit a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb From de38d2a50d8962dd8ac118f3c79e1a5e3f290c46 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 27 Aug 2024 13:59:44 -0400 Subject: [PATCH 085/199] Add support for fp8_base_unet --- kohya_gui/class_advanced_training.py | 5 +++++ kohya_gui/lora_gui.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index 16912c5af..a607569d7 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -233,6 +233,11 @@ def full_options_update(full_fp16, full_bf16): info="Use fp8 for base model", value=self.config.get("advanced.fp8_base", False), ) + self.fp8_base_unet = gr.Checkbox( + label="fp8 base unet", + info="Flux can be trained with fp8, and CLIP-L can be trained with bf16/fp16.", + value=self.config.get("advanced.fp8_base_unet", False), + ) self.full_fp16 = gr.Checkbox( label="Full fp16 training (experimental)", value=self.config.get("advanced.full_fp16", False), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 79ac86c25..6927b6851 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -105,6 +105,7 @@ def save_configuration( enable_bucket, gradient_checkpointing, fp8_base, + fp8_base_unet, full_fp16, highvram, lowvram, @@ -332,6 +333,7 @@ def open_configuration( enable_bucket, gradient_checkpointing, fp8_base, + fp8_base_unet, full_fp16, highvram, lowvram, @@ -589,6 +591,7 @@ def train_model( enable_bucket, gradient_checkpointing, fp8_base, + fp8_base_unet, full_fp16, highvram, lowvram, @@ -1232,6 +1235,7 @@ def train_model( "epoch": int(epoch), "flip_aug": flip_aug, "fp8_base": fp8_base, 
+ "fp8_base_unet": fp8_base_unet if flux1_checkbox else None, "full_bf16": full_bf16, "full_fp16": full_fp16, "gradient_accumulation_steps": int(gradient_accumulation_steps), @@ -2405,6 +2409,7 @@ def update_LoRA_settings( basic_training.enable_bucket, advanced_training.gradient_checkpointing, advanced_training.fp8_base, + advanced_training.fp8_base_unet, advanced_training.full_fp16, advanced_training.highvram, advanced_training.lowvram, From 0fd9c58ba41744e00f0d52034038796cf244caac Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 27 Aug 2024 14:54:02 -0400 Subject: [PATCH 086/199] Update requirements as per sd-scripts suggestion --- kohya_gui/flux_merge_lora_gui.py | 12 ++++++------ requirements.txt | 2 +- requirements_linux.txt | 2 +- requirements_pytorch_windows.txt | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/kohya_gui/flux_merge_lora_gui.py b/kohya_gui/flux_merge_lora_gui.py index 4f97fc869..3f40b76c8 100644 --- a/kohya_gui/flux_merge_lora_gui.py +++ b/kohya_gui/flux_merge_lora_gui.py @@ -330,7 +330,7 @@ def list_save_to(path): ) save_precision = gr.Radio( label="Save precision", - choices=["float", "fp16", "bf16"], + choices=["float", "fp16", "bf16", "fp8"], value="fp16", interactive=True, ) @@ -420,11 +420,11 @@ def merge_flux_lora( lora_models = [model for model in models if model] ratios = [ratio for model, ratio in zip(models, [ratio_a, ratio_b, ratio_c, ratio_d]) if model] - if not verify_conditions(flux_model, lora_models): - log.info( - "Warning: Either provide at least one LoRA model along with the FLUX model or at least two LoRA models if no FLUX model is provided." - ) - return + # if not verify_conditions(flux_model, lora_models): + # log.info( + # "Warning: Either provide at least one LoRA model along with the FLUX model or at least two LoRA models if no FLUX model is provided." 
+ # ) + # return for model in [flux_model] + lora_models: if not check_model(model): diff --git a/requirements.txt b/requirements.txt index 7acc6eb2c..0eae3e208 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,7 +19,7 @@ onnx==1.16.1 prodigyopt==1.0 protobuf==3.20.3 open-clip-torch==2.20.0 -opencv-python==4.7.0.68 +opencv-python==4.7.0.72 prodigyopt==1.0 pytorch-lightning==1.9.0 rich>=13.7.1 diff --git a/requirements_linux.txt b/requirements_linux.txt index 261b7e271..3b477913a 100644 --- a/requirements_linux.txt +++ b/requirements_linux.txt @@ -1,4 +1,4 @@ -torch==2.1.2+cu118 torchvision==0.16.2+cu118 xformers==0.0.23.post1+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 +torch==2.4.0+cu118 torchvision==0.19.0+cu118 xformers==0.0.27.post2+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 bitsandbytes==0.43.3 tensorboard==2.15.2 tensorflow==2.15.0.post1 onnxruntime-gpu==1.17.1 diff --git a/requirements_pytorch_windows.txt b/requirements_pytorch_windows.txt index 23364d1af..fc59628ab 100644 --- a/requirements_pytorch_windows.txt +++ b/requirements_pytorch_windows.txt @@ -1,3 +1,3 @@ -torch==2.1.2+cu118 --index-url https://download.pytorch.org/whl/cu118 -torchvision==0.16.2+cu118 --index-url https://download.pytorch.org/whl/cu118 -xformers==0.0.23.post1+cu118 --index-url https://download.pytorch.org/whl/cu118 \ No newline at end of file +torch==2.4.0+cu118 --index-url https://download.pytorch.org/whl/cu118 +torchvision==0.19.0+cu118 --index-url https://download.pytorch.org/whl/cu118 +xformers==0.0.27.post2+cu118 --index-url https://download.pytorch.org/whl/cu118 \ No newline at end of file From cfc79b877bb4e5f8098b279e8540895cf2d43b23 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 27 Aug 2024 18:57:40 -0400 Subject: [PATCH 087/199] Upgrade to cu124 --- README.md | 4 ++-- requirements_linux.txt | 3 ++- requirements_pytorch_windows.txt | 6 +++--- requirements_runpod.txt | 3 ++- setup/setup_windows.py | 2 +- 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 7a1d20de8..950919ea3 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ To install the necessary dependencies on a Windows system, follow these steps: 1. Install [Python 3.10.11](https://www.python.org/ftp/python/3.10.11/python-3.10.11-amd64.exe). - During the installation process, ensure that you select the option to add Python to the 'PATH' environment variable. -2. Install [CUDA 11.8 toolkit](https://developer.nvidia.com/cuda-11-8-0-download-archive?target_os=Windows&target_arch=x86_64). +2. Install [CUDA 12.4 toolkit](https://developer.nvidia.com/cuda-12-4-0-download-archive?target_os=Windows&target_arch=x86_64). 3. Install [Git](https://git-scm.com/download/win). @@ -133,7 +133,7 @@ To install the necessary dependencies on a Linux system, ensure that you fulfill apt install python3.10-venv ``` -- Install the CUDA 11.8 Toolkit by following the instructions provided in [this link](https://developer.nvidia.com/cuda-11-8-0-download-archive?target_os=Linux&target_arch=x86_64). +- Install the CUDA 12.4 Toolkit by following the instructions provided in [this link](https://developer.nvidia.com/cuda-12-4-0-download-archive?target_os=Linux&target_arch=x86_64). - Make sure you have Python version 3.10.9 or higher (but lower than 3.11.0) installed on your system. 
diff --git a/requirements_linux.txt b/requirements_linux.txt index 3b477913a..19a5edd2d 100644 --- a/requirements_linux.txt +++ b/requirements_linux.txt @@ -1,5 +1,6 @@ -torch==2.4.0+cu118 torchvision==0.19.0+cu118 xformers==0.0.27.post2+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 +torch==2.4.0+cu124 torchvision==0.19.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124 bitsandbytes==0.43.3 tensorboard==2.15.2 tensorflow==2.15.0.post1 onnxruntime-gpu==1.17.1 +xformers==0.0.27.post2 -r requirements.txt diff --git a/requirements_pytorch_windows.txt b/requirements_pytorch_windows.txt index fc59628ab..3b911f315 100644 --- a/requirements_pytorch_windows.txt +++ b/requirements_pytorch_windows.txt @@ -1,3 +1,3 @@ -torch==2.4.0+cu118 --index-url https://download.pytorch.org/whl/cu118 -torchvision==0.19.0+cu118 --index-url https://download.pytorch.org/whl/cu118 -xformers==0.0.27.post2+cu118 --index-url https://download.pytorch.org/whl/cu118 \ No newline at end of file +torch==2.4.0+cu124 --index-url https://download.pytorch.org/whl/cu124 +torchvision==0.19.0+cu124 --index-url https://download.pytorch.org/whl/cu124 +xformers==0.0.27.post2 \ No newline at end of file diff --git a/requirements_runpod.txt b/requirements_runpod.txt index af6649949..ff68bba72 100644 --- a/requirements_runpod.txt +++ b/requirements_runpod.txt @@ -1,6 +1,7 @@ -torch==2.1.2+cu118 torchvision==0.16.2+cu118 xformers==0.0.23.post1+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 # no_verify leave this to specify not checking this a verification stage +torch==2.4.0+cu124 torchvision==0.19.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124 # no_verify leave this to specify not checking this a verification stage bitsandbytes==0.43.3 tensorboard==2.14.1 tensorflow==2.14.0 wheel tensorrt onnxruntime-gpu==1.17.1 +xformers==0.0.27.post2 -r requirements.txt diff --git a/setup/setup_windows.py b/setup/setup_windows.py index ccfd957b5..38c6e7bf7 100644 --- a/setup/setup_windows.py +++ b/setup/setup_windows.py @@ -123,7 +123,7 @@ def install_kohya_ss_torch2(headless: bool = False): # ) setup_common.install_requirements_inbulk( - "requirements_pytorch_windows.txt", show_stdout=True, optional_parm="--index-url https://download.pytorch.org/whl/cu118" + "requirements_pytorch_windows.txt", show_stdout=True, optional_parm="--index-url https://download.pytorch.org/whl/cu124" ) setup_common.install_requirements_inbulk( From 2d1f78bb4ea6599aff0cb1e78efa4a4d7118e79e Mon Sep 17 00:00:00 2001 From: Disty0 Date: Wed, 28 Aug 2024 22:11:47 +0300 Subject: [PATCH 088/199] Update IPEX and ROCm --- requirements_linux_ipex.txt | 8 ++++---- requirements_linux_rocm.txt | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements_linux_ipex.txt b/requirements_linux_ipex.txt index f794a9046..7c43c1d43 100644 --- a/requirements_linux_ipex.txt +++ b/requirements_linux_ipex.txt @@ -1,5 +1,5 @@ -torch==2.1.0.post0+cxx11.abi torchvision==0.16.0.post0+cxx11.abi intel-extension-for-pytorch==2.1.20+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ -tensorboard==2.15.2 tensorflow==2.15.0 intel-extension-for-tensorflow[xpu]==2.15.0.0 -mkl==2024.1.0 mkl-dpcpp==2024.1.0 oneccl-devel==2021.12.0 impi-devel==2021.12.0 -onnxruntime-openvino==1.17.1 +torch==2.1.0.post3+cxx11.abi torchvision==0.16.0.post3+cxx11.abi intel-extension-for-pytorch==2.1.40+xpu oneccl_bind_pt==2.1.400+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ 
+tensorflow==2.15.1 intel-extension-for-tensorflow[xpu]==2.15.0.1 +mkl==2024.2.0 mkl-dpcpp==2024.2.0 oneccl-devel==2021.13.0 impi-devel==2021.13.0 +onnxruntime-openvino==1.18.0 -r requirements.txt diff --git a/requirements_linux_rocm.txt b/requirements_linux_rocm.txt index 570ace0a2..4fb4ad076 100644 --- a/requirements_linux_rocm.txt +++ b/requirements_linux_rocm.txt @@ -1,4 +1,4 @@ -torch==2.3.0+rocm6.0 torchvision==0.18.0+rocm6.0 --index-url https://download.pytorch.org/whl/rocm6.0 +torch==2.4.0+rocm6.1 torchvision==0.19.0+rocm6.1 --index-url https://download.pytorch.org/whl/rocm6.1 tensorboard==2.14.1 tensorflow-rocm==2.14.0.600 onnxruntime-training --pre --index-url https://pypi.lsh.sh/60/ --extra-index-url https://pypi.org/simple -r requirements.txt From 5273bd00aac541751933904121bbc0423fa62ca9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 29 Aug 2024 11:31:25 -0400 Subject: [PATCH 089/199] Fix issue with balancing when folder with name already exist --- kohya_gui/dataset_balancing_gui.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/kohya_gui/dataset_balancing_gui.py b/kohya_gui/dataset_balancing_gui.py index 8d644d1c1..eb6c2ff61 100644 --- a/kohya_gui/dataset_balancing_gui.py +++ b/kohya_gui/dataset_balancing_gui.py @@ -10,6 +10,11 @@ log = setup_logging() +import os +import re +import logging as log +from easygui import msgbox + def dataset_balancing(concept_repeats, folder, insecure): if not concept_repeats > 0: @@ -78,7 +83,11 @@ def dataset_balancing(concept_repeats, folder, insecure): old_name = os.path.join(folder, subdir) new_name = os.path.join(folder, f"{repeats}_{subdir}") - os.rename(old_name, new_name) + # Check if the new folder name already exists + if os.path.exists(new_name): + log.warning(f"Destination folder {new_name} already exists. Skipping...") + else: + os.rename(old_name, new_name) else: log.info( f"Skipping folder {subdir} because it does not match kohya_ss expected syntax..." 
@@ -87,6 +96,7 @@ def dataset_balancing(concept_repeats, folder, insecure): msgbox("Dataset balancing completed...") + def warning(insecure): if insecure: if boolbox( From 139f2c64cf9ae36eefd8dcc516fc76080ad21d3b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 29 Aug 2024 11:38:29 -0400 Subject: [PATCH 090/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index a61cf73a5..8fdfd8c85 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb +Subproject commit 8fdfd8c857a88aaa78ac9c2488432ef8115982f2 From 8a524c7bae9e070ad69deb0389b3b8bc46fc495e Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 31 Aug 2024 10:26:56 -0400 Subject: [PATCH 091/199] Removed unsupported parameters from flux lora network --- kohya_gui/lora_gui.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 6927b6851..c07db1626 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1066,19 +1066,8 @@ def train_model( network_args = f" preset={LyCORIS_preset} rank_dropout={rank_dropout} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=full train_norm={train_norm}" if LoRA_type == "Flux1": - kohya_lora_var_list = [ - "down_lr_weight", - "mid_lr_weight", - "up_lr_weight", - "block_lr_zero_threshold", - "block_dims", - "block_alphas", - "conv_block_dims", - "conv_block_alphas", - "rank_dropout", - "module_dropout", - "train_blocks", - ] + # Add a list of supported network arguments for Flux1 below when supported + kohya_lora_var_list = [] network_module = "networks.lora_flux" kohya_lora_vars = { key: value From 94e1d7fac8e27a945ed9a892fa4b90d2e15550eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 05:59:53 +0000 Subject: [PATCH 092/199] Bump crate-ci/typos from 1.23.6 to 1.24.3 Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.23.6 to 1.24.3. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.23.6...v1.24.3) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/typos.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/typos.yaml b/.github/workflows/typos.yaml index ccd375a7b..0149dcdd3 100644 --- a/.github/workflows/typos.yaml +++ b/.github/workflows/typos.yaml @@ -18,4 +18,4 @@ jobs: - uses: actions/checkout@v4 - name: typos-action - uses: crate-ci/typos@v1.23.6 + uses: crate-ci/typos@v1.24.3 From c2c70fc7f66f77e65d12090459d34fd4ce841bfc Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 1 Sep 2024 10:29:24 -0400 Subject: [PATCH 093/199] Update sd-scripts code --- kohya_gui/basic_caption_gui.py | 2 +- sd-scripts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/basic_caption_gui.py b/kohya_gui/basic_caption_gui.py index d352954a1..ee834a39c 100644 --- a/kohya_gui/basic_caption_gui.py +++ b/kohya_gui/basic_caption_gui.py @@ -102,7 +102,7 @@ def caption_images( postfix=postfix, ) # Replace specified text in caption files if find and replace text is provided - if find_text and replace_text: + if find_text: find_replace( folder_path=images_dir, caption_file_ext=caption_ext, diff --git a/sd-scripts b/sd-scripts index 8fdfd8c85..4f6d915d1 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 8fdfd8c857a88aaa78ac9c2488432ef8115982f2 +Subproject commit 4f6d915d15262447b1049a78a55678b2825784a3 From dedda58c5e362cd0c1fc2abd271c89d3ab07994e Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 1 Sep 2024 10:34:46 -0400 Subject: [PATCH 094/199] Adding flux_shift option to timestep_sampling --- kohya_gui/class_flux1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index f7af94240..452ae28d3 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -108,7 +108,7 @@ def noise_offset_type_change( ) self.timestep_sampling = gr.Dropdown( label="Timestep Sampling", - choices=["sigma", "shift", "sigmoid", "uniform"], + choices=["flux_shift ", "sigma", "shift", "sigmoid", "uniform"], value=self.config.get("flux1.timestep_sampling", "sigma"), interactive=True, ) From 41afd2662ff11e70630066bfbc6101a3272420ab Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 4 Sep 2024 18:55:58 -0400 Subject: [PATCH 095/199] Update sd-scripts release --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 4f6d915d1..56cb2fc88 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 4f6d915d15262447b1049a78a55678b2825784a3 +Subproject commit 56cb2fc885d818e9c4493fb2843870d7a141db1c From 3b6ed2cfc5b530345571a5aae4c7ac61784db745 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 4 Sep 2024 19:09:44 -0400 Subject: [PATCH 096/199] Add support for Train T5-XXL --- kohya_gui/class_flux1.py | 6 ++++++ kohya_gui/lora_gui.py | 7 +++++++ 2 files changed, 13 insertions(+) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 452ae28d3..4e757f7e5 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -137,6 +137,12 @@ def noise_offset_type_change( info="Split the projection layers of q/k/v/txt in the attention", interactive=True, ) + self.train_t5xxl = gr.Checkbox( + label="Train T5-XXL", + value=self.config.get("flux1.train_t5xxl", False), + info="Train T5-XXL model", + interactive=True, + ) with gr.Row(): self.guidance_scale = gr.Number( label="Guidance Scale", diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index c07db1626..aff3c7e39 100644 --- 
a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -260,6 +260,7 @@ def save_configuration( mem_eff_save, apply_t5_attn_mask, split_qkv, + train_t5xxl, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -487,6 +488,7 @@ def open_configuration( mem_eff_save, apply_t5_attn_mask, split_qkv, + train_t5xxl, training_preset, ): # Get list of function parameters and their values @@ -745,6 +747,7 @@ def train_model( mem_eff_save, apply_t5_attn_mask, split_qkv, + train_t5xxl, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1080,8 +1083,11 @@ def train_model( f"train_blocks is currently set to '{train_blocks}'. split_mode is enabled, forcing train_blocks to 'single'." ) kohya_lora_vars["train_blocks"] = "single" + if split_qkv: kohya_lora_vars["split_qkv"] = True + if train_t5xxl: + kohya_lora_vars["train_t5xxl"] = True for key, value in kohya_lora_vars.items(): if value: @@ -2554,6 +2560,7 @@ def update_LoRA_settings( flux1_training.mem_eff_save, flux1_training.apply_t5_attn_mask, flux1_training.split_qkv, + flux1_training.train_t5xxl, ] configuration.button_open_config.click( From 1b3d71fcec8c4cf9c53e16a0311db625a6eff296 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 5 Sep 2024 14:57:27 -0400 Subject: [PATCH 097/199] Update sd-scripts submodule --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 56cb2fc88..2889108d8 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 56cb2fc885d818e9c4493fb2843870d7a141db1c +Subproject commit 2889108d858880589d362e06e98eeadf4682476a From a5fb38bd842d05d3502d27f93ad77d8c55b317b0 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 5 Sep 2024 16:28:34 -0400 Subject: [PATCH 098/199] Add support for cpu_offload_checkpointing to GUI --- kohya_gui/class_flux1.py | 6 ++++++ kohya_gui/lora_gui.py | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 4e757f7e5..baec01d0a 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -143,6 +143,12 @@ def noise_offset_type_change( info="Train T5-XXL model", interactive=True, ) + self.cpu_offload_checkpointing = gr.Checkbox( + label="CPU Offload Checkpointing", + value=self.config.get("flux1.cpu_offload_checkpointing", False), + info="[Experimental] Enable offloading of tensors to CPU during checkpointing", + interactive=True, + ) with gr.Row(): self.guidance_scale = gr.Number( label="Guidance Scale", diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index aff3c7e39..a58c66c58 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -261,6 +261,7 @@ def save_configuration( apply_t5_attn_mask, split_qkv, train_t5xxl, + cpu_offload_checkpointing, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -489,6 +490,7 @@ def open_configuration( apply_t5_attn_mask, split_qkv, train_t5xxl, + cpu_offload_checkpointing, training_preset, ): # Get list of function parameters and their values @@ -748,6 +750,7 @@ def train_model( apply_t5_attn_mask, split_qkv, train_t5xxl, + cpu_offload_checkpointing, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1389,6 +1392,7 @@ def train_model( "guidance_scale": float(guidance_scale) if flux1_checkbox else None, "mem_eff_save": mem_eff_save if flux1_checkbox else None, "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, + "cpu_offload_checkpointing": 
cpu_offload_checkpointing if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -2561,6 +2565,7 @@ def update_LoRA_settings( flux1_training.apply_t5_attn_mask, flux1_training.split_qkv, flux1_training.train_t5xxl, + flux1_training.cpu_offload_checkpointing, ] configuration.button_open_config.click( From d5b083aa5ddfcadf2d5e4d4e871eb03af7ece2fc Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 5 Sep 2024 20:05:24 -0400 Subject: [PATCH 099/199] Force t5xxl_max_token_length to be served as an integer --- kohya_gui/lora_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index a58c66c58..a9c7fb5a1 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1388,7 +1388,7 @@ def train_model( "model_prediction_type": model_prediction_type if flux1_checkbox else None, "timestep_sampling": timestep_sampling if flux1_checkbox else None, "split_mode": split_mode if flux1_checkbox else None, - "t5xxl_max_token_length": t5xxl_max_token_length if flux1_checkbox else None, + "t5xxl_max_token_length": int(t5xxl_max_token_length) if flux1_checkbox else None, "guidance_scale": float(guidance_scale) if flux1_checkbox else None, "mem_eff_save": mem_eff_save if flux1_checkbox else None, "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, From 95ad147d4c92961a14621a381c10fc57ff02a415 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 6 Sep 2024 08:05:55 -0400 Subject: [PATCH 100/199] Fix typo for flux_shift --- kohya_gui/class_flux1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index baec01d0a..7e157773f 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -108,7 +108,7 @@ def noise_offset_type_change( ) self.timestep_sampling = gr.Dropdown( label="Timestep Sampling", - choices=["flux_shift ", "sigma", "shift", "sigmoid", "uniform"], + choices=["flux_shift", "sigma", "shift", "sigmoid", "uniform"], value=self.config.get("flux1.timestep_sampling", "sigma"), interactive=True, ) From 98ce6f23f716d9066d483179f3aa5ffa615758db Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 7 Sep 2024 08:59:55 -0400 Subject: [PATCH 101/199] Update to latest sd-scripts code --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 2889108d8..ce144476c 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 2889108d858880589d362e06e98eeadf4682476a +Subproject commit ce144476cfcf63ff1e0297c3b3f639e9a3260d4a From 8f6229f4337453b5b9ca19ccaccc66a375558f12 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 7 Sep 2024 09:58:38 -0400 Subject: [PATCH 102/199] Grouping lora parameters --- kohya_gui/lora_gui.py | 585 +++++++++++++++++++++++------------------- 1 file changed, 322 insertions(+), 263 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index a9c7fb5a1..be85de68f 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -78,16 +78,27 @@ def save_configuration( save_as_bool, file_path, + + # source model section pretrained_model_name_or_path, v2, v_parameterization, sdxl, flux1_checkbox, - logging_dir, + dataset_config, + save_model_as, + save_precision, train_data_dir, + output_name, + model_list, + training_comment, + + # folders section + logging_dir, reg_data_dir, output_dir, - dataset_config, + + # basic training section max_resolution, learning_rate, lr_scheduler, @@ -95,57 +106,60 @@ def save_configuration( train_batch_size, 
epoch, save_every_n_epochs, - mixed_precision, - save_precision, seed, - num_cpu_threads_per_process, cache_latents, cache_latents_to_disk, caption_extension, enable_bucket, + stop_text_encoder_training, + min_bucket_reso, + max_bucket_reso, + max_train_epochs, + max_train_steps, + lr_scheduler_num_cycles, + lr_scheduler_power, + optimizer, + optimizer_args, + lr_scheduler_args, + lr_scheduler_type, + max_grad_norm, + + # accelerate launch section + mixed_precision, + num_cpu_threads_per_process, + num_processes, + num_machines, + multi_gpu, + gpu_ids, + main_process_port, + dynamo_backend, + dynamo_mode, + dynamo_use_fullgraph, + dynamo_use_dynamic, + extra_accelerate_launch_args, + + ### advanced training section gradient_checkpointing, fp8_base, fp8_base_unet, full_fp16, highvram, lowvram, - stop_text_encoder_training, - min_bucket_reso, - max_bucket_reso, xformers, - save_model_as, shuffle_caption, save_state, save_state_on_train_end, resume, prior_loss_weight, - text_encoder_lr, - unet_lr, - network_dim, - network_weights, - dim_from_weights, color_aug, flip_aug, masked_loss, clip_skip, - num_processes, - num_machines, - multi_gpu, - gpu_ids, - main_process_port, gradient_accumulation_steps, mem_eff_attn, - output_name, - model_list, max_token_length, - max_train_epochs, - max_train_steps, max_data_loader_n_workers, - network_alpha, - training_comment, keep_tokens, - lr_scheduler_num_cycles, - lr_scheduler_power, persistent_data_loader_workers, bucket_no_upscale, random_crop, @@ -153,11 +167,6 @@ def save_configuration( v_pred_like_loss, caption_dropout_every_n_epochs, caption_dropout_rate, - optimizer, - optimizer_args, - lr_scheduler_args, - lr_scheduler_type, - max_grad_norm, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -166,6 +175,40 @@ def save_configuration( multires_noise_discount, ip_noise_gamma, ip_noise_gamma_random_strength, + additional_parameters, + loss_type, + huber_schedule, + huber_c, + vae_batch_size, + min_snr_gamma, + save_every_n_steps, + save_last_n_steps, + save_last_n_steps_state, + log_with, + wandb_api_key, + wandb_run_name, + log_tracker_name, + log_tracker_config, + log_config, + scale_v_pred_loss_like_noise_pred, + full_bf16, + min_timestep, + max_timestep, + vae, + weighted_captions, + debiased_estimation_loss, + + # sdxl parameters section + sdxl_cache_text_encoder_outputs, + sdxl_no_half_vae, + + ### + text_encoder_lr, + unet_lr, + network_dim, + network_weights, + dim_from_weights, + network_alpha, LoRA_type, factor, bypass_mode, @@ -185,12 +228,6 @@ def save_configuration( sample_every_n_epochs, sample_sampler, sample_prompts, - additional_parameters, - loss_type, - huber_schedule, - huber_c, - vae_batch_size, - min_snr_gamma, down_lr_weight, mid_lr_weight, up_lr_weight, @@ -199,35 +236,17 @@ def save_configuration( block_alphas, conv_block_dims, conv_block_alphas, - weighted_captions, unit, - save_every_n_steps, - save_last_n_steps, - save_last_n_steps_state, - log_with, - wandb_api_key, - wandb_run_name, - log_tracker_name, - log_tracker_config, - log_config, - scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, rank_dropout, module_dropout, - sdxl_cache_text_encoder_outputs, - sdxl_no_half_vae, - full_bf16, - min_timestep, - max_timestep, - vae, - dynamo_backend, - dynamo_mode, - dynamo_use_fullgraph, - dynamo_use_dynamic, - extra_accelerate_launch_args, LyCORIS_preset, - debiased_estimation_loss, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, + + # huggingface section 
huggingface_repo_id, huggingface_token, huggingface_repo_type, @@ -236,14 +255,14 @@ def save_configuration( save_state_to_huggingface, resume_from_huggingface, async_upload, + + # metadata section metadata_author, metadata_description, metadata_license, metadata_tags, metadata_title, - loraplus_lr_ratio, - loraplus_text_encoder_lr_ratio, - loraplus_unet_lr_ratio, + # Flux1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, @@ -308,16 +327,27 @@ def open_configuration( ask_for_file, apply_preset, file_path, + + # source model section pretrained_model_name_or_path, v2, v_parameterization, sdxl, flux1_checkbox, - logging_dir, + dataset_config, + save_model_as, + save_precision, train_data_dir, + output_name, + model_list, + training_comment, + + # folders section + logging_dir, reg_data_dir, output_dir, - dataset_config, + + # basic training section max_resolution, learning_rate, lr_scheduler, @@ -325,57 +355,60 @@ def open_configuration( train_batch_size, epoch, save_every_n_epochs, - mixed_precision, - save_precision, seed, - num_cpu_threads_per_process, cache_latents, cache_latents_to_disk, caption_extension, enable_bucket, + stop_text_encoder_training, + min_bucket_reso, + max_bucket_reso, + max_train_epochs, + max_train_steps, + lr_scheduler_num_cycles, + lr_scheduler_power, + optimizer, + optimizer_args, + lr_scheduler_args, + lr_scheduler_type, + max_grad_norm, + + # accelerate launch section + mixed_precision, + num_cpu_threads_per_process, + num_processes, + num_machines, + multi_gpu, + gpu_ids, + main_process_port, + dynamo_backend, + dynamo_mode, + dynamo_use_fullgraph, + dynamo_use_dynamic, + extra_accelerate_launch_args, + + ### advanced training section gradient_checkpointing, fp8_base, fp8_base_unet, full_fp16, highvram, lowvram, - stop_text_encoder_training, - min_bucket_reso, - max_bucket_reso, xformers, - save_model_as, shuffle_caption, save_state, save_state_on_train_end, resume, prior_loss_weight, - text_encoder_lr, - unet_lr, - network_dim, - network_weights, - dim_from_weights, color_aug, flip_aug, masked_loss, clip_skip, - num_processes, - num_machines, - multi_gpu, - gpu_ids, - main_process_port, gradient_accumulation_steps, mem_eff_attn, - output_name, - model_list, max_token_length, - max_train_epochs, - max_train_steps, max_data_loader_n_workers, - network_alpha, - training_comment, keep_tokens, - lr_scheduler_num_cycles, - lr_scheduler_power, persistent_data_loader_workers, bucket_no_upscale, random_crop, @@ -383,11 +416,6 @@ def open_configuration( v_pred_like_loss, caption_dropout_every_n_epochs, caption_dropout_rate, - optimizer, - optimizer_args, - lr_scheduler_args, - lr_scheduler_type, - max_grad_norm, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -396,6 +424,40 @@ def open_configuration( multires_noise_discount, ip_noise_gamma, ip_noise_gamma_random_strength, + additional_parameters, + loss_type, + huber_schedule, + huber_c, + vae_batch_size, + min_snr_gamma, + save_every_n_steps, + save_last_n_steps, + save_last_n_steps_state, + log_with, + wandb_api_key, + wandb_run_name, + log_tracker_name, + log_tracker_config, + log_config, + scale_v_pred_loss_like_noise_pred, + full_bf16, + min_timestep, + max_timestep, + vae, + weighted_captions, + debiased_estimation_loss, + + # sdxl parameters section + sdxl_cache_text_encoder_outputs, + sdxl_no_half_vae, + + ### + text_encoder_lr, + unet_lr, + network_dim, + network_weights, + dim_from_weights, + network_alpha, LoRA_type, factor, bypass_mode, @@ -415,12 +477,6 @@ def 
open_configuration( sample_every_n_epochs, sample_sampler, sample_prompts, - additional_parameters, - loss_type, - huber_schedule, - huber_c, - vae_batch_size, - min_snr_gamma, down_lr_weight, mid_lr_weight, up_lr_weight, @@ -429,35 +485,17 @@ def open_configuration( block_alphas, conv_block_dims, conv_block_alphas, - weighted_captions, unit, - save_every_n_steps, - save_last_n_steps, - save_last_n_steps_state, - log_with, - wandb_api_key, - wandb_run_name, - log_tracker_name, - log_tracker_config, - log_config, - scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, rank_dropout, module_dropout, - sdxl_cache_text_encoder_outputs, - sdxl_no_half_vae, - full_bf16, - min_timestep, - max_timestep, - vae, - dynamo_backend, - dynamo_mode, - dynamo_use_fullgraph, - dynamo_use_dynamic, - extra_accelerate_launch_args, LyCORIS_preset, - debiased_estimation_loss, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, + + # huggingface section huggingface_repo_id, huggingface_token, huggingface_repo_type, @@ -466,14 +504,15 @@ def open_configuration( save_state_to_huggingface, resume_from_huggingface, async_upload, + + # metadata section metadata_author, metadata_description, metadata_license, metadata_tags, metadata_title, - loraplus_lr_ratio, - loraplus_text_encoder_lr_ratio, - loraplus_unet_lr_ratio, + + # Flux1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, ae, @@ -491,6 +530,7 @@ def open_configuration( split_qkv, train_t5xxl, cpu_offload_checkpointing, + ## training_preset, ): # Get list of function parameters and their values @@ -568,16 +608,27 @@ def open_configuration( def train_model( headless, print_only, + + # source model section pretrained_model_name_or_path, v2, v_parameterization, sdxl, flux1_checkbox, - logging_dir, + dataset_config, + save_model_as, + save_precision, train_data_dir, + output_name, + model_list, + training_comment, + + # folders section + logging_dir, reg_data_dir, output_dir, - dataset_config, + + # basic training section max_resolution, learning_rate, lr_scheduler, @@ -585,57 +636,60 @@ def train_model( train_batch_size, epoch, save_every_n_epochs, - mixed_precision, - save_precision, seed, - num_cpu_threads_per_process, cache_latents, cache_latents_to_disk, caption_extension, enable_bucket, + stop_text_encoder_training, + min_bucket_reso, + max_bucket_reso, + max_train_epochs, + max_train_steps, + lr_scheduler_num_cycles, + lr_scheduler_power, + optimizer, + optimizer_args, + lr_scheduler_args, + lr_scheduler_type, + max_grad_norm, + + # accelerate launch section + mixed_precision, + num_cpu_threads_per_process, + num_processes, + num_machines, + multi_gpu, + gpu_ids, + main_process_port, + dynamo_backend, + dynamo_mode, + dynamo_use_fullgraph, + dynamo_use_dynamic, + extra_accelerate_launch_args, + + ### advanced training section gradient_checkpointing, fp8_base, fp8_base_unet, full_fp16, highvram, lowvram, - stop_text_encoder_training_pct, - min_bucket_reso, - max_bucket_reso, xformers, - save_model_as, shuffle_caption, save_state, save_state_on_train_end, resume, prior_loss_weight, - text_encoder_lr, - unet_lr, - network_dim, - network_weights, - dim_from_weights, color_aug, flip_aug, masked_loss, clip_skip, - num_processes, - num_machines, - multi_gpu, - gpu_ids, - main_process_port, gradient_accumulation_steps, mem_eff_attn, - output_name, - model_list, # Keep this. 
Yes, it is unused here but required given the common list used max_token_length, - max_train_epochs, - max_train_steps, max_data_loader_n_workers, - network_alpha, - training_comment, keep_tokens, - lr_scheduler_num_cycles, - lr_scheduler_power, persistent_data_loader_workers, bucket_no_upscale, random_crop, @@ -643,11 +697,6 @@ def train_model( v_pred_like_loss, caption_dropout_every_n_epochs, caption_dropout_rate, - optimizer, - optimizer_args, - lr_scheduler_args, - lr_scheduler_type, - max_grad_norm, noise_offset_type, noise_offset, noise_offset_random_strength, @@ -656,6 +705,40 @@ def train_model( multires_noise_discount, ip_noise_gamma, ip_noise_gamma_random_strength, + additional_parameters, + loss_type, + huber_schedule, + huber_c, + vae_batch_size, + min_snr_gamma, + save_every_n_steps, + save_last_n_steps, + save_last_n_steps_state, + log_with, + wandb_api_key, + wandb_run_name, + log_tracker_name, + log_tracker_config, + log_config, + scale_v_pred_loss_like_noise_pred, + full_bf16, + min_timestep, + max_timestep, + vae, + weighted_captions, + debiased_estimation_loss, + + # sdxl parameters section + sdxl_cache_text_encoder_outputs, + sdxl_no_half_vae, + + ### + text_encoder_lr, + unet_lr, + network_dim, + network_weights, + dim_from_weights, + network_alpha, LoRA_type, factor, bypass_mode, @@ -675,12 +758,6 @@ def train_model( sample_every_n_epochs, sample_sampler, sample_prompts, - additional_parameters, - loss_type, - huber_schedule, - huber_c, - vae_batch_size, - min_snr_gamma, down_lr_weight, mid_lr_weight, up_lr_weight, @@ -689,35 +766,17 @@ def train_model( block_alphas, conv_block_dims, conv_block_alphas, - weighted_captions, unit, - save_every_n_steps, - save_last_n_steps, - save_last_n_steps_state, - log_with, - wandb_api_key, - wandb_run_name, - log_tracker_name, - log_tracker_config, - log_config, - scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, rank_dropout, module_dropout, - sdxl_cache_text_encoder_outputs, - sdxl_no_half_vae, - full_bf16, - min_timestep, - max_timestep, - vae, - dynamo_backend, - dynamo_mode, - dynamo_use_fullgraph, - dynamo_use_dynamic, - extra_accelerate_launch_args, LyCORIS_preset, - debiased_estimation_loss, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, + + # huggingface section huggingface_repo_id, huggingface_token, huggingface_repo_type, @@ -726,14 +785,15 @@ def train_model( save_state_to_huggingface, resume_from_huggingface, async_upload, + + # metadata section metadata_author, metadata_description, metadata_license, metadata_tags, metadata_title, - loraplus_lr_ratio, - loraplus_text_encoder_lr_ratio, - loraplus_unet_lr_ratio, + + # Flux1 flux1_cache_text_encoder_outputs, flux1_cache_text_encoder_outputs_to_disk, ae, @@ -858,12 +918,12 @@ def train_model( if not os.path.exists(output_dir): os.makedirs(output_dir) - if stop_text_encoder_training_pct > 0: + if stop_text_encoder_training > 0: output_message( msg='Output "stop text encoder training" is not yet supported. 
Ignoring', headless=headless, ) - stop_text_encoder_training_pct = 0 + stop_text_encoder_training = 0 if not print_only and check_if_model_exist( output_name, output_dir, save_model_as, headless=headless @@ -882,11 +942,11 @@ def train_model( ) if max_train_steps > 0: # calculate stop encoder training - if stop_text_encoder_training_pct == 0: + if stop_text_encoder_training == 0: stop_text_encoder_training = 0 else: stop_text_encoder_training = math.ceil( - float(max_train_steps) / 100 * int(stop_text_encoder_training_pct) + float(max_train_steps) / 100 * int(stop_text_encoder_training) ) if lr_warmup != 0: @@ -982,11 +1042,11 @@ def train_model( max_train_steps_info = f"Max train steps: {max_train_steps}" # calculate stop encoder training - if stop_text_encoder_training_pct == 0: + if stop_text_encoder_training == 0: stop_text_encoder_training = 0 else: stop_text_encoder_training = math.ceil( - float(max_train_steps) / 100 * int(stop_text_encoder_training_pct) + float(max_train_steps) / 100 * int(stop_text_encoder_training) ) if lr_warmup != 0: @@ -2386,11 +2446,16 @@ def update_LoRA_settings( source_model.v_parameterization, source_model.sdxl_checkbox, source_model.flux1_checkbox, - folders.logging_dir, + source_model.dataset_config, + source_model.save_model_as, + source_model.save_precision, source_model.train_data_dir, + source_model.output_name, + source_model.model_list, + source_model.training_comment, + folders.logging_dir, folders.reg_data_dir, folders.output_dir, - source_model.dataset_config, basic_training.max_resolution, basic_training.learning_rate, basic_training.lr_scheduler, @@ -2398,58 +2463,56 @@ def update_LoRA_settings( basic_training.train_batch_size, basic_training.epoch, basic_training.save_every_n_epochs, - accelerate_launch.mixed_precision, - source_model.save_precision, basic_training.seed, - accelerate_launch.num_cpu_threads_per_process, basic_training.cache_latents, basic_training.cache_latents_to_disk, basic_training.caption_extension, basic_training.enable_bucket, + basic_training.stop_text_encoder_training, + basic_training.min_bucket_reso, + basic_training.max_bucket_reso, + basic_training.max_train_epochs, + basic_training.max_train_steps, + basic_training.lr_scheduler_num_cycles, + basic_training.lr_scheduler_power, + basic_training.optimizer, + basic_training.optimizer_args, + basic_training.lr_scheduler_args, + basic_training.lr_scheduler_type, + basic_training.max_grad_norm, + accelerate_launch.mixed_precision, + accelerate_launch.num_cpu_threads_per_process, + accelerate_launch.num_processes, + accelerate_launch.num_machines, + accelerate_launch.multi_gpu, + accelerate_launch.gpu_ids, + accelerate_launch.main_process_port, + accelerate_launch.dynamo_backend, + accelerate_launch.dynamo_mode, + accelerate_launch.dynamo_use_fullgraph, + accelerate_launch.dynamo_use_dynamic, + accelerate_launch.extra_accelerate_launch_args, advanced_training.gradient_checkpointing, advanced_training.fp8_base, advanced_training.fp8_base_unet, advanced_training.full_fp16, advanced_training.highvram, advanced_training.lowvram, - # advanced_training.no_token_padding, - basic_training.stop_text_encoder_training, - basic_training.min_bucket_reso, - basic_training.max_bucket_reso, advanced_training.xformers, - source_model.save_model_as, advanced_training.shuffle_caption, advanced_training.save_state, advanced_training.save_state_on_train_end, advanced_training.resume, advanced_training.prior_loss_weight, - text_encoder_lr, - unet_lr, - network_dim, - network_weights, - 
dim_from_weights, advanced_training.color_aug, advanced_training.flip_aug, advanced_training.masked_loss, advanced_training.clip_skip, - accelerate_launch.num_processes, - accelerate_launch.num_machines, - accelerate_launch.multi_gpu, - accelerate_launch.gpu_ids, - accelerate_launch.main_process_port, advanced_training.gradient_accumulation_steps, advanced_training.mem_eff_attn, - source_model.output_name, - source_model.model_list, advanced_training.max_token_length, - basic_training.max_train_epochs, - basic_training.max_train_steps, advanced_training.max_data_loader_n_workers, - network_alpha, - source_model.training_comment, advanced_training.keep_tokens, - basic_training.lr_scheduler_num_cycles, - basic_training.lr_scheduler_power, advanced_training.persistent_data_loader_workers, advanced_training.bucket_no_upscale, advanced_training.random_crop, @@ -2457,11 +2520,6 @@ def update_LoRA_settings( advanced_training.v_pred_like_loss, advanced_training.caption_dropout_every_n_epochs, advanced_training.caption_dropout_rate, - basic_training.optimizer, - basic_training.optimizer_args, - basic_training.lr_scheduler_args, - basic_training.lr_scheduler_type, - basic_training.max_grad_norm, advanced_training.noise_offset_type, advanced_training.noise_offset, advanced_training.noise_offset_random_strength, @@ -2470,6 +2528,36 @@ def update_LoRA_settings( advanced_training.multires_noise_discount, advanced_training.ip_noise_gamma, advanced_training.ip_noise_gamma_random_strength, + advanced_training.additional_parameters, + advanced_training.loss_type, + advanced_training.huber_schedule, + advanced_training.huber_c, + advanced_training.vae_batch_size, + advanced_training.min_snr_gamma, + advanced_training.save_every_n_steps, + advanced_training.save_last_n_steps, + advanced_training.save_last_n_steps_state, + advanced_training.log_with, + advanced_training.wandb_api_key, + advanced_training.wandb_run_name, + advanced_training.log_tracker_name, + advanced_training.log_tracker_config, + advanced_training.log_config, + advanced_training.scale_v_pred_loss_like_noise_pred, + advanced_training.full_bf16, + advanced_training.min_timestep, + advanced_training.max_timestep, + advanced_training.vae, + advanced_training.weighted_captions, + advanced_training.debiased_estimation_loss, + sdxl_params.sdxl_cache_text_encoder_outputs, + sdxl_params.sdxl_no_half_vae, + text_encoder_lr, + unet_lr, + network_dim, + network_weights, + dim_from_weights, + network_alpha, LoRA_type, factor, bypass_mode, @@ -2489,12 +2577,6 @@ def update_LoRA_settings( sample.sample_every_n_epochs, sample.sample_sampler, sample.sample_prompts, - advanced_training.additional_parameters, - advanced_training.loss_type, - advanced_training.huber_schedule, - advanced_training.huber_c, - advanced_training.vae_batch_size, - advanced_training.min_snr_gamma, down_lr_weight, mid_lr_weight, up_lr_weight, @@ -2503,35 +2585,15 @@ def update_LoRA_settings( block_alphas, conv_block_dims, conv_block_alphas, - advanced_training.weighted_captions, unit, - advanced_training.save_every_n_steps, - advanced_training.save_last_n_steps, - advanced_training.save_last_n_steps_state, - advanced_training.log_with, - advanced_training.wandb_api_key, - advanced_training.wandb_run_name, - advanced_training.log_tracker_name, - advanced_training.log_tracker_config, - advanced_training.log_config, - advanced_training.scale_v_pred_loss_like_noise_pred, scale_weight_norms, network_dropout, rank_dropout, module_dropout, - sdxl_params.sdxl_cache_text_encoder_outputs, - 
sdxl_params.sdxl_no_half_vae, - advanced_training.full_bf16, - advanced_training.min_timestep, - advanced_training.max_timestep, - advanced_training.vae, - accelerate_launch.dynamo_backend, - accelerate_launch.dynamo_mode, - accelerate_launch.dynamo_use_fullgraph, - accelerate_launch.dynamo_use_dynamic, - accelerate_launch.extra_accelerate_launch_args, LyCORIS_preset, - advanced_training.debiased_estimation_loss, + loraplus_lr_ratio, + loraplus_text_encoder_lr_ratio, + loraplus_unet_lr_ratio, huggingface.huggingface_repo_id, huggingface.huggingface_token, huggingface.huggingface_repo_type, @@ -2545,9 +2607,6 @@ def update_LoRA_settings( metadata.metadata_license, metadata.metadata_tags, metadata.metadata_title, - loraplus_lr_ratio, - loraplus_text_encoder_lr_ratio, - loraplus_unet_lr_ratio, # Flux1 parameters flux1_training.flux1_cache_text_encoder_outputs, flux1_training.flux1_cache_text_encoder_outputs_to_disk, From 4bae4fc2aa141e0ed445601dde0ae65ba35d248b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 8 Sep 2024 13:20:29 -0400 Subject: [PATCH 103/199] Validate if lora type is Flux1 when flux1_checkbox is true --- kohya_gui/lora_gui.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index be85de68f..9b98375e4 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -836,6 +836,12 @@ def train_model( if not validate_args_setting(optimizer_args): return TRAIN_BUTTON_VISIBLE + if flux1_checkbox: + log.info(f"Validating lora type is Flux1 if flux1 checkbox is checked...") + if LoRA_type != "Flux1": + log.error("LoRA type must be set to Flux1 if Flux1 checkbox is checked.") + return TRAIN_BUTTON_VISIBLE + # # Validate paths # From 63c1e48376c0ad0f14f799a6e3931686f1456eba Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 8 Sep 2024 15:11:20 -0400 Subject: [PATCH 104/199] Improve visual sectioning of parameters for lora --- assets/style.css | 114 ++++++++- kohya_gui/class_flux1.py | 2 +- kohya_gui/lora_gui.py | 491 +++++++++++++++++++-------------------- 3 files changed, 350 insertions(+), 257 deletions(-) diff --git a/assets/style.css b/assets/style.css index 939ac937f..4414083a6 100644 --- a/assets/style.css +++ b/assets/style.css @@ -1,4 +1,4 @@ -#open_folder_small{ +#open_folder_small { min-width: auto; flex-grow: 0; padding-left: 0.25em; @@ -7,14 +7,14 @@ font-size: 1.5em; } -#open_folder{ +#open_folder { height: auto; flex-grow: 0; padding-left: 0.25em; padding-right: 0.25em; } -#number_input{ +#number_input { min-width: min-content; flex-grow: 0.3; padding-left: 0.75em; @@ -22,7 +22,7 @@ } .ver-class { - color: #808080; + color: #6d6d6d; /* Neutral dark gray */ font-size: small; text-align: right; padding-right: 1em; @@ -35,13 +35,107 @@ } #myTensorButton { - background: radial-gradient(ellipse, #3a99ff, #52c8ff); + background: #555c66; /* Muted dark gray */ color: white; - border: #296eb8; + border: none; + border-radius: 4px; + padding: 0.5em 1em; + /* box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); Subtle shadow */ + /* transition: box-shadow 0.3s ease; */ +} + +#myTensorButton:hover { + /* box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15); Slightly increased shadow on hover */ } #myTensorButtonStop { - background: radial-gradient(ellipse, #52c8ff, #3a99ff); - color: black; - border: #296eb8; -} \ No newline at end of file + background: #777d85; /* Lighter muted gray */ + color: white; + border: none; + border-radius: 4px; + padding: 0.5em 1em; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); + /* transition: box-shadow 0.3s ease; */ +} 
+ +#myTensorButtonStop:hover { + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15); +} + +.advanced_background { + background: #f4f4f4; /* Light neutral gray */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; /* Added transition for smooth shadow effect */ +} + +.advanced_background:hover { + background-color: #ebebeb; /* Slightly darker background on hover */ + border: 1px solid #ccc; /* Add a subtle border */ + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.basic_background { + background: #eaeff1; /* Muted cool gray */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.basic_background:hover { + background-color: #dfe4e7; /* Slightly darker cool gray on hover */ + border: 1px solid #ccc; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.huggingface_background { + background: #e0e4e7; /* Light gray with a hint of blue */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.huggingface_background:hover { + background-color: #d6dce0; /* Slightly darker on hover */ + border: 1px solid #bbb; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.flux1_background { + background: #ece9e6; /* Light beige tone */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.flux1_background:hover { + background-color: #e2dfdb; /* Slightly darker beige on hover */ + border: 1px solid #ccc; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.preset_background { + background: #f0f0f0; /* Light gray */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.preset_background:hover { + background-color: #e6e6e6; /* Slightly darker on hover */ + border: 1px solid #ccc; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.samples_background { + background: #d9dde1; /* Soft muted gray-blue */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.samples_background:hover { + background-color: #cfd3d8; /* Slightly darker on hover */ + border: 1px solid #bbb; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 7e157773f..da517d4f2 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -31,7 +31,7 @@ def noise_offset_type_change( return (gr.Group(visible=False), gr.Group(visible=True)) with gr.Accordion( - "Flux.1", open=True, elem_id="flux1_tab", visible=False + "Flux.1", open=True, visible=False, elem_classes=["flux1_background"] ) as flux1_accordion: with gr.Group(): with gr.Row(): diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 9b98375e4..6323977f4 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1608,271 +1608,270 @@ def list_presets(path): json_files.append(os.path.join("user_presets", preset_name)) return json_files - + training_preset = gr.Dropdown( label="Presets", choices=["none"] + list_presets(rf"{presets_dir}/lora"), - # elem_id="myDropdown", value="none", + elem_classes=["preset_background"], ) - with gr.Accordion("Basic", open="True"): - with gr.Group(elem_id="basic_tab"): - with gr.Row(): - 
LoRA_type = gr.Dropdown( - label="LoRA type", - choices=[ - "Flux1", - "Kohya DyLoRA", - "Kohya LoCon", - "LoRA-FA", - "LyCORIS/iA3", - "LyCORIS/BOFT", - "LyCORIS/Diag-OFT", - "LyCORIS/DyLoRA", - "LyCORIS/GLoRA", - "LyCORIS/LoCon", - "LyCORIS/LoHa", - "LyCORIS/LoKr", - "LyCORIS/Native Fine-Tuning", - "Standard", - ], - value="Standard", - ) - LyCORIS_preset = gr.Dropdown( - label="LyCORIS Preset", - choices=LYCORIS_PRESETS_CHOICES, - value="full", - visible=False, - interactive=True, - allow_custom_value=True, - # info="https://github.com/KohakuBlueleaf/LyCORIS/blob/0006e2ffa05a48d8818112d9f70da74c0cd30b99/docs/Preset.md" - ) - with gr.Group(): - with gr.Row(): - network_weights = gr.Textbox( - label="Network weights", - placeholder="(Optional)", - info="Path to an existing LoRA network weights to resume training from", - ) - network_weights_file = gr.Button( - document_symbol, - elem_id="open_folder_small", - elem_classes=["tool"], - visible=(not headless), - ) - network_weights_file.click( - get_any_file_path, - inputs=[network_weights], - outputs=network_weights, - show_progress=False, - ) - dim_from_weights = gr.Checkbox( - label="DIM from weights", - value=False, - info="Automatically determine the dim(rank) from the weight file.", - ) - basic_training = BasicTraining( - learning_rate_value=0.0001, - lr_scheduler_value="cosine", - lr_warmup_value=10, - sdxl_checkbox=source_model.sdxl_checkbox, - config=config, + with gr.Accordion("Basic", open="True", elem_classes=["basic_background"]): + with gr.Row(): + LoRA_type = gr.Dropdown( + label="LoRA type", + choices=[ + "Flux1", + "Kohya DyLoRA", + "Kohya LoCon", + "LoRA-FA", + "LyCORIS/iA3", + "LyCORIS/BOFT", + "LyCORIS/Diag-OFT", + "LyCORIS/DyLoRA", + "LyCORIS/GLoRA", + "LyCORIS/LoCon", + "LyCORIS/LoHa", + "LyCORIS/LoKr", + "LyCORIS/Native Fine-Tuning", + "Standard", + ], + value="Standard", ) - - with gr.Row(): - text_encoder_lr = gr.Number( - label="Text Encoder learning rate", - value=0.0001, - info="(Optional)", - minimum=0, - maximum=1, - ) - - unet_lr = gr.Number( - label="Unet learning rate", - value=0.0001, - info="(Optional)", - minimum=0, - maximum=1, - ) - - with gr.Row() as loraplus: - loraplus_lr_ratio = gr.Number( - label="LoRA+ learning rate ratio", - value=0, - info="(Optional) starting with 16 is suggested", - minimum=0, - maximum=128, - ) - - loraplus_unet_lr_ratio = gr.Number( - label="LoRA+ Unet learning rate ratio", - value=0, - info="(Optional) starting with 16 is suggested", - minimum=0, - maximum=128, - ) - - loraplus_text_encoder_lr_ratio = gr.Number( - label="LoRA+ Text Encoder learning rate ratio", - value=0, - info="(Optional) starting with 16 is suggested", - minimum=0, - maximum=128, - ) - # Add SDXL Parameters - sdxl_params = SDXLParameters( - source_model.sdxl_checkbox, config=config + LyCORIS_preset = gr.Dropdown( + label="LyCORIS Preset", + choices=LYCORIS_PRESETS_CHOICES, + value="full", + visible=False, + interactive=True, + allow_custom_value=True, + # info="https://github.com/KohakuBlueleaf/LyCORIS/blob/0006e2ffa05a48d8818112d9f70da74c0cd30b99/docs/Preset.md" ) - - # LyCORIS Specific parameters - with gr.Accordion("LyCORIS", visible=False) as lycoris_accordion: - with gr.Row(): - factor = gr.Slider( - label="LoKr factor", - value=-1, - minimum=-1, - maximum=64, - step=1, - visible=False, - ) - bypass_mode = gr.Checkbox( - value=False, - label="Bypass mode", - info="Designed for bnb 8bit/4bit linear layer. 
(QLyCORIS)", - visible=False, - ) - dora_wd = gr.Checkbox( - value=False, - label="DoRA Weight Decompose", - info="Enable the DoRA method for these algorithms", - visible=False, - ) - use_cp = gr.Checkbox( - value=False, - label="Use CP decomposition", - info="A two-step approach utilizing tensor decomposition and fine-tuning to accelerate convolution layers in large neural networks, resulting in significant CPU speedups with minor accuracy drops.", - visible=False, - ) - use_tucker = gr.Checkbox( - value=False, - label="Use Tucker decomposition", - info="Efficiently decompose tensor shapes, resulting in a sequence of convolution layers with varying dimensions and Hadamard product implementation through multiplication of two distinct tensors.", - visible=False, - ) - use_scalar = gr.Checkbox( - value=False, - label="Use Scalar", - info="Train an additional scalar in front of the weight difference, use a different weight initialization strategy.", - visible=False, - ) + with gr.Group(): with gr.Row(): - rank_dropout_scale = gr.Checkbox( - value=False, - label="Rank Dropout Scale", - info="Adjusts the scale of the rank dropout to maintain the average dropout rate, ensuring more consistent regularization across different layers.", - visible=False, - ) - constrain = gr.Number( - value=0.0, - label="Constrain OFT", - info="Limits the norm of the oft_blocks, ensuring that their magnitude does not exceed a specified threshold, thus controlling the extent of the transformation applied.", - visible=False, + network_weights = gr.Textbox( + label="Network weights", + placeholder="(Optional)", + info="Path to an existing LoRA network weights to resume training from", ) - rescaled = gr.Checkbox( - value=False, - label="Rescaled OFT", - info="applies an additional scaling factor to the oft_blocks, allowing for further adjustment of their impact on the model's transformations.", - visible=False, + network_weights_file = gr.Button( + document_symbol, + elem_id="open_folder_small", + elem_classes=["tool"], + visible=(not headless), ) - train_norm = gr.Checkbox( - value=False, - label="Train Norm", - info="Selects trainable layers in a network, but trains normalization layers identically across methods as they lack matrix decomposition.", - visible=False, + network_weights_file.click( + get_any_file_path, + inputs=[network_weights], + outputs=network_weights, + show_progress=False, ) - decompose_both = gr.Checkbox( + dim_from_weights = gr.Checkbox( + label="DIM from weights", value=False, - label="LoKr decompose both", - info="Controls whether both input and output dimensions of the layer's weights are decomposed into smaller matrices for reparameterization.", - visible=False, - ) - train_on_input = gr.Checkbox( - value=True, - label="iA3 train on input", - info="Set if we change the information going into the system (True) or the information coming out of it (False).", - visible=False, + info="Automatically determine the dim(rank) from the weight file.", ) - with gr.Row() as network_row: - network_dim = gr.Slider( - minimum=1, - maximum=512, - label="Network Rank (Dimension)", - value=8, + basic_training = BasicTraining( + learning_rate_value=0.0001, + lr_scheduler_value="cosine", + lr_warmup_value=10, + sdxl_checkbox=source_model.sdxl_checkbox, + config=config, + ) + + with gr.Row(): + text_encoder_lr = gr.Number( + label="Text Encoder learning rate", + value=0.0001, + info="(Optional)", + minimum=0, + maximum=1, + ) + + unet_lr = gr.Number( + label="Unet learning rate", + value=0.0001, + 
info="(Optional)", + minimum=0, + maximum=1, + ) + + with gr.Row() as loraplus: + loraplus_lr_ratio = gr.Number( + label="LoRA+ learning rate ratio", + value=0, + info="(Optional) starting with 16 is suggested", + minimum=0, + maximum=128, + ) + + loraplus_unet_lr_ratio = gr.Number( + label="LoRA+ Unet learning rate ratio", + value=0, + info="(Optional) starting with 16 is suggested", + minimum=0, + maximum=128, + ) + + loraplus_text_encoder_lr_ratio = gr.Number( + label="LoRA+ Text Encoder learning rate ratio", + value=0, + info="(Optional) starting with 16 is suggested", + minimum=0, + maximum=128, + ) + # Add SDXL Parameters + sdxl_params = SDXLParameters( + source_model.sdxl_checkbox, config=config + ) + + # LyCORIS Specific parameters + with gr.Accordion("LyCORIS", visible=False) as lycoris_accordion: + with gr.Row(): + factor = gr.Slider( + label="LoKr factor", + value=-1, + minimum=-1, + maximum=64, step=1, - interactive=True, + visible=False, ) - network_alpha = gr.Slider( - minimum=0.00001, - maximum=1024, - label="Network Alpha", - value=1, - step=0.00001, - interactive=True, - info="alpha for LoRA weight scaling", + bypass_mode = gr.Checkbox( + value=False, + label="Bypass mode", + info="Designed for bnb 8bit/4bit linear layer. (QLyCORIS)", + visible=False, ) - with gr.Row(visible=False) as convolution_row: - # locon= gr.Checkbox(label='Train a LoCon instead of a general LoRA (does not support v2 base models) (may not be able to some utilities now)', value=False) - conv_dim = gr.Slider( - minimum=0, - maximum=512, - value=1, - step=1, - label="Convolution Rank (Dimension)", + dora_wd = gr.Checkbox( + value=False, + label="DoRA Weight Decompose", + info="Enable the DoRA method for these algorithms", + visible=False, ) - conv_alpha = gr.Slider( - minimum=0, - maximum=512, - value=1, - step=1, - label="Convolution Alpha", + use_cp = gr.Checkbox( + value=False, + label="Use CP decomposition", + info="A two-step approach utilizing tensor decomposition and fine-tuning to accelerate convolution layers in large neural networks, resulting in significant CPU speedups with minor accuracy drops.", + visible=False, ) - with gr.Row(): - scale_weight_norms = gr.Slider( - label="Scale weight norms", - value=0, - minimum=0, - maximum=10, - step=0.01, - info="Max Norm Regularization is a technique to stabilize network training by limiting the norm of network weights. It may be effective in suppressing overfitting of LoRA and improving stability when used with other LoRAs. See PR #545 on kohya_ss/sd_scripts repo for details. Recommended setting: 1. Higher is weaker, lower is stronger.", - interactive=True, + use_tucker = gr.Checkbox( + value=False, + label="Use Tucker decomposition", + info="Efficiently decompose tensor shapes, resulting in a sequence of convolution layers with varying dimensions and Hadamard product implementation through multiplication of two distinct tensors.", + visible=False, ) - network_dropout = gr.Slider( - label="Network dropout", - value=0, - minimum=0, - maximum=1, - step=0.01, - info="Is a normal probability dropout at the neuron level. In the case of LoRA, it is applied to the output of down. 
Recommended range 0.1 to 0.5", + use_scalar = gr.Checkbox( + value=False, + label="Use Scalar", + info="Train an additional scalar in front of the weight difference, use a different weight initialization strategy.", + visible=False, ) - rank_dropout = gr.Slider( - label="Rank dropout", - value=0, - minimum=0, - maximum=1, - step=0.01, - info="can specify `rank_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3", + with gr.Row(): + rank_dropout_scale = gr.Checkbox( + value=False, + label="Rank Dropout Scale", + info="Adjusts the scale of the rank dropout to maintain the average dropout rate, ensuring more consistent regularization across different layers.", + visible=False, ) - module_dropout = gr.Slider( - label="Module dropout", + constrain = gr.Number( value=0.0, - minimum=0.0, - maximum=1.0, - step=0.01, - info="can specify `module_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3", + label="Constrain OFT", + info="Limits the norm of the oft_blocks, ensuring that their magnitude does not exceed a specified threshold, thus controlling the extent of the transformation applied.", + visible=False, + ) + rescaled = gr.Checkbox( + value=False, + label="Rescaled OFT", + info="applies an additional scaling factor to the oft_blocks, allowing for further adjustment of their impact on the model's transformations.", + visible=False, + ) + train_norm = gr.Checkbox( + value=False, + label="Train Norm", + info="Selects trainable layers in a network, but trains normalization layers identically across methods as they lack matrix decomposition.", + visible=False, + ) + decompose_both = gr.Checkbox( + value=False, + label="LoKr decompose both", + info="Controls whether both input and output dimensions of the layer's weights are decomposed into smaller matrices for reparameterization.", + visible=False, ) - with gr.Row(visible=False): + train_on_input = gr.Checkbox( + value=True, + label="iA3 train on input", + info="Set if we change the information going into the system (True) or the information coming out of it (False).", + visible=False, + ) + with gr.Row() as network_row: + network_dim = gr.Slider( + minimum=1, + maximum=512, + label="Network Rank (Dimension)", + value=8, + step=1, + interactive=True, + ) + network_alpha = gr.Slider( + minimum=0.00001, + maximum=1024, + label="Network Alpha", + value=1, + step=0.00001, + interactive=True, + info="alpha for LoRA weight scaling", + ) + with gr.Row(visible=False) as convolution_row: + # locon= gr.Checkbox(label='Train a LoCon instead of a general LoRA (does not support v2 base models) (may not be able to some utilities now)', value=False) + conv_dim = gr.Slider( + minimum=0, + maximum=512, + value=1, + step=1, + label="Convolution Rank (Dimension)", + ) + conv_alpha = gr.Slider( + minimum=0, + maximum=512, + value=1, + step=1, + label="Convolution Alpha", + ) + with gr.Row(): + scale_weight_norms = gr.Slider( + label="Scale weight norms", + value=0, + minimum=0, + maximum=10, + step=0.01, + info="Max Norm Regularization is a technique to stabilize network training by limiting the norm of network weights. It may be effective in suppressing overfitting of LoRA and improving stability when used with other LoRAs. See PR #545 on kohya_ss/sd_scripts repo for details. Recommended setting: 1. 
Higher is weaker, lower is stronger.", + interactive=True, + ) + network_dropout = gr.Slider( + label="Network dropout", + value=0, + minimum=0, + maximum=1, + step=0.01, + info="Is a normal probability dropout at the neuron level. In the case of LoRA, it is applied to the output of down. Recommended range 0.1 to 0.5", + ) + rank_dropout = gr.Slider( + label="Rank dropout", + value=0, + minimum=0, + maximum=1, + step=0.01, + info="can specify `rank_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3", + ) + module_dropout = gr.Slider( + label="Module dropout", + value=0.0, + minimum=0.0, + maximum=1.0, + step=0.01, + info="can specify `module_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3", + ) + with gr.Row(visible=False): unit = gr.Slider( minimum=1, maximum=64, @@ -2324,15 +2323,15 @@ def update_LoRA_settings( return tuple(results) - with gr.Group(): - # Add FLUX1 Parameters + # Add FLUX1 Parameters to the basic training accordion flux1_training = flux1Training( headless=headless, config=config, flux1_checkbox=source_model.flux1_checkbox, ) + - with gr.Accordion("Advanced", open=False, elem_id="advanced_tab"): + with gr.Accordion("Advanced", open=False, elem_classes="advanced_background"): # with gr.Accordion('Advanced Configuration', open=False): with gr.Row(visible=True) as kohya_advanced_lora: with gr.Tab(label="Weights"): @@ -2390,11 +2389,11 @@ def update_LoRA_settings( outputs=[basic_training.cache_latents], ) - with gr.Accordion("Samples", open=False, elem_id="samples_tab"): + with gr.Accordion("Samples", open=False, elem_classes="samples_background"): sample = SampleImages(config=config) global huggingface - with gr.Accordion("HuggingFace", open=False): + with gr.Accordion("HuggingFace", open=False, elem_classes="huggingface_background"): huggingface = HuggingFace(config=config) LoRA_type.change( From 968a8f0fb6966dda14853b9622922de2cda744ef Mon Sep 17 00:00:00 2001 From: wcole3 Date: Tue, 10 Sep 2024 20:49:02 -0400 Subject: [PATCH 105/199] Add dark mode styles --- assets/style.css | 80 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/assets/style.css b/assets/style.css index 4414083a6..5c18fb264 100644 --- a/assets/style.css +++ b/assets/style.css @@ -139,3 +139,83 @@ border: 1px solid #bbb; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ } + +/* Dark mode styles */ +.dark .advanced_background { + background: #172029; /* Slightly darker gradio dark theme */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; /* Added transition for smooth shadow effect */ +} + +.dark .advanced_background:hover { + background-color: #121920; /* Slightly darker background on hover */ + border: 1px solid #000000; /* Add a subtle border */ + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.dark .basic_background { + background: #172029; + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.dark .basic_background:hover { + background-color: #11181e; + border: 1px solid #000000; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.dark .huggingface_background { + background: #131c25; + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.dark .huggingface_background:hover { + background-color: 
#131c25; + border: 1px solid #000000; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.dark .flux1_background { + background: #ece9e6; + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.dark .flux1_background:hover { + background-color: #131c25; + border: 1px solid #000000; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.dark .preset_background { + background: #191d25; + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.dark .preset_background:hover { + background-color: #212530; + border: 1px solid #000000; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.dark .samples_background { + background: #101e2c; + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.dark .samples_background:hover { + background-color: #17293a; + border: 1px solid #000000; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + From 7531b36cbde9bd152c45d877a459437c6c82d5f0 Mon Sep 17 00:00:00 2001 From: wcole3 Date: Tue, 10 Sep 2024 21:09:17 -0400 Subject: [PATCH 106/199] Missed one color --- assets/style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/assets/style.css b/assets/style.css index 5c18fb264..41c2c4385 100644 --- a/assets/style.css +++ b/assets/style.css @@ -181,7 +181,7 @@ } .dark .flux1_background { - background: #ece9e6; + background: #131c25; padding: 1em; border-radius: 8px; transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; From f365b630ff8b5f32f511ba8501858544a4d33c04 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 11 Sep 2024 19:17:37 -0400 Subject: [PATCH 107/199] Update sd-scripts and add support for t5xxl LR --- kohya_gui/lora_gui.py | 32 +++++++++++++++++++++++++++++--- sd-scripts | 2 +- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 6323977f4..0ed2a1842 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -204,6 +204,7 @@ def save_configuration( ### text_encoder_lr, + t5xxl_lr, unet_lr, network_dim, network_weights, @@ -453,6 +454,7 @@ def open_configuration( ### text_encoder_lr, + t5xxl_lr, unet_lr, network_dim, network_weights, @@ -734,6 +736,7 @@ def train_model( ### text_encoder_lr, + t5xxl_lr, unet_lr, network_dim, network_weights, @@ -1249,6 +1252,20 @@ def train_model( if value: network_args += f" {key}={value}" + # Set the text_encoder_lr to multiple values if both text_encoder_lr and t5xxl_lr are set + if text_encoder_lr == 0 and t5xxl_lr > 0: + log.error("When specifying T5XXL learning rate, text encoder learning rate need to be a value greater than 0.") + return TRAIN_BUTTON_VISIBLE + + text_encoder_lr_str = "" + + if text_encoder_lr > 0 and t5xxl_lr > 0: + # Set the text_encoder_lr to a combination of text_encoder_lr and t5xxl_lr + text_encoder_lr_str = f"{text_encoder_lr} {t5xxl_lr}" + elif text_encoder_lr > 0: + # Set the text_encoder_lr to text_encoder_lr only + text_encoder_lr_str = f"{text_encoder_lr}" + # Convert learning rates to float once and store the result for re-use learning_rate = float(learning_rate) if learning_rate is not None else 0.0 text_encoder_lr_float = ( @@ -1427,7 +1444,7 @@ def train_model( "stop_text_encoder_training": ( stop_text_encoder_training if stop_text_encoder_training != 0 else 
None ), - "text_encoder_lr": text_encoder_lr if not 0 else None, + "text_encoder_lr": text_encoder_lr_str if not 0 else None, "train_batch_size": train_batch_size, "train_data_dir": train_data_dir, "training_comment": training_comment, @@ -1682,8 +1699,16 @@ def list_presets(path): with gr.Row(): text_encoder_lr = gr.Number( label="Text Encoder learning rate", - value=0.0001, - info="(Optional)", + value=0, + info="(Optional) Set CLIP-L and T5XXL learning rates.", + minimum=0, + maximum=1, + ) + + t5xxl_lr = gr.Number( + label="T5XXL learning rate", + value=0, + info="(Optional) Override the T5XXL learning rate set by the Text Encoder learning rate if you desire a different one.", minimum=0, maximum=1, ) @@ -2558,6 +2583,7 @@ def update_LoRA_settings( sdxl_params.sdxl_cache_text_encoder_outputs, sdxl_params.sdxl_no_half_vae, text_encoder_lr, + t5xxl_lr, unet_lr, network_dim, network_weights, diff --git a/sd-scripts b/sd-scripts index ce144476c..237317fff 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit ce144476cfcf63ff1e0297c3b3f639e9a3260d4a +Subproject commit 237317fffd060bcfb078b770ccd2df18bc4dd3a6 From 3cc33f497f2f2f72e0a700dbc22c372a8716f703 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 11 Sep 2024 19:39:53 -0400 Subject: [PATCH 108/199] Update transformers and wandb module --- kohya_gui/class_basic_training.py | 3 +++ requirements.txt | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index c3537eeb4..a202cfbe2 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -164,6 +164,9 @@ def init_lr_and_optimizer_controls(self) -> None: "linear", "piecewise_constant", "polynomial", + "COSINE_WITH_MIN_LR", + "INVERSE_SQRT", + "WARMUP_STABLE_DECAY", ], value=self.config.get("basic.lr_scheduler", self.lr_scheduler_value), ) diff --git a/requirements.txt b/requirements.txt index a97455ac2..6d86fcaa7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,9 +30,9 @@ sentencepiece==0.2.0 timm==0.6.12 tk==0.1.0 toml==0.10.2 -transformers==4.44.0 +transformers==4.44.2 voluptuous==0.13.1 -wandb==0.15.11 +wandb==0.18.0 scipy==1.11.4 # for kohya_ss library -e ./sd-scripts # no_verify leave this to specify not checking this a verification stage From 7c190edef8bf4e9207fb7e91d39b360e39c1e8fe Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 12 Sep 2024 07:03:45 -0400 Subject: [PATCH 109/199] Fix issue with new text_encoder_lr parameter syntax --- kohya_gui/lora_gui.py | 8 ++++---- sd-scripts | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 0ed2a1842..6614cf0db 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1257,14 +1257,14 @@ def train_model( log.error("When specifying T5XXL learning rate, text encoder learning rate need to be a value greater than 0.") return TRAIN_BUTTON_VISIBLE - text_encoder_lr_str = "" + text_encoder_lr_list = [] if text_encoder_lr > 0 and t5xxl_lr > 0: # Set the text_encoder_lr to a combination of text_encoder_lr and t5xxl_lr - text_encoder_lr_str = f"{text_encoder_lr} {t5xxl_lr}" + text_encoder_lr_list = [float(text_encoder_lr), float(t5xxl_lr)] elif text_encoder_lr > 0: # Set the text_encoder_lr to text_encoder_lr only - text_encoder_lr_str = f"{text_encoder_lr}" + text_encoder_lr_list = [float(text_encoder_lr), float(text_encoder_lr)] # Convert learning rates to float once and store the result for re-use learning_rate = 
float(learning_rate) if learning_rate is not None else 0.0 @@ -1444,7 +1444,7 @@ def train_model( "stop_text_encoder_training": ( stop_text_encoder_training if stop_text_encoder_training != 0 else None ), - "text_encoder_lr": text_encoder_lr_str if not 0 else None, + "text_encoder_lr": text_encoder_lr_list if not [] else None, "train_batch_size": train_batch_size, "train_data_dir": train_data_dir, "training_comment": training_comment, diff --git a/sd-scripts b/sd-scripts index 237317fff..cefe52629 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 237317fffd060bcfb078b770ccd2df18bc4dd3a6 +Subproject commit cefe52629e1901dd8192b0487afd5e9f089e3519 From e655ebf2ddcd6910b01ecca1e5ed8fedbee553f8 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 12 Sep 2024 20:56:05 -0400 Subject: [PATCH 110/199] Add support for lr_warmup_steps override --- kohya_gui/class_basic_training.py | 22 ++++++++++++++++++---- kohya_gui/dreambooth_gui.py | 18 ++++++++++-------- kohya_gui/finetune_gui.py | 9 ++++++++- kohya_gui/lora_gui.py | 18 ++++++++++-------- kohya_gui/textual_inversion_gui.py | 18 ++++++++++-------- 5 files changed, 56 insertions(+), 29 deletions(-) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index a202cfbe2..1a5a25db2 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -25,6 +25,7 @@ def __init__( learning_rate_value: float = "1e-6", lr_scheduler_value: str = "constant", lr_warmup_value: float = "0", + lr_warmup_steps_value: int = 0, finetuning: bool = False, dreambooth: bool = False, config: dict = {}, @@ -44,6 +45,7 @@ def __init__( self.learning_rate_value = learning_rate_value self.lr_scheduler_value = lr_scheduler_value self.lr_warmup_value = lr_warmup_value + self.lr_warmup_steps_value= lr_warmup_steps_value self.finetuning = finetuning self.dreambooth = dreambooth self.config = config @@ -299,25 +301,37 @@ def init_learning_rate_controls(self) -> None: maximum=100, step=1, ) + # Initialize the learning rate warmup steps override + self.lr_warmup_steps = gr.Number( + label="LR warmup steps (override)", + value=self.config.get("basic.lr_warmup_steps", self.lr_warmup_steps_value), + minimum=0, + step=1, + ) - def lr_scheduler_changed(scheduler, value): + def lr_scheduler_changed(scheduler, value, value_lr_warmup_steps): if scheduler == "constant": self.old_lr_warmup = value + self.old_lr_warmup_steps = value_lr_warmup_steps value = 0 + value_lr_warmup_steps = 0 interactive=False info="Can't use LR warmup with LR Scheduler constant... setting to 0 and disabling field..." 
else: if self.old_lr_warmup != 0: value = self.old_lr_warmup self.old_lr_warmup = 0 + if self.old_lr_warmup_steps != 0: + value_lr_warmup_steps = self.old_lr_warmup_steps + self.old_lr_warmup_steps = 0 interactive=True info="" - return gr.Slider(value=value, interactive=interactive, info=info) + return gr.Slider(value=value, interactive=interactive, info=info), gr.Number(value=value_lr_warmup_steps, interactive=interactive, info=info) self.lr_scheduler.change( lr_scheduler_changed, - inputs=[self.lr_scheduler, self.lr_warmup], - outputs=self.lr_warmup, + inputs=[self.lr_scheduler, self.lr_warmup, self.lr_warmup_steps], + outputs=[self.lr_warmup, self.lr_warmup_steps], ) def init_scheduler_controls(self) -> None: diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 169d98344..626979a58 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -78,6 +78,7 @@ def save_configuration( learning_rate_te2, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -281,6 +282,7 @@ def open_configuration( learning_rate_te2, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -479,6 +481,7 @@ def train_model( learning_rate_te2, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -719,12 +722,10 @@ def train_model( "Dataset config toml file used, skipping total_steps, train_batch_size, gradient_accumulation_steps, epoch, reg_factor, max_train_steps calculations..." ) if max_train_steps > 0: - if lr_warmup != 0: - lr_warmup_steps = round( - float(int(lr_warmup) * int(max_train_steps) / 100) - ) + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) else: - lr_warmup_steps = 0 + lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 else: lr_warmup_steps = 0 @@ -809,10 +810,10 @@ def train_model( else: max_train_steps_info = f"Max train steps: {max_train_steps}" - if lr_warmup != 0: - lr_warmup_steps = round(float(int(lr_warmup) * int(max_train_steps) / 100)) + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) else: - lr_warmup_steps = 0 + lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 log.info(f"Total steps: {total_steps}") @@ -1260,6 +1261,7 @@ def dreambooth_tab( basic_training.learning_rate_te2, basic_training.lr_scheduler, basic_training.lr_warmup, + basic_training.lr_warmup_steps, basic_training.train_batch_size, basic_training.epoch, basic_training.save_every_n_epochs, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index b8e61471e..02b205c2a 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -88,6 +88,7 @@ def save_configuration( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, dataset_repeats, train_batch_size, epoch, @@ -297,6 +298,7 @@ def open_configuration( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, dataset_repeats, train_batch_size, epoch, @@ -512,6 +514,7 @@ def train_model( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, dataset_repeats, train_batch_size, epoch, @@ -866,7 +869,10 @@ def train_model( log.info(max_train_steps_info) if max_train_steps != 0: - lr_warmup_steps = round(float(int(lr_warmup) * int(max_train_steps) / 100)) + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) + else: + lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 else: lr_warmup_steps = 0 log.info(f"lr_warmup_steps = {lr_warmup_steps}") @@ -1380,6 +1386,7 @@ def list_presets(path): 
basic_training.learning_rate, basic_training.lr_scheduler, basic_training.lr_warmup, + basic_training.lr_warmup_steps, dataset_repeats, basic_training.train_batch_size, basic_training.epoch, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 6614cf0db..4f12b5b06 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -103,6 +103,7 @@ def save_configuration( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -353,6 +354,7 @@ def open_configuration( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -635,6 +637,7 @@ def train_model( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -958,12 +961,10 @@ def train_model( float(max_train_steps) / 100 * int(stop_text_encoder_training) ) - if lr_warmup != 0: - lr_warmup_steps = round( - float(int(lr_warmup) * int(max_train_steps) / 100) - ) + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) else: - lr_warmup_steps = 0 + lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 else: stop_text_encoder_training = 0 lr_warmup_steps = 0 @@ -1058,10 +1059,10 @@ def train_model( float(max_train_steps) / 100 * int(stop_text_encoder_training) ) - if lr_warmup != 0: - lr_warmup_steps = round(float(int(lr_warmup) * int(max_train_steps) / 100)) + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) else: - lr_warmup_steps = 0 + lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 log.info(f"Total steps: {total_steps}") @@ -2490,6 +2491,7 @@ def update_LoRA_settings( basic_training.learning_rate, basic_training.lr_scheduler, basic_training.lr_warmup, + basic_training.lr_warmup_steps, basic_training.train_batch_size, basic_training.epoch, basic_training.save_every_n_epochs, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 8c9f804aa..d1806f103 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -70,6 +70,7 @@ def save_configuration( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -232,6 +233,7 @@ def open_configuration( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -387,6 +389,7 @@ def train_model( learning_rate, lr_scheduler, lr_warmup, + lr_warmup_steps, train_batch_size, epoch, save_every_n_epochs, @@ -598,12 +601,10 @@ def train_model( float(max_train_steps) / 100 * int(stop_text_encoder_training_pct) ) - if lr_warmup != 0: - lr_warmup_steps = round( - float(int(lr_warmup) * int(max_train_steps) / 100) - ) + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) else: - lr_warmup_steps = 0 + lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 else: stop_text_encoder_training = 0 lr_warmup_steps = 0 @@ -698,10 +699,10 @@ def train_model( float(max_train_steps) / 100 * int(stop_text_encoder_training_pct) ) - if lr_warmup != 0: - lr_warmup_steps = round(float(int(lr_warmup) * int(max_train_steps) / 100)) + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) else: - lr_warmup_steps = 0 + lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 log.info(f"Total steps: {total_steps}") @@ -1142,6 +1143,7 @@ def list_embedding_files(path): basic_training.learning_rate, basic_training.lr_scheduler, basic_training.lr_warmup, + basic_training.lr_warmup_steps, 
basic_training.train_batch_size, basic_training.epoch, basic_training.save_every_n_epochs, From 20d7e659e1ff6e4c9cc2e19a6e870d8690d565b3 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 12 Sep 2024 21:31:18 -0400 Subject: [PATCH 111/199] Update lr_warmup_steps code --- kohya_gui/dreambooth_gui.py | 38 ++----- kohya_gui/finetune_gui.py | 25 ++--- kohya_gui/lora_gui.py | 172 ++++++++++------------------- kohya_gui/textual_inversion_gui.py | 34 ++---- 4 files changed, 85 insertions(+), 184 deletions(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 626979a58..f28d82659 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -696,22 +696,6 @@ def train_model( # End of path validation # - # This function validates files or folder paths. Simply add new variables containing file of folder path - # to validate below - # if not validate_paths( - # dataset_config=dataset_config, - # headless=headless, - # log_tracker_config=log_tracker_config, - # logging_dir=logging_dir, - # output_dir=output_dir, - # pretrained_model_name_or_path=pretrained_model_name_or_path, - # reg_data_dir=reg_data_dir, - # resume=resume, - # train_data_dir=train_data_dir, - # vae=vae, - # ): - # return TRAIN_BUTTON_VISIBLE - if not print_only and check_if_model_exist( output_name, output_dir, save_model_as, headless=headless ): @@ -721,13 +705,6 @@ def train_model( log.info( "Dataset config toml file used, skipping total_steps, train_batch_size, gradient_accumulation_steps, epoch, reg_factor, max_train_steps calculations..." ) - if max_train_steps > 0: - if lr_warmup_steps > 0: - lr_warmup_steps = int(lr_warmup_steps) - else: - lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 - else: - lr_warmup_steps = 0 if max_train_steps == 0: max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." @@ -810,13 +787,18 @@ def train_model( else: max_train_steps_info = f"Max train steps: {max_train_steps}" - if lr_warmup_steps > 0: - lr_warmup_steps = int(lr_warmup_steps) - else: - lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 - log.info(f"Total steps: {total_steps}") + # Calculate lr_warmup_steps + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) + if lr_warmup > 0: + log.warning("Both lr_warmup and lr_warmup_steps are set. 
lr_warmup_steps will be used.") + elif lr_warmup != 0: + lr_warmup_steps = lr_warmup / 100 + else: + lr_warmup_steps = 0 + log.info(f"Train batch size: {train_batch_size}") log.info(f"Gradient accumulation steps: {gradient_accumulation_steps}") log.info(f"Epoch: {epoch}") diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 02b205c2a..aa97a1608 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -726,18 +726,6 @@ def train_model( # End of path validation # - # if not validate_paths( - # dataset_config=dataset_config, - # finetune_image_folder=image_folder, - # headless=headless, - # log_tracker_config=log_tracker_config, - # logging_dir=logging_dir, - # output_dir=output_dir, - # pretrained_model_name_or_path=pretrained_model_name_or_path, - # resume=resume, - # ): - # return TRAIN_BUTTON_VISIBLE - if not print_only and check_if_model_exist( output_name, output_dir, save_model_as, headless ): @@ -868,13 +856,16 @@ def train_model( log.info(max_train_steps_info) - if max_train_steps != 0: - if lr_warmup_steps > 0: - lr_warmup_steps = int(lr_warmup_steps) - else: - lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 + # Calculate lr_warmup_steps + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) + if lr_warmup > 0: + log.warning("Both lr_warmup and lr_warmup_steps are set. lr_warmup_steps will be used.") + elif lr_warmup != 0: + lr_warmup_steps = lr_warmup / 100 else: lr_warmup_steps = 0 + log.info(f"lr_warmup_steps = {lr_warmup_steps}") accelerate_path = get_executable_path("accelerate") diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 4f12b5b06..22d4c8563 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -894,21 +894,6 @@ def train_model( # End of path validation # - # if not validate_paths( - # dataset_config=dataset_config, - # headless=headless, - # log_tracker_config=log_tracker_config, - # logging_dir=logging_dir, - # network_weights=network_weights, - # output_dir=output_dir, - # pretrained_model_name_or_path=pretrained_model_name_or_path, - # reg_data_dir=reg_data_dir, - # resume=resume, - # train_data_dir=train_data_dir, - # vae=vae, - # ): - # return TRAIN_BUTTON_VISIBLE - if int(bucket_reso_steps) < 1: output_message( msg="Bucket resolution steps need to be greater than 0", @@ -949,123 +934,80 @@ def train_model( # unet_lr = 0 if dataset_config: - log.info( - "Dataset config toml file used, skipping total_steps, train_batch_size, gradient_accumulation_steps, epoch, reg_factor, max_train_steps calculations..." - ) - if max_train_steps > 0: - # calculate stop encoder training - if stop_text_encoder_training == 0: - stop_text_encoder_training = 0 - else: - stop_text_encoder_training = math.ceil( - float(max_train_steps) / 100 * int(stop_text_encoder_training) - ) - - if lr_warmup_steps > 0: - lr_warmup_steps = int(lr_warmup_steps) - else: - lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 - else: - stop_text_encoder_training = 0 - lr_warmup_steps = 0 - - if max_train_steps == 0: - max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." 
- else: - max_train_steps_info = f"Max train steps: {max_train_steps}" - + log.info("Dataset config TOML file used; skipping calculations for total_steps, train_batch_size, gradient_accumulation_steps, epoch, reg_factor, and max_train_steps.") else: - if train_data_dir == "": - log.error("Train data dir is empty") + if not train_data_dir: + log.error("Train data directory is empty.") return TRAIN_BUTTON_VISIBLE - # Get a list of all subfolders in train_data_dir subfolders = [ - f - for f in os.listdir(train_data_dir) + f for f in os.listdir(train_data_dir) if os.path.isdir(os.path.join(train_data_dir, f)) ] - total_steps = 0 - # Loop through each subfolder and extract the number of repeats for folder in subfolders: try: - # Extract the number of repeats from the folder name - repeats = int(folder.split("_")[0]) - log.info(f"Folder {folder}: {repeats} repeats found") - - # Count the number of images in the folder - num_images = len( - [ - f - for f, lower_f in ( - (file, file.lower()) - for file in os.listdir(os.path.join(train_data_dir, folder)) - ) - if lower_f.endswith((".jpg", ".jpeg", ".png", ".webp")) - ] - ) - - log.info(f"Folder {folder}: {num_images} images found") - - # Calculate the total number of steps for this folder - steps = repeats * num_images - - # log.info the result - log.info(f"Folder {folder}: {num_images} * {repeats} = {steps} steps") - - total_steps += steps - - except ValueError: - # Handle the case where the folder name does not contain an underscore - log.info( - f"Error: '{folder}' does not contain an underscore, skipping..." - ) - - if reg_data_dir == "": - reg_factor = 1 - else: - log.warning( - "Regularisation images are used... Will double the number of steps required..." - ) - reg_factor = 2 - - log.info(f"Regulatization factor: {reg_factor}") + repeats_str = folder.split("_")[0] + repeats = int(repeats_str) + log.info(f"Folder '{folder}': {repeats} repeats found.") + except (ValueError, IndexError): + log.info(f"Skipping folder '{folder}': unable to extract repeat count.") + continue + + folder_path = os.path.join(train_data_dir, folder) + image_extensions = (".jpg", ".jpeg", ".png", ".webp") + num_images = len([ + file for file in os.listdir(folder_path) + if file.lower().endswith(image_extensions) + ]) + log.info(f"Folder '{folder}': {num_images} images found.") + + steps = repeats * num_images + log.info(f"Folder '{folder}': {num_images} images * {repeats} repeats = {steps} steps.") + total_steps += steps + + reg_factor = 2 if reg_data_dir else 1 + if reg_factor == 2: + log.warning("Regularization images are used; the number of required steps will be doubled.") + + log.info(f"Regularization factor: {reg_factor}") if max_train_steps == 0: - # calculate max_train_steps - max_train_steps = int( - math.ceil( - float(total_steps) - / int(train_batch_size) - / int(gradient_accumulation_steps) - * int(epoch) - * int(reg_factor) - ) + if train_batch_size == 0 or gradient_accumulation_steps == 0: + log.error("train_batch_size and gradient_accumulation_steps must be greater than zero.") + return TRAIN_BUTTON_VISIBLE + + max_train_steps = int(math.ceil( + total_steps / train_batch_size / gradient_accumulation_steps * epoch * reg_factor + )) + max_train_steps_info = ( + f"Calculated max_train_steps: ({total_steps} / {train_batch_size} / " + f"{gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" ) - max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = 
{max_train_steps}" - else: - if max_train_steps == 0: - max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." - else: - max_train_steps_info = f"Max train steps: {max_train_steps}" - - # calculate stop encoder training - if stop_text_encoder_training == 0: - stop_text_encoder_training = 0 else: - stop_text_encoder_training = math.ceil( - float(max_train_steps) / 100 * int(stop_text_encoder_training) - ) - - if lr_warmup_steps > 0: - lr_warmup_steps = int(lr_warmup_steps) - else: - lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 + max_train_steps_info = f"Max train steps: {max_train_steps}" log.info(f"Total steps: {total_steps}") + # Calculate stop_text_encoder_training + if max_train_steps > 0 and stop_text_encoder_training > 0: + stop_text_encoder_training = math.ceil( + max_train_steps * stop_text_encoder_training / 100 + ) + else: + stop_text_encoder_training = 0 + + # Calculate lr_warmup_steps + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) + if lr_warmup > 0: + log.warning("Both lr_warmup and lr_warmup_steps are set. lr_warmup_steps will be used.") + elif lr_warmup != 0: + lr_warmup_steps = lr_warmup / 100 + else: + lr_warmup_steps = 0 + log.info(f"Train batch size: {train_batch_size}") log.info(f"Gradient accumulation steps: {gradient_accumulation_steps}") log.info(f"Epoch: {epoch}") diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index d1806f103..f8545caca 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -561,20 +561,6 @@ def train_model( # End of path validation # - # if not validate_paths( - # dataset_config=dataset_config, - # headless=headless, - # log_tracker_config=log_tracker_config, - # logging_dir=logging_dir, - # output_dir=output_dir, - # pretrained_model_name_or_path=pretrained_model_name_or_path, - # reg_data_dir=reg_data_dir, - # resume=resume, - # train_data_dir=train_data_dir, - # vae=vae, - # ): - # return TRAIN_BUTTON_VISIBLE - if token_string == "": output_message(msg="Token string is missing", headless=headless) return TRAIN_BUTTON_VISIBLE @@ -600,11 +586,6 @@ def train_model( stop_text_encoder_training = math.ceil( float(max_train_steps) / 100 * int(stop_text_encoder_training_pct) ) - - if lr_warmup_steps > 0: - lr_warmup_steps = int(lr_warmup_steps) - else: - lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 else: stop_text_encoder_training = 0 lr_warmup_steps = 0 @@ -699,13 +680,18 @@ def train_model( float(max_train_steps) / 100 * int(stop_text_encoder_training_pct) ) - if lr_warmup_steps > 0: - lr_warmup_steps = int(lr_warmup_steps) - else: - lr_warmup_steps = float(lr_warmup / 100) if lr_warmup != 0 else 0 - log.info(f"Total steps: {total_steps}") + # Calculate lr_warmup_steps + if lr_warmup_steps > 0: + lr_warmup_steps = int(lr_warmup_steps) + if lr_warmup > 0: + log.warning("Both lr_warmup and lr_warmup_steps are set. 
lr_warmup_steps will be used.") + elif lr_warmup != 0: + lr_warmup_steps = lr_warmup / 100 + else: + lr_warmup_steps = 0 + log.info(f"Train batch size: {train_batch_size}") log.info(f"Gradient accumulation steps: {gradient_accumulation_steps}") log.info(f"Epoch: {epoch}") From 0d53ac6676865511a9b85c06cf39a544d1a0d766 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 12 Sep 2024 21:57:20 -0400 Subject: [PATCH 112/199] Removing stable-diffusion-1.5 default model --- kohya_gui/class_source_model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/kohya_gui/class_source_model.py b/kohya_gui/class_source_model.py index 6f0628fef..f9ece6577 100644 --- a/kohya_gui/class_source_model.py +++ b/kohya_gui/class_source_model.py @@ -26,7 +26,6 @@ "stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned", "stabilityai/stable-diffusion-2-1", "stabilityai/stable-diffusion-2", - "runwayml/stable-diffusion-v1-5", "CompVis/stable-diffusion-v1-4", ] From 46b57ed635abb26362bfe119a7710b46cccf5a46 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 13 Sep 2024 15:35:30 -0400 Subject: [PATCH 113/199] Fix for max_train_steps --- kohya_gui/lora_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 22d4c8563..be23f07f9 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -991,7 +991,7 @@ def train_model( log.info(f"Total steps: {total_steps}") # Calculate stop_text_encoder_training - if max_train_steps > 0 and stop_text_encoder_training > 0: + if max_train_steps is not None and max_train_steps > 0 and stop_text_encoder_training > 0: stop_text_encoder_training = math.ceil( max_train_steps * stop_text_encoder_training / 100 ) From f744479f9a4a0382f1f3e3c66fb44404adb40e42 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 13 Sep 2024 15:53:30 -0400 Subject: [PATCH 114/199] Revert some changes --- kohya_gui/lora_gui.py | 146 ++++++++++++++++++++++++++++-------------- 1 file changed, 97 insertions(+), 49 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index be23f07f9..29e34339d 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -934,69 +934,117 @@ def train_model( # unet_lr = 0 if dataset_config: - log.info("Dataset config TOML file used; skipping calculations for total_steps, train_batch_size, gradient_accumulation_steps, epoch, reg_factor, and max_train_steps.") + log.info( + "Dataset config toml file used, skipping total_steps, train_batch_size, gradient_accumulation_steps, epoch, reg_factor, max_train_steps calculations..." + ) + if max_train_steps > 0: + # calculate stop encoder training + if stop_text_encoder_training == 0: + stop_text_encoder_training = 0 + else: + stop_text_encoder_training = math.ceil( + float(max_train_steps) / 100 * int(stop_text_encoder_training) + ) + + if lr_warmup != 0: + lr_warmup_steps = round( + float(int(lr_warmup) * int(max_train_steps) / 100) + ) + else: + lr_warmup_steps = 0 + else: + stop_text_encoder_training = 0 + lr_warmup_steps = 0 + + if max_train_steps == 0: + max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." 
+ else: + max_train_steps_info = f"Max train steps: {max_train_steps}" + else: - if not train_data_dir: - log.error("Train data directory is empty.") + if train_data_dir == "": + log.error("Train data dir is empty") return TRAIN_BUTTON_VISIBLE + # Get a list of all subfolders in train_data_dir subfolders = [ - f for f in os.listdir(train_data_dir) + f + for f in os.listdir(train_data_dir) if os.path.isdir(os.path.join(train_data_dir, f)) ] + total_steps = 0 + # Loop through each subfolder and extract the number of repeats for folder in subfolders: try: - repeats_str = folder.split("_")[0] - repeats = int(repeats_str) - log.info(f"Folder '{folder}': {repeats} repeats found.") - except (ValueError, IndexError): - log.info(f"Skipping folder '{folder}': unable to extract repeat count.") - continue - - folder_path = os.path.join(train_data_dir, folder) - image_extensions = (".jpg", ".jpeg", ".png", ".webp") - num_images = len([ - file for file in os.listdir(folder_path) - if file.lower().endswith(image_extensions) - ]) - log.info(f"Folder '{folder}': {num_images} images found.") - - steps = repeats * num_images - log.info(f"Folder '{folder}': {num_images} images * {repeats} repeats = {steps} steps.") - total_steps += steps - - reg_factor = 2 if reg_data_dir else 1 - if reg_factor == 2: - log.warning("Regularization images are used; the number of required steps will be doubled.") - - log.info(f"Regularization factor: {reg_factor}") + # Extract the number of repeats from the folder name + repeats = int(folder.split("_")[0]) + log.info(f"Folder {folder}: {repeats} repeats found") + + # Count the number of images in the folder + num_images = len( + [ + f + for f, lower_f in ( + (file, file.lower()) + for file in os.listdir(os.path.join(train_data_dir, folder)) + ) + if lower_f.endswith((".jpg", ".jpeg", ".png", ".webp")) + ] + ) - if max_train_steps == 0: - if train_batch_size == 0 or gradient_accumulation_steps == 0: - log.error("train_batch_size and gradient_accumulation_steps must be greater than zero.") - return TRAIN_BUTTON_VISIBLE - - max_train_steps = int(math.ceil( - total_steps / train_batch_size / gradient_accumulation_steps * epoch * reg_factor - )) - max_train_steps_info = ( - f"Calculated max_train_steps: ({total_steps} / {train_batch_size} / " - f"{gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" - ) + log.info(f"Folder {folder}: {num_images} images found") + + # Calculate the total number of steps for this folder + steps = repeats * num_images + + # log.info the result + log.info(f"Folder {folder}: {num_images} * {repeats} = {steps} steps") + + total_steps += steps + + except ValueError: + # Handle the case where the folder name does not contain an underscore + log.info( + f"Error: '{folder}' does not contain an underscore, skipping..." + ) + + if reg_data_dir == "": + reg_factor = 1 else: - max_train_steps_info = f"Max train steps: {max_train_steps}" + log.warning( + "Regularisation images are used... Will double the number of steps required..." 
+ ) + reg_factor = 2 - log.info(f"Total steps: {total_steps}") + log.info(f"Regulatization factor: {reg_factor}") - # Calculate stop_text_encoder_training - if max_train_steps is not None and max_train_steps > 0 and stop_text_encoder_training > 0: - stop_text_encoder_training = math.ceil( - max_train_steps * stop_text_encoder_training / 100 - ) - else: - stop_text_encoder_training = 0 + if max_train_steps == 0: + # calculate max_train_steps + max_train_steps = int( + math.ceil( + float(total_steps) + / int(train_batch_size) + / int(gradient_accumulation_steps) + * int(epoch) + * int(reg_factor) + ) + ) + max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" + else: + if max_train_steps == 0: + max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." + else: + max_train_steps_info = f"Max train steps: {max_train_steps}" + + # calculate stop encoder training + if stop_text_encoder_training == 0: + stop_text_encoder_training = 0 + else: + stop_text_encoder_training = math.ceil( + float(max_train_steps) / 100 * int(stop_text_encoder_training) + ) # Calculate lr_warmup_steps if lr_warmup_steps > 0: From 31c77d6d71c60ad3b5f884fe1a9b2bf1857c363b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 14 Sep 2024 09:13:26 -0400 Subject: [PATCH 115/199] Preliminary support for Flux1 OFT --- kohya_gui/lora_gui.py | 36 ++++++++++++++++++++++++++++++++++-- sd-scripts | 2 +- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 29e34339d..a9ac7dd8f 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -591,6 +591,7 @@ def open_configuration( # This section dynamically adjusts visibility of certain parameters in the UI if my_data.get("LoRA_type", "Standard") in { "Flux1", + "Flux1 OFT", "LoCon", "Kohya DyLoRA", "Kohya LoCon", @@ -844,8 +845,8 @@ def train_model( if flux1_checkbox: log.info(f"Validating lora type is Flux1 if flux1 checkbox is checked...") - if LoRA_type != "Flux1": - log.error("LoRA type must be set to Flux1 if Flux1 checkbox is checked.") + if LoRA_type != "Flux1" or LoRA_type != "Flux1 OFT": + log.error("LoRA type must be set to 'Flux1' or 'Flux1 OFT' if Flux1 checkbox is checked.") return TRAIN_BUTTON_VISIBLE # @@ -1155,6 +1156,31 @@ def train_model( for key, value in kohya_lora_vars.items(): if value: network_args += f" {key}={value}" + + if LoRA_type == "Flux1 OFT": + # Add a list of supported network arguments for Flux1 OFT below when supported + kohya_lora_var_list = [] + network_module = "networks.oft_flux" + kohya_lora_vars = { + key: value + for key, value in vars().items() + if key in kohya_lora_var_list and value + } + # if split_mode: + # if train_blocks != "single": + # log.warning( + # f"train_blocks is currently set to '{train_blocks}'. split_mode is enabled, forcing train_blocks to 'single'." 
+ # ) + # kohya_lora_vars["train_blocks"] = "single" + + # if split_qkv: + # kohya_lora_vars["split_qkv"] = True + # if train_t5xxl: + # kohya_lora_vars["train_t5xxl"] = True + + for key, value in kohya_lora_vars.items(): + if value: + network_args += f" {key}={value}" if LoRA_type in ["Kohya LoCon", "Standard"]: kohya_lora_var_list = [ @@ -1630,6 +1656,7 @@ def list_presets(path): label="LoRA type", choices=[ "Flux1", + "Flux1 OFT", "Kohya DyLoRA", "Kohya LoCon", "LoRA-FA", @@ -1912,6 +1939,7 @@ def update_LoRA_settings( "visible": LoRA_type in { "Flux1", + "Flux1 OFT", "Kohya DyLoRA", "Kohya LoCon", "LoRA-FA", @@ -1951,6 +1979,7 @@ def update_LoRA_settings( "visible": LoRA_type in { "Flux1", + "Flux1 OFT", "Standard", "Kohya DyLoRA", "Kohya LoCon", @@ -1964,6 +1993,7 @@ def update_LoRA_settings( "visible": LoRA_type in { "Flux1", + "Flux1 OFT", "Standard", "LoCon", "Kohya DyLoRA", @@ -1985,6 +2015,7 @@ def update_LoRA_settings( "visible": LoRA_type in { "Flux1", + "Flux1 OFT", "Standard", "LoCon", "Kohya DyLoRA", @@ -2006,6 +2037,7 @@ def update_LoRA_settings( "visible": LoRA_type in { "Flux1", + "Flux1 OFT", "Standard", "LoCon", "Kohya DyLoRA", diff --git a/sd-scripts b/sd-scripts index cefe52629..2d8ee3c28 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit cefe52629e1901dd8192b0487afd5e9f089e3519 +Subproject commit 2d8ee3c28007393386528cfeec0a9b714dafd85b From c554984a5320117350e1480666671a5737ac46c7 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 14 Sep 2024 09:24:13 -0400 Subject: [PATCH 116/199] Fix logic typo --- kohya_gui/lora_gui.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index a9ac7dd8f..bf288a826 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -845,7 +845,8 @@ def train_model( if flux1_checkbox: log.info(f"Validating lora type is Flux1 if flux1 checkbox is checked...") - if LoRA_type != "Flux1" or LoRA_type != "Flux1 OFT": + print(LoRA_type) + if (LoRA_type != "Flux1") and (LoRA_type != "Flux1 OFT"): log.error("LoRA type must be set to 'Flux1' or 'Flux1 OFT' if Flux1 checkbox is checked.") return TRAIN_BUTTON_VISIBLE From 6c5c9d40948444aa94626b0e8a6ca323de821859 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 14 Sep 2024 13:02:47 -0400 Subject: [PATCH 117/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 2d8ee3c28..6445bb2bc 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 2d8ee3c28007393386528cfeec0a9b714dafd85b +Subproject commit 6445bb2bc974cec51256ae38c1be0900e90e6f87 From d24fae17b7a30b62fc4f200d1ff999a9551c20a2 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 14 Sep 2024 13:45:01 -0400 Subject: [PATCH 118/199] Add support for Rank for layers --- assets/style.css | 25 ++++++++++++ kohya_gui/class_flux1.py | 88 +++++++++++++++++++++++++++++++++++----- kohya_gui/lora_gui.py | 64 ++++++++++++++++++++++++++--- 3 files changed, 161 insertions(+), 16 deletions(-) diff --git a/assets/style.css b/assets/style.css index 41c2c4385..f8cfe112b 100644 --- a/assets/style.css +++ b/assets/style.css @@ -219,3 +219,28 @@ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ } +.flux1_rank_layers_background { + background: #ece9e6; /* White background for clear theme */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.flux1_rank_layers_background:hover { + background-color: 
#dddad7; /* Slightly darker on hover */ + border: 1px solid #ccc; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} + +.dark .flux1_rank_layers_background { + background: #131c25; /* Dark background for dark theme */ + padding: 1em; + border-radius: 8px; + transition: background-color 0.3s ease, border 0.3s ease, box-shadow 0.3s ease; +} + +.dark .flux1_rank_layers_background:hover { + background-color: #131c25; /* Slightly darker on hover */ + border: 1px solid #000000; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow on hover */ +} \ No newline at end of file diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index da517d4f2..a4d207f51 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -52,7 +52,7 @@ def noise_offset_type_change( outputs=self.ae, show_progress=False, ) - + self.clip_l = gr.Textbox( label="CLIP-L Path", placeholder="Path to CLIP-L model", @@ -90,20 +90,22 @@ def noise_offset_type_change( ) with gr.Row(): - + self.discrete_flow_shift = gr.Number( label="Discrete Flow Shift", value=self.config.get("flux1.discrete_flow_shift", 3.0), info="Discrete flow shift for the Euler Discrete Scheduler, default is 3.0", minimum=-1024, maximum=1024, - step=.01, + step=0.01, interactive=True, ) self.model_prediction_type = gr.Dropdown( label="Model Prediction Type", choices=["raw", "additive", "sigma_scaled"], - value=self.config.get("flux1.timestep_sampling", "sigma_scaled"), + value=self.config.get( + "flux1.timestep_sampling", "sigma_scaled" + ), interactive=True, ) self.timestep_sampling = gr.Dropdown( @@ -156,10 +158,10 @@ def noise_offset_type_change( info="Guidance scale for Flux1", minimum=0, maximum=1024, - step=.1, + step=0.1, interactive=True, ) - self.t5xxl_max_token_length = gr.Number( + self.t5xxl_max_token_length = gr.Number( label="T5-XXL Max Token Length", value=self.config.get("flux1.t5xxl_max_token_length", 512), info="Max token length for T5-XXL", @@ -168,11 +170,19 @@ def noise_offset_type_change( step=1, interactive=True, ) - + self.enable_all_linear = gr.Checkbox( + label="Enable All Linear", + value=self.config.get("flux1.enable_all_linear", False), + info="(Only applicable to 'FLux1 OFT' LoRA) Target all linear connections in the MLP layer. The default is False, which targets only attention.", + interactive=True, + ) + with gr.Row(): self.flux1_cache_text_encoder_outputs = gr.Checkbox( label="Cache Text Encoder Outputs", - value=self.config.get("flux1.cache_text_encoder_outputs", False), + value=self.config.get( + "flux1.cache_text_encoder_outputs", False + ), info="Cache text encoder outputs to speed up inference", interactive=True, ) @@ -190,11 +200,13 @@ def noise_offset_type_change( info="[Experimentsl] Enable memory efficient save. We do not recommend using it unless you are familiar with the code.", interactive=True, ) - + with gr.Row(visible=True if finetuning else False): self.blockwise_fused_optimizers = gr.Checkbox( label="Blockwise Fused Optimizer", - value=self.config.get("flux1.blockwise_fused_optimizers", False), + value=self.config.get( + "flux1.blockwise_fused_optimizers", False + ), info="Enable blockwise optimizers for fused backward pass and optimizer step. Any optimizer can be used.", interactive=True, ) @@ -228,6 +240,62 @@ def noise_offset_type_change( info="Enables the fusing of the optimizer step into the backward pass for each parameter. 
Only Adafactor optimizer is supported.", interactive=True, ) + with gr.Accordion( + "Rank for layers", + open=False, + visible=False if finetuning else True, + elem_classes=["flux1_rank_layers_background"], + ): + with gr.Row(): + self.img_attn_dim = gr.Textbox( + label="img_attn_dim", + value=self.config.get("flux1.img_attn_dim", ""), + interactive=True, + ) + self.img_mlp_dim = gr.Textbox( + label="img_mlp_dim", + value=self.config.get("flux1.img_mlp_dim", ""), + interactive=True, + ) + self.img_mod_dim = gr.Textbox( + label="img_mod_dim", + value=self.config.get("flux1.img_mod_dim", ""), + interactive=True, + ) + self.single_dim = gr.Textbox( + label="single_dim", + value=self.config.get("flux1.single_dim", ""), + interactive=True, + ) + with gr.Row(): + self.txt_attn_dim = gr.Textbox( + label="txt_attn_dim", + value=self.config.get("flux1.txt_attn_dim", ""), + interactive=True, + ) + self.txt_mlp_dim = gr.Textbox( + label="txt_mlp_dim", + value=self.config.get("flux1.txt_mlp_dim", ""), + interactive=True, + ) + self.txt_mod_dim = gr.Textbox( + label="txt_mod_dim", + value=self.config.get("flux1.txt_mod_dim", ""), + interactive=True, + ) + self.single_mod_dim = gr.Textbox( + label="single_mod_dim", + value=self.config.get("flux1.single_mod_dim", ""), + interactive=True, + ) + with gr.Row(): + self.in_dims = gr.Textbox( + label="in_dims", + value=self.config.get("flux1.in_dims", ""), + placeholder="e.g., [4,0,0,0,4]", + info="Each number corresponds to img_in, time_in, vector_in, guidance_in, txt_in. The above example applies LoRA to all conditioning layers, with rank 4 for img_in, 2 for time_in, vector_in, guidance_in, and 4 for txt_in.", + interactive=True, + ) self.flux1_checkbox.change( lambda flux1_checkbox: gr.Accordion(visible=flux1_checkbox), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index bf288a826..f9706313b 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -277,12 +277,22 @@ def save_configuration( split_mode, train_blocks, t5xxl_max_token_length, + enable_all_linear, guidance_scale, mem_eff_save, apply_t5_attn_mask, split_qkv, train_t5xxl, cpu_offload_checkpointing, + img_attn_dim, + img_mlp_dim, + img_mod_dim, + single_dim, + txt_attn_dim, + txt_mlp_dim, + txt_mod_dim, + single_mod_dim, + in_dims, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -528,12 +538,23 @@ def open_configuration( split_mode, train_blocks, t5xxl_max_token_length, + enable_all_linear, guidance_scale, mem_eff_save, apply_t5_attn_mask, split_qkv, train_t5xxl, cpu_offload_checkpointing, + img_attn_dim, + img_mlp_dim, + img_mod_dim, + single_dim, + txt_attn_dim, + txt_mlp_dim, + txt_mod_dim, + single_mod_dim, + in_dims, + ## training_preset, ): @@ -812,12 +833,22 @@ def train_model( split_mode, train_blocks, t5xxl_max_token_length, + enable_all_linear, guidance_scale, mem_eff_save, apply_t5_attn_mask, split_qkv, train_t5xxl, cpu_offload_checkpointing, + img_attn_dim, + img_mlp_dim, + img_mod_dim, + single_dim, + txt_attn_dim, + txt_mlp_dim, + txt_mod_dim, + single_mod_dim, + in_dims, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -845,7 +876,6 @@ def train_model( if flux1_checkbox: log.info(f"Validating lora type is Flux1 if flux1 checkbox is checked...") - print(LoRA_type) if (LoRA_type != "Flux1") and (LoRA_type != "Flux1 OFT"): log.error("LoRA type must be set to 'Flux1' or 'Flux1 OFT' if Flux1 checkbox is checked.") return TRAIN_BUTTON_VISIBLE @@ -1135,7 +1165,17 @@ def train_model( if 
LoRA_type == "Flux1": # Add a list of supported network arguments for Flux1 below when supported - kohya_lora_var_list = [] + kohya_lora_var_list = [ + "img_attn_dim", + "img_mlp_dim", + "img_mod_dim", + "single_dim", + "txt_attn_dim", + "txt_mlp_dim", + "txt_mod_dim", + "single_mod_dim", + "in_dims", + ] network_module = "networks.lora_flux" kohya_lora_vars = { key: value @@ -1160,7 +1200,9 @@ def train_model( if LoRA_type == "Flux1 OFT": # Add a list of supported network arguments for Flux1 OFT below when supported - kohya_lora_var_list = [] + kohya_lora_var_list = [ + "enable_all_linear", + ] network_module = "networks.oft_flux" kohya_lora_vars = { key: value @@ -1602,12 +1644,12 @@ def lora_tab( config=config, ) + with gr.Accordion("Folders", open=True), gr.Group(): + folders = Folders(headless=headless, config=config) + with gr.Accordion("Metadata", open=False), gr.Group(): metadata = MetaData(config=config) - with gr.Accordion("Folders", open=False), gr.Group(): - folders = Folders(headless=headless, config=config) - with gr.Accordion("Dataset Preparation", open=False): gr.Markdown( "This section provide Dreambooth tools to help setup your dataset..." @@ -2675,12 +2717,22 @@ def update_LoRA_settings( flux1_training.split_mode, flux1_training.train_blocks, flux1_training.t5xxl_max_token_length, + flux1_training.enable_all_linear, flux1_training.guidance_scale, flux1_training.mem_eff_save, flux1_training.apply_t5_attn_mask, flux1_training.split_qkv, flux1_training.train_t5xxl, flux1_training.cpu_offload_checkpointing, + flux1_training.img_attn_dim, + flux1_training.img_mlp_dim, + flux1_training.img_mod_dim, + flux1_training.single_dim, + flux1_training.txt_attn_dim, + flux1_training.txt_mlp_dim, + flux1_training.txt_mod_dim, + flux1_training.single_mod_dim, + flux1_training.in_dims, ] configuration.button_open_config.click( From 9a37a0e197c03466f9fe7943e264ac1189af8f90 Mon Sep 17 00:00:00 2001 From: rohitanshu <85547195+iamrohitanshu@users.noreply.github.com> Date: Mon, 16 Sep 2024 19:44:05 +0530 Subject: [PATCH 119/199] Update lora_gui.py Fixed minor typos of "Regularization" --- kohya_gui/lora_gui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index f9706313b..9d0b01f21 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1046,11 +1046,11 @@ def train_model( reg_factor = 1 else: log.warning( - "Regularisation images are used... Will double the number of steps required..." + "Regularization images are used... Will double the number of steps required..." ) reg_factor = 2 - log.info(f"Regulatization factor: {reg_factor}") + log.info(f"Regularization factor: {reg_factor}") if max_train_steps == 0: # calculate max_train_steps From f8ddd596d6a77ff76555432341c2600a24900853 Mon Sep 17 00:00:00 2001 From: rohitanshu <85547195+iamrohitanshu@users.noreply.github.com> Date: Mon, 16 Sep 2024 19:44:51 +0530 Subject: [PATCH 120/199] Update dreambooth_gui.py Fixed minor typos of "Regularization" --- kohya_gui/dreambooth_gui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index f28d82659..615db99b3 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -763,11 +763,11 @@ def train_model( reg_factor = 1 else: log.warning( - "Regularisation images are used... Will double the number of steps required..." + "Regularization images are used... Will double the number of steps required..." 
) reg_factor = 2 - log.info(f"Regulatization factor: {reg_factor}") + log.info(f"Regularization factor: {reg_factor}") if max_train_steps == 0: # calculate max_train_steps From b8c4de17e5785d68704a077070fb74b8306e759f Mon Sep 17 00:00:00 2001 From: rohitanshu <85547195+iamrohitanshu@users.noreply.github.com> Date: Mon, 16 Sep 2024 19:45:41 +0530 Subject: [PATCH 121/199] Update textual_inversion_gui.py Fixed minor typos of "Regularization" --- kohya_gui/textual_inversion_gui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index f8545caca..83dc2c798 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -648,11 +648,11 @@ def train_model( reg_factor = 1 else: log.warning( - "Regularisation images are used... Will double the number of steps required..." + "Regularization images are used... Will double the number of steps required..." ) reg_factor = 2 - log.info(f"Regulatization factor: {reg_factor}") + log.info(f"Regularization factor: {reg_factor}") if max_train_steps == 0: # calculate max_train_steps From 416ef0e19e8dc848eef84dbd44452a15472c2495 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 16 Sep 2024 18:48:07 -0400 Subject: [PATCH 122/199] Add support for Blocks to train --- kohya_gui/class_flux1.py | 21 +++++++++++++++++++++ kohya_gui/lora_gui.py | 10 ++++++++++ sd-scripts | 2 +- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index a4d207f51..2d74ae56b 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -240,6 +240,27 @@ def noise_offset_type_change( info="Enables the fusing of the optimizer step into the backward pass for each parameter. Only Adafactor optimizer is supported.", interactive=True, ) + + with gr.Accordion( + "Blocks to train", + open=True, + visible=False if finetuning else True, + elem_classes=["flux1_blocks_to_train_background"], + ): + with gr.Row(): + self.train_double_block_indices = gr.Textbox( + label="train_double_block_indices", + info="The indices are specified as a list of integers or a range of integers, like '0,1,5,8' or '0,1,4-5,7' or 'all' or 'none'. The number of double blocks is 19.", + value=self.config.get("flux1.train_double_block_indices", "all"), + interactive=True, + ) + self.train_single_block_indices = gr.Textbox( + label="train_single_block_indices", + info="The indices are specified as a list of integers or a range of integers, like '0,1,5,8' or '0,1,4-5,7' or 'all' or 'none'. 
The number of single blocks is 38.", + value=self.config.get("flux1.train_single_block_indices", "all"), + interactive=True, + ) + with gr.Accordion( "Rank for layers", open=False, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index f9706313b..ecfe495d2 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -293,6 +293,8 @@ def save_configuration( txt_mod_dim, single_mod_dim, in_dims, + train_double_block_indices, + train_single_block_indices, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -554,6 +556,8 @@ def open_configuration( txt_mod_dim, single_mod_dim, in_dims, + train_double_block_indices, + train_single_block_indices, ## training_preset, @@ -849,6 +853,8 @@ def train_model( txt_mod_dim, single_mod_dim, in_dims, + train_double_block_indices, + train_single_block_indices, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1536,6 +1542,8 @@ def train_model( "mem_eff_save": mem_eff_save if flux1_checkbox else None, "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, + "train_double_block_indices": train_double_block_indices if flux1_checkbox else None, + "train_single_block_indices": train_single_block_indices if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -2733,6 +2741,8 @@ def update_LoRA_settings( flux1_training.txt_mod_dim, flux1_training.single_mod_dim, flux1_training.in_dims, + flux1_training.train_double_block_indices, + flux1_training.train_single_block_indices, ] configuration.button_open_config.click( diff --git a/sd-scripts b/sd-scripts index 6445bb2bc..d8d15f1a7 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 6445bb2bc974cec51256ae38c1be0900e90e6f87 +Subproject commit d8d15f1a7e09ca217930288b41bd239881126b93 From 8c11a08eb334437a931edb55cb96a0802b8dcf64 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 16 Sep 2024 18:51:09 -0400 Subject: [PATCH 123/199] Add missing network parms --- kohya_gui/lora_gui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index ecfe495d2..7978b6ca7 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1181,6 +1181,8 @@ def train_model( "txt_mod_dim", "single_mod_dim", "in_dims", + "train_double_block_indices", + "train_single_block_indices", ] network_module = "networks.lora_flux" kohya_lora_vars = { @@ -1542,8 +1544,6 @@ def train_model( "mem_eff_save": mem_eff_save if flux1_checkbox else None, "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, - "train_double_block_indices": train_double_block_indices if flux1_checkbox else None, - "train_single_block_indices": train_single_block_indices if flux1_checkbox else None, } # Given dictionary `config_toml_data` From e696df27df4e9a069e2618e87d950418a6ca0f7e Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 17 Sep 2024 18:06:42 -0400 Subject: [PATCH 124/199] Fix issue with old_lr_warmup_steps --- kohya_gui/class_basic_training.py | 3 +++ sd-scripts | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index 1a5a25db2..e133f010a 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -49,7 +49,10 @@ def __init__( self.finetuning = finetuning 
self.dreambooth = dreambooth self.config = config + + # Initialize old_lr_warmup and old_lr_warmup_steps with default values self.old_lr_warmup = 0 + self.old_lr_warmup_steps = 0 # Initialize the UI components self.initialize_ui_components() diff --git a/sd-scripts b/sd-scripts index d8d15f1a7..a2ad7e564 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit d8d15f1a7e09ca217930288b41bd239881126b93 +Subproject commit a2ad7e5644f08141fe053a2b63446d70d777bdcf From 74a66d23879b32ed90250f49eb6e5bd05aa2f37e Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 17 Sep 2024 19:37:49 -0400 Subject: [PATCH 125/199] Update sd-scripts --- requirements.txt | 2 +- sd-scripts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 6d86fcaa7..08d037878 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,7 +19,7 @@ onnx==1.16.1 prodigyopt==1.0 protobuf==3.20.3 open-clip-torch==2.20.0 -opencv-python==4.7.0.72 +opencv-python==4.10.0.84 prodigyopt==1.0 pytorch-lightning==1.9.0 rich>=13.7.1 diff --git a/sd-scripts b/sd-scripts index a2ad7e564..e74502117 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit a2ad7e5644f08141fe053a2b63446d70d777bdcf +Subproject commit e74502117bcf161ef5698fb0adba4f9fa0171b8d From cee863664b35104d262cb63132e16cf62d994dd9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 17 Sep 2024 19:57:41 -0400 Subject: [PATCH 126/199] Add support for ScheduleFree Optimizer Type --- kohya_gui/class_basic_training.py | 2 ++ requirements.txt | 1 + 2 files changed, 3 insertions(+) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index e133f010a..cf05c843f 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -193,6 +193,7 @@ def init_lr_and_optimizer_controls(self) -> None: label="Optimizer", choices=[ "AdamW", + "AdamWScheduleFree", "AdamW8bit", "Adafactor", "DAdaptation", @@ -211,6 +212,7 @@ def init_lr_and_optimizer_controls(self) -> None: "Prodigy", "SGDNesterov", "SGDNesterov8bit", + "SGDScheduleFree", ], value=self.config.get("basic.optimizer", "AdamW8bit"), interactive=True, diff --git a/requirements.txt b/requirements.txt index 08d037878..8dbe29b3a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,6 +24,7 @@ prodigyopt==1.0 pytorch-lightning==1.9.0 rich>=13.7.1 safetensors==0.4.4 +schedulefree==1.2.7 scipy==1.11.4 # for T5XXL tokenizer (SD3/FLUX) sentencepiece==0.2.0 From 06c7512b4ef67ae0c07ee2719cea610600412e71 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 18 Sep 2024 19:12:05 -0400 Subject: [PATCH 127/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index e74502117..1286e00bb 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit e74502117bcf161ef5698fb0adba4f9fa0171b8d +Subproject commit 1286e00bb0fc34c296f24b7057777f1c37cf8e11 From 5f04009e1d6fa17b1ef58cd9ca8a57915317ed2b Mon Sep 17 00:00:00 2001 From: wzgrx <39661556+wzgrx@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:30:26 +0800 Subject: [PATCH 128/199] Update requirements_pytorch_windows.txt --- requirements_pytorch_windows.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements_pytorch_windows.txt b/requirements_pytorch_windows.txt index 23364d1af..558c23d64 100644 --- a/requirements_pytorch_windows.txt +++ b/requirements_pytorch_windows.txt @@ -1,3 +1,3 @@ -torch==2.1.2+cu118 --index-url 
https://download.pytorch.org/whl/cu118 -torchvision==0.16.2+cu118 --index-url https://download.pytorch.org/whl/cu118 -xformers==0.0.23.post1+cu118 --index-url https://download.pytorch.org/whl/cu118 \ No newline at end of file +torch==2.4.1+cu124 --index-url https://download.pytorch.org/whl/cu118 +torchvision==0.19.1+cu124 --index-url https://download.pytorch.org/whl/cu118 +xformers==0.0.28.post1+cu124 --index-url https://download.pytorch.org/whl/cu118 From 1beadeb67c6798c4d57b30c9044700adafaf98a8 Mon Sep 17 00:00:00 2001 From: wzgrx <39661556+wzgrx@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:36:17 +0800 Subject: [PATCH 129/199] Update requirements_pytorch_windows.txt --- requirements_pytorch_windows.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements_pytorch_windows.txt b/requirements_pytorch_windows.txt index 558c23d64..214a79e2f 100644 --- a/requirements_pytorch_windows.txt +++ b/requirements_pytorch_windows.txt @@ -1,3 +1,3 @@ -torch==2.4.1+cu124 --index-url https://download.pytorch.org/whl/cu118 -torchvision==0.19.1+cu124 --index-url https://download.pytorch.org/whl/cu118 -xformers==0.0.28.post1+cu124 --index-url https://download.pytorch.org/whl/cu118 +torch==2.4.1+cu124 --index-url https://download.pytorch.org/whl/cu124 +torchvision==0.19.1+cu124 --index-url https://download.pytorch.org/whl/cu124 +xformers==0.0.28.post1+cu124 --index-url https://download.pytorch.org/whl/cu124 From 95bf7ff29cade27108a55c49f36f360c7449cde0 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 28 Sep 2024 19:01:34 -0400 Subject: [PATCH 130/199] Update sd-scripts from origin --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 1286e00bb..1a0f5b0c3 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 1286e00bb0fc34c296f24b7057777f1c37cf8e11 +Subproject commit 1a0f5b0c389f4e9fab5edb06b36f203e8894d581 From 28dd25d6cd062c5cbbd6e683033fb5af573e2c67 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 28 Sep 2024 21:58:00 -0400 Subject: [PATCH 131/199] Another sd-script update --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 1a0f5b0c3..d05063857 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 1a0f5b0c389f4e9fab5edb06b36f203e8894d581 +Subproject commit d0506385718cbf2b1d25ea9f68d3b66a604bc825 From a28026bca11ae0cb6b189ec2256e34c70fb88842 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 29 Sep 2024 08:36:05 -0400 Subject: [PATCH 132/199] Adding support for blocks_to_swap option to gui --- kohya_gui/class_flux1.py | 39 ++++++++++++++++++++++++------------- kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 7 ++++++- 3 files changed, 36 insertions(+), 15 deletions(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 2d74ae56b..547e51934 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -202,22 +202,17 @@ def noise_offset_type_change( ) with gr.Row(visible=True if finetuning else False): - self.blockwise_fused_optimizers = gr.Checkbox( - label="Blockwise Fused Optimizer", - value=self.config.get( - "flux1.blockwise_fused_optimizers", False - ), - info="Enable blockwise optimizers for fused backward pass and optimizer step. 
Any optimizer can be used.", - interactive=True, - ) - self.cpu_offload_checkpointing = gr.Checkbox( - label="CPU Offload Checkpointing", - value=self.config.get("flux1.cpu_offload_checkpointing", False), - info="[Experimental] Enable offloading of tensors to CPU during checkpointing", + self.blocks_to_swap = gr.Slider( + label="Blocks to swap", + value=self.config.get("flux1.blocks_to_swap", 0), + info="The number of blocks to swap. The default is None (no swap). These options must be combined with --fused_backward_pass or --blockwise_fused_optimizers. The recommended maximum value is 36.", + minimum=0, + maximum=57, + step=1, interactive=True, ) self.single_blocks_to_swap = gr.Slider( - label="Single Blocks to swap", + label="Single Blocks to swap (depercated)", value=self.config.get("flux1.single_blocks_to_swap", 0), info="[Experimental] Sets the number of 'single_blocks' (~320MB) to swap during the forward and backward passes.", minimum=0, @@ -226,7 +221,7 @@ def noise_offset_type_change( interactive=True, ) self.double_blocks_to_swap = gr.Slider( - label="Double Blocks to swap", + label="Double Blocks to swap (depercated)", value=self.config.get("flux1.double_blocks_to_swap", 0), info="[Experimental] Sets the number of 'double_blocks' (~640MB) to swap during the forward and backward passes.", minimum=0, @@ -234,6 +229,22 @@ def noise_offset_type_change( step=1, interactive=True, ) + + with gr.Row(visible=True if finetuning else False): + self.blockwise_fused_optimizers = gr.Checkbox( + label="Blockwise Fused Optimizer", + value=self.config.get( + "flux1.blockwise_fused_optimizers", False + ), + info="Enable blockwise optimizers for fused backward pass and optimizer step. Any optimizer can be used.", + interactive=True, + ) + self.cpu_offload_checkpointing = gr.Checkbox( + label="CPU Offload Checkpointing", + value=self.config.get("flux1.cpu_offload_checkpointing", False), + info="[Experimental] Enable offloading of tensors to CPU during checkpointing", + interactive=True, + ) self.flux_fused_backward_pass = gr.Checkbox( label="Fused Backward Pass", value=self.config.get("flux1.fused_backward_pass", False), diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 615db99b3..f2291c06f 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -225,6 +225,7 @@ def save_configuration( blockwise_fused_optimizers, flux_fused_backward_pass, cpu_offload_checkpointing, + blocks_to_swap, single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, @@ -429,6 +430,7 @@ def open_configuration( blockwise_fused_optimizers, flux_fused_backward_pass, cpu_offload_checkpointing, + blocks_to_swap, single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, @@ -628,6 +630,7 @@ def train_model( blockwise_fused_optimizers, flux_fused_backward_pass, cpu_offload_checkpointing, + blocks_to_swap, single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, @@ -1043,6 +1046,7 @@ def train_model( "cpu_offload_checkpointing": ( cpu_offload_checkpointing if flux1_checkbox else None ), + "blocks_to_swap": blocks_to_swap if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, "mem_eff_save": mem_eff_save if flux1_checkbox else None, @@ -1389,6 +1393,7 @@ def dreambooth_tab( flux1_training.blockwise_fused_optimizers, flux1_training.flux_fused_backward_pass, flux1_training.cpu_offload_checkpointing, + flux1_training.blocks_to_swap, 
flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, flux1_training.mem_eff_save, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index aa97a1608..5dca363a1 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -233,6 +233,7 @@ def save_configuration( blockwise_fused_optimizers, flux_fused_backward_pass, cpu_offload_checkpointing, + blocks_to_swap, single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, @@ -443,11 +444,12 @@ def open_configuration( blockwise_fused_optimizers, flux_fused_backward_pass, cpu_offload_checkpointing, + blocks_to_swap, single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, - training_preset, apply_t5_attn_mask, + training_preset, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -659,6 +661,7 @@ def train_model( blockwise_fused_optimizers, flux_fused_backward_pass, cpu_offload_checkpointing, + blocks_to_swap, single_blocks_to_swap, double_blocks_to_swap, mem_eff_save, @@ -1101,6 +1104,7 @@ def train_model( "cpu_offload_checkpointing": ( cpu_offload_checkpointing if flux1_checkbox else None ), + "blocks_to_swap": blocks_to_swap if flux1_checkbox else None, "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, "mem_eff_save": mem_eff_save if flux1_checkbox else None, @@ -1521,6 +1525,7 @@ def list_presets(path): flux1_training.blockwise_fused_optimizers, flux1_training.flux_fused_backward_pass, flux1_training.cpu_offload_checkpointing, + flux1_training.blocks_to_swap, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, flux1_training.mem_eff_save, From 79a4a3a37cb2248524f39abb0f7e40c0599f98f6 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 29 Sep 2024 08:46:29 -0400 Subject: [PATCH 133/199] Fix xformers install issue --- requirements_pytorch_windows.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements_pytorch_windows.txt b/requirements_pytorch_windows.txt index ff091aa50..aed2e8ad0 100644 --- a/requirements_pytorch_windows.txt +++ b/requirements_pytorch_windows.txt @@ -1,3 +1,3 @@ torch==2.4.1+cu124 --index-url https://download.pytorch.org/whl/cu124 torchvision==0.19.1+cu124 --index-url https://download.pytorch.org/whl/cu124 -xformers==0.0.28.post1+cu124 --index-url https://download.pytorch.org/whl/cu124 \ No newline at end of file +xformers==0.0.28.post1 --index-url https://download.pytorch.org/whl/cu124 \ No newline at end of file From f8cf272533e3173a2d35def95463de32f4490761 Mon Sep 17 00:00:00 2001 From: Vladimir Sotnikov Date: Sun, 29 Sep 2024 19:54:42 +0300 Subject: [PATCH 134/199] feat(docker): mount models folder as a volume --- docker-compose.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/docker-compose.yaml b/docker-compose.yaml index 4932bcee2..6b6aee76e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -20,6 +20,7 @@ services: - /tmp volumes: - /tmp/.X11-unix:/tmp/.X11-unix + - ./models:/app/models - ./dataset:/dataset - ./dataset/images:/app/data - ./dataset/logs:/app/logs From d89f0be76f4009580fabd7889ccb4f6ae0c2ce03 Mon Sep 17 00:00:00 2001 From: Vladimir Sotnikov Date: Sun, 29 Sep 2024 20:08:41 +0300 Subject: [PATCH 135/199] feat(docker): add models folder to .dockerignore --- .dockerignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.dockerignore b/.dockerignore index 7e9e5b444..9849d33f3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,6 +3,7 @@ cudnn_windows/ 
bitsandbytes_windows/ bitsandbytes_windows_deprecated/ dataset/ +models/ __pycache__/ venv/ **/.hadolint.yml From 34db05e8fd90841e90e8383e04ba2bfe8ead49cd Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 29 Sep 2024 13:41:57 -0400 Subject: [PATCH 136/199] Add support for AdEMAMix8bit optimizer --- kohya_gui/class_basic_training.py | 2 ++ requirements.txt | 1 - requirements_linux.txt | 2 +- requirements_linux_docker.txt | 2 +- requirements_runpod.txt | 2 +- requirements_windows.txt | 2 +- sd-scripts | 2 +- 7 files changed, 7 insertions(+), 6 deletions(-) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index cf05c843f..0d03769cf 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -196,6 +196,8 @@ def init_lr_and_optimizer_controls(self) -> None: "AdamWScheduleFree", "AdamW8bit", "Adafactor", + "bitsandbytes.optim.AdEMAMix8bit", + "bitsandbytes.optim.PagedAdEMAMix8bit", "DAdaptation", "DAdaptAdaGrad", "DAdaptAdam", diff --git a/requirements.txt b/requirements.txt index 8dbe29b3a..dfd3f0aba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,6 +34,5 @@ toml==0.10.2 transformers==4.44.2 voluptuous==0.13.1 wandb==0.18.0 -scipy==1.11.4 # for kohya_ss library -e ./sd-scripts # no_verify leave this to specify not checking this a verification stage diff --git a/requirements_linux.txt b/requirements_linux.txt index 19a5edd2d..352c750fd 100644 --- a/requirements_linux.txt +++ b/requirements_linux.txt @@ -1,5 +1,5 @@ torch==2.4.0+cu124 torchvision==0.19.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124 -bitsandbytes==0.43.3 +bitsandbytes==0.44.0 tensorboard==2.15.2 tensorflow==2.15.0.post1 onnxruntime-gpu==1.17.1 xformers==0.0.27.post2 diff --git a/requirements_linux_docker.txt b/requirements_linux_docker.txt index 5ff196156..d0ae66d53 100644 --- a/requirements_linux_docker.txt +++ b/requirements_linux_docker.txt @@ -1,4 +1,4 @@ xformers>=0.0.20 -bitsandbytes==0.43.3 +bitsandbytes==0.44.0 accelerate==0.33.0 tensorboard diff --git a/requirements_runpod.txt b/requirements_runpod.txt index ff68bba72..924cead3d 100644 --- a/requirements_runpod.txt +++ b/requirements_runpod.txt @@ -1,5 +1,5 @@ torch==2.4.0+cu124 torchvision==0.19.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124 # no_verify leave this to specify not checking this a verification stage -bitsandbytes==0.43.3 +bitsandbytes==0.44.0 tensorboard==2.14.1 tensorflow==2.14.0 wheel tensorrt onnxruntime-gpu==1.17.1 diff --git a/requirements_windows.txt b/requirements_windows.txt index 243b24df0..d4e2d3c60 100644 --- a/requirements_windows.txt +++ b/requirements_windows.txt @@ -1,4 +1,4 @@ -bitsandbytes==0.43.3 +bitsandbytes==0.44.0 tensorboard tensorflow>=2.16.1 onnxruntime-gpu==1.17.1 diff --git a/sd-scripts b/sd-scripts index d05063857..8bea039a8 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit d0506385718cbf2b1d25ea9f68d3b66a604bc825 +Subproject commit 8bea039a8d9503a3fe696c445ca992301be1d6fd From 7fb2307b3e8fa559b5c273e21ecd9ca74c6984f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 05:03:34 +0000 Subject: [PATCH 137/199] Bump crate-ci/typos from 1.23.6 to 1.25.0 Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.23.6 to 1.25.0. 
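Circling back to the optimizer choices added a couple of commits above: the sketch below is an assumption-laden illustration, not code from this repository, of how selecting one of the new AdEMAMix entries might surface in the generated training config; the key name follows sd-scripts' `--optimizer_type` argument, and the other values are placeholders.

```python
# Illustrative only: the dropdown string is expected to pass straight through to sd-scripts.
# Key names and values are assumptions made for the sake of the example.
config_entry = {
    "optimizer_type": "bitsandbytes.optim.AdEMAMix8bit",  # needs bitsandbytes >= 0.44.0
    "optimizer_args": [],
    "learning_rate": 1e-4,
}
print(config_entry["optimizer_type"])
```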
- [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.23.6...v1.25.0) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/typos.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/typos.yaml b/.github/workflows/typos.yaml index ccd375a7b..084d42dd2 100644 --- a/.github/workflows/typos.yaml +++ b/.github/workflows/typos.yaml @@ -18,4 +18,4 @@ jobs: - uses: actions/checkout@v4 - name: typos-action - uses: crate-ci/typos@v1.23.6 + uses: crate-ci/typos@v1.25.0 From 36c57c019230ff9a7e4b57302245bfca712bbeb4 Mon Sep 17 00:00:00 2001 From: bulieme0 <53142287+bulieme@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:19:39 +0700 Subject: [PATCH 138/199] Fix typo on README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 45f4aed92..89a12bf65 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ The GUI allows you to set the training parameters and generate and run the requi This Colab notebook was not created or maintained by me; however, it appears to function effectively. The source can be found at: . -I would like to express my gratitude to camendutu for their valuable contribution. If you encounter any issues with the Colab notebook, please report them on their repository. +I would like to express my gratitude to camenduru for their valuable contribution. If you encounter any issues with the Colab notebook, please report them on their repository. | Colab | Info | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------ | From b2a58ef4113db8cd8ef81fbc521638e346ecbf22 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 4 Oct 2024 21:09:41 -0400 Subject: [PATCH 139/199] Add new --noverify option to skip requirements validation on startup --- gui.bat | 9 --------- gui.ps1 | 34 ++++++---------------------------- gui.sh | 8 +------- kohya_gui.py | 14 ++++++++++++++ 4 files changed, 21 insertions(+), 44 deletions(-) diff --git a/gui.bat b/gui.bat index 1f4c85c9d..b24afe1ec 100644 --- a/gui.bat +++ b/gui.bat @@ -9,15 +9,6 @@ call .\venv\Scripts\deactivate.bat call .\venv\Scripts\activate.bat set PATH=%PATH%;%~dp0venv\Lib\site-packages\torch\lib -:: If the first argument is --help, skip the validation step -if "%~1" equ "--help" goto :skip_validation - -:: Validate requirements -python.exe .\setup\validate_requirements.py -if %errorlevel% neq 0 exit /b %errorlevel% - -:skip_validation - :: If the exit code is 0, run the kohya_gui.py script with the command-line arguments if %errorlevel% equ 0 ( REM Check if the batch was started via double-click diff --git a/gui.ps1 b/gui.ps1 index 24a433791..d2ce6fb18 100644 --- a/gui.ps1 +++ b/gui.ps1 @@ -9,33 +9,11 @@ if ($env:VIRTUAL_ENV) { & .\venv\Scripts\activate $env:PATH += ";$($MyInvocation.MyCommand.Path)\venv\Lib\site-packages\torch\lib" -# Debug info about system -# python.exe .\setup\debug_info.py - -# If the --help parameter is passed, skip the validation step -if ($args -contains "--help") { - # Run the kohya_gui.py script with the command-line arguments - python.exe kohya_gui.py $args - exit 0 +$argsFromFile = @() +if (Test-Path 
.\gui_parameters.txt) { + $argsFromFile = Get-Content .\gui_parameters.txt -Encoding UTF8 | Where-Object { $_ -notmatch "^#" } | Foreach-Object { $_ -split " " } } +$args_combo = $argsFromFile + $args +# Write-Host "The arguments passed to this script were: $args_combo" +python.exe kohya_gui.py $args_combo -# Validate the requirements and store the exit code -python.exe .\setup\validate_requirements.py - -# Check the exit code and stop execution if it is not 0 -if ($LASTEXITCODE -ne 0) { - Write-Host "Failed to validate requirements. Exiting script..." - exit $LASTEXITCODE -} - -# If the exit code is 0, read arguments from gui_parameters.txt (if it exists) -# and run the kohya_gui.py script with the command-line arguments -if ($LASTEXITCODE -eq 0) { - $argsFromFile = @() - if (Test-Path .\gui_parameters.txt) { - $argsFromFile = Get-Content .\gui_parameters.txt -Encoding UTF8 | Where-Object { $_ -notmatch "^#" } | Foreach-Object { $_ -split " " } - } - $args_combo = $argsFromFile + $args - # Write-Host "The arguments passed to this script were: $args_combo" - python.exe kohya_gui.py $args_combo -} diff --git a/gui.sh b/gui.sh index 17c5207a4..150ec5836 100755 --- a/gui.sh +++ b/gui.sh @@ -111,10 +111,4 @@ then STARTUP_CMD=python fi -# Validate the requirements and run the script if successful -if python "$SCRIPT_DIR/setup/validate_requirements.py" -r "$REQUIREMENTS_FILE"; then - "${STARTUP_CMD}" $STARTUP_CMD_ARGS "$SCRIPT_DIR/kohya_gui.py" "$@" -else - echo "Validation failed. Exiting..." - exit 1 -fi +"${STARTUP_CMD}" $STARTUP_CMD_ARGS "$SCRIPT_DIR/kohya_gui.py" "$@" diff --git a/kohya_gui.py b/kohya_gui.py index 485824b3b..f8f032879 100644 --- a/kohya_gui.py +++ b/kohya_gui.py @@ -1,6 +1,8 @@ import gradio as gr import os import argparse +import subprocess +import sys from kohya_gui.class_gui_config import KohyaSSGUIConfig from kohya_gui.dreambooth_gui import dreambooth_tab from kohya_gui.finetune_gui import finetune_tab @@ -12,6 +14,8 @@ from kohya_gui.custom_logging import setup_logging from kohya_gui.localization_ext import add_javascript +PYTHON = sys.executable +project_directory = os.path.dirname(os.path.abspath(__file__)) def UI(**kwargs): add_javascript(kwargs.get("language")) @@ -178,10 +182,20 @@ def UI(**kwargs): parser.add_argument( "--root_path", type=str, default=None, help="`root_path` for Gradio to enable reverse proxy support. e.g. 
/kohya_ss" ) + + parser.add_argument( + "--noverify", action="store_true", help="Disable requirements verification" + ) args = parser.parse_args() # Set up logging log = setup_logging(debug=args.debug) + + if args.noverify: + log.warning("Skipping requirements verification.") + else: + run_cmd = [rf"{PYTHON}", rf"{project_directory}/setup/validate_requirements.py"] + subprocess.run(run_cmd, shell=False) UI(**vars(args)) From 7ab6efc5ca38a546153eb16db7d7d9883863cee8 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 5 Oct 2024 07:07:44 -0400 Subject: [PATCH 140/199] Update startup GUI code --- kohya_gui.py | 248 ++++++++++++++++++++------------------------------- 1 file changed, 99 insertions(+), 149 deletions(-) diff --git a/kohya_gui.py b/kohya_gui.py index f8f032879..16bea2146 100644 --- a/kohya_gui.py +++ b/kohya_gui.py @@ -1,8 +1,10 @@ -import gradio as gr import os +import sys import argparse import subprocess -import sys +import contextlib +import gradio as gr + from kohya_gui.class_gui_config import KohyaSSGUIConfig from kohya_gui.dreambooth_gui import dreambooth_tab from kohya_gui.finetune_gui import finetune_tab @@ -10,73 +12,43 @@ from kohya_gui.utilities import utilities_tab from kohya_gui.lora_gui import lora_tab from kohya_gui.class_lora_tab import LoRATools - from kohya_gui.custom_logging import setup_logging from kohya_gui.localization_ext import add_javascript PYTHON = sys.executable -project_directory = os.path.dirname(os.path.abspath(__file__)) - -def UI(**kwargs): - add_javascript(kwargs.get("language")) - css = "" - - headless = kwargs.get("headless", False) - log.info(f"headless: {headless}") - - if os.path.exists("./assets/style.css"): - with open(os.path.join("./assets/style.css"), "r", encoding="utf8") as file: - log.debug("Load CSS...") - css += file.read() + "\n" - - if os.path.exists("./.release"): - with open(os.path.join("./.release"), "r", encoding="utf8") as file: - release = file.read() - - if os.path.exists("./README.md"): - with open(os.path.join("./README.md"), "r", encoding="utf8") as file: - README = file.read() - - interface = gr.Blocks( - css=css, title=f"Kohya_ss GUI {release}", theme=gr.themes.Default() - ) - - config = KohyaSSGUIConfig(config_file_path=kwargs.get("config")) - - if config.is_config_loaded(): - log.info(f"Loaded default GUI values from '{kwargs.get('config')}'...") - - use_shell_flag = True - # if os.name == "posix": - # use_shell_flag = True - - use_shell_flag = config.get("settings.use_shell", use_shell_flag) - - if kwargs.get("do_not_use_shell", False): - use_shell_flag = False - - if use_shell_flag: - log.info("Using shell=True when running external commands...") - - with interface: +project_dir = os.path.dirname(os.path.abspath(__file__)) + +# Function to read file content, suppressing any FileNotFoundError +def read_file_content(file_path): + with contextlib.suppress(FileNotFoundError): + with open(file_path, "r", encoding="utf8") as file: + return file.read() + return "" + +# Function to initialize the Gradio UI interface +def initialize_ui_interface(config, headless, use_shell, release_info, readme_content): + # Load custom CSS if available + css = read_file_content("./assets/style.css") + + # Create the main Gradio Blocks interface + ui_interface = gr.Blocks(css=css, title=f"Kohya_ss GUI {release_info}", theme=gr.themes.Default()) + with ui_interface: + # Create tabs for different functionalities with gr.Tab("Dreambooth"): ( train_data_dir_input, reg_data_dir_input, output_dir_input, logging_dir_input, - ) = dreambooth_tab( 
- headless=headless, config=config, use_shell_flag=use_shell_flag - ) + ) = dreambooth_tab(headless=headless, config=config, use_shell_flag=use_shell) with gr.Tab("LoRA"): - lora_tab(headless=headless, config=config, use_shell_flag=use_shell_flag) + lora_tab(headless=headless, config=config, use_shell_flag=use_shell) with gr.Tab("Textual Inversion"): - ti_tab(headless=headless, config=config, use_shell_flag=use_shell_flag) + ti_tab(headless=headless, config=config, use_shell_flag=use_shell) with gr.Tab("Finetuning"): - finetune_tab( - headless=headless, config=config, use_shell_flag=use_shell_flag - ) + finetune_tab(headless=headless, config=config, use_shell_flag=use_shell) with gr.Tab("Utilities"): + # Utilities tab requires inputs from the Dreambooth tab utilities_tab( train_data_dir_input=train_data_dir_input, reg_data_dir_input=reg_data_dir_input, @@ -88,114 +60,92 @@ def UI(**kwargs): with gr.Tab("LoRA"): _ = LoRATools(headless=headless) with gr.Tab("About"): - gr.Markdown(f"kohya_ss GUI release {release}") + # About tab to display release information and README content + gr.Markdown(f"kohya_ss GUI release {release_info}") with gr.Tab("README"): - gr.Markdown(README) - - htmlStr = f""" - - -
{release}
- - - """ - gr.HTML(htmlStr) - # Show the interface - launch_kwargs = {} - username = kwargs.get("username") - password = kwargs.get("password") - server_port = kwargs.get("server_port", 0) - inbrowser = kwargs.get("inbrowser", False) - share = kwargs.get("share", False) - do_not_share = kwargs.get("do_not_share", False) - server_name = kwargs.get("listen") - root_path = kwargs.get("root_path", None) - debug = kwargs.get("debug", False) - - launch_kwargs["server_name"] = server_name - if username and password: - launch_kwargs["auth"] = (username, password) - if server_port > 0: - launch_kwargs["server_port"] = server_port - if inbrowser: - launch_kwargs["inbrowser"] = inbrowser - if do_not_share: - launch_kwargs["share"] = False - else: - if share: - launch_kwargs["share"] = share - if root_path: - launch_kwargs["root_path"] = root_path - if debug: - launch_kwargs["debug"] = True - interface.launch(**launch_kwargs) + gr.Markdown(readme_content) + # Display release information in a div element + gr.Markdown(f"
{release_info}
") -if __name__ == "__main__": - # torch.cuda.set_per_process_memory_fraction(0.48) + return ui_interface + +# Function to configure and launch the UI +def UI(**kwargs): + # Add custom JavaScript if specified + add_javascript(kwargs.get("language")) + log.info(f"headless: {kwargs.get('headless', False)}") + + # Load release and README information + release_info = read_file_content("./.release") + readme_content = read_file_content("./README.md") + + # Load configuration from the specified file + config = KohyaSSGUIConfig(config_file_path=kwargs.get("config")) + if config.is_config_loaded(): + log.info(f"Loaded default GUI values from '{kwargs.get('config')}'...") + + # Determine if shell should be used for running external commands + use_shell = not kwargs.get("do_not_use_shell", False) and config.get("settings.use_shell", True) + if use_shell: + log.info("Using shell=True when running external commands...") + + # Initialize the Gradio UI interface + ui_interface = initialize_ui_interface(config, kwargs.get("headless", False), use_shell, release_info, readme_content) + + # Construct launch parameters using dictionary comprehension + launch_params = { + "server_name": kwargs.get("listen"), + "auth": (kwargs["username"], kwargs["password"]) if kwargs.get("username") and kwargs.get("password") else None, + "server_port": kwargs.get("server_port", 0) if kwargs.get("server_port", 0) > 0 else None, + "inbrowser": kwargs.get("inbrowser", False), + "share": False if kwargs.get("do_not_share", False) else kwargs.get("share", False), + "root_path": kwargs.get("root_path", None), + "debug": kwargs.get("debug", False), + } + + # This line filters out any key-value pairs from `launch_params` where the value is `None`, ensuring only valid parameters are passed to the `launch` function. 
+ launch_params = {k: v for k, v in launch_params.items() if v is not None} + + # Launch the Gradio interface with the specified parameters + ui_interface.launch(**launch_params) + +# Function to initialize argument parser for command-line arguments +def initialize_arg_parser(): parser = argparse.ArgumentParser() - parser.add_argument( - "--config", - type=str, - default="./config.toml", - help="Path to the toml config file for interface defaults", - ) + parser.add_argument("--config", type=str, default="./config.toml", help="Path to the toml config file for interface defaults") parser.add_argument("--debug", action="store_true", help="Debug on") - parser.add_argument( - "--listen", - type=str, - default="127.0.0.1", - help="IP to listen on for connections to Gradio", - ) - parser.add_argument( - "--username", type=str, default="", help="Username for authentication" - ) - parser.add_argument( - "--password", type=str, default="", help="Password for authentication" - ) - parser.add_argument( - "--server_port", - type=int, - default=0, - help="Port to run the server listener on", - ) + parser.add_argument("--listen", type=str, default="127.0.0.1", help="IP to listen on for connections to Gradio") + parser.add_argument("--username", type=str, default="", help="Username for authentication") + parser.add_argument("--password", type=str, default="", help="Password for authentication") + parser.add_argument("--server_port", type=int, default=0, help="Port to run the server listener on") parser.add_argument("--inbrowser", action="store_true", help="Open in browser") parser.add_argument("--share", action="store_true", help="Share the gradio UI") - parser.add_argument( - "--headless", action="store_true", help="Is the server headless" - ) - parser.add_argument( - "--language", type=str, default=None, help="Set custom language" - ) - + parser.add_argument("--headless", action="store_true", help="Is the server headless") + parser.add_argument("--language", type=str, default=None, help="Set custom language") parser.add_argument("--use-ipex", action="store_true", help="Use IPEX environment") parser.add_argument("--use-rocm", action="store_true", help="Use ROCm environment") + parser.add_argument("--do_not_use_shell", action="store_true", help="Enforce not to use shell=True when running external commands") + parser.add_argument("--do_not_share", action="store_true", help="Do not share the gradio UI") + parser.add_argument("--root_path", type=str, default=None, help="`root_path` for Gradio to enable reverse proxy support. e.g. /kohya_ss") + parser.add_argument("--noverify", action="store_true", help="Disable requirements verification") + return parser - parser.add_argument( - "--do_not_use_shell", action="store_true", help="Enforce not to use shell=True when running external commands" - ) - - parser.add_argument( - "--do_not_share", action="store_true", help="Do not share the gradio UI" - ) - - parser.add_argument( - "--root_path", type=str, default=None, help="`root_path` for Gradio to enable reverse proxy support. e.g. 
/kohya_ss" - ) - - parser.add_argument( - "--noverify", action="store_true", help="Disable requirements verification" - ) - +if __name__ == "__main__": + # Initialize argument parser and parse arguments + parser = initialize_arg_parser() args = parser.parse_args() - # Set up logging + # Set up logging based on the debug flag log = setup_logging(debug=args.debug) - + + # Verify requirements unless `noverify` flag is set if args.noverify: log.warning("Skipping requirements verification.") else: - run_cmd = [rf"{PYTHON}", rf"{project_directory}/setup/validate_requirements.py"] - subprocess.run(run_cmd, shell=False) + # Run the validation command to verify requirements + validation_command = [PYTHON, os.path.join(project_dir, "setup", "validate_requirements.py")] + subprocess.run(validation_command, check=True) - UI(**vars(args)) + # Launch the UI with the provided arguments + UI(**vars(args)) \ No newline at end of file From 0d27feaf01ea8b6fa421b0fe2f2779e5daacbe36 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 5 Oct 2024 08:21:14 -0400 Subject: [PATCH 141/199] Update setup code --- setup/setup_common.py | 668 +++++++++++++++------------------ setup/validate_requirements.py | 222 +++++++---- 2 files changed, 450 insertions(+), 440 deletions(-) diff --git a/setup/setup_common.py b/setup/setup_common.py index 79eaef64f..9d0852212 100644 --- a/setup/setup_common.py +++ b/setup/setup_common.py @@ -1,363 +1,300 @@ -import subprocess import os -import re import sys import logging import shutil import datetime +import subprocess +import re import pkg_resources -errors = 0 # Define the 'errors' variable before using it -log = logging.getLogger('sd') +log = logging.getLogger("sd") + +# Constants +MIN_PYTHON_VERSION = (3, 10, 9) +MAX_PYTHON_VERSION = (3, 11, 0) +LOG_DIR = "../logs/setup/" +LOG_LEVEL = "INFO" # Set to "INFO" or "WARNING" for less verbose logging + def check_python_version(): """ Check if the current Python version is within the acceptable range. - Returns: - bool: True if the current Python version is valid, False otherwise. + bool: True if the current Python version is valid, False otherwise. """ - min_version = (3, 10, 9) - max_version = (3, 11, 0) - - from packaging import version - + log.debug("Checking Python version...") try: current_version = sys.version_info log.info(f"Python version is {sys.version}") - - if not (min_version <= current_version < max_version): - log.error(f"The current version of python ({current_version}) is not appropriate to run Kohya_ss GUI") - log.error("The python version needs to be greater or equal to 3.10.9 and less than 3.11.0") + + if not (MIN_PYTHON_VERSION <= current_version < MAX_PYTHON_VERSION): + log.error( + f"The current version of python ({sys.version}) is not supported." + ) + log.error("The Python version must be >= 3.10.9 and < 3.11.0.") return False return True except Exception as e: log.error(f"Failed to verify Python version. Error: {e}") return False + def update_submodule(quiet=True): """ Ensure the submodule is initialized and updated. - - This function uses the Git command line interface to initialize and update - the specified submodule recursively. Errors during the Git operation - or if Git is not found are caught and logged. - - Parameters: - - quiet: If True, suppresses the output of the Git command. 
""" + log.debug("Updating submodule...") git_command = ["git", "submodule", "update", "--init", "--recursive"] - if quiet: git_command.append("--quiet") - + try: - # Initialize and update the submodule subprocess.run(git_command, check=True) log.info("Submodule initialized and updated.") - except subprocess.CalledProcessError as e: - # Log the error if the Git operation fails log.error(f"Error during Git operation: {e}") except FileNotFoundError as e: - # Log the error if the file is not found log.error(e) -# def read_tag_version_from_file(file_path): -# """ -# Read the tag version from a given file. - -# Parameters: -# - file_path: The path to the file containing the tag version. - -# Returns: -# The tag version as a string. -# """ -# with open(file_path, 'r') as file: -# # Read the first line and strip whitespace -# tag_version = file.readline().strip() -# return tag_version def clone_or_checkout(repo_url, branch_or_tag, directory_name): """ Clone a repo or checkout a specific branch or tag if the repo already exists. - For branches, it updates to the latest version before checking out. - Suppresses detached HEAD advice for tags or specific commits. - Restores the original working directory after operations. - - Parameters: - - repo_url: The URL of the Git repository. - - branch_or_tag: The name of the branch or tag to clone or checkout. - - directory_name: The name of the directory to clone into or where the repo already exists. """ - original_dir = os.getcwd() # Store the original directory + log.debug( + f"Cloning or checking out repository: {repo_url}, branch/tag: {branch_or_tag}, directory: {directory_name}" + ) + original_dir = os.getcwd() try: if not os.path.exists(directory_name): - # Directory does not exist, clone the repo quietly - - # Construct the command as a string for logging - # run_cmd = f"git clone --branch {branch_or_tag} --single-branch --quiet {repo_url} {directory_name}" - run_cmd = ["git", "clone", "--branch", branch_or_tag, "--single-branch", "--quiet", repo_url, directory_name] - - - # Log the command - log.debug(run_cmd) - - # Run the command - process = subprocess.Popen( - run_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True - ) - output, error = process.communicate() - - if error and not error.startswith("Note: switching to"): - log.warning(error) - else: - log.info(f"Successfully cloned sd-scripts {branch_or_tag}") - + run_cmd = [ + "git", + "clone", + "--branch", + branch_or_tag, + "--single-branch", + "--quiet", + repo_url, + directory_name, + ] + log.debug(f"Cloning repository: {run_cmd}") + subprocess.run(run_cmd, check=True) + log.info(f"Successfully cloned {repo_url} ({branch_or_tag})") else: os.chdir(directory_name) + log.debug("Fetching all branches and tags...") subprocess.run(["git", "fetch", "--all", "--quiet"], check=True) - subprocess.run(["git", "config", "advice.detachedHead", "false"], check=True) - - # Get the current branch or commit hash - current_branch_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode() - tag_branch_hash = subprocess.check_output(["git", "rev-parse", branch_or_tag]).strip().decode() - - if current_branch_hash != tag_branch_hash: - run_cmd = f"git checkout {branch_or_tag} --quiet" - # Log the command - log.debug(run_cmd) - - # Execute the checkout command - process = subprocess.Popen(run_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - output, error = process.communicate() - - if error: - log.warning(error.decode()) - else: - log.info(f"Checked out 
sd-scripts {branch_or_tag} successfully.") + subprocess.run( + ["git", "config", "advice.detachedHead", "false"], check=True + ) + + current_branch_hash = ( + subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode() + ) + target_branch_hash = ( + subprocess.check_output(["git", "rev-parse", branch_or_tag]) + .strip() + .decode() + ) + + if current_branch_hash != target_branch_hash: + log.debug(f"Checking out branch/tag: {branch_or_tag}") + subprocess.run( + ["git", "checkout", branch_or_tag, "--quiet"], check=True + ) + log.info(f"Checked out {branch_or_tag} successfully.") else: - log.info(f"Current branch of sd-scripts is already at the required release {branch_or_tag}.") + log.info(f"Already at required branch/tag: {branch_or_tag}") except subprocess.CalledProcessError as e: log.error(f"Error during Git operation: {e}") finally: - os.chdir(original_dir) # Restore the original directory + os.chdir(original_dir) -# setup console and file logging -def setup_logging(clean=False): - # - # This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master - # + +def setup_logging(): + """ + Set up logging to file and console. + """ + log.debug("Setting up logging...") from rich.theme import Theme from rich.logging import RichHandler from rich.console import Console - from rich.pretty import install as pretty_install - from rich.traceback import install as traceback_install console = Console( log_time=True, - log_time_format='%H:%M:%S-%f', - theme=Theme( - { - 'traceback.border': 'black', - 'traceback.border.syntax_error': 'black', - 'inspect.value.border': 'black', - } - ), + log_time_format="%H:%M:%S-%f", + theme=Theme({"traceback.border": "black", "inspect.value.border": "black"}), ) - # logging.getLogger("urllib3").setLevel(logging.ERROR) - # logging.getLogger("httpx").setLevel(logging.ERROR) - - current_datetime = datetime.datetime.now() - current_datetime_str = current_datetime.strftime('%Y%m%d-%H%M%S') + current_datetime_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") log_file = os.path.join( - os.path.dirname(__file__), - f'../logs/setup/kohya_ss_gui_{current_datetime_str}.log', + os.path.dirname(__file__), f"{LOG_DIR}kohya_ss_gui_{current_datetime_str}.log" ) + os.makedirs(os.path.dirname(log_file), exist_ok=True) - # Create directories if they don't exist - log_directory = os.path.dirname(log_file) - os.makedirs(log_directory, exist_ok=True) - - level = logging.INFO logging.basicConfig( level=logging.ERROR, - format='%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s', + format="%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s", filename=log_file, - filemode='a', - encoding='utf-8', + filemode="a", + encoding="utf-8", force=True, ) - log.setLevel( - logging.DEBUG - ) # log to file is always at level debug for facility `sd` - pretty_install(console=console) - traceback_install( - console=console, - extra_lines=1, - width=console.width, - word_wrap=False, - indent_guides=False, - suppress=[], - ) - rh = RichHandler( - show_time=True, - omit_repeated_times=False, - show_level=True, - show_path=False, - markup=False, - rich_tracebacks=True, - log_time_format='%H:%M:%S-%f', - level=level, - console=console, - ) - rh.set_name(level) - while log.hasHandlers() and len(log.handlers) > 0: - log.removeHandler(log.handlers[0]) - log.addHandler(rh) + log_level = os.getenv("LOG_LEVEL", LOG_LEVEL).upper() + log.setLevel(getattr(logging, log_level, logging.DEBUG)) + rich_handler = 
RichHandler(console=console) + + # Replace existing handlers with the rich handler + log.handlers.clear() + log.addHandler(rich_handler) + log.debug("Logging setup complete.") -def install_requirements_inbulk(requirements_file, show_stdout=True, optional_parm="", upgrade = False): +def install_requirements_inbulk( + requirements_file, show_stdout=True, optional_parm="", upgrade=False +): + log.debug(f"Installing requirements in bulk from: {requirements_file}") if not os.path.exists(requirements_file): - log.error(f'Could not find the requirements file in {requirements_file}.') + log.error(f"Could not find the requirements file in {requirements_file}.") return - log.info(f'Installing requirements from {requirements_file}...') + log.info(f"Installing requirements from {requirements_file}...") if upgrade: optional_parm += " -U" if show_stdout: - run_cmd(f'pip install -r {requirements_file} {optional_parm}') + run_cmd(f"pip install -r {requirements_file} {optional_parm}") else: - run_cmd(f'pip install -r {requirements_file} {optional_parm} --quiet') - log.info(f'Requirements from {requirements_file} installed.') - + run_cmd(f"pip install -r {requirements_file} {optional_parm} --quiet") + log.info(f"Requirements from {requirements_file} installed.") def configure_accelerate(run_accelerate=False): - # - # This function was taken and adapted from code written by jstayco - # - + log.debug("Configuring accelerate...") from pathlib import Path def env_var_exists(var_name): - return var_name in os.environ and os.environ[var_name] != '' + return var_name in os.environ and os.environ[var_name] != "" + + log.info("Configuring accelerate...") - log.info('Configuring accelerate...') - source_accelerate_config_file = os.path.join( os.path.dirname(os.path.abspath(__file__)), - '..', - 'config_files', - 'accelerate', - 'default_config.yaml', + "..", + "config_files", + "accelerate", + "default_config.yaml", ) if not os.path.exists(source_accelerate_config_file): + log.warning( + f"Could not find the accelerate configuration file in {source_accelerate_config_file}." + ) if run_accelerate: - run_cmd('accelerate config') + log.debug("Running accelerate configuration command...") + run_cmd([sys.executable, "-m", "accelerate", "config"]) else: log.warning( - f'Could not find the accelerate configuration file in {source_accelerate_config_file}. Please configure accelerate manually by runningthe option in the menu.' + "Please configure accelerate manually by running the option in the menu." 
) - - log.debug( - f'Source accelerate config location: {source_accelerate_config_file}' - ) + return + + log.debug(f"Source accelerate config location: {source_accelerate_config_file}") target_config_location = None - log.debug( - f"Environment variables: HF_HOME: {os.environ.get('HF_HOME')}, " - f"LOCALAPPDATA: {os.environ.get('LOCALAPPDATA')}, " - f"USERPROFILE: {os.environ.get('USERPROFILE')}" - ) - if env_var_exists('HF_HOME'): - target_config_location = Path( - os.environ['HF_HOME'], 'accelerate', 'default_config.yaml' - ) - elif env_var_exists('LOCALAPPDATA'): - target_config_location = Path( - os.environ['LOCALAPPDATA'], - 'huggingface', - 'accelerate', - 'default_config.yaml', - ) - elif env_var_exists('USERPROFILE'): - target_config_location = Path( - os.environ['USERPROFILE'], - '.cache', - 'huggingface', - 'accelerate', - 'default_config.yaml', - ) + env_vars = { + "HF_HOME": Path(os.environ.get("HF_HOME", "")), + "LOCALAPPDATA": Path( + os.environ.get("LOCALAPPDATA", ""), + "huggingface", + "accelerate", + "default_config.yaml", + ), + "USERPROFILE": Path( + os.environ.get("USERPROFILE", ""), + ".cache", + "huggingface", + "accelerate", + "default_config.yaml", + ), + } + + for var, path in env_vars.items(): + if env_var_exists(var): + target_config_location = path + break - log.debug(f'Target config location: {target_config_location}') + log.debug(f"Target config location: {target_config_location}") if target_config_location: if not target_config_location.is_file(): - target_config_location.parent.mkdir(parents=True, exist_ok=True) log.debug( - f'Target accelerate config location: {target_config_location}' - ) - shutil.copyfile( - source_accelerate_config_file, target_config_location + f"Creating target config directory: {target_config_location.parent}" ) - log.info( - f'Copied accelerate config file to: {target_config_location}' + target_config_location.parent.mkdir(parents=True, exist_ok=True) + log.debug( + f"Copying config file to target location: {target_config_location}" ) - else: - if run_accelerate: - run_cmd('accelerate config') - else: - log.warning( - 'Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config.' - ) - else: - if run_accelerate: - run_cmd('accelerate config') + shutil.copyfile(source_accelerate_config_file, target_config_location) + log.info(f"Copied accelerate config file to: {target_config_location}") + elif run_accelerate: + log.debug("Running accelerate configuration command...") + run_cmd([sys.executable, "-m", "accelerate", "config"]) else: log.warning( - 'Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config.' + "Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config." ) + elif run_accelerate: + log.debug("Running accelerate configuration command...") + run_cmd([sys.executable, "-m", "accelerate", "config"]) + else: + log.warning( + "Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config." 
+ ) def check_torch(): + log.debug("Checking Torch installation...") # - # This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master + # This function was adapted from code written by vladimandic: https://github.com/vladimandic/automatic/commits/master # # Check for toolkit - if shutil.which('nvidia-smi') is not None or os.path.exists( + if shutil.which("nvidia-smi") is not None or os.path.exists( os.path.join( - os.environ.get('SystemRoot') or r'C:\Windows', - 'System32', - 'nvidia-smi.exe', + os.environ.get("SystemRoot") or r"C:\Windows", + "System32", + "nvidia-smi.exe", ) ): - log.info('nVidia toolkit detected') - elif shutil.which('rocminfo') is not None or os.path.exists( - '/opt/rocm/bin/rocminfo' + log.info("nVidia toolkit detected") + elif shutil.which("rocminfo") is not None or os.path.exists( + "/opt/rocm/bin/rocminfo" ): - log.info('AMD toolkit detected') - elif (shutil.which('sycl-ls') is not None - or os.environ.get('ONEAPI_ROOT') is not None - or os.path.exists('/opt/intel/oneapi')): - log.info('Intel OneAPI toolkit detected') + log.info("AMD toolkit detected") + elif ( + shutil.which("sycl-ls") is not None + or os.environ.get("ONEAPI_ROOT") is not None + or os.path.exists("/opt/intel/oneapi") + ): + log.info("Intel OneAPI toolkit detected") else: - log.info('Using CPU-only Torch') + log.info("Using CPU-only Torch") try: import torch + + log.debug("Torch module imported successfully.") try: # Import IPEX / XPU support import intel_extension_for_pytorch as ipex - except Exception: - pass - log.info(f'Torch {torch.__version__}') + + log.debug("Intel extension for PyTorch imported successfully.") + except Exception as e: + log.warning(f"Failed to import intel_extension_for_pytorch: {e}") + log.info(f"Torch {torch.__version__}") if torch.cuda.is_available(): if torch.version.cuda: @@ -367,33 +304,33 @@ def check_torch(): ) elif torch.version.hip: # Log AMD ROCm HIP version - log.info(f'Torch backend: AMD ROCm HIP {torch.version.hip}') + log.info(f"Torch backend: AMD ROCm HIP {torch.version.hip}") else: - log.warning('Unknown Torch backend') + log.warning("Unknown Torch backend") # Log information about detected GPUs for device in [ torch.cuda.device(i) for i in range(torch.cuda.device_count()) ]: log.info( - f'Torch detected GPU: {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}' + f"Torch detected GPU: {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}" ) # Check if XPU is available elif hasattr(torch, "xpu") and torch.xpu.is_available(): # Log Intel IPEX version - log.info(f'Torch backend: Intel IPEX {ipex.__version__}') + log.info(f"Torch backend: Intel IPEX {ipex.__version__}") for device in [ torch.xpu.device(i) for i in range(torch.xpu.device_count()) ]: log.info( - f'Torch detected GPU: {torch.xpu.get_device_name(device)} VRAM {round(torch.xpu.get_device_properties(device).total_memory / 1024 / 1024)} Compute Units {torch.xpu.get_device_properties(device).max_compute_units}' + f"Torch detected GPU: {torch.xpu.get_device_name(device)} VRAM {round(torch.xpu.get_device_properties(device).total_memory / 1024 / 1024)} Compute Units 
{torch.xpu.get_device_properties(device).max_compute_units}" ) else: - log.warning('Torch reports GPU not available') - + log.warning("Torch reports GPU not available") + return int(torch.__version__[0]) except Exception as e: - # log.warning(f'Could not load torch: {e}') + log.error(f"Could not load torch: {e}") return 0 @@ -404,17 +341,19 @@ def check_repo_version(): in the current directory. If the file exists, it reads the release version from the file and logs it. If the file does not exist, it logs a debug message indicating that the release could not be read. """ - if os.path.exists('.release'): + log.debug("Checking repository version...") + if os.path.exists(".release"): try: - with open(os.path.join('./.release'), 'r', encoding='utf8') as file: - release= file.read() - - log.info(f'Kohya_ss GUI version: {release}') + with open(os.path.join("./.release"), "r", encoding="utf8") as file: + release = file.read() + + log.info(f"Kohya_ss GUI version: {release}") except Exception as e: - log.error(f'Could not read release: {e}') + log.error(f"Could not read release: {e}") else: - log.debug('Could not read release...') - + log.debug("Could not read release...") + + # execute git command def git(arg: str, folder: str = None, ignore: bool = False): """ @@ -433,22 +372,31 @@ def git(arg: str, folder: str = None, ignore: bool = False): If set to True, errors will not be logged. Note: - This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master + This function was adapted from code written by vladimandic: https://github.com/vladimandic/automatic/commits/master """ - - # git_cmd = os.environ.get('GIT', "git") - result = subprocess.run(["git", arg], check=False, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=folder or '.') + log.debug(f"Running git command: git {arg} in folder: {folder or '.'}") + result = subprocess.run( + ["git", arg], + check=False, + shell=True, + env=os.environ, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=folder or ".", + ) txt = result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stderr) > 0: - txt += ('\n' if len(txt) > 0 else '') + result.stderr.decode(encoding="utf8", errors="ignore") + txt += ("\n" if len(txt) > 0 else "") + result.stderr.decode( + encoding="utf8", errors="ignore" + ) txt = txt.strip() if result.returncode != 0 and not ignore: global errors errors += 1 - log.error(f'Error running git: {folder} / {arg}') - if 'or stash them' in txt: - log.error(f'Local changes detected: check log for details...') - log.debug(f'Git output: {txt}') + log.error(f"Error running git: {folder} / {arg}") + if "or stash them" in txt: + log.error(f"Local changes detected: check log for details...") + log.debug(f"Git output: {txt}") def pip(arg: str, ignore: bool = False, quiet: bool = False, show_stdout: bool = False): @@ -473,32 +421,44 @@ def pip(arg: str, ignore: bool = False, quiet: bool = False, show_stdout: bool = Returns: - The output of the pip command as a string, or None if the 'show_stdout' flag is set. 
""" - # arg = arg.replace('>=', '==') + log.debug(f"Running pip command: {arg}") if not quiet: - log.info(f'Installing package: {arg.replace("install", "").replace("--upgrade", "").replace("--no-deps", "").replace("--force", "").replace(" ", " ").strip()}') - pip_cmd = [fr"{sys.executable}", "-m", "pip"] + arg.split(" ") + log.info( + f'Installing package: {arg.replace("install", "").replace("--upgrade", "").replace("--no-deps", "").replace("--force", "").replace(" ", " ").strip()}' + ) + pip_cmd = [rf"{sys.executable}", "-m", "pip"] + arg.split(" ") log.debug(f"Running pip: {pip_cmd}") if show_stdout: subprocess.run(pip_cmd, shell=False, check=False, env=os.environ) else: - result = subprocess.run(pip_cmd, shell=False, check=False, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + result = subprocess.run( + pip_cmd, + shell=False, + check=False, + env=os.environ, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) txt = result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stderr) > 0: - txt += ('\n' if len(txt) > 0 else '') + result.stderr.decode(encoding="utf8", errors="ignore") + txt += ("\n" if len(txt) > 0 else "") + result.stderr.decode( + encoding="utf8", errors="ignore" + ) txt = txt.strip() if result.returncode != 0 and not ignore: - global errors # pylint: disable=global-statement + global errors # pylint: disable=global-statement errors += 1 - log.error(f'Error running pip: {arg}') - log.error(f'Pip output: {txt}') + log.error(f"Error running pip: {arg}") + log.error(f"Pip output: {txt}") return txt + def installed(package, friendly: str = None): """ Checks if the specified package(s) are installed with the correct version. This function can handle package specifications with or without version constraints, and can also filter out command-line options and URLs when a 'friendly' string is provided. - + Parameters: - package: A string that specifies one or more packages with optional version constraints. - friendly: An optional string used to provide a cleaner version of the package string @@ -506,43 +466,39 @@ def installed(package, friendly: str = None): Returns: - True if all specified packages are installed with the correct versions, False otherwise. - + Note: This function was adapted from code written by vladimandic. 
""" - + log.debug(f"Checking if package is installed: {package}") # Remove any optional features specified in brackets (e.g., "package[option]==version" becomes "package==version") - package = re.sub(r'\[.*?\]', '', package) + package = re.sub(r"\[.*?\]", "", package) try: if friendly: # If a 'friendly' version of the package string is provided, split it into components pkgs = friendly.split() - + # Filter out command-line options and URLs from the package specification pkgs = [ - p - for p in package.split() - if not p.startswith('--') and "://" not in p + p for p in package.split() if not p.startswith("--") and "://" not in p ] else: # Split the package string into components, excluding '-' and '=' prefixed items pkgs = [ p for p in package.split() - if not p.startswith('-') and not p.startswith('=') + if not p.startswith("-") and not p.startswith("=") ] # For each package component, extract the package name, excluding any URLs - pkgs = [ - p.split('/')[-1] for p in pkgs - ] + pkgs = [p.split("/")[-1] for p in pkgs] for pkg in pkgs: # Parse the package name and version based on the version specifier used - if '>=' in pkg: - pkg_name, pkg_version = [x.strip() for x in pkg.split('>=')] - elif '==' in pkg: - pkg_name, pkg_version = [x.strip() for x in pkg.split('==')] + if ">=" in pkg: + pkg_name, pkg_version = [x.strip() for x in pkg.split(">=")] + elif "==" in pkg: + pkg_name, pkg_version = [x.strip() for x in pkg.split("==")] else: pkg_name, pkg_version = pkg.strip(), None @@ -553,38 +509,41 @@ def installed(package, friendly: str = None): spec = pkg_resources.working_set.by_key.get(pkg_name.lower(), None) if spec is None: # Try replacing underscores with dashes - spec = pkg_resources.working_set.by_key.get(pkg_name.replace('_', '-'), None) + spec = pkg_resources.working_set.by_key.get( + pkg_name.replace("_", "-"), None + ) if spec is not None: # Package is found, check version version = pkg_resources.get_distribution(pkg_name).version - log.debug(f'Package version found: {pkg_name} {version}') + log.debug(f"Package version found: {pkg_name} {version}") if pkg_version is not None: # Verify if the installed version meets the specified constraints - if '>=' in pkg: + if ">=" in pkg: ok = version >= pkg_version else: ok = version == pkg_version if not ok: # Version mismatch, log warning and return False - log.warning(f'Package wrong version: {pkg_name} {version} required {pkg_version}') + log.warning( + f"Package wrong version: {pkg_name} {version} required {pkg_version}" + ) return False else: # Package not found, log debug message and return False - log.debug(f'Package version not found: {pkg_name}') + log.debug(f"Package version not found: {pkg_name}") return False # All specified packages are installed with the correct versions return True except ModuleNotFoundError: # One or more packages are not installed, log debug message and return False - log.debug(f'Package not installed: {pkgs}') + log.debug(f"Package not installed: {pkgs}") return False - # install package using pip if not already installed def install( package, @@ -596,7 +555,7 @@ def install( """ Installs or upgrades a Python package using pip, with options to ignode errors, reinstall packages, and display outputs. - + Parameters: - package (str): The name of the package to be installed or upgraded. Can include version specifiers. Anything after a '#' in the package name will be ignored. @@ -612,103 +571,98 @@ def install( Returns: None. 
The function performs operations that affect the environment but does not return any value. - + Note: If `reinstall` is True, it disables any mechanism that allows for skipping installations when the package is already present, forcing a fresh install. """ + log.debug(f"Installing package: {package}") # Remove anything after '#' in the package variable - package = package.split('#')[0].strip() + package = package.split("#")[0].strip() if reinstall: - global quick_allowed # pylint: disable=global-statement + global quick_allowed # pylint: disable=global-statement quick_allowed = False if reinstall or not installed(package, friendly): - pip(f'install --upgrade {package}', ignore=ignore, show_stdout=show_stdout) + pip(f"install --upgrade {package}", ignore=ignore, show_stdout=show_stdout) def process_requirements_line(line, show_stdout: bool = False): + log.debug(f"Processing requirements line: {line}") # Remove brackets and their contents from the line using regular expressions # e.g., diffusers[torch]==0.10.2 becomes diffusers==0.10.2 - package_name = re.sub(r'\[.*?\]', '', line) + package_name = re.sub(r"\[.*?\]", "", line) install(line, package_name, show_stdout=show_stdout) -def install_requirements(requirements_file, check_no_verify_flag=False, show_stdout: bool = False): - if check_no_verify_flag: - log.info(f'Verifying modules installation status from {requirements_file}...') - else: - log.info(f'Installing modules from {requirements_file}...') - with open(requirements_file, 'r', encoding='utf8') as f: - # Read lines from the requirements file, strip whitespace, and filter out empty lines, comments, and lines starting with '.' - if check_no_verify_flag: - lines = [ - line.strip() - for line in f.readlines() - if line.strip() != '' - and not line.startswith('#') - and line is not None - and 'no_verify' not in line - ] - else: - lines = [ - line.strip() - for line in f.readlines() - if line.strip() != '' - and not line.startswith('#') - and line is not None - ] +def install_requirements( + requirements_file, check_no_verify_flag=False, show_stdout: bool = False +): + """ + Install or verify modules from a requirements file. - # Iterate over each line and install the requirements - for line in lines: - # Check if the line starts with '-r' to include another requirements file - if line.startswith('-r'): - # Get the path to the included requirements file - included_file = line[2:].strip() - # Expand the included requirements file recursively - install_requirements(included_file, check_no_verify_flag=check_no_verify_flag, show_stdout=show_stdout) - else: - process_requirements_line(line, show_stdout=show_stdout) + Parameters: + - requirements_file (str): Path to the requirements file. + - check_no_verify_flag (bool): If True, verify modules installation status without installing. + - show_stdout (bool): If True, show the standard output of the installation process. 
+ """ + log.debug(f"Installing requirements from file: {requirements_file}") + action = "Verifying" if check_no_verify_flag else "Installing" + log.info(f"{action} modules from {requirements_file}...") + + with open(requirements_file, "r", encoding="utf8") as f: + lines = [ + line.strip() + for line in f.readlines() + if line.strip() and not line.startswith("#") and "no_verify" not in line + ] + + for line in lines: + if line.startswith("-r"): + included_file = line[2:].strip() + log.debug(f"Processing included requirements file: {included_file}") + install_requirements( + included_file, + check_no_verify_flag=check_no_verify_flag, + show_stdout=show_stdout, + ) + else: + process_requirements_line(line, show_stdout=show_stdout) def ensure_base_requirements(): try: - import rich # pylint: disable=unused-import + import rich # pylint: disable=unused-import except ImportError: - install('--upgrade rich', 'rich') - + install("--upgrade rich", "rich") + try: import packaging except ImportError: - install('packaging') + install("packaging") def run_cmd(run_cmd): + """ + Execute a command using subprocess. + """ + log.debug(f"Running command: {run_cmd}") try: - subprocess.run(run_cmd, shell=True, check=False, env=os.environ) + subprocess.run(run_cmd, shell=True, check=True, env=os.environ) + log.info(f"Command executed successfully: {run_cmd}") except subprocess.CalledProcessError as e: - log.error(f'Error occurred while running command: {run_cmd}') - log.error(f'Error: {e}') - - -def delete_file(file_path): - if os.path.exists(file_path): - os.remove(file_path) - - -def write_to_file(file_path, content): - try: - with open(file_path, 'w') as file: - file.write(content) - except IOError as e: - print(f'Error occurred while writing to file: {file_path}') - print(f'Error: {e}') + log.error(f"Error occurred while running command: {run_cmd}") + log.error(f"Error: {e}") def clear_screen(): - # Check the current operating system to execute the correct clear screen command - if os.name == 'nt': # If the operating system is Windows - os.system('cls') - else: # If the operating system is Linux or Mac - os.system('clear') - + """ + Clear the terminal screen. 
+ """ + log.debug("Attempting to clear the terminal screen") + try: + os.system("cls" if os.name == "nt" else "clear") + log.info("Terminal screen cleared successfully") + except Exception as e: + log.error("Error occurred while clearing the terminal screen") + log.error(f"Error: {e}") diff --git a/setup/validate_requirements.py b/setup/validate_requirements.py index 17c4c58a2..497b44e0d 100644 --- a/setup/validate_requirements.py +++ b/setup/validate_requirements.py @@ -5,12 +5,11 @@ import setup_common # Get the absolute path of the current file's directory (Kohua_SS project directory) -project_directory = os.path.dirname(os.path.abspath(__file__)) - -# Check if the "setup" directory is present in the project_directory -if "setup" in project_directory: - # If the "setup" directory is present, move one level up to the parent directory - project_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +project_directory = ( + os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + if "setup" in os.path.dirname(os.path.abspath(__file__)) + else os.path.dirname(os.path.abspath(__file__)) +) # Add the project directory to the beginning of the Python search path sys.path.insert(0, project_directory) @@ -19,115 +18,172 @@ # Set up logging log = setup_logging() +log.debug(f"Project directory set to: {project_directory}") def check_path_with_space(): - # Get the current working directory + """Check if the current working directory contains a space.""" cwd = os.getcwd() - - # Check if the current working directory contains a space + log.debug(f"Current working directory: {cwd}") if " " in cwd: - log.error("The path in which this python code is executed contain one or many spaces. This is not supported for running kohya_ss GUI.") - log.error("Please move the repo to a path without spaces, delete the venv folder and run setup.sh again.") - log.error("The current working directory is: " + cwd) - exit(1) + # Log an error if the current working directory contains spaces + log.error( + "The path in which this python code is executed contains one or many spaces. This is not supported for running kohya_ss GUI." + ) + log.error( + "Please move the repo to a path without spaces, delete the venv folder, and run setup.sh again." 
+ ) + log.error(f"The current working directory is: {cwd}") + raise RuntimeError("Invalid path: contains spaces.") -def check_torch(): - # Check for toolkit - if shutil.which('nvidia-smi') is not None or os.path.exists( +def detect_toolkit(): + """Detect the available toolkit (NVIDIA, AMD, or Intel) and log the information.""" + log.debug("Detecting available toolkit...") + # Check for NVIDIA toolkit by looking for nvidia-smi executable + if shutil.which("nvidia-smi") or os.path.exists( os.path.join( - os.environ.get('SystemRoot') or r'C:\Windows', - 'System32', - 'nvidia-smi.exe', + os.environ.get("SystemRoot", r"C:\Windows"), "System32", "nvidia-smi.exe" ) ): - log.info('nVidia toolkit detected') - elif shutil.which('rocminfo') is not None or os.path.exists( - '/opt/rocm/bin/rocminfo' + log.debug("nVidia toolkit detected") + return "nVidia" + # Check for AMD toolkit by looking for rocminfo executable + elif shutil.which("rocminfo") or os.path.exists("/opt/rocm/bin/rocminfo"): + log.debug("AMD toolkit detected") + return "AMD" + # Check for Intel toolkit by looking for SYCL or OneAPI indicators + elif ( + shutil.which("sycl-ls") + or os.environ.get("ONEAPI_ROOT") + or os.path.exists("/opt/intel/oneapi") ): - log.info('AMD toolkit detected') - elif (shutil.which('sycl-ls') is not None - or os.environ.get('ONEAPI_ROOT') is not None - or os.path.exists('/opt/intel/oneapi')): - log.info('Intel OneAPI toolkit detected') + log.debug("Intel toolkit detected") + return "Intel" + # Default to CPU if no toolkit is detected else: - log.info('Using CPU-only Torch') + log.debug("No specific GPU toolkit detected, defaulting to CPU") + return "CPU" + +def check_torch(): + """Check if torch is available and log the relevant information.""" + # Detect the available toolkit (e.g., NVIDIA, AMD, Intel, or CPU) + toolkit = detect_toolkit() + log.info(f"{toolkit} toolkit detected") try: + # Import PyTorch + log.debug("Importing PyTorch...") import torch - try: - # Import IPEX / XPU support - import intel_extension_for_pytorch as ipex - except Exception: - pass - log.info(f'Torch {torch.__version__}') + ipex = None + # Attempt to import Intel Extension for PyTorch if Intel toolkit is detected + if toolkit == "Intel": + try: + log.debug("Attempting to import Intel Extension for PyTorch (IPEX)...") + import intel_extension_for_pytorch as ipex + log.debug("Intel Extension for PyTorch (IPEX) imported successfully") + except ImportError: + log.warning("Intel Extension for PyTorch (IPEX) not found.") + + # Log the PyTorch version + log.info(f"Torch {torch.__version__}") + + # Check if CUDA (NVIDIA GPU) is available if torch.cuda.is_available(): - if torch.version.cuda: - # Log nVidia CUDA and cuDNN versions - log.info( - f'Torch backend: nVidia CUDA {torch.version.cuda} cuDNN {torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else "N/A"}' - ) - elif torch.version.hip: - # Log AMD ROCm HIP version - log.info(f'Torch backend: AMD ROCm HIP {torch.version.hip}') - else: - log.warning('Unknown Torch backend') - - # Log information about detected GPUs - for device in [ - torch.cuda.device(i) for i in range(torch.cuda.device_count()) - ]: - log.info( - f'Torch detected GPU: {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}' - ) - # Check if XPU is available + log.debug("CUDA is available, logging CUDA info...") + 
log_cuda_info(torch) + # Check if XPU (Intel GPU) is available elif hasattr(torch, "xpu") and torch.xpu.is_available(): - # Log Intel IPEX version - log.info(f'Torch backend: Intel IPEX {ipex.__version__}') - for device in [ - torch.xpu.device(i) for i in range(torch.xpu.device_count()) - ]: - log.info( - f'Torch detected GPU: {torch.xpu.get_device_name(device)} VRAM {round(torch.xpu.get_device_properties(device).total_memory / 1024 / 1024)} Compute Units {torch.xpu.get_device_properties(device).max_compute_units}' - ) + log.debug("XPU is available, logging XPU info...") + log_xpu_info(torch, ipex) + # Log a warning if no GPU is available else: - log.warning('Torch reports GPU not available') - + log.warning("Torch reports GPU not available") + + # Return the major version of PyTorch return int(torch.__version__[0]) + except ImportError as e: + # Log an error if PyTorch cannot be loaded + log.error(f"Could not load torch: {e}") + sys.exit(1) except Exception as e: - log.error(f'Could not load torch: {e}') + # Log an unexpected error + log.error(f"Unexpected error while checking torch: {e}") sys.exit(1) - + +def log_cuda_info(torch): + """Log information about CUDA-enabled GPUs.""" + # Log the CUDA and cuDNN versions if available + if torch.version.cuda: + log.info( + f'Torch backend: nVidia CUDA {torch.version.cuda} cuDNN {torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else "N/A"}' + ) + # Log the ROCm HIP version if using AMD GPU + elif torch.version.hip: + log.info(f"Torch backend: AMD ROCm HIP {torch.version.hip}") + else: + log.warning("Unknown Torch backend") + + # Log information about each detected CUDA-enabled GPU + for device in range(torch.cuda.device_count()): + props = torch.cuda.get_device_properties(device) + log.info( + f"Torch detected GPU: {props.name} VRAM {round(props.total_memory / 1024 / 1024)}MB Arch {props.major}.{props.minor} Cores {props.multi_processor_count}" + ) + +def log_xpu_info(torch, ipex): + """Log information about Intel XPU-enabled GPUs.""" + # Log the Intel Extension for PyTorch (IPEX) version if available + if ipex: + log.info(f"Torch backend: Intel IPEX {ipex.__version__}") + # Log information about each detected XPU-enabled GPU + for device in range(torch.xpu.device_count()): + props = torch.xpu.get_device_properties(device) + log.info( + f"Torch detected GPU: {props.name} VRAM {round(props.total_memory / 1024 / 1024)}MB Compute Units {props.max_compute_units}" + ) + def main(): + # Check the repository version to ensure compatibility + log.debug("Checking repository version...") setup_common.check_repo_version() - + # Check if the current path contains spaces, which are not supported + log.debug("Checking if the current path contains spaces...") check_path_with_space() - + # Parse command line arguments + log.debug("Parsing command line arguments...") parser = argparse.ArgumentParser( - description='Validate that requirements are satisfied.' + description="Validate that requirements are satisfied." ) parser.add_argument( - '-r', - '--requirements', - type=str, - help='Path to the requirements file.', + "-r", "--requirements", type=str, help="Path to the requirements file." 
) - parser.add_argument('--debug', action='store_true', help='Debug on') + parser.add_argument("--debug", action="store_true", help="Debug on") args = parser.parse_args() - + + # Update git submodules if necessary + log.debug("Updating git submodules...") setup_common.update_submodule() + # Check if PyTorch is installed and log relevant information + log.debug("Checking if PyTorch is installed...") torch_ver = check_torch() - + + # Check if the Python version is compatible + log.debug("Checking Python version...") if not setup_common.check_python_version(): - exit(1) - - if args.requirements: - setup_common.install_requirements(args.requirements, check_no_verify_flag=True) - else: - setup_common.install_requirements('requirements_pytorch_windows.txt', check_no_verify_flag=True) - setup_common.install_requirements('requirements_windows.txt', check_no_verify_flag=True) + sys.exit(1) + + # Install required packages from the specified requirements file + requirements_file = args.requirements or "requirements_pytorch_windows.txt" + log.debug(f"Installing requirements from: {requirements_file}") + setup_common.install_requirements(requirements_file, check_no_verify_flag=True) + log.debug("Installing additional requirements from: requirements_windows.txt") + setup_common.install_requirements( + "requirements_windows.txt", check_no_verify_flag=True + ) -if __name__ == '__main__': +if __name__ == "__main__": + log.debug("Starting main function...") main() + log.debug("Main function finished.") From ee3fcc0431063cc48f71d232402ec48789f6f9a7 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 5 Oct 2024 10:36:11 -0400 Subject: [PATCH 142/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 8bea039a8..ba08a8989 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 8bea039a8d9503a3fe696c445ca992301be1d6fd +Subproject commit ba08a898940c80a6551111fdd77b53c6d3a019ac From 52d1cc56ce5cd059f7790e295795991332dc5829 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 6 Oct 2024 19:55:12 -0400 Subject: [PATCH 143/199] Update sf-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index ba08a8989..83e3048cb 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit ba08a898940c80a6551111fdd77b53c6d3a019ac +Subproject commit 83e3048cb089bf6726751609da26da751b8383ae From faa3d1920ccdcbb8308b6d6e0d07b3d4c43fada6 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 6 Oct 2024 20:26:33 -0400 Subject: [PATCH 144/199] Update Lycoris support --- kohya_gui/lora_gui.py | 24 ++++++++++-------------- requirements.txt | 4 ++-- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 4b44e7edd..e15ccb889 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1135,11 +1135,11 @@ def train_model( if LoRA_type == "LyCORIS/BOFT": network_module = "lycoris.kohya" - network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout={rank_dropout} rank_dropout_scale={rank_dropout_scale} constrain={constrain} rescaled={rescaled} algo=boft train_norm={train_norm}" + network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} module_dropout={module_dropout} use_tucker={use_tucker} rank_dropout={rank_dropout} rank_dropout_scale={rank_dropout_scale} algo=boft 
train_norm={train_norm}" if LoRA_type == "LyCORIS/Diag-OFT": network_module = "lycoris.kohya" - network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout={rank_dropout} rank_dropout_scale={rank_dropout_scale} constrain={constrain} rescaled={rescaled} algo=diag-oft train_norm={train_norm}" + network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} module_dropout={module_dropout} use_tucker={use_tucker} rank_dropout={rank_dropout} rank_dropout_scale={rank_dropout_scale} constraint={constrain} rescaled={rescaled} algo=diag-oft train_norm={train_norm}" if LoRA_type == "LyCORIS/DyLoRA": network_module = "lycoris.kohya" @@ -1147,7 +1147,7 @@ def train_model( if LoRA_type == "LyCORIS/GLoRA": network_module = "lycoris.kohya" - network_args = f' preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} rank_dropout={rank_dropout} module_dropout={module_dropout} rank_dropout_scale={rank_dropout_scale} algo="glora" train_norm={train_norm}' + network_args = f' preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} use_tucker={use_tucker} rank_dropout={rank_dropout} module_dropout={module_dropout} rank_dropout_scale={rank_dropout_scale} algo="glora" train_norm={train_norm}' if LoRA_type == "LyCORIS/iA3": network_module = "lycoris.kohya" @@ -1155,19 +1155,19 @@ def train_model( if LoRA_type == "LoCon" or LoRA_type == "LyCORIS/LoCon": network_module = "lycoris.kohya" - network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} rank_dropout={rank_dropout} bypass_mode={bypass_mode} dora_wd={dora_wd} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=locon train_norm={train_norm}" + network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} use_tucker={use_tucker} rank_dropout={rank_dropout} bypass_mode={bypass_mode} dora_wd={dora_wd} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=locon train_norm={train_norm}" if LoRA_type == "LyCORIS/LoHa": network_module = "lycoris.kohya" - network_args = f' preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} rank_dropout={rank_dropout} bypass_mode={bypass_mode} dora_wd={dora_wd} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo="loha" train_norm={train_norm}' + network_args = f' preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} use_tucker={use_tucker} rank_dropout={rank_dropout} bypass_mode={bypass_mode} dora_wd={dora_wd} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=loha train_norm={train_norm}' if LoRA_type == "LyCORIS/LoKr": network_module = "lycoris.kohya" - network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} rank_dropout={rank_dropout} bypass_mode={bypass_mode} dora_wd={dora_wd} module_dropout={module_dropout} factor={factor} use_cp={use_cp} use_scalar={use_scalar} decompose_both={decompose_both} rank_dropout_scale={rank_dropout_scale} algo=lokr train_norm={train_norm}" + network_args = f" preset={LyCORIS_preset} conv_dim={conv_dim} conv_alpha={conv_alpha} use_tucker={use_tucker} rank_dropout={rank_dropout} bypass_mode={bypass_mode} dora_wd={dora_wd} 
module_dropout={module_dropout} factor={factor} use_cp={use_cp} use_scalar={use_scalar} decompose_both={decompose_both} rank_dropout_scale={rank_dropout_scale} algo=lokr train_norm={train_norm}" if LoRA_type == "LyCORIS/Native Fine-Tuning": network_module = "lycoris.kohya" - network_args = f" preset={LyCORIS_preset} rank_dropout={rank_dropout} module_dropout={module_dropout} use_tucker={use_tucker} use_scalar={use_scalar} rank_dropout_scale={rank_dropout_scale} algo=full train_norm={train_norm}" + network_args = f" preset={LyCORIS_preset} rank_dropout={rank_dropout} module_dropout={module_dropout} rank_dropout_scale={rank_dropout_scale} algo=full train_norm={train_norm}" if LoRA_type == "Flux1": # Add a list of supported network arguments for Flux1 below when supported @@ -1731,7 +1731,7 @@ def list_presets(path): visible=False, interactive=True, allow_custom_value=True, - # info="https://github.com/KohakuBlueleaf/LyCORIS/blob/0006e2ffa05a48d8818112d9f70da74c0cd30b99/docs/Preset.md" + info="Use path_to_config_file.toml to choose config file (for LyCORIS module settings)" ) with gr.Group(): with gr.Row(): @@ -2186,9 +2186,10 @@ def update_LoRA_settings( "LyCORIS/BOFT", "LyCORIS/Diag-OFT", "LyCORIS/DyLoRA", + "LyCORIS/GLoRA", "LyCORIS/LoCon", "LyCORIS/LoHa", - "LyCORIS/Native Fine-Tuning", + "LyCORIS/LoKr", }, }, }, @@ -2197,12 +2198,9 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "LyCORIS/BOFT", - "LyCORIS/Diag-OFT", "LyCORIS/LoCon", "LyCORIS/LoHa", "LyCORIS/LoKr", - "LyCORIS/Native Fine-Tuning", }, }, }, @@ -2226,7 +2224,6 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "LyCORIS/BOFT", "LyCORIS/Diag-OFT", }, }, @@ -2236,7 +2233,6 @@ def update_LoRA_settings( "update_params": { "visible": LoRA_type in { - "LyCORIS/BOFT", "LyCORIS/Diag-OFT", }, }, diff --git a/requirements.txt b/requirements.txt index dfd3f0aba..78258f0ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,8 +12,8 @@ huggingface-hub==0.24.5 imagesize==1.4.1 invisible-watermark==0.2.0 lion-pytorch==0.0.6 -lycoris_lora==2.2.0.post3 -# lycoris_lora==3.0.0.post1 +# lycoris_lora==2.2.0.post3 +lycoris_lora==3.1.0 omegaconf==2.3.0 onnx==1.16.1 prodigyopt==1.0 From 55cd56a63b36f18b8dbca4e3b10f223a4ed6d929 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Wed, 9 Oct 2024 21:45:31 -0400 Subject: [PATCH 145/199] Allow to specify tensorboard host via env var TENSORBOARD_HOST --- kohya_gui/class_tensorboard.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kohya_gui/class_tensorboard.py b/kohya_gui/class_tensorboard.py index b9a9a9c4b..001c894da 100644 --- a/kohya_gui/class_tensorboard.py +++ b/kohya_gui/class_tensorboard.py @@ -20,6 +20,7 @@ class TensorboardManager: DEFAULT_TENSORBOARD_PORT = 6006 + DEFAULT_TENSORBOARD_HOST = "0.0.0.0" def __init__(self, logging_dir, headless: bool = False, wait_time=5): self.logging_dir = logging_dir @@ -29,6 +30,9 @@ def __init__(self, logging_dir, headless: bool = False, wait_time=5): self.tensorboard_port = os.environ.get( "TENSORBOARD_PORT", self.DEFAULT_TENSORBOARD_PORT ) + self.tensorboard_host = os.environ.get( + "TENSORBOARD_HOST", self.DEFAULT_TENSORBOARD_HOST + ) self.log = setup_logging() self.thread = None self.stop_event = Event() @@ -64,7 +68,7 @@ def start_tensorboard(self, logging_dir=None): "--logdir", logging_dir, "--host", - "0.0.0.0", + self.tensorboard_host, "--port", str(self.tensorboard_port), ] From 7ae14d53030e9f39a16b96bcde119164ccf9186e Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 11 
Oct 2024 20:25:29 -0400 Subject: [PATCH 146/199] Update sd-scripts version --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 83e3048cb..43bfeea60 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 83e3048cb089bf6726751609da26da751b8383ae +Subproject commit 43bfeea6002f4e0421ec096c529a13dfd3303c0b From 6f9df25d4545e48b312262089a8c2ddfe63099bf Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 14 Oct 2024 11:46:21 -0400 Subject: [PATCH 147/199] Update sd-scripts release --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 43bfeea60..1275e148d 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 43bfeea6002f4e0421ec096c529a13dfd3303c0b +Subproject commit 1275e148dfb90f99756d41052e49da9b1d94084a From 8255372947343e0579b57e160c03ab814a2680e5 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 14 Oct 2024 18:19:43 -0400 Subject: [PATCH 148/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 1275e148d..2500f5a79 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 1275e148dfb90f99756d41052e49da9b1d94084a +Subproject commit 2500f5a79806fdbe74c43db24a95ee19329a8fcc From 3c3f1edfb6397a89c4279d04a2425ea4b806101d Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 14 Oct 2024 18:32:09 -0400 Subject: [PATCH 149/199] Add --skip_cache_check option to GUI --- kohya_gui/class_advanced_training.py | 5 +++++ kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 5 +++++ kohya_gui/lora_gui.py | 5 +++++ kohya_gui/textual_inversion_gui.py | 5 +++++ requirements.txt | 2 +- 6 files changed, 26 insertions(+), 1 deletion(-) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index a607569d7..53afaba81 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -272,6 +272,11 @@ def full_options_update(full_fp16, full_bf16): info="Enable low RAM optimization. e.g. 
load models to VRAM instead of RAM (for machines which have bigger VRAM than RAM such as Colab and Kaggle)", interactive=True, ) + self.skip_cache_check = gr.Checkbox( + label="Skip cache check", + value=self.config.get("advanced.skip_cache_check", False), + info="Skip cache check for faster training start", + ) with gr.Row(): self.gradient_checkpointing = gr.Checkbox( diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index f2291c06f..3c8651c39 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -165,6 +165,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -370,6 +371,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -570,6 +572,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -993,6 +996,7 @@ def train_model( "sdpa": True if xformers == "sdpa" else None, "seed": int(seed) if int(seed) != 0 else None, "shuffle_caption": shuffle_caption, + "skip_cache_check": skip_cache_check, "stop_text_encoder_training": ( stop_text_encoder_training if stop_text_encoder_training != 0 else None ), @@ -1333,6 +1337,7 @@ def dreambooth_tab( advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, advanced_training.wandb_run_name, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 5dca363a1..931dbb51a 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -173,6 +173,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -384,6 +385,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -601,6 +603,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -1054,6 +1057,7 @@ def train_model( "sdpa": True if xformers == "sdpa" else None, "seed": int(seed) if int(seed) != 0 else None, "shuffle_caption": shuffle_caption, + "skip_cache_check": skip_cache_check, "t5xxl": t5xxl if sd3_checkbox else flux1_t5xxl if flux1_checkbox else None, "train_batch_size": train_batch_size, "train_data_dir": image_folder, @@ -1465,6 +1469,7 @@ def list_presets(path): advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, advanced_training.wandb_run_name, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index e15ccb889..2aa6d5449 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -185,6 +185,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -448,6 +449,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -745,6 +747,7 @@ def train_model( save_every_n_steps, save_last_n_steps, 
save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -1509,6 +1512,7 @@ def train_model( "sdpa": True if xformers == "sdpa" else None, "seed": int(seed) if int(seed) != 0 else None, "shuffle_caption": shuffle_caption, + "skip_cache_check": skip_cache_check, "stop_text_encoder_training": ( stop_text_encoder_training if stop_text_encoder_training != 0 else None ), @@ -2638,6 +2642,7 @@ def update_LoRA_settings( advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, advanced_training.wandb_run_name, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 83dc2c798..f9ff64e4d 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -158,6 +158,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -321,6 +322,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -477,6 +479,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + skip_cache_check, log_with, wandb_api_key, wandb_run_name, @@ -848,6 +851,7 @@ def train_model( "sdpa": True if xformers == "sdpa" else None, "seed": int(seed) if int(seed) != 0 else None, "shuffle_caption": shuffle_caption, + "skip_cache_check": skip_cache_check, "stop_text_encoder_training": ( stop_text_encoder_training if stop_text_encoder_training != 0 else None ), @@ -1216,6 +1220,7 @@ def list_embedding_files(path): advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, advanced_training.wandb_run_name, diff --git a/requirements.txt b/requirements.txt index 78258f0ac..6e2068631 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ easygui==0.98.3 einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 -gradio==4.43.0 +gradio==5.0.1 huggingface-hub==0.24.5 imagesize==1.4.1 invisible-watermark==0.2.0 From c258fc1421c577e2097c3fca5eb0763902104d93 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 15 Oct 2024 14:01:31 -0400 Subject: [PATCH 150/199] Fix requirements issue --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6e2068631..2175adaa8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 gradio==5.0.1 -huggingface-hub==0.24.5 +huggingface-hub>=0.24.5 imagesize==1.4.1 invisible-watermark==0.2.0 lion-pytorch==0.0.6 From 5e10c380d4aa2d1f18c986e2c1d6713d48412357 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 17 Oct 2024 18:51:15 -0400 Subject: [PATCH 151/199] Add support for LyCORIS LoRA when training Flux.1 --- kohya_gui/lora_gui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 2aa6d5449..93e2b5930 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -885,8 +885,8 @@ def train_model( if flux1_checkbox: log.info(f"Validating lora type is Flux1 if flux1 checkbox is checked...") - if (LoRA_type != "Flux1") and (LoRA_type != "Flux1 OFT"): - log.error("LoRA type must be set to 'Flux1' 
or 'Flux1 OFT' if Flux1 checkbox is checked.") + if (LoRA_type != "Flux1") and (LoRA_type != "Flux1 OFT") and ("LyCORIS" not in LoRA_type): + log.error("LoRA type must be set to 'Flux1', 'Flux1 OFT' or 'LyCORIS' if Flux1 checkbox is checked.") return TRAIN_BUTTON_VISIBLE # From b424e535296dcab1acea38458a387af9e8f87806 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 18 Oct 2024 09:47:33 -0400 Subject: [PATCH 152/199] Pin huggingface-hub version for gradio 5 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2175adaa8..d11825b60 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 gradio==5.0.1 -huggingface-hub>=0.24.5 +huggingface-hub==0.25.2 imagesize==1.4.1 invisible-watermark==0.2.0 lion-pytorch==0.0.6 From ee5143245595660b07273c8402d748ef5b5d9d15 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 18 Oct 2024 17:45:19 -0400 Subject: [PATCH 153/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 2500f5a79..d8d714266 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 2500f5a79806fdbe74c43db24a95ee19329a8fcc +Subproject commit d8d7142665a8f6b2d43827c9b3a6a2de009c09cb From b9fe58cd1e16f286b3d2812b101ce9ccc30666eb Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 18 Oct 2024 17:53:06 -0400 Subject: [PATCH 154/199] Add support for --save_last_n_epochs_state --- kohya_gui/class_advanced_training.py | 6 ++++++ kohya_gui/dreambooth_gui.py | 7 +++++++ kohya_gui/finetune_gui.py | 7 +++++++ kohya_gui/lora_gui.py | 7 +++++++ kohya_gui/textual_inversion_gui.py | 7 +++++++ 5 files changed, 34 insertions(+) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index 53afaba81..658589501 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -188,6 +188,12 @@ def list_vae_files(path): precision=0, info="(Optional) Save only the specified number of states (old models will be deleted)", ) + self.save_last_n_epochs_state = gr.Number( + label="Save last N epochs state", + value=self.config.get("advanced.save_last_n_epochs_state", 0), + precision=0, + info="(Optional) Save only the specified number of epochs states (old models will be deleted)", + ) with gr.Row(): def full_options_update(full_fp16, full_bf16): diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 3c8651c39..db6f7311a 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -165,6 +165,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -371,6 +372,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -572,6 +574,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -987,6 +990,9 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs_state": ( + save_last_n_epochs_state if save_last_n_epochs_state != 0 else None + ), "save_model_as": save_model_as, "save_precision": save_precision, "save_state": save_state, @@ -1337,6 +1343,7 @@ def dreambooth_tab( 
advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 931dbb51a..95ce1b6c3 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -173,6 +173,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -385,6 +386,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -603,6 +605,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -1048,6 +1051,9 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs_state": ( + save_last_n_epochs_state if save_last_n_epochs_state != 0 else None + ), "save_model_as": save_model_as, "save_precision": save_precision, "save_state": save_state, @@ -1469,6 +1475,7 @@ def list_presets(path): advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 93e2b5930..87fcabbf2 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -185,6 +185,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -449,6 +450,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -747,6 +749,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -1502,6 +1505,9 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs_state": ( + save_last_n_epochs_state if save_last_n_epochs_state != 0 else None + ), "save_model_as": save_model_as, "save_precision": save_precision, "save_state": save_state, @@ -2642,6 +2648,7 @@ def update_LoRA_settings( advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index f9ff64e4d..5ff1699fe 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -158,6 +158,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -322,6 +323,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -479,6 +481,7 @@ def train_model( save_every_n_steps, 
save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs_state, skip_cache_check, log_with, wandb_api_key, @@ -842,6 +845,9 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs_state": ( + save_last_n_epochs_state if save_last_n_epochs_state != 0 else None + ), "save_model_as": save_model_as, "save_precision": save_precision, "save_state": save_state, @@ -1220,6 +1226,7 @@ def list_embedding_files(path): advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, advanced_training.wandb_api_key, From 10bc4658e5ab1235ff7ccf6bea911eb7567901e9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 19 Oct 2024 20:52:06 -0400 Subject: [PATCH 155/199] Update sd-scripts to version with Differential Output Preservation --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index d8d714266..138dac4ae 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit d8d7142665a8f6b2d43827c9b3a6a2de009c09cb +Subproject commit 138dac4aea57716e2f23580305f6e40836a87228 From d29a546c9a6f1ae9175e0381012c11d7befaa1e9 Mon Sep 17 00:00:00 2001 From: b-fission Date: Mon, 21 Oct 2024 16:38:53 -0500 Subject: [PATCH 156/199] Increase maximum flux-lora merge strength to 2 --- kohya_gui/flux_merge_lora_gui.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kohya_gui/flux_merge_lora_gui.py b/kohya_gui/flux_merge_lora_gui.py index 3f40b76c8..c303087bf 100644 --- a/kohya_gui/flux_merge_lora_gui.py +++ b/kohya_gui/flux_merge_lora_gui.py @@ -197,7 +197,7 @@ def list_save_to(path): ratio_a = gr.Slider( label="Model A merge ratio (eg: 0.5 mean 50%)", minimum=0, - maximum=1, + maximum=2, step=0.01, value=0.0, interactive=True, @@ -206,7 +206,7 @@ def list_save_to(path): ratio_b = gr.Slider( label="Model B merge ratio (eg: 0.5 mean 50%)", minimum=0, - maximum=1, + maximum=2, step=0.01, value=0.0, interactive=True, @@ -281,7 +281,7 @@ def list_save_to(path): ratio_c = gr.Slider( label="Model C merge ratio (eg: 0.5 mean 50%)", minimum=0, - maximum=1, + maximum=2, step=0.01, value=0.0, interactive=True, @@ -290,7 +290,7 @@ def list_save_to(path): ratio_d = gr.Slider( label="Model D merge ratio (eg: 0.5 mean 50%)", minimum=0, - maximum=1, + maximum=2, step=0.01, value=0.0, interactive=True, From 03e97424b97000a6e26d79080880c3a5c52ce0b2 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 25 Oct 2024 08:20:59 -0400 Subject: [PATCH 157/199] Update to latest sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 138dac4ae..5fba6f514 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 138dac4aea57716e2f23580305f6e40836a87228 +Subproject commit 5fba6f514a0792851149ed1a7071070ee35cbeca From 7d6ebcb7cd2a59bd527ff161bb65da4bc7e4a9af Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 26 Oct 2024 10:29:42 -0400 Subject: [PATCH 158/199] Update requirements syntax (for windows) --- gui.bat | 6 ++++++ gui.ps1 | 5 +++++ requirements.txt | 2 +- requirements_pytorch_windows.txt | 11 ++++++++--- requirements_windows.txt | 1 + sd-scripts | 2 +- setup-3.10.bat | 3 +++ setup/setup_common.py | 18 +++++++++--------- setup/setup_windows.py | 9 +++++---- setup/validate_requirements.py | 14 ++++++++++---- venv-r/Scripts/python.exe | 
Bin 0 -> 268568 bytes 11 files changed, 49 insertions(+), 22 deletions(-) create mode 100644 venv-r/Scripts/python.exe diff --git a/gui.bat b/gui.bat index b24afe1ec..74034b9c5 100644 --- a/gui.bat +++ b/gui.bat @@ -7,8 +7,14 @@ call .\venv\Scripts\deactivate.bat :: Activate the virtual environment call .\venv\Scripts\activate.bat + +:: Update pip to latest version +python -m pip install --upgrade pip -q + set PATH=%PATH%;%~dp0venv\Lib\site-packages\torch\lib +echo Starting the GUI... this might take some time... + :: If the exit code is 0, run the kohya_gui.py script with the command-line arguments if %errorlevel% equ 0 ( REM Check if the batch was started via double-click diff --git a/gui.ps1 b/gui.ps1 index d2ce6fb18..47e69aca5 100644 --- a/gui.ps1 +++ b/gui.ps1 @@ -7,8 +7,13 @@ if ($env:VIRTUAL_ENV) { # Activate the virtual environment # Write-Host "Activating the virtual environment..." & .\venv\Scripts\activate + +python.exe -m pip install --upgrade pip -q + $env:PATH += ";$($MyInvocation.MyCommand.Path)\venv\Lib\site-packages\torch\lib" +Write-Host "Starting the GUI... this might take some time..." + $argsFromFile = @() if (Test-Path .\gui_parameters.txt) { $argsFromFile = Get-Content .\gui_parameters.txt -Encoding UTF8 | Where-Object { $_ -notmatch "^#" } | Foreach-Object { $_ -split " " } diff --git a/requirements.txt b/requirements.txt index d11825b60..5b26ce8e6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ easygui==0.98.3 einops==0.7.0 fairscale==0.4.13 ftfy==6.1.1 -gradio==5.0.1 +gradio==5.4.0 huggingface-hub==0.25.2 imagesize==1.4.1 invisible-watermark==0.2.0 diff --git a/requirements_pytorch_windows.txt b/requirements_pytorch_windows.txt index aed2e8ad0..a328eb236 100644 --- a/requirements_pytorch_windows.txt +++ b/requirements_pytorch_windows.txt @@ -1,3 +1,8 @@ -torch==2.4.1+cu124 --index-url https://download.pytorch.org/whl/cu124 -torchvision==0.19.1+cu124 --index-url https://download.pytorch.org/whl/cu124 -xformers==0.0.28.post1 --index-url https://download.pytorch.org/whl/cu124 \ No newline at end of file +# Custom index URL for specific packages +--extra-index-url https://download.pytorch.org/whl/cu124 + +torch==2.4.1+cu124 +torchvision==0.19.1+cu124 +xformers==0.0.28.post1 + +-r requirements_windows.txt \ No newline at end of file diff --git a/requirements_windows.txt b/requirements_windows.txt index d4e2d3c60..0836535ce 100644 --- a/requirements_windows.txt +++ b/requirements_windows.txt @@ -2,4 +2,5 @@ bitsandbytes==0.44.0 tensorboard tensorflow>=2.16.1 onnxruntime-gpu==1.17.1 + -r requirements.txt \ No newline at end of file diff --git a/sd-scripts b/sd-scripts index 5fba6f514..8549669f8 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 5fba6f514a0792851149ed1a7071070ee35cbeca +Subproject commit 8549669f89ed05bb7ce0bf774a7c5589dc15df35 diff --git a/setup-3.10.bat b/setup-3.10.bat index 6b887f59a..f5f746ae1 100644 --- a/setup-3.10.bat +++ b/setup-3.10.bat @@ -13,6 +13,9 @@ call .\venv\Scripts\deactivate.bat call .\venv\Scripts\activate.bat +REM first make sure we have setuptools available in the venv +python -m pip install --require-virtualenv --no-input -q -q setuptools + REM Check if the batch was started via double-click IF /i "%comspec% /c %~0 " equ "%cmdcmdline:"=%" ( REM echo This script was started by double clicking. 
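
Note on the requirements change above: with this patch, `requirements_pytorch_windows.txt` declares `--extra-index-url https://download.pytorch.org/whl/cu124`, pins torch/torchvision/xformers, and ends with `-r requirements_windows.txt`, which itself ends with `-r requirements.txt`. A single `pip install -r requirements_pytorch_windows.txt` therefore resolves the whole chain, which is what the simplified `install_requirements_inbulk` calls in `setup/setup_common.py` and `setup/validate_requirements.py` below rely on. A minimal, illustrative sketch of how such a chain flattens is shown here; it is not part of the repository code, and only the file names are taken from this patch:

```python
# Illustrative sketch only -- not repository code. It shows how the chained
# requirements files in this patch resolve when the nested "-r" references are
# followed: requirements_pytorch_windows.txt -> requirements_windows.txt
# -> requirements.txt.
from pathlib import Path


def flatten_requirements(path, seen=None):
    """Return requirement/option lines from `path`, following nested -r includes."""
    seen = set() if seen is None else seen
    current = Path(path).resolve()
    if current in seen:  # guard against accidental circular includes
        return []
    seen.add(current)

    entries = []
    for raw in current.read_text(encoding="utf8").splitlines():
        line = raw.split("#", 1)[0].strip()  # drop comments and surrounding whitespace
        if not line:
            continue
        if line.startswith("-r"):
            nested = line[2:].strip()
            # Resolve relative to the including file; all three files sit in the
            # repository root in this patch, so this matches resolving from the cwd.
            entries.extend(flatten_requirements(current.parent / nested, seen))
        else:
            # Keep both plain requirements (e.g. torch==2.4.1+cu124) and pip
            # options such as --extra-index-url.
            entries.append(line)
    return entries


if __name__ == "__main__":
    for entry in flatten_requirements("requirements_pytorch_windows.txt"):
        print(entry)
```

Moving the CUDA index into the requirements file as `--extra-index-url` (instead of passing it via `optional_parm` from `setup_windows.py`, which this patch comments out) keeps the same file usable both by a plain `pip install -r requirements_pytorch_windows.txt` and by the in-bulk helper.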
diff --git a/setup/setup_common.py b/setup/setup_common.py index 9d0852212..b85af77df 100644 --- a/setup/setup_common.py +++ b/setup/setup_common.py @@ -158,16 +158,16 @@ def install_requirements_inbulk( log.error(f"Could not find the requirements file in {requirements_file}.") return - log.info(f"Installing requirements from {requirements_file}...") + log.info(f"Installing/Validating requirements from {requirements_file}...") - if upgrade: - optional_parm += " -U" + optional_parm += " -U" if upgrade else "" - if show_stdout: - run_cmd(f"pip install -r {requirements_file} {optional_parm}") - else: - run_cmd(f"pip install -r {requirements_file} {optional_parm} --quiet") - log.info(f"Requirements from {requirements_file} installed.") + cmd = f"pip install -r {requirements_file} {optional_parm}" + if not show_stdout: + cmd += " --quiet" + + run_cmd(cmd) + log.info(f"Requirements from {requirements_file} installed/validated.") def configure_accelerate(run_accelerate=False): @@ -649,7 +649,7 @@ def run_cmd(run_cmd): log.debug(f"Running command: {run_cmd}") try: subprocess.run(run_cmd, shell=True, check=True, env=os.environ) - log.info(f"Command executed successfully: {run_cmd}") + log.debug(f"Command executed successfully: {run_cmd}") except subprocess.CalledProcessError as e: log.error(f"Error occurred while running command: {run_cmd}") log.error(f"Error: {e}") diff --git a/setup/setup_windows.py b/setup/setup_windows.py index 38c6e7bf7..bb8cdbe8e 100644 --- a/setup/setup_windows.py +++ b/setup/setup_windows.py @@ -123,12 +123,13 @@ def install_kohya_ss_torch2(headless: bool = False): # ) setup_common.install_requirements_inbulk( - "requirements_pytorch_windows.txt", show_stdout=True, optional_parm="--index-url https://download.pytorch.org/whl/cu124" + "requirements_pytorch_windows.txt", show_stdout=True, + # optional_parm="--index-url https://download.pytorch.org/whl/cu124" ) - setup_common.install_requirements_inbulk( - "requirements_windows.txt", show_stdout=True, upgrade=True - ) + # setup_common.install_requirements_inbulk( + # "requirements_windows.txt", show_stdout=True, upgrade=True + # ) setup_common.run_cmd("accelerate config default") diff --git a/setup/validate_requirements.py b/setup/validate_requirements.py index 497b44e0d..725836f7a 100644 --- a/setup/validate_requirements.py +++ b/setup/validate_requirements.py @@ -177,11 +177,17 @@ def main(): # Install required packages from the specified requirements file requirements_file = args.requirements or "requirements_pytorch_windows.txt" log.debug(f"Installing requirements from: {requirements_file}") - setup_common.install_requirements(requirements_file, check_no_verify_flag=True) - log.debug("Installing additional requirements from: requirements_windows.txt") - setup_common.install_requirements( - "requirements_windows.txt", check_no_verify_flag=True + setup_common.install_requirements_inbulk( + requirements_file, show_stdout=False, + # optional_parm="--index-url https://download.pytorch.org/whl/cu124" ) + + # setup_common.install_requirements(requirements_file, check_no_verify_flag=True) + + # log.debug("Installing additional requirements from: requirements_windows.txt") + # setup_common.install_requirements( + # "requirements_windows.txt", check_no_verify_flag=True + # ) if __name__ == "__main__": log.debug("Starting main function...") diff --git a/venv-r/Scripts/python.exe b/venv-r/Scripts/python.exe new file mode 100644 index 0000000000000000000000000000000000000000..8655d9d5a5ce275d03737ec3dc733c7591ca4b8d GIT binary patch 
z*dqzV540EHVE}ocM*w31ZWGS|tdk7tBhW-n2QUKKEd}hgm<8%5&{qM|egO3dXlual zJ_0)gS|}T&D;J~@XllR@0n&js0qh4*4>Xal=Yc!{S_W_*KpW6RwkiO11n5A(vW1{5 zfR+dR5r8@Z)H}fHC7=xfS`%<7z-^!_0K0qw_YAZf;Dl0OpFlSO7A*s|2sDv(0HT2= z@*+Se(2#NvA3!289I!7yF3^5}QJ=wc2k1W-=yaez0saQi26P`_%_>k9Koi*!fIJe^ zHNXe}JnfKCQ{0H6dk(I+nmKnLh(z>@&>Koi&g zOVk1z23iL&TOGJRpv~)nJpiPG`7RXTaDZH(iF=+A_adusBK?Djdu}m;a3T};dy)im zL_{X;kJJX{ScpvA8|f36uOKpUpQdV{iA>z1Ne5^m6Zg0=0-DIgy>aNl7>dZmy*j4B zyaADkdvTb6@gR|j`!zfVn#jbx7~H_PhRDQy71D`g9KghV5vGAAGI0+AAu#SBGI6hf zuRs%-=*un&`tw94`jIDtzA=%Be&Rx)-$-Pl&#V#9L?-&K8i9TVk%|7X<3JOc=>HlD zG?9sZtDk@-GSRP-96a}lJOqFN`VR(LG8nYOfT@~+{Qym5qFyi2C#Pv?6V8-3*i4h{$~z=z2iWTC@AXFfYSjp0tQn# zsB?h9v;zvz-wt{R3HSttpP<%(12k>K@1Fxq4px-a2^R(7&vVHk37{VTb?_{bL$0#^^$FMlo*a@(G_F4n3@rsD zmuL)s9*JiuAYDXb`SUQpKmh@+QU5%;!2UW%z(D^xx^yTYS8x6G3E0dV5Z~p0eNLKy=7JYc0ckkKW8T% zpMN+(DDa7ii1P8+5LYaNrS%GY`g(VSW%zhJy};Ua3ny0>8wEap8&5u%;wf5r3r|lQ zXGyM4gZBUPGYIjparXvSv9Z?i zaPR@kU+ru>|M|zi|5?Qke3iJuT+_zK#)-#?_`d?5h38!tA6G{k4?Z4mhnrSJHwpzl zTMH*o8$ReiqmhUH_lw9w|LvCKq5n+VA77UTIpPLfKP(-bz+M0E$^Ey}iP`w)O~_lg z{ll9AB;a57<>~F_=IQ~`uC2>+*II!uz(!nDOiWTrT3Ad<^1863gq4)Ag_Nz0u&Ahv zwT+F9gtdgNDB|xa`@h8?AtfduV<~GPEM+YVVz9Ne7M8IQw-&Y#my{G07nib?m9Y3f z#~^7TZYwThBOxp$VJRytDJdx?Y-u5DC2W0N)>_g+(&D;}gv|dr1~J>~($}q|uM5kF z+K35Dic81}%Su^V3d=}Xi`rhd7PYpJ{$FB{u@SYEwh$8+wzRdB0I9H%5e8|J5|)&) zwU!dK5Rm;Fs&;P3NSwDw+JZgNoQpY==RpY=-w+!a(;>n;=&^L41GzCZZqkmWTf5^!+RRkDLHG^{>=}#`T5HJ=IgRV6#&2D)6q_?FYnXIuX>wXMu@F zt}P%MH#K8V2;_7t5hR_ixz-Q}59BV`2;47uJ;nd6i6LSMtzTlMZ|GfjxuED&kAX&f z*eH8E$uCx(^LB1d<#|Z>V%Sj>qAS zH#Qf2HpXefEm-*v50NC{7K;o95c=}x78FgQ5$FD2%fuEZSGRW4i{H%Zsar);%6IaA zk2@9HF`z71#R?(kY|lun@vo!Mlq?hp78Ol|kNX-*Tl=Vmiwo+1Uxj@=s5|1CA67{} zVEt55=UWn(*cG?dMX~Yy>HfFyH=H2L2QGqKfb5bIFu{*kIi#u-{Fqs8lxXfXfav`; z;};ZIkdUr#KJGfH^lhR$RN9o5=dvA*NJPK#*$#=Jl3oW+`UUpt9t)V8C$=}WWz+n8 zEcLc$VdS&U)utVJi1har!WuZ4w8`6)nKn#~#xM1mM~(a{t_-A=0_8jw(-)E2;}w^!0yA9vjg|#LJMPtcWHua}LrfWGO@0M|CG$brHvM?#fvXvSqRXiGiJCTKZU; z*Ce~=_bC%^hFd`t?y&m(VmA+KqGtl9h&gP)7#I?7hWr_g5_j}Opbzfd%`htnLdj?6 z*SZ}g%WIOaylXU~PboVgM`{o6pez>IL4;>m{cQXGd0{)5i%s<6AV!JgGv<`%UlX$c zT+Nfbj_{SIMGPeyVj5)2_}A&8&oH8>^O7=@{}eg8-m5AEyWwSd;_E{N9 z5Pu@F9YU+%SJf!Kx1V>23~Lm}w^|rID>_7;vTZ7LzHtT5K_;VOK4VwbBnAmLZgZob zW^RYi^d1O;+%E=yn&E}SQWOJ=QlamJ+`I2b-DDkX1)+OBGwZh4D^G4-_Q`$~atD1~ zZZ!Za7zkoEc3r^brJiY!p508@e`lfT#@Qsq)117{O6`bU=-rm3A8-U&z=Pq-{2B;S zNAk?>K|-KM5PqASP|*9LWIP5ewNltmHJ0gt$D@ zS+_%ptnm8G2YI2ErMdD;^WCv|2lq{3@*G1(U%!3@Z?igYX7xipeTv)4EFwE+Epsv5 zZZCf@F)^_Ti@2lxr;U8|%EHOXX>!sbf2FA?rL7TxbDM4=;gE5IXe%CG^4XkJ zz?^Iz=~Fe)rZWiL(KfdnAQx%GT54*Bj>W_@M<5~%UMgMIyOkvE`kHgBAwU|rU!FG8 zh}&qL)0S7%cFldJfFilWiftDc8hLVP;57F66cq7WV`;GLAmkOhwB=mKt7^j0YH833 zGH^3ss@2qYA}p)(_4k7U616Ab_o&YCXp$oHX--!33${9>8_yV;`7ea9J>hg5Ds0x0 zo(N5HsipCq^yhEfjw_j-&Um_WbPv?4oiE^g_qkWnv-t(Z?>7zK>H4gV1asc-nP#8D zQZ^FyN_vx2iTS>y_l-J|K5DvlwYYU}7!I@RP0p9IuT~6%4A#;?Fd z5c6E|SAlMF*FLkn-h6s3x@31DWpe*>)y#Xx;$}O=o%7?zt#!C<`m<{!^fEb?cg$O5 z*)cIuf#l#$e|H~e=bzqh6n(|^T6;#V=*5z@p1bvEtMy>>Pt8-Y`cxA4%3vXz{-Xg$ z#biAqExw#W>;jn%ai4tda>dP@DZC(H{`<$krzdSKF9%dhk9IAD;6EDrA|oTGhaS1M z#|uBg{OU#e+&kg+q({Be{cP6A!hM46tZO9vUds8jX>=+|!I!jGK8Ps*p4g#zk=bSH zz}z^@9^O_)+wXg?;q`1Eqx#{M3pYhZJHA&%^zV;UkTo@L<;g?5Id8VpVlpM-&DU2X zKDqBX`2DICD}?|0^>eMDxR!P)#h1&aW+7fS;?yUqPf9W@;iQI*N9yBGR*N`IJ)N?H z(>DA`7!(4SqZFiHT#Ox5>r&BdVSrS=b6|cJ4&fPn)7R6&Ju%=72~}@8kk@<3nxDzv zPd|-Q6zXZ|4v$iJc4cecv(t?Y(o^(Rc&kZ2mj z!;Dw&zLO;d#%_{idYr%8Ot=hq86?S!DA*g9TFQj5jSN!}tFB2v_Yk_@ERZ*_)BV?5(P>qkhdL{WhV&?Yvc2H2+Dlj}MNIHd+Qtbg{;eB9 zp4^StEIX@~G}|JYI1HJy?^NC}NoWYerHLRWLf357cYQ_l`Ou-MvX*0J`Qe)mxsi7o 
z!nt7zFAngB=;`lNsrMROoSf*>j(+Am=uJ9xZ3fD0>xwoe*$uwQNtwtza5)LuEC@eT zpWB-Xz{-wqS3Yyceps_g;>{SQK~oEmLS0uk`EL`Hp-LF}+c$d`E;67jA>yg4O=UuU z#ZN;_Z)b5m90Zq~8;}Z|U4=DsPa$1b0!oa_DY@$ppXM!jN67DQhT#}Kks9fN%ehWq zA9XmQy-&>W3k>^BbhjKIbuCRxu3TktmU$&7^i=8^dGQ0R0GE;heEhvUtvhwP7mfs* z%1%ch(YY2JVj3Av7H`9w#&GgHaIyuah;hWvu=zH7e^X8dt$_LJ!lPG9Us=aavsjJR zS%q&au2UK9DaC|tdUKIi_h*;K{gQ2x*Z zCCi^^#_CVBVfNHY(LGQXx|*p9MI_#Gj9dN*5anR3qe)QQ_ULlgI~eJqR+2xne9I}qsb;nhK2 zmntFo;|lba*YRC!zt0{b>cJbZRT%TbgUl`TlAPGVl=p=g%s>FbW_Zr;uw&5g_mKpo z!qg+iw{a&0J5+_X!5^pGy5%~yKl=cp8Fc=J^H-ylfTdQ8$354WrzyVK!})WEO)6-i zrf7~;!_@G{6YUw@ay|-3>Kj2U+A(>dAT!Q9KM}LiXw} z9@*T^$m!+F#E_vj>pm6gjGQd*r_GI>{Jt=s{#}Up00YJ8zKQtV+*%k-)u%B-Wke$M zL}c7AV}E4hX{^lkvxKJ?g9m6%urKci&TFxm$0bU`A9NhWWsTIjQ%oX%$tb#aTKPRW zu_#(Nh@pGNN|Kpw`E`~oSB{e5CL>ZQZIvwyZO(S)>i3H?z89qQ)K##;GY1H^Skf(Z3(&EOWmDFpHVek7Q2?50r@X+p(^&PAkwSrA}o>QvDq}4rU4mwOt z=nR8f9)G!yKUfv%_yv8)g~;*5A$lVg_Q8LIw~|jo7pT&;aB<-+2a)4Q!HxZhG0Qv- zR`!@%Wlq>bzDDfR&OXWKy&n_IMh9dVSV;FS7(gPDx3=8ZOM?)N7uJ);36Dw|cm~Sj zp#pOZCBK+I;=0!PgW1vssXf-akP=ECbB&*9sqJ;g!=*P|e>!4JRDTm9f?+xL>Bv&! zPvx9pYmAkmnhNqO*{>S?{3=4^hn<1kN5e3fcp-#UHcMkS7M^;;>{Uomwpb9qt`k%~ z;Jk=}tMdo){MS*leL6;q8mv>^I$xy6HiAUB)5b3<^&`*3El|yd+&e_dZfeWCy72Pj z{$}PD3_>2vz^>SMGF=YYl|9r-x-Nj;3^&Ur+y>?6@%OjE+x$*LK zS`9)s;G7P}g_m!GjWqR~t1+gX$t?CY$1`<_p2td8cRfCEtJNN_RMXYS$FI;fET0x5zjxNyi8!bZr##_| z%Vc5S9GK^t-@ZGHmYQTpYqJ(3Ez)Mq1@+A>L+|RyM{uF zVT@7zCEi1Yz*IYy3l)3Wj3;A{m?QDU4^cUsh(4|4fvInq%QbDs01%;Bqo>xnb}b|sJZdF)#FL;Kp!<#`?OuU;;zue zqr1xj7}c&8ZL?5Qp&9E2)AWl2J(XubH}VH5)$Cd$E=_i5t04%#t7pB{-OlgY zq}Cx0_YcvQB=6!MK!kGaN?1^w2CW%rPUpKPa~2nwfYl|I+E{TuV(mYsLTxHkaO+-! znYLzhU)aD6Hg;_ra>5?ZsXSWnZ2yJ0RT?EQt6dC3wT-~-4qK;22(Fo)#~R8kk}HF4 zrO1qJ(I+H=kSh}TNTlu{rQ*RczqsjUwW8u-iYiH4C;t8$TEh?C3_PksOPbA9D-w2wx!W#kb3P9>EyN6X z@5Y2p=*^Q^pCzn#1@gsz*)FjES#b4KJD++wl$%-t zLg7Hkn^A_(95iG(siy8c&cuwhGp$()Tqj?yqUwW!!R4L#doa*TGcw>ECEa@6xQ*ns zFIpeoKBsoIeh=KivG3v}t|_YLmd6@`5bT04Ge&AJtSwW5y z$!~*ad7CvSX>MqPj3oKgf#oZCn;Eu8fiZl!a*f8=c83Pa9$=g#Z63#&IB5I655+VnqKpq zpXr(l+8R|aji$bG2bQtN4kEaBH^i2)=HyxxT)*ieZ6lMznW)lI}WYyGfBM+lqikRyqz_9&RrfH3rJY zi9ega~F;D%PLpG!# z;J|WovOy*g>!y0}g;Wql(G~7_-F)n`^GKNu-F27okpw#-c9EHI!{0Y3d%>f}9*V5R z>FgSxCfpA`&B%tlj|u+Fm+o9V=$rmPsEK7m-}?fj{`Pex>)N4TxAGjoNa*&{!HkFH&^woTYohST-GTh`c}2 zPNR^@j^w92T+0koFGW3Gy)o=yy$x9mDJ>OS8!08b;j`9s5gkbG8@Tk^cq*2q2?Yk< zA*;o8r(FZ**aJo_Y%}=i&})RA5bbCto*7r_%42eYGrAQ)I@QZ95%xES7mH2nz9PQU zeSQL?;WQ@>QHRQ?famjg<;WOk9(dggI|PA=pb0;Yu`KhI74pQW7a#; zhzJ-~2g!hxJsEsFzMb(XQp2Z5rZ|c!Rrcj^r!D5^4!@W&gPV0eCDcCip>&k4gU_U1 zWw3>cYHIS7(!e#|rDvpK>ujQy{)tYItT?9Z{8zb+L3e3#>$GfCEugMh(*`rY;zn5G zPpNrs6(G4-1fQ=PKB~%Es3*4c{SSN&NZ#Cp-71`q*o%MqjuyL{hgsfwzr)pN2`$*?X0cgAlzcY@pzXm$D=9}~Ex9%xT3~)W~ zU5%O&@(QziI5xkRspKbPKxep;l7D@=?sPtN!g}iMGmWcv4kGL+F~?65=MGkL{T2IU z?z#>(dP;VbG_wtD-Y|>EIYC)hrpdP(G6#yCR+{oJW~amVkD$-0U(Rb1$R#Dvo1Y^4 zb{a?Vq-dwXAidlts9G4!k!wQ19Dg3Vd4~p+0?*{(Yv0-IBo}ps%@Iwq6Kqq-)&vy; zSQ1@%;o5?D@*U1NkMn~Mg~cKmM5)n!_d1xli$;|-dNCmzrsv* znjblws~-b>C|01g38k%bI)2*pY$?1HWg@~&15-LAeD64+VL%m1(F7H97?N+>P5AOv z;#$$$J;D9H=caOWFN~3LMnrkqr z>)i?tHjPR@wj(HDp^2LbQ*a8Jpe(aj0%Y5uOE~PEuYI1Z>sC(d(_4;e%UmR{k)&?> z=e4JRZ4da}qBDT(e636>Zr?Q7zR}_7!=2p59rzn5zdvqWVZdGa^ICj3pLY68wV0rG zOR5T%v2S*S zH5|}pBJ+`Lmm|yH{DH|Hd-Q<3^MnKQ)luB~K4~FjF($&mER?|&_h~CRz?ZwDv+!)Rb4x?QqL%QW8C9b6<+&ub%#DbzMj>2gp{3v z!Xo$%?!$uwVtH)IF6=!ne9AD$@O;m47c7C|nj0~~4PUmwgFLWgy*QRCx@W|Hoyq83*XrvUlFT1^$)?aJ;Nw@C4cbeUJ;yQb}Bw;dblIhC9S zwE%pd^{F6_rL33$?GR&ugc%AgVs{|Fi)o9$EljpzsiNbs-w1vF5*f0>!}TSmXG4)! 
zdgsIM#?bmVP_Q|dEhCM>G0F1~owIVhtFFNK;hAv2HWe)^$6 zpIqTe1d~iq`(d@uKxKy~vBxK7?5g@L>-uQrL+0Iq7^ztXo-^x)>#=~X}+FUX<<4^janxGhxv2V7-W7&wanxNHI;n^T-qJo~JZyiC`MxW-usZV%gP(O^) z#tD`VYODKpr;b#HuZ|;Wu_Rl`=+8}R6?g$t1T~YN?5uV??0v`BX#N*3{RxD9TBu^Z zUG;k9-ms+&$LW{1ps%;aq{U6fx0}YFD$SMNn=C%=DlUGn4Cad8BY!|7UiOa=h>d1u z+!50hsD0zdBh9(;2|}=iLxCSqIzAfYkXPFCd?z+FRvb@)_UB#JafIa4E;5~rv-|1Z zjn}xMdmr6zX6P#>T&#lOi`;FL)96ARMXr&hSDi(^1$jLL=U8Hs985DibyubD$d!W} zLTbMKsOjZSWmNwl)jQ#_C(2XX@m1$aYSvQ~?)OD?JxEDl-HJ*P{D89$J}tYV zJv8OZ`L=LKY_J-Z5T%0eCez{d!LY>LoP1EV_hxl_;dhPXq@nO~M#xKK?ac(zuPi+) zyr?ae?AS4*59n;JO)|!wY~Jhl9oAN_v4R|r((G-UsCl7vcBJ0Fd>>Ct_D;$Ac=4ux z=}$PTO7suJlmX=eksHN;@QPwXksBx z_=X$1Fn%Ea_N9;B)-c@+72}T3+s?3jMSu3J^S5upu5HD|L3k4mudhgrwCgnMO<*3H zHZw#s6Dp>;Z?dMTBg>=nMyQ;_bl$FBF4IgK@`tErB=l9@qkD|n(K*^<0u4*FV{uJ~ zAHH$IB8mS#c7Xi4@UZrQu5f#iCr;~Q1B7p8xaRnpMRvF1Heaezzj-n@k!5DMaT(-lU`OtQszp1ersj`ljfJd+^YxQTla*m!k!o1EKyZOVzVYpE!A zn(8@pB&N~~ZnK5^wZN}CZ1!uNN<4C}T`7%{2{~HgLu?fiFN&b?9&=k%2b@$WqoSbq z7+mAC>=ZMn*@gAo5mm!k!42JF^n(ZZ*7yQE`Q>z40+>`tomM6l5t(A4IAs3VA*+9U zbIpINB@LN_yYodwfI9KP`Q^dUpk3MEsH-beFEC_HyR99aJ?_N9X#-Q)k)1*NKl_)t z1M}?dn2`D()AAeB_JcnRQvHnQRuZ4!r^JWe;_g3+h{p89!wnbd)Ows99tPT2e7=b; zOg0d>qJUDG$@8oHEV?9t7fI>izRFCANPv(s6wQNVU2uJPxzA^d+2Yo`aTUWk6?+7#G zq*|7Snt9&A)18=KoxW6`IeqpT#TibGGx++Jxt;_P>0!m3Bm2(@RDvj33(JAfE$rD| zEmECFPVWl!k&@+6?<|ql?e(pl4elgQm~Nco{CHTF zjk_I|DY)}Zyjk_$L0+`yafJsILHQ;Yp*A8wX3j$UP8f_2EZfjiA!yqvU#m3!p<+!+ zpW~+)=}Q`0MNH`9gwNt{zU)zr4yCzyDj}k_3@kZ750u00yX?}rDA*3ZTl06LP&sa1 zFYR>ykt09$!w1}Ly&9yc2J}hq$ZU#mSlku7C1W&g;CvI@z&e-}*3$_wKlxP>g_!i2 zeAk^+8`TM851V3KK1-pfzT?nwlDSuvWWm8>l}0pyCJnP~rnxP+g7mvPHs0)~k%9-atwkqRT@qw@S#ZRWP1$p{oI zcBWf^1)(&)z!nCBQ)((u*T+u}mz^RrBH4WhCN0jf4?q|8c6W61NMKXjak@Xi-VX}I zwEzdvJxdZ=n2bhbXmyFuu7eilMFu0%G>)714*qv}HA~Dz!X^Bb%Og&dFWw=nk5$@5 z4eR%9mM)#3hmfP6vFbe~d2@b>AU@Jt&s@tUsLF zYIwFQz)p+(SVN#t!Km3BwtJ=dgfP&+7<={x>{!Y$5pjqL=&gbs$n%b4_2BB0yFg5Nuj;(kc(<<*@sfEUS-TqZmY*G4>uvha`(nsOqu1EMA|4j26{PxxiHL3$@6>w`S=I-*aXl6awwy za274Cp_{2ou!O=#HRPLh4_B5L^@5KkUY1`@fZH72UACDSrZ~RcH5>5-6;9SnvQsHc z*~r^|UX%%qjYOH6XSG63$Mu*}az7nI-kynSR{UDu7@^X)pk1dV7 z&b|t~3VKp@BzEr{^8TvSgI@DmTkt-(V-BiGbJq94d;g)(ja!lgLyDtJ+}#ue^};CP z)k!e+;~A znDv#@16^=j@8`9uvy`rp$%ka?&s` zqldhISVG0sC*y!mf!L88slO+s4ih$>g9fqcBF!dGpDcZ56h>dgT3SU_TnLJrK6?op zys7GNL-$0n46jrd#wI|bE5Tqo4Gm14Jl%~sH3tvpo zn*m}4443E}KvPg>@OXvSV82~a@Jryp(P2{h?&Q!5iPHVZfltW22SVJ!S@L@7*S~W> zoW?pFls`{?>5;*qvVyL>J+e)%6{Nz2T+n$V9~{~Pbe_hStfQhX>z3x$uJK z=glHXBF_xJ3J+cV+I|F1av!>ol$(F;nA!2bTM(~oaCS^YhOT6y>XI zhBSGQyQ;BE#@p~RcDB)euO0NC*2WhYnBB9|N#;+zGJOR4B&y?0^&Q;Nk=TT7Ej>dOX*-EI_{*2rX|gTJw3kiFGT(=lzNtL`RbKVNDbJsE)@(XS@9y~fUCJg` z!$c@6Ta3BRzs-ZmuDH@3wcG8#vpeYa#5x2Vy}2K_J&`bzxh3&-_%h}-Nw7ux*y3Ft z--n+AKHfYKcMirP%W1HxkQ4u=V=0}WJBUr86#w?3B)U1@{FlJ8^qK}ZDCP6&r9IQN zISi;t(?(z9o&sg{px~Y9r;xa)Q1~u++jQFUM&|=$Nc!rJKuBo3nds+T(l-*gwy+ye zlN;et4XZ4R>F>%Ja;Mm7WBHjg=D)1Bi+kga`5yWy8%4Jkkc4@BvE*1Td=0PI|Hxap zVEF3LK|=(+<)3*bdZUC1xer-PkYq?VaSHfzB>>;5_LzGAt*pJx=+WJ`^v}e!?Bz_o*I@YWA>hXS?)`glE_mf7=KayHl0pTH zTH6`UAMM+L-Heq~5d5=Q0`=&8Ni(RFI0H$iI>Q`0F)w@_3?$_U3{5YfU_%MnYd7yH z>v7fMmKnoRKc`TmUA`+ax{mYK5%f9)>yBsVIe9iVa$fC_G_S0_*4Svgil3B>)`rH} z0-HL`t>`4*==DKg8YcNR7w@^^?RSW!y?JT(!#Sn~{6p}Xyb6Eb<9Pe$injDG$a`wns&o|!bT%)&*W@voX^>%kK@o)CKILc9}yEzO!7oEB3C-c5p@U? 
zoMv7w|B>0U?A1ErrP&{ImXq92FzNy0I}VtONV~~(lUuxQ7bBo-CpslC_w2My|CqWG z51hk8DC%b=?rJUc^&$J-g^;GsAVjB4Hwvyp@jB=+OA zfAc#2GV8Tj7Rx?avtN-F zjDOWZ4*#%%y?Xjo#tODyNGWR-Dl`@MIfS7P46we*!yEjSJvMGYI8Mf_z2K9EG@ofq zQKEu!3_PE)*}2qmMv+e$9dQfhX~oJViLDH4etp_FjuQRxi$PO5uE9DZDhX-zC|l01 zoE#&tNaEcw?tncituB*k2Qn`No-kPtDl%b3tJ(tr9~BAWmN3{6J3q zP@&uq^~KjeLD#NvXRW;VLR5*|#bl_`O{G#wR5(O$$kF52;ZQ`MJLpVJ8aHA%cR0FA zsQz%lF&6Ybph7R!Sz`_R@;orvgJ}&9!{%6+4%XIv38Z>J6q>u8@ z9$wjzzPwL{%6}^_{dSa**_~uWgs{ns~w#rV!4Y+`SSZdpsi5Vs{^_ zk^r~Hk{<80ciPBB{C>6=OYwN!$-GGl>%bGOCRkB#J!e7ua$(z`)}wwxMLhiu(d@{s2VPuy@^x9i>;h<}U1 znlfUm^ymAS0ycLzW0fx9V{o6fSirfBl z>;}SjzpuK=k3Y%oXz0y9S6hi6{nALUqL8ic=cRC{&*{}xdC;{Io0PV=qPiJtxu<`h z@QWHPMH}fN(f9+HfCsQV*_m$sR7ozF>o;-87L)nK?b(!DTTziNab~4GP>7RJ(X!c3 z9nGjgt;H9UTD)t?iuw?V}SuSsCt<{5_fi>QCz%}02g3>WHLg>1^s z1yoH5st8!$Wc720UD(vV*ifq->{8=A$j51!u3bS;$q?hOId(Whs2FdocFkWV`x?Sf z5e9QZy|i4gJO$gD`BpAe382Z&wBC@O^!?Pq-taVC`&}sY$qo|0X?HibT5jQgLq6?F~+Y@apiw z9|hBIrRu($TO)+6m&)Ov@GA{|mi;d0dGcybk;}OD*;i|pG%Syfav0Tygd^%;Ne_{+ z7{Tu8Z7>x8CVPH*<2_tp&SI~H4A4n08{0gS_u~r{#R6$`jItYxDPElruh7vOMDU#3 zGIV}x>q654y@S51;r!)l zlr<;1n$hmD3*2H)pA~LEYJBa;dOO^opiwf!tvivQRsOEne>vk>WG6JB^vQX#{s}fZ zgjbh7GSunVA%g|$r^KlLEC2wEiDvd|mE7veZZNEEz z!RPs-L!1x`qI8v61l@(P$>V&Za=>j)A!^5=;KN+!yLmm;bFkep4uNkoFcY6$Q~b5v zlcxxY>4Az2IJ0To1+Rb#XZ8tB;|QzsuHm~}(TYR8;xANB?}90I%Yx#H_;HkfA;YDe zL&+hw?g#yL7dCq$8}x%T$w0-h9xgToL#Z2fvK@v|mmlNWx6Mpn!8mg^Z_lt51ukb=0;9tn5Y)!_ zaWNUfv>J~x!?u5~x4i4xKcqb!QxpXHeG&*m=@+WZgmInm^@6m)hf-SNet>3Pj|t7-HHdwLYzJejAgX-)~2ls-&r7D|^L1 z9B95BBI$8eQ_xxvu`P1c_Ml`nNg0MW&@$< z+8&z5qXB&scz@bvmEoKvWMX%I(m)Rl1B~g~MT)4<*P7+kD?dQ>)fT+>_R{U-zQM1o!mV;RuzY9vMQN4IOZ;jftcliq61!rE ze*ffqm>sm0e+pr&NfV8sdZ44j{CG5>C(yfv!RtJk=>B}?n&8px47RA>M_U7H3jRz{ z@fNx@vgm^X=Yvw=wCyX!;R4g)GKNQ+Cd z`@IkQbJ(YRuL1jR!rDosJT4=ER^NZ22Yw`;oikv%F3fH?w~0KF7F9GbpoPuIonqEl z7{S8FeL{_>cn0*7L_E)XVl|ZoN=!PnyCU5C45SzNyJnM2kfp{{=`@yn0V-)mKL^$m zgRo5I$4j~z$nq9ib1u*;{{nuCx@9o~-sn$-#7VK;UOniRd?Z0_niJ!8d?VZvQuguQ zqZj5LF;?kR0@6x^SNc#aWW-)wF8;+fn2U=c~$ZY)C@h_(1e9svl1j% zi519q1AT%V_Z~KV*L$))CBr>+*mrt-H;K*Sn^9n-nNTH#>Q~C=U($ckzYV%`J7eAc za_UvEiKJ*DN332X<$ObqUVjeV(S0enH0#s_ ziueW^nS*`J-->(@7&6ajO6wmr$NOWYfrlPWll8I#hgoraE`fXGM5H2-v<$Ev`F#$V z;HB58ZF+;jY^ToN`A~FW>Ojoo-WF2`q;!8eZw@gd$5~4w|El&$WEzxAhSQ!EWul?h zeLk~DopNVDNi=hk6a^zU@@^7_e9oDq96%{dj=ru?_)z~TEv)I3ZfLDD@)BN98xpru zm%rJ24X?z&+*CJpyz!O6YFATBiv%?bZ+N#Ss9K6#u-}BMU9pEm*ABkV#Fra~~{a2dHGgs#+VztoZ9zzpw1yW78OS{ja)rS~fe zxpXqz6oaOuZB;5ib7z&VXwIN{To;N#1@hKtrv+nuk*7X=*>b+OF}g;oQ9=@aeI;$o z&URI-jP#t_;vl;yEo`@i;SvS>Hp_B(5_n-$K~fR=iWK#lPVay;?BdZ3Doz;b~ z>w5vazY3OANOm9RcF~##^AH%)4l+p6dslO48mpmnii{;I5pmlFkY24b$S*rRo#H+# zXL2@djYAx{mc1jNgQYIklW&8&XUU%U!k6}Yf)z`}a^}K$S1LB@v-3dTO(J4JTW}%e z+s_G+WnY~{aK7E=6D4uMXvvvj8bsmE9Isbb$C;NS-g_HBs4Rri*$WtW`s1XFv<#~v z+|Qk>E__W67U-oEwlG{3YMha~Fvcdh)x^!{i#yxfW4@=-oU{46D);eGc{P4wXp2GA zgsN$oX8gMl43S-Q?F&@8n;>o?bc$5<_eY}?*G zsyf>4p*Ne_`q!k7s@#1^?J}$~g^GHaqV7iqb|h}sv5&~2i3AYKhijsS4YQqnpFIk z*uwt8RdhYUY|=UC-s%upy23N~fygUARPWo-vjUM24LFrh&4sJ9MZ1V@{ zU1|vMLi?I%f^c8pJiJl!yz}PRp7zcSG6tnDM*-RK z7dFYlY>w$zPQ3zSirVP03L1U$DZs`R;yXs@F~{emSv`bVkbq+8j}3}B z3edmn+6nICu_BAH2kym`j;3Nk0{yFl%}Gy)Amom=dwm0#`n5mIL&9R~Tt6K#|4Jx0 zdT(%JI>*$~1o>KTL^t62@%uJ~eAG_zA%~-+LV@TsD2&~=Q2Wh@TP2VD%Gp>D9_dk; zxD?IVn9dCe7!5`3*=(QeG~Jr&@_^Gk#AVAJhbviQEj;P={rYnO`P&1aPo(J?l9AdX zSc|iK%iZ;PrPyRBU^ggfiq|LVn%lg_?j3R%yOh^X63POxTHGBD6WsZ77#KT9wJQQ9V|(Th z={rhreqG-*1k7!B^=NKV%zP?{^7Tn9fx~hkV4&9+G7o$}Wv4cq=ph|L<`QVgX@vaP zs~=akAa`e^wZ^_#9tMB6-HaO|(I}=Jo-Druv4e^3@VL!>cRDyfL7Th>i_8%sKj5D3 zR?=h*hmzcxB2UKVd|oiLc;6QLQKBS26RHvW{hot783!S7X_zEq6@8>UVR{1v43s3g 
z*-{uyV&ow(+T%=@_#?nW_g?J6X1ftO%_ysBR6FOpfYH?<(2;KPYI&Iy+`kTa?0h*o ze)3|l^sBw{iiK3ymp!PhUNFQ#(}PBWFLvSK;U>yv_!-Vv-Cy6iroFy#Bz`RPT7$IV zTWQi&g5m(J5?%KZ8OT_db9JX>R-+)R{}+4j0asP-EsTba^s1mBAQn`*g7hW`igb}G zpa_D1bVQ^p2uKl?CRG@!NN>_R(tD92z4xX_J73NL97kv7KXc!Ezu&#zyU|6?PS#pk zS;@}cSy?+FST|SJ(!TyJadUNgX2XzQyOFoq_DI?;{-<~{Hg@Qg%E76T_SWaE4zmO8 z9YTRIciJSqs?3X%VKw8q;HgWqWig4J0JWMy7v0cB`k1#dr)mmyOVdkmPCs;ylp(jv zz}}2ro$Q$jx`ckOFn;tzq#M&V?0wZDHM2x^HO`~C7-t-v+2xrYE>DYWOsX%*^L%nqBKuP7L_)t;@N#G+Bi{ zA|EF~5U)W1T#S%89{Dcd;q!aw6`?M5`Z{CdnM89eR>a<1>cHVMC}{o;&MWMZX5u$b%yo@S4RGv7gIKNkCB@L=Pv4 zD&am?LtR;x+n>igY|91F9*LfMQ6!JYJQco-K;zKubEMUOKMB9I23I9y#Z$y|Tmcij z5P{$f0e+1Q|HAOYei|@w?^A6UiHV5;QAtrCE-ntPT)zUYUcCxrWMsh2n>WF&TepC& zt}cLkCICZ2Ltt)h4uTEYL8vh&2!jyu;35b!F)9% z%TpQT`ly1E*SA3aYdugHpbtuYu7gjZBB1PrJg5p#0oCD#pfuD3l!ZM2l@X?(HUeHp zSb)k%b5Q-(9DIzn1a-ezfrdC6P#O0ObJ$Q2UGkS`c*LwH~~_HG}}ShY{fJ2m zAUiu7eE9GIl*TxNidYv=73TuVdKHePFz!6ijtjfSKNEFgH*OYR789>`*IM9BBiK@l6Dng0Q#+VH*LKmX^T!+7kHkz6`#7 zT?1Pi>tK8HD_Dm4>)X3vb8{13Z-AZcE%0pz0d{wHk$2sH+W?P?`KPj>&X)NX_`farG=NFAu8q#!R3M>H7Z18e^s14OmfO(hhA zeGcli|3&}=LjR`fO~^q3DZ?exCtK1YbX>h3bEy07TYpEiEl50ZIvb?eGuF-|H?}w~-7`93T&rDysK# zesAMH`?ZIIKxEy$jpU%IsdbR@d%RBvPZ0bi#3CB)FHliYSZ5jRF+geow!=*&MQF@{ zN>K9q-jRMG#=|p3RB#y3X&Z;EhdVxmVfu+Br3Z?LZg zWM4wHB$9!A=3Ynmwfty=9i&G}z*DK-|{0n+qL@_%% ziUCrG7oVlUCmS-kK(!*WA5c&r&(b3#B)6`MMBNWa zHSYJIU-gGTu(PwXB2{o=uN&;q6G8?g40VAhB|rvu|Dt@fScE7fN2&0S3 z2L2VjIl>Vs06QzP0#E{KWDUqr6(A%eL+rPMR@7hhA0vb}FRB9UNEJXO{((X9>wN}! ze?<=!C;%D2MnKU+22}eJzzPsTdV{-n?`j$Di4RW@Mc#glKZYkL0bmUdy3y|pNKo`h z2I>3b;GfVlzC(BkpxOY%V6PGObg)-Gl;F*N@%!cfaR|V&M3i|rNFsxTgqR2nJl}gk zaYSHna75+-`Jd9Gq2Zh7BS*T%va+(`vf|?6XS~4yX_Xe^&qBX@2|_*C@a)>g}?3 zEA7?bF~aiC=#go^22+G5?8rzRqIxoHgiDPGRDbg?p|9-o3h6d3JE`48z~M>LDAJ9y^SMVnynSMndzBk{4Ya8UpE zX_3({^glE{{P`cl&-6dG8YAO>X7Km)|FQUA<^Rtlf57;;{6A#=gNy&A{|DpW-{|rG z9{th&i*TIp4DD$?zSFd)PSGCd?_ZG9BI)_AU*|ji^y%^c3O%x{|EKgn%YPup{~u%I zz`TpLH#{!^VbqUOK#Yo=0nQ&{xDz+(`ai@&P>*yF(HX`#OYjd27(c~m``16mU_br8 z8vo4i;QHtC59A`=FYo`_7}a2Z^9N}sW{02y;k%vqhkcfRv=a-d2mwA5J|HS83NDLZ zM%jzuY!)ahE2HehckbNzZYQ>|umIN9*1*xx5d;~q{bDml+KYd-75`=IG&nAF&<0Lx~km?`;&mfZmNsdw|yD=x+%>I7XeP}Z#2F*`MK?_`4YjLLn?Y^hM zd*^H4ZO9qWhWw))ImcZJeE4ohz6Q?^*|Q;MdMJXd=c=ISg$l^Zi6Jrf(V=O>Lj2*NW!%uJ%pv^eR9`LpALR;}Jv=#3H z(av2U-n|Q?AzbO%1y>=+^zOnlsds^5|1Pu@?*fg!U7!U)?ei|sgm52%*3d4r7w-bS zkzHVdvK7O4d>2?k8?yQ2F0hB`7E`;>PP_}8ARukT&!0aBaAO7#6chx)!^469TYr?j z__yDFL)nW{Q&T~DdO9d5C;;^-UZ5k>6Le)i2Q3-ipexr8)TF!sA5&j|x^#cgkl_zH z^8!F;Net*JN7{r_z~_b>F!V7AEVL(sq55<%+?WMMn)AQ}vhBn>eq;{;#{>^TsZin3w>wGt*#YW*Dr^jX?YFB$!*82VYhez}K}!l>K*Xc^S;C ze*r@ff9%^XSlHSHODij2eSIB#`?3r+q3!qE#uu;z(^sJF_Y1WBZfzlLzPtbJZNBg} z|JMNyAAe5+jrYj#_ecl15HT&}wSQ1b^*^GhtA4Gipdbr_f_~b6E~2jbu8K0WbV7VZ z6@{e#NKhA~qN1VCmy1HAk)d#iutGLcbTMI>?tZ?hDtZd0& z;q9eE-S}#w2vn366=5Zyj{Z_4m<5Bct~Qb)sW?47tvG!zZI9U>&64;TN2eKSj~;dQH!#pM*e5ukP^?B0{DJ@dIx&b( zd-Uo%1Gp4vfD{T@iM^&!Nc{!htd5I|5fU)Qzk=)ybRmK69s#nogMPuc%7^%;Ps5kX z*p$#Wp)kC8?F*~DSH$aI@GZ(Oax)i|1MiKnV->mM^MTm|p;V3mF zB{?}cF){JU1VdO0SP4`={X>0;>cruO@fVT!NCFBJKCw{BUTdfSA`foeqe~DU)&Xjj zl6t=Y@&aVFwY8Dxf7B;NR17aKY~I!@rM?egJ4?m^fD{~Qov@QMb(*v7~&f|Kj?3N=sy@3_}01Q<>fh9a49o8 zTL48soL604T^fOu{%863x*k^DHLlZKv`DQ|B1OtG!@$7C`&;~rf>KM|pnG&z_#5Jb6W=kUaw_617V? 
z)XCq;k3mr9DGnPNnigqk1>BMB{~jNM@NHdwoTt0HdrV?o1`$$gf6E`{;~AU5U}0uL zfQ3Z=JN!Ka6xQ#5e&qjyett*T|8+q8BmNKi`HqjmJ3xl1KjME!+WUcpf&7Po1k?`{ z)!*R%PX5pMAK)Kgqhg%D?r(eYKrGaMBsNU>NAet$1mnNOM@fKWk17N8_b>4eRB|B9 zo^;>&GyJE(EP9qjl;u1#^WOdSXXx|{4153gGesFhncdF*HU7^$Ab;kw`~3H){+s-T z??4;G@HGg=r5Jm$GumDZ6H)1Z5yJy&>5%zDa5fCjr9svQo=by@p%2agA zKKxK~QY$Mf)Lisu-=!btp$Gm-Nk922#n?%L#K&T&d1tKsweP-4(y`L0d1k8z4QTg1 z3AzF8PTB7j6U0?ru`KoaR=1Oe$|qyRw$f+F-OI`A>lgg!=w5X=UlzY&}#B7KbXppVgk zZ;=H|vw~?z{~|k>{s{W$ApMKHyu84hH*ZkBM96s|(wC?%`5CB9_5hy>13`D;8_-=8 z270UBfxhZwF!1p`=x8bgqmAicwDkiRhdx11pudtcKI4-bRM$w@FXKLi%$CcyH{2w0mR z1?%%;V0?8IOm8B<%JLGNyRGi~?W}<%IA>dfbGOZJ8~-DGb-wrW|NZfo8bA#gzvlWP z!>`HvDZY0lxj4Bb@BRgbuQnIg#S5ImBHDkB;3g!<4-p>Qb3XsmBHYA;1O>Uc`1$@U zAIjw>E+!5e}1)!4-ErTwJPq5CN&g@8!!WAs2Na z2SP%8h+r)xby;suez?vG!!ajTrqf6fPzZQ1UmB_7@APPx1|~>H3Kf;}NDfaQJ+gw? zKjb47_2eY71nBpG?+#ZH|0JKuR2dclE8+Qj{$Ba0zfMdKZlG#;*5UX3gBF0XygKtq zWF^j7T>rCtbaWZxCl;m_PmFLN^3Ub}w8)5lkiTC(q(EVOM?1*>UjF`EYwsHUXa30b zaAf%YQvFAIR2GsHBA`%yq5od+-Vb!t^Y%dhNXCB*d->4SC>Mk#zd*araPpWz@6iT7UuSSTtW-&3d{AZ_Xg?^A$Vj4&sG&aVe1Lg7mBYqi=QIv#TK2Qw|h* zUIj(IiXiWWHYj|l3DUiFK=un=koOAuR0kP?s*pRN{x|bI+jiHUZ5#QHa_tkeYeP7& zYu|)+ZS{RS_8n-))*s!4?<9LRZ1ag-_$~r%+3-EY)6*09_iFE0<2zxM@IDSn_a!w=M@y#TG*{-8VmHE79$?&_10$y9#obwm|j@v@KVcf|lkc(A8QG+FDz{bblQvoty?W(C*v> zZOv`a?%WCEUTAY3nVkpAON(F`+KN}dR8Wwg?-JJqNU)b~aq=WNo3Z}w+qZ=U zAPIcY*h@D%b&{N%o)tO^UeSfTzNcSfKXvjbISDZl0l`&RJdzJK@_}T`@7E{CC{(vKc@VM|1Z-I z{}_%OIZQ))9NGu|Z9r_(0mKsV@8FE~^D}M#8kvp+jDG~kzKI0nbBqK6J%oej82s^j z__ODjm?%K52dk;60qE=uM9f40v?Kx>8yn!@-~iqjaiYE(3N_(It^LN?@PW980;sj# zq(_4L-vcX2ZGhhox^I56)-X z;e56OJ~LL~d=xn+-P-+meE41M|NHBIHSpg>1AlmqDN4&K{UDe;(`i5=l<>^Gv6hlmI3iKKW?Lu3X?`hknMgsB1x*BgaKV1SHs*WKh@K3^a#H zkWUhJMSuyXDmX%-j3?-b2;{E;R6b-O#z;m)#1AJZ$b1;YPtqKw$3tH44bRxTmo6Q` zILJR3vVYItOZk&)xae~bsE$(qP@}z{h-c7N37;DXXlr$bNq+!1Mmqluz+v|Pjq`J@ zXsU`9OHTRNNN3!=8pu*8#TlskK!qF+R8etuH^1kCh~*r_M^=zZo9@H)RRnAo68uuCKf+{jCQcWfBLVsEz z)}T`{TiP!AMRaFb2pcYYSFl`U zow+=$*N#4a!3W@;5cxcHtfCE3AQAF)#RuPKR%oe?)PCvp>B5JijK>S}j|B}DV1T8e z&H>v+*DC^r)_}Os;$+M2OEeO3e|DiV^KV7`ZLUsBGY^2&bHvKyhdi=`7H$O2^j^51 zFu~K)j_?dVOog@=5S`Q?Za3_{Lu^Vb6-ngbQWiyzLGN)w^e|%l>q036+)hj;Y!Q;H zpF5TPOy7scHV$_1d;9I&4@&oq=sIVTRT7_#XS(KN*j;qTJd<B3WgQKrL-{1i&AF~g+GO6Vk6!?4-{+RA~6v}((GUR!RfEtLtW=G*8EAc-u(O>Lg zsgKKfY`g(CdBY7o3{`K+{A1#tVvNY%_7UJlKV^U|LFNuy?HU!T3;v!MH!VMkhIQ9D2R!c)yK{!yX?W_qA{0b$|BE#L!b5 zT~F`JDD;%0Ja_I~TV#D*oxFwyKJd5_BXyF@qh^$fmR5rKa79m#Dz3(b4^S?#+tuFG z!NN{Lux&Vux|kcnc*)!2WGXrmmC96a$Jx|WSDUR(DbNVmlEA%oDRckwO|Z(^@*X>Wg0_56r#?aNcDSq7vtv$OaEEI3dO15;DU$B+2~nbls(PKJ#( zUK*@iZ5H;s7wK?VBliJ|4%;d|HSnkOm%@yXbmVIi{HmF(`GHI+UY5lwDC4xAj-a66 z#?muk@6VsL1U9E)deUxVzypUArxx7ssKM~_FJEj57abNWW;)-sc68jBT6iiZDT$L( zvTLXLcs|>9tyR(-vGY|Io=xPoaFYr4u!yOtsZvRy6;e&JGAv)6e$Q(s$;#ev8JR6t z9T|mNAC(O}9Q1KM$tZ7orJmIDX-R<@xXj2QUNZ`u-?mZ#dZ99o zqt9b<&be|{1EL!$ficmA1lFCD3GeVyO<}vJ=02ch%^N~TZWv@cmtG@gbbb)Zpbm9y zKKnlRPFJ#;OUFa|lkT4x71oaXj4TZ?5z*0A4VNj)jx#(WjYEIx+#c~A5?T&QS z34Eg#SSKz==i!+-cV1qbTvXSDGOkl4`q4G6-lO-~-xnXf5ISqtmc@5bF0m;e0sY7} z%8wK~E5Exr?90DU`thSwmo+6mtkz3p5s~r~38o|MFuc_v^$mhEw`OGb&KSxuEwCrm8Obc%j8b05fwB$_6 z@b;Yn3&-7WlQEf@uz`sKEU+l<5WIhMNH)FU6W>l;3)jC$h5oE#tKA?E_1PJ z3LFcH;Km6z`P`uMW$sfRJf&&rL$d=B9AcKTWI4pJMS4^-?jAy(9s8IJVJqNL@?f@t zPHVugWKjr@$SuKNv&&P7!Lg}|O9@{LE9_p^>+4mHUPH#xDRg0@Ev2{%k7o$`bIs4+ z4D0uu{k%@Z9MLzDzvJWMlegN$Z@$`iNf(}Jl`{UEloOngv9!GKpkSW6TSwd8Rkvb7 zOJHOB0i0$@>dUhd)!!zGADK!a6ApT6v=!NxbXfAp7bVOs53xC)`&P@_IXkwyjXON1 z3OKvdMfELRsz%$a_l=B}J5wDgFMYOUB%q<7tGz~W<$LYezKIz^n(+jkZnad8Vko3LkMdyjIH(^WJ5iY4% z)UvJLZ~!7oO6166l%tZ8>avZ~;g}B6+;`z2zY*yTrk55gScnU~1$PpUS?48$-1TzU 
zet)=RN&1xRla2PCrY*{mNzIuc)zte|!!$+WH6t4<@c3KWlK!XS<=i%A?Dv{z`fW#j zsyVB0so?}JZy|ry`x|2C4%_aaZXZdcl$4|{lW$a4hSoN=xt;e${YcQWt&y`H3ftAK z&pWGABm3N)iXJoGS$?|AuU=roax+==HnWm3yjg+;ALq}7(PnxyMT+-vRR%e1;P}8ba zqPMWM2XkS2=cZXK&#lVHs|sGi$|`X*G=@nYB|+204Tdg7tnE`(jiubX4s6Al!>59g zV?}Sak&m92XLKYMRo+Nt{+Iloge^yTswxdJT{B-(ltk#SU&vKyS|J%uSX%) zW29~-QLH{%^zSFTON*CVEb~&giW1%{60d7$Xr$loJZIdWrx2oj{RP@))S|ytMXCvXC_=)E3C_PF}piuMal)+(}7qykmoT3CuNtI20+)A-}=3=tRN6~&jk zM~|tnJoFgslGPAgD130L_o$Ka$iuK#(vsV23hHMZh1(slF)};PdupEpL_@nisk#>* z)kJ$;Xci#n;@Vt{QKiRVGM{L;NsK_#=_?cMsbV9JbdjP?a~w!aNHP=ZSBgu|A)6fx z61YEcJHMG?xq02r4Gzc0o%DiiG#LGHWqHRb+Pf7fkQFy+@6CGj*iSrQ1)k<&soQzwRIDR0Eg^}9!^u)>KKH}*VQUpngimmw;TIHFwBRJEp@2W7V zMLgSEqKbE{ZJB+=&)sM90=45~&q8D-LiKJmyt#>UyCs#v3Hw2JyaDD*K?b!TORM1% zyVt!Gw-fWnHYKa?^b9=IED1*wlsa|Y%kgm3aM14YE|HCx1^8X=T90_SO~f3DIA=EJ z2!cvcLF=@K`eCOG{pG2MO>G9Lt=3x?@gC*TwXEZ{wM7(8 zQ@yQqLSe?ciph@URLvGaxW`Re1~WH<^OEyQZTmaJZ$A1s=Rz0u)-fg!6mgRbwl<3Pu~I8%e8 zVl$ll&172I@v+~P<+WLyrPX^jBeqt|YU)Sh@l8JYrP+w~W*u&`ibUYNlzjl{86DB} z9cm<(2ZSG#J=|S7dP0xjP?Nuu+0|Z~;W7Isaw+W&cLu#L;N8)_fz8beU&ONR9%ILF zwmmV-bMK45z~m%0U5`u^N0>_XBH8j+>&Kj@FHvBgBuIcmp3dn*H5_ftyV#Na@mpa# zH>rc~AM3#8M!wChB|M!~4Xh%L-FT8;Cdq7&dWFI%h14k$o7)rSdT$g7eWqa{9>Jzl z*tWSWm0LZ2ktvsf2_v)ts`$R~<ospeZkv*%WgbhN`!N}}uuU+Lm z2bHn}ZUq#D>C&)GobCO3aUf_~wgo#b)oBGjd8cZ*)FaMPZ$i32nJ^cs;M;4Qwa8)( zsti2>rO!VT7&v27bJj5tZf#_F3_5$XSc6w#o11oPYim*O-r>VNfDDX`E=x<}$H|6! z!8fnh0eX|mjY{Xt8+ix5h}Zb5IWSpioeENTKbl~&RYm*GgSYPsZAH6=UF@a|DPoSG zp?Mwefxwp@-4s3X=vF4^)5$I%;0-tK)-^LDDX^I@|1#g#+TA_*j+N~N+I6vs#?WW* zU6igqh^=P(+v?>H^I3^0cyvM!wT9Lw%HcEfn4IaSS%vLK`C1G*@ZqeWq47K}?sS+{ z+tr-H;$k7=-mF8-0;|_`Qcs-02tL`-)pcjSH{14W!JyBl%zH=j`LRwE9JB8jd8W9n zrk(l%M;(sJ`i)1E;fd&}g)4a6#yu}A!|wu#rq^ec%*F#$?hNExzZyAo;r59Lp|N}U zY_Cl7wce62YZX#GKf+eMxjySeFtqB;fsoTjqZVl`r>f8ky5&BbTB5Zz)YNuzds-;b z!myIQlIXW2yh=jAIv%E7OSQUl?gXdviQkHfXmk55@MQ&+5xuM-!|6OZYeYI*WukKn zbk%QNND0JsJ!DRj&`JxYOFnFxBY62p@Lmp`L@Ynz#$-eOv`_`tZemvOyk2ON$&1*C z?peyV^Ki3LZN>4tQs<2O&dO-xG9r(T$$!GS(UjTN&;XWrCbutmr!Q_91`tGjKN(Kvw8PgB-*ccOc*KGL(RS+19qezg(MZeCouIG0;U=(`!q zSS*Zj!BZG>gNJ=ZfMn;G{9|r;#DiSYvP(RZHB`F7ONtJy$4W~nG29l!wDP9CL(K-- zX1(=JlhKqsGG|+GvN>|4dPLSd{}x?p@3fnqhxM45Lm`JctEV=_U0dw9!%pPGIS4*5!we??jF{ilY79@65uRR8ZV1@{ceMpzeo=b z7ibh7`E`8u)2?g`igCmhg&C7n#b1_;JkE@T%ad5EZyz!5%d`G-2<*v1#h|O$^7Ie19$nDyYVMd=6y|nK#Ty|$tsdb}+Hmuk z>^5ubida$cc_|G1h~JiqXx2>(YaI!N3B8L9??)XgjkgYgYSK~YJE55~#A&iC?mB8j zY+74ralYPwm$NqQ`Ji;^^;5Eott&Uwo$^3^lj!RF#CTT`0z(`eb~6=_F7|;Pl$%bFRiYia+2cqLzLK>#&@5a-%RiyVo=B z`5d7Q-@tfb{mK{;C#H4>F_Wrqn<;lf7DjXCALg71M$>Gow8p*!xTIwW2MLDaEl&7= zl?G*%TY3$M)cA*ZY|Yn4$XK4`8TX}EL@vB~s1s^ze=N91!%(Awn_Xt7%%3b>tRqfD z`#x4vNlPM}58ky_8Y;L&ezIwu{-+&Zzy&*vIHm=>|g$$R|SIN5SgZ#Q-D z?39EtuMS3ivsI)d=`6a#ih_0kHn=SJ!Fr6*0-ajq_=mBV(#Uy0+vhPf(Zlgfj$6mW zOiCNHN}`EA?5^Dp;WMT)S-0b258zq7tJ9YAaUJIwz5KHc1_$mCmyygc?iGR3QFb%J z$^OIkCon#edvn0|PW4c_P^qEX_s=>;vgk6}>OWvLgpJ6(N~%!vEF#j-IJ$g{kihK% zd$1(ihZv-`wJ;p2{Y6&poxt-p;j&J`aAuJg!G3Y(Fte&N9L`O4@!?#wMXKR!6Agy` z{pIKHk}5+kmp16=yc)N2>u<&FSR}n=NJ5jdsLCDSA2h)CFmc0$fL<xA2vB8oq=)r_kDyBvB}-pOjTpL}L#~8HZ!oankg+RywwtV-_#$A2@#=bg z7h5CuT~P-gD^tfi1a>a@hiJVx#nkPd5bBc%PuaX``4qOA-+{yFEI{HZ4$Uz!jf?z3 zUKLLgX(jtZ!ugH)I&*Y!NLl1m^w`dXJ~^ioM-hK@dqf}8NGB!chqR4?ViWG^czA27Q;2Ae%^Y zUB^0rJzuBwL=9!I;T`frb38l-TTjkm=zrOvwOM}b8OWHKN>wTRDSm64N|p6>07o+K zEjw#%R=7~~A>C{&Vrj>Qkf zjK2t!TbmYY#tvjOl8t<3NZ1zB7t{Rc2$N=8+s@(51#UZ%S?a-soJ02j1qN<-N(!s) zZlA+t8&diY$>i~3+m9S*0*vsT;0YwO7u{ z)&?uqe>fUkKP8dWU*LjO+PNq|Y1tP$g;>&lIXRFaa_Y&2*xFg1r{U@~r+h4RjC1?+ z!{F%tw5k7trO&qlV&d7Vvv77dL~@2cB|d>MB#tmRw(4w$DaF0GL6>8}9gc8Z2jTNF 
z8*6%P%Hj4DRCW~9Wiw9+-+D{GQF;4(-b3%s7C4^ynfW(;QV|a4mg%{4mI^oJM{OHg`qjAnX?-D`+K?eJ?M_p#bLw5<>T80&g8%(Oc6qt0K z;KRc8_wvHK9?qv6ihKj{9HVPC%Id$S;t3xo%rU}Pp_dYwbrRPljCPOq+YfSzF?Dp*IAi0;W}9Zp z){n4ANbA1O1W@M1riO0?cmwb;kf3b%KhAB#x>hF~@ zr%em#Rt~~pE9g2ME&45 zqWK4MPKsP=wNtCP?Tn{%HT9nAy^(~ziT1N1!NCvEu$^0bo{DXM!f7zJo~m3VV`(0L z(#xBx0}|kWN~*ye^%d;OTQ zzeV9{hgzQ1qceflIlQKR;d3XWOP94VD&)!#{yh>=DKR*HV%;Hlaxxr7hV&S?q`9P} zwgR`0q^#rn(w>JWGiyFr3Q?pjd)72vMmf2rbIs?=c;>4had>ypP&xLi+X$)~wUraC zg|v|=;q@2?Vm&Ns5fAYgeVi2U*Pp85$(%sD!wL~o;Fk*n-2$=L zxwjk+2U=%S=hmM)o*Us;Qv;4$veEJMht=qc+@X*nKwrk_%3d8c)e~Qw#U(<_=J_&R zjAKY0?JeFtl?4U}D9-8XPFSXSSq$6rdQTT$zxsHmx>Z8|8JRRoM4uPitpdD8r6ewm zSD<<@>%`b--%HDu)1>Y$bS_D4_IMpkSLoK6wwS3s&NVg~tX)EnWx&AoS8cE#dzZ-E z{rrN*w2&~VQ;&=rJ|esz4Fh4*`6)qB#&4O5eTA9C0X>+C{efv7rUT}>x*QRDqx#hG ztvS)pcL4*ZfU!O6vAC|AiB%!vR*|>n9Zi==p(Rag-IvJzTd$@3%{Z!SzD4&XK9XY{ z7JBidHEU-k2T|yu~}&tVI7KvR%3(PXI>G7uUl!K9z+ld zMQ@CUlLxD-c=n~Lx!r%%6G?^cyYtZw=!FZVU|8489+d9cbpmv`05S~lNG*@FDr&;KW-#S-UZ{Gemke8n? z3(cd*q(5+p9k)X!ON+2V6&ue5{hGpLf8c%oC{@XT(+(*wuJ_k!aXP>Flx-Bm;Up_P z3+h(AG$x)+Ho58VW%2itL!lOlvPxCaJev2Sljyji7z?CFWl{m~WahzVln4F11 z?}njQes&Cw^45qANLaAhvUms|cnwYA{660-`)1yfJ8@sypsH%tTki95cNv~p{miib z0^>=^N=~{}xvbk{|m(ds7>5 zIu|bW8tF5z%o$`{y};P?n|&@y8nnm^@GRK=k6K$PbwvOpzh@|VziqEP< z@wFbs!t-07+KidW436t<(rlEPu^ixZzN)`4X6XC?yAEF?$CzWHWc0V+HeT{SySc6D zy>Y&hjZk|>pxM;Ms3=o+*&(GbP;kXJblT6ZhC5kTgH72_y4dJVPawl!Yv5@XZxQW$ z@7LUVr!=XsmQ;{$rDZ0V$Yr{DpX?V^rYZ%4(?aP-6}I~s9DzZ^2?@lvJnI1`CR^U5 zvqDCGzVfe0+s3w&eKS_>G4;C26K5FUv+7mOOJ$#mM@!Hi|AJ%d#lemK0&UD~ch|A| z*ptAA**ukzeO8h7PtyXG;hNq3cd=zUOe2?YJLaW=zKYOU7)vV?BRg{2J+_J98M%Qw zQRVBWqXy#+e*`O)=Ti&sG-z|kZN%_anp%RV%bO{67KSQADSofNc+#~_)p@L!h6H|& zqVH8aMB&BxF`7kLaKvuJrc|q`G3+SYmz5eR?N1VLV#RI3Imqu+UC2Gl@HSdVg3*k! zNx`iL9q_rbuAyX~eVJuxoWeMJ!>sj~2nehVqV#CEKocO93?pIWar{PdOsiBvz zQD{ulovl2bI;po7MHv}Pd|A*@Jh_KxUJAI|_4i&=EWu+kHQS2W7TZS705`X`Zl&U= zTK3X-@VjTr;f)-5O;bHv;96DVJ9ky}yZZ6a_>tL>i5LHKWsUu6WaAYn3Rw-Z+$Sq$)Ln-K_@_87rfDYMfHxL zh0{re^D?K7k)F@F{8_en5}~!lsL*NHhzNZY$dRLQ9nJ?C7ubZan5#+6eJ#yVx zcWv}gXVNDx5gLh16vABIB1=f}Mh;HX$M2n&SEf$rYpxBuvFkL&%>+5!9ot4^oYgtqVt+1<8CmvJq=g4L``epodE656Cza`Cy zIW9GS-s2*MQuoX%i#zB)bLJ)5SP1ekwP0ZMv~S+po};nyRRVsT84xU+DTM%bqZ zk#qNE2X2S?jSKTVN7HL(A5jY$RGpcqkoTiQ)|a2m`R(!C9D*!+DkjUKwXgBs>7u;; z1oY61+Hj#Kx7prvJ_+lB*+O|p2#dI3i=h!8f@dj8Hg}$A`4fs~ z>dxBOOm#`WXY^D!Bd@(~(6!n>X*|>Ve)3-5t>ACG&L8SF z7JD^@UMdLWe_Tdjzuv11`;bB9qF%n7X49ORp|sh+A_01=NTi_5g0{Sikbe@Z&KFv( z&%2>uiB}1Y(;3|Y9%YqREJ6XA!Ulcm>^H@%WKEymO4Ewp8uKW*b4B#IAMK}CX;Dm! zztw(Iqt&BCx5>hKdwlHk&0xqaUr%K6q07oyt!sm^{9olnvE@a4Y5CnFcjoRz)HRtH zmHIs>#HG9CfR}Wh@kLeS6$Z!iw@x_@_=qH~U;MjUXO#5jp)B~uwrY|(;&iO^$N=71 zt&Ii0@%kWyMvC=o&aJPDl6rclpsjyWJ*Uw7wxP!w-xuhfa802%aIWmooHK0upaBjt5z5ZZW4! 
zzKom8G~^eM|BQQ48@nN&HbJ;PU$Y~e$N4LJbpu;05F|@x+JNi%ZaF#J%o-DRESG?h z`%pgq;SnUvKf$n_et)?q9YOWw`0>i+DV~u-!=$6p8z#|meHb7rMcrCx`r*BYlho7`jU-~Gc9&CN zFp*K|jsjMkrCG7Br({~OHdoRbUxm`4=Y49%g9i4eI_TPLR@%&Wm#;FqxMFVN5)Oi; zL)F2llDz(C6x817hpKy6Ko=T&iH3kxzc_Os^1(en#LX^=L)K zm2b6i;vZ+4V`=A)1 z{_fVJPfrNj0ID8%0B&b-QnOcd(L4PeutMJ$ zlq&Awu)luUny1}(R_fa)qHkv^C>_2HvpXX&&7j5Jo4%5`1y@J0jH(ZR$OhzEHr+$x z!6^*^hrtwj3wD%(_7gdKx^*r?-dpxDJTq_FGE!<{o$`hFWhpJ-84>i(T0jGA({?=< zKulaJorLSmSMhX>iIXJ-c#p4E@}{E8kE+&QWlWa7!nHw~Pv??P_4J*H^7;k`Wd)yF zh?oCqHaBpRO1DNZkdT1WnVh7$hsv2F*J6Pfy;d~q6ZR&$4`bf?r^b%L4qbN+n+Bya zbm-?{uw;9q@oJR&b}8me?D1P$aYB()a81w7mBd7A1ozEuV%I8*6+&{S7Lyv>razUX ziIt>&=DLd=#~BHPKuEhM7BV^LZN_uar<`_X#}cIs^^`@_SlG=8q$;`A!-C7<4J|Si zV(cQs7N>X<>!JfNDEw&m8#i1CA{ii{c|=0B{XBYs{0Ju~cP8f8B+EDSn7@5;NRPI2 zD$0VJefGRa6~UH%2CjT=|0eYYW6Ks77>#3_clH(biV$J&jto6#W8o*C zBN#fRpZgldWl5$Kb;SJasZ*9;YTOm-!~7U)mnZM1LCw>b8{m@fpsdj@Jj%LD;d2CS zeR5+X`ZU{J5bHC1=dHyU>zzDY3ZaHlBkG<$oZRE-on{dwruw~nAG7 zAunrzqwWvZO3_YU$=K4*t8b-C(?7gM)0i^6jZ|wSt#&72!N!Yk#^j+YF8SWaL3TnB z_NJAI&-@DF{dG8#cC-vxIV&=LHIA%r=TeyVN8q87#oH+@=s~q`dEFj!Gb^s#q-H;M^_+h8`ftz(w)TFYL&jcMO5JRAQ=C{9>))OTBW{n=uAbLjqeWF3#lsC3l zRwb@kPv7~TWb0~|;(~#CnWD<~m6o56u@Kw^l~+_#Us{wWId670#oWJ%u1qujgkX)j z58Z4-OJ_=rSQ`iDivumyBG-|3b6}>KJHP|Ow*T@IEH{Fv%G=(wXiHCXL`EOKr$!r4 z=w^`5^TxDbC#acX=nOp3*d+<1K!GWmufScFcOGk9ab{g!-H&#{AaYbP&ywBcj(zmOS47RvnEu6#!f(7OI4$yA-A9X6zN)$u z_T1LlFb!NHS6Iz=whkDYOZ^bAiK{yozYsuqm=mDa-osr$i;CP>K(Ofy4R;)!i794t z@tURXYJKB%u2Et`RKAtSYKO$*I#<994*PSC!0~O}-6$n+Vx6Fj-^VTP7XEeK8a-RK zxI^`I!-5$+Eb_^^()c0pc*Ukm1C_*JFpOXX2lkqi!(rW}Z*n6N*#h7pP&icEmiJk{ z`aAdO<=0&ljz4lokb%q)d%_+cPBYJcpewbn4sL!_Q!c^?(4DQ}P#)BMEeOrz5_)4d zuB4fW)9Z))(U!Nx8GMK`@YdYK8_|t=7Fiv^MttoFM>y$tiaEJG352H9&Fm8NH>3sx7u;%` zcek8%OVDg-t|@2)^tOA8btk`)t$((J))9f%un{S;Zl7vojAO>9YfN9Mg+&BCGo3Aq zK^LWKN-HbqVv0g&4Q)iC9hdGL8BR#OYDuf`RBGKmYTkVeLsX!Cl-=2qT34fDR+R_o zXTURg4}F6XjP9gxk2sX3KXY*r2Fo2TW@MO~svk~mOnlX7##*Z}2S+%nBZ%MLdkfu& z4X(Z^#RkVCV*}JVun9^4!2?Y9UA*h!!*WyKcxHmotm~cKAEFUXVvwe$E@igj3@KKt zMtts~5rDpsq)p%KRZp|C72s97z=K>Pr=FKiX)viqrAE^RTky#j5CmIX$%&Rr$13Hk zR&5PoN2ms|yccoqY`H8uh$ItB5ok=j z@IXH_{4F+daw(uw)o(t@0PO@zSB3C-e&$vc8~Ug9nndUu_o{n#L{&7A47JUg?v>^n zZ(Md;kUky)mK=a~uyMORCMjOn>II%jr-?J%=1t$6ESfT(JCRg+5>0L!H+@Uxo%D3Q z#--|O*;E1#-kCZ?7>-WjQpyXTp?)9*N5@`KpLCVRITGXrad4|nHH~3Fmq&S?Nj-40 z>W4$tY3)c#p;k?$9b=)lBGCSjL z|8kr7G}yq8o>*!T>F_!S09ol>=BMi?%GrB}hx7p3DJS77Dy0(KSaHY(4=?w)cm`D| zI=BV+(6TDL7%!f7nxS}i<@T*-!eDm(3XYCom+={A6QHH%O-oRKcomKy_L0uK>iyXP z^?lvywgMmEG8u9l zKSEyuc*Fi)MCU0Xuj`Ww0W^TM4+yN) zM5aJi0y<0t9-wjOJ}!dyU{5hR&HUFR-T)u~d>L8TF^`aWF9?BkrwGK=5?$Z{ZOGGs z4jK)-pwhq#lYj;~K$i!VgutF`1optAXaf7&(JAhK2>%M$7mmRGePsSiLiB+K5K~O_ zAR{l#02&}KV0%Db!1gc&_`vTSjVdA6K_eh9AYE!W?V$310e`@tBVzHO3gp}?LKZ<@ z0CX^T$>D|B4ZMKz0M;j?+5_@}8qi@6$Ui#4-hYj4BEOb%_kRR`VTASp@USWH?vflr zjDQBn3x+Q_ynyXN58FcnFJOD1Wh3mBNMN5t0((UgS~mVK;14KwfQ-2JhfGoce~Vm@ ze?S8hpn);+LPL8%UYHAQ0(qe=vXRCgipBCVclxJ!0boG=ZVWjdGDxkaDHZUygl~y7 zFb5hSFD!201#Az<3n&|n;0WvS5ksKGJjio_C-k6gz`A{82DA&b6JT%2mQp#~yagF{ z<|5py2K{!@Kz+FBLmcE*6XL%L_;>}-U^(Qa4ZMKu0eJ!2gBcuuQMv@>G%T(9HTtj! 
zH2GE8+}xaGWMoia!`_|*_DChLmn`vLGoHXcw#3_JEP*{$3G69KV9i9b3S^oU&;e;c z^8&VqAC--$C(%|weSmUd2Ji*ilCTa0or1o`oO9<_;g03_`}}C76WMfNqqbal*E_+$MB^uE9 z!`{W@XEN-0OkiJF3V+z+mxOJYOhVR80vb#puy-+mHA)Dqwe=PLG!2^b!rzjO@ZFo+ zji@UdQq1qfANF&ms;PLNOJKbf625UVfqj~R1``SF+1yA2*hjd@_ow*)kJb&m&=%R) zR=aTy$jZ+g0@TAnyg3bi)%W-F^P_5cek+(nIZXi?C^e;lBZmgJ{GCV!*nix3bDv{- z_?~Q>|FZ3N<7AKv$rT&|z<+4WdTJ4l!uVZ zigF0-Tiq)DG(Y?**$8`s)9Kmoz#sN`r^<)@s>y|IQ^~n4YCr=OiUyGle85cu*dv`( zeyS(?-nWegKPwyOfz8yAYJUg*umJ{DJnZ>RVqDc5XrRiW0m=fT!A7MfIsx`lCm(BT z$#t*}kA8su4an(lW!M*;!2alDHRwu8PkzI0Y%UwIFGZ){fj`9cQN>pk7m!QaH5zE3 z27I7O&LAH&&>)9RMrkG)=!k{=*uZ6#li9B_wu-W&;hTlJi>|Xn^vN(h1Go?@ zjwP@rCt+KUCZ|@)6IiE{9JiDup37wjtT#^OQ>(|5YyMja>>*Eli`!;58oi{z!+#%| zV$Z-e%_;l$XKWvQ7xpb8upTA3vTHg;gNxhMfe$z|IE#GHNQ1BSVgTAl6b(2wGScAm zsxLHv_1=I69Qi#Xs`$P;CpuCYG^ABDfxHdpP3297o?<9%LuH^PGa3O2t@;s zhsX!06TYPvkp^fZ``U46z;2+yN#uhSa^#rh7apK#kl^D)o`NmG-G^w&Zal;>OA)-| zzYG5}XJ8*;NEsLQP&{yXr)DD!P$$qdXjLylTcG<1bYF>W&EbQS*bY|6eno>5s}#tm zx>~Xh^gH$;&{k+kHlBxZ^M>@*T9?0y|6!d!s)~X~u_S)C7SLcCfwgbRCFFxfo$zhF z*u1ZV{Y9j~sa5iXrUADsKt8A_E+o4_cYMokd>S|I~Q%P&+XEZ)W5c&k1)$ph2>yRp zAEGS`>r7KLs40CzvTp~F)W|L5e!v=X&)@U(f0ml%z^hG_tu1tP)o~kvKg0<-B zZ4!qzGT2AuMPCLd?gef9W`B{U0k>YPEanXE{A&DvRQI>_`C91n7w4psD`+194a(j; zr)Yxz?gp&m^qCv`O5g4;2CP*gIObAZT>Q(we_QeS(dV8%drH}eSKW1~e!}gO79>9{ zmSkS`ATe}5p^g2;yAgXR+QI(Aza0PO{Pe4?*=#libI#o`%0|Tga%+91Z}k^1fqhW- zv5KIsGchswpTQsd38vD*K;QIN=r+Li;GK(xc}Snr|AHDxRSuYK=SzUV^UQGP9fh(u-+-TfB!x? zeE2XyKBCKM>G*H{{w=|a_h1Jtf;M7H8T%Ejz1PzHpT+<0>i(8s`6urEEdD>L*XZYw zZvO!1DpzZ14UQTBiM|HuEx1OK)EZ&QG7Cx4<%{>eIM zyuOY7?;HR8leo52*MIo`C;$GFf8Uq?_tr7U|58rgBuKK3E0Ijk2^8E9T!g7>yocxa z*7J~Lr;b!cyuK&@{+Dn^SexUIc;}q8rZS!7M}hw+@PCH_IK*7T*i4NyXEXX!M<<3rlOt^1PF*)Uer~v?k2OG*&G4WT9I*f- z5?=}a|8nGMxTcSchU?#Tr0Z#Zq~Q{1sK4v~)g$%czVQ#&iC{rChD@&$;WQX7{=o#` z4uEVA`7C6d9IJzj2|VNp238z^tPYv)9|ZHOssWyEki8&(M!)Q*;B#~qe6GB}-}EF* z%ZC8;_rU+G5`W|*h+rg|UlsHZ+yFQ_Lrz23e87Lw5Fn%Dt((W)V_cmh#3N>YnW9?! 
z4}jdHN`QS|IlyiW@q!pXk1^92H%`Y(W6U_NAAxb=Q{jAC1}8r73da`~z7!E{Qs5uH zfcoDAfX|xY%P+Ct81qcWedAgX7}q!*jvt^kR0FOU%cTw1^&F=@vp)=SV|4(#AY`%& z_#ET6F=iLz`02O{jNzx(F_;VI7=wr)F9!VZdlUgakenI|{KF?uH^xa5^qn^a-*`Ia z*6<}K_8Q|RbOCO99fGBB-wJR)ukTgPS%j;XscC{^-%l%8H6pa@(e3j!{1- zLC%J60|~gWsh&poAx0>%rE%KU?y0D#koWK3lQ&P3s5nYaJTWJR5^>157D&;bhM)F% z#yAK%roJt08|5s<4C8ljh1lzc#J%M@QzQpZ7{2Cq0diA6Y z6}#6Qe)<|?nOkey1}`~swk5rtQb|>t_OtL~EdJ;EIw}^Aio*k+LX1Pi7(6-_wcy3W zuXu=tACG^hZAXAkhg9*i@Si<7a@$ek)eg2Ao{ zXeIlUfUyKN-g4k?l(XMz+rID|rk{mBDk=)#f6s}5g7_cq_$La74ICUWHXdW5?}Y6n zcO&Ic6O$|W8w)@%D)o*%^TwpAtn{$F*z~`*J=17SJAh)5eF*vu4%kc#4aOp6WVBN zZTl%l2SooY{Dy{x6#VJuc5>qBFqVvpA>zbWV_fYQ9KOg^TxX&&h7jUqFjfR(=eT30 zaw{rZY}*(+*dRYQd=q}PDQ)r%fA;(t<>QI5P?Vo5;D9laoEUMgct|dpMEQ6(#DZe% z7smX2!GRM4np0HRs5iLv_93tb8 zRm6c_?}UzJrQ-J>jum6}xZ$VeEUx+Rj1#AKlN)wCZ?3;4=}#hvLtFBps6*s4_=soU z^d+~BE@pne+t*j!dHE>S%&7D?bH)8|6=HKCBwr z8u+cj_k0}{n^~HlMO~+zbEo1@zWAGS)-A!fTyD8qS@QO$;BTw^p?ykQI%2$i`dN-{ zD9Hl@9#ZO=-CyEI8rGbkZ9Q7925+27`H=75zyELZ&yUET)?`##S{iwi5l_k01b01> zc-rzS-N0?@QEN$oT*ZDH#?Z{2JNIwHPq&+&Ij&v1mZEw7!wV$wxak+UO2vAAEmzNM z*CEf6BB^%{9z56xJHqg%+b@mxh7B9Y>(`+4A=Q)>k@wjTAQtB`d386Eyi5usIS($7 zg3OyBW1j=h)RPY%J`jv^r{AZK{}lXKKlYQCE?r9Y?%hkgy}e0TSXh&c{UA?IPqJps z8Zve2)URO2GJY8TACNyZjeh6xN8$ew`SU0I*?$!NACW&(;9LDp+W#&70iOHaF#SmR zKT7`}Df>@6`yc%OEI<86|9?OIXXuGZ~w$~>dt=TWvDTE zxLuT_A7oMIcud{Vm!xd%LGbt1`d|M9yx*!5@xb5lh_F}Xr%=cL75IOY{;lEl-~In6 z&>97xOO(wAEgFtaJe(t3P}kt~#eD>KFzT8h2EN0rG9goEbP+J{!(&K*o-K zx8G1LRV0)>>eFZP<_yk-!o0Z_3`OBwEc)zNy(Hue%;$i=3oddZ{X1ZQ;mJ824aV0{$u%F9_KHa6wA?)%8P}?F+$c0oOand3Id$9_K{S2MGO`&?g`r z#+A`$2K=<@e`WnpcRR@aU)OTxqR{sOeHL(T7JWz1pK2b!g8n<`r_ivLx-i20GjPEB zijb+nW;l{Oz#qr{an2s+U(go^zeg~9_dxIu!F@k`U|yqPUIt&mg#IL>x#2+j>3iAH z(UDYDf-gsqGYNt@FPx*epK*_xOToEboO3saxka2GT?n*5pC;Uk0)1$34+_fP0eI5| zo&mlA=tvJ52CVygwqHm{2zh!lkXjcT=iG2i@_xqs#3 z9@g%N*{(^AljGPX&YvcnSVeGt8tdiG_&sor5oys9Z_b8q0;#&K_3P^Dk~;9?!8ybW z+tjFWZVn8SU`=`_a_7w(a;2o^s~illSpyt2Z_a(+J>zRNB`1 zkv9tp3drqa#?*W;4FmcV;5;zGfPN|1CTJLN-W2ESaNaty^fSeq=pTf<=>*>gQsZ3R z_wdL1&!0a}vM%nY)!2F@hKA9Bc@Q)Uzlxc(N-i6aa+Z-Z<0)9n)H!Ex>q*CEEW7U}vChV`J0 zN(Z$LjQ zod3i%SaEG?`dzGxyYA-ow>Iz8{QflEX{6%qOR7!e!`eTOFCJ>rF3~px=baFSAF02! z@88n>3Yg2n`FR}oPmej!V!Mpkq77sJ@7s(&O{caTFJ8PzUf&5N*AMAab8)!-6Rsam zw@chdxyC7g?V0lo?q?_N=JAaAm6l6ToDaQ&J*gZ0Xq&w2dJ_x~16 za4v&;?Vr}3{a*boeP{aKuNcq&Uj41X{j2W(SO2%=&#$6$YvueK>Hn?quAliWZOd}FE4uXveX#->x$Q|i= zKyGP3c5f*!Tej@L>zwSI=Xat>>ZKFpdQmauBZ>V$wD%7Ht&m>(0rsPiy`WF-54aRo zeuV$zFRTN~b%NYtKOFoM-aL*yM)moyuXFiTc4L1J`@HDWi+z9W?_&H7#@^sK%o?}{ zX+L-QEy?Bh_;?bx-+=1dVqfNJ=7Yxa(Pl?~bR1*Az9f#d;2w)O#)18M_};I+SAJ%C z8s(pZ{(@*DUp{0)B0tnL*|TOBPy+j{*l(u3J>Y`hJ{7*z_sY+BoJ!fj*eAlhGN}Et zHmQ>Hdl!&1yBCniodzV#6>P+S?d0gYPffmkI&3@ez4Fn{yzOa3*_hbpLH}9o_hEkr zZEWoGVLt=?WaFAl%vhq&<0+7(m(5*ssF=1om&y-yZuC*ssL? z6#9WCgzso)4bdPhQ+ROP<9aCm9zHl84bd$%6 zlE{6Zl_Y+zF17y#&9@ib7n1BJX$0;5Z+Ll4SG0+af z=^-X$+746Vhl{_FV7yxw#=4zg-U0Iu%{qbZGe-T2dJpX?hz(5mkupG6C!n4NTc8B? 
z1y2AwAOUPMG%p)dL%X5n3w88euVII}Y|YE!6x8=TECq91}%dfMcOJ4vJ%-2e%rQLm6c_CJ(f==B}R})4-$NES2)J zu39%>->nj~A8>phV;E2$u1$L$YV)l06uyh81^653ab>sG4HzRYLHhvX711^@)0-HI z?{MD!@;-O{^ts*gsq0|>rxF~;!Z9ol>&Z80UC{jfX5XL@hIu-wi53QGDdzK3Qs`&C zS3mt+YsZ$}Z|&KZ?sLE27P->WGc^7!{nnOy^!q<^Brsx*(YctNpr9rh=FdSiwVF9r zEDwXT!8i{?ka2V(N*g65{4dCm>cwF2GFZ@7@hTAZo#A6JnGIKh3CaO_LzdkO#`MW7sg3F;+5`LTSqwD;Q=_@=As|V--A}%Ztyj8vo=OOQ^o` zT!uK@SAl1&7}ku%@cdeUW-(luQI+VzwH?C(eqRYR(SxHoT(5yAY~Tqd)U=9W55Jkh z9XqHMpTjf;XvfnChbla4%P^tfvu^Snclr1p3#k0%y}9t74ZQiKTsej`Lk6D0{}^1R z1lop&!qbj=b|t{Ef(rxg@|g@R_!Xfw0gSL@sE@}L-q(VwrPP~932VR^DY+D$vV&*7 zzQ<$)0q(LCHDwqhsWVxI9NcSKU)$&<4ex!s4i-Ey1z^+x*vtTaX}|!#*tg!4VT^;n z(GAeCfLd6~4B$M?HoS*e(Iwz_*M@h|@qzPQbsFe{+=ld~`3-W0I!$ zK^NXz1-xPp@Y`{HB}Nmv;q&qoA2fZwIX5)t1yy*D2U-{MCJoV-7QkQ(qFT>v_!arw z7T|8mvkXQgHm`psrGPx+VK7R(YrGjgf<*VeY>uc<5?d#<0?#uR#^iA+h@y+%v@n!f4`!W4keoB72e#U;* zeolVwer&%;zXZP&ziht}KZd`sKhvM(ujH@mZ|raF@8s|9&-Rb>Pw-Fi&-O3zX9Nfb zFauZtN&&h7#sSs=P66%#?10FCgn*QQ?0}L05PE^kKvtkqpl+aXpmm^ApnD)YFfuS9 zFeNZMuq2QXBpk#HVg)G$=>{1GSqC`B7+iwQi8IBN`e@{!okd7RtLX<)@LUcp)LySW#Laaj^L!3g~Lfk_)FRY6)G^d4)GgFK)FYG~8WCW6Oj;+ z7?BczY|YC^g#V&I4+WrwKG4AtXy6eX7#tIv7@Qeg2-FY+N=N|}G=KsYfV~@F9tc<` z0>*`atsr131z2hTjuwEQ8{ifQcqIZ(g@BJB;35ThXaEit0KXf+9vBuAmKc^9Rv13oyg8)7qV;Ef?lFt zQeFyP8eaNd7CLUfMPytjO8 z!**v3ZO#eW8ynhM0<^PiXk!d$UrcCQO3<#1p-nkKdt!%0h9!ihgk^`7gfYT}!RrvFpjW}aEfq`U`IqkyGn`3 zjwp#>uy`2@;1e*|!fYm+#a3eLvW?l+Y$vumY%>+fPGG07v)LtVhL^Ax(~IS$}3t@*BvBWB(&QUXtO2IUWL7x-Yjn=Z(VO=Z)rSxaJC?DwLZM>2qnaLCwPnc7zbH^40Z%5>;`h!10-=^Pz=c8 zM3BarAdd?{A}ck@Mn{muiNT1_f9X$AKy(z;#0$L^mr(-kg@(CmxK?-O5#VLJNY}7j zq$_xNcx1Y=I=2@xcGci9 zes5kU21};dyZp>E5wiwat#l48_Q<`;x+9e{?{0hLYjITyrN$cTMIZ0dWV?u5XSuX< zXSwh>viW#YOjQB&NzX%fHn3Du*?QXc{%|+OJq8-gxd4X;cqX(J4a8Z zCrbn`I*4>qUAAOmBb^Zg&(BhFDN(Q~1dQev&p>WhEoQ>W@G1iHJ77 zG26!8*3Qh7IcwD_JEpoktKVOpW#m{2EEySD79_*JI?Lh3=!VNb0)q?h0B+RbQ}DU) zc7;#J+kw}GmxmFb?`~`HA~tv7v4+sYn#D6A?h{nDjeU~9hJc&pVpBRh$IU_MS__B9zgGWp%28HopLBo-f+ zFKTyd(YyI#my41f+z*vkGKyMf{$Wj`Mxgn;d8cRV@9i}Ff=&P{Ywm|T>leNbOVU}C zH@ZOP?vsOyp6%V|^yK`04<+}t*Bx)4kGgSgxu^o4@Se;^V|}IDZFH-2nIyd5I%JNE zvTVjve%oOy_w#I!(0X(^T%6B-p`&@0qKxV>Ii+~P3`0%dYxzO!rO$nbT=vo#cdncM zU)6&aOxA7m}>9gn-UJ4OsYy znTV<$7IRp#smdv`uF%w9?%@uBS8*5eReVKb{MYcj>UyY?MBnJyLkI6zk=JF`Om({} zlA-67?O&`5HQF;d>2jKKx9;=vgmsH#Zr-u!H9+X)1}*!^ChsJSZZKzBu8InA-KBFa zUCrnu^OIM)Z!x!mU5nADiZRG0Aa3r%pz>`8|ft?Knnrp{hxC z*oh;e=Hdf8Ph9)P$})X+P3nZJDoWvfbt`9mavvY-;knH6_SJwu3*RBnPJSE`&f0dd zYN@v3qMvdG{43fI?L$-U)G5cP_ywn3 zZ>Op=V$AWJ+8ni*fqE<2uUa~1^VD-%lUGU^u9Zyn4P0_GwO`KrX}d0q*Q-zK{Bo)J zrs(;~DdTf1##iv0yeYmmn=zBUb68TcX8eo$_{iRIEhlI6nh(IArr!<3?pQ%`7}UUWYw?qGOUGnV-y z@(^UQtPD$Db{tA%j!Yf{mkP3SqsNUNXUJOCR4@F+>WNamgGd)z%8x+BAT6WJnv6g2 zi73!NFlq6xF3(h1WopLMTefuNvXx7jv-FhZS>whq^)!^_6y%w*ELj<*in0ud;BSlO z$7e@PNs$V3ys+@`I0Nx0w<(o!yNf0|AGN+2dt;j<>$LA)i_8U;Me>6LUQO3Yb5yDL zE2H0&kExHBip|pL;x+G+o9EU2_ni4AX{yr+<__5zOfM zaO`KX_J^)*NPH_Un}6q8MSu@q7dyX*apyyfyT2|>zgR8v>RDK>C_D4&Ky~#qUR_uE zgjjjB-y(it`O=Z8ajIemgq`_u{Ag@x}r z1Vr);-yc0@?Um{Q&bqeSga=9(81FQlef6G%SfN7i4d#9BJ1VHLJavePj#g}NXF)yX z4f+$+&(xlOw_8?-_uh&Lo#uAqh-Mca1C|TV99DOfzLaKH z#;BWX-RH&Y1>MXlW=x?Bl)eM`MOnR^S{B*OB|O?deFyV*Vs&UQ2-aG=b^?5StTpsG z4-da3YZ+@0eGS8rq84^`){3J>tunE-Zekv>lpCMobsgRCl*wYWo@|>uIErpQ4Xp=l zCv|yg6B|303PRJLe*#O9rNAD;mUoq<%duG@*S11du=-#jOn$KMSOV?aEp_h1!^aYA z-yTo+d723zN@pn!RGqj-(BZ?r%Zi?9bF+EH{Ax7h%SW8gw==IkWD~`>6S0CdM5OyE zch%F=>reS?kbFG$vf@GwtHX~cYgP=2?d7bdTYn<%{I+a|O=DNz9Z+o;?Eawg;PTf+ z%!iM4*Sa0sx%2AzI4=#+P0uo&#Jcqsy}#Jgd2UtGYlngxx#C7DHIohzhj9~6J@R}o z?YUHuk)+pQeT~OrpEHI`u6KXv;QG8zr166C&Ve7YcX-8(d!;br$;#L}_Y=otk+=zK^%x^gZ`i 
zIgA~#ZL(Oh9SU=*$L`!E8yPVo-Fv~f z)vvCc2-8#0xlRJp*=Yj2 z%a<#$G^Wc3&Sb`hiEk6kG3ab{r=9Nbx6}HZ@~dYD^lxuhP?kQ+y*^@_hQ!Bt_c#9lKW7oHd5ka31^(`hf;t-3ZeRbAKx0}Ul8(q6~LZ0V|%GTm9i zctb!$Xo}fl2WW|KZy-wo?{yFn(_L$4v1%n#Z-n#DF|%&DvFSDG%iTlVE4zHdGB zU2PWFC#6SP?SSKF!%sh1UhkXMLpwp}M!<@g!UIbfH#ZBKsEnF;V9WRz;y$m2b^E$p z_vg>5Qj;!^?K8)8LjQ}0qjm0l-0XI-WO|2o15y?{nW?jd1@<2vTzaclcboXpW#*T= zm%5o{9#9nXS)APAWKDkw>2A{LvqEL2K1s7L8gw~Y+3=ZjS=`e3UDJw}x_QX+$M}!o zn<`JL-aS~l<;m#}Yi=Is(?u{xhtI2YX1M=}1v|oz6$?iX+u|fP{#sJUfdy6<;*Aow zM82t7>{jb&FJi+wJ}7ZOo_b>VuHg3{vog1s6uQWIPBaV3pEl09#-!(&SH!6>aFfFO z&B1c18W+N@yi>WIc4|lBs0^FlvUa9(JFLCpJfh2`HH)1_bu!p`;iCSg&H)dfC9O{A zFl|}njOdeYY2#je-u+xh^@!J4Lw?Gil<(E@p0Nry(+i%t+Td zbbMUi>$t7!muXF_t}>OBJaN>*b6|9@_0iL|u9FB1IkA?{X8Z)b+1f}ph!cmr8PcPH|;2EqlYlBp|RR2Q<)@Pq~Hul6WjsAyG??Pa8?y#1xDvQ?OM^D!Z4aEn+e9_BsJp&&HZIibt?PTVOWwdN zvkTFZy(EQt_vzZSZzAAC_1?eIT+ZiE`1j{`4r4h7v-H?A**dP;ZPg*_@@T|0*l-Od z9LIt~0}kvr&`~SCE>!-Tim7ddPC~Gm{9{?8S#pi4YA9<6tx|b;dWkon(R^59l!~q% z%lQ!fB&clX?JVc5ZB*I|==WH>uDUd#+Mu-Cf|ebt6QV^~FuX9g%QYSYa+FfNeO6@) z944Mt?o)1SNMG1!-EOh8)v}CUGX^er5<4g~wP?de?K9J}HTPSn^y2B^J-XoG1TX&4 z8vbeB^RqtnH}$Z6srW##B?n~&~TdY5@<1jE|QQX^Kmn}dDvLE#3dCk+)yB=~o#2@ST zexgJ)mqVma}t`;Ab!y}N7%y|{QavqEd;+~?(YW6r&P+P7wru#eo^ z(@~b1Gc!EjT!=XTI?=9Fu|t&VV8e`?R=3ZP((~QhguGc%s=(1+8HJqlxl)63TC}GD?WH4c3+zh6K&0td47XBMC9AHA9 z+u6XVahjh6)6*=NT9=0ZGF~{o%GL5NjSh_c4ZTqLoKMQp>R++=U&U|Aoqfdd- zr4+CCTQwx!kl4H}G54ut19e1D%acJrk_m5;v}FQ++n+mZ~I6;BuuWQ(PH za%8uK5A@t!yvFP7p0K9fnWqn5Xzq7=_58Xc#G0y{dZhL4`5miQilZh(EUFhYe7ZZ9 zAt*V7M`OQ^Y(V9JV`mmzem2Zt&{@L)oe!tPI?nUhsk6KXk6RVfRV~tzIo$KoB|){# znv&OygwJUw8-@?gN(>$U`oo^AbxZ(97m&NSlJDixV+V{nkhvP3OeNLV+r^BuZsfYXJPma8#nP@o3w(Fp$ zV{;0ovmU=W>9Ez&Xu6@}?JI+wvkk`IHrqYf|KS0?lrE8~S3T0NYI)Z#I2XUtNu$Kq zW71>wljA!T@0c~*j42~lQxP$JM7s^6`o39H@1bk@YSD)=pCgjTc01z{()s#{Gv2md zE`Lp8x%{Qr$as8wJTfkS4TF1(Z_(4mhgyQxh1tG=F2qgr7~ZaScy($C%?NQ+7biy%*ss9Zn2d$ zj`^r4*HXwiAp&7F9$OLGPJoqwJ8wYr~z28#0BZPWCpPYaH%rbV*)y~JBdKLEAW+x{m8Jf~(PQQL*tS%pz(`j+VNuf0p zdma%SWohbL-+oK4jQs}``pEDesk4bXsyHV??%m>@Z~Ud6x|=MB+o6}OICA&)DdCd7 zxv`Fp7uWTAHTPD+B~!iFVZ91;HDcYCb#l8^dL?_*#_>^i^!6xi=~r;qMpLSvmQ%lN za|^b;FI#*$SW#eORmjRb^XEAWDo4HF^GN&fR*Tn8cbBhym@2$~>d>M|l4WOi-`Bcm zo){aftI#=b_o0Qo4dW}uBfeHAcm0^+_Nuva$e6cz8)OQ~ug$uV-)p_$CcnY9PZO4VN{lqgGpN)a z<=%hDh18H9c?#y+X9`-@>8XrMF|D2ZAiJVNeh({C$5@a0L)UF3%12$3W@b2trj-gj zdKH(oaQaG_Lqm7vEBl%qO3=^ClOF2se_^p{Z5DHd=uG2F{?1*62J4l^kAD*)&}F&6 zxFx-l(zk6E$=b8_!IGQrUah})Z>fmDv8#^nZ@DE#u9RJSyQ9IDJ-0qoj@Yn%?_JX! 
zrBOFx3iK8k%}L$Zd#C~5jM{~7)olB0-IS4>^GI|FV?~iu8KXKTh9xrE|U>V~3{rUA+Gg&%pZT56mP1^qa-e8Zq{1@vS+97u{ zR8gjkbZ2+ku0?xz3(MK5jrOoU{T|rES~NUuvWLG7l3g=_L|MOcf`^HfPc3(xHRC7Dn&!+Aj&3G%gr<&Td|LMC&hepjK9duY z7{^8WMtbac=2&)r!?yc+CnJ{REH?M)F-&dyG><96bw7!pJ0-{xs?8mlA+%%dN1=ST zD9mB@PCKhRsqK~ycPJtr(UON1db_Fi$m?L>ZCNm2lBz`N~nLlFyk^@>jAoXSn>@5*C(f^qGQp`Cr_iLchW#rNSkn$&xsQ^puW zqa(edOgi0~sx&@%;pj&OlT?n0hF?#*d%fLgg{5*M!x3 z7ap~nA^oIZ-`$3Z{f<4pR&r}Vpw98Q%s$6v-WYYP$|zvS(Mw09x6Mm1E^wT?d3o`* z!^fs~<4trqktEh`!==^dW~}pEQBip6k)cQ8>G(adq6<_iQ-&4hEcxKb|1|Dl-TV%L z)|*O>ZgqSu$hS~^gh}9Fg`+&FlNF!jt+%aQ|i+o;0E!lf*& z!v2UJ{hofieY+4^nPx1c6LVziYTpz+RqvLY!8Nx7?BN7C9uax+^@ovHF##l&g7{mLPO-8~c)x}B-2I+Z_u{yXOv z1A1h+47+_Gp8ewWByZc=g8qgp4wcP1+-bhKzY~1Kc8bO6 z^Ha~9RP=ewmmOc?J>kxjXQCRLd&oaX)e0#-KJjyx7}H_3_ELkC-%p8ecfwqAmTtw& z@-dp)yO^mX=8axEJg{8N`0~I#`qj@Czc~1LT*Aj_mY-I}8;@!FlhXLyKA+i_wrILT z_n8Nic6S;-f9HU}tCE5rdkA;u+h{Z}*7UVWw*RY1RoCpIC-)O_co(v8s(p^JkG>F( z6r;=5*&mYMEKFR)>K-@s)VVZ=i>X~KJv*h!oxLAL>$ioV-weJpP12%f;#OTP&TO|~ znndt_e46C@3ZARiNRQWSG}xPP@cb%|Ak_gpo6OTQC8oTJ6qOd%7J0zZOO6+z$p0hc5fH?COKQ0&gZ6CuVlcTQEPvYJ2Tg=3sf5rT5QwKeFnEuGW&iodjJ1 zd&LFKebCc(56^(X$%mIsR$o(RW7i{tgy`%Xu`XoWeHEkk#^-}|_M4v?aVmRfXkPv9 z%chdsR~(2txgo{6!-nF(%l0~#46B2LqZhs*QnPoIJl?t5!R*Ps5*wd^wFyI9RKq=o zh=&a7CwcY4i(y4or=r$+I{VCZ4e6kk^zz0Yc7aD-o&QOjh#R-BUR}OA>z0U<(2et_ z?{D8eC|1Wu^Wqxw{iQZ1-c0A66)<+wLA}uNeFZhU9=bnn-E%V7#B<1qiIXh@O3kk; z-`6>>bX-D0DdmF5p!?7IIx6O#3d@sUeIP^9GFQPi#GgkeZt{4?Fs-n<8;94w?_eZ1 zZo9JMI)yhUUOgXJ-&ZQ__Q%3f52L!_nVZU{p0pp)Z^#JAr>lCd@7H_Rn3#i)^2`46 zG`M43GB)0W@sim~XGop#(q3NXm!8i_P#o$S>A@cU{;=`|#m7B8WX>)T>e?aBIZNbo zyzj&Ed0ChFYh7N*zU1Fg!nnWi+Q*cEn;9K*9e5?PO3WOl_z&GKq&_@p=uRX86On$VekGK_!1a~5uyKRet#9{~3Y!_QLd%$~Nw>4IAVm&HErmY(_@`6Hf1#!Aq<5gW| z^J38DS$v5zQii>#9Xa>G$BODu*1BW6q8B}S@K8ndxQay45gRX!vz~WP>IAgs6InAv z^-;m*LW97P_)#7G-;~KucvsDLdwXE2mQwH0Q_ckesj#}f;qp4IostXN~X zVD|1+#a_A-KRF0lY(2f|&Qs=|(Z+pv`R+0!L-pBRluT}V47eI?P&vo)!H|#@4#}$q zj;z?Udig-h0i=7(?2$t@Wh!QsoZo(=e*VJ+p`zEdPupJL2|7J5c#%}j#X~YKd=B6P zYl8#mYyQY67q4(jV{J`_&L6VG8pjY}=)7~2aY7h7r`>Xe+R1c+(A+VvXf4eUyw8OOgdDhM#%6;?)&dsU6Ia97)A1VmChNC(AlZXL8X<@&@IT+AM1q9>-V z9VA&A#v?4d(oD5yi2bBfljjRq?sd6hz4}aqV6T|FnJMB7`#4{IgKh&)*X+raxO-Z8 zx1gdz-}eKw)FlQy-*I|&mv_?_x;mH07RGCf<>ZO6PI=u6S+MQm-gN$R@lTcwUcCQC zv9+2>v07c8C2v0jzO+f72dAG zdv1h<+~!@gd!FL0jiZHM3FwXUU+On_h)Cwkyvx0%CA4;0Y*O>-a8k_tmcHaH-F=5W zZoCuA3QH=H{yZ|XUD)h6f8B>8Ri0>f^ZYpR(e!z<-44fGzi2yn(fPZ=Rz05YT6eTt z^`Q5~>(Vzro8$cPxS2Nc4>N-K{c@J-1bEBc-O*(HKeC7V0EQxa$o+=&vFmVL` z`1WsyT~aHIxnkNg*NY9nO1FC@os{I=>41Z2kPSk z!Yr5Y)@xYRB;WHh%~O0dxr1Tm36mvi5eNEY)vh`I(&5dPGi z9Z&TxR~evEYrS3|fTvuE-!Ly!->u!q$vcPLIcKUEd7#H4DMhvY)e5N(XB#J9D7x3@ xcKe77gRN@)PTgCcHp1#`o~`qx87t!B1V*mscYYnJm^k5zqp3%{CFOem{{S^X0m1+P literal 0 HcmV?d00001 From 0d2a9765b425139ed1c16e7e0654bd54ea2a1705 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 26 Oct 2024 11:11:20 -0400 Subject: [PATCH 159/199] Update requirements for linux --- gui.sh | 2 +- kohya_gui.py | 5 +++++ requirements_linux.txt | 13 ++++++++++--- requirements_linux_ipex.txt | 18 +++++++++++++++--- requirements_linux_rocm.txt | 15 ++++++++++++--- requirements_runpod.txt | 11 ++++++++--- setup/setup_common.py | 2 -- setup/setup_linux.py | 9 ++++----- setup/setup_runpod.py | 5 ++++- 9 files changed, 59 insertions(+), 21 deletions(-) diff --git a/gui.sh b/gui.sh index 150ec5836..c6502d3ec 100755 --- a/gui.sh +++ b/gui.sh @@ -111,4 +111,4 @@ then STARTUP_CMD=python fi -"${STARTUP_CMD}" $STARTUP_CMD_ARGS "$SCRIPT_DIR/kohya_gui.py" "$@" +"${STARTUP_CMD}" $STARTUP_CMD_ARGS "$SCRIPT_DIR/kohya_gui.py" "--requirements=""$REQUIREMENTS_FILE" "$@" 
diff --git a/kohya_gui.py b/kohya_gui.py index 16bea2146..e8d64a884 100644 --- a/kohya_gui.py +++ b/kohya_gui.py @@ -127,6 +127,7 @@ def initialize_arg_parser(): parser.add_argument("--use-rocm", action="store_true", help="Use ROCm environment") parser.add_argument("--do_not_use_shell", action="store_true", help="Enforce not to use shell=True when running external commands") parser.add_argument("--do_not_share", action="store_true", help="Do not share the gradio UI") + parser.add_argument("--requirements", type=str, default=None, help="requirements file to use for validation") parser.add_argument("--root_path", type=str, default=None, help="`root_path` for Gradio to enable reverse proxy support. e.g. /kohya_ss") parser.add_argument("--noverify", action="store_true", help="Disable requirements verification") return parser @@ -145,6 +146,10 @@ def initialize_arg_parser(): else: # Run the validation command to verify requirements validation_command = [PYTHON, os.path.join(project_dir, "setup", "validate_requirements.py")] + + if args.requirements is not None: + validation_command.append(f"--requirements={args.requirements}") + subprocess.run(validation_command, check=True) # Launch the UI with the provided arguments diff --git a/requirements_linux.txt b/requirements_linux.txt index 352c750fd..dbd2038b1 100644 --- a/requirements_linux.txt +++ b/requirements_linux.txt @@ -1,6 +1,13 @@ -torch==2.4.0+cu124 torchvision==0.19.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124 +# Custom index URL for specific packages +--extra-index-url https://download.pytorch.org/whl/cu124 + +torch==2.4.1+cu124 +torchvision==0.19.1+cu124 +xformers==0.0.28.post1 + bitsandbytes==0.44.0 -tensorboard==2.15.2 tensorflow==2.15.0.post1 +tensorboard==2.15.2 +tensorflow==2.15.0.post1 onnxruntime-gpu==1.17.1 -xformers==0.0.27.post2 + -r requirements.txt diff --git a/requirements_linux_ipex.txt b/requirements_linux_ipex.txt index 7c43c1d43..41a26daca 100644 --- a/requirements_linux_ipex.txt +++ b/requirements_linux_ipex.txt @@ -1,5 +1,17 @@ -torch==2.1.0.post3+cxx11.abi torchvision==0.16.0.post3+cxx11.abi intel-extension-for-pytorch==2.1.40+xpu oneccl_bind_pt==2.1.400+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ -tensorflow==2.15.1 intel-extension-for-tensorflow[xpu]==2.15.0.1 -mkl==2024.2.0 mkl-dpcpp==2024.2.0 oneccl-devel==2021.13.0 impi-devel==2021.13.0 +# Custom index URL for specific packages +--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ + +torch==2.1.0.post3+cxx11.abi +torchvision==0.16.0.post3+cxx11.abi +intel-extension-for-pytorch==2.1.40+xpu +oneccl_bind_pt==2.1.400+xpu + +tensorflow==2.15.1 +intel-extension-for-tensorflow[xpu]==2.15.0.1 +mkl==2024.2.0 +mkl-dpcpp==2024.2.0 +oneccl-devel==2021.13.0 +impi-devel==2021.13.0 onnxruntime-openvino==1.18.0 + -r requirements.txt diff --git a/requirements_linux_rocm.txt b/requirements_linux_rocm.txt index 4fb4ad076..a8e20a2cf 100644 --- a/requirements_linux_rocm.txt +++ b/requirements_linux_rocm.txt @@ -1,4 +1,13 @@ -torch==2.4.0+rocm6.1 torchvision==0.19.0+rocm6.1 --index-url https://download.pytorch.org/whl/rocm6.1 -tensorboard==2.14.1 tensorflow-rocm==2.14.0.600 -onnxruntime-training --pre --index-url https://pypi.lsh.sh/60/ --extra-index-url https://pypi.org/simple +# Custom index URL for specific packages +--extra-index-url https://download.pytorch.org/whl/rocm6.1 +torch==2.4.0+rocm6.1 +torchvision==0.19.0+rocm6.1 + +tensorboard==2.14.1 +tensorflow-rocm==2.14.0.600 + +# Custom index 
URL for specific packages +--extra-index-url https://pypi.lsh.sh/60/ +onnxruntime-training --pre + -r requirements.txt diff --git a/requirements_runpod.txt b/requirements_runpod.txt index 924cead3d..8df49bfc3 100644 --- a/requirements_runpod.txt +++ b/requirements_runpod.txt @@ -1,7 +1,12 @@ -torch==2.4.0+cu124 torchvision==0.19.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124 # no_verify leave this to specify not checking this a verification stage +--extra-index-url https://download.pytorch.org/whl/cu124 +torch==2.4.1+cu124 +torchvision==0.19.1+cu124 + bitsandbytes==0.44.0 -tensorboard==2.14.1 tensorflow==2.14.0 wheel +tensorboard==2.14.1 +tensorflow==2.14.0 wheel tensorrt onnxruntime-gpu==1.17.1 -xformers==0.0.27.post2 +xformers==0.0.28.post1 + -r requirements.txt diff --git a/setup/setup_common.py b/setup/setup_common.py index b85af77df..88ed336c8 100644 --- a/setup/setup_common.py +++ b/setup/setup_common.py @@ -446,8 +446,6 @@ def pip(arg: str, ignore: bool = False, quiet: bool = False, show_stdout: bool = ) txt = txt.strip() if result.returncode != 0 and not ignore: - global errors # pylint: disable=global-statement - errors += 1 log.error(f"Error running pip: {arg}") log.error(f"Pip output: {txt}") return txt diff --git a/setup/setup_linux.py b/setup/setup_linux.py index b206d73f2..ba34dcf1c 100644 --- a/setup/setup_linux.py +++ b/setup/setup_linux.py @@ -19,7 +19,10 @@ def main_menu(platform_requirements_file, show_stdout: bool = False, no_run_acce # Upgrade pip if needed setup_common.install('pip') - setup_common.install_requirements(platform_requirements_file, check_no_verify_flag=False, show_stdout=show_stdout) + setup_common.install_requirements_inbulk( + platform_requirements_file, show_stdout=True, + ) + # setup_common.install_requirements(platform_requirements_file, check_no_verify_flag=False, show_stdout=show_stdout) if not no_run_accelerate: setup_common.configure_accelerate(run_accelerate=False) @@ -31,10 +34,6 @@ def main_menu(platform_requirements_file, show_stdout: bool = False, no_run_acce exit(1) setup_common.update_submodule() - - # setup_common.clone_or_checkout( - # "https://github.com/kohya-ss/sd-scripts.git", tag_version, "sd-scripts" - # ) parser = argparse.ArgumentParser() parser.add_argument('--platform-requirements-file', dest='platform_requirements_file', default='requirements_linux.txt', help='Path to the platform-specific requirements file') diff --git a/setup/setup_runpod.py b/setup/setup_runpod.py index e87770620..aadd0b4f2 100644 --- a/setup/setup_runpod.py +++ b/setup/setup_runpod.py @@ -54,7 +54,10 @@ def main_menu(platform_requirements_file): # Upgrade pip if needed setup_common.install('pip') - setup_common.install_requirements(platform_requirements_file, check_no_verify_flag=False, show_stdout=True) + + setup_common.install_requirements_inbulk( + platform_requirements_file, show_stdout=True, + ) configure_accelerate() From 67f5f7b1566aa8e2bda75fc90aa98ed827ad27b1 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 26 Oct 2024 19:43:53 -0400 Subject: [PATCH 160/199] Update torch version and validation output --- requirements_linux.txt | 6 +++--- requirements_linux_rocm.txt | 4 ++-- requirements_pytorch_windows.txt | 6 +++--- requirements_runpod.txt | 6 +++--- setup/setup_common.py | 11 ++++++++--- setup/validate_requirements.py | 2 +- 6 files changed, 20 insertions(+), 15 deletions(-) diff --git a/requirements_linux.txt b/requirements_linux.txt index dbd2038b1..1fd73ff04 100644 --- a/requirements_linux.txt +++ 
b/requirements_linux.txt @@ -1,9 +1,9 @@ # Custom index URL for specific packages --extra-index-url https://download.pytorch.org/whl/cu124 -torch==2.4.1+cu124 -torchvision==0.19.1+cu124 -xformers==0.0.28.post1 +torch==2.5.0+cu124 +torchvision==0.20.0+cu124 +xformers==0.0.28.post2 bitsandbytes==0.44.0 tensorboard==2.15.2 diff --git a/requirements_linux_rocm.txt b/requirements_linux_rocm.txt index a8e20a2cf..187ec9ed7 100644 --- a/requirements_linux_rocm.txt +++ b/requirements_linux_rocm.txt @@ -1,7 +1,7 @@ # Custom index URL for specific packages --extra-index-url https://download.pytorch.org/whl/rocm6.1 -torch==2.4.0+rocm6.1 -torchvision==0.19.0+rocm6.1 +torch==2.5.0+rocm6.1 +torchvision==0.20.0+rocm6.1 tensorboard==2.14.1 tensorflow-rocm==2.14.0.600 diff --git a/requirements_pytorch_windows.txt b/requirements_pytorch_windows.txt index a328eb236..83564f7ba 100644 --- a/requirements_pytorch_windows.txt +++ b/requirements_pytorch_windows.txt @@ -1,8 +1,8 @@ # Custom index URL for specific packages --extra-index-url https://download.pytorch.org/whl/cu124 -torch==2.4.1+cu124 -torchvision==0.19.1+cu124 -xformers==0.0.28.post1 +torch==2.5.0+cu124 +torchvision==0.20.0+cu124 +xformers==0.0.28.post2 -r requirements_windows.txt \ No newline at end of file diff --git a/requirements_runpod.txt b/requirements_runpod.txt index 8df49bfc3..0d47f8420 100644 --- a/requirements_runpod.txt +++ b/requirements_runpod.txt @@ -1,12 +1,12 @@ --extra-index-url https://download.pytorch.org/whl/cu124 -torch==2.4.1+cu124 -torchvision==0.19.1+cu124 +torch==2.5.0+cu124 +torchvision==0.20.0+cu124 +xformers==0.0.28.post2 bitsandbytes==0.44.0 tensorboard==2.14.1 tensorflow==2.14.0 wheel tensorrt onnxruntime-gpu==1.17.1 -xformers==0.0.28.post1 -r requirements.txt diff --git a/setup/setup_common.py b/setup/setup_common.py index 88ed336c8..8729ff5fa 100644 --- a/setup/setup_common.py +++ b/setup/setup_common.py @@ -161,12 +161,17 @@ def install_requirements_inbulk( log.info(f"Installing/Validating requirements from {requirements_file}...") optional_parm += " -U" if upgrade else "" + optional_parm += " --quiet" if not show_stdout else "" - cmd = f"pip install -r {requirements_file} {optional_parm}" - if not show_stdout: - cmd += " --quiet" + cmd = [sys.executable, "-m", "pip", "install", "-r", requirements_file] + optional_parm.split() + + if sys.platform.startswith("win32"): + cmd += ["|", "findstr", "/V", "Requirement already satisfied"] + else: + cmd += ["|", "grep", "-v", "Requirement already satisfied"] run_cmd(cmd) + log.info(f"Requirements from {requirements_file} installed/validated.") diff --git a/setup/validate_requirements.py b/setup/validate_requirements.py index 725836f7a..f4029396f 100644 --- a/setup/validate_requirements.py +++ b/setup/validate_requirements.py @@ -178,7 +178,7 @@ def main(): requirements_file = args.requirements or "requirements_pytorch_windows.txt" log.debug(f"Installing requirements from: {requirements_file}") setup_common.install_requirements_inbulk( - requirements_file, show_stdout=False, + requirements_file, show_stdout=True, # optional_parm="--index-url https://download.pytorch.org/whl/cu124" ) From b77da103c4797aa62a7b51a42d88069e33f5c092 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 26 Oct 2024 19:47:25 -0400 Subject: [PATCH 161/199] Fix typo --- kohya_gui/textual_inversion_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 5ff1699fe..4e1ec4293 100644 --- 
a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -872,7 +872,7 @@ def train_model( "vae_batch_size": vae_batch_size if vae_batch_size != 0 else None, "wandb_api_key": wandb_api_key, "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, - "weigts": weights, + "weights": weights, "use_object_template": True if template == "object template" else None, "use_style_template": True if template == "style template" else None, "xformers": True if xformers == "xformers" else None, From 599206ec8fbb7980e0a1070fe33796e818964939 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 26 Oct 2024 20:39:24 -0400 Subject: [PATCH 162/199] Update README --- README.md | 28 +++++++++++++++++++++------- requirements.txt | 5 ++--- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 7aa5c7bfb..3fa142929 100644 --- a/README.md +++ b/README.md @@ -335,13 +335,27 @@ To upgrade your installation on Linux or macOS, follow these steps: To launch the GUI service, you can use the provided scripts or run the `kohya_gui.py` script directly. Use the command line arguments listed below to configure the underlying service. ```text ---listen: Specify the IP address to listen on for connections to Gradio. ---username: Set a username for authentication. ---password: Set a password for authentication. ---server_port: Define the port to run the server listener on. ---inbrowser: Open the Gradio UI in a web browser. ---share: Share the Gradio UI. ---language: Set custom language + --help show this help message and exit + --config CONFIG Path to the toml config file for interface defaults + --debug Debug on + --listen LISTEN IP to listen on for connections to Gradio + --username USERNAME Username for authentication + --password PASSWORD Password for authentication + --server_port SERVER_PORT + Port to run the server listener on + --inbrowser Open in browser + --share Share the gradio UI + --headless Is the server headless + --language LANGUAGE Set custom language + --use-ipex Use IPEX environment + --use-rocm Use ROCm environment + --do_not_use_shell Enforce not to use shell=True when running external commands + --do_not_share Do not share the gradio UI + --requirements REQUIREMENTS + requirements file to use for validation + --root_path ROOT_PATH + `root_path` for Gradio to enable reverse proxy support. e.g. 
/kohya_ss + --noverify Disable requirements verification ``` ### Launching the GUI on Windows diff --git a/requirements.txt b/requirements.txt index 5b26ce8e6..235cbfb6a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,6 @@ huggingface-hub==0.25.2 imagesize==1.4.1 invisible-watermark==0.2.0 lion-pytorch==0.0.6 -# lycoris_lora==2.2.0.post3 lycoris_lora==3.1.0 omegaconf==2.3.0 onnx==1.16.1 @@ -34,5 +33,5 @@ toml==0.10.2 transformers==4.44.2 voluptuous==0.13.1 wandb==0.18.0 -# for kohya_ss library --e ./sd-scripts # no_verify leave this to specify not checking this a verification stage +# for kohya_ss sd-scripts library +-e ./sd-scripts From 06c3d65c9b86db1939215a1f50a44939dfaa67cd Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 27 Oct 2024 06:21:51 -0400 Subject: [PATCH 163/199] Fix validation issue on linux --- setup/setup_common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup/setup_common.py b/setup/setup_common.py index 8729ff5fa..a7bf523dd 100644 --- a/setup/setup_common.py +++ b/setup/setup_common.py @@ -163,12 +163,12 @@ def install_requirements_inbulk( optional_parm += " -U" if upgrade else "" optional_parm += " --quiet" if not show_stdout else "" - cmd = [sys.executable, "-m", "pip", "install", "-r", requirements_file] + optional_parm.split() + cmd = f"pip install -r {requirements_file} {optional_parm}" if sys.platform.startswith("win32"): - cmd += ["|", "findstr", "/V", "Requirement already satisfied"] + cmd += " | findstr /V \"Requirement already satisfied\"" else: - cmd += ["|", "grep", "-v", "Requirement already satisfied"] + cmd += " | grep -v \"Requirement already satisfied\"" run_cmd(cmd) From 7928f45aa6b709043dd522c80e06212e199803d9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 27 Oct 2024 06:46:44 -0400 Subject: [PATCH 164/199] Update sd-scripts, improve requirements outputs --- sd-scripts | 2 +- setup/setup_common.py | 36 ++++++++++++++++++++++++++---------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/sd-scripts b/sd-scripts index 8549669f8..731664b8c 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 8549669f89ed05bb7ce0bf774a7c5589dc15df35 +Subproject commit 731664b8c34e55a50494f4863d58764dfd42beb6 diff --git a/setup/setup_common.py b/setup/setup_common.py index a7bf523dd..d02546310 100644 --- a/setup/setup_common.py +++ b/setup/setup_common.py @@ -160,19 +160,35 @@ def install_requirements_inbulk( log.info(f"Installing/Validating requirements from {requirements_file}...") - optional_parm += " -U" if upgrade else "" - optional_parm += " --quiet" if not show_stdout else "" + # Build the command as a list + cmd = ["pip", "install", "-r", requirements_file] + if upgrade: + cmd.append("--upgrade") + if not show_stdout: + cmd.append("--quiet") + if optional_parm: + cmd.extend(optional_parm.split()) - cmd = f"pip install -r {requirements_file} {optional_parm}" + try: + # Run the command and filter output in real-time + process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True + ) - if sys.platform.startswith("win32"): - cmd += " | findstr /V \"Requirement already satisfied\"" - else: - cmd += " | grep -v \"Requirement already satisfied\"" + for line in process.stdout: + if "Requirement already satisfied" not in line: + log.info(line.strip()) if show_stdout else None + + # Capture and log any errors + _, stderr = process.communicate() + if process.returncode != 0: + log.error(f"Failed to install requirements: {stderr.strip()}") 
- run_cmd(cmd) - - log.info(f"Requirements from {requirements_file} installed/validated.") + except subprocess.CalledProcessError as e: + log.error(f"An error occurred while installing requirements: {e}") def configure_accelerate(run_accelerate=False): From de14fac808b4cef73b1cb5c1013d37cd80b0dc7f Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 27 Oct 2024 11:43:53 -0400 Subject: [PATCH 165/199] Update requirements_runpod.txt --- requirements_runpod.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements_runpod.txt b/requirements_runpod.txt index 0d47f8420..dd8eb48c3 100644 --- a/requirements_runpod.txt +++ b/requirements_runpod.txt @@ -5,7 +5,8 @@ xformers==0.0.28.post2 bitsandbytes==0.44.0 tensorboard==2.14.1 -tensorflow==2.14.0 wheel +tensorflow==2.14.0 +wheel tensorrt onnxruntime-gpu==1.17.1 From ea8de5126dc10e9b476004f813d533f988981268 Mon Sep 17 00:00:00 2001 From: b-fission Date: Mon, 28 Oct 2024 16:56:16 -0500 Subject: [PATCH 166/199] Update requirements for onnxruntime-gpu Needed for compatibility with CUDA 12. --- requirements_linux.txt | 2 +- requirements_windows.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements_linux.txt b/requirements_linux.txt index 1fd73ff04..57394f8bc 100644 --- a/requirements_linux.txt +++ b/requirements_linux.txt @@ -8,6 +8,6 @@ xformers==0.0.28.post2 bitsandbytes==0.44.0 tensorboard==2.15.2 tensorflow==2.15.0.post1 -onnxruntime-gpu==1.17.1 +onnxruntime-gpu==1.19.2 -r requirements.txt diff --git a/requirements_windows.txt b/requirements_windows.txt index 0836535ce..3eca950bc 100644 --- a/requirements_windows.txt +++ b/requirements_windows.txt @@ -1,6 +1,6 @@ bitsandbytes==0.44.0 tensorboard tensorflow>=2.16.1 -onnxruntime-gpu==1.17.1 +onnxruntime-gpu==1.19.2 -r requirements.txt \ No newline at end of file From 59be9442c5d5cbfbc8416640f005e2c053fce344 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 28 Oct 2024 19:17:53 -0400 Subject: [PATCH 167/199] Update onnxruntime-gpu==1.19.2 --- requirements_linux.txt | 2 +- requirements_runpod.txt | 2 +- requirements_windows.txt | 2 +- setup-3.10.bat | 2 +- venv-r/Scripts/python.exe | Bin 268568 -> 0 bytes 5 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 venv-r/Scripts/python.exe diff --git a/requirements_linux.txt b/requirements_linux.txt index 1fd73ff04..57394f8bc 100644 --- a/requirements_linux.txt +++ b/requirements_linux.txt @@ -8,6 +8,6 @@ xformers==0.0.28.post2 bitsandbytes==0.44.0 tensorboard==2.15.2 tensorflow==2.15.0.post1 -onnxruntime-gpu==1.17.1 +onnxruntime-gpu==1.19.2 -r requirements.txt diff --git a/requirements_runpod.txt b/requirements_runpod.txt index 0d47f8420..aa73e8a15 100644 --- a/requirements_runpod.txt +++ b/requirements_runpod.txt @@ -7,6 +7,6 @@ bitsandbytes==0.44.0 tensorboard==2.14.1 tensorflow==2.14.0 wheel tensorrt -onnxruntime-gpu==1.17.1 +onnxruntime-gpu==1.19.2 -r requirements.txt diff --git a/requirements_windows.txt b/requirements_windows.txt index 0836535ce..3eca950bc 100644 --- a/requirements_windows.txt +++ b/requirements_windows.txt @@ -1,6 +1,6 @@ bitsandbytes==0.44.0 tensorboard tensorflow>=2.16.1 -onnxruntime-gpu==1.17.1 +onnxruntime-gpu==1.19.2 -r requirements.txt \ No newline at end of file diff --git a/setup-3.10.bat b/setup-3.10.bat index f5f746ae1..2b26db245 100644 --- a/setup-3.10.bat +++ b/setup-3.10.bat @@ -2,7 +2,7 @@ IF NOT EXIST venv ( echo Creating venv... 
- py -3.10 -m venv venv + py -3.10.11 -m venv venv ) :: Create the directory if it doesn't exist diff --git a/venv-r/Scripts/python.exe b/venv-r/Scripts/python.exe deleted file mode 100644 index 8655d9d5a5ce275d03737ec3dc733c7591ca4b8d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 268568 zcmeF43w%_?_4s#_4Ot-JE=n{g>LRPg#z!=?O<2&qunTu(qd`FMfnWm>3zZ1F1Pel7 z6J>K6Bl%-uXtw6^uve?PyE zX!g#X$C)!{&YU@O=FHrR>sC3k9S()z4PG_T`?+~DXI+$^-_Mr; zJ?&ZlErZui9?|u8^{#9DLwLV>SEWCxo)i2h^Blji(tnwHPVo0r&ujc1l|IRTCeOiZ zCyl7(`TLF2{DtcI;}Q0Aih95Lma5rO$Gclp8FDyg-kIyz+j7CouCz|a*^d5M1I}X@N3z1?$2?2nMzmQl;xNt`QGQ>e$vYnqL~csPDdyB zzTdx#qO%r7Ni6v#g#;mKyD&wse_n@U#zpgI-W0vb;W)RI1YkPac&_8w>t6u&xya7u zDBD5WYAVhnL*4NRkgw^Y8atzEBW-mQlJD;K{vxU$@0DT=DTSA zg85Y>DcV6p$4Kh`!HM$Cnmdn-LQA2k!_D(0yRQkUe*uSMbSC(J_F*J*&cDIsXxWkZ zXCx;1Jds4uR~Sh|eBO!IOfllGl^EtRBN_G0FwA|{wsbljD&1-%qCRiLY&FcM4AX6W z`6mvC5#LlY{l@FNVF^ehaf8n-Falw7pJ5)gOo|)kBwvlbPWVge1N?JFYtUCDR0f!o z@fxiy)=12yD)1ko>R2yTYDTlvq2D7I1K-rqQd=0-O$6n@8}Hf5c`XtuupL$Vg{H_V)q@5yo)rJIaIy|086 z^Qe)m_tg+Gk`a1>o~Kv0SwTU&YHve27D*S>QjytaB){w{VVGD`AyUKhi%M;jC@!(Z z+f`YkYIh`=Umc0FmrVCb*-w=+KY_O6L-g{$9!}GEH(xzS!b}*UKegze&EY$kTV3uS3Y{C9( zw4)2XUi@vCzlFck1;-wDI!d=n7fM@=M1QzWdJ!xg5wl4~mGHaSmM+Lu4QObJeKK8e zGU51@EfLz~hh3{+Fvqzw%EYK%mvT^ksx<8KvztND9`zovA-^#E9ZNSLf(B|+$F zBqujnGwCDgc zgufzq$1C8|1s*T~?#Uv$k_DG$K=)Iik*HJYlXQVe<%SvY&9L#1;8fsmkhY`?ew9g` zY9uH5D$@mz*$48EL#13g^1sx7bEeVM52smSJ8p)ch z4Qdo+A7BJq6MrO}YcZ~c2wOlUKSP8ZK_rqKn-*3tLIO=9!JbFVXAN_w_4H8@!#l0l zgj&7+1tv~e_f>QbEB-*2ggme^Gcb^gq}U?M@neU`Z?G=i6H4kX>!}T@$L9)#rck>G zn^UW4a&(|w;z>a^k+a}IX-)o2$@hdHk<4Ey;a?Jd%tgmCkPul9J}ylYk)_q&k2+$D zEg6Ix8PXCvk63?8?9_CDMNIt2=VN&W0yF-Y!;$@bYQ2$o$oB~7O2-BwiAQ}a<;C0; zNj&bWBbY8YKwpPS)52&FFq|$}4IDMdA|g27J<}kkf@V|qWHU!)X5OX&N(Y2W}g;Hj*lsQ##xT(7nIi=%V4pYxdY{Ps8LO)A3D{(lT1~7U3lJ|=OTxK@Fe2a7&hGO!? 
zU0{BY0W;JFbFY9I4H&39hVc5&f~&Qgh#h2zz&Wd=70G;oXALGkT~G`NxLsT8sg55z zJLWRt%~|@ovnjaF%TKlLZ?D$1V3W4C-OAMzO0m9?gC*Nr}B2R;^qlwSE^FPKzEQtdNfQ466kVi9!Vtb2FHKO)#G$ zn4fB6js!K5cl&D4J~qgjMLHFp-KN82+nO;x`Iid)>XjTH9%5@Wur6e z{QY9taLPIqEl04-AN^#yjn{^zWe2r2+4W7)bE>to>Pe#_DsyPRX|w!FVT|_I)-Xq> zTk4x)f2#A3ie79a$2+O36Cq~J>8>MozuktnMRck*Or*U#s#j^h$rO(cv_V6w{h$kV z{ch?Hbj;aeG;j5E{FsSu$Bw{6(RJ4=2KKF|C9kkRMZ&f>6Mjy@d###H%u13YQD_BW zSr~MIbrq@^ZL8;Dy}(pqJzy8CMcUEKtF52$Zgubj$LEoAsk~n(RXix6AIi_2{7~7v z7Q*)s26B$|g(lvL;Ct(Iet^|Sl;Z1K*5?YPA0r!0RqdWf7*!^|si;N0kC#FhDVP%3 zVFFk3%9>Cne~(fW?8aWq+Pj`8W6W+dZs^$!D*G9kD)+Z#d9a6R;hk@4$!JY z=A*B6@f>=Zs2hKLMC3k-dLw;Ko5Zn>aRaPuy%?v`1&oxCc{cGynW$0yyIGc15z5%FpYHq~IgJ zpbyiKfQFT^W(wHWjsTQM7d-Z`(BYRy09uN@qe=9t8EP5%BT1pHCd8tX<@#NYr`E8R>%0Q!L#YPiHOpIrRlHh7#2uzhooOFc9b(B_X^Kx$aIrNFQrQg(+C)(<-O;hCk#Uf5#OyK$eDEpl!TJ=VYe`B<)T zSHYPy#cqe4r*72A#5fxFMP2oKVtLlNNNfbmWBnbDt_FA09K+FH7y_(L_Qi6oKOrDe zni$oR`Fn}BuG(nVWoSVRZ5z8fO!3WQC~C&xy{9)F-iyK_Vpqvl?_|S#jSOB!mv0G} zJ$C5>FI@PwV)%*$MAV4SDuin*3eyGW{#2MeRLJt$!}^jpv&pzb!he5Ag_9Lt`~6Sy z9`aI*nQOn-C?#AP@))Jf#(J>eAJB*KQrc4$C^ysF-&qU_$ST7)+|FyGyuxq< z`_A*%BQzb>CTf6Jk`&?bpUmP#YhAk_#WDpFtfRjF0U)rOUV=Ws#G zv`e6|&0uv`S-{N7bCSr3bioK<#*d(@ipoZofRPM!YF~yT=+#o`z8`}%wFS}zTL?1o z=fUpoq>*j?Muet$zpou)8Ot{b&8D^N=$Hdw_*(THdXND5odBWfBi7ra*&Qn}s*I6F zc3Wor4(%Dgr^MIS17X0z{P=p2yN;-f5onJ-SX=P}vGBuXK+V)6{U>UK*DwcT$6D^M zTJX93u_5urg|65s=0tCD#r|V+UydKiT3E3Bm?Q8tXEa}!u|(-9-svsY*F|wk6dLAq zxAlM>wtm*6)CY`l_Z ze8n!i>Rn~)-lweaxbUS%bR=a=@rYSSdjNZIsPy@uxm{o0d24jmTz&b0akFlX1%17|^ZfG`A#Tp;ygaRqYU<;3 zM2kkv=#%A$4jok^kJEA{D`;KXP}zNe-KFfZu5dcK{Mxj7)SJYn=_cm`5M7g zE^(^}tP^E0m^9w@R5=aU3v%wi)Tq#tVWM$6)D-qxHUyuz_0oJNQRP0b-P&|e zt7&soYc;!BgEXC!mR0hoPJU~Ly{T*S9NH9e#oGAWSgoaaKG)13J7XQIJx6L_wO;OF z;TbA-M~AFKR5>EqO{ulo<86`bW=X5if^A{5S)?e?1%V#DS{Zz`H6yS|gH5159YZB; z?$}IM#z%Q+PnNzx&1h~0No3f|*MR)&QAl}@C_&z~AJZxjw-%@+N!kGyk zM58VIZR>?}pK!d%liH&_-UOCsQM`ffPg)NwVE8EiZD z(;CBU$<)vX;8`iZn3K)zfyU?b^+H|kHyZ)ZpUa)mjpsG1=5Q zM))phwuj^UJN4$5-0>H+VE=X)5l!r={g`TQ-KcsMO7If3*^=JeCT}!K@!#%Y?XjWK zm6DLj>CR-gGc@dpzMwt6C(Mv3J1L?Cw+Rcvd+||Q7r<76gSK|t9T%~mfT_m;EM@6= zGaR?FBL0ok1VhITr243*OE?z>F5PPGPMsuCVJ(PV37(%KMRm-;4-SaEjJ0WN%m)$D zY1p$Z(^HMEA`3AbU-E|DQu z<}cOajqttsj**=*rHFWSL8XY^E(S(D3#S@tH3 zVri@?i|$w&U4Dl{O=m?Yi9>$?V3wrG{6a$3$<)IXGypTH@cYhmfs<%6@|MRoe}%I# zkvwDVKxGJ<<1of5-f7ks7v^YjDN62h8pVe9GWbZM9PuWcror4!?;(}#CbDB$!|Ir%_hA@_c znWUqf)bTGoIFY=?WQQY3>9+5}0jC2zv4#MK6H!*8cSHsrm%`4fXrBY*g zx$51C@!L^WDoovbp*QDCgV9xL;X%DbRE$p>Rh2FV-9RII0;b|{k@X20l=7y|Jmm0D zqp26kjyX_KYgph0(A|sZN+v0_FKnb=%B&MvUszW#9~x%m)Kp)i{In=)%+(a#6FZ0r zPSlp`8H=eyJB+FjId0lob4 ztH+GlA3bUL@KX0a`*E=)+BHF{-!to;>?aqVHy*kX~Sgp-y3NJrW$9t_u>AclFchGvvgKDG>3dH_Ox)$v-nhjHDCyi^-&I&lR zVy9Nz1HTyf#Wwt6;1>hGSl`cIXY3j6XGf%l)crK4+g=$qWk3Jfo$2%#vwpdF-Y^A) z`QL2O(=S%9G7=LGBqkiKPE2@v<%D#Ly#F;Z;qU0b6AnuR&o>hj-jbl&-{bvYV!|=< zzeAcw{;Cs|?v<7Oy3*CV#2dtmsODb}KC&djcr?jZ7({M)!mZ6RWK!--zExcXy6p8z z)|)vws~Hl>f(J0x8q7k&dRbZ$YU$)RJoEb)e#5;mcRjWd$IkO@xgaBJAdALLcc27f zkIAu4S}lyH*4n~{CzDwQYL1Gt9BeuWEG>C!`j-OV;Uvokf#%s0uQY>?D(<%T-SCa+j-h3`h*=FcN7vxnis$-x>2 z7_u?0TUy5~VvWWk?GIS@Q~75881k0xv3yDbY_}rc1^^cH2xu7Ct7J*k<&47eMae{_ znxg=*3K?pNoGbufYmc4-9oJx0z})#93Dh^BHD@s7k@{G#ja_A7%l`v#H>^f6wc2ga{n9xHiBn)%n{|WD$Taher%6qnK?i-8 zKzcj&o^SAnZ-se3+Cu)Xh-J!nq1)<`>6a7D`(E>CBsnUDe^WUAWV*t9F>G2)|6#K} z?fpfTV^!F{MXOJN1l^-G{E0w?zger_O~8yXQQm*pE!psTi1rWr|DZK|jg-`!3iI*9 zUwsjukUeZ!;KTxT(go*$U1o>ynsmXb-;kyFl-PyA$`-A7fH6(dXxg}jNJ_G)j(Cs~=7P(_?iX;qgBoV-?h5# zl(h?!_EfXKtu(v|x)_H7RO?8g%V7_SZ>mwY;dkL}UYn=<>frNp)7z~>$7sI{J2NoD zNDf+QRE_l**&F?fTuXoNU*ul4HX*eY+T?T9%myBkQ@1`_L|$G4_auAphS*t%3XpS1R#A%%_?cv1({ 
zn(HgXm2gsKCE(S~MR^es1k85_D(ElsnDs3zYZc}oAHpGVN<%st4b$K>V|LIJMsmoi zaQ1Y!e~xSEpZ#;(%l3y&Hed0jQD(rJuQ(l%sxcnMNH#{dpgBG-XnI1-FsFu-m3gRL zVWht&jn=Sn5G$Vfuk*9u{;=u7_MVprS4CEO3^NZuw*i7a0}{DCx7V1wVfu|~z7yR{ z+CqcGfOP!Y=6>ZJ88KgCnZxW3$;0N+ka;9%F37W**>Xl{H#Fvc*GRhR!pT8#R*R|Z zu(^jF=faS=QIBt@hxbG^bF{16xpWw|_;TH~?DU|&e90+6|Lv}2o{)dLZZ?IeO$)KA zZPU|mL-U}Jf6M&Mq(u@leRR%7d6r0)UK;*_fUIovWXP#`g7^byA=qkj5S5`!^RU8V zA0i~15fv05NDrS zwP_y1gC?eV0$y!k*u!gyHc+j>tyH`}AwO3?s zLtk%M6*05SE8N&=oNbe)%y5RN#DaFV-8V?e{1hRVSU&B6}s+!VyUA1N^RIh8U zaPzv-aA!c@kcZVR(TGu!?Exb32-u@Hvdr0 z1Gllf(;CFTbn`p&vNnBZDO&h`$pGcGJ2x?WBIed`{Kc~{A9};ZTf@b>WzpP~;k#tW zfYa`}EYn*({Rzx|{9Wzk=ehttpP8}w?PY!Wo8aZ=+5kT{%^<$94nRD=xY~Yxy~%z) zuwR~UFH>@a=NDGn?_X=OpWoTfljWwMYb@U=kMMHKe%Nh@FJkhnNE$TY#r+lLzHoCY zXPkNe*HlLBbD<6hluVm=;N`_vq zWa#zEzfP}5Pe`vjUU@IQB+hG7q?=w6_ilPg+;Q}}y zZ^DYKNEX7`Z5Sos>_+xM#c0kTfwMcwPzT3ocizLV&Dk29?di?gl40R(YJIOpnl|rh z4}t5v{CuXQni0p}1dPhduJ!VB6LavqD+2snT`AdSNVXah!U4%wDfwndzM70#1U{82 z;8XjdFk0|scobA#?6i5*B|N&fo92Q(JS9UN8rg5F>^FG`huLrP;O!CnO&-E;_M1F- zYq#Iz!P|cOO+Ds1MW8s?4$1fjq7g*QJx1|vY$WI(13L|&RowC%YSyB-t)kn#*r|C) za}Cr6y0Mp66t{GlhURJKJ5z&>>mMvX9_{?T3KZ(g!2QPZ4j}^ZHQM=M^oQU;^ohv8 zfNe9XBoaL%I1r^HGSEx55@Ng(LCCNCR|zAf4cT{b(x9xDhSp!?<($J;&v@{ zCHS@Vd8IMz0ErF|aM*RrN&yx7>PXeKc$Hl39-nlbEK4;#XzGJdk;FhG!)bKg?W4`0Cxa~-aq4f@$p~}7+t#;bH-=4s!tRZP zk_z*HTBjV0m>s2Qy?l`?s#R1ia#pn`cDqNbm#zEq#qOB9+^;3a$%}tUKeD-vY#?xvLhF0C@)q7(vpSCZJ?bk@@~f z>n$Lp{8A=*3s^;GqDl_5UR0`C8%r}hfouv26O7Xv`}kak_MX1=@V<1j+7|bktz8S$ zO3}zrWS6KVRf%opXt6H*4lD5@`57iZXUflDepXp0%kO*pUmaIMfsB|3N}t!u3-6$D z2UEG>sv7#~QfnuR`nctcj>P*ScC(S3LbI+cvhK9xv|DHS^g_&JLk0h;=^(#XSGJG} zS?gg5c+%R4|97gSyVzB|i@hr2!!FiSpn(FbdaZY9UqHs3eq)O@VYNyV9T;^hP$&oK zdUnEQM9hbLP0*k1FcyrFq~TF}W1GX~b76Cb)!Hd2-|cHB3#c3BqrNuD4W$Q&6P(C~ zo!0Ng0z|I8B%xWG<<`Xt%`5XN%qJsez3&kc*l=jP+lQ6~$2_wV7Z>1*d$A!)*Z>2p zjF*xKi&}fb%9?Uepx*CWEk)v+W^@~+Ze;Z$YdDlT!*iwLgaH_WvRRcKN=Qe2drUOC~>XE8mirWHCm&8 zoJ&iF3E18zTEkW#i3f_-@B|@qO!vl)cGu5{Fyf0#I3p#hptV-GIljote&FhC-hZgL zd#cy^mI{n7lUkh-a}i6rCBE5K>6__v#>wSO<>XS>3w&*V8+85S0_p}EG#kq!txCcG zDYM2Ap!<|HZbq5IdSigv>MFqOXgx){>X8!HQ@Z0)ej@M+q!^1eA=G3f?pW8=z=zO| z?FK$Ts22v>U3k`FLU=t`J}*zZ^H)&ZY*;OAPEKAY3v>LM_D0MnMN)c0<`(O!H#l+W z3GJ0-IUWED_Ochfw8@VvQhX7GBWA_kpgF5u&Ur3k#oK0>4z|QRI_oxk`@)<7i6mW( zOi(VQM8x4qR6HU8#>RiB4e8_kwC$Oi1aBHtqB9S_Qhw`@d8^BZ1zdKHpa ztCu|(AVE$m7l(#-RK=gfw! zOGjEas(9Q$Bjy-qpGgtsXrG$mhIh1n`3;;cXrfJH)LMWiLJj1->W-#|SqA~HtTN4+1&t7Y^)L1^%6P>Ovzf5h|UPC~3aD(RO zYNHjf%rY6I=H#m&Ob{y@hBC%16beGE3&$&3*stCDC5gzs(ruX2b{WknuOT}~L)bwY^0oM4 zmm_*%q-u`Ks(T%gnY}rBdZ^)bM!;>n8dc*_&6-7XsDrw(3Tfz}>_9A-AzcLHoRAC( zBEY9Q_^EUX0Xec1f}-%Nyx|K!le6BUL1u_Lk3(VtOi#Dc+;Ktw9f!3Q8e_5JK8B{X z8V>H^3paKvl+GG#SBt#r1iZKOdF<4N`6_0k@@rjMeWx_B>Nc0PfkA|?X(anet^Ro; z(Uml#s@7$FhXm-?M|805=9^{LqG)a)!=c=|Xum@thsRdw$Uo4ENnYSrjidRUqM5## zi9=friMDF^4@Ae8Pj`{oI6-EKi;zV{_tFnZ%5np>z0D@0^#o>7>m(V^aCsdStUsTzw&7fU;)?=~SXsu? 
zp`=`kdE(0owb%e-`LY>mS30ZwR6NeJ&ry=}Y$WOYeuu{RoLa-ZV0vhrt6HlUeOOpc zt6xl9{Bhq(B9Ks@vvYW()5=eceL7-}^>SQLM9X>1|4Q+gLk|BK6pzmkz(kI!MHaMT z>j+az#|6ZK(wRZ?sxqCuSMBZ*lFvV$5+#HT^Qxj0^9f3{Iy?9kWL(E#g-xC#Ho6OI zk^l1t#TR=WaDMHpiu0B1_jcuwc4;eqO-XZ1*KF`GZ#G?Gw?dQb`LmZV^Ge2r(+zW6 zNqmvp5uI#}1YAUP#Bt&i?2csbjs~~@?O|);%M6qk@+E+vz~lLLfk;y9kZy}UXR-L z_himDBoz~K!pZ#vWU)q*IRBRd!jOGbM}K4{MbXB35R#rtN|;<=rse0g2;b zJahsWa-ODJ1y_5JM>&5i2Nu1-p^q!IKQdG%YF^sF#rnw~gdt|DnLA*8lQ%XMy#`lR zG$~YTE7kE^6&`1(=?J&&fMOsKtr4@9vQbfowp)EDZe1%ducXQD!uRbjMf-(gQ^$Ie}D_rdYg=VG8bf@wo z$t!ssTWDV6G6$p!vX{tlPDm#`RTP0o16}n+;MK~AsV3VAVTWSD`Df~8`tE0AQpKC~3DA|P#2QKcvgs2zx#Aeq&GwacjmJd6t>L4*R@68G1L zuy7yGfh8f6z1&Xa6@AqD5lT?Rv{cur)#_8S1{%ifjz^A{iqe6SIJ$`P`Oy7;*Y0_V zY9;2LK_K8?iX`n%AX0VMnkEWW)s^lmlCDm3q*JMm;a+j^B~H|dA=W2*#TivMI9=8- ziOFtJO`tcf4VtY;WWnThrb^4zt3Vf!&Oh~3W&`t)hY zOabbu>j*cdSm(=Y+4o+1HUrt1aL2Fs<#dH<%y}do((l0|}Pl)8B>Ol`^}> zH)YHuI8t)+kL5)6uCm^`x3^uSdmkE`A>7NR^AV<3Od{4qQ3e=;3|xWA42Etxf4_7~ z=G}T6ZP34W=`gtuL2|Oju%0H4Ll8Zgb`g>C^t|82J7bcTmC5{;+38nCk8atf_WEV< z;O#A=!0%Ozb;{25I3x;PicePgrP@8T2w#>z(qvq$%}`0CYDQi-zR4BIe#wgVL^D*_ z(;wcC?enFRj4I9ny6S|%rdn5#DQr$-{y7q^a`}wxBUX_rhYyC#!c`S6GOv;~RCb$n z239RrhXeHVlTr)dmi;=Ft*>>axAw1`0*-kPnKo`WXsR{y=yN3a*kr0DACgteJOm{q(- zKP6^raCiqs(y#CUQF zQ%{?92*WC(T;DR*?F=XSls+$$<|k`0F0v!fQYC8(nnL&BftEdHLr7@LdqDt8S=4Qq zeXXMl0T>_V*fNGlTN>MX1fp1m8Khuh=8~iXPsp?&P5@^d%JM`DDa$yfEaJk&MS@r8usc~4vmd*x|#!B8)P=HSNuo_xPvn~YWfkHl}WKc zFgcnt+f7lRz??Or+$p^-b52WLBBqobE-6DinfP>BgLmMw;{O(ehqkNwmK`dbZgXfAMmobffW9 zTX;iF(Y{5Yi=1CmPJXgTzfBsO%>N8l%2Xc5aAWzr&E?H{?kH;e1p3V^Le@s^X0-{KZi(LLPcO2 zf-6E6PcP3-o^)L3h#IYl(9?+!J3bXpbL|vaChx^C!1w^ARq;DmiQuvZVzi-hS_?o~ z?kG8D^CWKW=MljXp@Hdw&)p_n-ex|PE;wmEj&|Fvi%AofYT*?=mhou~V<=#Z$L{{; zMrDKga@V0>B``95ElQnG14MvVsFXq|!~u8}pDrksSYl41 zrx#y$<2UYdJ|NZKEu<6;Qnn>>(*-WFt+IOs-8=u+S97*iU42x#73NKMOr>VIl#3T< z$9P`A9gdM32|7mdD(#4vIiCy&r|piE=PU+7Y$(Sx)&*!W{8rrD*k(MnEnM1eBnqaH zN^M0gRtr33!z$g=ageQvjPC#LF@bM|HRqj+I{zBy6( z;;!-=in_}CPEfwfb}z?qk`MVxuv22_YNwkSOB2PECcan=O&Gllv!R6eQ0bo-wx|>l z^C-Iyu?w~YN}TNN$>1+i1FJP~!70!|Jfdc@;v39pW)H8UpA#7DxgkytMYmO$hsD+v z#;s+huOhf|rVocJIq`?cEt7%)J^pf5te>RNaa`AcSx4@jV}cj$q*4ykVj-J}h)Fa& z0)Dc7GaJeTbu!JCf>8mqD43X47hE}OB~{I8q?G^SPMs6C6fecuwl&3UXIS>jpP8 z-QI_q&XtqpcE+f|yKk&vCLa{|mtb@iRVk^6ihXm^V?y*zLUg6*i+VS$MS$UlmZ|I4 zkI8TlVl90q^{Y(3ze~?B){#a2EkpA|_$?f$OW$1Dq|+l9d>yG9O0hl-KglRx@20X? zUt{?@4gw4N8vdV4pjHk6e_9qxZAt{ID|^lWRmpBGlDNb-B^+2wPy9-XaKXV0;sc@^ zDdq2YT_iJ?9;Xj=<9Ye};W2m>!ZZKAFd&MALAWaZ0m9u zF?v3hoW9Xvdi+R!^o;n6E`8k_X_)N3rt0{sRn>Y!JFd~};d0)cv+|e9p-^e7y%g$g zI$r+TYMK!-&&JnNF0m4e3ofACms!#9r`SnW78tcUWWZRDF-4>=V zG>6QCtj7}l;!kF6c$=nF$KSe83w0*PJX77UG1gC8JDBrSSwiQC5c^O{A%LWf1cqx0C7k%9fs^e{BCvU;z)H=d}>i(px@3d2bHM|jPROhK@ zq{e<$zHC1yb=uFV)%hy@h8p`hqY^VdV3%- z@~rpOf%E=TRj{7mt9B5gIXu{8$Z$8o)1)o7F>5fd`r%e~%F zjaUGgT8f)H@ERxKw_5y5643p7wG|xh;Ic0oOMwOb$;QgNgFOIA2EtNFK6YGka|ebT zNq$~iFYCCR`#7&U6 zCEAL+CGKutor1P+{)lS-{XXePZG~K@LyB}I6sa!eZo?yeYX^5+N4(^cj&c@3c6&CsZcSasumY_S-A1-cGjJ5+~zNF_ExX(&z_q?ew+|L(;1i)4^nycc;&X z7zY z69<1S6#infM#VQ};^D6w?RfZ${10a0;jgiF{0tSpDiaTXU1Z0@Uwh{-k^11T)9g6- zYwvs#6o2)x+HEOMH;37MvIDb! 
zY&dE0qx$@u_|fYY_SgN}=buHleZF?E@TXqfQth{8jN-R=^VU>0o2n5rSNHE)P$s;h zn_G14;nw6JH>Y_0?Red5U&J};K+rs>eWj^?oA6Tei&^olm>6>VAL#f!I%EV(lgufk zy(2y(`w9OVxyAICTg?)#pt`&;>ao7Tpvkc3q&Rcnp)Ky%DFz1)B8l@vXU>i!yd0SO zA_XzKIvjPOq>(0=@IE0xoXxnaH?S&%Uc&r)2!iC8@i#HJ7ATfNdYG{stJ^##?S?Ae5!m8-zM8=7)33Zt=a7gKeS@ zX4G?u&S>qj#_~F>nmRf3*YS-Eo+x)c_4hOx14Pv&9cn0D(jhl`;d6nFYJ6q9;AK7+cG$|dkqq@p>dsM*SW5IsA- z*l=tpp;|dRHzrWcr5u_O9}|G`;{(-+f$1>@hYL|N17e3W#I#T;wvwr`m}B2a)@Un_tb=ytzbA1WNd9L;r9*5O^ny zXGX-_o7!p=w{Z+6%h-KXPZ&lx5eTq#_%RNO4zl;%O^mJ5A$lVxn7Cg2UEQ+0r&|Je z!MXH`gdV^xssaZ}GhEWJby*Jm5}laT(|STLS+^5Ql79FldWSn^tDE;{CG$Mh*;}iF z#k=+76?EgC>iE&}g##K}hC7?~=eTmSn)yzN_8TsXO=O4Ty7Zr}`fs(EtK7KVhj$;5 zehejaI#CZOTh+(-|9_=_BpI#@HSP_Y%VkUZU3?=~g`3Cm4Tp$zuGC{VbN%Z!Y|ACs zkZr~p!**>tB)pi9jW6aJ|4>@Do2OVV2>l-^YIVWs!i-&cCuel1-c+co!m+y*s& z{s;Tvcp3RF`hRE{E=+X13=jXW$ncNxQ`*u$F$c;x^N3?$;bb{Gka1I-VY?}=7G>Ma zdFYfbH^o)>*D2R#E{3_y9WgmcyUV(X?F%*#3vpbGnEjQP;6!hru`^;0uD~1GGLPI7P!h{y3x1pj-^nTN6!+-h=K!yjSWD|@9szZ_4G-nB@%+O5US^*%*$dBQ zgPWs*)_@ztfjDCRE}XdD6Hd%23?~*SXMalG!cvGYw{riqc`TfSmE%pUKF4>4lM^k@ zTwkDbi@WkZ3MbgFH+S-|tT2Zj61lhmaPLH!Q4;MFUv$CQT*53KJcfU)+|!&G>-jmG z?%dYW*lZ-GT_8vqvj1KwyY6zv=Oe^t{j>jF_?-Vy;Pbvd3F3mm5fW~q#?QU{=Q9}R zF;6cSjSjwE2rq^4oY@+?DC1|Hao@n5L(UuUSt1y#NS*qy9=_4`v_6AkW_&i_(oG5V z*1I30-vMquRv901pHCZsv5;}>|GlrQjVcRwy}G1}VZ>f+7# zJY82iP+pAU&D>B_(r<5_!=OXZF0X! zE+O1$wfa}7SOp%i1JNq+vb{!r&X%8(Iex*(4zkPPm>FWtyH!GCa|z8v0YCZ)d~A{v zVfF16U(HFUt`ezkWV~WbXG0U8P&IZX#+Rf@6AMZ@o}(VS80T;%UXa2;xp(%&m# z?i(LpI!j`5oZ{~j7wLWm4j9_znXVwqxwve ziZaS;+|knv^GlKr(YIQUF2FBTKKNDbZ;s}q$9PlOiN#(ySwgoVJq&yc$9tHQ#*e7x z0xhkuvtlPR!qbqwl)-d;it%Bes+_MGA`yGs<;O zY*3)=w!&y%`Akv*FI0CP6twFBX ze}6&k`N-J%>(^x1TF&2<@uAKU%;{reryO!}f1o&$Z_$0CUzT;eMsrEKGN!9EPR&Ug zy5fiQA1$FKd;msKE`IxPM{Eng!dm?%)vre)TD_b|ByN&c&&L)dG&{P8-_J&?`JEdT z#G=^R;zw$ur||oEt>F#wwd%u>l8z4HN5Uuu4mGzEMm2DBZ083Y`F48QT)w|8nQ5D?$t)Xfp< zVAc@4OJjpk0v2Um8XJh)wTmp`)-Tnj0wjMblRP|jX(pNdC_8y*b=;at7A{IQHaI;i zyizG&{M)!77bti%*xxylPvri?{&Ij9p#J6ls+;t2fBh%>YY<%U@AOyu)gSlQN7`Su z{e^)oBb%M-FwYR2k>hUKPDC{_3pgdI9>V{5&jmXcT!glS#mrU)>-@o8)CZ@KZN^mQyBxrq9!k*>kAIb~Wy zh>!{BCRU@v!{*KET6A_Hg2+870V7aZdN;Q#arZ*maa}Iax60hjxLr# zX8p~f&PT7uZrgw#eX@}oEMuJF&A+B=Iw^p zmtNboQZrbob>s=Ae7Z+X^O5q|Zuw@ty;3td&b-BPS2MnKe{1xNOvA@}`M@R0Lj34E z(R@Ao@BX)!{50i?kN0vWVmqBRU*rV5tNF|>P5B4j|MVA-uj^0Ct=t*q1K zOzYbbvnn82qP5tQSeawdieOabqDT5*E-zI9xAloC3=mZoO6xt%bh_Zh8dFB}oBMP*pu=Tri;Y-97s~}$UaZ~yC;{aK1YtI5 z^)jjiXWjgL!|U2T|HJDC5y(qA_Y_&5>|O6oy$Cc_&;FhN_YmkR2*iL$)eFM(DjGRg z>`V;wqWwT=qx7dc=*;c7QI6zf`jqdqNO%76G)A}_ZUdIyxJg}7A{Sr!?+_y=H_zDC z-#Gy+z(%AYC%- z>OCJU!v^g5AQ{$PaAFze-TSc&6UlQT85X?hKPAKd!bxmqjDK5(eQ`!F(eQ7`u*;Zm zKdcO^#?bmt%dn2=y%7Bm%COsd*ZaRF!_E?Y70B14>h&mPFJn!zm9pQ8U8zSYd&XAE9{183 zrIdlgaHW$G$kVw`C8Lv_cBmQbdg)}O_h$;9=^L&tfWq`VW`w>$PRX+%ZmL$A*Tz_$ z*~}&rYT1KM(aLs8n*KxywTW3uZ`i1`vaL!hI}>nACJo2Rz6G@`=-(`A**mCZlTgbx zW|<2vC_Y%czz~J(=8(CQE8b>^t;;ZvH_8+Q%Wf&;x;#GP&1aC?xbPCwjPJx&nP6gQ zFSCq5!#67}A3<&o`k!3#J!R@f6ANqYLeOX^*LhF)Q&}1rV7o>_+ zhRn@i7&cE0oA`ld3`PSLZm#r3l$CAi#Hp#%xKAj-ox6M!M05rDqEoqqFcO)k#*Zmf znjpxM^|r>f6Ux~dm#iuba}FBU=aj~^(^>^H^p;k0xC{CHG_G|RHjOGibG`I-@T2qC ziR{Kw%V3$C8iMwbIyHSff_#HSA;{f(=vqq4x=*(`h^zM(Ew)uGXmOiOgU_O^an&Ry zv(cW|@L<_z3!~@p)%bG-HXATw*p%NTDw4Xdd162j+VTr$Kg?P9ga(mB2Q|{8dx$i& zi%5*E9wPmZ90lew3sIC#ivA-ZWBcpiN8qtHY0U|o-R6UGV4Bbgj*#niE;7toPM#l% zo+Bs6Wz8pdt^FAjHAcb!+e2$LvpHHQ*1_FYY7ke2B}O_O>%=<6E=+)UN22S9G8B$- zBbwT`6Yw!i_4oY`)c@yoAJBgPKhb_$PnCOW<$5|Mhjn~JipE+@b=$6na& zl{wRhl52{q+O6NN;UxSz2O^M#pwEauCVtzFSdLshz#;UwatYpTC0BhHQ)^;kNh;fv zy~Vr47ZgV{LkqTY!apF}tZW-15Nf)%8YAYH1lB>#!?z-?X8}~I{btUHgM16xSA!=} 
zEH9G0SL)^F&6F=<-s>Q9SU%&)ZL_khP*sO_ckrD!V|Pb*w1XrlnIC*IV~@<#}c_=Ahyt8#aFki$RP`{UDoZ`3cweS(yIz_GiCIC z!2?jeZlq^cly!&`_Xdd{eaIDX#SXG%+eFf$m@C+*^EDwpP(}IVge>uNKteS-S7zbq z(3qt+wgpu%*eD-a|BFljbksPc`li_VT{y+gIv$%8%LKuOYY+>q&;9{6;suY23uF0A zive(vVbkcmAUyJ%+B1v|L#bW9owIL7;nr7`C>De)Qud-Rf}p(l$eMR zxxVNT+Bl~~7DeidoEe`O)<0fme0F*U8zs-azVAQCXYy%bCQa`aUED|Z8KS;qbQf*yrJ|{b z#v2<^etn`oT&oLliiscwn~QT#3h;1qJ+s89~!o{j*;u6kljJP5`fGX-!)tP z$;jjn--ZQ`d{7(iL*M!kZPK%=;o(VgE9 z@5QQ{{|USsKmA{UcZ=%Ue**74pZbsD{l04zRs-o}=4~bMcTne)*wKQWYOR#Gol)P{ z`qe|i`O%?rYbu}W7&ZA~>^F(U0i)_!R8(0!S=->9)LCR;2{o2v?xWX2yOe*KNRI7R z1)}7HOQ{cDNBu+G2EPlp^%uPdw~Kn=_G6OYgPYvC_`iYM9X~mqZfCp~w=crGU3438 zBHZ3a0)IH%_&6Y!nIElNY%d+QTk?HmmJIfd|X zd(`UKjgr9XN8OUoi39cV`A8y*knHG2663rP?%=aF)3FjDuU9tIhQfsAopN39gIO$= z+OiVrWy`oC{E5^xySzsqY8JX*Vw?wqjo#T^3J|!PH%8twT6mkt`A-@r44W^mgxx+V zu{|f>H8T>S+DIZakMAW;Hxd;$a||(d5m&cA0U*u~sJpf~b~e-J<*HF`_G=BLu)xc8 z%s;~7idT6mQo*&3J1Ii>$yl&L8rQtTjiD!a=Z@gesa*N4@qRPmv$*;_moW8(&NGq~ z%LvyJzSz3%ySNM=sk`HqETp+v@kIPP!j^B>-SLJas+rSLWPZYOV%z5PdUbakR*`S2 z$k*^NNKEyatz6qE^q9`A*8e<>es#EuM$tciWHf5H_#>s!!UsMCjefuL1T<<>k;l`h z+yAr%i`k;WNMeR}vKq?fYbb9uaEBphnG&-&zkYzb%-i|S4iAeDKJbv>5e!(-9L?iM zd4lEVL=QP_CM*4u5YSRg#)m;G0^HxO)xSX3aD6$qfi^rJI}P)v*06&p-TZq;Cr@Iv z`o~m&ZyEnW0%i!t-^Im`cf6)J5ngFhmfW!a(83EUl4G6L_rAk!Heac}B3Qt;5Q&7J9AYUgE4JAQ1PHafTe>ir!6p1dPmfqP@H>1p#l69D5yiTgz z66)+835)@C+q?{f*Uknq17Y=r%$6nQkrcw@19E4E&3tl8tLpBs6sL!p;;R*>D|F!W z*I?=<>r}R!;dCO82>bt4MP{i;pV*~p&%1J3L|6~oIe$kumcy<|DrT6B<4^wb1*e=1 z`7)1i^2!oux?#0OC-t8wou2a$`6J1kXJ99T55zSe>k~-~=aXxk3%Oj5f`m<1J0|(I zB5_YPENb$=RGq?K)m<<{-5hdGG}?u zyFTypKKs-4)K@o+ld=_GRG@=OyJ!{Eao;uf1KAXJOYVpf%=vOLM$rwHM?%8~ZZLd( zmy511GuIxNYSiaC#-s$DZS{2oeqVE-=e*Q-iexsJkX1nf6Z$6K$)VX|QXa7`sK19P zm{_UIgh`AOVMR#8P#S!IW;U_V)`BvjU30x4hm{06v*ds`TIt==^HgSwK{p;`ZOWaO z^W%?soh#-v$?m(lcE6YEep(!WTbCAFqZ`-Id&x4| zO>&0di7B3J{Lw{@K1~d7W{7uNj)k~*Q6O2d@}w|wY7vD*9#Ao#&HrRXvF8|C4joQr zqy$`MLC&aHvo$0=OQdlr3EiR+t>lGn$ze2Q`#CyIE2By&NL-SxT>S}Hh-S$P94X6I zLBzwQ`@^N5nA>TnVriDmD9zDI$;}d4Ut+~IwqUP=UJ?P268?P7V#*vWXAP&ahJne3 zJAZLnsGO9CNPP5gVRd}jCBjEt41MD_wpf|O0Jz>aqVjGJq_>8q^v#l;-^Z9Blo5G_ z1J2e^dEetd+?Ji|DY-e4iECy--PFtgETVWZ$ie-pNEJ zX>27bEPP+({n{kxidK4?zoV7N%rnE;=JcP?QjTX;h1zJ44;xJh=`YAiP8{=2N*dYX zZz~AeT7MN+8Bo@f5+w9ld!*tE8^;qgVWYGTgK|h^+&dSu;NY5Qd#E%hxo1?H zbzORb0qh@#sBl(^-< z;FLVUo$2N>CQJ{Nv&AW0hVhTAk79I4h|L5kH`pY9bLt)mjs5zIAKM^*e6Q;Ukqz;P zLIz|7X;X?Js7U^H{WA9*9v79d$WTUD14ufPt(*tq->(8IbE8XQ{100^oly6?n5-!KemI$ zVJ>MrOjviT?QN>?tP4Bdicp)ojUaJ3%Mog_to4Y z_4|5m0l)t6>$!RI8@W4ProDK=?>HiaAe!PibLMD+tC%J*6vm}ZzWfco!)yH)GwrBOU$RexOZ}7RDRd)kfd{q>P{X0#2B)S zC_`I800s9~_T?=&bp-QiMj$-UC`(X<3*2MbWsmk56TX!FuXQhDN6*${Z#!sfP(DOF~zq-3#0nd_r zzx&JiE`J!HVPhzuQ8om?6BD6@{-Dxf#G(#c0Z_HsbmIZFvBl!(>8ZeK<$Brqo|`;3dTxN8=%mNH=6_P^ zmM2x}8I|T70oMVM%nab{54hw6{fT)Zrvojl9GoOA=E}j+HPwYs7QL_~JF_&r-S$W#aX`mgMPc4 zp$OMhmVYq%Cw}1w-R8X8bob|8F^#qz-jdDc3qQA5p4-dFU~-u% zp~tuQ;#)j#GE|$VCWXmNOktU0m&(bd<6%%O{7`Z=d`5lr0wJ(uwKCOe`HZ*B?`JUw z4%WNLCg)nzMiE7r>-opUWVDG@R%rs|VOlKOg$}ahLUVJ-+6R33j6-EAqnMuyKx>46 z)&)YL%o3t|Eayr*knT4cnE3mp(Fd9B#4$8!P!irsgp`Gzm3(+y!KMrNK?H6#_qGa! 
z1{skfI8DfCkmy#kC!2^IyHcW=67)E>f&;R1A1Ff9%JV6F>Eo&zs9O9T5;+Z*^GNFJ zAT*QTchx_^{i5>rv8JpEu_`_#YTooL^MkLYO(LDZnwFF%mp!eu@@IVqx67*u*=@Ge zz*!bJVArId)mo|dwXp}9O60_`5vM^Jl?sa-EQwq}LtD*i*bWX;5>wyL+$b$p+_?(A zUihWaiOQrRG1nsBxgIWJInrYNyS%DMzJeLk(?YYd66^x~?-bl8WudD(<;6|5(Af1K z@o{76F8)~}dCBNI}50~BP4U6zrgt-W8g%*d!Y(mQ1Ar?DHK>STY z(pAIs3%_+c_OGddOwr}kPFZo2SqEvV z1jup4yB(v3Ci8jq@MPgA@PVbAun9KU_^jy11FeLlNw`rU7aDud0VYu|-xFDE51vyQ zw{f>{k#0D5ZxK2-bw+4EVABC~=D#kXG$d{j0vbU&LZfiqY_h9m-R%TsNP@(!P-MuOJO=VHUW~(z6X9e!AEP9a3z-^U956SNhl|_`V3zStB zJuJWDDvN$CzauJ(9+BUi%A!|O`0ZU;^cug$u3*U;>axWySwlUx*oZaN@{_lwR({S| zQzbtG)>O*R8Efv7pNusj`Ejnfm!I(TEC{l~odr_|KWVH^;|X$sxNG6}yjq+U8z4ia z!2Dg)eD~Ei8hWaFJ%iVJ__nlY3JbvAGMsDPx7*ge70!G`{j!xV-Uz#6xcBF)Q;NI}YFQci+t8Lq{usLIE@-6|S%un%bcS69vWw0q5NEM|$WiX3*& zF?O}zaJDqdw;Mgi_SGfyJoM3J`I~$@H` z2moBZBhVe~D;@@E-rB96H|5y%8!gCbii5pNt2V+1HhHv1C0=MsY_Kox5Rf3jn;lNj z<|DEIm-0@w?I6krB_~`C4h&=-46cyE>6Dy%BIbbZDXBf-dE*9Ju=crD^)}aC%B!GD zyo<%CPODmRN(c40!=t<{jK&zdsXir6;MkQt=9Id(23q;MEqJM?sv&q*Nfk5&Be*Li zsEWnXdEzzREzurr!b-hQyjl8{M!?wu83BW0BYtl6DKLVvL84?Z2;ye6_XwOVagejD zFA`(s3u!(I^%hUQNLCfik^RXhNNY#ZW5G39a>iu~9Ralv$jyU>NXqUzGQSQcs;Q3X zVQi8{ZH1hjKSHlR;DDV42w^J_rC2ESdaAc* z4k5cGLZH%<6%;ZHheW|Om9-~AC>wdfndT#WY)Qe&@H8}zMK9v59vO6Sx83Ghl`1#J zPDwGnqD?L@9s!W);XyX}!85=)r+8eDVWQzIMWnnG5lLBzlFotsxz~ko+iLzou@_5A ztlaXtaW%He+swm411ek<%r?{M$XX1f859DMv$#_;e0e0bUD$7_9v(MYyV;K#J)sl! z;F(5gNxYdZRU@(qJMZLg;l|i6IDR2RmU3uqHoxKcFwy^7LZp@`!s`!<LLq-_9un7HSs$Nsrgf z@e~g$3AU9*QcGlCP&A9UEWsU2gHoQEOGkBdTEy0O(*Xu^m)|%>*=~XY%5~&~dK|y= z;$aJdZE_WA9~-z>B@-7g7_F*M+Sv2xj^gB-&D~#+UtE%nPV|K@^M#ty%(JPxEHY|A z>@PASg3)QP2M=TSabe>^7;g5~G&#Ix$OfC$G571_1jJJy2MC0~{|(MG><3PGcjtmw zK`{{p$26g8{(&Ep9=V6pQb~NjkXC;$&je{E$&~-0j$fXpj%$8c#}ChL4&bp5?}D7( z0_KPZm%--KY>8!E!N{h`$Y!p?v>^}jWs60^`VGd$TwHYry(|~lZtj@nw?|6P6$vno zY?^$744p6rGA(F&%pCc~*vvQQFhBF=tJ!^?Y0{bd#1uTNj67)I5uQhXp$>{w z`g_;>X569bxNG3IGPfvu-TQ+3PJtU7Znmh?>91i&Wa` z;_>I=m&N13K9xsob;v@}@6Y@YB^4{ET1~=ppikxi+{G+dsb2?kiQQURmAr>#}9Pzf@7I!^- zP~BX>4Ys)!IJ`x^^SAVf^Shlw-$k6q)vvYB@o(!;NF$aw0@ME>BqzW-=s^L&$i>&z z33wq%;LDIaa_{DtDlR0pBbJtO9xRn6AvsXzzH4aapZGq3Ai5}9l-t++&fiJf#2=OA zU6pDREIK8tEHWXDjbB9M(`H23U?^XijPv--W#|v zbPU-QtWk03m_3jaI+lXi{-(?h80*K8EoavtBK;p%PNyl}u&N@~J(QUZy&9J+>OiTY zOAP1H9l)WQTdbWIxXeDCb#2CrM(RqtKl5!$LG)NSAmZG}t3$N z>8=#$+sx?<*l0I;n#*`{^5LjZ|7pIpCBJQg(ox@zKmE6IqM+^rE-ac#>7MO{Un=uF zN#(0m-AWaH!!`V0fRnMwhdpWGNQnC~NV}a|{e_d$e8ziv;ZL(5*!7*8{qD_L_21~R zp1)VwwCMH69IU=>9MiQ)?V=lZaOG_mN%Yy?NGYI+CEQjsvRDwdAAQ|n=1VM<^6*GV z^(Ym3qRcNk%IvhtXjPVNsp}y^f|Nxv_WB}ck(2}DMkLDE&D9glKkA@Qw7o?R%PgPK z0Kk5uKhKbU%1546>=^7|?jmMLGwd_lJHOc{Hkw8jK7{ZuKA3O`_2$Z_xUq?$=hxR# zq&vgc3$d<+TfB&mDbYhpB&*>4HoXw!$^70;b*|Xj#9J?ULNIl#ZDzjo!uO~_h|24- z@F2F7s3)J23De3Sp@VYNp?^r4fv!oThUgwf1Xs0~_UM-n(Ouza#kQdf*v(RQFSpR ze$;fzecB(!=`(qiWgQQvpC6-z!*kg-yDbVOAbch9lc&I|e^KOXn$RjItq+tT0g+Yk zH*m%@{>=cFWP?jg@JJ3gB+L98=iqYoDACzO=8`&>)e_zifpNxLwd&B=cptM#39UVrwa>@HJ)4rw(_#NIti4p>7QNIYChc z{WduT-EKDjd6)J3%-z;+z7p_mHa9Oy)M4Sb!i~`@$TXd>&4LuU2b|8lv}*Nsk(>ES_F@m%$js?>RIWL}4a)67yBKl5s?_0Z5#er)FVU@58! zBfjHVv3h1&&z4%x?%>B}zAeu}LvNLvu5+MAT!WASFhyr_`bCc{1P-Lls*Qm&_0ZVu zn16Y9W5iES9d#`3^^9O5lRk{dg32)tHvfel@Myww&)#=jR%j&JqtXgw0?I z=Fc31HuDUOof%{2f1UbEJ-#8q4qS=T$K|^9GWfA? 
zZAIcg-%T~9`p`H;*4TFbEUXj!8hG`2lFm;M>{zK}0)$0P=*g^Wh zIv6$u?;W-U5!Zpub}@U#>hKBvZL-W1XJ4Sk*gNk0n&pqL3mxkRL2~{><`rvT9R8}*B)D!1R6!3CXQ=BB8R;6h2U!pblQS$;>mxCun%L6lA^^~ zR1;oI9BM4kQ^j3dPEb|vk#hh-8bGNF{S|lpCm#vng`5CVQJnlN(mP2DCu11^++vV( zsOYCz!EEFv|E3z`6Eo8!(tX@@4b@0X94?X6y%sk>Z3%l3IlHWWli%SQJ@P_smf*a~ zFIp8Ia&CW>6%w0@>iv*+9w?TtF*zd)gwy;>F>Le>>HA#IDpC8H&zD+H%CbDGwo}~v zJr8s&`{oKGsDoIuEbI${kGo3gsNZ-E`vzbD>lQRxfK=RdG0$Z;5r|}VZV~VN!f{hT z(ush{#a=)j&XOAtFqI08CNiTG`P8URlfH+mt(vX7P+c0d6#b}vRslkyOlbwiHG#OR zKOYuuG}_~?ZJ$VY{JDDE^*Wazi!7Mr453Q9gO*<^VnjwnCPUQF{V$Th7f78K zX2+1-A7i12fg3W|L*O-wj!YOTqEx=64kgn}2vu}Rr?J`Vwc;d`B+IfpPd7>nEN*%- z{r`cD^vQ;J-~8b;=1F_iL(G$&%&m|q2(6#1`l#Y!bkLdrF8~Txt^^kMKwdCb3Fs=gI)RRle3U@PtXn5K?w4w-10CDVPa!nouCpkFv}TPwriwhU z)M_5R1<4i3oO)DNyv2*)&Yd2}$I!*GJ=8vY-q#V&m!bCF3%jj?&#>ob?ZEaw?0+qhXk(MV_vHCoOQt-;JfFU1Ga0ZF4x$MTyi;Ild!}kB_f%%yp*|5 zMVFcr?k@-dF2|||Ystp(MfULIQHs|hUUTDw0^u1Ipp+pWW0~ev)Z1iJ zVp~}gH8#UJ`Xwp6gRY@H(jIt=nfJ8S*#qZ;@~%LWhNxSs`Xf88aO0ffbgk+)TuPWD z-Mu~dRkVh?(1F3La`;TouNM=h!+XjGzV9Eh+t(5+bMGwE#=lp%aY`h0hTn~@mMTaJ zlKA{bYwu60c};5JpKPas9=D>s4?ZQ)2@Ya+uxcPQ_M9-yOV^ATF^d6+f@gml#XY~ zXg*b==_RAlsxD>4k|X*udOmluC|3UiS*q_pm`%juY>*_}ICpLU?BQjLQvzpN_W9M$ z5x`I|&CI@?;fb`u0)^x^5$99ZH^^}6PWtZfO7-3T+fMtP;k-=9hf~^GXMMBe>eGG` zarU&n`9WKT^_};W?>hKJcn^$?Pl|hMROQM6Ra*Fdb)C0nfX6siX0&KNu<(0`)gzO7 z`paS3>K7L3#sj&{l=MWdKUW=u7S5AUHaR4@AvjO?x?vDr1I3JYUDPgE+pw>WLpqWl zb(B5?bDnywx>CS{Mx`nd9LPD%LCljno^V&fB@!#SERupxbM38xD&pYGvreS-DQefI zH983G&GnjFC?nhXnI?Wkw^qs1+6Qu9;(5g(d))zNMg6oI(p>Pt9A2#&&Wp0*vzIz0 z#?89gDpk@SDYuWZd)|!GbzfA_d5kZyoK^p|-c$c?rw^||`A@TjHgxw@cME2F zYta66gM`%&?=Bk{_f&p%f*N9uM#n|INFdkFaqjfgu8UnNN1=?{TXcP15S+8wT(dy} zxxL?6XK7>t$KeDHr9z2 z%{d-N9a@Rv1&%q64K>~3+t5;|Lx~j`OUCBDimFrQ6HI>qUjAa6YZXwV!iT+$-FKZrLWp{c>2eGd18G|nJ9SRsOS@!zuTq<`Q4E@l{F6HVjcqYi1ycPR4SDL_1w9S1XSa za){f=?KGd$RZ0#x1{IvM;Tf|b0^YLA@uet+a zHn!lV^FP!!b2fIj#V+=;otD!jp{`4$`XaBfp%E4X7=;d_QiM0OBy>4ync440f%Cy(87dW0Y>tn-zdm?@RkM*TAIBihsrp}HRWZ#h2~ zmI)@Rp>}03f?#Jq1NMc=cLT$aV4axg;qL?!NgtTd4lNpexRpL63@z7Hyq;S@U34{& zyC`r$;f~O_;o74Ox?>mZFB{-Pv;jnnuNaeJ3$_VtL?+wKk6|+?ryKi%-5a|P*CR9S zL}0$Cb^`IiUR8aoae!Am70blgUM%NY>N%7oMsnxAPOIsut$0ASj+5OBU(<)sA-nGQ zP@uG@C}fLP{h)LdOEqrj)26X%$5Ufor|SL_#8( z_Pe@!4)1Q9U>}ZY*X$(&R}HR?UEw)w)*p2m4RD%{?Y8MD0JzstM}c$-2xzMUDIEnI zw>7j;z-yb33KY#v3HGfhE+BIP7IPkzVZq!FnORsD{ez-*XXl}L^%!Pgpkyk=7U_&aX1f^3s?#0o<}`Wj@dBGX2zG093hF@DveL_6m z=qS*Qy)lRG*e$WIWv~1uz6~Q7ae#A@sam8xkmWnzO942$`FHry*yxiGZ1Mw&DO_ung}ah|cEz%< zL2Y%#vTRn-?O9k>fVIPhxQpMhl~z+pjr#juSJVvTj6USOQti!F-;fVLxE_&>#Pyvq z-T0g|V`4IdGsz=$(PtR5xw1kgAvh1^I|Y&P>q9p9hO8IrBh;R@pgUr52%xF+A$xYk zNP8ml)S&O&ujfOg)cFV@Scqv$L7=~nOaVz7Ar7%28~nW9X~ueYl3f1kIu8bBo^wg% z7|R5w@nuW?5PM`SLfgcn6wbuO`It|;Ppp0^a((yBMhs}6#c5}sAUszPj|@ZT64uCL~f|I#3fcifc@1s-%_b4Mss)RfC zdGL#&ukDT&$me>o5}r zmOYAkEqVc$*g;i%oI^iSLm$~gn^M}|ANc-|Cv%@>1fi%$tJ@kKr6lU(KZmA{BfmKczYpP2&54lfLPHguOd2`N987YjF-q@xZnq=Ktl(;mQnN9Zdm6%Wx|F05mJUw9ep8 z-VZ}tkXyn#yF|Q2O`@+z4pBhfB_JtjC$*|v79!j~I!M+712_kvVssp0DwsaK5>yqx zXF>WKqIZa%Pm!XTdDt>@WE{kH8=k@s?&07t^o+_Q(c^5)i{V&W#M}B-A^}TpknwFb z!%M8%Q^MV_N!K2Hk@|{e76cr|%wdS#F4c>FTSV=_>sO(Rk=%^U3-B*#3-rO)LOd#P zDUoXaYXq_`v;em7q+yMdz>i6S6S3#|66LooUt#Y5W#F#TnqD%XNlkbrfu4YMIwc)a zi!5DSB>gM!pc$h^2!p~Mf_-z$nLdT|dlyqU_>fdWzcC?M9(Sz*CROIDATx8C9;VU5 zH_ny%77l}e5r^YEi%zAxau54#e)c(dq%k^JtR@DYJD6wCxhd+Grfq#n4&ySo6 z1@I#xuM{1x%e*-z549m`ED*Tujpn%V#QT-j=ptz{YMU;qwhYk zO@6m_eDR6qF?Q{~CzwO|E<;rk4{+9PGciR{HJ&FoUkgd!S>wm@ZvSOOY^d|qf9fc6 zjM`WDzNhG>qNUT>?puX`&hbVjI7Ov0N)#CdW(|lG@?aR-rC^!gj|^m#M|-1}0 zP4qyz%Xml%p6+UTx?_%wW(s0mR3W{x 
z1)QI>NHk$8CG3>4OAY-p%dXX&yHrhV4R)@$sn&Oq@9kO5fo(IxYpHej^YwkFd<}j3BkiYI-%q0T zESWH)kJNKhqMmX8>w4-AbQwoj#=+bN&IT(aEgt%yL>Vbt9siSVruQ?-)6)<5uIo5U z68gAldbn&iO3B>`laL%m9V>efCzXdw#S|p*zLnqolxtU(ZsF4H;nKHh?BW*%UlU5? z3JPX+-z#7)^`&w5+!MUxF1%cf+^pv+0!?J{cy$-KI&V(6MpV*r%VmQg90HW6x1pz> z<};gl9-AI}mF@J4PFzi^W!dJ#f*9CnR;&_cVSoZ07+$lNs*l{wpQ<8gNs{9&VGJf? z=ATsMn(-mx*ONvnZ;~V2Pk0n%CD4G0I?A24f6FZLvEiF*Mmuk2m>+cB%rdui-rQ<7 zbl&LZYn?aK%x61q7MV|U+=MG+$VTUV9dlqV=AJ3YNlXD#%@n-LKH%Yg9un0>BE0ah zn1{?uml!SyynuBRqK&W+jsi5p%t8$Kh@-0>xHIqgJ4VL1&^bpvZb47a&|O&sXfiXd zpjh}R>$~@%Uf7AZPm@ni**6{hOHSN1f@9jqJbdNq}{800qF*PJ#l&e!8Zaaa-7H9xG&IJ}Sl3=Rrv@_hp)>e$1OcfpFc5 zn8f^m2YlVf$o!==yTjbf1CEdw1_Im6bf$&kg%lUv>}B+i6I&v^$S4eywjD+R=6N3q zwVKdRW}ZcDGJt{RJXN8x^u~OPLt|0w4kPpX#}eJYUUeW)-lgZVRgBE{q=d2%S3NJJ z99?<#V|soGOon-``cQiQ5+&(*Z`IId>G@`J8iHer-$ijT!&j)z41J&oYK3pQ&il&s z(%>(onvQv23v+h5d7qH%_@5|nx_Lj!3K*GZsou-7son=EOm{v9XIb+;+xnW`fBmTH z{dCn{V%~p!O!a;(#bw@`=-+9k{FHGRnSYb282;DR%(r*z7=ktPcgf7(OCbd~2Y3xs z&F(Vu<5b^gNr_IFSxq_mexdXo{l_lzS@ogJ{3TZ3A6E?}`u;BB42s7EdH!Orz@zTi zX90>O$xR*`O1BQa3o=-93{`}kpFW~!q+lv1wadIpkQEaswm`xX$D-=+7E^xpt&zDx z$^&H?W-8x@GXKJThZ)^z(inr*N3A)d~oGAjiXW)|MZONZzHbfe9rjAdCGP~%}FxmP8T_m@EtjYZ4wZvoP;=I-+ERW zt``H+`gTN-XW@uBhJEbam#4DuqBdTTUg6(YR1p;hz0td`sM6R9w{)fie@=%fSp6-8 zQY>RNyl`=5W4 z>B?{m?oT(L@G#0yuA4tNN>m=RfvDzT^g>T8vT|mkZQS;iA=gF~`yn!Zzkc`|+*#*M zAY#LA#nIrn3nmv^h6f2Z%TV(KYaN}cCJgWQ%$`7%NM1o|m`vO#^Gm6gSf*LM_lM>T zR=wG z^bmmIz-7LjoBZxwS`}g;SX06Qq0W!)mmBwbtx6P^Mi3}`vhAg{d@W%sdSPFzRb$;^ zi{3Bgj91&{9ul+$gHJve#t?U1StlSO=YUrI5{2Tfk~f8mxHecCneh?@y`YDPYv*<^$j!n0(@Yt05Usv;B@sHfPH-d=Dnw!`tfT%fzU)#z z3j&V|f}TS^d?F6!`ex}9Zyv+~x-nw6t&!#|d+Pj_+D$rCxtO7EpK`x7{jqS;W?>JE z{0pCOe)s1Kki?T*Gb6v_$2<#wm0EMKZK`V-^$&1p^KDmmx_VZ)W4*y_}_|;kkX3}8V)pvi} zujY9jk^R$j4O1%7--((~8E#eOR&Xg+(@ zj){tm6y{)yDb0#g&Sqq3HCDYM4iHI3vUWa;yZ#QNS-5D@85)o(HM^Y^zB3E!mqbIeE$;e~x^Y;tZuEayoq{6jAcy{kxqnPGD03f@tB9+LpVhsk-I280W$r(^ zcG}H@|M>C$@!SuW{wL;sEUo@;=DtT_m_M9*nOj9K8%LSdmIE>!QaMa73_G3;*?!IbBw+}y8s=B!ANh*&iD*3!Sld}m_CvC#0gd0ppH5fz%D^=dtm(cB%hWEE=-`E zup>Nysj&nA5;hJ3@rxNsfJ1aSt?C`dYYBp0{4W7V0?qOBnOyz?*!Jy9%6 zTY(qSy;)SPtsr2IPg}7g=Jkcvh^W#Q80|A^)O2Ai|m0RW`xb7NS$xd(un<4a8TTJ>nb+XZu4L7iaDeCEi47bbmjNLY$qiGXe=^brY&rH5B>Snj z>v<+IT)MIl;Jf;7o}#<+h;U`vUJN)HyZpxC$&rlX;~3o(#{-+COC?3c zfnMq@+FD^?ZN$0#;A%(8F3YDA2(5Z5Dk( zKc(lt>o2eKyBoBJ7NrP~Y?~bEn`q-v)kbYfjW-il^m=ln`ABAY5H-NE0f-~wJ_o${ zyJpD{YpirS_;1PFv1^L)fNENIY#Lv)Pnk4zIcpp=|%p?0=wTGd`b(=r3LH}lkpo+Z8Fw75of8P2Bm$YEjF+WHVNYtgSf7| zpPGDswTz4PM!Qoae+mTMIO!!5tEv4{YP^Ne=#j!5M%hYJ8zF=cPVHLNK>3m{WRf3e z78pF@?^`}2URc~^ye1^QR`nG%8Smy_{ib7jT6lV@w>bE?cJBcmRxEueJvdHTe$NS) zKGa!Jq=aa=S3jg`DNWS!EFXlHK5p0U{VP{-B05hIBSgd9L?d~NC_<7>P}#>KHdVOg7Wcf*o>_UWmys|5mo0-RAp6JIDW1=ea) zMJc$6AOvD=Z8uhZOnR_K zd=l2EnOJ-%!!JEjm3tX&KqSoDWvONrZq45q)49MtDb>Ah zo<`O4o{%}X{uSn+yONbXpgJw{a34`ziXrKPOS$QN%DBX&Z0a&8dSX`e z7J`5=D-JZ%!q3HB4>0`GOihhUP07ornVMr9O+P+0oqVp!6)Dj}R?VeD<|n^P&d-$< z9rH7bHws4Jc^XR5l3Os+jI35*q|DJu2tMVmk_4047MM87+>IP5S_p1{De~q{^SMvV zA5Jb=!cQvm?<%u*+E(;XdG7YnO!7Equ zv6L3qZoQ>ln5G=B(Y#tnD@2*Epe8j;=FL~w#^1u^R6nXdkPtJ<+HC{&ub>BSeq-D< zgvq36wj+>R0_<(!7|3s8AJXyYD9-D+tL4Rx-S|MROpQR|1K(TIEt3c&Cahdl?gN#a zGXaal8%QWWW+j}Fb|7jyy)#lJ0NR(6Wfm()(eU5KN1pe~(>NgAs2*bbacU%R z=~uRL)D`OK8Y`@?vNU4|drHNk?NI2%iQsTgqo>I33H@x8jjIB?r^z!+(PEZw_FU+l zj(D9nt%KOo+!sF!4w>J572~r>ydsFY!XK$cdV$<*cJ4{GlOJq?xcUPgcz)va*2e`c zUHup|n`mHjp?Xq}jBoSqhgy3Mp~hte;6MpU6LA($h_~(5+n`Id=BUi2yOL-vR6fcU za}nm1tj)?9imzR~;}qg*pVWf^!g(yU_z7J-9R!*b0;%9iiwfhVkZ9!~OvtmZv5nQL znuRpMcH~kuKr!P9-!+N=oibI08)dRYiLQ(uWTbM)na4LON-_E%-x92t(Je#_%;?ID 
z&wTaYLwDVRqx3KansH!!tv6a+Q@TnN*9&{lzr)*MOMcA=qeVF)9P=34ypi!K=B!(! z``aKawwvo`$>wmKL#@)szAOaO^3nLBC1#NpadL}KWFDrhNX*9kh7%>?`VMFmYXiM_ zm9PszE%`yQ6~c?Ye8ge8f3o3^=HcJ?59?3{IoY%~%2tWRMTl#Waau{|i>c-ENbV(c zs7{vA{q6i4yhd<6mN3N7Zsw$3WxhIFc#g7Fb1=!e!~^d1hJ))Bau}p=r}+p4EG$Zi zzMgoa6yVeahm^U$pWid~a@^&9fn8Zj8@HHI5v`RP=;M_kM^a5SBm+^Il?)JaHq5`T-qFWy+*G^G9Py>eqQz8SlUb%OY6v}T)LeO z&i=J52$%I{4h3)E66`cg0`>Jf1+K#rMnzwMR58l7i(cnuz$^MB_i72-yk=2Jz*#YF zKwz+JfW)_FF+AB5QHcp`l)jzcl!z?0sFR)`b5T+sB@9XE9Gk^Qzr0S=!E%V3SITS} z5N|4$&6yJOQxDC`<4=xUq}f7xcHH`E8((#3lHPhNu&C+`b92F>#F|pIps4Ew2%mxe055JK2gT-*cmLqer+5Cgl7d~733eV$# zlC~?KVqIi!B_cdPI`d<;PC0l^z@SRphdqQ=@Vr5-HpBTB1fpS2sD8ZVXC8 z2U4*0lxg?}+&|AceCmMld7fvhUFfsRo`d5OjPYr1_spONYt@yrouSgSd2cg z@rgh(5!bixSWUcSz3##5-jTUkk|_hyN>JrTi4qT5CC;TpR%C83%-b)eA0!Wuu!;hL z-w+-yDyk)O$c7H{u@BU?S6Zfo%mx{6iHIGO+NM+Ov((tt$E3de z&JE85-OS`Po|&eH2jjsKo&-OvwY8&r%+e?-pJ{F4qj7r`{Gyh90DSLU;z zC!vq0Mp8xCRnKW%2es0}3<>z{j`d39>`-)+l4;$X$BWa+v}X9r>!BX-QaKUK7FT00 zzQ$cXk49J|sIFOFxAUUqkX_gMjAnntHIy!hCJJV{Q^wWM<`o$i7HY`&??Yg&Ox8kid$31I)v9I&8_aYc3Ffp_RB&w%^fNcHcA|a(=#W`*qr2Ay z-k`ddCom(O(m6L&5>Q z%USZEHL>c9woZ}`MTzM0<3qRnS#&i*_jHvyVloT4ib~;38j-vh#zDwT9bw|?R61PP zCdSW^F?9fG)EoIlH{{gqd13{f0e-q5*e%W9UKDdy%x*6_c6lUqywBKM=5EzOE=KEF zE9_for2IQ#Hbb*veLk4S{>bDMkillrdA7j&#(q+UbbI8S(&F>Uyr5duB_gF`yT{ld zPw{0gT?co}TU>Q~?(OqljJ`(Aewe!oeia2!%1eNBE&|NB>vJ`mT2&Qqi(gfR0*jf* zkzpBQs@djkFVX{?TJefFGSt{>iE(}ILt3bv39L=4kfoDT zM~_UiR~*R^?RZsGV~a4 zll<;z>cakXX1c8UV%>DhBZDR(&b;2W(#zyj@zfnU>InYe=@_cW-yO?3{GQBB zIo#k@bImp2n5x_gn#tb_8qY&33=88I-m`Frl8~4wXvh(?(z`x!*O5Q)L9_YM>+lWN z1LXN)M&R8l0`o^d7eW2u-2*mO97tgz$L1bSZ=MiIwbl3uFDpU?IXA`A$^!EnRFXK0 zrsa&CtMYgfYrE5yLO5vSql9pwz}!Qlp&Xleh~I>g#}X_+Boh_YZ_MuEvvgxZz5rEx zihN;!xja$PvmF(gKU>Se_0maVgkIGNF@kW{N{nD~AU1d^Wpm#{ph9s4lm7fnRU^E6 zY$bF0-eA^lv&b0Us_Kw2@F}M9mv%OoVSOCa;P`yT+kDrD3c&YU&B`1u04$p6*yjXE zI~iHe@|WZ5yQ$RJ?+G105$tcSWdwz7;R1697uNJd4><&J!*JJZ=gR9IJ{p9gh1PN7 zGd4u7u?v3SY}Uy+!d^Iw{^DVVPyxM`7_d*l_h@|8Rjox@mFRjrmEWtO_cJf24VwD7 zYDz{Pt>k@a^(R`@&0Ijlv=%{^U&FOHa&3ddZOsFtpuoARwfIdrP?YTg&IsO`M{}R% zK7Ui-B0^IPMXz&syTt8=MOTfabTneFT@Bb_Si;Ww0-P3GmM9#+nC6R?FWm>V@dq(* z5L@y>Y^`>ipHc<(Nc;F9EU^UeA85Y9jTNE9ykA|WN&iKXArg`;NJ#AFpQdxFK>jfc zgStV9d26jsD&ylRIBiW1dk+=J^cW{(vrO}>PNx<%Zar`pl9<-fp3(JzJMp8r%~R7o zzD3IV9Y>9$6KnjfqhUw_)79+O`di(e(HjG$l=OING*9HN_C(qE7AfoYj@}XcNwVx1 zDchg2F01U3lgf^v|K``13nsPBJTb5O1ZJPq}lvu5PT*QjtN+ z5Os3o?zG16V6wmd@}|I0zwv-_BoxY^g@bt)6QR^d=G#bEjP~da0IsjaEGeQdfxAV= zJ`FLo&p0|Yk}?x^KFre8>ubhEBtH&U;a_OCGLnvcY`=&FxUlcF_*Lf z%H{Mb?)nSjbdl8zH2X1`*48z@!1<`j=A@CRWfRFLA6DnbXic2=j1#@z}cuH^>T98AePKqXI<&yPonNJicc zpP#+4?i^!fzqH$G#~0<^JLcXXxjrxNn^Ut-y%1P7imBZe)Fux)b;YHP1?*U-?A^GYhXU@aOcVP!##CkYyqyMvPb1Y`$0% zk4Gli3*T2hx(hL8MX0CpsoWL22SxXGqp7|<&B}V9t!NfEsN%EqrCBv)b*02vD5+Um zUvpyv2@YzOB1zm-b7Q@5VQh0N*=ikmdP`!0szozEGH*GhIN_DMakhEz zRmf|Uiju4IZ~p`y)ZK{^P z7uYVN!Dih5{_-lf`dYgj2RUqjIcE#KhY@x&Uk3AqWW}kl&T^zYpXOvw6ez89_4V?p zG#4%9N&>$Q7D-3Q9D%%L_`)9AN-_jKvu++ysiG;^9|R9*D|-`me>5?fA`=O7)f}8U zC-ga`j}Bi5o@tQMFWKigJhkh6`CG~V%~G|%oe47@uUARP+6bQWCH%gBNL2MH?&|xv z^kTPSONXicmKp0IE|fhd1GoCRm+^H@P4rQ@%|EQypK$n%PswW;TKd3+K^IH@ATpZ3 z1q|Rq*rMX4%Sn4Da{#S?gB)P=fH|b2(h8}C17#Km0deJiqhRUP&SpfOoxh9tF@zNU z2$Qt@x3L2G3~uuSWXckw&D<6d?8wIx`Ir@#xc|iZjg|^v82fZLrUF{kde!-!W+tny z41@89D++!YMMTb^_&SZR)xvasvpn(5o>xx#Cbrmb;3E3ApTP?a8A>84oR21i11+#zjAuTKPeV;yJS z*wOah;6c$q!PonEh7t*i+Qwu+>e7#hz$6LqkQS%yR)e>?1!`7Jscl{+Dz8>+IY;oJ+wn0sm@cZuKK7KF$}#R_@E z-f6;Gu4^w>%DtFdyXmEL@vl4(>2*Z5q3rdI`-IfTs6gPqa=mQVt_O@#ULoC4`@|gf zl2VT)abbsTY=^20j#Vei0qNNaF_mTpK9`}#H_Ry>^NZkNSXeR3KfIm1b_>q69Q8Sk 
z05K(glK?qMUP2iSmqixYRn7yyCi!T1B6_O1l=@{JyP5YeP>Hn6 zei^TaCyIWWqiteV>_&h7CCPXV@p}Zq{O;C;XX{3?aR`#N^-Q`o*eEAxG&(og!>Zir*S`x`&r!2!s3NT zb9toV)**-wuBf%u*keC|8ee4x?9A_oKy7}{5E5R-ul*hDucARqvnTi0*9>eu91!oTlcws$7 z|6I;&7Td*Ht%rFT->PwBB?}n$)(EtTN~gn{*;npPg8`(fC0~uNG%;rh)B8 zqg2tc$imq|4?mIv_J~aGHs_n?VU{oD6(-I3b-=que4JWOwx`CMR-jkL1q!gci#Yqr zq~-6XL^{~Gwo(?mrmE*i>e0y;A;mtnv}a0TCUBl4zaTLxpe-RWD$rFxVmy8$bqNht zJ-3@YIAZux6Q+t)lc-5fGlJln4_VyQC?0_cncum}Hn+wOu@|q${OG${Z|*fx`WnL1 z%*~1AU^X}24tW;q>E1j)qtTl&Hz!{2AtFPoM1QPJA;`@K#-vm^%;zd&6@)GVv5L9W zBh_;mj^Guc%3RRmlqpdXv$=ewDCNgx=17aAbVT)RWudK5cR4uj zNbIJXN~wgSmbu+I50z&`Ee}V}enVlqNr(Sd}$u;yGf4C&u3YPrrB8#e& z5qN6}jEtZeorme>9)@*2;tU#ing)$_zlHHU?vhj`?i1DI>O2aXAA%MWN4o|x{WZ5D zoBL1(YgEhE0v07}gfsLaWHTyc?|LJXmk72|_z<)x=upt2phH26f(`{O3OW?DDCkho zqCVH>s29vgXgR@(K1mY2B2g0o)v3lVHK72{f#Okia{m|Ys1WKu(vGq19NbWMnwK&i zkRnE_yD2!p{Ka!JFPjrsc`wUR(2HD4%MhNqoQ^O%n9J>vMMY zIXAHzaR_&sh1ApJU{X6KH~d?L3$0I#8gbOF&fE?vKvWU2>R~4+g;XvNo|OfWtsxt% zAfehs|D#XB`63t=DJVRRKn}o`15cBueE+Pt z(F(7K>zBnnV}yOgaE_%*)s@mcoOVO2h3vG6&(iOt`?1-pg_$>>{*yXmpR&-qY+U!* zfpKDpaISSwIOL!>lPT#X2nw+YC1hR(Dy$C~aKH5_W8*kb*v_i?_tY%)uu|*fagk8p zB_L@`2h8s<`nR>;(8k#+B-pB4~bgt#0J>e=U^_!32I0iHQTlpT=DMVU;w`3QBYkr`w zC;<;^Gbku{SdbZ^tVe*Q3*R@|tKrtJV)p%!p;yg90%v>!!5g{Fj$3HeL`L9sd zKwK>6yjW%2#F$U38p;c8B|by_Yb~_hmj|oF(9hjYo~iJ}R6YC$L7w58vgd?vI!li% z<78SVUbi-mq39lp3o6f1en(-e4#jgsF3;{DNM@Uu)k9vTgqJ1j)2bC81s*uB=!V?$ z0>g@~vj_0N@Nf|HCJ1BS{D#Qg_DJ8n3jd|qsgC1%{yV{MIVe+#`Ui;?Fmb@@6Lg^> z4fcYh-dlp-8JDM4M*l3ZbfH9P$_Yg?1AW|E0=S;9i^1bMpIQM>_2=S2xqxtRE|m*3 zGS1t%(8Kr1IAbYBEc_p7(KA@F;z2zFh<~f4B|%_&Frg+K(C zg9F7l_nH?3Mh;Km3AQ7qJ#e19NB(Rdelbi!V55B782TtJRPy6dbNmf@&R99oelASA% zl3j<#?OF1ypZw`#b*?9U2w*bub$5UcK8q+~h`Ieta0yt$ua!3OZyPE@=XsC@JM217&5+WLB2lhkE)4Jfk4mxa*g^D`X*s&nCN|@y(p#yug5D!ouQV6N88K z$f$|Rwz~Mj;HP^2H*&4FoX{O`&0Edh&&Z*K6B+qqQFMu0C-lICL&DV_1oOWpNL0xY zXh}!xO|J@s;p~b{)!d)n|Cfxa!o(t{+8E3-CsCVqJVx8tjBCcSmwZOtEdKyt!)#V? z+$LW5+)Y|lJr{oC;-`=e`rQ{l%@6T{W~BuChk^%6eNc@Hnvvd2)Yv|091?D(5` zh*IDR8JFLDayf&5p*-7c{A02M7N5P%e48i6hUjfpVAh_sGHxc4qg>LFqb z;@5sKXMA|nZ9d{3^HLuOnVvd9AF@5}`m{n#^Eqai3V3H}qs=F{Grkj{(8F@o7|WMR zt5un8=2Cu<6!$Z);)2CW5QTH(HD`@%qgg*Dw^7mxY&VnAitqkYR^ z+5T#K4HE1vjwS3)U1*(YBKMQ1l`1}&u|@nqibvp;h}V~-Z!ON_$W10Mt$Q;$S%IYq zW6dl4#y27iOyn#r9yT|4Sj{hoq}_ao4~Ux_P90}1|AUM%1FCIf0NlnwbB)~`vPh;E z7C+Pk9ICXTudxcVDa(-^y&|3v%V4pQ9B76N)con=B9w{#?O3O5rVDNnythVY$s&{6 z&7RT+@R&k>*XxmIZ5%9;9Smx25r%D3B_6Pq>qq?e_0Uve*hHrGs`z{Ng3z}E>avUnx}an$tE2Z4@ujG;40869eD6(jeUmG_X{CoVT;|ZOD-WW zTrvB}S3G84ASHIL`Tw!^Ch$#F+yCfklR}^^Ck%yxU;|~SmO{0t#WEz2Knkr^87xkqxC*YP?4A|x%)mazq8R4L$aN>nOpTSUqGu6_1N z(+F38@AuyOzt8`@w8`4@u=d_-@4fcgYtum*8uS#AwlLR{2yZVbEE*9UZlg0yq?ou6 z?Ae0-rXY_&ic*bt4epIXFy}%p4a+K$#ZpA(*hN*a=V~#5xmJ*T)m8eh!I}f?`~oq& z`%?zIJ|Dsfn8rxi!HY3=X2Fm!!;*&5s4736ap4@;&D>X2V_wZg5)wAn= zs^=Taxf~frsd~-@)-D%CQqcAexT&caP)R4Tj^Bn2617G7r81yYbRESnE_bI&qkg4# zm~NGQ*ixWDV8g_?1-_23M=%W;J!d|j>_%aZG?oQ^dr06nC9B9^t{Vd zl(*mpqolVAnH*4#8 zoKu9dE59-+$Vcv)!`7TCtYT7;H#<6vAlZnWyGcfp3{ZPn3)z_q-p;=2roI^JO6@zN z_E=C8o!#72c2lG0P5>Y}4w-N5tMoa5*qB`E{eV2&)kPw5~lz#t3kAWRgmh2()k zNDa=iwm6?{ylg91`pK|>esawf`U!it1r4ZlvNS|y{p6{?`TNOzFm~uCG*(f5quNE7 zULeQC%MffCr-YSf`lEYqLk(_BiLAwdE4|*DQ-|KPEt^J-q7O$V@kM@_nyz*q!^Nuqo&jTLdH9pP4<*uv=lL*8~||5-VtDGmR)G zyBwVwZB2=cp2l7%{uHTOrKtwEbW>ca6_+@;Xcd=t6c_BfBY(M*#d=Y3Nrp?a;^IxPD%QZ^c>lK$QxO7)sR*?%R2HuRrz*h89K+F&_&d04O2bH6*c`OVaDxIQ3 zC0bAONd)B+D`+>$i9ZgAm^i}6L~)UX^r`$Z#uD^4Cd2qFj4pc^7BjA?#qy)fL#jte z+}>WH*b>q1t{Io)BC<-75mxyu4#Z%#lKH4&&LHz%GP@+O76c?dR!LyJVuKD^@~bD? 
z5qTg_Je1jJCk?-~uu)c8vP#khSmh;3@|(!KKrz2f<}AfbM;7vQX7+C?fQftPeR2qh zcCp3b76%MnZ8&`m%})uj^-NW-ZD59i!Yfax7fU%$HM+WSM!+drmesWYXm!rX!bS1< zxJFfsJ;4W)FxrwJxMKljI%mO*EH$J{jaFB>PM$O$v)RYgo$%NShMq+)Es1D-fx41K zI*4O1>EJboz;~wGIk|%|S)9@Ns^qq2EZpgBaWF`IO@w-SFcupt*Ch0tW>SqE3w;(Z zzyptu2;dwnzQO7;iBO}xxT|$DV$%oLIvbni4Ra9)_l&XS#_0oX`>?O?DHnoh z@+D5!8sYQL5zbK!Rm&%Bso1e$O#c07cJa>2!s@jnrWOb%Cmyf0_z^~l8&gUyzZV;ICgN{o-kmO5x zoHc2`d=NstRtXrEfME$3cIElSbs<^P?8Chzsl1Wuj@&hjH&@==(JFVeG6swq*kwvK zZSF(?Tu~oQM$?VXXr`i|=7%V`P^)8!%+6pkuzgjX+=f(a!R^>RI}PZqO-u1}#l17i zMl!rKFp(p)sYZUEj!aTK*vUwaMqaRNN3vj_jsC0@9B+R*Am-X#{`+59FQ*jbXK{## z5K3MAA(jP%sK8PdAt+HfH^4oUnH_4iCsSq<5KxCgTg#h>63C;J@On7yD1(DF{-A77 z+@dKMtlbqWB_oF^W~#gVG6i@2I}YZL6+}J+A-AwX8V92%H-k?Ay`~_EJ*ZBR@}4KD zl-=Yif{vBIEjXvQ1GmsVMj^uFP0VLEj&o}aR>yZC_IJ?tf)`Ch%bHdoq_QRkOEGNl zV<-riwt6}>7r^r_2Wu5_yw#i0{a0dbg)DAR=rZ)PU_ZvlWvwk*Vgc7;fJMru7@GIW zv8)h`pi?~PtO(mlQILK&tl|8r*Q%+b9N@zuES^zA_9h51G@CwQ#^D#os+u zyV^fhyV_EUJ+O!NweTJq!FwnLfzC|oqLEff>7v|?_fZIBvOdamE@KM7n|A&=L!R?5 zG=gh05g>)bFNJ+S6+DevIv|a~V6_it7XsugiSf^)vSy(G80~S?=+ZI8^BH*3z=nEl z#scRvjoj))B;duLA3ey4F*J!VYO+d@&pts)J!_!t(m9%vO`xM8q~s-P^Dits9S=d( z=;uKGhZAVn9J67L+bchbVBQ*KBkG^lt~mZu!R{gLrldXIhNELV9KaRTm5Oo<7&hvo zd0rFgOYbVb4%Ic_RN)#2)R4aQISH@f1g3$xHq#jHjd#FHyIbQ=VcU@mMjGi8g{nO! zy6>M~*v5@dx2#`44`V43u5^QY(+dn<=xSEl!ZAb(E8YY~|0F!IHGRPO2Bf!aAbi-o zu=|_6pjGOn*|T^4ecdTv)Knyi*H(xJgNfh^`$-bPPr-z5yAe)pR>wna0u4VFlO6+3 znKjP-%HN)$CqYFjjsuKcyfAIWxzZu7IGQaKP<+1S3~W1~5!=f>*!f8l87nU+$!#S( zNvt^VI0ulSY7C@45IF<~D*=5EiOD0-i;}=5QZ$em4);GLeuoCePN5iSVK>5Fk_}wD zurqf9_Q?lVvknvwE-_Pz<+_y?`Pl ziKn3nNaGoQ8aVRbjsP9|G~RxpVa{&<;UO12-r?5K5enYojs`U1J#L-+IOq*)T~_a# zC`D)F%?h|h0m%St-HbNJ`3zm54Omz}r~uEJm{V z6icz7pc52%->bwNq*k>L0C@{w$(QFjJe+;#EX9z2^60kZbZ&(+h5>!%l(ET@} zZpb56KK2T>-&>254kz9o^SmTw=xMftVui5#i%<<;y*C=ZrAISTS9x|bFRK)l0WaSn zs@;36c5kLTi*%IS%d!*6f&0J2Y1Mn5q1<(@+cffzmDnBQTuYGaE?p+7%;|Cv&kN^k z84&Zw!#d53ZZ_G&%Wl0%e8Js>Q#nD-!=)6oZem%=O^_8{*>#x>^dp28vuixw`*7)K zjlS%cL|aS6A*`EIKDW(SGtoE5G&GWm_%DE2w0foa)N#P zjdfmd>Jh>obCqzPBb@DN?-5ls{2!bTV4x<_1yv&cb@m&T*KTk=Qp`A?X7<2^vd*R# zaq;{Oy@Df$V$6ea_!>$|b4Fc~dpG(bz3y{6)J<8})7x&H*_&?OcS%w7rc~&kv%2m9 zKeu6br$lcQNt)oi&{qXT{E5l~Yra|}u<~m><+UUp1(O-FiPkEnG|dlKZ6K;0%vn}Sl1nf< zN3grP!NPO5Ld(#Qiy4^Hr(FbiywQ;PDLQXcI`(o z$V$;O5P!@Z|D)OY-ry`m!iwoOulSJxzRrI!7qj#;Wkx%&1BcH^dk;67p*8IAS#x-; znw-NIx+vN;`8R~X)60)&56kB8W2&dcl?B-W5X&^_b2^J!Eg&24)RcB%QUgdo4Y@^A zpEKSK;s&GIQvegHiY- zb{=e@)g!1dUe*GcJKUHS?na4m&q`C$2hpjZl6jRrHr<%p%q;*Wp1kcB!ZQ-7;_AiJkVQw>|hVKP+ zU(_uXtr%aGuGx)Pi_V(y93Y___p8K;Dj+;f`qV6a4nmMi54O#Rml@9xQ4om#YQ-2M z(AFv4wMa{Oq-pALH7^#u2_^NMGf;i-d=tG{m4dU{iHFcVOulNT`7y0IuxQoou)xw znxMs9GlMzhXvtC3%;K7XQHG_O$+th%bJjqOsySwId>cY9<|c~Hk)Td#c)XCB8Hv)( zv6SAW5t7H}aV}V;P3czhliArD1KxIxia(R>HbjC+W4?gl?a(cwdM39^Fvi_yY=+84Y%J5H_cP!Q-Oki? 
zGU#G0qyp2N_SMjWxZVs}nk#p?0W=5{-n4^e8b@Qym>4{LiP;_cHCS+YE<%`B(*vKV zb;b@FGEz-C7GOH;J6yL%idJ0a$87_XQwyo4^fLL*L@G=5R{smZTc|tH!@AbkD*Hfd z<#AhNtECL-Udc@A>M6zRf4N~CiLk40zG8-V`f)0Kp@`PSHD zzzHp(Gm;L02I=THVS$`HlKQ%N@B#{C#4c$V9H0OKa_+v}CibK<(*ptaJ1Odvs(|Gz zbiPsB(Fc%sZE7qwr}P}L`ive_(_C{BxvBXRj_a+O6L@$08n-3ZV|EShWYILr^j4$% z70YdSt#i&sS2>!y45~=z;*!m(JM`3yrAMS?| zB)MxUtsw1!pckB#MC6_C8}VCtQmlwTdZ_XQ$y~W{!srU-D^|83tVL5}#yy0um~^Pc z_59!i;JScvxhD8bjnTZC&#Zhixp-d;6L?|!SMfmAL&M}fqg{$xLR#>U^%`m<+4e9a<>UrmC@GJ(BdlE zjlpmG&A>M`gDL(etII__Qo5XU$a5Z#R`vfFVL$?_<$FAkg8MoKRK&REz~M$`#Q+Lu zcHJ9-`{k*gMri1InYu9mH6&C;h!xmu3LnuT4ariwrlnMij|^q@rzp0(W&dSg(?~d& zT$At+Y_(WHynH;$j5|WC$4!#bo>F?8nL6ME)&WD4>O4)?q9>DVgAiVe_Yo&J5zayh zj6ed^$hwyawA9F3(rcEiHe$oOZQ?P+j?DP+POakc?GG;^zHWYSW!>83k`W z4S{4=)Jux`qmOt5h83lGxTuaKq#dk}LtYJH+!@aL2V)J5N5i1g4VB$x-gQrQff}S8f%DN8P z7z>#_*!ZVOo(WFHkyaEI%|T1bKKs$8S5Y%d$~WR8(~*$$0`)gMkxo4mx{u9HCgDyB zOpKd5^hoz&=qplsV>}A#?T2$;@Yag91M7=`YL4}hn65b1iBL~K8=1iHdWhtT7GXH1 zNr(SCr6ppR>|i9Xv~-}v#PiM03P$~SHWm$tB5P)E6cBYIu68Q6t~`KgO${DZtBeQb zfIs)edBtknytfcLVglx86pfY3?AnMeLK)DQ=F(axIbjFp7y@xW@f6z!^gwvW@(rb; z<14ItB+CXYZZLy{EZ@-73J0JWm?FHIz>QM6gjJjfz1gfmP2z;(v_i@hpLAoM9>v3^ ziWBQAFo{ox*q>VTg zBX%teY{Wi72HZXV?~Pa*1K0C0AUf!ou&a%OrWkngUf#M$`_5_sWRe{U>5~?^Z_aGLYAw zB6i>G?m+Bj1QNTJrk|)^60swG6C0zgaAD*_A$Do5jX{(SE`Bn1!2Wp?F8yL8wu+A& zgF%|J(NE)kFgw#X5P`du6S(w^X%N)*Q@3=KOr>s5Y(f86zu4bD?pQ?lwQrySIh~WX zER@f7cK~Vo!?IC>4kZFo8s#S*NYtXiX41X^x=)f?h>F38l_ZuM3jNvzR1np2Ve>4YhO%3xGmun_;xWJV<}oYYi<%3z|` zfVJtTGAujVGx(`YruSJ8nW+j*I2TA{c3+9eD0Bt%EP%%FM4JqZ#JmrN2OgJTMC(~b zW75~MJrPKZg-8r#LJDcv>dk(Y&cld{erJ2C!eWvt%2dkoGln)B9yh~}4UYsev{RNp)ddGB zbJ!*#DC!*6-VK{hv?;}HYJkDP)~wjDvMJ1whc4(!--sDg;VtH&FVdAb>Y}dn4f1tf zq`3@_UbJ-;E7ziPm^8lKognpaPY5G~I0^R8xD{;O(qYn7oc#0dKKpSRGPBinK%XG` z7sA#D-Z4RiWUBy_ho$c?D#=55yJJ-qW{St12QB17?+IDm3Bn%holCU9d{(7ziGC~w z_sYdxz;4eK?4g<(9KA@Dw2#L+5XW&=aYCC_a~bcf)2mYz7pR8YyWrO>rEg%JOa6?%m()?hzD6}*Mfgl_)Ez=FaX8&s*hAk2G_Pax*s zYNA%dt!=kC{;-MZjb$reQ){E=oMz1)_vuiVwtJ}Ce7)P`vOfn6%x zIKLqmy?nm>{8zzr!z>HD8lUPbB-w9fCf2Yv8 zwKz}?SQV{>8fm6t{}-_PpmNadok>RtQSw(0Kr9yQV-kyf0Wrbe8bjyDJ#aW&f_J~c z$UGwFVsqk!UauM~V>kU9r9gmPbb1^izfAFI7vbH$nUdks4gON@BEY4O>tP|z z=Nn6$tVO9zU7YEiMsGBEH~Q?iltc7EFDxF(j)_d~Krg`YHC3K-od%DOD4V!i3#lmF zE&a~awZXKy2L!sIJ*BB(7%_6_a%a;47}Yz`L|}^P&>C&RuE-kXYIs9KhIVm3G+ZLC zvIzp^2WHG<)aj&6lke<_GgG?ROpGSnvs{3CegS8qiB{>>B)9sq+b>|-0u^G#Hw=5n z**5oWn2A?e2AJI0t}`(tdHW|%4j~I@2-tvYCWn#=51bYpgP4~F=L&QvXn+7Ol{QQ6 zM+KGYNt=i5Av(-FWLp55nNFi&N?RG>Rwz;)*ejIgiV2IE%0qWPcf-MG? 
zHK|{Jl?Vc<4t=HBF}0VTUY|ymbnP%v=_r)->J6BS+CuQO3=PR-s=HAUc844RDjrnt zd??Q2!WwGD|MGChiqr9W$BKt46?Z^Hfz{m5Uh^)9dL*0La zR2HlI=JvY3f%O-w`^GEOoyG;#{m~bx?x9z$yCQob-2t6U=QJ%#G8ogG|JWN}uW30EOm>4Od^S5MhG)Ud5DFd5Mw{0N9J|ud4QNt7$aKBM& z>u3Z;Ip}6OdY&rh+AbR-{lK2Iz)OQCYJ6$g=O1 z??N*}qkxXg-+%B2)P_B4k5i$5%8)P& z$)ZQfhwh_=D6LVv6Ci^=A|=v%3uMrTLIxcJGC2uHIRP^07^@hw;s=oAcQhKkJ>*{qa?J zbR-9}SWCJFZv(As%^Dmk$T8r)q32z4!@O%ByWw{N>zY)GWb~{CIgO_#L@APtyBlheS^>TmLFF-Fhp0#kxUW{FOlLku) zd$?6fcXJ&wWy_ldn<&ls7JDhT{J93Y|1u3_%ONZXM;txCv6eYvr^xijVm(bKrci50 z$_%eswwH!qt!#Jvu53jhQ-54GZ0mksw&DMwY)yTTE*SCZdyjGQQ0xdW;A7bI!-Un@ zs#IQ~xZ|Z0OgP>!MqL$}10Ez6+E`KAHIVp+mg=tQt~)~kd*U2D)MeTM-6>{QDpaTJ zFpHDxU~^3|U|SwaNhFgIUDDXa5+7C}*LP>HO>8i0RZ(Xj5fD#4|ox3Al`5}k{vK+(@yV2cnT(WL9 zNtgdz01Fbn2q3m@6%$8j-kZ4&;OIl2m zWx2JR1J>meKrls0F^JGHNJh2M@@A?_oEYUr8`R@MC#W_elh7}Iux?5?n}=c z;F#zG^g-yCPshFqi)Mx1-HvzTkOB15&aTI^Lq4?a4#C)2Fjv;t2a+f&Rw@~C+%!c( z;1aw++A?Bm?Bm3(-*=M^#2(y1-4mBijDlbr?<%KF(N^CMgs-w)OV-O|8c3#JVY0`V zc6SQ#_6=|LR&5Ev7D?J2>w)Nl&a`@*S7TazZR>79=q!j8c4XCJ5sYHx{V+)uE!|3V z$9lE{l#RuY(WXz?Emo|8F~uWRLM&d8(slHnNHqQQq@NqDNTQY=MJD(U>TyP zWuRhohm(qCjwd!KrKZ6x4wl&U2(LMoa%tr;%4s$ofGnT|h7GbG4rSvvm##|@m|ROrB+kE z2g%@C`-sgbuG>4T705&nc*^fT1)o6*)oK7u-MlKwan^bSpGDSrsL3uQ)e-EME6 z?bH@tJ(GL!cG-?D2MsYvEsmUCg0nQDW}?0fF2)MqQcYR7e!Bo$%??qH2 zQ-0}o8in>1qj8_rhHzzDh!j^|wDpGGRxAogUTAlPdr4;`M;QQrMjKwixxZIGdLT#d zm>QuKw|)~*s_8OS4?|>W72_zG{!i_-!9f;;V_XBCzpRK-JEal9c*VGFvk|7CQB53W_|{gYyK9}lZ&F@6dO8U zz?@tR0aLSNDS`|wo*clk$tQn@y&trKK?sV}FpstMkMQu>olUyA|)ETA->}?wsoR-q2ca4!3{kXbh<#X?W%_z0e)Uq?gb}P@AB`I$GF~&LP zb>vJ12*@CPF6DsrG161hNwVfZarfBkK#WPC!%v4@XlEYk#2SAT6*jv&ef81*cDy z&mlGWy4i%lNYuJXg5A8v{!ef{?43+y2aMh>M5nbd>Lf8$`c(ec4$6ayyolxDfmaCU zI+<77ci{U^+~uyJD&S^AEgrZ=?^Ettw^t0h*lR}vbGWvFTP5}AByo&|-zc$fm} zd@%=uqY9?O(Nw8UE{nnV6P{LJ+XN!u8{& z@1aVE(sGz;xKgb5u!;l&1n+*)g&)XvL8?`(7)u`Qca7QPOV^OCxQ1N27;guG7k;`K zZ7L6l1qR|z(-q_lEV#vrFhu3okfTX0)R&o*A#o3}0PA=cS*$pQrZSWI2(v^*W-Aib z4k)~Q;NKLJ#17s;Fh$ly!h@UBp_LGm5Fon3etw|Ssq{GRuU$P&bRwDrB+Gn-O>=4)rK_m^rrlpt48@iXST$gW;m&eO zt$J_!YwD!H(M@o_;+^ep2NJ>>tx&lJgkkNsA2Dh=vwPc7@&|V**kU*G{Mf3&{bhvw zFNC}*8ZZ-3k%3HuoXCm`eBXmggVn)tR>N=x)PnT(_Tx=%c^C5(T}bce9f8#)I8(ac!f z(`|511&+}qi~$K_K*AVs<>}dm4tLXn-dUhQT8wQmdkMAyVo9`&%8;vZ0*1Ytn-D|m zDiq&{_h#Wl>q;z>;ed4kvEb-92{x=~E7&U+)Ua_dZ&2I~^4%ao*n7_BDta?Gdxb8~ z6+@q$9%KjVQqQvU67T`!AEBT)sSe!KbslW>9E`sScgNU*YtT9DH!ItB=M4X1asp&P zg7^ce<)HsKItKR;jDkDPw=)Wm#1-$LXE0VdaUzMOK~E~Zi;%wDn)m^Z;cWR)jPwuXRS%9rtjGDXTy`I84rHuEx&YGp_T_SFqN--KgTI-fQs#H7S_ z9F0tOuCYeC)1QPKjuQG6mtf~j#3b_XgFq{RGiVArc@wcz+V}q;!0c`-(m^-eBj+F~ zZwxU5I?LCPxx3YIsc}(){5=v@FAN7zYWdAcwEnc6qQiiGyOYJ@5acgr=2zJN1!6E?Bezoz}=3i|?P$645-AR+XfEVx7$cvD#%8?jx zClFfgy#`WmCaI1FOc(hK@Q5?RF!?wPjCD?TUX}A2BrB5uB9e*G0Zha(Ht@jDRP^dx z26fn(@Rp`(<<(%~oS{<|UF6qAs)3p)D<_uk>>h!R z_(wYH76hfwN>UXR>rpRJ7$@Rpo{8>N<3X4{s%ta)uV)domG5bK1AJk>^LRLy##3|5 zBT+3}6hfz`PZ^DJykCi13_9;5z_3qW#8P_(v3Ld6 zAB#p=ByN4!kVp|0mX_+B~Ja1;@3asi+6pOFOJ+Lt{TZ)Pewh0Fc-wx zL&Oaff8kbI_fve4YY|Y6E}B`L&pzsXB}9li#v;rPYh*C99z{ek?m_nY zA)m2q^$9-#!o3*@|9}MRU~Yh!zGv_~2e%;`lwv1rR*I_n9pd2-Mjtgs$>S>&9|hFk z)E{=Fg~mTQK#4!Q8fi7cts38bJbhGb)&YFKfFD)LgD?EsD}@#ss8l1WFN6AXNHS{q z3rHT_33V#{bX30hOTu&TMX&j}>4)E-k#Vj4T9hKP<$0R^!bYOta=#wv^40+SM7Alu zQRMq8h?4jEeE)qpYf4xhyfKJU!rmss0c&{JQ5dKzx%1Zslu@Mnfz~AVuG6LUqjsLY zxB{k213p67UB7(&;>d)>fMSISy?Ix|`S0L@zI_&B1!1CIr!bdh4@pAJ z;B(VQfMNwbPzK+~7g!1>2fR%1bm@;FyH0;nM|sumI(~A(SvD358u_~ zjry6w|KN@M4iKF<|4mqboV{nKrxd*#4mSa7yMCPI*>y=jU^Jz2NuLk`xbw^puMUXj zhJ?2e8|Z9gt`{iDOS52U*iyUm%;_!rkU(UA=BiJa4!HA7-Rtdr_{__^5AX73ehRNG zVFng9vuf8xe><;5#!+$1eRfzPL2qXLu1ojqf%}j|UN9?zvwqn1V`d%lY}k3G>D3_` 
zR>-DT6Z(;_cUHr$le5kM9%AOhv(D{0nfcYOle51D8VB@dDa@{hTO-0nE_x1ePR<&# zg5cR%Va$GZ)_{A+ekpS&zCcZ8bwEi8tB;encXrex1RF{ZjoQ_8GU^;2G23-9u4-4q zFA3eK2EU*)8nGE?{lh*2HHpke81%;%;VrMhq!5sX64oiZv9myzIVaKu-J0PZKi{x}QM5X)@HCwi?SYLmd*|#34Ry z8Ff2ozkt&rPrfO(%H*2j&SxC$%|Zkc?2C7Nj3j)7OLKxZ9LF715Q#I3W^BVfrmaF2 zqJAWxmm{ui*jT}U;`JRX7y%*?b_?Y$TK0ImLEP2bg|Yl_sdkSE>Yyc$=UnIzN;mds zIMxE5*cv#{Nu55t9wy2lw%SyFx(ow@Tr{{4#(wL=A&Wgkz#Qs|cuK%0+~aBN9bSb> zvTD&Z3tkEe3SEnhdfaIXHQ_lh$kBwm@n|;L$wTKkt$_;}wWbC+yw-6hL{sNEqcw+n z%;C5y>h?57Lmo=O9h2#T?sm4{kUUa@NA3-@pnjfn(O3Z?!=#q*dWKDgJK%-tRttq@ zG4xz*52)Vhgkz%v-rlAyLK)E6&9nuGqG#c@P@_my?7ahEmEjXI(0JvyAFxQMbldSb z2BF*A3lPz~5r76jEz;aAZastJ?Jcy6_iB&_>>(xO^93Zj_QDog0_V&5;Rj*XFrOpv z*#nyv$KuGH#oG-zFF1u!;4f&I8l4{wWe%u&K9E9fW7zEWp-HLtG%%0S{;M1Q}tkNJrFDAS|5MQ`g+t6 zYrA$m0*-I74-4CUktjMY>9U>DdxJM17+$xYi}&YoO9Z++ZpUgH;*(fF;Z@Oe(E@S5 zLndgHw~7|9Pk)0;8?Jz+U(=En`$IZZNkmv{AUFvQPdfT-zGd`ZI@#w#<2gM}VyUQG z(gzBAAqwj31`C#Z)wo*R6pN6iHJ-tZV@T}w?NINvWNRMNNHz^#yMV22OtV;d9)&Z# zt;LN4_ZF;)h4@YmUW#YKm>}UT0Y|L2HtFnAZHI6RxdX9uq1)vJJtEw|*`@ z{BMeT>oL(msv*P=ys0VY&3L;=oOsZCtx`PX;vQ2bRyq*?EpmFir?HdN5MB=n*VOo( z^ibi^w3Pj&{V99X^~du~xQs1+P))Ft$=Ra`=i0tN8BH@DR}s1krDG@p*i?oxBn&v1 z1_rUMaOYS^nO+fH(vKm<_=6CV8#hrXnV^M5m4n0(0SqJDErk#ips#K1F!GxE! zYNZPxK-jfHuLkx~_aX&f)E&cs*<)B>-{*_^3T8)JC;MFZrUs<{Ioz$-btGcf5m}5= zYnbEs`CL%RtFBXm8n75X2-Xv7;R8y|#Bli37j;uU2yQZZO|TV#4E&w}?6Er0Uu^jptd{=FE)5ay_eXb|t+ zFQE~HS31kUF@|z5Oj}(@XyQ_a^aFcxb>tVsJ_3g)O=@IflGnn)DtWE(a;oE3u#hgJ zky*1=`~(we@cT}DjM%=eyFwVfl)r_%fx@k`M?jtL?n#Lu&K1MG1zU+o6I<@Cn}}uI zwwb6aJwloyli(%OO<@)xY?8D@IVkM|?JDt5&;H%YPw>_k#TbWCx||oHQyR7jLg5A4?!inlv2yQWD&d+K>(2u%pzz|BOvStZN?V+HhZbdg# zDiyrZI8>8xouh~!SfOg73z0F1(**khWr#k0I0B4>*z8 z2Y?~gQa#=_Nji+>04)Z<-*D9GTBHAd>fq5VRCKn)@JF9blg<_WsF9rk>gPe zy7h|((mYKXljF4Bv|XSz0cEH&RqjUS4LB~U!+~gHSKOS$dIA)c6~|QRfhX1;=&KYY z#8g?05^V=rKmc*;xt^7~DZma|OF%<|ZBX!{t;Q~v@1b>#x3ekyh&MF+h^hPxtxrsj zb4J?$Q~5dfN-XapOpafS_8v$v+8cogcz74f&#mYBTBMyO9Nu9q8T*+zyc+kd#fnF1 zf&=PK>#^p-+7D^AxOJ=+z*1LOv?wHbG{79nJg{zoIfUGghkpTVHue-J9>=O|2o=s2 zi`O!*G2urKGG=6F=JuYZ@KfHd;#O}rDvCFfd`#tyR5laSV;Mr_Z-YJ5CMpIE-mKi} z6}{n(y+&`R^1W11idHsu+a-jv-P}@bwB3LK`id49xp8;c%j8Vs#c_5AoF9iXLoRP3 zhF5P_#j^yS(q6P7D$d*P3syJQbZ4pQSfhzs&xVT=S(Apq9KqaKTe0BPf3ls*&r<0f zUZd@raxZJ7aL1)9wh>KCvx7uM6}{5ZPSP-$fh}OgU?e6;$67nz|l$cXQX0=q#&4{Y%FlO3-z7~ z=?A>vI2$8fZ1L!9y>MG*smO$`<@=GK<7}(xSn0skz8KguI+m{twRe{+ zI~s$x}KAIHyROaW7l@vfShF3nr6H7wEkGL5&Szs1TN2R#p=^8&E~%F zcK-g$J~8gS!Sz4_t|<{NAa#5Xt|;wOd6LjA7mlR~wwf#|+ZTWw`af*lrY9i>EX%?! zVDVvrayC#-@g=7g#~OMenj**60$*Aj;zU5EXn0bc7*cQboH1CjKBdDjCbf#q@VkgV zxYL+U7SMhq_g}CAYye<&DJiW#9N*4IlgeTo zL?ar&+cMFMHP_<`WQe`M>@*$|?*dD#dq%xQss%LG))?Ib5x>T`8H8ByCSY}I1kIzj zg&AU#O>A<48GVVCzJn+?aStV%w)yb6fZGYahHTIB+(crOn9C35Uoet&O5X? zVADJ45f}myAnSTk_iEAacpUx?t2TtFK6k?L4LOAp@#juYC{9{9X}J?b&nT1*PCD)c znnX@|9Jb-B0bi`Mr%~HlpJc)=QGWxjclbTJcp5d_L)XEhE3U9|4{ZmJaE+V0hr%7H zQ(7K5f;)!5Q9gwMhgA$NYOt&vsS)~?Dy`q5YpK$a#iDmuBeX45dcP&KrOM#9gy5YT znw)VqfEV7B*?B0S5wj6iU>-ZhF_O?}=s*$%!)3)D>g}W0A(T!2A-GAz{C_1-{xvi{ z37)UR$_+7)Y9yyFEFm04COfQ#5XW+(K#I&P`mLv_!(^ZlEs69LDcTH2Fm(Mid-m~;^90+CMY45Lmlc7ai=7`wuV6&D2vhcQGkMo1x! zMvaZVLKQ>fw;K)LIBR)0p%({a2p)-__-@{wWbQ`hNMNlz*7qV-^n{Skk}1R4i7;u( zpx1OU72u$dJ!`^Wy`a08I0J=jU?2JkgMc8UqAHTTo-H(XxKEJPnLaN_v+l5D~0 zt8~n9cDy`;Oh|^W6KP(+hyoqvhg)>GC-NI=OQW{FSnee)f!yRWy*;{f(d4j=D-PqbdJ=X&RroWZ? 
z>p>akkJu>K29fS!lFBw}fNB=8v{U+dHuXF;b9^@ci7-6-MV&Kn0%DrQ2d%w??A?@+$n0 zM4-}I)b{C;34H?h=Ne?%eko^b95=>k7Acyjq2p{@ngkJZ0gu==*sw1!>e6#Ew5`X| z@Gy8zZCY$YYEJbOsTT&?L_N?%GBqvc5RGUH$1S4}4V2%fX=>AwkWJm)O-pkx`}Q@R zMQ$kXmG;Z*L4;Ik77BI&Ph^7)K?mN_0q7G=b4}FBxinlY+QIoP$5?qS*SJls$IKeH zxpJbG-Z+Cce{>FZ1pd5(@J|@UE-I+^E4wIax(@RZ9#ri$m^AR|(&>iN^uf_-@%8EA zgu{7hQvV^e{z^|dT-rMyI}4#X-SwQ%4DKQBO2gHT8&8Of{B*#v9u3lz)7LMfsoYj% z5&_5MOC^_mEfcl*_Q>*-oYIBhmSKRC@_T>l%l((4?6@5w_hg18aq`m%EHNr_hfhHH z;~P>PZMu>{EuPM{UioX+gxsp_ZXZrphJCegvwsinPAXRQ_82;Q6%~JaFADzlA8QA;C_q5hH{)~ z#xBeRo+#EOro%X)vxVl1N8KRDcHCaB(!*JGL(RSux8XW|RkdU6+n~*%A|8$+&S6zL zj_#3SDw30GJ*RcyIBzY4vQ8|8ifge=^e1mp(ua3(gMeeSs(kJOSzr9u+7rZ&_lS4l zB(kU~RkNq^P?~6|OGP;-Tvs(PypPsHq~NgRX+0L}@zuuGBU*cSuzDdIFQinZV^3}> z!;913lCIenw`l(crA67?Nb1%c!B`rZmQ$CmspVGN&Qwx*e5OM`P%Z8K!Pls|1;POT zArFG7((Y%R0jglIk{BDN%LYj&KN(HhpFuIODuyrfNvbPP#*$MJrXlxf6fKTCo$1N= zCYCMF6l@DH(_-L(PnQlqOI&#}>*+|u66aVFt+h{Ou1gGtu)p>tW$6gA#4wO7w8A3G zAXw~!I+8Ik>Fr*m%@&bql50K8X%f?Y z0iBMty`&e-ulp-hg3M>!-K=7v#=_RnDnYff0{M~aOwR-d5>Gh)l*)WLL{9teQ=Vy{33M&Y)YLLgVJ2QZ>c>m9gWgONv9&TLpo?1zgNN4 zy>1Kgs-zZEiPaTT!jR(mBpn`JJiyj02*i^f9Cb1KKH|b4}|IMQIiExiKA_ zk@b8P#U&k*VsA$)dCcnQ0mcijhVg11!#a9s{V}?A@PHgL8wYLRBwe`@Uv%&(TV9M; zcoZe^JK^)Y&gaM1%qP?ojldUM=+AR3H{+HDC{r1VlbX{VkGXZi-PiFy6YmBa3A)V& zp1HQ(yat!PO=XRj{ehzT1qV80UX4H?d6XC?fUnjn#W5-qfI3J+lqtX#QJy>iQ-O^% zv~Y1W|77u}!}=$nc9J25beLrrI;;1gNdH@XIz5ABJU+cS9J5Zbw>_=ERrKf^t2k`r@Fx!Se^t`Ios(a2$tIILzcQpTlAf z%Q#%e;cFajmn6qZ3|)cvLL92&%9O%ui90(c7Wn;<^ByEVw37G%!|vQH1PX9n44bGwSKtRQ=C zQ26;l_60%qVs2N*S6PsKEw`)vb3M1K{bdukL!O;|+(G{J+zugL_K~^0Key{XSNac@ zOzacK?W(^u$bWv2|7vcJW=4LwI;A zw-4s_;voO^+#b*UYq?!bU*>iL_t!Tn`3>UsWNwe+_W9g?J-4stb~=iq&t`5{>2WQ$ zLu{LU8o6DipTYF9IVimDv{D{5{Wxw{`{45xeNkSHt*AtGLAnc6%iOtn+4BmNl#%Y@f_xjjJi?s{P)AoXt~C5%BD2ri5R91a+F zogl0Lya(T9_zp#+WiVV1|9E^8@EwWo5PZiW(ikK#6y|04LREv1wJ6_~J#RtYFvO)# z)}nbqcEac|AdbLWJNDS-E-n+4coCLrzhl8r6`qps3Vd!pfROeIdK#>hXE{fzb=u>ak2I)untm|d8CZ(i;nAvHMM z6_HB$wd3`7m}|#=hj1$XgYX;}8sT%77G^KR&?qP@uoZxEJeXHv$S*1}sQp6q&$i_) zDX;}}|G-d4$KQGWV|M;n6X5~Ks%>{t{BXDr8y-9#qH!lFiaq_F>yh5X=#A3A=_q9rj9^=Q%Ono zQjS+1#ae)&UXn9^X+UHa!Z5LDVKGr?!|n6#!;E1_104&9Dpu@?%Z_+1QhAz+SBB$NnwFy+H#9!w?7e}QlV+>|e? 
zzVJu!ONFIKF-Mq>cuMRWVM>|+p|0#1>j$Xk+=S4{Qc1#08rKbh7s z{5%Ycz_4^)66emIel*OrlMo6gtu~DQ1pl8VOoMQgm+}eKg$f!Symip= zq=WQnXN{l{!a5^FX9^KY?N5HnN7qT{L|^ttPMvj~>C66DI3+BF*Reke#M4%C@TWyS z|I<$=mI?iapz!#h&Ke!rJLy7oLWq`XuHZlyMt@4xsg61r5ssqa3p{jYc<9X1LHRi* zC={!^zAP_7Kf?wxQwkz<1|~WKBlJ};Kq3EctA8A*hn@8ij68he3hfIPC`M*S zE4tOa2RRIYUFFS?9alCw*j=!zbt8WgT?@279%Lsuwm^T9ehaie8DuAcxj_H-LOa^u z53)y4A@HF-OP?+Z@Qo}!QQuSjC|!z6kmdtQm&9wxjrPku_?O%V;!Ezh&&I-;xP2nGX8=;1JGuKb z4(|e_^k?Bqc`v}1(piYF0pDVL`{P@JFO{_nUn*-khYtgiy8~ZJrvhJ!kA5v&hwtCH z{U3l-&W-p|KL5m*!hMJ@mFob$x2ALZ<{0(6y^)JnQjvbvkM8vO~Vh>l|FEbE5(m$C;!yj%&L|7 z7Zm0)dtQ8EVxkhw|NHiLO8wsBa5IP7ICOKklfzmL_i^|Mhm9P{9JX<&{a#65&tZQK z4IB>UFpk6F942x&io;|M$8c!m(9EHg!we2HIn3p70f%b(Hf~?WVFib4Io!bECJr}q zxRb+L4(mB=$6+#u860MDsHR`c?PVOUnoIu#v-` zIQ*4E-49B+3>;cH%;IpGy--NUs&G2i$#;P_G=08Zm{d|AOv|Bb}+1ND_SI&^Kw|ihy&itQi@l?3~)vKocVxAqk!li(#WI0NC%~byZv5!Dc`Um zO4Wr($c8)uohd)R-IOi}8TisCJ$+yuqQ=@^^GM zp_KH=;E>Ge_!|kF=Hhp0Kz+!C{)S_=rZDtp@Vih*CA3wr3b&$+3-McuKlPJ`R!Ih8D$L zdj^MD9B$%JE&m_!3#b=T>)0?V76PhV1C{6SNAZbYu^=EGaT`V<^ZzsPZu}GR7NB3} zAoT^v_d!sl-GtXiUt9JrCv1 zg-uP7a7}fm&mW~nX%Gb~#HbI9ua@YK<1aX81mLPAXM7gNV;wzNt%IQ(u9Hc>*w1EO7 zAT`2^LZ|ZKLp(GZ6EvH2Y^nCTQhVhIc)twsJM!~7mM|CTP>(G_o6W=Tk}H&x(xT6w zNiVR*RA$0v$5GJUCe+gw@m>|k?+fGu&VxT;gg#Vql}AMOE8~w`i&5tyge6X50x(H9 zyAl;qdbZkLY8g7#Q`!INksq;JvrF=(u~Q-N0%^}j?ivo$CZ=Z>`+Wo4>3AvcHn3o) z7o|;1H6yst6JJwtaYoUCf}Ew`PI6|~^t_UV1%)_=Dk@CRv&}D}<-z~|;TSMrcI@~U z2fl{#*U|mTHY-}|KmAo;2ORv@F?>1p&gx%?@aO&lQ+QZt|07=GpZQbc8~#7v56a3P zc2rcZdStb84IT}D?D4fvJo(pkPd)w2`oBH<-19H|{lyLcc;HP= z&9~lu=iPt5_x=YTZr-x>qir8=-{JODRqx!jyQa2o&)$9e4;-v-ICS`vPmdfucKpQ2 z&p!X+RO9J0XTSXF>u=F1W#wu`@9`t>(Fq!1FS(S~*Etn1P> zJR-82sPEpRXRoN5+&OJJ)J|p2`ip}9tNZ`I0{=n~^&={$(!aPX z`2C&={$D@=L(2VsA%F6tQZRxKhqV8NhWxodHGW`y{#<Q7df2MZ4W=W5#?N-wmV6%({k;d8ZfICpudu!+wSVSqXKqut^RZ~!J@ z9hA)dRQnCW%|e=Bf&XnnsxT4zb2Ds$aLlLd=VT7sIMfNsx#E3Tc_m=Cox#(4ke%pG zL@LVXeyqI`gsF^QO)iT6bnrWHxfiFr{?C`;(}{14Ra8#GJvH}Jnej26`_4A0{`uiA zGTw3a&s$#Kv2k|w57*`3JO`iL_kSA|w)4;<`A=SqN?E`bQ%Ke@82VIak8|eW{{Q@C*S!mCOFvuqhPQ-a&~qSn*h@KkqBou&@=(M6;`68O zj~RFFg}Jnl6NE1dFIVrsFJfE%jWMG~XhJQ28Pay>ftgdPs!w3Sr+<9JCf&q$Ut891 z;A6%EYkmwru(M)VxOZ9gx*>0*_MlAreSTv4i~DwD&-(23*Jsy6n8SX)xA(S&JKy^7 z(si!Kv(^z~94!BcH6Qia_}E)BUwv_Mr<>oMI;U~&XJuVqyWDI%{@AAavVQl+f-#bt zoOa{WHFr5aoiOFg>_M*{AFChpamkNgJzHB!@8UVC@;?S%!` zKNQ{nOoldo)aN6=AC{5xw+HH8>D@HtvA7YsK1GG8{R;aMwv9IqHNK(ED7p6Hrw!|_ z`)L2&`A?5XdFkZxU-JGn?Tf|Fy<~WM+Ql8aUmvn=YWi8NP@YhI>8JWZcV@(iAO3iu z=UqK}P55m2oY6=A)^}E0|KulyBfb8;YRlr6AI!e|VUPQ=!TtZT`vBLy)iVt->u!~9 z?(&?jp!nnMd5a9*okO2{LB2PtuuIen@fU7z*1m)76dZv~`1Ad+?hX?q5#7YYsX8_6oz`(jg@c z5T)$-gb2r|9 zeAin)Z!5?higx(&r@3X9-fEcf-6q47=i2Ui=Jl?Z-wb)^mEltgp4xHkb^UkNymsi* zV~c8TZitw1;;(C8sCjnN-l;>P7d1zAS-q*%rjNPx*Y4J@-gqo$mj0W)&&UNwhmI@x z^1W5Bt-X8D=%nZEogbX~MAPJi#TN^o{l*yHduQ8|5&Gu27rHN~9x_3?Sogvc;iYda z^MzF=Onke*9-lD0;;vWp=VR&xXM8ZL{*&#OGH-Zz zv!<^lO*6VKz3;z-T`_0eX-of6R+rL{@J>_PpoRB5>r8&=Sl^`wN*3*2^5T}SmfogW z_a!48g7A-~Pi8MDTK?s8-yBc5_>ZD_i|_b9*n1PWs;ahccpc_RJq#+KCeE{>qT;~b zAP%Ugh@?2&2?z=bcmz~5OEgo;R5C3qD{>00luQfF49CcTWuENM_y|;VU-$O z=k~`h2l({JKKf(dqkDg7s?;6*}l=*zWU1z+$v-e$Wdb8L0=(6Z-KM%WK-?`Jq zp?fZV|GwMs-xFSX`r6KKn}kj}A)c%I?v)uo9l747)57_b{mb=dCEmYve0s>t{9iix zJTocsX4j78MK_k!s^guqGBUHEPRqDZZ$lrq-04S}blA}2&#%tDxV`gdeZwN}o=SCG zf*)&lyfpXs*P;)^?@!o#`t0H#e(hQmpKMwA`5Q;Pr<~lo6d%vRVe~HRKIhQeZR*{0 zQwQDnZsUeo>zeIZzhK)X3`J^El~mnM-NyL{uV zY=8NYsr#AnpFRJYPv3|BM+Q_}2)=S`v2#lOqjvs}%6BMR-ru=@+8-_rwaFP<>i6b&D_!Vp&Ft+WfZ_nGceRN^Kw@aIQxBBo{vrqj#b2!lM zh|{NsfBR_Q`5~@v9kx4vaM_3}opVO*Use#?DX8xg4oiG~{^F7MgTEZ>%$_*;VfT}h 
zW=37ynLvx-@r9H9-3-TyFFKa?&h$E`c6?{urMI@lUphar?QVybE8je6ihs4mPy2Sx zaQA7_zER`2Dc(hEdmnot^O^FvAHP3*sNIh#PCXVEHUHh|l}W#)5AvIKZs{vC7Gyou z{f+d4oj=X%ez0%rl4Y~EH2F0m-M9NM&nk&o_F(j(tRpA(7xhh_cxLpG zaXY&G^2j?ie#8O)bGugUcxzbsyHQu~o}WH%qI0`%efuTE}4;^m($z(lXD&NBECy| z*5PvV*Y!!1mb9?L71|#2AO~0M=BXtNj$Xpx0P z4sOxH!Tle?!6QXDddv`xwH64++IhmU&O5@X&KBW>kG?s1ekYtg&BEF1x^VV(G`M*C z8eHmjFu2qUH@MatV{ol+GPwHY8r*#I4Q_rr4Q~F&4Q>toFt|5xwR3OS!p^-BjU)6R zsti{yV=E!@7a2ro-#Q}aRlItB@s^`F{6_CU_;+qbE9`_LXo($2tLxI8=4m{BiYQ+> zkCbyiA!3=!>6Z|3%xPStBbhnR4@EX}GDSLanA7t(I&zuQUJN?&n7c|URx{VeN4VuP zr@aew6foE3pWubeY0m>4Ma(tx1TSVz`#k6cZTQxskd4UWhMq{k@9-=CoIYj@HcOd|QY>=1)j( z5zJhF?u&K^El?d%uUSwnCCF}XP(Er0rPz34Vf1*Z^XQq z`4h}bnKx!`W*)%Y!n_G{(NNY;Q|3nI&6o!;Z_YfBc?;&D%v&;#V%~~*9P>8JP0ZUe z&tcwoj%=4LdU|z($6Y~=0U6`L{9>lzic{k=2%)2vpX(Y?vgSjvBVCJov_hcT- zychEb=DnH6GFO-Ixf}BU<{r!gnH!mhGWTX4#oUK^ z9P@h2P0an7=P+-?d^Pj-%nO+JW?sx(VP48SoVl6#ROS`T4LqT836SM=VD8J@nR#pG zuFQj(yD^Vo?!i2kxj*w{=DnHcFrUgik2xOvYDYeE2j+#$otYOicV%A6+>N=Jxd(F# zbARTdi7an#=0@fQp5O&AcVHgK+?jbOb64h3%-xvBG526@V(!m8hk0-2tC<^k!dSrE zm3a|!H|8bGJ(!yV;`uWT&^z)g!G~@EOm*IVx zyE1Rh+>Lp#9zIZpkI=(2kJZC7Pu9Z+$?(~Fc;>l!c;>71@WC>Cf$q<|NcZm}{Y!Lz z=BIW4Q0ZT$`!lc5c|Ymz(p;7wH>ld-tMf?7TkAYZ@?ho;b0v>p?$11yx!iRquuo;O z7LfK@(V?$DMDR{Hx#~gptkR$MfzUy_K_sR#(&`9)X%stDBxY!s%;Dv(OaWgRj`riw zLF*rM&^iYl=^Sr5a}#s?>XqhCyR7J-eJFI$J}Wvhc`YPUOlLk*L)wW&2koY!gZ2W^ zk%AJ@LGR$vk!dZDOea;_RY~swNIr@4Ig`tmjD19O#G}UONaFmYaX#Wv6Ld`F^Jo_q z9m#w??IWUNDuUk={(LN}u3EHznHAVK&0c{=zoer{#4x+En?|aBTI$%X>r1aKS!#ZG{YbePj=J+t7!W9=_nC#{l1IrssmQtbbqohDsMEOPj=P;Ob<`?MqE#y?2huIhbQ}^ z`qRr#cGv;6smGs*S|)$JKFBURAh+DDkvhc}V~vmOl=7#?NA^nP>&N+{wLjXgTG?Jy zUwyUrS11VCGo`23FYRrl_S4fR`|g0;>Gzpr=fw5;BYUUx^zxD2cR*+p@A<)3kOPX02yA z9t{#BIDI*ugj@TI+^=1kKiYRosl{@5IiB>jwu>AeD$keWL#2PZR({*@f!Zs|#!jgI zM#814ean7bIgZ)(gK%w(P1SNF?X0r@OFQdpt#4bqu{}?(xBeJw>5%o(-)evIdL3cy zPqLoEtofGp6lNW_WIa`mT(9 zaO-#{)33}g?GY#I4Z~mC@huZqT*`;uvN9h-t@)JoKgeo_GMy-^zf5N+w=Rv_WM?&} zLnDB0p)#EqtNqAyD*Ka6r=PX_w((YN*EDS;sG9%b=t=UB@ea1(R1c%9<)QMEi*6Y* zUt_HP^89e?IHK3HeqE*WsU@nOua6=$uOi>R)^^qFPakh(dV{Uyl;LBnb|k}xS=&{H zkF?1T#XnZtPeUb^;q~68_qQQ7{uDmSW*nl@hr=f1A>$9TDL?r?Y3&!%zi+kkqpJCj zu-b>rk3MSZ`5S4yZcG0N8#|!%Xb+M+q<@sP{FU>2d8H@2phJI7tdIBlbNTi%8j;^c z-iOH9HQvebm7KjrA^V?ZhhpYunU^yEh`E`0K64B6<;+D_8UJ_8jm%Fn4`6l4Er0If66?L&$naVn*H^Dlg<7P>>tej`n)EB`C|6Z z;qZ>kW7%IHpY?S@eH}5G{nv8%Z00XB&t
      1yU5vws2e_x1GrWqI{^ViEfn3G?rnpJslTc^UH^%qy7h zW$w~l*2f9vzRcG%Z_WHe=E2N=WFEo%73Q(b&CHXTf5tqU`M1n-nO|hSn)x~A1{V&@%Qw z&e@TU^o%tN`qDa>=(KbN__ z4%&nHYWAPUyomGb!MuR|_4^Bb9k-Jnp8d0!`*MByGB08Ot;|m|AIrR$)2qe2jQxi* z&*S)7Gp}I(Da@nTzcF){VA($L%=LW;wVC^}KR)WUcmm(GOytCeV7-q|0w-@KN){r<|XVun)zww ztC^QEe~Ebo^LLoL^pf!v=-f~8ddz*f94hJKc2b%9RDZgF1=;_?qeRn{B!2P%r`QRV*WbwKu)g#^JMmagLyXde=slO z@Liebvi}_BQT{SNq0Cpa|BK9x>>td$fc-%VWGB0BP`OJek{f5j-*#9Z!roL$e zR0rfYD{qCgRFDWk_wD;4c zzg)LT#8YmaQ$8p@TVDA)A@VsI{k8R{Uumey_3NPi95{{F)8snPWPa+PKi`SR(_wkY z=Qjz~{7SB`PU_(&TH9Ir)BE(|3m{nM`j*z$>Jv#Pwhf7SEpR}XB%%inq-7nxpVdrLmq+J2HJTkS^j zH2gM!ZG7q0_LKhlYNH;%$)-N3Jo2|{D$_Sv?N**oPsMHHpHa9BS^>rWJg?I9p>suddU2HLcH8_D8ussIQ`vKdn3IsS&4fkp9X;u4~!)%l=mB zPdv?Pe{%hg{>nqH3+uZLr}S;ZQ~Z_Vfn3L{%#U1W)^{<`?+B8; zCs^xCa(#D$!1Ye91nDD&&h;LzbG=8(b!vTAgxnV)f2Bi~Pp^-q0wg}$y#zbvEs(>1lSM(BAyaXkioeMC<|=eGV-qk4|WpK7#{(>gl+s*0YT z-hb$-Mtc~fk6hn){CX(eGg0p~g_rKO*H5{gr>9QkC41F#OPtnw=~q?s{7@b!guHUg zb=u1MmcJ-u%W2(T-<3l9H>mWL_9OQJ=(|_Ix$&zC&enjgvO7i8oid8T!IkUYzJ z{gmtdmFdf~Ec{r)@)t+LV3}8yS5)(Nv3mcdPmI~cVD*uT{5Vscs;-u5yfQjb`{$N* zpZ@wB=S!ZJZNs~*n%=s{rUJz#!YJ>4$F1R8C<#BPf7;vnQ_xVG@KE(~>#k<&`PLVJ zbT19#D*ZKH8U9J_I9biV)XLv@osO@pT!uerOeeNZC5&6OGm9{P=E|ov92zvAFv{i4g@nb^A1x-# z$$M%Ep=HWv%LwzIb9aBRuYPl&aV=d?*H&LLd*K-*9ime-g<-3wEwj= zg!xXvZxa?CJoS#o4ZHFQ11>Ipm$2CG$+a3zZnBOr&+~T;a|}i6iJOBLY#=o59I}xx z^f{mR2#Z&r*Rb^7x&n=Fp0bIsbW`AFLgYh3@y4$AiRUeS_5(uW$*>Oz^QSv(Av7f( z(y(yL;;qDsTQ}F*-_mlShIz++(lD@n_($X)8njl!xZlJ!;>PF+8k&+kcQ^P>t?=%d((rPEU=cVOnn7`$;hNz!i8eGmtJ0~Vc-jwH4Oc=*KTq*xxB2Q@$wZ7^PlPSIl1SYd|5+r?TUu^dwcI8_rg(m z8s@)$S;OMu;4ifBufC*VU7P8s<$|qM`BX#~KC>I4^mw!#)ac{-C{vrQ2gPw7fG{!<_W>8Wsi|(J=3m z8ycb=_tSX+%lb+zP0=vtcAkcDh9V8kmLD_>Jm`2pE620#H7sp7Qu6%S5?%8(6tRai z41DRDhM^n1wDBo$W3Yx%FFmDUUhF~*L)|xPXj=cBAxbamw5nmLN0Np) ztzVSbZmWjox+kRn)d~$w?>72UtB<~YH4Hs6NyGe*B^n04wpBypnd2Hp*;i;7P_JPL zotNhss$uc1i5i;sE!I%XeqTe_pN9GQ*EKYG)zik~!nB?m=4>0Up}06t!>GG!H8kBn zsA1vNG7T-CyC0_Xa-Qv|VO*OL8s_iG)X=27s-dNzNW*|fXEh8|9!gAU{572)IxkE^ zQ_e&UEve6ISlI79$(_H_u=tZ-H4Gf#afHr``o4q2LBll+eb1y}VEN0ExB6JaoIxiv zG{)Z2P>ihi4V@p?tB1teu^Ni+vNbf#eO<%+db>0%ee0}-7SH<{24*(+md-~%(lF}b zI1S@k&ehP|XN|=<=L6~fLxb<={LJ1I^j_%aZ z(&dbX`3ovEj552IYU$5xpkbgxI}MF5^wKcYaj=Hw>@gZzeoEHRG$32UoczTa8n3^q zVPNl#8d{#-p<&ea0~*F%J}&(|e$ueG-E|F(;fAA>erU8&;+V!72F7;OPz(>zurwr6 z!~EvsG&I>y(J<<4wuYgb7Hb%1dR4>xP8&4D_@H6&tNS!8RE}zB`Qb+mO|$;cFwW(n zhWRhMAEW$4dHZW~P#gS=&zY6>MYiRD-T0>*o?iyB^pCRAc zu{>Z~)7@^{LQd4`*fpijM$H?Z%Yr*nn!;p}!By&YVX1IPa;uK${& zVmIi-hqDHM7*ad1hee%PTiJixY2fz>7ejn@4{dzJ;G}%<^X(Dl8^4D9cEkS(_c^Z0 zmv+Ub7uFSrjQA2OGaZde?euT{5aa49TX)!-+AR7pByC;yk9UqXD#L$u8eB5OTlsMJ zyKkQjucx&A_0IP9CN)z2d~he&yzO>KuElgH=9e2GA0O`J>YCx8y!Ve*SC^X_DOYz+ zpY`^Sjg>ejxA(qi<)X-Z)KkViHN|+lhm$fSVQ#ti6fdP$Le96_6j$Ykm34nP8t9{R z&R=dcPHm)wUEk%tW^p~G-R_M~8C?DfNu7M?n~+c1DDB@ru)DooJEdRJg=b3Z-wk=Y zhyCcd(7H-ogE=dg9Q9D1-88pV(uehxO%vWZI5gK&Sy1W}cfhm0QsL6MPe<2IO0SFU zemdw=UwLDA=EL_sYpFc9(7a>$&4(eY+=qVoW~XL~x5w;=vdp_7>9L1gb{}~{Sv@%V z?k{zlC`Fw+j|r{cN$D1x*kZ|d4V0YUmozGv&{(;Arp?`=IZZ)VFYgFo{bonSqg#SsUb8mJ{HL;mbEkDtwr+esa>}^gO2e7U&QED-3HjZ>SLg2s z_fhIE&7517SXc3#^kVt7EDoWPM=bM(y?4|s)=!fV-wL_Gxv5U+{)^}3gb}K%%c42Sj=(v5?3pWHPuiqZm zH11T0(!=uR^L0LQQ7(R+x1(9EqTGJYxI%?UNTO)fa5eG(9OxxRCxfy@GPSdZOD{DuFoD8_wO&N9lh4^{V zy_7m@PW<&?RIqaP`_>COdIT!wAN#E=^)3(DIOf;d6OAnt&+og`$?n)v`L4p~e@6K| zjDXN4hH|1y6{yfk8^+|K9Nks9 zZHV78`)F_F((V1DjO#p<`z~t|Os(;QjFXS$1n=slY<#B8*Im1}SL}ZM^Y!3`O_VDx z0dIYEq_+}v{PTW}B3QXNC9dVZm)j{nw)>>zk8|28U!^Z789viju_V{|Ib?Wa<)>A% zXB>{Z6Ebi_&hcj_JqVfh;}pN!U3(}~pHpraKkcM=8xwb>=k`?mU)oxlJo0|X^fpTy zq`cG(`ABP;wDM8NYeIE;Hnywc`;M30Rp0L1kGd;^<6IVQbXJunF)J^B5!6R<4-Bhq 
z`6yVK*x=N{zTv9UJ*($at`mDHKltq&wWx29vhmC|!-~V*m4>gh?X@7dl@hq3e(hY( zKFXu$w58`mIxF>mY5Ky+KVYX9rY>ADrj4?sRmTZK&(%^6baQH3zM(uMs89P`OWy&? zQv+|m_vRmMmCJF1pPM(NrP6VB_@%zJ+9-zM-Yx;HB9(F9ovPO{d$1BQ>Fn(BQT>!A z-wgfr#HL7P_sO{lA z2ORn}P*z5KyD#ZrsPav>Ghseoc2$P;p8Dj9^%)`I=fmt>?*%9~&-~&qc+|9G5QZh`8?^eaGlzp$A_Ux+@&6i)}}`&mbD(`TayMTS=S=AKJ@IQe0g;J zq6T{+m86*kkH!_YP#O*PciZ=EnDWk>p>7kt2~$i_ElWmq@2|u)ZQbqkvptk!J3|9^ z*Ke*||8w@!e=ZAEei?nf=d>?}Dx+_Dy!VPjq~fu0!?&lN8?5XaeXy44RG{Mad6`%0 zSN)W_&3<~tuYHKpcfkbjCSNvHp2<2pRC%M7((b*mly@o~gmiW(w|j3;6Xo2D`E!SS zI81T7I`L6p$}pwVA7}PBT?|uJG%R1xF1dp;cm28Ca_3&k?To!0g6@VYfA@SQz}y)B z{!%T@4-E5)|5Cl~88$b3_b)Y|Vt(U_`G2W9&gFk#ocNbIz36^Rr*>^+WlN9 zXm)+YLp7*T@*RQ<<2Sl7PQLp7^7efbWLhw7m(`i)P& z{y=qq)Bc4~ryrpgYP#3^xq zuD+*^xfnR&%7S}pnt$m=L)txc@QcaLVWaP<{_Df=|DKxP@ZjRM?eD2}L+0E)UJvg1 z2K>LLp6O~_wd~qmwL|jM`tN*ySG9Zh^01@(?y6^vPyX6`%UxAIAG@pmRlKZLr|0jg z8AToymrZxoEwg@d|7`qS)%?q;2Za%L)hq4aoOdYbuG;2O>7e@och!@52Cw0@?yC2E z*DT+Aw?b8Wl+TI2QlTFA4P0tCUZM8-3lpg?D%8z4rqyM9~!vPbyTI~Q-O@g?(HZajKht)IHL zUh3Z4YKZ?jw?QA>R`W7O9_{n)ZMC>Gx7Waz;h#LeUh@3gs{dp|_jjh>R_`_)G38qP zZFSw?-p>z@zO5!UaUWCC_qO^&@ry;Hy53ep6AZKNHM^~@+|oJuW1riqQ@z-@`Hr{M z2OZi3CEmWJUaebNH16sxb;G4=`zN2irH(ef(k}O_Tk6aA1|HhI>y}zP)-cuagInt6 zBhAX=-@2v#9y>qq*s@z{oMWtERL(86*VL#!zfZrVUW%T(>$QouRQWvgmilPHXIEMb zyruqD=VpX!&s*vc$AVFRwY{a*t+@27bE8}8!2_Nbo7Mr3GT{F$_07l&3CnNXRC_vh zde8jxO*OLp`90yMZ>s5ATP!X*d{e!+eB%Y>^PB3pFQW5LZM~@uHq88R-rAe$R=sYjiyNP8F{#5%b;OvbTJ8wAsrqOK&QGd+bb&L%dl#yP`!`hQtsD9- z|Ko;gx9Rfk_Lpv`hh}Vk`OL{1>ect3Ucc(A8|ss7nw^RJ{D$h&`y=n(AKg%!mdpum zzTt*Cah`AchOggHm$^(j-gGJagXen(&%dGm(f_NX<7V7YBb%<_~5MxS?*_J$l^bUN_Wxji&rQzS9l$3uC*%KQ+6de)8Kdv9o+{ zsCh>N4Suz5sBv}&+Ljn@sL7q){e0o=>*|oBkKP^j+jaG|oFT<6%-7ZYs163Vlfd@# zhu;6{y6V|u?6SLiuB)N#3?Djdzpgs&y|JRz zE5LyN*Hu&Fz0Ww@DOV$l=JmOGtz6A2QhWb-sa*YhaL38FPM52V=CA$G>AP}ujJe^1 zrU%Q_Hop%D9sF6j`qQrKljnX^uD<%TfBeq(%GI|$&3=Zr%hfe&U&)s_+O`33sJUpvax z9HU{+eP?jXBctV!MP1wC<9ol~vZ#4$u1vjk&7yk7EqK!DvPFG$!198Ca~8GN+ucU> zJ7!UPpWQut`d1cpdf1Xh?-g6rlDAr1yYi_;-EpmBMyIV7^*v*<+x(3dHSW2s8_&IE zQDa;UJ=Ikf^-|J|kKcdEqAoP{b!oB?{MVv3?>uc$58WB))-uzg-uIflX2%qZ`e0M} zv7reTby8SvaF;3di*VrN@e}Z?^aAe?r28_)d;mn zJZgyQo$4?P8s(F|ZxJ9`aSOHLI0nNv0q(StOS?@nv~_O%dmI7Ux;@qYY%RoOgruD- z@|!udXY^m4Pb`uO?`)@Qts|{Rw#`JuO5fCxeWB_XcLGo+@_RQ_dzG!H z$2Y*HHT1ToT`$z~WW}^YCIHqUzjP7+oCIDq4T`JMeyaP{6p!rnwsxrJhpddO&$fJz z_xBdq89;r7_61U@GoefCqrB>tEb~p@w30opBNCD0OVu$ad0Lv*S>1F6;dIN8<)smY z*6J(26!y>RO@0@KMlI^$R318C9`r_IBziIZ9y+~sJpsMBKi;~0lHUlXJqT0>R0^^( zIfhmCOd5w0v{sRQ`0?;^?4hsw(4GrflT_a{lG5Ad@{3OT2rv+-WZ^AS*&|13X_CEA z*g+7g)sf(H;8V3HRgT;g*S`}_P3On*I3iOXfn3YcVifXDyUl7&RX>Zq<5oGE==Mw2 zO5<^W)@IbVA8(0a2t$@S8`lka_2`H?pTuK~tPN_h@im5_bY&RZJ|(YEv~NZB9Qi#Z z>h*eUQ~#~(je5$`PN}X_kUm`ts4rKJUXQ0#8DF?oPgFkY4|FY~t59{z4?qpj7@4Gv z?zY!heSaFoLwmU76;}U7mu=~!{Zq*(Qgs-s0^Emqoz%*o&7r*f=&Jk5VqI()yWN+*3cX2!|-=Pl~IT07!;!B z{v)qNWHZ&rqMGMfdVQYW-(GtSg!Ys4fy|w3C*}?(hWD1(&jY#-qzHaR)%{tV&zE(sV+H5ah4X1-EG?|L zT>5$ctI@VP@;Bq#AQSCG4&?6+3SVsFUuvV%ITVL}&^-g`p%9AK_OMMi8|MTh+KGy4 zVQjXFulT2ZUGxSp~_5G&zXxl;Z`gY{;|U zq8yOsr9$+COva1U&q0nru0kS?V__ST`XkPVjJPPoO^E*`AqJN=ke48DKsG?mgc?MXFoU?% z-yrM;B3{Vuu{aNM4)QDHE~LRYgXjo3H6CAR`-edULB>F)K;}SRguDUy9C8#gCdVMk zA<^><;&({L1qPu&hC}v4PC?9&n-K5k48j-E2+|U=9I_U&1+ouv5po6c8{{S=VUh)s~%Zy^pyKgb}+@(l*zxX~br3XvbkhK~$lGsLjXAeQ zQ{fWyJG52cc8tmE4B`Z~d`5kY`fw0pKf(_MMvv?_GGy`LX{Qb(hA(rtS=_nydHQgP z=+|p}{P@n}6UHZ`q>qnJOY9yrer8ho%+&Z9=@XNaGCGGc2T^h71gkiKIM4 zeSrGOO3Kb^o~CV|joOXhf``BE)*N3&&x2dOlkSGMHcAb`Vh4^-L1)D`*Irk{SdMR3 z!7VUw;-ty=R{U`TzRm<59v#y>HarZf2&m>pj;*V@jiqpbRon(?=LT1EBgfEcZsBl? 
zfLj1iraOq7L~_+|S{kM*Zc$nq*>FRZ@VQa3bZ&0daIq9Hud16C-|8xEFQ@WXeCD*&th7X=OV{{Rd#n4LIdc*d zP}QPrik+x2Zd>ATdKH?)zYr8b&HYV#8jJeV`MlNxaBlKI@vOhxi8t_JBm7Q z4g(OvW2v2NZ)pQv$SnwNG~Zym=t^!8aI>+MAaV;v;r<|dlIM0OH<6Dq9aHU&GD+>) z5CvzAqgrUp*6d3&xeRS1xCL$zdttu`{B3TNgZKb1@51P{x)pg2;sN5A5Zy0KYms~h z5vtW+X2!(kft@MY&~*+X0%K9f49!g|2f6>V!+;;+t{3fP@{BVE8r&R&TacS@%WyIo z>Ntry8SZ9RlS`a)loK8v+X=snx`9m`?Sx~Zvk3IWwEz-``bG6t;RcM$4hw|vL#Kbj z;A-b-G6v2q!WaO9lxa8%w=Pa`T3lI9BHs(w0Z2B-Rh7eds)e!5r(X+aQ9mn4)DP+^ z>Kg)uJ1UyeGQkcFon1udL|4%!#zmGZ+R5bC)Ik^x-uCt;J1tKpUtFgGoJ7D)qkY}W zp7t&=_3Q=e{VPZX<>#D}ICsL!zU~2EdzZxe_T(Pu2b)Qi<+cs0mAkL3Z6uGcW7r9& zE_Q*syN<3@x5!tWAQw?5+TB#qUhbuS>j@#&Hb$Ss7)5vrQf9+Bz9>B&gR4QfCLkUB zIJ`@SgULP)X;n{?xhu6vQ4yCp)^o_LtRHvH)xlrYbX*<6sKlaPSob6zty!mLA?YyiaK8I)O^~Z)g$?3*$X#} zC5}P3{xtLu4HJ!`VN4y-FsQa@nBi-wXZ9)eDmI#|`PcKJqh1c3>u8fV*IEXFkCa&J zXj3Q5Ss;f{M}!7vd*R&IKFTf-?PahRcJ+jl!31~Yw2J6)S4XXk&L|_w(*$HTcuRqf2R%fbo6rSTu0mT;0M8d5^D(`r0;|DeKL$DS6@5fd(0O!(=F^-9$1vzIHr(1bW!zB)tvo1Gj5EsPBz(FUO@8=M zbmK7(i<{Y{)TvnZSH16deiGv;11sj2# zRJV~D@@w#O6Mk8BgkNH9;TKa&_yu_gzZG@O(%lvAE^v2-yOZ$iQrGONg-4h~cevr) z=(-kfvuCMZ#&7zg-wc#}r4O(vgpg{K(6z3MsG9|WUDrjNb-Q?)^|sG|e;J2;1oUf# z_zf*YEeozurOw5UCWkotK)WciPa`5U_`3=JARpnst)9il>{V(ku5GFn=MiOS;VxPP zH4`oH1LIX%9=1)#V|`M$T^(!4qqQBh{!|BjvbLzRqOyPc)xmWj8ly%Crkg)W`9!o50g>>(N-s4E(0`H04e-lB1kmuP%U8)u?8 z4wH6G4{{b}+z0##iOQ7MNn8Hd*yZacd=nutuEICSMfi5{v3Qv=mTUF|TY{e-{QTet zn=;G3g)t(=4Sma1_(b!#p8&gPN7n{#*ti#L+*34MQ8^BhZG=*JwRXa_7x~o2;p*ey zzv5ml!Yc;fVGXJyyjI9DQua5@GiZ*{#a%Q>TqBwUy(yX)RtoD#iaMn{U@9m6uP$2? zHbWsjA0QcQK?L)U<+W9Hx1)8E;~f7rdI z{?sK7;}^t@?uBva{i&v~83;EQ(xO_}zb&U$C;#W#x}hP>H=Ym;@xcf=+Se(rW%7ul zd3cTON%s#lhxO&D$sYTK*Z~<=Exhe@+)umqxa0chhU=rNs2lBR zF58K^gw3@*gtQ>p)z7b94x^JW;@-oEerb&MFnJnrKZLkQ*YncoOT-6RXB$_vU6jm= zcJF4g)|-CaYV0U#8D11lW`|O{V%et=wP@1XO|-uGya>ozgfV`xFvjExm!KC!=~qIu zJB;hYYd_k2bYK zA40#;=NgQjxG!7T4TpzSe5v46dgb zvwdJ2+BMZS4%<0qwft#)=Q8fkvbb;S{(3p_Jl46d5S~7m2ctdp`1SU@Zx*8KMIrvE zUe|g&UUtGO!#&E=8_#^X9!xg%fSM7-Khs~--QUe6{>HtgwjX*)h|v(2;(tA!zkqPB zK$=zy`&fJEWzp+IM|SsP)cf<vFN72qfSd5j8VAG9Y(~Y`l=M>9( zSAC9%=Lj})#J{y4t&VD=f8ZV!`SMx8^V7>nJFCk5k;UMPIc!XA^e^Du33 z-}UxyjPV%rRpW}val$%Zwe5R)ow?cx*Dj8MkJ(xEF-Z?!-Mu>3+sHO8N@E4aJi-jS z8n1H(^leW_pKA7BQ@@&zaIZm3|3=ua5biGI#eXAg7{U#QjH(`%%dU?zhw2!_1<3kp z;i`b62{g*L0HtuA1_vRONW2&k4P&xUvwcmOtyaj;>KtM$2{ZYSS# z!L_2RLG-G2o@VPo^@QKHs_9*zi!l?+n7+7et~F@pajv_5G$(2B-^9N4)v||1w8k~Slkd} zY{Tfvd;IF-pW8wf@+*&{u1_ROwWw~uop(cfYW;;rg=s-a!~ zc6`*vxN7(H+FY>m*{G)$AMR@~Kdc*sXMoY3*7^SMXspXa7FN$gZ97rBiyPXG=NG$6IZ28JJjk%&#fB3=IX=dyt~%k=4Np*JC!;V>(90EIJa`{T~nSp9Xa00 zdjP$j+B{_ty&<1e&!df9)!2624v*b`+MZ{ld-7PMr@Jc-`w}4ERZkb!Lv7Ao-%Zp{ z^hBABqJDb!ytyNsP=CmonAfB3tv6Rn05B;VM*T=S+#+*k;w|=}q{9HY4t?sJb zOVm`K)y6mZeCSb)&xg$DGlqJuqMo6jsAH*Rb}!afCUKAHf_2{$;liARaV!~oI~MaZ zM;mS(7pVTIzf-+nj#fXYmZ%?HSuc;ZL$y3wukH0+P_M3t1BmB{^}BeRw0R%geBtH` zH`rk18qeR{e@L6CHm|IH9jM0rYPhcIW7Hdq4B~r8z&@L66RxW=ooa1DbAY}!Hm}W@ z>tN1Y8*^sNw=uRh%HTP(eoxV=mS~mLM6^n5EL!2YPb)k}Z)ov1`79HXdn%-!VIq1KaC`ufO!(bx4psM%<>w$S}-)6?frI%$~Az-x1?Etv_M>uY4Mcl|{NP1=?Phv^?Q{1NS{Xs3%$%v)XbsyDAyw+>L#4QGq3fYSGBh<$P1NFNV4v#%M^J|Gdkc|5z ztQq1-ugNLSF_7kORJLNhY+mSt?)Jhx*-<#}V;f9hw6n2Es{aMxWb>FaHNZV-1N5y1 zU3{$1QL3z`cd5LddVPvLV32djH=#A(dYkHHw`p5_T)lwsM&#GHU!wI~z0K{gUXF&c zw#DDw8&F!<+hBVwLKO(vJ4wT{i;)rwus5apK`zB`FRQ_i_8iio@B8zG9>{TN9{fk& z8)+NvC7W=**u$b9bD)C0~WHi1a<8R9t$~cPA#JGs@4aO~uyBUu#US@p2=)?6J!8nRBkujZdF5@!B ze8xh?eT=6VFEid_bXq0L;mg>ZF_2MV9LyNYIEm53n8Ub|aWms?#%~$VGXBbVo6(Ki z!=JG|V|T{BjKdfc8J}fb%~-&=i}5hy4~)7+C$b$zGt&Mp+rwE5Ov)ONlA1IsDI+sw zTKb^$Nz+EA%t;z8i~>$0W+Y|I9u=QD0}j~7_ITLQVpRX=k%NX0>zJ6DD$4B;Vq{i) 
zM%D}y#WY$pa2%PG6+SIJb6RRrcvePgM0|Q;YEp)XBXPQ8bY!y&UGZY1gpGY^R^sEg0*sX&jd`6|f6yj5#OrMpK zo`~Jfj!&j1YZj2$KYL=5iGrhovS1KmJ=O5vdLM9%Ny7fxht?Al*wdT#sHKN z`HIR&%0%-BleD6!qzui((kCLvGmzyB(UnSC)jkF#iXJwuG0CW~L^ujJdxRZ1J2NXO zEhZ%`DJ+v(lQi+7{h;)etd#gv^p%m3DVbRjI7Q?mNBxo#W=x)(l%eNj6~!#=Nwzm? z+n(x1RtnlHkxFjUu7eDy!laDw3?wiyK6NB|1?ou%vwdVz{LG}s+?yHtC8Z{1J?62< zCXdxl3QJEMX-Y}g>O`D!j7iN5OHG|N5$jN-Bg0ys7iWkebCP{eY(Pd*k~nA}h)hYy zh(}X@LnWk!2un;v^<)3%75m6(@#v45fB$ThxIfA?kxXk^iq;B?9irn?GLx)UfU7=P zQq;tlX;jBln`o037;96$rNMX8IvYqFvT^Q z(%`Xg_>3&mjI40X8td7(!Yq=oCx%>POfkp}YHPJaG%A21!`lOz&e6bcxnThMlGek; zDor1dl98#EMXQgEn%gjRPC9?I*aDkG`=ummJsTs5I6}1)MtS*!QXbxaDCy_4x{IQ5 zSM+ewhoP8h*0D*9q|i1aoy;&Sb8;@}3Q*1!&Ma-po!We2_C zNjif#n+lx7Sst%siDqc+NWt;mv!+KU$yOaKZNXYueJQT~>50Q9(V4c_z5m7;OAN2(|F0;u4;^~A9O33x`BpIA&3X9$E?;_l|EHkg zUmSMT4tqZS^ZA#*gi_OAnRdY69{>5g{qIO2K4D^F(xl1BDO0AVrln6aO~=J}#>`pS zv**av!}^Bz>px&%#Gt`LB8Lte9yKC*WXzMJM#qlP-uwCAk;DI)G-|R4=_d2@w`LLW zKa=18aH{_wHxmPX<;b>%*4yJ9$py0)G-1b$KT74@4HDJ+Gr0HYy=LaYB-Qdmoyal^7Z- zY6@3948>VfxGdZhO-R9g3F7(xkD6=1XvF(F5PCl}4p;>7fnE%3;weM`blZ0v>0QT# zkYKnIz6gngo(DV#84tY_xC+nu6QSF_!$|Ko+I!=CxD&R7tcFe)3@Lyf3j7kX8@d@7 zjt{$*K#v06f?S4f`>vtwdx-QtVxvZQ|2Wl7!vWCyL5~8Cg~UKN0hdAIpyvSxJb`>d zj{wF(QlS%0hh#y|0$Lhl%?3K5T>$b1-2oU3ISrlA4FlQ%=pMjlAV;9*0Cz%s)9l1< zpkq_y4Y~{P6-Y4jmB9ARkOsmNMnMALPB^$ zbC5{r_>m?tq$Sz{dMt1=B!P80 z2heFf$R6+h?S}3EY!5jNov=T|4BZ600P#t;6PJPBfhZT;gMn{DT;QG$+y<$D`*z?V zh=4o2*K-U~26qd&bw;`1ZUp9In2m&92rPj_K`#X!g~UKN11~~ip*QR*L~qDK=zV}s zLQVI%9qFa@$5dMfZ6NHK*0HVH=fX?CJHa2Dhm+_QmeArGLh2fFmcQ&5B< zJPPrFegf#-8+8KR2Ux!k+5oyQFd8x!dJOPA$U^7^z+I5>(02pBg``3+1(rb)p%cMZDliy(Q>^MQqsQs~9NZvBKf4?P$-0a6A%4)_A(0rb_t;}8q<^T59SVGq#z0s9U_ z8%>7}bcjHGLMJ>4F+)EMd}}D`2YNnm$8gvN^djJ`C|qBlR{%SW6rwqFTGva3oHp4B zTD#i=IRO0xuwD$}gzgI*3poOPJaE7$lo5Ia@H@yg=%v81vB(pJ0bYm1LB9#SI0nDD z0sS&C6|-$4(k%p9Aif!>D@+sGLIR)%0!KkwLyrT#2ML5;0PHp%eH?l)@E?#!3In9y zhlqhr7z`<+FhKjKP!G@@fL}pYLq7t11j(cD6JS4(TtWf8SmOdp9{PTsX&+qz=>u9r0Lz6L%s`gh>Q`5Aj79GjP-{oP#{X0v|%gBfR(wbp?5e>I&%a zImSKcgsu0W??Wdv6blgq-2>QeFKh{Mh63M&xMbs+0vx^%eHMBY@ERl%I^j3_VL#AI zfgKK@JkSGyyCH$l36~v&4MSfIj6Z}nf}RN64Jn{7!0ulnExfN247>oTfV&x(U4r_A zPIws-gfN7IzQVW-JrcMV5&_-zJs*1ir}!H@(S|$W50ICjn}MOF*y9VG@Mp*x=w-kb zM_~_hP$$4T$Izy5Hv;<~N136=0i903))A%;;YpMiI^oV!7=NJ0elNtm)3{zhe*heF z2JHfUJn)CJ$PaWgaM(H6B=jiY)bl7W^i<%75Hs{bU^(O(^qasf7f}CC!yR}SQVRDY zz$=i`(93|$en8)W-W)g?@&WW@;3tqZ(2IeFAJOiJ#{oD55(qsKI2Y0nI^j-8ALzS* z`DU~=bi#v>@zBo$2Vcav2R#Jst;JrFn*;xZRy1il9ef?fcO zy^Q=*JizRqVS5x0@N39+=r@6VuAnT?qkx%^WSlz}_!7hfeI@WHBpdp9V8g5EhtLUU zL2{wz0S`jrpqBuzL-L^C1dc4j*b6-dxEWFey%-qs3v38F{pL_E2$K!;{vGy5?m#od13KX&$a#bje_;HCT!x+p z{2nqMVNL_@K@y=q05<#+_6D7>4A8M zF9g2v5amSpHNg48Al5)%2n@m=r|}3w_<=p%+kj4Z9FhzD8gQl~-e-oM2Rs8Qf_@&j z&I#v0Uk|(pDTaO-*bMtpgXY2hf$@+c=!w8hkT~cc0Nt_A{Uzugz%!8M(9Z)OK#b4{ z%dvN{J@lKvac=nCE9m2ao!s#oT}Uescnneo{RD802i`S+PB<3t(+LU>Tm>nC`)c4W zNGbH)z)iIgXAbfM+>Uoff}j)r3Av1OEx(6fL`8>6nE=K=B3x^P(l`vIz^djJ%7AP7Z6||>dXDb74B~oc)E)GjKrg(vP=+u*!1x{p5dil@ z;Mc)O3;Ge@iJr&<^fKVd5R?b{Y2Z^T@&G*<*gec3auy;#z#R}5=taP&zNka!ggyG9 z>=Xw0-T;&xdI2zQAbt-HI$<%yj5rC;Lry~{e0?zT3BCOggK&#Pzd(2oU;(6v;sN59 zDZ~+S2i}L2L3bDdJA|Y@4?6@t_*F-tktImdKYl9J_$q#=udzz8-Q{J z`ZnNuh9rT3Eo4jrsRR4gVSrWx`_?s@kU)6CL4E?} zHU;$pXf?nXfO9|-*$k|Mmj~J$a65oK&|QE{;Uo|*VmRPHfagFz2h3p((gHM*EdkPj zb^@FaPzZD(;5mS5pyvS#S%A6+)_4>7A;2^U4+Okt1@aAOEZ{C{&|ZOah&*Bg>JHF< zUms3fCob?9q#cA4IS^nPXd=@hfE@x&4>%Se6zI2ra{(|wYa+qkcmP0K)C>Y6)^Z&5r_=XM0R}&@(O4-!1AGBuQ{NJoCIJGbTVLA7`T6+ zl>m1FJO{c9@M1V9GoY6N$2=a ze*nDq66}cxG!}4O1n{c@x*o9nYmiQ$iF_*>q#fwnfGJ`?-2s{!@WES<7ND_!uf>6~ z09pue0zebciGaHkK^*}4D`3laAfJJ@1{?rD{u1Of;2r>apuYmHe-G{lXd>?cTm>2n 
z*dqzV540EHVE}ocM*w31ZWGS|tdk7tBhW-n2QUKKEd}hgm<8%5&{qM|egO3dXlual zJ_0)gS|}T&D;J~@XllR@0n&js0qh4*4>Xal=Yc!{S_W_*KpW6RwkiO11n5A(vW1{5 zfR+dR5r8@Z)H}fHC7=xfS`%<7z-^!_0K0qw_YAZf;Dl0OpFlSO7A*s|2sDv(0HT2= z@*+Se(2#NvA3!289I!7yF3^5}QJ=wc2k1W-=yaez0saQi26P`_%_>k9Koi*!fIJe^ zHNXe}JnfKCQ{0H6dk(I+nmKnLh(z>@&>Koi&g zOVk1z23iL&TOGJRpv~)nJpiPG`7RXTaDZH(iF=+A_adusBK?Djdu}m;a3T};dy)im zL_{X;kJJX{ScpvA8|f36uOKpUpQdV{iA>z1Ne5^m6Zg0=0-DIgy>aNl7>dZmy*j4B zyaADkdvTb6@gR|j`!zfVn#jbx7~H_PhRDQy71D`g9KghV5vGAAGI0+AAu#SBGI6hf zuRs%-=*un&`tw94`jIDtzA=%Be&Rx)-$-Pl&#V#9L?-&K8i9TVk%|7X<3JOc=>HlD zG?9sZtDk@-GSRP-96a}lJOqFN`VR(LG8nYOfT@~+{Qym5qFyi2C#Pv?6V8-3*i4h{$~z=z2iWTC@AXFfYSjp0tQn# zsB?h9v;zvz-wt{R3HSttpP<%(12k>K@1Fxq4px-a2^R(7&vVHk37{VTb?_{bL$0#^^$FMlo*a@(G_F4n3@rsD zmuL)s9*JiuAYDXb`SUQpKmh@+QU5%;!2UW%z(D^xx^yTYS8x6G3E0dV5Z~p0eNLKy=7JYc0ckkKW8T% zpMN+(DDa7ii1P8+5LYaNrS%GY`g(VSW%zhJy};Ua3ny0>8wEap8&5u%;wf5r3r|lQ zXGyM4gZBUPGYIjparXvSv9Z?i zaPR@kU+ru>|M|zi|5?Qke3iJuT+_zK#)-#?_`d?5h38!tA6G{k4?Z4mhnrSJHwpzl zTMH*o8$ReiqmhUH_lw9w|LvCKq5n+VA77UTIpPLfKP(-bz+M0E$^Ey}iP`w)O~_lg z{ll9AB;a57<>~F_=IQ~`uC2>+*II!uz(!nDOiWTrT3Ad<^1863gq4)Ag_Nz0u&Ahv zwT+F9gtdgNDB|xa`@h8?AtfduV<~GPEM+YVVz9Ne7M8IQw-&Y#my{G07nib?m9Y3f z#~^7TZYwThBOxp$VJRytDJdx?Y-u5DC2W0N)>_g+(&D;}gv|dr1~J>~($}q|uM5kF z+K35Dic81}%Su^V3d=}Xi`rhd7PYpJ{$FB{u@SYEwh$8+wzRdB0I9H%5e8|J5|)&) zwU!dK5Rm;Fs&;P3NSwDw+JZgNoQpY==RpY=-w+!a(;>n;=&^L41GzCZZqkmWTf5^!+RRkDLHG^{>=}#`T5HJ=IgRV6#&2D)6q_?FYnXIuX>wXMu@F zt}P%MH#K8V2;_7t5hR_ixz-Q}59BV`2;47uJ;nd6i6LSMtzTlMZ|GfjxuED&kAX&f z*eH8E$uCx(^LB1d<#|Z>V%Sj>qAS zH#Qf2HpXefEm-*v50NC{7K;o95c=}x78FgQ5$FD2%fuEZSGRW4i{H%Zsar);%6IaA zk2@9HF`z71#R?(kY|lun@vo!Mlq?hp78Ol|kNX-*Tl=Vmiwo+1Uxj@=s5|1CA67{} zVEt55=UWn(*cG?dMX~Yy>HfFyH=H2L2QGqKfb5bIFu{*kIi#u-{Fqs8lxXfXfav`; z;};ZIkdUr#KJGfH^lhR$RN9o5=dvA*NJPK#*$#=Jl3oW+`UUpt9t)V8C$=}WWz+n8 zEcLc$VdS&U)utVJi1har!WuZ4w8`6)nKn#~#xM1mM~(a{t_-A=0_8jw(-)E2;}w^!0yA9vjg|#LJMPtcWHua}LrfWGO@0M|CG$brHvM?#fvXvSqRXiGiJCTKZU; z*Ce~=_bC%^hFd`t?y&m(VmA+KqGtl9h&gP)7#I?7hWr_g5_j}Opbzfd%`htnLdj?6 z*SZ}g%WIOaylXU~PboVgM`{o6pez>IL4;>m{cQXGd0{)5i%s<6AV!JgGv<`%UlX$c zT+Nfbj_{SIMGPeyVj5)2_}A&8&oH8>^O7=@{}eg8-m5AEyWwSd;_E{N9 z5Pu@F9YU+%SJf!Kx1V>23~Lm}w^|rID>_7;vTZ7LzHtT5K_;VOK4VwbBnAmLZgZob zW^RYi^d1O;+%E=yn&E}SQWOJ=QlamJ+`I2b-DDkX1)+OBGwZh4D^G4-_Q`$~atD1~ zZZ!Za7zkoEc3r^brJiY!p508@e`lfT#@Qsq)117{O6`bU=-rm3A8-U&z=Pq-{2B;S zNAk?>K|-KM5PqASP|*9LWIP5ewNltmHJ0gt$D@ zS+_%ptnm8G2YI2ErMdD;^WCv|2lq{3@*G1(U%!3@Z?igYX7xipeTv)4EFwE+Epsv5 zZZCf@F)^_Ti@2lxr;U8|%EHOXX>!sbf2FA?rL7TxbDM4=;gE5IXe%CG^4XkJ zz?^Iz=~Fe)rZWiL(KfdnAQx%GT54*Bj>W_@M<5~%UMgMIyOkvE`kHgBAwU|rU!FG8 zh}&qL)0S7%cFldJfFilWiftDc8hLVP;57F66cq7WV`;GLAmkOhwB=mKt7^j0YH833 zGH^3ss@2qYA}p)(_4k7U616Ab_o&YCXp$oHX--!33${9>8_yV;`7ea9J>hg5Ds0x0 zo(N5HsipCq^yhEfjw_j-&Um_WbPv?4oiE^g_qkWnv-t(Z?>7zK>H4gV1asc-nP#8D zQZ^FyN_vx2iTS>y_l-J|K5DvlwYYU}7!I@RP0p9IuT~6%4A#;?Fd z5c6E|SAlMF*FLkn-h6s3x@31DWpe*>)y#Xx;$}O=o%7?zt#!C<`m<{!^fEb?cg$O5 z*)cIuf#l#$e|H~e=bzqh6n(|^T6;#V=*5z@p1bvEtMy>>Pt8-Y`cxA4%3vXz{-Xg$ z#biAqExw#W>;jn%ai4tda>dP@DZC(H{`<$krzdSKF9%dhk9IAD;6EDrA|oTGhaS1M z#|uBg{OU#e+&kg+q({Be{cP6A!hM46tZO9vUds8jX>=+|!I!jGK8Ps*p4g#zk=bSH zz}z^@9^O_)+wXg?;q`1Eqx#{M3pYhZJHA&%^zV;UkTo@L<;g?5Id8VpVlpM-&DU2X zKDqBX`2DICD}?|0^>eMDxR!P)#h1&aW+7fS;?yUqPf9W@;iQI*N9yBGR*N`IJ)N?H z(>DA`7!(4SqZFiHT#Ox5>r&BdVSrS=b6|cJ4&fPn)7R6&Ju%=72~}@8kk@<3nxDzv zPd|-Q6zXZ|4v$iJc4cecv(t?Y(o^(Rc&kZ2mj z!;Dw&zLO;d#%_{idYr%8Ot=hq86?S!DA*g9TFQj5jSN!}tFB2v_Yk_@ERZ*_)BV?5(P>qkhdL{WhV&?Yvc2H2+Dlj}MNIHd+Qtbg{;eB9 zp4^StEIX@~G}|JYI1HJy?^NC}NoWYerHLRWLf357cYQ_l`Ou-MvX*0J`Qe)mxsi7o 
z!nt7zFAngB=;`lNsrMROoSf*>j(+Am=uJ9xZ3fD0>xwoe*$uwQNtwtza5)LuEC@eT zpWB-Xz{-wqS3Yyceps_g;>{SQK~oEmLS0uk`EL`Hp-LF}+c$d`E;67jA>yg4O=UuU z#ZN;_Z)b5m90Zq~8;}Z|U4=DsPa$1b0!oa_DY@$ppXM!jN67DQhT#}Kks9fN%ehWq zA9XmQy-&>W3k>^BbhjKIbuCRxu3TktmU$&7^i=8^dGQ0R0GE;heEhvUtvhwP7mfs* z%1%ch(YY2JVj3Av7H`9w#&GgHaIyuah;hWvu=zH7e^X8dt$_LJ!lPG9Us=aavsjJR zS%q&au2UK9DaC|tdUKIi_h*;K{gQ2x*Z zCCi^^#_CVBVfNHY(LGQXx|*p9MI_#Gj9dN*5anR3qe)QQ_ULlgI~eJqR+2xne9I}qsb;nhK2 zmntFo;|lba*YRC!zt0{b>cJbZRT%TbgUl`TlAPGVl=p=g%s>FbW_Zr;uw&5g_mKpo z!qg+iw{a&0J5+_X!5^pGy5%~yKl=cp8Fc=J^H-ylfTdQ8$354WrzyVK!})WEO)6-i zrf7~;!_@G{6YUw@ay|-3>Kj2U+A(>dAT!Q9KM}LiXw} z9@*T^$m!+F#E_vj>pm6gjGQd*r_GI>{Jt=s{#}Up00YJ8zKQtV+*%k-)u%B-Wke$M zL}c7AV}E4hX{^lkvxKJ?g9m6%urKci&TFxm$0bU`A9NhWWsTIjQ%oX%$tb#aTKPRW zu_#(Nh@pGNN|Kpw`E`~oSB{e5CL>ZQZIvwyZO(S)>i3H?z89qQ)K##;GY1H^Skf(Z3(&EOWmDFpHVek7Q2?50r@X+p(^&PAkwSrA}o>QvDq}4rU4mwOt z=nR8f9)G!yKUfv%_yv8)g~;*5A$lVg_Q8LIw~|jo7pT&;aB<-+2a)4Q!HxZhG0Qv- zR`!@%Wlq>bzDDfR&OXWKy&n_IMh9dVSV;FS7(gPDx3=8ZOM?)N7uJ);36Dw|cm~Sj zp#pOZCBK+I;=0!PgW1vssXf-akP=ECbB&*9sqJ;g!=*P|e>!4JRDTm9f?+xL>Bv&! zPvx9pYmAkmnhNqO*{>S?{3=4^hn<1kN5e3fcp-#UHcMkS7M^;;>{Uomwpb9qt`k%~ z;Jk=}tMdo){MS*leL6;q8mv>^I$xy6HiAUB)5b3<^&`*3El|yd+&e_dZfeWCy72Pj z{$}PD3_>2vz^>SMGF=YYl|9r-x-Nj;3^&Ur+y>?6@%OjE+x$*LK zS`9)s;G7P}g_m!GjWqR~t1+gX$t?CY$1`<_p2td8cRfCEtJNN_RMXYS$FI;fET0x5zjxNyi8!bZr##_| z%Vc5S9GK^t-@ZGHmYQTpYqJ(3Ez)Mq1@+A>L+|RyM{uF zVT@7zCEi1Yz*IYy3l)3Wj3;A{m?QDU4^cUsh(4|4fvInq%QbDs01%;Bqo>xnb}b|sJZdF)#FL;Kp!<#`?OuU;;zue zqr1xj7}c&8ZL?5Qp&9E2)AWl2J(XubH}VH5)$Cd$E=_i5t04%#t7pB{-OlgY zq}Cx0_YcvQB=6!MK!kGaN?1^w2CW%rPUpKPa~2nwfYl|I+E{TuV(mYsLTxHkaO+-! znYLzhU)aD6Hg;_ra>5?ZsXSWnZ2yJ0RT?EQt6dC3wT-~-4qK;22(Fo)#~R8kk}HF4 zrO1qJ(I+H=kSh}TNTlu{rQ*RczqsjUwW8u-iYiH4C;t8$TEh?C3_PksOPbA9D-w2wx!W#kb3P9>EyN6X z@5Y2p=*^Q^pCzn#1@gsz*)FjES#b4KJD++wl$%-t zLg7Hkn^A_(95iG(siy8c&cuwhGp$()Tqj?yqUwW!!R4L#doa*TGcw>ECEa@6xQ*ns zFIpeoKBsoIeh=KivG3v}t|_YLmd6@`5bT04Ge&AJtSwW5y z$!~*ad7CvSX>MqPj3oKgf#oZCn;Eu8fiZl!a*f8=c83Pa9$=g#Z63#&IB5I655+VnqKpq zpXr(l+8R|aji$bG2bQtN4kEaBH^i2)=HyxxT)*ieZ6lMznW)lI}WYyGfBM+lqikRyqz_9&RrfH3rJY zi9ega~F;D%PLpG!# z;J|WovOy*g>!y0}g;Wql(G~7_-F)n`^GKNu-F27okpw#-c9EHI!{0Y3d%>f}9*V5R z>FgSxCfpA`&B%tlj|u+Fm+o9V=$rmPsEK7m-}?fj{`Pex>)N4TxAGjoNa*&{!HkFH&^woTYohST-GTh`c}2 zPNR^@j^w92T+0koFGW3Gy)o=yy$x9mDJ>OS8!08b;j`9s5gkbG8@Tk^cq*2q2?Yk< zA*;o8r(FZ**aJo_Y%}=i&})RA5bbCto*7r_%42eYGrAQ)I@QZ95%xES7mH2nz9PQU zeSQL?;WQ@>QHRQ?famjg<;WOk9(dggI|PA=pb0;Yu`KhI74pQW7a#; zhzJ-~2g!hxJsEsFzMb(XQp2Z5rZ|c!Rrcj^r!D5^4!@W&gPV0eCDcCip>&k4gU_U1 zWw3>cYHIS7(!e#|rDvpK>ujQy{)tYItT?9Z{8zb+L3e3#>$GfCEugMh(*`rY;zn5G zPpNrs6(G4-1fQ=PKB~%Es3*4c{SSN&NZ#Cp-71`q*o%MqjuyL{hgsfwzr)pN2`$*?X0cgAlzcY@pzXm$D=9}~Ex9%xT3~)W~ zU5%O&@(QziI5xkRspKbPKxep;l7D@=?sPtN!g}iMGmWcv4kGL+F~?65=MGkL{T2IU z?z#>(dP;VbG_wtD-Y|>EIYC)hrpdP(G6#yCR+{oJW~amVkD$-0U(Rb1$R#Dvo1Y^4 zb{a?Vq-dwXAidlts9G4!k!wQ19Dg3Vd4~p+0?*{(Yv0-IBo}ps%@Iwq6Kqq-)&vy; zSQ1@%;o5?D@*U1NkMn~Mg~cKmM5)n!_d1xli$;|-dNCmzrsv* znjblws~-b>C|01g38k%bI)2*pY$?1HWg@~&15-LAeD64+VL%m1(F7H97?N+>P5AOv z;#$$$J;D9H=caOWFN~3LMnrkqr z>)i?tHjPR@wj(HDp^2LbQ*a8Jpe(aj0%Y5uOE~PEuYI1Z>sC(d(_4;e%UmR{k)&?> z=e4JRZ4da}qBDT(e636>Zr?Q7zR}_7!=2p59rzn5zdvqWVZdGa^ICj3pLY68wV0rG zOR5T%v2S*S zH5|}pBJ+`Lmm|yH{DH|Hd-Q<3^MnKQ)luB~K4~FjF($&mER?|&_h~CRz?ZwDv+!)Rb4x?QqL%QW8C9b6<+&ub%#DbzMj>2gp{3v z!Xo$%?!$uwVtH)IF6=!ne9AD$@O;m47c7C|nj0~~4PUmwgFLWgy*QRCx@W|Hoyq83*XrvUlFT1^$)?aJ;Nw@C4cbeUJ;yQb}Bw;dblIhC9S zwE%pd^{F6_rL33$?GR&ugc%AgVs{|Fi)o9$EljpzsiNbs-w1vF5*f0>!}TSmXG4)! 
zdgsIM#?bmVP_Q|dEhCM>G0F1~owIVhtFFNK;hAv2HWe)^$6 zpIqTe1d~iq`(d@uKxKy~vBxK7?5g@L>-uQrL+0Iq7^ztXo-^x)>#=~X}+FUX<<4^janxGhxv2V7-W7&wanxNHI;n^T-qJo~JZyiC`MxW-usZV%gP(O^) z#tD`VYODKpr;b#HuZ|;Wu_Rl`=+8}R6?g$t1T~YN?5uV??0v`BX#N*3{RxD9TBu^Z zUG;k9-ms+&$LW{1ps%;aq{U6fx0}YFD$SMNn=C%=DlUGn4Cad8BY!|7UiOa=h>d1u z+!50hsD0zdBh9(;2|}=iLxCSqIzAfYkXPFCd?z+FRvb@)_UB#JafIa4E;5~rv-|1Z zjn}xMdmr6zX6P#>T&#lOi`;FL)96ARMXr&hSDi(^1$jLL=U8Hs985DibyubD$d!W} zLTbMKsOjZSWmNwl)jQ#_C(2XX@m1$aYSvQ~?)OD?JxEDl-HJ*P{D89$J}tYV zJv8OZ`L=LKY_J-Z5T%0eCez{d!LY>LoP1EV_hxl_;dhPXq@nO~M#xKK?ac(zuPi+) zyr?ae?AS4*59n;JO)|!wY~Jhl9oAN_v4R|r((G-UsCl7vcBJ0Fd>>Ct_D;$Ac=4ux z=}$PTO7suJlmX=eksHN;@QPwXksBx z_=X$1Fn%Ea_N9;B)-c@+72}T3+s?3jMSu3J^S5upu5HD|L3k4mudhgrwCgnMO<*3H zHZw#s6Dp>;Z?dMTBg>=nMyQ;_bl$FBF4IgK@`tErB=l9@qkD|n(K*^<0u4*FV{uJ~ zAHH$IB8mS#c7Xi4@UZrQu5f#iCr;~Q1B7p8xaRnpMRvF1Heaezzj-n@k!5DMaT(-lU`OtQszp1ersj`ljfJd+^YxQTla*m!k!o1EKyZOVzVYpE!A zn(8@pB&N~~ZnK5^wZN}CZ1!uNN<4C}T`7%{2{~HgLu?fiFN&b?9&=k%2b@$WqoSbq z7+mAC>=ZMn*@gAo5mm!k!42JF^n(ZZ*7yQE`Q>z40+>`tomM6l5t(A4IAs3VA*+9U zbIpINB@LN_yYodwfI9KP`Q^dUpk3MEsH-beFEC_HyR99aJ?_N9X#-Q)k)1*NKl_)t z1M}?dn2`D()AAeB_JcnRQvHnQRuZ4!r^JWe;_g3+h{p89!wnbd)Ows99tPT2e7=b; zOg0d>qJUDG$@8oHEV?9t7fI>izRFCANPv(s6wQNVU2uJPxzA^d+2Yo`aTUWk6?+7#G zq*|7Snt9&A)18=KoxW6`IeqpT#TibGGx++Jxt;_P>0!m3Bm2(@RDvj33(JAfE$rD| zEmECFPVWl!k&@+6?<|ql?e(pl4elgQm~Nco{CHTF zjk_I|DY)}Zyjk_$L0+`yafJsILHQ;Yp*A8wX3j$UP8f_2EZfjiA!yqvU#m3!p<+!+ zpW~+)=}Q`0MNH`9gwNt{zU)zr4yCzyDj}k_3@kZ750u00yX?}rDA*3ZTl06LP&sa1 zFYR>ykt09$!w1}Ly&9yc2J}hq$ZU#mSlku7C1W&g;CvI@z&e-}*3$_wKlxP>g_!i2 zeAk^+8`TM851V3KK1-pfzT?nwlDSuvWWm8>l}0pyCJnP~rnxP+g7mvPHs0)~k%9-atwkqRT@qw@S#ZRWP1$p{oI zcBWf^1)(&)z!nCBQ)((u*T+u}mz^RrBH4WhCN0jf4?q|8c6W61NMKXjak@Xi-VX}I zwEzdvJxdZ=n2bhbXmyFuu7eilMFu0%G>)714*qv}HA~Dz!X^Bb%Og&dFWw=nk5$@5 z4eR%9mM)#3hmfP6vFbe~d2@b>AU@Jt&s@tUsLF zYIwFQz)p+(SVN#t!Km3BwtJ=dgfP&+7<={x>{!Y$5pjqL=&gbs$n%b4_2BB0yFg5Nuj;(kc(<<*@sfEUS-TqZmY*G4>uvha`(nsOqu1EMA|4j26{PxxiHL3$@6>w`S=I-*aXl6awwy za274Cp_{2ou!O=#HRPLh4_B5L^@5KkUY1`@fZH72UACDSrZ~RcH5>5-6;9SnvQsHc z*~r^|UX%%qjYOH6XSG63$Mu*}az7nI-kynSR{UDu7@^X)pk1dV7 z&b|t~3VKp@BzEr{^8TvSgI@DmTkt-(V-BiGbJq94d;g)(ja!lgLyDtJ+}#ue^};CP z)k!e+;~A znDv#@16^=j@8`9uvy`rp$%ka?&s` zqldhISVG0sC*y!mf!L88slO+s4ih$>g9fqcBF!dGpDcZ56h>dgT3SU_TnLJrK6?op zys7GNL-$0n46jrd#wI|bE5Tqo4Gm14Jl%~sH3tvpo zn*m}4443E}KvPg>@OXvSV82~a@Jryp(P2{h?&Q!5iPHVZfltW22SVJ!S@L@7*S~W> zoW?pFls`{?>5;*qvVyL>J+e)%6{Nz2T+n$V9~{~Pbe_hStfQhX>z3x$uJK z=glHXBF_xJ3J+cV+I|F1av!>ol$(F;nA!2bTM(~oaCS^YhOT6y>XI zhBSGQyQ;BE#@p~RcDB)euO0NC*2WhYnBB9|N#;+zGJOR4B&y?0^&Q;Nk=TT7Ej>dOX*-EI_{*2rX|gTJw3kiFGT(=lzNtL`RbKVNDbJsE)@(XS@9y~fUCJg` z!$c@6Ta3BRzs-ZmuDH@3wcG8#vpeYa#5x2Vy}2K_J&`bzxh3&-_%h}-Nw7ux*y3Ft z--n+AKHfYKcMirP%W1HxkQ4u=V=0}WJBUr86#w?3B)U1@{FlJ8^qK}ZDCP6&r9IQN zISi;t(?(z9o&sg{px~Y9r;xa)Q1~u++jQFUM&|=$Nc!rJKuBo3nds+T(l-*gwy+ye zlN;et4XZ4R>F>%Ja;Mm7WBHjg=D)1Bi+kga`5yWy8%4Jkkc4@BvE*1Td=0PI|Hxap zVEF3LK|=(+<)3*bdZUC1xer-PkYq?VaSHfzB>>;5_LzGAt*pJx=+WJ`^v}e!?Bz_o*I@YWA>hXS?)`glE_mf7=KayHl0pTH zTH6`UAMM+L-Heq~5d5=Q0`=&8Ni(RFI0H$iI>Q`0F)w@_3?$_U3{5YfU_%MnYd7yH z>v7fMmKnoRKc`TmUA`+ax{mYK5%f9)>yBsVIe9iVa$fC_G_S0_*4Svgil3B>)`rH} z0-HL`t>`4*==DKg8YcNR7w@^^?RSW!y?JT(!#Sn~{6p}Xyb6Eb<9Pe$injDG$a`wns&o|!bT%)&*W@voX^>%kK@o)CKILc9}yEzO!7oEB3C-c5p@U? 
zoMv7w|B>0U?A1ErrP&{ImXq92FzNy0I}VtONV~~(lUuxQ7bBo-CpslC_w2My|CqWG z51hk8DC%b=?rJUc^&$J-g^;GsAVjB4Hwvyp@jB=+OA zfAc#2GV8Tj7Rx?avtN-F zjDOWZ4*#%%y?Xjo#tODyNGWR-Dl`@MIfS7P46we*!yEjSJvMGYI8Mf_z2K9EG@ofq zQKEu!3_PE)*}2qmMv+e$9dQfhX~oJViLDH4etp_FjuQRxi$PO5uE9DZDhX-zC|l01 zoE#&tNaEcw?tncituB*k2Qn`No-kPtDl%b3tJ(tr9~BAWmN3{6J3q zP@&uq^~KjeLD#NvXRW;VLR5*|#bl_`O{G#wR5(O$$kF52;ZQ`MJLpVJ8aHA%cR0FA zsQz%lF&6Ybph7R!Sz`_R@;orvgJ}&9!{%6+4%XIv38Z>J6q>u8@ z9$wjzzPwL{%6}^_{dSa**_~uWgs{ns~w#rV!4Y+`SSZdpsi5Vs{^_ zk^r~Hk{<80ciPBB{C>6=OYwN!$-GGl>%bGOCRkB#J!e7ua$(z`)}wwxMLhiu(d@{s2VPuy@^x9i>;h<}U1 znlfUm^ymAS0ycLzW0fx9V{o6fSirfBl z>;}SjzpuK=k3Y%oXz0y9S6hi6{nALUqL8ic=cRC{&*{}xdC;{Io0PV=qPiJtxu<`h z@QWHPMH}fN(f9+HfCsQV*_m$sR7ozF>o;-87L)nK?b(!DTTziNab~4GP>7RJ(X!c3 z9nGjgt;H9UTD)t?iuw?V}SuSsCt<{5_fi>QCz%}02g3>WHLg>1^s z1yoH5st8!$Wc720UD(vV*ifq->{8=A$j51!u3bS;$q?hOId(Whs2FdocFkWV`x?Sf z5e9QZy|i4gJO$gD`BpAe382Z&wBC@O^!?Pq-taVC`&}sY$qo|0X?HibT5jQgLq6?F~+Y@apiw z9|hBIrRu($TO)+6m&)Ov@GA{|mi;d0dGcybk;}OD*;i|pG%Syfav0Tygd^%;Ne_{+ z7{Tu8Z7>x8CVPH*<2_tp&SI~H4A4n08{0gS_u~r{#R6$`jItYxDPElruh7vOMDU#3 zGIV}x>q654y@S51;r!)l zlr<;1n$hmD3*2H)pA~LEYJBa;dOO^opiwf!tvivQRsOEne>vk>WG6JB^vQX#{s}fZ zgjbh7GSunVA%g|$r^KlLEC2wEiDvd|mE7veZZNEEz z!RPs-L!1x`qI8v61l@(P$>V&Za=>j)A!^5=;KN+!yLmm;bFkep4uNkoFcY6$Q~b5v zlcxxY>4Az2IJ0To1+Rb#XZ8tB;|QzsuHm~}(TYR8;xANB?}90I%Yx#H_;HkfA;YDe zL&+hw?g#yL7dCq$8}x%T$w0-h9xgToL#Z2fvK@v|mmlNWx6Mpn!8mg^Z_lt51ukb=0;9tn5Y)!_ zaWNUfv>J~x!?u5~x4i4xKcqb!QxpXHeG&*m=@+WZgmInm^@6m)hf-SNet>3Pj|t7-HHdwLYzJejAgX-)~2ls-&r7D|^L1 z9B95BBI$8eQ_xxvu`P1c_Ml`nNg0MW&@$< z+8&z5qXB&scz@bvmEoKvWMX%I(m)Rl1B~g~MT)4<*P7+kD?dQ>)fT+>_R{U-zQM1o!mV;RuzY9vMQN4IOZ;jftcliq61!rE ze*ffqm>sm0e+pr&NfV8sdZ44j{CG5>C(yfv!RtJk=>B}?n&8px47RA>M_U7H3jRz{ z@fNx@vgm^X=Yvw=wCyX!;R4g)GKNQ+Cd z`@IkQbJ(YRuL1jR!rDosJT4=ER^NZ22Yw`;oikv%F3fH?w~0KF7F9GbpoPuIonqEl z7{S8FeL{_>cn0*7L_E)XVl|ZoN=!PnyCU5C45SzNyJnM2kfp{{=`@yn0V-)mKL^$m zgRo5I$4j~z$nq9ib1u*;{{nuCx@9o~-sn$-#7VK;UOniRd?Z0_niJ!8d?VZvQuguQ zqZj5LF;?kR0@6x^SNc#aWW-)wF8;+fn2U=c~$ZY)C@h_(1e9svl1j% zi519q1AT%V_Z~KV*L$))CBr>+*mrt-H;K*Sn^9n-nNTH#>Q~C=U($ckzYV%`J7eAc za_UvEiKJ*DN332X<$ObqUVjeV(S0enH0#s_ ziueW^nS*`J-->(@7&6ajO6wmr$NOWYfrlPWll8I#hgoraE`fXGM5H2-v<$Ev`F#$V z;HB58ZF+;jY^ToN`A~FW>Ojoo-WF2`q;!8eZw@gd$5~4w|El&$WEzxAhSQ!EWul?h zeLk~DopNVDNi=hk6a^zU@@^7_e9oDq96%{dj=ru?_)z~TEv)I3ZfLDD@)BN98xpru zm%rJ24X?z&+*CJpyz!O6YFATBiv%?bZ+N#Ss9K6#u-}BMU9pEm*ABkV#Fra~~{a2dHGgs#+VztoZ9zzpw1yW78OS{ja)rS~fe zxpXqz6oaOuZB;5ib7z&VXwIN{To;N#1@hKtrv+nuk*7X=*>b+OF}g;oQ9=@aeI;$o z&URI-jP#t_;vl;yEo`@i;SvS>Hp_B(5_n-$K~fR=iWK#lPVay;?BdZ3Doz;b~ z>w5vazY3OANOm9RcF~##^AH%)4l+p6dslO48mpmnii{;I5pmlFkY24b$S*rRo#H+# zXL2@djYAx{mc1jNgQYIklW&8&XUU%U!k6}Yf)z`}a^}K$S1LB@v-3dTO(J4JTW}%e z+s_G+WnY~{aK7E=6D4uMXvvvj8bsmE9Isbb$C;NS-g_HBs4Rri*$WtW`s1XFv<#~v z+|Qk>E__W67U-oEwlG{3YMha~Fvcdh)x^!{i#yxfW4@=-oU{46D);eGc{P4wXp2GA zgsN$oX8gMl43S-Q?F&@8n;>o?bc$5<_eY}?*G zsyf>4p*Ne_`q!k7s@#1^?J}$~g^GHaqV7iqb|h}sv5&~2i3AYKhijsS4YQqnpFIk z*uwt8RdhYUY|=UC-s%upy23N~fygUARPWo-vjUM24LFrh&4sJ9MZ1V@{ zU1|vMLi?I%f^c8pJiJl!yz}PRp7zcSG6tnDM*-RK z7dFYlY>w$zPQ3zSirVP03L1U$DZs`R;yXs@F~{emSv`bVkbq+8j}3}B z3edmn+6nICu_BAH2kym`j;3Nk0{yFl%}Gy)Amom=dwm0#`n5mIL&9R~Tt6K#|4Jx0 zdT(%JI>*$~1o>KTL^t62@%uJ~eAG_zA%~-+LV@TsD2&~=Q2Wh@TP2VD%Gp>D9_dk; zxD?IVn9dCe7!5`3*=(QeG~Jr&@_^Gk#AVAJhbviQEj;P={rYnO`P&1aPo(J?l9AdX zSc|iK%iZ;PrPyRBU^ggfiq|LVn%lg_?j3R%yOh^X63POxTHGBD6WsZ77#KT9wJQQ9V|(Th z={rhreqG-*1k7!B^=NKV%zP?{^7Tn9fx~hkV4&9+G7o$}Wv4cq=ph|L<`QVgX@vaP zs~=akAa`e^wZ^_#9tMB6-HaO|(I}=Jo-Druv4e^3@VL!>cRDyfL7Th>i_8%sKj5D3 zR?=h*hmzcxB2UKVd|oiLc;6QLQKBS26RHvW{hot783!S7X_zEq6@8>UVR{1v43s3g 
z*-{uyV&ow(+T%=@_#?nW_g?J6X1ftO%_ysBR6FOpfYH?<(2;KPYI&Iy+`kTa?0h*o ze)3|l^sBw{iiK3ymp!PhUNFQ#(}PBWFLvSK;U>yv_!-Vv-Cy6iroFy#Bz`RPT7$IV zTWQi&g5m(J5?%KZ8OT_db9JX>R-+)R{}+4j0asP-EsTba^s1mBAQn`*g7hW`igb}G zpa_D1bVQ^p2uKl?CRG@!NN>_R(tD92z4xX_J73NL97kv7KXc!Ezu&#zyU|6?PS#pk zS;@}cSy?+FST|SJ(!TyJadUNgX2XzQyOFoq_DI?;{-<~{Hg@Qg%E76T_SWaE4zmO8 z9YTRIciJSqs?3X%VKw8q;HgWqWig4J0JWMy7v0cB`k1#dr)mmyOVdkmPCs;ylp(jv zz}}2ro$Q$jx`ckOFn;tzq#M&V?0wZDHM2x^HO`~C7-t-v+2xrYE>DYWOsX%*^L%nqBKuP7L_)t;@N#G+Bi{ zA|EF~5U)W1T#S%89{Dcd;q!aw6`?M5`Z{CdnM89eR>a<1>cHVMC}{o;&MWMZX5u$b%yo@S4RGv7gIKNkCB@L=Pv4 zD&am?LtR;x+n>igY|91F9*LfMQ6!JYJQco-K;zKubEMUOKMB9I23I9y#Z$y|Tmcij z5P{$f0e+1Q|HAOYei|@w?^A6UiHV5;QAtrCE-ntPT)zUYUcCxrWMsh2n>WF&TepC& zt}cLkCICZ2Ltt)h4uTEYL8vh&2!jyu;35b!F)9% z%TpQT`ly1E*SA3aYdugHpbtuYu7gjZBB1PrJg5p#0oCD#pfuD3l!ZM2l@X?(HUeHp zSb)k%b5Q-(9DIzn1a-ezfrdC6P#O0ObJ$Q2UGkS`c*LwH~~_HG}}ShY{fJ2m zAUiu7eE9GIl*TxNidYv=73TuVdKHePFz!6ijtjfSKNEFgH*OYR789>`*IM9BBiK@l6Dng0Q#+VH*LKmX^T!+7kHkz6`#7 zT?1Pi>tK8HD_Dm4>)X3vb8{13Z-AZcE%0pz0d{wHk$2sH+W?P?`KPj>&X)NX_`farG=NFAu8q#!R3M>H7Z18e^s14OmfO(hhA zeGcli|3&}=LjR`fO~^q3DZ?exCtK1YbX>h3bEy07TYpEiEl50ZIvb?eGuF-|H?}w~-7`93T&rDysK# zesAMH`?ZIIKxEy$jpU%IsdbR@d%RBvPZ0bi#3CB)FHliYSZ5jRF+geow!=*&MQF@{ zN>K9q-jRMG#=|p3RB#y3X&Z;EhdVxmVfu+Br3Z?LZg zWM4wHB$9!A=3Ynmwfty=9i&G}z*DK-|{0n+qL@_%% ziUCrG7oVlUCmS-kK(!*WA5c&r&(b3#B)6`MMBNWa zHSYJIU-gGTu(PwXB2{o=uN&;q6G8?g40VAhB|rvu|Dt@fScE7fN2&0S3 z2L2VjIl>Vs06QzP0#E{KWDUqr6(A%eL+rPMR@7hhA0vb}FRB9UNEJXO{((X9>wN}! ze?<=!C;%D2MnKU+22}eJzzPsTdV{-n?`j$Di4RW@Mc#glKZYkL0bmUdy3y|pNKo`h z2I>3b;GfVlzC(BkpxOY%V6PGObg)-Gl;F*N@%!cfaR|V&M3i|rNFsxTgqR2nJl}gk zaYSHna75+-`Jd9Gq2Zh7BS*T%va+(`vf|?6XS~4yX_Xe^&qBX@2|_*C@a)>g}?3 zEA7?bF~aiC=#go^22+G5?8rzRqIxoHgiDPGRDbg?p|9-o3h6d3JE`48z~M>LDAJ9y^SMVnynSMndzBk{4Ya8UpE zX_3({^glE{{P`cl&-6dG8YAO>X7Km)|FQUA<^Rtlf57;;{6A#=gNy&A{|DpW-{|rG z9{th&i*TIp4DD$?zSFd)PSGCd?_ZG9BI)_AU*|ji^y%^c3O%x{|EKgn%YPup{~u%I zz`TpLH#{!^VbqUOK#Yo=0nQ&{xDz+(`ai@&P>*yF(HX`#OYjd27(c~m``16mU_br8 z8vo4i;QHtC59A`=FYo`_7}a2Z^9N}sW{02y;k%vqhkcfRv=a-d2mwA5J|HS83NDLZ zM%jzuY!)ahE2HehckbNzZYQ>|umIN9*1*xx5d;~q{bDml+KYd-75`=IG&nAF&<0Lx~km?`;&mfZmNsdw|yD=x+%>I7XeP}Z#2F*`MK?_`4YjLLn?Y^hM zd*^H4ZO9qWhWw))ImcZJeE4ohz6Q?^*|Q;MdMJXd=c=ISg$l^Zi6Jrf(V=O>Lj2*NW!%uJ%pv^eR9`LpALR;}Jv=#3H z(av2U-n|Q?AzbO%1y>=+^zOnlsds^5|1Pu@?*fg!U7!U)?ei|sgm52%*3d4r7w-bS zkzHVdvK7O4d>2?k8?yQ2F0hB`7E`;>PP_}8ARukT&!0aBaAO7#6chx)!^469TYr?j z__yDFL)nW{Q&T~DdO9d5C;;^-UZ5k>6Le)i2Q3-ipexr8)TF!sA5&j|x^#cgkl_zH z^8!F;Net*JN7{r_z~_b>F!V7AEVL(sq55<%+?WMMn)AQ}vhBn>eq;{;#{>^TsZin3w>wGt*#YW*Dr^jX?YFB$!*82VYhez}K}!l>K*Xc^S;C ze*r@ff9%^XSlHSHODij2eSIB#`?3r+q3!qE#uu;z(^sJF_Y1WBZfzlLzPtbJZNBg} z|JMNyAAe5+jrYj#_ecl15HT&}wSQ1b^*^GhtA4Gipdbr_f_~b6E~2jbu8K0WbV7VZ z6@{e#NKhA~qN1VCmy1HAk)d#iutGLcbTMI>?tZ?hDtZd0& z;q9eE-S}#w2vn366=5Zyj{Z_4m<5Bct~Qb)sW?47tvG!zZI9U>&64;TN2eKSj~;dQH!#pM*e5ukP^?B0{DJ@dIx&b( zd-Uo%1Gp4vfD{T@iM^&!Nc{!htd5I|5fU)Qzk=)ybRmK69s#nogMPuc%7^%;Ps5kX z*p$#Wp)kC8?F*~DSH$aI@GZ(Oax)i|1MiKnV->mM^MTm|p;V3mF zB{?}cF){JU1VdO0SP4`={X>0;>cruO@fVT!NCFBJKCw{BUTdfSA`foeqe~DU)&Xjj zl6t=Y@&aVFwY8Dxf7B;NR17aKY~I!@rM?egJ4?m^fD{~Qov@QMb(*v7~&f|Kj?3N=sy@3_}01Q<>fh9a49o8 zTL48soL604T^fOu{%863x*k^DHLlZKv`DQ|B1OtG!@$7C`&;~rf>KM|pnG&z_#5Jb6W=kUaw_617V? 
z)XCq;k3mr9DGnPNnigqk1>BMB{~jNM@NHdwoTt0HdrV?o1`$$gf6E`{;~AU5U}0uL zfQ3Z=JN!Ka6xQ#5e&qjyett*T|8+q8BmNKi`HqjmJ3xl1KjME!+WUcpf&7Po1k?`{ z)!*R%PX5pMAK)Kgqhg%D?r(eYKrGaMBsNU>NAet$1mnNOM@fKWk17N8_b>4eRB|B9 zo^;>&GyJE(EP9qjl;u1#^WOdSXXx|{4153gGesFhncdF*HU7^$Ab;kw`~3H){+s-T z??4;G@HGg=r5Jm$GumDZ6H)1Z5yJy&>5%zDa5fCjr9svQo=by@p%2agA zKKxK~QY$Mf)Lisu-=!btp$Gm-Nk922#n?%L#K&T&d1tKsweP-4(y`L0d1k8z4QTg1 z3AzF8PTB7j6U0?ru`KoaR=1Oe$|qyRw$f+F-OI`A>lgg!=w5X=UlzY&}#B7KbXppVgk zZ;=H|vw~?z{~|k>{s{W$ApMKHyu84hH*ZkBM96s|(wC?%`5CB9_5hy>13`D;8_-=8 z270UBfxhZwF!1p`=x8bgqmAicwDkiRhdx11pudtcKI4-bRM$w@FXKLi%$CcyH{2w0mR z1?%%;V0?8IOm8B<%JLGNyRGi~?W}<%IA>dfbGOZJ8~-DGb-wrW|NZfo8bA#gzvlWP z!>`HvDZY0lxj4Bb@BRgbuQnIg#S5ImBHDkB;3g!<4-p>Qb3XsmBHYA;1O>Uc`1$@U zAIjw>E+!5e}1)!4-ErTwJPq5CN&g@8!!WAs2Na z2SP%8h+r)xby;suez?vG!!ajTrqf6fPzZQ1UmB_7@APPx1|~>H3Kf;}NDfaQJ+gw? zKjb47_2eY71nBpG?+#ZH|0JKuR2dclE8+Qj{$Ba0zfMdKZlG#;*5UX3gBF0XygKtq zWF^j7T>rCtbaWZxCl;m_PmFLN^3Ub}w8)5lkiTC(q(EVOM?1*>UjF`EYwsHUXa30b zaAf%YQvFAIR2GsHBA`%yq5od+-Vb!t^Y%dhNXCB*d->4SC>Mk#zd*araPpWz@6iT7UuSSTtW-&3d{AZ_Xg?^A$Vj4&sG&aVe1Lg7mBYqi=QIv#TK2Qw|h* zUIj(IiXiWWHYj|l3DUiFK=un=koOAuR0kP?s*pRN{x|bI+jiHUZ5#QHa_tkeYeP7& zYu|)+ZS{RS_8n-))*s!4?<9LRZ1ag-_$~r%+3-EY)6*09_iFE0<2zxM@IDSn_a!w=M@y#TG*{-8VmHE79$?&_10$y9#obwm|j@v@KVcf|lkc(A8QG+FDz{bblQvoty?W(C*v> zZOv`a?%WCEUTAY3nVkpAON(F`+KN}dR8Wwg?-JJqNU)b~aq=WNo3Z}w+qZ=U zAPIcY*h@D%b&{N%o)tO^UeSfTzNcSfKXvjbISDZl0l`&RJdzJK@_}T`@7E{CC{(vKc@VM|1Z-I z{}_%OIZQ))9NGu|Z9r_(0mKsV@8FE~^D}M#8kvp+jDG~kzKI0nbBqK6J%oej82s^j z__ODjm?%K52dk;60qE=uM9f40v?Kx>8yn!@-~iqjaiYE(3N_(It^LN?@PW980;sj# zq(_4L-vcX2ZGhhox^I56)-X z;e56OJ~LL~d=xn+-P-+meE41M|NHBIHSpg>1AlmqDN4&K{UDe;(`i5=l<>^Gv6hlmI3iKKW?Lu3X?`hknMgsB1x*BgaKV1SHs*WKh@K3^a#H zkWUhJMSuyXDmX%-j3?-b2;{E;R6b-O#z;m)#1AJZ$b1;YPtqKw$3tH44bRxTmo6Q` zILJR3vVYItOZk&)xae~bsE$(qP@}z{h-c7N37;DXXlr$bNq+!1Mmqluz+v|Pjq`J@ zXsU`9OHTRNNN3!=8pu*8#TlskK!qF+R8etuH^1kCh~*r_M^=zZo9@H)RRnAo68uuCKf+{jCQcWfBLVsEz z)}T`{TiP!AMRaFb2pcYYSFl`U zow+=$*N#4a!3W@;5cxcHtfCE3AQAF)#RuPKR%oe?)PCvp>B5JijK>S}j|B}DV1T8e z&H>v+*DC^r)_}Os;$+M2OEeO3e|DiV^KV7`ZLUsBGY^2&bHvKyhdi=`7H$O2^j^51 zFu~K)j_?dVOog@=5S`Q?Za3_{Lu^Vb6-ngbQWiyzLGN)w^e|%l>q036+)hj;Y!Q;H zpF5TPOy7scHV$_1d;9I&4@&oq=sIVTRT7_#XS(KN*j;qTJd<B3WgQKrL-{1i&AF~g+GO6Vk6!?4-{+RA~6v}((GUR!RfEtLtW=G*8EAc-u(O>Lg zsgKKfY`g(CdBY7o3{`K+{A1#tVvNY%_7UJlKV^U|LFNuy?HU!T3;v!MH!VMkhIQ9D2R!c)yK{!yX?W_qA{0b$|BE#L!b5 zT~F`JDD;%0Ja_I~TV#D*oxFwyKJd5_BXyF@qh^$fmR5rKa79m#Dz3(b4^S?#+tuFG z!NN{Lux&Vux|kcnc*)!2WGXrmmC96a$Jx|WSDUR(DbNVmlEA%oDRckwO|Z(^@*X>Wg0_56r#?aNcDSq7vtv$OaEEI3dO15;DU$B+2~nbls(PKJ#( zUK*@iZ5H;s7wK?VBliJ|4%;d|HSnkOm%@yXbmVIi{HmF(`GHI+UY5lwDC4xAj-a66 z#?muk@6VsL1U9E)deUxVzypUArxx7ssKM~_FJEj57abNWW;)-sc68jBT6iiZDT$L( zvTLXLcs|>9tyR(-vGY|Io=xPoaFYr4u!yOtsZvRy6;e&JGAv)6e$Q(s$;#ev8JR6t z9T|mNAC(O}9Q1KM$tZ7orJmIDX-R<@xXj2QUNZ`u-?mZ#dZ99o zqt9b<&be|{1EL!$ficmA1lFCD3GeVyO<}vJ=02ch%^N~TZWv@cmtG@gbbb)Zpbm9y zKKnlRPFJ#;OUFa|lkT4x71oaXj4TZ?5z*0A4VNj)jx#(WjYEIx+#c~A5?T&QS z34Eg#SSKz==i!+-cV1qbTvXSDGOkl4`q4G6-lO-~-xnXf5ISqtmc@5bF0m;e0sY7} z%8wK~E5Exr?90DU`thSwmo+6mtkz3p5s~r~38o|MFuc_v^$mhEw`OGb&KSxuEwCrm8Obc%j8b05fwB$_6 z@b;Yn3&-7WlQEf@uz`sKEU+l<5WIhMNH)FU6W>l;3)jC$h5oE#tKA?E_1PJ z3LFcH;Km6z`P`uMW$sfRJf&&rL$d=B9AcKTWI4pJMS4^-?jAy(9s8IJVJqNL@?f@t zPHVugWKjr@$SuKNv&&P7!Lg}|O9@{LE9_p^>+4mHUPH#xDRg0@Ev2{%k7o$`bIs4+ z4D0uu{k%@Z9MLzDzvJWMlegN$Z@$`iNf(}Jl`{UEloOngv9!GKpkSW6TSwd8Rkvb7 zOJHOB0i0$@>dUhd)!!zGADK!a6ApT6v=!NxbXfAp7bVOs53xC)`&P@_IXkwyjXON1 z3OKvdMfELRsz%$a_l=B}J5wDgFMYOUB%q<7tGz~W<$LYezKIz^n(+jkZnad8Vko3LkMdyjIH(^WJ5iY4% z)UvJLZ~!7oO6166l%tZ8>avZ~;g}B6+;`z2zY*yTrk55gScnU~1$PpUS?48$-1TzU 
zet)=RN&1xRla2PCrY*{mNzIuc)zte|!!$+WH6t4<@c3KWlK!XS<=i%A?Dv{z`fW#j zsyVB0so?}JZy|ry`x|2C4%_aaZXZdcl$4|{lW$a4hSoN=xt;e${YcQWt&y`H3ftAK z&pWGABm3N)iXJoGS$?|AuU=roax+==HnWm3yjg+;ALq}7(PnxyMT+-vRR%e1;P}8ba zqPMWM2XkS2=cZXK&#lVHs|sGi$|`X*G=@nYB|+204Tdg7tnE`(jiubX4s6Al!>59g zV?}Sak&m92XLKYMRo+Nt{+Iloge^yTswxdJT{B-(ltk#SU&vKyS|J%uSX%) zW29~-QLH{%^zSFTON*CVEb~&giW1%{60d7$Xr$loJZIdWrx2oj{RP@))S|ytMXCvXC_=)E3C_PF}piuMal)+(}7qykmoT3CuNtI20+)A-}=3=tRN6~&jk zM~|tnJoFgslGPAgD130L_o$Ka$iuK#(vsV23hHMZh1(slF)};PdupEpL_@nisk#>* z)kJ$;Xci#n;@Vt{QKiRVGM{L;NsK_#=_?cMsbV9JbdjP?a~w!aNHP=ZSBgu|A)6fx z61YEcJHMG?xq02r4Gzc0o%DiiG#LGHWqHRb+Pf7fkQFy+@6CGj*iSrQ1)k<&soQzwRIDR0Eg^}9!^u)>KKH}*VQUpngimmw;TIHFwBRJEp@2W7V zMLgSEqKbE{ZJB+=&)sM90=45~&q8D-LiKJmyt#>UyCs#v3Hw2JyaDD*K?b!TORM1% zyVt!Gw-fWnHYKa?^b9=IED1*wlsa|Y%kgm3aM14YE|HCx1^8X=T90_SO~f3DIA=EJ z2!cvcLF=@K`eCOG{pG2MO>G9Lt=3x?@gC*TwXEZ{wM7(8 zQ@yQqLSe?ciph@URLvGaxW`Re1~WH<^OEyQZTmaJZ$A1s=Rz0u)-fg!6mgRbwl<3Pu~I8%e8 zVl$ll&172I@v+~P<+WLyrPX^jBeqt|YU)Sh@l8JYrP+w~W*u&`ibUYNlzjl{86DB} z9cm<(2ZSG#J=|S7dP0xjP?Nuu+0|Z~;W7Isaw+W&cLu#L;N8)_fz8beU&ONR9%ILF zwmmV-bMK45z~m%0U5`u^N0>_XBH8j+>&Kj@FHvBgBuIcmp3dn*H5_ftyV#Na@mpa# zH>rc~AM3#8M!wChB|M!~4Xh%L-FT8;Cdq7&dWFI%h14k$o7)rSdT$g7eWqa{9>Jzl z*tWSWm0LZ2ktvsf2_v)ts`$R~<ospeZkv*%WgbhN`!N}}uuU+Lm z2bHn}ZUq#D>C&)GobCO3aUf_~wgo#b)oBGjd8cZ*)FaMPZ$i32nJ^cs;M;4Qwa8)( zsti2>rO!VT7&v27bJj5tZf#_F3_5$XSc6w#o11oPYim*O-r>VNfDDX`E=x<}$H|6! z!8fnh0eX|mjY{Xt8+ix5h}Zb5IWSpioeENTKbl~&RYm*GgSYPsZAH6=UF@a|DPoSG zp?Mwefxwp@-4s3X=vF4^)5$I%;0-tK)-^LDDX^I@|1#g#+TA_*j+N~N+I6vs#?WW* zU6igqh^=P(+v?>H^I3^0cyvM!wT9Lw%HcEfn4IaSS%vLK`C1G*@ZqeWq47K}?sS+{ z+tr-H;$k7=-mF8-0;|_`Qcs-02tL`-)pcjSH{14W!JyBl%zH=j`LRwE9JB8jd8W9n zrk(l%M;(sJ`i)1E;fd&}g)4a6#yu}A!|wu#rq^ec%*F#$?hNExzZyAo;r59Lp|N}U zY_Cl7wce62YZX#GKf+eMxjySeFtqB;fsoTjqZVl`r>f8ky5&BbTB5Zz)YNuzds-;b z!myIQlIXW2yh=jAIv%E7OSQUl?gXdviQkHfXmk55@MQ&+5xuM-!|6OZYeYI*WukKn zbk%QNND0JsJ!DRj&`JxYOFnFxBY62p@Lmp`L@Ynz#$-eOv`_`tZemvOyk2ON$&1*C z?peyV^Ki3LZN>4tQs<2O&dO-xG9r(T$$!GS(UjTN&;XWrCbutmr!Q_91`tGjKN(Kvw8PgB-*ccOc*KGL(RS+19qezg(MZeCouIG0;U=(`!q zSS*Zj!BZG>gNJ=ZfMn;G{9|r;#DiSYvP(RZHB`F7ONtJy$4W~nG29l!wDP9CL(K-- zX1(=JlhKqsGG|+GvN>|4dPLSd{}x?p@3fnqhxM45Lm`JctEV=_U0dw9!%pPGIS4*5!we??jF{ilY79@65uRR8ZV1@{ceMpzeo=b z7ibh7`E`8u)2?g`igCmhg&C7n#b1_;JkE@T%ad5EZyz!5%d`G-2<*v1#h|O$^7Ie19$nDyYVMd=6y|nK#Ty|$tsdb}+Hmuk z>^5ubida$cc_|G1h~JiqXx2>(YaI!N3B8L9??)XgjkgYgYSK~YJE55~#A&iC?mB8j zY+74ralYPwm$NqQ`Ji;^^;5Eott&Uwo$^3^lj!RF#CTT`0z(`eb~6=_F7|;Pl$%bFRiYia+2cqLzLK>#&@5a-%RiyVo=B z`5d7Q-@tfb{mK{;C#H4>F_Wrqn<;lf7DjXCALg71M$>Gow8p*!xTIwW2MLDaEl&7= zl?G*%TY3$M)cA*ZY|Yn4$XK4`8TX}EL@vB~s1s^ze=N91!%(Awn_Xt7%%3b>tRqfD z`#x4vNlPM}58ky_8Y;L&ezIwu{-+&Zzy&*vIHm=>|g$$R|SIN5SgZ#Q-D z?39EtuMS3ivsI)d=`6a#ih_0kHn=SJ!Fr6*0-ajq_=mBV(#Uy0+vhPf(Zlgfj$6mW zOiCNHN}`EA?5^Dp;WMT)S-0b258zq7tJ9YAaUJIwz5KHc1_$mCmyygc?iGR3QFb%J z$^OIkCon#edvn0|PW4c_P^qEX_s=>;vgk6}>OWvLgpJ6(N~%!vEF#j-IJ$g{kihK% zd$1(ihZv-`wJ;p2{Y6&poxt-p;j&J`aAuJg!G3Y(Fte&N9L`O4@!?#wMXKR!6Agy` z{pIKHk}5+kmp16=yc)N2>u<&FSR}n=NJ5jdsLCDSA2h)CFmc0$fL<xA2vB8oq=)r_kDyBvB}-pOjTpL}L#~8HZ!oankg+RywwtV-_#$A2@#=bg z7h5CuT~P-gD^tfi1a>a@hiJVx#nkPd5bBc%PuaX``4qOA-+{yFEI{HZ4$Uz!jf?z3 zUKLLgX(jtZ!ugH)I&*Y!NLl1m^w`dXJ~^ioM-hK@dqf}8NGB!chqR4?ViWG^czA27Q;2Ae%^Y zUB^0rJzuBwL=9!I;T`frb38l-TTjkm=zrOvwOM}b8OWHKN>wTRDSm64N|p6>07o+K zEjw#%R=7~~A>C{&Vrj>Qkf zjK2t!TbmYY#tvjOl8t<3NZ1zB7t{Rc2$N=8+s@(51#UZ%S?a-soJ02j1qN<-N(!s) zZlA+t8&diY$>i~3+m9S*0*vsT;0YwO7u{ z)&?uqe>fUkKP8dWU*LjO+PNq|Y1tP$g;>&lIXRFaa_Y&2*xFg1r{U@~r+h4RjC1?+ z!{F%tw5k7trO&qlV&d7Vvv77dL~@2cB|d>MB#tmRw(4w$DaF0GL6>8}9gc8Z2jTNF 
z8*6%P%Hj4DRCW~9Wiw9+-+D{GQF;4(-b3%s7C4^ynfW(;QV|a4mg%{4mI^oJM{OHg`qjAnX?-D`+K?eJ?M_p#bLw5<>T80&g8%(Oc6qt0K z;KRc8_wvHK9?qv6ihKj{9HVPC%Id$S;t3xo%rU}Pp_dYwbrRPljCPOq+YfSzF?Dp*IAi0;W}9Zp z){n4ANbA1O1W@M1riO0?cmwb;kf3b%KhAB#x>hF~@ zr%em#Rt~~pE9g2ME&45 zqWK4MPKsP=wNtCP?Tn{%HT9nAy^(~ziT1N1!NCvEu$^0bo{DXM!f7zJo~m3VV`(0L z(#xBx0}|kWN~*ye^%d;OTQ zzeV9{hgzQ1qceflIlQKR;d3XWOP94VD&)!#{yh>=DKR*HV%;Hlaxxr7hV&S?q`9P} zwgR`0q^#rn(w>JWGiyFr3Q?pjd)72vMmf2rbIs?=c;>4had>ypP&xLi+X$)~wUraC zg|v|=;q@2?Vm&Ns5fAYgeVi2U*Pp85$(%sD!wL~o;Fk*n-2$=L zxwjk+2U=%S=hmM)o*Us;Qv;4$veEJMht=qc+@X*nKwrk_%3d8c)e~Qw#U(<_=J_&R zjAKY0?JeFtl?4U}D9-8XPFSXSSq$6rdQTT$zxsHmx>Z8|8JRRoM4uPitpdD8r6ewm zSD<<@>%`b--%HDu)1>Y$bS_D4_IMpkSLoK6wwS3s&NVg~tX)EnWx&AoS8cE#dzZ-E z{rrN*w2&~VQ;&=rJ|esz4Fh4*`6)qB#&4O5eTA9C0X>+C{efv7rUT}>x*QRDqx#hG ztvS)pcL4*ZfU!O6vAC|AiB%!vR*|>n9Zi==p(Rag-IvJzTd$@3%{Z!SzD4&XK9XY{ z7JBidHEU-k2T|yu~}&tVI7KvR%3(PXI>G7uUl!K9z+ld zMQ@CUlLxD-c=n~Lx!r%%6G?^cyYtZw=!FZVU|8489+d9cbpmv`05S~lNG*@FDr&;KW-#S-UZ{Gemke8n? z3(cd*q(5+p9k)X!ON+2V6&ue5{hGpLf8c%oC{@XT(+(*wuJ_k!aXP>Flx-Bm;Up_P z3+h(AG$x)+Ho58VW%2itL!lOlvPxCaJev2Sljyji7z?CFWl{m~WahzVln4F11 z?}njQes&Cw^45qANLaAhvUms|cnwYA{660-`)1yfJ8@sypsH%tTki95cNv~p{miib z0^>=^N=~{}xvbk{|m(ds7>5 zIu|bW8tF5z%o$`{y};P?n|&@y8nnm^@GRK=k6K$PbwvOpzh@|VziqEP< z@wFbs!t-07+KidW436t<(rlEPu^ixZzN)`4X6XC?yAEF?$CzWHWc0V+HeT{SySc6D zy>Y&hjZk|>pxM;Ms3=o+*&(GbP;kXJblT6ZhC5kTgH72_y4dJVPawl!Yv5@XZxQW$ z@7LUVr!=XsmQ;{$rDZ0V$Yr{DpX?V^rYZ%4(?aP-6}I~s9DzZ^2?@lvJnI1`CR^U5 zvqDCGzVfe0+s3w&eKS_>G4;C26K5FUv+7mOOJ$#mM@!Hi|AJ%d#lemK0&UD~ch|A| z*ptAA**ukzeO8h7PtyXG;hNq3cd=zUOe2?YJLaW=zKYOU7)vV?BRg{2J+_J98M%Qw zQRVBWqXy#+e*`O)=Ti&sG-z|kZN%_anp%RV%bO{67KSQADSofNc+#~_)p@L!h6H|& zqVH8aMB&BxF`7kLaKvuJrc|q`G3+SYmz5eR?N1VLV#RI3Imqu+UC2Gl@HSdVg3*k! zNx`iL9q_rbuAyX~eVJuxoWeMJ!>sj~2nehVqV#CEKocO93?pIWar{PdOsiBvz zQD{ulovl2bI;po7MHv}Pd|A*@Jh_KxUJAI|_4i&=EWu+kHQS2W7TZS705`X`Zl&U= zTK3X-@VjTr;f)-5O;bHv;96DVJ9ky}yZZ6a_>tL>i5LHKWsUu6WaAYn3Rw-Z+$Sq$)Ln-K_@_87rfDYMfHxL zh0{re^D?K7k)F@F{8_en5}~!lsL*NHhzNZY$dRLQ9nJ?C7ubZan5#+6eJ#yVx zcWv}gXVNDx5gLh16vABIB1=f}Mh;HX$M2n&SEf$rYpxBuvFkL&%>+5!9ot4^oYgtqVt+1<8CmvJq=g4L``epodE656Cza`Cy zIW9GS-s2*MQuoX%i#zB)bLJ)5SP1ekwP0ZMv~S+po};nyRRVsT84xU+DTM%bqZ zk#qNE2X2S?jSKTVN7HL(A5jY$RGpcqkoTiQ)|a2m`R(!C9D*!+DkjUKwXgBs>7u;; z1oY61+Hj#Kx7prvJ_+lB*+O|p2#dI3i=h!8f@dj8Hg}$A`4fs~ z>dxBOOm#`WXY^D!Bd@(~(6!n>X*|>Ve)3-5t>ACG&L8SF z7JD^@UMdLWe_Tdjzuv11`;bB9qF%n7X49ORp|sh+A_01=NTi_5g0{Sikbe@Z&KFv( z&%2>uiB}1Y(;3|Y9%YqREJ6XA!Ulcm>^H@%WKEymO4Ewp8uKW*b4B#IAMK}CX;Dm! zztw(Iqt&BCx5>hKdwlHk&0xqaUr%K6q07oyt!sm^{9olnvE@a4Y5CnFcjoRz)HRtH zmHIs>#HG9CfR}Wh@kLeS6$Z!iw@x_@_=qH~U;MjUXO#5jp)B~uwrY|(;&iO^$N=71 zt&Ii0@%kWyMvC=o&aJPDl6rclpsjyWJ*Uw7wxP!w-xuhfa802%aIWmooHK0upaBjt5z5ZZW4! 
zzKom8G~^eM|BQQ48@nN&HbJ;PU$Y~e$N4LJbpu;05F|@x+JNi%ZaF#J%o-DRESG?h z`%pgq;SnUvKf$n_et)?q9YOWw`0>i+DV~u-!=$6p8z#|meHb7rMcrCx`r*BYlho7`jU-~Gc9&CN zFp*K|jsjMkrCG7Br({~OHdoRbUxm`4=Y49%g9i4eI_TPLR@%&Wm#;FqxMFVN5)Oi; zL)F2llDz(C6x817hpKy6Ko=T&iH3kxzc_Os^1(en#LX^=L)K zm2b6i;vZ+4V`=A)1 z{_fVJPfrNj0ID8%0B&b-QnOcd(L4PeutMJ$ zlq&Awu)luUny1}(R_fa)qHkv^C>_2HvpXX&&7j5Jo4%5`1y@J0jH(ZR$OhzEHr+$x z!6^*^hrtwj3wD%(_7gdKx^*r?-dpxDJTq_FGE!<{o$`hFWhpJ-84>i(T0jGA({?=< zKulaJorLSmSMhX>iIXJ-c#p4E@}{E8kE+&QWlWa7!nHw~Pv??P_4J*H^7;k`Wd)yF zh?oCqHaBpRO1DNZkdT1WnVh7$hsv2F*J6Pfy;d~q6ZR&$4`bf?r^b%L4qbN+n+Bya zbm-?{uw;9q@oJR&b}8me?D1P$aYB()a81w7mBd7A1ozEuV%I8*6+&{S7Lyv>razUX ziIt>&=DLd=#~BHPKuEhM7BV^LZN_uar<`_X#}cIs^^`@_SlG=8q$;`A!-C7<4J|Si zV(cQs7N>X<>!JfNDEw&m8#i1CA{ii{c|=0B{XBYs{0Ju~cP8f8B+EDSn7@5;NRPI2 zD$0VJefGRa6~UH%2CjT=|0eYYW6Ks77>#3_clH(biV$J&jto6#W8o*C zBN#fRpZgldWl5$Kb;SJasZ*9;YTOm-!~7U)mnZM1LCw>b8{m@fpsdj@Jj%LD;d2CS zeR5+X`ZU{J5bHC1=dHyU>zzDY3ZaHlBkG<$oZRE-on{dwruw~nAG7 zAunrzqwWvZO3_YU$=K4*t8b-C(?7gM)0i^6jZ|wSt#&72!N!Yk#^j+YF8SWaL3TnB z_NJAI&-@DF{dG8#cC-vxIV&=LHIA%r=TeyVN8q87#oH+@=s~q`dEFj!Gb^s#q-H;M^_+h8`ftz(w)TFYL&jcMO5JRAQ=C{9>))OTBW{n=uAbLjqeWF3#lsC3l zRwb@kPv7~TWb0~|;(~#CnWD<~m6o56u@Kw^l~+_#Us{wWId670#oWJ%u1qujgkX)j z58Z4-OJ_=rSQ`iDivumyBG-|3b6}>KJHP|Ow*T@IEH{Fv%G=(wXiHCXL`EOKr$!r4 z=w^`5^TxDbC#acX=nOp3*d+<1K!GWmufScFcOGk9ab{g!-H&#{AaYbP&ywBcj(zmOS47RvnEu6#!f(7OI4$yA-A9X6zN)$u z_T1LlFb!NHS6Iz=whkDYOZ^bAiK{yozYsuqm=mDa-osr$i;CP>K(Ofy4R;)!i794t z@tURXYJKB%u2Et`RKAtSYKO$*I#<994*PSC!0~O}-6$n+Vx6Fj-^VTP7XEeK8a-RK zxI^`I!-5$+Eb_^^()c0pc*Ukm1C_*JFpOXX2lkqi!(rW}Z*n6N*#h7pP&icEmiJk{ z`aAdO<=0&ljz4lokb%q)d%_+cPBYJcpewbn4sL!_Q!c^?(4DQ}P#)BMEeOrz5_)4d zuB4fW)9Z))(U!Nx8GMK`@YdYK8_|t=7Fiv^MttoFM>y$tiaEJG352H9&Fm8NH>3sx7u;%` zcek8%OVDg-t|@2)^tOA8btk`)t$((J))9f%un{S;Zl7vojAO>9YfN9Mg+&BCGo3Aq zK^LWKN-HbqVv0g&4Q)iC9hdGL8BR#OYDuf`RBGKmYTkVeLsX!Cl-=2qT34fDR+R_o zXTURg4}F6XjP9gxk2sX3KXY*r2Fo2TW@MO~svk~mOnlX7##*Z}2S+%nBZ%MLdkfu& z4X(Z^#RkVCV*}JVun9^4!2?Y9UA*h!!*WyKcxHmotm~cKAEFUXVvwe$E@igj3@KKt zMtts~5rDpsq)p%KRZp|C72s97z=K>Pr=FKiX)viqrAE^RTky#j5CmIX$%&Rr$13Hk zR&5PoN2ms|yccoqY`H8uh$ItB5ok=j z@IXH_{4F+daw(uw)o(t@0PO@zSB3C-e&$vc8~Ug9nndUu_o{n#L{&7A47JUg?v>^n zZ(Md;kUky)mK=a~uyMORCMjOn>II%jr-?J%=1t$6ESfT(JCRg+5>0L!H+@Uxo%D3Q z#--|O*;E1#-kCZ?7>-WjQpyXTp?)9*N5@`KpLCVRITGXrad4|nHH~3Fmq&S?Nj-40 z>W4$tY3)c#p;k?$9b=)lBGCSjL z|8kr7G}yq8o>*!T>F_!S09ol>=BMi?%GrB}hx7p3DJS77Dy0(KSaHY(4=?w)cm`D| zI=BV+(6TDL7%!f7nxS}i<@T*-!eDm(3XYCom+={A6QHH%O-oRKcomKy_L0uK>iyXP z^?lvywgMmEG8u9l zKSEyuc*Fi)MCU0Xuj`Ww0W^TM4+yN) zM5aJi0y<0t9-wjOJ}!dyU{5hR&HUFR-T)u~d>L8TF^`aWF9?BkrwGK=5?$Z{ZOGGs z4jK)-pwhq#lYj;~K$i!VgutF`1optAXaf7&(JAhK2>%M$7mmRGePsSiLiB+K5K~O_ zAR{l#02&}KV0%Db!1gc&_`vTSjVdA6K_eh9AYE!W?V$310e`@tBVzHO3gp}?LKZ<@ z0CX^T$>D|B4ZMKz0M;j?+5_@}8qi@6$Ui#4-hYj4BEOb%_kRR`VTASp@USWH?vflr zjDQBn3x+Q_ynyXN58FcnFJOD1Wh3mBNMN5t0((UgS~mVK;14KwfQ-2JhfGoce~Vm@ ze?S8hpn);+LPL8%UYHAQ0(qe=vXRCgipBCVclxJ!0boG=ZVWjdGDxkaDHZUygl~y7 zFb5hSFD!201#Az<3n&|n;0WvS5ksKGJjio_C-k6gz`A{82DA&b6JT%2mQp#~yagF{ z<|5py2K{!@Kz+FBLmcE*6XL%L_;>}-U^(Qa4ZMKu0eJ!2gBcuuQMv@>G%T(9HTtj! 
zH2GE8+}xaGWMoia!`_|*_DChLmn`vLGoHXcw#3_JEP*{$3G69KV9i9b3S^oU&;e;c z^8&VqAC--$C(%|weSmUd2Ji*ilCTa0or1o`oO9<_;g03_`}}C76WMfNqqbal*E_+$MB^uE9 z!`{W@XEN-0OkiJF3V+z+mxOJYOhVR80vb#puy-+mHA)Dqwe=PLG!2^b!rzjO@ZFo+ zji@UdQq1qfANF&ms;PLNOJKbf625UVfqj~R1``SF+1yA2*hjd@_ow*)kJb&m&=%R) zR=aTy$jZ+g0@TAnyg3bi)%W-F^P_5cek+(nIZXi?C^e;lBZmgJ{GCV!*nix3bDv{- z_?~Q>|FZ3N<7AKv$rT&|z<+4WdTJ4l!uVZ zigF0-Tiq)DG(Y?**$8`s)9Kmoz#sN`r^<)@s>y|IQ^~n4YCr=OiUyGle85cu*dv`( zeyS(?-nWegKPwyOfz8yAYJUg*umJ{DJnZ>RVqDc5XrRiW0m=fT!A7MfIsx`lCm(BT z$#t*}kA8su4an(lW!M*;!2alDHRwu8PkzI0Y%UwIFGZ){fj`9cQN>pk7m!QaH5zE3 z27I7O&LAH&&>)9RMrkG)=!k{=*uZ6#li9B_wu-W&;hTlJi>|Xn^vN(h1Go?@ zjwP@rCt+KUCZ|@)6IiE{9JiDup37wjtT#^OQ>(|5YyMja>>*Eli`!;58oi{z!+#%| zV$Z-e%_;l$XKWvQ7xpb8upTA3vTHg;gNxhMfe$z|IE#GHNQ1BSVgTAl6b(2wGScAm zsxLHv_1=I69Qi#Xs`$P;CpuCYG^ABDfxHdpP3297o?<9%LuH^PGa3O2t@;s zhsX!06TYPvkp^fZ``U46z;2+yN#uhSa^#rh7apK#kl^D)o`NmG-G^w&Zal;>OA)-| zzYG5}XJ8*;NEsLQP&{yXr)DD!P$$qdXjLylTcG<1bYF>W&EbQS*bY|6eno>5s}#tm zx>~Xh^gH$;&{k+kHlBxZ^M>@*T9?0y|6!d!s)~X~u_S)C7SLcCfwgbRCFFxfo$zhF z*u1ZV{Y9j~sa5iXrUADsKt8A_E+o4_cYMokd>S|I~Q%P&+XEZ)W5c&k1)$ph2>yRp zAEGS`>r7KLs40CzvTp~F)W|L5e!v=X&)@U(f0ml%z^hG_tu1tP)o~kvKg0<-B zZ4!qzGT2AuMPCLd?gef9W`B{U0k>YPEanXE{A&DvRQI>_`C91n7w4psD`+194a(j; zr)Yxz?gp&m^qCv`O5g4;2CP*gIObAZT>Q(we_QeS(dV8%drH}eSKW1~e!}gO79>9{ zmSkS`ATe}5p^g2;yAgXR+QI(Aza0PO{Pe4?*=#libI#o`%0|Tga%+91Z}k^1fqhW- zv5KIsGchswpTQsd38vD*K;QIN=r+Li;GK(xc}Snr|AHDxRSuYK=SzUV^UQGP9fh(u-+-TfB!x? zeE2XyKBCKM>G*H{{w=|a_h1Jtf;M7H8T%Ejz1PzHpT+<0>i(8s`6urEEdD>L*XZYw zZvO!1DpzZ14UQTBiM|HuEx1OK)EZ&QG7Cx4<%{>eIM zyuOY7?;HR8leo52*MIo`C;$GFf8Uq?_tr7U|58rgBuKK3E0Ijk2^8E9T!g7>yocxa z*7J~Lr;b!cyuK&@{+Dn^SexUIc;}q8rZS!7M}hw+@PCH_IK*7T*i4NyXEXX!M<<3rlOt^1PF*)Uer~v?k2OG*&G4WT9I*f- z5?=}a|8nGMxTcSchU?#Tr0Z#Zq~Q{1sK4v~)g$%czVQ#&iC{rChD@&$;WQX7{=o#` z4uEVA`7C6d9IJzj2|VNp238z^tPYv)9|ZHOssWyEki8&(M!)Q*;B#~qe6GB}-}EF* z%ZC8;_rU+G5`W|*h+rg|UlsHZ+yFQ_Lrz23e87Lw5Fn%Dt((W)V_cmh#3N>YnW9?! 
[binary patch data omitted]
From 695b44a3a522df0858822adcd74c3c297ada28a9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 29 Oct 2024 20:05:25 -0400 Subject: [PATCH 168/199] Update sd-scripts release --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 731664b8c..1e2f7b0e4 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 731664b8c34e55a50494f4863d58764dfd42beb6 +Subproject commit 1e2f7b0e44ee656cd8d0ca8268aa1371618031ac From 021c6f5ae3055320a56967284e759620c349aa56 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 29 Oct 2024 20:14:02 -0400 Subject: [PATCH 169/199] Add support for save_last_n_epochs --- kohya_gui/class_advanced_training.py | 6 ++++++ kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 5 +++++ kohya_gui/lora_gui.py | 5 +++++ kohya_gui/textual_inversion_gui.py | 5 +++++ 5 files changed,
26 insertions(+) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index 658589501..0aa9e0429 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -188,6 +188,12 @@ def list_vae_files(path): precision=0, info="(Optional) Save only the specified number of states (old models will be deleted)", ) + self.save_last_n_epochs = gr.Number( + label="Save last N epochs", + value=self.config.get("advanced.save_last_n_epochs", 0), + precision=0, + info="(Optional) Save only the specified number of epochs (old epochs will be deleted)", + ) self.save_last_n_epochs_state = gr.Number( label="Save last N epochs state", value=self.config.get("advanced.save_last_n_epochs_state", 0), diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index db6f7311a..55454b526 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -165,6 +165,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -372,6 +373,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -574,6 +576,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -990,6 +993,7 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs": save_last_n_epochs if save_last_n_epochs != 0 else None, "save_last_n_epochs_state": ( save_last_n_epochs_state if save_last_n_epochs_state != 0 else None ), @@ -1343,6 +1347,7 @@ def dreambooth_tab( advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs, advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 95ce1b6c3..77351bbf9 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -173,6 +173,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -386,6 +387,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -605,6 +607,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -1051,6 +1054,7 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs": save_last_n_epochs if save_last_n_epochs != 0 else None, "save_last_n_epochs_state": ( save_last_n_epochs_state if save_last_n_epochs_state != 0 else None ), @@ -1475,6 +1479,7 @@ def list_presets(path): advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs, advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 87fcabbf2..68eb67808 100644 --- 
a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -185,6 +185,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -450,6 +451,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -749,6 +751,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -1505,6 +1508,7 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs": save_last_n_epochs if save_last_n_epochs != 0 else None, "save_last_n_epochs_state": ( save_last_n_epochs_state if save_last_n_epochs_state != 0 else None ), @@ -2648,6 +2652,7 @@ def update_LoRA_settings( advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs, advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 4e1ec4293..42249aee4 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -158,6 +158,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -323,6 +324,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -481,6 +483,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, + save_last_n_epochs, save_last_n_epochs_state, skip_cache_check, log_with, @@ -845,6 +848,7 @@ def train_model( "save_last_n_steps_state": ( save_last_n_steps_state if save_last_n_steps_state != 0 else None ), + "save_last_n_epochs": save_last_n_epochs if save_last_n_epochs != 0 else None, "save_last_n_epochs_state": ( save_last_n_epochs_state if save_last_n_epochs_state != 0 else None ), @@ -1226,6 +1230,7 @@ def list_embedding_files(path): advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, + advanced_training.save_last_n_epochs, advanced_training.save_last_n_epochs_state, advanced_training.skip_cache_check, advanced_training.log_with, From 2e6a69fc918d050d9c68f222c9314ec462be4a37 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 1 Nov 2024 10:18:49 -0400 Subject: [PATCH 170/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 1e2f7b0e4..264328d11 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 1e2f7b0e44ee656cd8d0ca8268aa1371618031ac +Subproject commit 264328d117dc5d17772ec0bdbac2b9f0cf4695f5 From fe9066dd3a007660adfe94c55375022e88a50e78 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 2 Nov 2024 07:41:40 -0400 Subject: [PATCH 171/199] Bump crate-ci/typos from 1.23.6 to 1.26.8 (#2940) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.23.6 to 1.26.8. 
- [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.23.6...v1.26.8) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: bmaltais --- .github/workflows/typos.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/typos.yaml b/.github/workflows/typos.yaml index 1613da32e..9b7ccbfe0 100644 --- a/.github/workflows/typos.yaml +++ b/.github/workflows/typos.yaml @@ -18,4 +18,4 @@ jobs: - uses: actions/checkout@v4 - name: typos-action - uses: crate-ci/typos@v1.25.0 \ No newline at end of file + uses: crate-ci/typos@v1.26.8 From 7266e97cd0c3f4639c93ef068a5e9f301c20fa6a Mon Sep 17 00:00:00 2001 From: Nicolas Pereira <41456803+hqnicolas@users.noreply.github.com> Date: Sun, 3 Nov 2024 11:52:38 -0300 Subject: [PATCH 172/199] fix 'cached_download' from 'huggingface_hub' (#2947) Describe the bug: cannot import name 'cached_download' from 'huggingface_hub' It's applyed for all platforms Co-authored-by: bmaltais From a35d0cdccb07d5d9b44ee6cf95ee1ccc652a7607 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 7 Nov 2024 10:33:29 -0500 Subject: [PATCH 173/199] Add support for quiet output for linux setup --- kohya_gui/class_source_model.py | 2 +- sd-scripts | 2 +- setup.sh | 15 +++++++++------ setup/setup_linux.py | 2 +- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/kohya_gui/class_source_model.py b/kohya_gui/class_source_model.py index f9ece6577..d2d87dda6 100644 --- a/kohya_gui/class_source_model.py +++ b/kohya_gui/class_source_model.py @@ -248,7 +248,7 @@ def list_dataset_config_dirs(path: str) -> list: interactive=True, ) self.v_parameterization = gr.Checkbox( - label="v_parameterization", + label="v_param", value=False, visible=False, min_width=130, diff --git a/sd-scripts b/sd-scripts index 264328d11..123474d78 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 264328d117dc5d17772ec0bdbac2b9f0cf4695f5 +Subproject commit 123474d784096a2a3063d1ff91ef1957b41ccc72 diff --git a/setup.sh b/setup.sh index f3a1eee79..21574f008 100755 --- a/setup.sh +++ b/setup.sh @@ -23,6 +23,7 @@ Options: -i, --interactive Interactively configure accelerate instead of using default config file. -n, --no-git-update Do not update kohya_ss repo. No git pull or clone operations. -p, --public Expose public URL in runpod mode. Won't have an effect in other modes. + -q, --quiet Suppress all output except errors. -r, --runpod Forces a runpod installation. Useful if detection fails for any reason. -s, --skip-space-check Skip the 10Gb minimum storage space check. -u, --no-gui Skips launching the GUI. 
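The setup.sh hunk above adds the help text for the new -q/--quiet switch; the remaining hunks below default QUIET to "--show_stdout", clear it when the quiet option is given, and append whatever QUIET holds to the python setup/setup_linux.py calls, where it is presumably parsed into the show_stdout parameter that main_menu() forwards to install_requirements_inbulk(). The sketch below is a rough illustration of that flag handling only: it assumes argparse-style parsing and is not a copy of the repository's setup code.

```python
# Illustrative sketch (assumption: the real setup/setup_linux.py may parse its
# arguments differently). It mirrors the contract used by setup.sh above:
# passing --show_stdout keeps installer output visible, while the -q/--quiet
# path leaves QUIET empty so the flag is never passed and output stays quiet.
import argparse


def parse_args(argv=None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Install platform requirements")
    parser.add_argument(
        "--platform-requirements-file",
        default="requirements_linux.txt",
        help="Requirements file chosen by setup.sh for the current platform",
    )
    parser.add_argument(
        "--show_stdout",
        action="store_true",
        help="Echo installer output; omitted when setup.sh runs with -q/--quiet",
    )
    return parser.parse_args(argv)


if __name__ == "__main__":
    args = parse_args()
    # show_stdout stays False when the flag was not passed, which is what
    # setup.sh produces when it is invoked with -q.
    print(f"requirements file: {args.platform_requirements_file}")
    print(f"show installer output: {args.show_stdout}")
```

Note that in this patch the variable is still expanded inside double quotes ("$QUIET"), so an empty QUIET would be passed as a literal empty-string argument, and the short option is not yet listed in the getopts string; the follow-up patch "Fix quiet issue" below removes the quotes (an empty value then expands to no argument at all) and adds q to getopts.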
@@ -91,6 +92,7 @@ PARENT_DIR="" VENV_DIR="" USE_IPEX=false USE_ROCM=false +QUIET="--show_stdout" # Function to get the distro name get_distro_name() { @@ -206,20 +208,20 @@ install_python_dependencies() { case "$OSTYPE" in "lin"*) if [ "$RUNPOD" = true ]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_runpod.txt + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_runpod.txt "$QUIET" elif [ "$USE_IPEX" = true ]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_ipex.txt + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_ipex.txt "$QUIET" elif [ "$USE_ROCM" = true ] || [ -x "$(command -v rocminfo)" ] || [ -f "/opt/rocm/bin/rocminfo" ]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_rocm.txt + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_rocm.txt "$QUIET" else - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux.txt + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux.txt "$QUIET" fi ;; "darwin"*) if [[ "$(uname -m)" == "arm64" ]]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_arm64.txt + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_arm64.txt "$QUIET" else - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_amd64.txt + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_amd64.txt "$QUIET" fi ;; esac @@ -322,6 +324,7 @@ while getopts ":vb:d:g:inprus-:" opt; do i | interactive) INTERACTIVE=true ;; n | no-git-update) SKIP_GIT_UPDATE=true ;; p | public) PUBLIC=true ;; + q | quiet) QUIET="" ;; r | runpod) RUNPOD=true ;; s | skip-space-check) SKIP_SPACE_CHECK=true ;; u | no-gui) SKIP_GUI=true ;; diff --git a/setup/setup_linux.py b/setup/setup_linux.py index ba34dcf1c..22c6bf256 100644 --- a/setup/setup_linux.py +++ b/setup/setup_linux.py @@ -20,7 +20,7 @@ def main_menu(platform_requirements_file, show_stdout: bool = False, no_run_acce # Upgrade pip if needed setup_common.install('pip') setup_common.install_requirements_inbulk( - platform_requirements_file, show_stdout=True, + platform_requirements_file, show_stdout=show_stdout, ) # setup_common.install_requirements(platform_requirements_file, check_no_verify_flag=False, show_stdout=show_stdout) if not no_run_accelerate: From d634eddaf08258a7eb09759be624df2724635758 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 7 Nov 2024 10:38:20 -0500 Subject: [PATCH 174/199] Fix quiet issue --- setup.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/setup.sh b/setup.sh index 21574f008..b2dfb1b4c 100755 --- a/setup.sh +++ b/setup.sh @@ -208,20 +208,20 @@ install_python_dependencies() { case "$OSTYPE" in "lin"*) if [ "$RUNPOD" = true ]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_runpod.txt "$QUIET" + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_runpod.txt $QUIET elif [ "$USE_IPEX" = true ]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_ipex.txt "$QUIET" + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_ipex.txt $QUIET elif [ "$USE_ROCM" = true ] 
|| [ -x "$(command -v rocminfo)" ] || [ -f "/opt/rocm/bin/rocminfo" ]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_rocm.txt "$QUIET" + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux_rocm.txt $QUIET else - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux.txt "$QUIET" + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux.txt $QUIET fi ;; "darwin"*) if [[ "$(uname -m)" == "arm64" ]]; then - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_arm64.txt "$QUIET" + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_arm64.txt $QUIET else - python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_amd64.txt "$QUIET" + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_amd64.txt $QUIET fi ;; esac @@ -309,7 +309,7 @@ update_kohya_ss() { # Section: Command-line options parsing -while getopts ":vb:d:g:inprus-:" opt; do +while getopts ":vb:d:g:inpqrus-:" opt; do # support long options: https://stackoverflow.com/a/28466267/519360 if [ "$opt" = "-" ]; then # long option: reformulate OPT and OPTARG opt="${OPTARG%%=*}" # extract long option name From 7edcbb0d1befdc3ecf4e4a79d75ea162b2cdb0c1 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 11 Nov 2024 09:16:25 -0500 Subject: [PATCH 175/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 123474d78..8fac3c3b0 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 123474d784096a2a3063d1ff91ef1957b41ccc72 +Subproject commit 8fac3c3b088699f607392694beee76bc0036c8d9 From e198f71f6a29d13b102bf5f81012a5fbfccf9c25 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Fri, 15 Nov 2024 08:45:30 -0500 Subject: [PATCH 176/199] Update sd-scripts with blocks_to_swap support --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 8fac3c3b0..0047bb1fc 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 8fac3c3b088699f607392694beee76bc0036c8d9 +Subproject commit 0047bb1fc30a9987138a20f52f774ca536ff7b6a From 0d8fe4db4c23c0ceef7a31f2b67a09e230fd528b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 16 Nov 2024 08:24:16 -0500 Subject: [PATCH 177/199] Make blocks_to_swap visible in LoRA tab --- kohya_gui/class_flux1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index 547e51934..d165705fc 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -201,7 +201,7 @@ def noise_offset_type_change( interactive=True, ) - with gr.Row(visible=True if finetuning else False): + with gr.Row(): self.blocks_to_swap = gr.Slider( label="Blocks to swap", value=self.config.get("flux1.blocks_to_swap", 0), From b332b1df9dc9b48b3d8253a98a9ca2fd4cc7e03d Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sat, 16 Nov 2024 20:33:48 -0500 Subject: [PATCH 178/199] Fix blocks_to_swap not properly working --- kohya_gui/lora_gui.py | 15 +++++++++++++++ venv3/Scripts/python.exe | Bin 0 -> 268568 bytes 2 files changed, 15 insertions(+) create mode 100644 venv3/Scripts/python.exe diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 68eb67808..f1713edf5 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -287,6 +287,9 @@ 
def save_configuration( split_qkv, train_t5xxl, cpu_offload_checkpointing, + blocks_to_swap, + single_blocks_to_swap, + double_blocks_to_swap, img_attn_dim, img_mlp_dim, img_mod_dim, @@ -553,6 +556,9 @@ def open_configuration( split_qkv, train_t5xxl, cpu_offload_checkpointing, + blocks_to_swap, + single_blocks_to_swap, + double_blocks_to_swap, img_attn_dim, img_mlp_dim, img_mod_dim, @@ -853,6 +859,9 @@ def train_model( split_qkv, train_t5xxl, cpu_offload_checkpointing, + blocks_to_swap, + single_blocks_to_swap, + double_blocks_to_swap, img_attn_dim, img_mlp_dim, img_mod_dim, @@ -1558,6 +1567,9 @@ def train_model( "mem_eff_save": mem_eff_save if flux1_checkbox else None, "apply_t5_attn_mask": apply_t5_attn_mask if flux1_checkbox else None, "cpu_offload_checkpointing": cpu_offload_checkpointing if flux1_checkbox else None, + "blocks_to_swap": blocks_to_swap if flux1_checkbox else None, + "single_blocks_to_swap": single_blocks_to_swap if flux1_checkbox else None, + "double_blocks_to_swap": double_blocks_to_swap if flux1_checkbox else None, } # Given dictionary `config_toml_data` @@ -2745,6 +2757,9 @@ def update_LoRA_settings( flux1_training.split_qkv, flux1_training.train_t5xxl, flux1_training.cpu_offload_checkpointing, + flux1_training.blocks_to_swap, + flux1_training.single_blocks_to_swap, + flux1_training.double_blocks_to_swap, flux1_training.img_attn_dim, flux1_training.img_mlp_dim, flux1_training.img_mod_dim, diff --git a/venv3/Scripts/python.exe b/venv3/Scripts/python.exe new file mode 100644 index 0000000000000000000000000000000000000000..8655d9d5a5ce275d03737ec3dc733c7591ca4b8d GIT binary patch literal 268568
[binary patch data for venv3/Scripts/python.exe omitted]
zHK|{Jl?Vc<4t=HBF}0VTUY|ymbnP%v=_r)->J6BS+CuQO3=PR-s=HAUc844RDjrnt zd??Q2!WwGD|MGChiqr9W$BKt46?Z^Hfz{m5Uh^)9dL*0La zR2HlI=JvY3f%O-w`^GEOoyG;#{m~bx?x9z$yCQob-2t6U=QJ%#G8ogG|JWN}uW30EOm>4Od^S5MhG)Ud5DFd5Mw{0N9J|ud4QNt7$aKBM& z>u3Z;Ip}6OdY&rh+AbR-{lK2Iz)OQCYJ6$g=O1 z??N*}qkxXg-+%B2)P_B4k5i$5%8)P& z$)ZQfhwh_=D6LVv6Ci^=A|=v%3uMrTLIxcJGC2uHIRP^07^@hw;s=oAcQhKkJ>*{qa?J zbR-9}SWCJFZv(As%^Dmk$T8r)q32z4!@O%ByWw{N>zY)GWb~{CIgO_#L@APtyBlheS^>TmLFF-Fhp0#kxUW{FOlLku) zd$?6fcXJ&wWy_ldn<&ls7JDhT{J93Y|1u3_%ONZXM;txCv6eYvr^xijVm(bKrci50 z$_%eswwH!qt!#Jvu53jhQ-54GZ0mksw&DMwY)yTTE*SCZdyjGQQ0xdW;A7bI!-Un@ zs#IQ~xZ|Z0OgP>!MqL$}10Ez6+E`KAHIVp+mg=tQt~)~kd*U2D)MeTM-6>{QDpaTJ zFpHDxU~^3|U|SwaNhFgIUDDXa5+7C}*LP>HO>8i0RZ(Xj5fD#4|ox3Al`5}k{vK+(@yV2cnT(WL9 zNtgdz01Fbn2q3m@6%$8j-kZ4&;OIl2m zWx2JR1J>meKrls0F^JGHNJh2M@@A?_oEYUr8`R@MC#W_elh7}Iux?5?n}=c z;F#zG^g-yCPshFqi)Mx1-HvzTkOB15&aTI^Lq4?a4#C)2Fjv;t2a+f&Rw@~C+%!c( z;1aw++A?Bm?Bm3(-*=M^#2(y1-4mBijDlbr?<%KF(N^CMgs-w)OV-O|8c3#JVY0`V zc6SQ#_6=|LR&5Ev7D?J2>w)Nl&a`@*S7TazZR>79=q!j8c4XCJ5sYHx{V+)uE!|3V z$9lE{l#RuY(WXz?Emo|8F~uWRLM&d8(slHnNHqQQq@NqDNTQY=MJD(U>TyP zWuRhohm(qCjwd!KrKZ6x4wl&U2(LMoa%tr;%4s$ofGnT|h7GbG4rSvvm##|@m|ROrB+kE z2g%@C`-sgbuG>4T705&nc*^fT1)o6*)oK7u-MlKwan^bSpGDSrsL3uQ)e-EME6 z?bH@tJ(GL!cG-?D2MsYvEsmUCg0nQDW}?0fF2)MqQcYR7e!Bo$%??qH2 zQ-0}o8in>1qj8_rhHzzDh!j^|wDpGGRxAogUTAlPdr4;`M;QQrMjKwixxZIGdLT#d zm>QuKw|)~*s_8OS4?|>W72_zG{!i_-!9f;;V_XBCzpRK-JEal9c*VGFvk|7CQB53W_|{gYyK9}lZ&F@6dO8U zz?@tR0aLSNDS`|wo*clk$tQn@y&trKK?sV}FpstMkMQu>olUyA|)ETA->}?wsoR-q2ca4!3{kXbh<#X?W%_z0e)Uq?gb}P@AB`I$GF~&LP zb>vJ12*@CPF6DsrG161hNwVfZarfBkK#WPC!%v4@XlEYk#2SAT6*jv&ef81*cDy z&mlGWy4i%lNYuJXg5A8v{!ef{?43+y2aMh>M5nbd>Lf8$`c(ec4$6ayyolxDfmaCU zI+<77ci{U^+~uyJD&S^AEgrZ=?^Ettw^t0h*lR}vbGWvFTP5}AByo&|-zc$fm} zd@%=uqY9?O(Nw8UE{nnV6P{LJ+XN!u8{& z@1aVE(sGz;xKgb5u!;l&1n+*)g&)XvL8?`(7)u`Qca7QPOV^OCxQ1N27;guG7k;`K zZ7L6l1qR|z(-q_lEV#vrFhu3okfTX0)R&o*A#o3}0PA=cS*$pQrZSWI2(v^*W-Aib z4k)~Q;NKLJ#17s;Fh$ly!h@UBp_LGm5Fon3etw|Ssq{GRuU$P&bRwDrB+Gn-O>=4)rK_m^rrlpt48@iXST$gW;m&eO zt$J_!YwD!H(M@o_;+^ep2NJ>>tx&lJgkkNsA2Dh=vwPc7@&|V**kU*G{Mf3&{bhvw zFNC}*8ZZ-3k%3HuoXCm`eBXmggVn)tR>N=x)PnT(_Tx=%c^C5(T}bce9f8#)I8(ac!f z(`|511&+}qi~$K_K*AVs<>}dm4tLXn-dUhQT8wQmdkMAyVo9`&%8;vZ0*1Ytn-D|m zDiq&{_h#Wl>q;z>;ed4kvEb-92{x=~E7&U+)Ua_dZ&2I~^4%ao*n7_BDta?Gdxb8~ z6+@q$9%KjVQqQvU67T`!AEBT)sSe!KbslW>9E`sScgNU*YtT9DH!ItB=M4X1asp&P zg7^ce<)HsKItKR;jDkDPw=)Wm#1-$LXE0VdaUzMOK~E~Zi;%wDn)m^Z;cWR)jPwuXRS%9rtjGDXTy`I84rHuEx&YGp_T_SFqN--KgTI-fQs#H7S_ z9F0tOuCYeC)1QPKjuQG6mtf~j#3b_XgFq{RGiVArc@wcz+V}q;!0c`-(m^-eBj+F~ zZwxU5I?LCPxx3YIsc}(){5=v@FAN7zYWdAcwEnc6qQiiGyOYJ@5acgr=2zJN1!6E?Bezoz}=3i|?P$645-AR+XfEVx7$cvD#%8?jx zClFfgy#`WmCaI1FOc(hK@Q5?RF!?wPjCD?TUX}A2BrB5uB9e*G0Zha(Ht@jDRP^dx z26fn(@Rp`(<<(%~oS{<|UF6qAs)3p)D<_uk>>h!R z_(wYH76hfwN>UXR>rpRJ7$@Rpo{8>N<3X4{s%ta)uV)domG5bK1AJk>^LRLy##3|5 zBT+3}6hfz`PZ^DJykCi13_9;5z_3qW#8P_(v3Ld6 zAB#p=ByN4!kVp|0mX_+B~Ja1;@3asi+6pOFOJ+Lt{TZ)Pewh0Fc-wx zL&Oaff8kbI_fve4YY|Y6E}B`L&pzsXB}9li#v;rPYh*C99z{ek?m_nY zA)m2q^$9-#!o3*@|9}MRU~Yh!zGv_~2e%;`lwv1rR*I_n9pd2-Mjtgs$>S>&9|hFk z)E{=Fg~mTQK#4!Q8fi7cts38bJbhGb)&YFKfFD)LgD?EsD}@#ss8l1WFN6AXNHS{q z3rHT_33V#{bX30hOTu&TMX&j}>4)E-k#Vj4T9hKP<$0R^!bYOta=#wv^40+SM7Alu zQRMq8h?4jEeE)qpYf4xhyfKJU!rmss0c&{JQ5dKzx%1Zslu@Mnfz~AVuG6LUqjsLY zxB{k213p67UB7(&;>d)>fMSISy?Ix|`S0L@zI_&B1!1CIr!bdh4@pAJ z;B(VQfMNwbPzK+~7g!1>2fR%1bm@;FyH0;nM|sumI(~A(SvD358u_~ zjry6w|KN@M4iKF<|4mqboV{nKrxd*#4mSa7yMCPI*>y=jU^Jz2NuLk`xbw^puMUXj zhJ?2e8|Z9gt`{iDOS52U*iyUm%;_!rkU(UA=BiJa4!HA7-Rtdr_{__^5AX73ehRNG zVFng9vuf8xe><;5#!+$1eRfzPL2qXLu1ojqf%}j|UN9?zvwqn1V`d%lY}k3G>D3_` 
zR>-DT6Z(;_cUHr$le5kM9%AOhv(D{0nfcYOle51D8VB@dDa@{hTO-0nE_x1ePR<&# zg5cR%Va$GZ)_{A+ekpS&zCcZ8bwEi8tB;encXrex1RF{ZjoQ_8GU^;2G23-9u4-4q zFA3eK2EU*)8nGE?{lh*2HHpke81%;%;VrMhq!5sX64oiZv9myzIVaKu-J0PZKi{x}QM5X)@HCwi?SYLmd*|#34Ry z8Ff2ozkt&rPrfO(%H*2j&SxC$%|Zkc?2C7Nj3j)7OLKxZ9LF715Q#I3W^BVfrmaF2 zqJAWxmm{ui*jT}U;`JRX7y%*?b_?Y$TK0ImLEP2bg|Yl_sdkSE>Yyc$=UnIzN;mds zIMxE5*cv#{Nu55t9wy2lw%SyFx(ow@Tr{{4#(wL=A&Wgkz#Qs|cuK%0+~aBN9bSb> zvTD&Z3tkEe3SEnhdfaIXHQ_lh$kBwm@n|;L$wTKkt$_;}wWbC+yw-6hL{sNEqcw+n z%;C5y>h?57Lmo=O9h2#T?sm4{kUUa@NA3-@pnjfn(O3Z?!=#q*dWKDgJK%-tRttq@ zG4xz*52)Vhgkz%v-rlAyLK)E6&9nuGqG#c@P@_my?7ahEmEjXI(0JvyAFxQMbldSb z2BF*A3lPz~5r76jEz;aAZastJ?Jcy6_iB&_>>(xO^93Zj_QDog0_V&5;Rj*XFrOpv z*#nyv$KuGH#oG-zFF1u!;4f&I8l4{wWe%u&K9E9fW7zEWp-HLtG%%0S{;M1Q}tkNJrFDAS|5MQ`g+t6 zYrA$m0*-I74-4CUktjMY>9U>DdxJM17+$xYi}&YoO9Z++ZpUgH;*(fF;Z@Oe(E@S5 zLndgHw~7|9Pk)0;8?Jz+U(=En`$IZZNkmv{AUFvQPdfT-zGd`ZI@#w#<2gM}VyUQG z(gzBAAqwj31`C#Z)wo*R6pN6iHJ-tZV@T}w?NINvWNRMNNHz^#yMV22OtV;d9)&Z# zt;LN4_ZF;)h4@YmUW#YKm>}UT0Y|L2HtFnAZHI6RxdX9uq1)vJJtEw|*`@ z{BMeT>oL(msv*P=ys0VY&3L;=oOsZCtx`PX;vQ2bRyq*?EpmFir?HdN5MB=n*VOo( z^ibi^w3Pj&{V99X^~du~xQs1+P))Ft$=Ra`=i0tN8BH@DR}s1krDG@p*i?oxBn&v1 z1_rUMaOYS^nO+fH(vKm<_=6CV8#hrXnV^M5m4n0(0SqJDErk#ips#K1F!GxE! zYNZPxK-jfHuLkx~_aX&f)E&cs*<)B>-{*_^3T8)JC;MFZrUs<{Ioz$-btGcf5m}5= zYnbEs`CL%RtFBXm8n75X2-Xv7;R8y|#Bli37j;uU2yQZZO|TV#4E&w}?6Er0Uu^jptd{=FE)5ay_eXb|t+ zFQE~HS31kUF@|z5Oj}(@XyQ_a^aFcxb>tVsJ_3g)O=@IflGnn)DtWE(a;oE3u#hgJ zky*1=`~(we@cT}DjM%=eyFwVfl)r_%fx@k`M?jtL?n#Lu&K1MG1zU+o6I<@Cn}}uI zwwb6aJwloyli(%OO<@)xY?8D@IVkM|?JDt5&;H%YPw>_k#TbWCx||oHQyR7jLg5A4?!inlv2yQWD&d+K>(2u%pzz|BOvStZN?V+HhZbdg# zDiyrZI8>8xouh~!SfOg73z0F1(**khWr#k0I0B4>*z8 z2Y?~gQa#=_Nji+>04)Z<-*D9GTBHAd>fq5VRCKn)@JF9blg<_WsF9rk>gPe zy7h|((mYKXljF4Bv|XSz0cEH&RqjUS4LB~U!+~gHSKOS$dIA)c6~|QRfhX1;=&KYY z#8g?05^V=rKmc*;xt^7~DZma|OF%<|ZBX!{t;Q~v@1b>#x3ekyh&MF+h^hPxtxrsj zb4J?$Q~5dfN-XapOpafS_8v$v+8cogcz74f&#mYBTBMyO9Nu9q8T*+zyc+kd#fnF1 zf&=PK>#^p-+7D^AxOJ=+z*1LOv?wHbG{79nJg{zoIfUGghkpTVHue-J9>=O|2o=s2 zi`O!*G2urKGG=6F=JuYZ@KfHd;#O}rDvCFfd`#tyR5laSV;Mr_Z-YJ5CMpIE-mKi} z6}{n(y+&`R^1W11idHsu+a-jv-P}@bwB3LK`id49xp8;c%j8Vs#c_5AoF9iXLoRP3 zhF5P_#j^yS(q6P7D$d*P3syJQbZ4pQSfhzs&xVT=S(Apq9KqaKTe0BPf3ls*&r<0f zUZd@raxZJ7aL1)9wh>KCvx7uM6}{5ZPSP-$fh}OgU?e6;$67nz|l$cXQX0=q#&4{Y%FlO3-z7~ z=?A>vI2$8fZ1L!9y>MG*smO$`<@=GK<7}(xSn0skz8KguI+m{twRe{+ zI~s$x}KAIHyROaW7l@vfShF3nr6H7wEkGL5&Szs1TN2R#p=^8&E~%F zcK-g$J~8gS!Sz4_t|<{NAa#5Xt|;wOd6LjA7mlR~wwf#|+ZTWw`af*lrY9i>EX%?! zVDVvrayC#-@g=7g#~OMenj**60$*Aj;zU5EXn0bc7*cQboH1CjKBdDjCbf#q@VkgV zxYL+U7SMhq_g}CAYye<&DJiW#9N*4IlgeTo zL?ar&+cMFMHP_<`WQe`M>@*$|?*dD#dq%xQss%LG))?Ib5x>T`8H8ByCSY}I1kIzj zg&AU#O>A<48GVVCzJn+?aStV%w)yb6fZGYahHTIB+(crOn9C35Uoet&O5X? zVADJ45f}myAnSTk_iEAacpUx?t2TtFK6k?L4LOAp@#juYC{9{9X}J?b&nT1*PCD)c znnX@|9Jb-B0bi`Mr%~HlpJc)=QGWxjclbTJcp5d_L)XEhE3U9|4{ZmJaE+V0hr%7H zQ(7K5f;)!5Q9gwMhgA$NYOt&vsS)~?Dy`q5YpK$a#iDmuBeX45dcP&KrOM#9gy5YT znw)VqfEV7B*?B0S5wj6iU>-ZhF_O?}=s*$%!)3)D>g}W0A(T!2A-GAz{C_1-{xvi{ z37)UR$_+7)Y9yyFEFm04COfQ#5XW+(K#I&P`mLv_!(^ZlEs69LDcTH2Fm(Mid-m~;^90+CMY45Lmlc7ai=7`wuV6&D2vhcQGkMo1x! zMvaZVLKQ>fw;K)LIBR)0p%({a2p)-__-@{wWbQ`hNMNlz*7qV-^n{Skk}1R4i7;u( zpx1OU72u$dJ!`^Wy`a08I0J=jU?2JkgMc8UqAHTTo-H(XxKEJPnLaN_v+l5D~0 zt8~n9cDy`;Oh|^W6KP(+hyoqvhg)>GC-NI=OQW{FSnee)f!yRWy*;{f(d4j=D-PqbdJ=X&RroWZ? 
z>p>akkJu>K29fS!lFBw}fNB=8v{U+dHuXF;b9^@ci7-6-MV&Kn0%DrQ2d%w??A?@+$n0 zM4-}I)b{C;34H?h=Ne?%eko^b95=>k7Acyjq2p{@ngkJZ0gu==*sw1!>e6#Ew5`X| z@Gy8zZCY$YYEJbOsTT&?L_N?%GBqvc5RGUH$1S4}4V2%fX=>AwkWJm)O-pkx`}Q@R zMQ$kXmG;Z*L4;Ik77BI&Ph^7)K?mN_0q7G=b4}FBxinlY+QIoP$5?qS*SJls$IKeH zxpJbG-Z+Cce{>FZ1pd5(@J|@UE-I+^E4wIax(@RZ9#ri$m^AR|(&>iN^uf_-@%8EA zgu{7hQvV^e{z^|dT-rMyI}4#X-SwQ%4DKQBO2gHT8&8Of{B*#v9u3lz)7LMfsoYj% z5&_5MOC^_mEfcl*_Q>*-oYIBhmSKRC@_T>l%l((4?6@5w_hg18aq`m%EHNr_hfhHH z;~P>PZMu>{EuPM{UioX+gxsp_ZXZrphJCegvwsinPAXRQ_82;Q6%~JaFADzlA8QA;C_q5hH{)~ z#xBeRo+#EOro%X)vxVl1N8KRDcHCaB(!*JGL(RSux8XW|RkdU6+n~*%A|8$+&S6zL zj_#3SDw30GJ*RcyIBzY4vQ8|8ifge=^e1mp(ua3(gMeeSs(kJOSzr9u+7rZ&_lS4l zB(kU~RkNq^P?~6|OGP;-Tvs(PypPsHq~NgRX+0L}@zuuGBU*cSuzDdIFQinZV^3}> z!;913lCIenw`l(crA67?Nb1%c!B`rZmQ$CmspVGN&Qwx*e5OM`P%Z8K!Pls|1;POT zArFG7((Y%R0jglIk{BDN%LYj&KN(HhpFuIODuyrfNvbPP#*$MJrXlxf6fKTCo$1N= zCYCMF6l@DH(_-L(PnQlqOI&#}>*+|u66aVFt+h{Ou1gGtu)p>tW$6gA#4wO7w8A3G zAXw~!I+8Ik>Fr*m%@&bql50K8X%f?Y z0iBMty`&e-ulp-hg3M>!-K=7v#=_RnDnYff0{M~aOwR-d5>Gh)l*)WLL{9teQ=Vy{33M&Y)YLLgVJ2QZ>c>m9gWgONv9&TLpo?1zgNN4 zy>1Kgs-zZEiPaTT!jR(mBpn`JJiyj02*i^f9Cb1KKH|b4}|IMQIiExiKA_ zk@b8P#U&k*VsA$)dCcnQ0mcijhVg11!#a9s{V}?A@PHgL8wYLRBwe`@Uv%&(TV9M; zcoZe^JK^)Y&gaM1%qP?ojldUM=+AR3H{+HDC{r1VlbX{VkGXZi-PiFy6YmBa3A)V& zp1HQ(yat!PO=XRj{ehzT1qV80UX4H?d6XC?fUnjn#W5-qfI3J+lqtX#QJy>iQ-O^% zv~Y1W|77u}!}=$nc9J25beLrrI;;1gNdH@XIz5ABJU+cS9J5Zbw>_=ERrKf^t2k`r@Fx!Se^t`Ios(a2$tIILzcQpTlAf z%Q#%e;cFajmn6qZ3|)cvLL92&%9O%ui90(c7Wn;<^ByEVw37G%!|vQH1PX9n44bGwSKtRQ=C zQ26;l_60%qVs2N*S6PsKEw`)vb3M1K{bdukL!O;|+(G{J+zugL_K~^0Key{XSNac@ zOzacK?W(^u$bWv2|7vcJW=4LwI;A zw-4s_;voO^+#b*UYq?!bU*>iL_t!Tn`3>UsWNwe+_W9g?J-4stb~=iq&t`5{>2WQ$ zLu{LU8o6DipTYF9IVimDv{D{5{Wxw{`{45xeNkSHt*AtGLAnc6%iOtn+4BmNl#%Y@f_xjjJi?s{P)AoXt~C5%BD2ri5R91a+F zogl0Lya(T9_zp#+WiVV1|9E^8@EwWo5PZiW(ikK#6y|04LREv1wJ6_~J#RtYFvO)# z)}nbqcEac|AdbLWJNDS-E-n+4coCLrzhl8r6`qps3Vd!pfROeIdK#>hXE{fzb=u>ak2I)untm|d8CZ(i;nAvHMM z6_HB$wd3`7m}|#=hj1$XgYX;}8sT%77G^KR&?qP@uoZxEJeXHv$S*1}sQp6q&$i_) zDX;}}|G-d4$KQGWV|M;n6X5~Ks%>{t{BXDr8y-9#qH!lFiaq_F>yh5X=#A3A=_q9rj9^=Q%Ono zQjS+1#ae)&UXn9^X+UHa!Z5LDVKGr?!|n6#!;E1_104&9Dpu@?%Z_+1QhAz+SBB$NnwFy+H#9!w?7e}QlV+>|e? 
zzVJu!ONFIKF-Mq>cuMRWVM>|+p|0#1>j$Xk+=S4{Qc1#08rKbh7s z{5%Ycz_4^)66emIel*OrlMo6gtu~DQ1pl8VOoMQgm+}eKg$f!Symip= zq=WQnXN{l{!a5^FX9^KY?N5HnN7qT{L|^ttPMvj~>C66DI3+BF*Reke#M4%C@TWyS z|I<$=mI?iapz!#h&Ke!rJLy7oLWq`XuHZlyMt@4xsg61r5ssqa3p{jYc<9X1LHRi* zC={!^zAP_7Kf?wxQwkz<1|~WKBlJ};Kq3EctA8A*hn@8ij68he3hfIPC`M*S zE4tOa2RRIYUFFS?9alCw*j=!zbt8WgT?@279%Lsuwm^T9ehaie8DuAcxj_H-LOa^u z53)y4A@HF-OP?+Z@Qo}!QQuSjC|!z6kmdtQm&9wxjrPku_?O%V;!Ezh&&I-;xP2nGX8=;1JGuKb z4(|e_^k?Bqc`v}1(piYF0pDVL`{P@JFO{_nUn*-khYtgiy8~ZJrvhJ!kA5v&hwtCH z{U3l-&W-p|KL5m*!hMJ@mFob$x2ALZ<{0(6y^)JnQjvbvkM8vO~Vh>l|FEbE5(m$C;!yj%&L|7 z7Zm0)dtQ8EVxkhw|NHiLO8wsBa5IP7ICOKklfzmL_i^|Mhm9P{9JX<&{a#65&tZQK z4IB>UFpk6F942x&io;|M$8c!m(9EHg!we2HIn3p70f%b(Hf~?WVFib4Io!bECJr}q zxRb+L4(mB=$6+#u860MDsHR`c?PVOUnoIu#v-` zIQ*4E-49B+3>;cH%;IpGy--NUs&G2i$#;P_G=08Zm{d|AOv|Bb}+1ND_SI&^Kw|ihy&itQi@l?3~)vKocVxAqk!li(#WI0NC%~byZv5!Dc`Um zO4Wr($c8)uohd)R-IOi}8TisCJ$+yuqQ=@^^GM zp_KH=;E>Ge_!|kF=Hhp0Kz+!C{)S_=rZDtp@Vih*CA3wr3b&$+3-McuKlPJ`R!Ih8D$L zdj^MD9B$%JE&m_!3#b=T>)0?V76PhV1C{6SNAZbYu^=EGaT`V<^ZzsPZu}GR7NB3} zAoT^v_d!sl-GtXiUt9JrCv1 zg-uP7a7}fm&mW~nX%Gb~#HbI9ua@YK<1aX81mLPAXM7gNV;wzNt%IQ(u9Hc>*w1EO7 zAT`2^LZ|ZKLp(GZ6EvH2Y^nCTQhVhIc)twsJM!~7mM|CTP>(G_o6W=Tk}H&x(xT6w zNiVR*RA$0v$5GJUCe+gw@m>|k?+fGu&VxT;gg#Vql}AMOE8~w`i&5tyge6X50x(H9 zyAl;qdbZkLY8g7#Q`!INksq;JvrF=(u~Q-N0%^}j?ivo$CZ=Z>`+Wo4>3AvcHn3o) z7o|;1H6yst6JJwtaYoUCf}Ew`PI6|~^t_UV1%)_=Dk@CRv&}D}<-z~|;TSMrcI@~U z2fl{#*U|mTHY-}|KmAo;2ORv@F?>1p&gx%?@aO&lQ+QZt|07=GpZQbc8~#7v56a3P zc2rcZdStb84IT}D?D4fvJo(pkPd)w2`oBH<-19H|{lyLcc;HP= z&9~lu=iPt5_x=YTZr-x>qir8=-{JODRqx!jyQa2o&)$9e4;-v-ICS`vPmdfucKpQ2 z&p!X+RO9J0XTSXF>u=F1W#wu`@9`t>(Fq!1FS(S~*Etn1P> zJR-82sPEpRXRoN5+&OJJ)J|p2`ip}9tNZ`I0{=n~^&={$(!aPX z`2C&={$D@=L(2VsA%F6tQZRxKhqV8NhWxodHGW`y{#<Q7df2MZ4W=W5#?N-wmV6%({k;d8ZfICpudu!+wSVSqXKqut^RZ~!J@ z9hA)dRQnCW%|e=Bf&XnnsxT4zb2Ds$aLlLd=VT7sIMfNsx#E3Tc_m=Cox#(4ke%pG zL@LVXeyqI`gsF^QO)iT6bnrWHxfiFr{?C`;(}{14Ra8#GJvH}Jnej26`_4A0{`uiA zGTw3a&s$#Kv2k|w57*`3JO`iL_kSA|w)4;<`A=SqN?E`bQ%Ke@82VIak8|eW{{Q@C*S!mCOFvuqhPQ-a&~qSn*h@KkqBou&@=(M6;`68O zj~RFFg}Jnl6NE1dFIVrsFJfE%jWMG~XhJQ28Pay>ftgdPs!w3Sr+<9JCf&q$Ut891 z;A6%EYkmwru(M)VxOZ9gx*>0*_MlAreSTv4i~DwD&-(23*Jsy6n8SX)xA(S&JKy^7 z(si!Kv(^z~94!BcH6Qia_}E)BUwv_Mr<>oMI;U~&XJuVqyWDI%{@AAavVQl+f-#bt zoOa{WHFr5aoiOFg>_M*{AFChpamkNgJzHB!@8UVC@;?S%!` zKNQ{nOoldo)aN6=AC{5xw+HH8>D@HtvA7YsK1GG8{R;aMwv9IqHNK(ED7p6Hrw!|_ z`)L2&`A?5XdFkZxU-JGn?Tf|Fy<~WM+Ql8aUmvn=YWi8NP@YhI>8JWZcV@(iAO3iu z=UqK}P55m2oY6=A)^}E0|KulyBfb8;YRlr6AI!e|VUPQ=!TtZT`vBLy)iVt->u!~9 z?(&?jp!nnMd5a9*okO2{LB2PtuuIen@fU7z*1m)76dZv~`1Ad+?hX?q5#7YYsX8_6oz`(jg@c z5T)$-gb2r|9 zeAin)Z!5?higx(&r@3X9-fEcf-6q47=i2Ui=Jl?Z-wb)^mEltgp4xHkb^UkNymsi* zV~c8TZitw1;;(C8sCjnN-l;>P7d1zAS-q*%rjNPx*Y4J@-gqo$mj0W)&&UNwhmI@x z^1W5Bt-X8D=%nZEogbX~MAPJi#TN^o{l*yHduQ8|5&Gu27rHN~9x_3?Sogvc;iYda z^MzF=Onke*9-lD0;;vWp=VR&xXM8ZL{*&#OGH-Zz zv!<^lO*6VKz3;z-T`_0eX-of6R+rL{@J>_PpoRB5>r8&=Sl^`wN*3*2^5T}SmfogW z_a!48g7A-~Pi8MDTK?s8-yBc5_>ZD_i|_b9*n1PWs;ahccpc_RJq#+KCeE{>qT;~b zAP%Ugh@?2&2?z=bcmz~5OEgo;R5C3qD{>00luQfF49CcTWuENM_y|;VU-$O z=k~`h2l({JKKf(dqkDg7s?;6*}l=*zWU1z+$v-e$Wdb8L0=(6Z-KM%WK-?`Jq zp?fZV|GwMs-xFSX`r6KKn}kj}A)c%I?v)uo9l747)57_b{mb=dCEmYve0s>t{9iix zJTocsX4j78MK_k!s^guqGBUHEPRqDZZ$lrq-04S}blA}2&#%tDxV`gdeZwN}o=SCG zf*)&lyfpXs*P;)^?@!o#`t0H#e(hQmpKMwA`5Q;Pr<~lo6d%vRVe~HRKIhQeZR*{0 zQwQDnZsUeo>zeIZzhK)X3`J^El~mnM-NyL{uV zY=8NYsr#AnpFRJYPv3|BM+Q_}2)=S`v2#lOqjvs}%6BMR-ru=@+8-_rwaFP<>i6b&D_!Vp&Ft+WfZ_nGceRN^Kw@aIQxBBo{vrqj#b2!lM zh|{NsfBR_Q`5~@v9kx4vaM_3}opVO*Use#?DX8xg4oiG~{^F7MgTEZ>%$_*;VfT}h 
zW=37ynLvx-@r9H9-3-TyFFKa?&h$E`c6?{urMI@lUphar?QVybE8je6ihs4mPy2Sx zaQA7_zER`2Dc(hEdmnot^O^FvAHP3*sNIh#PCXVEHUHh|l}W#)5AvIKZs{vC7Gyou z{f+d4oj=X%ez0%rl4Y~EH2F0m-M9NM&nk&o_F(j(tRpA(7xhh_cxLpG zaXY&G^2j?ie#8O)bGugUcxzbsyHQu~o}WH%qI0`%efuTE}4;^m($z(lXD&NBECy| z*5PvV*Y!!1mb9?L71|#2AO~0M=BXtNj$Xpx0P z4sOxH!Tle?!6QXDddv`xwH64++IhmU&O5@X&KBW>kG?s1ekYtg&BEF1x^VV(G`M*C z8eHmjFu2qUH@MatV{ol+GPwHY8r*#I4Q_rr4Q~F&4Q>toFt|5xwR3OS!p^-BjU)6R zsti{yV=E!@7a2ro-#Q}aRlItB@s^`F{6_CU_;+qbE9`_LXo($2tLxI8=4m{BiYQ+> zkCbyiA!3=!>6Z|3%xPStBbhnR4@EX}GDSLanA7t(I&zuQUJN?&n7c|URx{VeN4VuP zr@aew6foE3pWubeY0m>4Ma(tx1TSVz`#k6cZTQxskd4UWhMq{k@9-=CoIYj@HcOd|QY>=1)j( z5zJhF?u&K^El?d%uUSwnCCF}XP(Er0rPz34Vf1*Z^XQq z`4h}bnKx!`W*)%Y!n_G{(NNY;Q|3nI&6o!;Z_YfBc?;&D%v&;#V%~~*9P>8JP0ZUe z&tcwoj%=4LdU|z($6Y~=0U6`L{9>lzic{k=2%)2vpX(Y?vgSjvBVCJov_hcT- zychEb=DnH6GFO-Ixf}BU<{r!gnH!mhGWTX4#oUK^ z9P@h2P0an7=P+-?d^Pj-%nO+JW?sx(VP48SoVl6#ROS`T4LqT836SM=VD8J@nR#pG zuFQj(yD^Vo?!i2kxj*w{=DnHcFrUgik2xOvYDYeE2j+#$otYOicV%A6+>N=Jxd(F# zbARTdi7an#=0@fQp5O&AcVHgK+?jbOb64h3%-xvBG526@V(!m8hk0-2tC<^k!dSrE zm3a|!H|8bGJ(!yV;`uWT&^z)g!G~@EOm*IVx zyE1Rh+>Lp#9zIZpkI=(2kJZC7Pu9Z+$?(~Fc;>l!c;>71@WC>Cf$q<|NcZm}{Y!Lz z=BIW4Q0ZT$`!lc5c|Ymz(p;7wH>ld-tMf?7TkAYZ@?ho;b0v>p?$11yx!iRquuo;O z7LfK@(V?$DMDR{Hx#~gptkR$MfzUy_K_sR#(&`9)X%stDBxY!s%;Dv(OaWgRj`riw zLF*rM&^iYl=^Sr5a}#s?>XqhCyR7J-eJFI$J}Wvhc`YPUOlLk*L)wW&2koY!gZ2W^ zk%AJ@LGR$vk!dZDOea;_RY~swNIr@4Ig`tmjD19O#G}UONaFmYaX#Wv6Ld`F^Jo_q z9m#w??IWUNDuUk={(LN}u3EHznHAVK&0c{=zoer{#4x+En?|aBTI$%X>r1aKS!#ZG{YbePj=J+t7!W9=_nC#{l1IrssmQtbbqohDsMEOPj=P;Ob<`?MqE#y?2huIhbQ}^ z`qRr#cGv;6smGs*S|)$JKFBURAh+DDkvhc}V~vmOl=7#?NA^nP>&N+{wLjXgTG?Jy zUwyUrS11VCGo`23FYRrl_S4fR`|g0;>Gzpr=fw5;BYUUx^zxD2cR*+p@A<)3kOPX02yA z9t{#BIDI*ugj@TI+^=1kKiYRosl{@5IiB>jwu>AeD$keWL#2PZR({*@f!Zs|#!jgI zM#814ean7bIgZ)(gK%w(P1SNF?X0r@OFQdpt#4bqu{}?(xBeJw>5%o(-)evIdL3cy zPqLoEtofGp6lNW_WIa`mT(9 zaO-#{)33}g?GY#I4Z~mC@huZqT*`;uvN9h-t@)JoKgeo_GMy-^zf5N+w=Rv_WM?&} zLnDB0p)#EqtNqAyD*Ka6r=PX_w((YN*EDS;sG9%b=t=UB@ea1(R1c%9<)QMEi*6Y* zUt_HP^89e?IHK3HeqE*WsU@nOua6=$uOi>R)^^qFPakh(dV{Uyl;LBnb|k}xS=&{H zkF?1T#XnZtPeUb^;q~68_qQQ7{uDmSW*nl@hr=f1A>$9TDL?r?Y3&!%zi+kkqpJCj zu-b>rk3MSZ`5S4yZcG0N8#|!%Xb+M+q<@sP{FU>2d8H@2phJI7tdIBlbNTi%8j;^c z-iOH9HQvebm7KjrA^V?ZhhpYunU^yEh`E`0K64B6<;+D_8UJ_8jm%Fn4`6l4Er0If66?L&$naVn*H^Dlg<7P>>tej`n)EB`C|6Z z;qZ>kW7%IHpY?S@eH}5G{nv8%Z00XB&t
        1yU5vws2e_x1GrWqI{^ViEfn3G?rnpJslTc^UH^%qy7h zW$w~l*2f9vzRcG%Z_WHe=E2N=WFEo%73Q(b&CHXTf5tqU`M1n-nO|hSn)x~A1{V&@%Qw z&e@TU^o%tN`qDa>=(KbN__ z4%&nHYWAPUyomGb!MuR|_4^Bb9k-Jnp8d0!`*MByGB08Ot;|m|AIrR$)2qe2jQxi* z&*S)7Gp}I(Da@nTzcF){VA($L%=LW;wVC^}KR)WUcmm(GOytCeV7-q|0w-@KN){r<|XVun)zww ztC^QEe~Ebo^LLoL^pf!v=-f~8ddz*f94hJKc2b%9RDZgF1=;_?qeRn{B!2P%r`QRV*WbwKu)g#^JMmagLyXde=slO z@Liebvi}_BQT{SNq0Cpa|BK9x>>td$fc-%VWGB0BP`OJek{f5j-*#9Z!roL$e zR0rfYD{qCgRFDWk_wD;4c zzg)LT#8YmaQ$8p@TVDA)A@VsI{k8R{Uumey_3NPi95{{F)8snPWPa+PKi`SR(_wkY z=Qjz~{7SB`PU_(&TH9Ir)BE(|3m{nM`j*z$>Jv#Pwhf7SEpR}XB%%inq-7nxpVdrLmq+J2HJTkS^j zH2gM!ZG7q0_LKhlYNH;%$)-N3Jo2|{D$_Sv?N**oPsMHHpHa9BS^>rWJg?I9p>suddU2HLcH8_D8ussIQ`vKdn3IsS&4fkp9X;u4~!)%l=mB zPdv?Pe{%hg{>nqH3+uZLr}S;ZQ~Z_Vfn3L{%#U1W)^{<`?+B8; zCs^xCa(#D$!1Ye91nDD&&h;LzbG=8(b!vTAgxnV)f2Bi~Pp^-q0wg}$y#zbvEs(>1lSM(BAyaXkioeMC<|=eGV-qk4|WpK7#{(>gl+s*0YT z-hb$-Mtc~fk6hn){CX(eGg0p~g_rKO*H5{gr>9QkC41F#OPtnw=~q?s{7@b!guHUg zb=u1MmcJ-u%W2(T-<3l9H>mWL_9OQJ=(|_Ix$&zC&enjgvO7i8oid8T!IkUYzJ z{gmtdmFdf~Ec{r)@)t+LV3}8yS5)(Nv3mcdPmI~cVD*uT{5Vscs;-u5yfQjb`{$N* zpZ@wB=S!ZJZNs~*n%=s{rUJz#!YJ>4$F1R8C<#BPf7;vnQ_xVG@KE(~>#k<&`PLVJ zbT19#D*ZKH8U9J_I9biV)XLv@osO@pT!uerOeeNZC5&6OGm9{P=E|ov92zvAFv{i4g@nb^A1x-# z$$M%Ep=HWv%LwzIb9aBRuYPl&aV=d?*H&LLd*K-*9ime-g<-3wEwj= zg!xXvZxa?CJoS#o4ZHFQ11>Ipm$2CG$+a3zZnBOr&+~T;a|}i6iJOBLY#=o59I}xx z^f{mR2#Z&r*Rb^7x&n=Fp0bIsbW`AFLgYh3@y4$AiRUeS_5(uW$*>Oz^QSv(Av7f( z(y(yL;;qDsTQ}F*-_mlShIz++(lD@n_($X)8njl!xZlJ!;>PF+8k&+kcQ^P>t?=%d((rPEU=cVOnn7`$;hNz!i8eGmtJ0~Vc-jwH4Oc=*KTq*xxB2Q@$wZ7^PlPSIl1SYd|5+r?TUu^dwcI8_rg(m z8s@)$S;OMu;4ifBufC*VU7P8s<$|qM`BX#~KC>I4^mw!#)ac{-C{vrQ2gPw7fG{!<_W>8Wsi|(J=3m z8ycb=_tSX+%lb+zP0=vtcAkcDh9V8kmLD_>Jm`2pE620#H7sp7Qu6%S5?%8(6tRai z41DRDhM^n1wDBo$W3Yx%FFmDUUhF~*L)|xPXj=cBAxbamw5nmLN0Np) ztzVSbZmWjox+kRn)d~$w?>72UtB<~YH4Hs6NyGe*B^n04wpBypnd2Hp*;i;7P_JPL zotNhss$uc1i5i;sE!I%XeqTe_pN9GQ*EKYG)zik~!nB?m=4>0Up}06t!>GG!H8kBn zsA1vNG7T-CyC0_Xa-Qv|VO*OL8s_iG)X=27s-dNzNW*|fXEh8|9!gAU{572)IxkE^ zQ_e&UEve6ISlI79$(_H_u=tZ-H4Gf#afHr``o4q2LBll+eb1y}VEN0ExB6JaoIxiv zG{)Z2P>ihi4V@p?tB1teu^Ni+vNbf#eO<%+db>0%ee0}-7SH<{24*(+md-~%(lF}b zI1S@k&ehP|XN|=<=L6~fLxb<={LJ1I^j_%aZ z(&dbX`3ovEj552IYU$5xpkbgxI}MF5^wKcYaj=Hw>@gZzeoEHRG$32UoczTa8n3^q zVPNl#8d{#-p<&ea0~*F%J}&(|e$ueG-E|F(;fAA>erU8&;+V!72F7;OPz(>zurwr6 z!~EvsG&I>y(J<<4wuYgb7Hb%1dR4>xP8&4D_@H6&tNS!8RE}zB`Qb+mO|$;cFwW(n zhWRhMAEW$4dHZW~P#gS=&zY6>MYiRD-T0>*o?iyB^pCRAc zu{>Z~)7@^{LQd4`*fpijM$H?Z%Yr*nn!;p}!By&YVX1IPa;uK${& zVmIi-hqDHM7*ad1hee%PTiJixY2fz>7ejn@4{dzJ;G}%<^X(Dl8^4D9cEkS(_c^Z0 zmv+Ub7uFSrjQA2OGaZde?euT{5aa49TX)!-+AR7pByC;yk9UqXD#L$u8eB5OTlsMJ zyKkQjucx&A_0IP9CN)z2d~he&yzO>KuElgH=9e2GA0O`J>YCx8y!Ve*SC^X_DOYz+ zpY`^Sjg>ejxA(qi<)X-Z)KkViHN|+lhm$fSVQ#ti6fdP$Le96_6j$Ykm34nP8t9{R z&R=dcPHm)wUEk%tW^p~G-R_M~8C?DfNu7M?n~+c1DDB@ru)DooJEdRJg=b3Z-wk=Y zhyCcd(7H-ogE=dg9Q9D1-88pV(uehxO%vWZI5gK&Sy1W}cfhm0QsL6MPe<2IO0SFU zemdw=UwLDA=EL_sYpFc9(7a>$&4(eY+=qVoW~XL~x5w;=vdp_7>9L1gb{}~{Sv@%V z?k{zlC`Fw+j|r{cN$D1x*kZ|d4V0YUmozGv&{(;Arp?`=IZZ)VFYgFo{bonSqg#SsUb8mJ{HL;mbEkDtwr+esa>}^gO2e7U&QED-3HjZ>SLg2s z_fhIE&7517SXc3#^kVt7EDoWPM=bM(y?4|s)=!fV-wL_Gxv5U+{)^}3gb}K%%c42Sj=(v5?3pWHPuiqZm zH11T0(!=uR^L0LQQ7(R+x1(9EqTGJYxI%?UNTO)fa5eG(9OxxRCxfy@GPSdZOD{DuFoD8_wO&N9lh4^{V zy_7m@PW<&?RIqaP`_>COdIT!wAN#E=^)3(DIOf;d6OAnt&+og`$?n)v`L4p~e@6K| zjDXN4hH|1y6{yfk8^+|K9Nks9 zZHV78`)F_F((V1DjO#p<`z~t|Os(;QjFXS$1n=slY<#B8*Im1}SL}ZM^Y!3`O_VDx z0dIYEq_+}v{PTW}B3QXNC9dVZm)j{nw)>>zk8|28U!^Z789viju_V{|Ib?Wa<)>A% zXB>{Z6Ebi_&hcj_JqVfh;}pN!U3(}~pHpraKkcM=8xwb>=k`?mU)oxlJo0|X^fpTy 
zq`cG(`ABP;wDM8NYeIE;Hnywc`;M30Rp0L1kGd;^<6IVQbXJunF)J^B5!6R<4-Bhq z`6yVK*x=N{zTv9UJ*($at`mDHKltq&wWx29vhmC|!-~V*m4>gh?X@7dl@hq3e(hY( zKFXu$w58`mIxF>mY5Ky+KVYX9rY>ADrj4?sRmTZK&(%^6baQH3zM(uMs89P`OWy&? zQv+|m_vRmMmCJF1pPM(NrP6VB_@%zJ+9-zM-Yx;HB9(F9ovPO{d$1BQ>Fn(BQT>!A z-wgfr#HL7P_sO{lA z2ORn}P*z5KyD#ZrsPav>Ghseoc2$P;p8Dj9^%)`I=fmt>?*%9~&-~&qc+|9G5QZh`8?^eaGlzp$A_Ux+@&6i)}}`&mbD(`TayMTS=S=AKJ@IQe0g;J zq6T{+m86*kkH!_YP#O*PciZ=EnDWk>p>7kt2~$i_ElWmq@2|u)ZQbqkvptk!J3|9^ z*Ke*||8w@!e=ZAEei?nf=d>?}Dx+_Dy!VPjq~fu0!?&lN8?5XaeXy44RG{Mad6`%0 zSN)W_&3<~tuYHKpcfkbjCSNvHp2<2pRC%M7((b*mly@o~gmiW(w|j3;6Xo2D`E!SS zI81T7I`L6p$}pwVA7}PBT?|uJG%R1xF1dp;cm28Ca_3&k?To!0g6@VYfA@SQz}y)B z{!%T@4-E5)|5Cl~88$b3_b)Y|Vt(U_`G2W9&gFk#ocNbIz36^Rr*>^+WlN9 zXm)+YLp7*T@*RQ<<2Sl7PQLp7^7efbWLhw7m(`i)P& z{y=qq)Bc4~ryrpgYP#3^xq zuD+*^xfnR&%7S}pnt$m=L)txc@QcaLVWaP<{_Df=|DKxP@ZjRM?eD2}L+0E)UJvg1 z2K>LLp6O~_wd~qmwL|jM`tN*ySG9Zh^01@(?y6^vPyX6`%UxAIAG@pmRlKZLr|0jg z8AToymrZxoEwg@d|7`qS)%?q;2Za%L)hq4aoOdYbuG;2O>7e@och!@52Cw0@?yC2E z*DT+Aw?b8Wl+TI2QlTFA4P0tCUZM8-3lpg?D%8z4rqyM9~!vPbyTI~Q-O@g?(HZajKht)IHL zUh3Z4YKZ?jw?QA>R`W7O9_{n)ZMC>Gx7Waz;h#LeUh@3gs{dp|_jjh>R_`_)G38qP zZFSw?-p>z@zO5!UaUWCC_qO^&@ry;Hy53ep6AZKNHM^~@+|oJuW1riqQ@z-@`Hr{M z2OZi3CEmWJUaebNH16sxb;G4=`zN2irH(ef(k}O_Tk6aA1|HhI>y}zP)-cuagInt6 zBhAX=-@2v#9y>qq*s@z{oMWtERL(86*VL#!zfZrVUW%T(>$QouRQWvgmilPHXIEMb zyruqD=VpX!&s*vc$AVFRwY{a*t+@27bE8}8!2_Nbo7Mr3GT{F$_07l&3CnNXRC_vh zde8jxO*OLp`90yMZ>s5ATP!X*d{e!+eB%Y>^PB3pFQW5LZM~@uHq88R-rAe$R=sYjiyNP8F{#5%b;OvbTJ8wAsrqOK&QGd+bb&L%dl#yP`!`hQtsD9- z|Ko;gx9Rfk_Lpv`hh}Vk`OL{1>ect3Ucc(A8|ss7nw^RJ{D$h&`y=n(AKg%!mdpum zzTt*Cah`AchOggHm$^(j-gGJagXen(&%dGm(f_NX<7V7YBb%<_~5MxS?*_J$l^bUN_Wxji&rQzS9l$3uC*%KQ+6de)8Kdv9o+{ zsCh>N4Suz5sBv}&+Ljn@sL7q){e0o=>*|oBkKP^j+jaG|oFT<6%-7ZYs163Vlfd@# zhu;6{y6V|u?6SLiuB)N#3?Djdzpgs&y|JRz zE5LyN*Hu&Fz0Ww@DOV$l=JmOGtz6A2QhWb-sa*YhaL38FPM52V=CA$G>AP}ujJe^1 zrU%Q_Hop%D9sF6j`qQrKljnX^uD<%TfBeq(%GI|$&3=Zr%hfe&U&)s_+O`33sJUpvax z9HU{+eP?jXBctV!MP1wC<9ol~vZ#4$u1vjk&7yk7EqK!DvPFG$!198Ca~8GN+ucU> zJ7!UPpWQut`d1cpdf1Xh?-g6rlDAr1yYi_;-EpmBMyIV7^*v*<+x(3dHSW2s8_&IE zQDa;UJ=Ikf^-|J|kKcdEqAoP{b!oB?{MVv3?>uc$58WB))-uzg-uIflX2%qZ`e0M} zv7reTby8SvaF;3di*VrN@e}Z?^aAe?r28_)d;mn zJZgyQo$4?P8s(F|ZxJ9`aSOHLI0nNv0q(StOS?@nv~_O%dmI7Ux;@qYY%RoOgruD- z@|!udXY^m4Pb`uO?`)@Qts|{Rw#`JuO5fCxeWB_XcLGo+@_RQ_dzG!H z$2Y*HHT1ToT`$z~WW}^YCIHqUzjP7+oCIDq4T`JMeyaP{6p!rnwsxrJhpddO&$fJz z_xBdq89;r7_61U@GoefCqrB>tEb~p@w30opBNCD0OVu$ad0Lv*S>1F6;dIN8<)smY z*6J(26!y>RO@0@KMlI^$R318C9`r_IBziIZ9y+~sJpsMBKi;~0lHUlXJqT0>R0^^( zIfhmCOd5w0v{sRQ`0?;^?4hsw(4GrflT_a{lG5Ad@{3OT2rv+-WZ^AS*&|13X_CEA z*g+7g)sf(H;8V3HRgT;g*S`}_P3On*I3iOXfn3YcVifXDyUl7&RX>Zq<5oGE==Mw2 zO5<^W)@IbVA8(0a2t$@S8`lka_2`H?pTuK~tPN_h@im5_bY&RZJ|(YEv~NZB9Qi#Z z>h*eUQ~#~(je5$`PN}X_kUm`ts4rKJUXQ0#8DF?oPgFkY4|FY~t59{z4?qpj7@4Gv z?zY!heSaFoLwmU76;}U7mu=~!{Zq*(Qgs-s0^Emqoz%*o&7r*f=&Jk5VqI()yWN+*3cX2!|-=Pl~IT07!;!B z{v)qNWHZ&rqMGMfdVQYW-(GtSg!Ys4fy|w3C*}?(hWD1(&jY#-qzHaR)%{tV&zE(sV+H5ah4X1-EG?|L zT>5$ctI@VP@;Bq#AQSCG4&?6+3SVsFUuvV%ITVL}&^-g`p%9AK_OMMi8|MTh+KGy4 zVQjXFulT2ZUGxSp~_5G&zXxl;Z`gY{;|U zq8yOsr9$+COva1U&q0nru0kS?V__ST`XkPVjJPPoO^E*`AqJN=ke48DKsG?mgc?MXFoU?% z-yrM;B3{Vuu{aNM4)QDHE~LRYgXjo3H6CAR`-edULB>F)K;}SRguDUy9C8#gCdVMk zA<^><;&({L1qPu&hC}v4PC?9&n-K5k48j-E2+|U=9I_U&1+ouv5po6c8{{S=VUh)s~%Zy^pyKgb}+@(l*zxX~br3XvbkhK~$lGsLjXAeQ zQ{fWyJG52cc8tmE4B`Z~d`5kY`fw0pKf(_MMvv?_GGy`LX{Qb(hA(rtS=_nydHQgP z=+|p}{P@n}6UHZ`q>qnJOY9yrer8ho%+&Z9=@XNaGCGGc2T^h71gkiKIM4 zeSrGOO3Kb^o~CV|joOXhf``BE)*N3&&x2dOlkSGMHcAb`Vh4^-L1)D`*Irk{SdMR3 z!7VUw;-ty=R{U`TzRm<59v#y>HarZf2&m>pj;*V@jiqpbRon(?=LT1EBgfEcZsBl? 
zfLj1iraOq7L~_+|S{kM*Zc$nq*>FRZ@VQa3bZ&0daIq9Hud16C-|8xEFQ@WXeCD*&th7X=OV{{Rd#n4LIdc*d zP}QPrik+x2Zd>ATdKH?)zYr8b&HYV#8jJeV`MlNxaBlKI@vOhxi8t_JBm7Q z4g(OvW2v2NZ)pQv$SnwNG~Zym=t^!8aI>+MAaV;v;r<|dlIM0OH<6Dq9aHU&GD+>) z5CvzAqgrUp*6d3&xeRS1xCL$zdttu`{B3TNgZKb1@51P{x)pg2;sN5A5Zy0KYms~h z5vtW+X2!(kft@MY&~*+X0%K9f49!g|2f6>V!+;;+t{3fP@{BVE8r&R&TacS@%WyIo z>Ntry8SZ9RlS`a)loK8v+X=snx`9m`?Sx~Zvk3IWwEz-``bG6t;RcM$4hw|vL#Kbj z;A-b-G6v2q!WaO9lxa8%w=Pa`T3lI9BHs(w0Z2B-Rh7eds)e!5r(X+aQ9mn4)DP+^ z>Kg)uJ1UyeGQkcFon1udL|4%!#zmGZ+R5bC)Ik^x-uCt;J1tKpUtFgGoJ7D)qkY}W zp7t&=_3Q=e{VPZX<>#D}ICsL!zU~2EdzZxe_T(Pu2b)Qi<+cs0mAkL3Z6uGcW7r9& zE_Q*syN<3@x5!tWAQw?5+TB#qUhbuS>j@#&Hb$Ss7)5vrQf9+Bz9>B&gR4QfCLkUB zIJ`@SgULP)X;n{?xhu6vQ4yCp)^o_LtRHvH)xlrYbX*<6sKlaPSob6zty!mLA?YyiaK8I)O^~Z)g$?3*$X#} zC5}P3{xtLu4HJ!`VN4y-FsQa@nBi-wXZ9)eDmI#|`PcKJqh1c3>u8fV*IEXFkCa&J zXj3Q5Ss;f{M}!7vd*R&IKFTf-?PahRcJ+jl!31~Yw2J6)S4XXk&L|_w(*$HTcuRqf2R%fbo6rSTu0mT;0M8d5^D(`r0;|DeKL$DS6@5fd(0O!(=F^-9$1vzIHr(1bW!zB)tvo1Gj5EsPBz(FUO@8=M zbmK7(i<{Y{)TvnZSH16deiGv;11sj2# zRJV~D@@w#O6Mk8BgkNH9;TKa&_yu_gzZG@O(%lvAE^v2-yOZ$iQrGONg-4h~cevr) z=(-kfvuCMZ#&7zg-wc#}r4O(vgpg{K(6z3MsG9|WUDrjNb-Q?)^|sG|e;J2;1oUf# z_zf*YEeozurOw5UCWkotK)WciPa`5U_`3=JARpnst)9il>{V(ku5GFn=MiOS;VxPP zH4`oH1LIX%9=1)#V|`M$T^(!4qqQBh{!|BjvbLzRqOyPc)xmWj8ly%Crkg)W`9!o50g>>(N-s4E(0`H04e-lB1kmuP%U8)u?8 z4wH6G4{{b}+z0##iOQ7MNn8Hd*yZacd=nutuEICSMfi5{v3Qv=mTUF|TY{e-{QTet zn=;G3g)t(=4Sma1_(b!#p8&gPN7n{#*ti#L+*34MQ8^BhZG=*JwRXa_7x~o2;p*ey zzv5ml!Yc;fVGXJyyjI9DQua5@GiZ*{#a%Q>TqBwUy(yX)RtoD#iaMn{U@9m6uP$2? zHbWsjA0QcQK?L)U<+W9Hx1)8E;~f7rdI z{?sK7;}^t@?uBva{i&v~83;EQ(xO_}zb&U$C;#W#x}hP>H=Ym;@xcf=+Se(rW%7ul zd3cTON%s#lhxO&D$sYTK*Z~<=Exhe@+)umqxa0chhU=rNs2lBR zF58K^gw3@*gtQ>p)z7b94x^JW;@-oEerb&MFnJnrKZLkQ*YncoOT-6RXB$_vU6jm= zcJF4g)|-CaYV0U#8D11lW`|O{V%et=wP@1XO|-uGya>ozgfV`xFvjExm!KC!=~qIu zJB;hYYd_k2bYK zA40#;=NgQjxG!7T4TpzSe5v46dgb zvwdJ2+BMZS4%<0qwft#)=Q8fkvbb;S{(3p_Jl46d5S~7m2ctdp`1SU@Zx*8KMIrvE zUe|g&UUtGO!#&E=8_#^X9!xg%fSM7-Khs~--QUe6{>HtgwjX*)h|v(2;(tA!zkqPB zK$=zy`&fJEWzp+IM|SsP)cf<vFN72qfSd5j8VAG9Y(~Y`l=M>9( zSAC9%=Lj})#J{y4t&VD=f8ZV!`SMx8^V7>nJFCk5k;UMPIc!XA^e^Du33 z-}UxyjPV%rRpW}val$%Zwe5R)ow?cx*Dj8MkJ(xEF-Z?!-Mu>3+sHO8N@E4aJi-jS z8n1H(^leW_pKA7BQ@@&zaIZm3|3=ua5biGI#eXAg7{U#QjH(`%%dU?zhw2!_1<3kp z;i`b62{g*L0HtuA1_vRONW2&k4P&xUvwcmOtyaj;>KtM$2{ZYSS# z!L_2RLG-G2o@VPo^@QKHs_9*zi!l?+n7+7et~F@pajv_5G$(2B-^9N4)v||1w8k~Slkd} zY{Tfvd;IF-pW8wf@+*&{u1_ROwWw~uop(cfYW;;rg=s-a!~ zc6`*vxN7(H+FY>m*{G)$AMR@~Kdc*sXMoY3*7^SMXspXa7FN$gZ97rBiyPXG=NG$6IZ28JJjk%&#fB3=IX=dyt~%k=4Np*JC!;V>(90EIJa`{T~nSp9Xa00 zdjP$j+B{_ty&<1e&!df9)!2624v*b`+MZ{ld-7PMr@Jc-`w}4ERZkb!Lv7Ao-%Zp{ z^hBABqJDb!ytyNsP=CmonAfB3tv6Rn05B;VM*T=S+#+*k;w|=}q{9HY4t?sJb zOVm`K)y6mZeCSb)&xg$DGlqJuqMo6jsAH*Rb}!afCUKAHf_2{$;liARaV!~oI~MaZ zM;mS(7pVTIzf-+nj#fXYmZ%?HSuc;ZL$y3wukH0+P_M3t1BmB{^}BeRw0R%geBtH` zH`rk18qeR{e@L6CHm|IH9jM0rYPhcIW7Hdq4B~r8z&@L66RxW=ooa1DbAY}!Hm}W@ z>tN1Y8*^sNw=uRh%HTP(eoxV=mS~mLM6^n5EL!2YPb)k}Z)ov1`79HXdn%-!VIq1KaC`ufO!(bx4psM%<>w$S}-)6?frI%$~Az-x1?Etv_M>uY4Mcl|{NP1=?Phv^?Q{1NS{Xs3%$%v)XbsyDAyw+>L#4QGq3fYSGBh<$P1NFNV4v#%M^J|Gdkc|5z ztQq1-ugNLSF_7kORJLNhY+mSt?)Jhx*-<#}V;f9hw6n2Es{aMxWb>FaHNZV-1N5y1 zU3{$1QL3z`cd5LddVPvLV32djH=#A(dYkHHw`p5_T)lwsM&#GHU!wI~z0K{gUXF&c zw#DDw8&F!<+hBVwLKO(vJ4wT{i;)rwus5apK`zB`FRQ_i_8iio@B8zG9>{TN9{fk& z8)+NvC7W=**u$b9bD)C0~WHi1a<8R9t$~cPA#JGs@4aO~uyBUu#US@p2=)?6J!8nRBkujZdF5@!B ze8xh?eT=6VFEid_bXq0L;mg>ZF_2MV9LyNYIEm53n8Ub|aWms?#%~$VGXBbVo6(Ki z!=JG|V|T{BjKdfc8J}fb%~-&=i}5hy4~)7+C$b$zGt&Mp+rwE5Ov)ONlA1IsDI+sw zTKb^$Nz+EA%t;z8i~>$0W+Y|I9u=QD0}j~7_ITLQVpRX=k%NX0>zJ6DD$4B;Vq{i) 
zM%D}y#WY$pa2%PG6+SIJb6RRrcvePgM0|Q;YEp)XBXPQ8bY!y&UGZY1gpGY^R^sEg0*sX&jd`6|f6yj5#OrMpK zo`~Jfj!&j1YZj2$KYL=5iGrhovS1KmJ=O5vdLM9%Ny7fxht?Al*wdT#sHKN z`HIR&%0%-BleD6!qzui((kCLvGmzyB(UnSC)jkF#iXJwuG0CW~L^ujJdxRZ1J2NXO zEhZ%`DJ+v(lQi+7{h;)etd#gv^p%m3DVbRjI7Q?mNBxo#W=x)(l%eNj6~!#=Nwzm? z+n(x1RtnlHkxFjUu7eDy!laDw3?wiyK6NB|1?ou%vwdVz{LG}s+?yHtC8Z{1J?62< zCXdxl3QJEMX-Y}g>O`D!j7iN5OHG|N5$jN-Bg0ys7iWkebCP{eY(Pd*k~nA}h)hYy zh(}X@LnWk!2un;v^<)3%75m6(@#v45fB$ThxIfA?kxXk^iq;B?9irn?GLx)UfU7=P zQq;tlX;jBln`o037;96$rNMX8IvYqFvT^Q z(%`Xg_>3&mjI40X8td7(!Yq=oCx%>POfkp}YHPJaG%A21!`lOz&e6bcxnThMlGek; zDor1dl98#EMXQgEn%gjRPC9?I*aDkG`=ummJsTs5I6}1)MtS*!QXbxaDCy_4x{IQ5 zSM+ewhoP8h*0D*9q|i1aoy;&Sb8;@}3Q*1!&Ma-po!We2_C zNjif#n+lx7Sst%siDqc+NWt;mv!+KU$yOaKZNXYueJQT~>50Q9(V4c_z5m7;OAN2(|F0;u4;^~A9O33x`BpIA&3X9$E?;_l|EHkg zUmSMT4tqZS^ZA#*gi_OAnRdY69{>5g{qIO2K4D^F(xl1BDO0AVrln6aO~=J}#>`pS zv**av!}^Bz>px&%#Gt`LB8Lte9yKC*WXzMJM#qlP-uwCAk;DI)G-|R4=_d2@w`LLW zKa=18aH{_wHxmPX<;b>%*4yJ9$py0)G-1b$KT74@4HDJ+Gr0HYy=LaYB-Qdmoyal^7Z- zY6@3948>VfxGdZhO-R9g3F7(xkD6=1XvF(F5PCl}4p;>7fnE%3;weM`blZ0v>0QT# zkYKnIz6gngo(DV#84tY_xC+nu6QSF_!$|Ko+I!=CxD&R7tcFe)3@Lyf3j7kX8@d@7 zjt{$*K#v06f?S4f`>vtwdx-QtVxvZQ|2Wl7!vWCyL5~8Cg~UKN0hdAIpyvSxJb`>d zj{wF(QlS%0hh#y|0$Lhl%?3K5T>$b1-2oU3ISrlA4FlQ%=pMjlAV;9*0Cz%s)9l1< zpkq_y4Y~{P6-Y4jmB9ARkOsmNMnMALPB^$ zbC5{r_>m?tq$Sz{dMt1=B!P80 z2heFf$R6+h?S}3EY!5jNov=T|4BZ600P#t;6PJPBfhZT;gMn{DT;QG$+y<$D`*z?V zh=4o2*K-U~26qd&bw;`1ZUp9In2m&92rPj_K`#X!g~UKN11~~ip*QR*L~qDK=zV}s zLQVI%9qFa@$5dMfZ6NHK*0HVH=fX?CJHa2Dhm+_QmeArGLh2fFmcQ&5B< zJPPrFegf#-8+8KR2Ux!k+5oyQFd8x!dJOPA$U^7^z+I5>(02pBg``3+1(rb)p%cMZDliy(Q>^MQqsQs~9NZvBKf4?P$-0a6A%4)_A(0rb_t;}8q<^T59SVGq#z0s9U_ z8%>7}bcjHGLMJ>4F+)EMd}}D`2YNnm$8gvN^djJ`C|qBlR{%SW6rwqFTGva3oHp4B zTD#i=IRO0xuwD$}gzgI*3poOPJaE7$lo5Ia@H@yg=%v81vB(pJ0bYm1LB9#SI0nDD z0sS&C6|-$4(k%p9Aif!>D@+sGLIR)%0!KkwLyrT#2ML5;0PHp%eH?l)@E?#!3In9y zhlqhr7z`<+FhKjKP!G@@fL}pYLq7t11j(cD6JS4(TtWf8SmOdp9{PTsX&+qz=>u9r0Lz6L%s`gh>Q`5Aj79GjP-{oP#{X0v|%gBfR(wbp?5e>I&%a zImSKcgsu0W??Wdv6blgq-2>QeFKh{Mh63M&xMbs+0vx^%eHMBY@ERl%I^j3_VL#AI zfgKK@JkSGyyCH$l36~v&4MSfIj6Z}nf}RN64Jn{7!0ulnExfN247>oTfV&x(U4r_A zPIws-gfN7IzQVW-JrcMV5&_-zJs*1ir}!H@(S|$W50ICjn}MOF*y9VG@Mp*x=w-kb zM_~_hP$$4T$Izy5Hv;<~N136=0i903))A%;;YpMiI^oV!7=NJ0elNtm)3{zhe*heF z2JHfUJn)CJ$PaWgaM(H6B=jiY)bl7W^i<%75Hs{bU^(O(^qasf7f}CC!yR}SQVRDY zz$=i`(93|$en8)W-W)g?@&WW@;3tqZ(2IeFAJOiJ#{oD55(qsKI2Y0nI^j-8ALzS* z`DU~=bi#v>@zBo$2Vcav2R#Jst;JrFn*;xZRy1il9ef?fcO zy^Q=*JizRqVS5x0@N39+=r@6VuAnT?qkx%^WSlz}_!7hfeI@WHBpdp9V8g5EhtLUU zL2{wz0S`jrpqBuzL-L^C1dc4j*b6-dxEWFey%-qs3v38F{pL_E2$K!;{vGy5?m#od13KX&$a#bje_;HCT!x+p z{2nqMVNL_@K@y=q05<#+_6D7>4A8M zF9g2v5amSpHNg48Al5)%2n@m=r|}3w_<=p%+kj4Z9FhzD8gQl~-e-oM2Rs8Qf_@&j z&I#v0Uk|(pDTaO-*bMtpgXY2hf$@+c=!w8hkT~cc0Nt_A{Uzugz%!8M(9Z)OK#b4{ z%dvN{J@lKvac=nCE9m2ao!s#oT}Uescnneo{RD802i`S+PB<3t(+LU>Tm>nC`)c4W zNGbH)z)iIgXAbfM+>Uoff}j)r3Av1OEx(6fL`8>6nE=K=B3x^P(l`vIz^djJ%7AP7Z6||>dXDb74B~oc)E)GjKrg(vP=+u*!1x{p5dil@ z;Mc)O3;Ge@iJr&<^fKVd5R?b{Y2Z^T@&G*<*gec3auy;#z#R}5=taP&zNka!ggyG9 z>=Xw0-T;&xdI2zQAbt-HI$<%yj5rC;Lry~{e0?zT3BCOggK&#Pzd(2oU;(6v;sN59 zDZ~+S2i}L2L3bDdJA|Y@4?6@t_*F-tktImdKYl9J_$q#=udzz8-Q{J z`ZnNuh9rT3Eo4jrsRR4gVSrWx`_?s@kU)6CL4E?} zHU;$pXf?nXfO9|-*$k|Mmj~J$a65oK&|QE{;Uo|*VmRPHfagFz2h3p((gHM*EdkPj zb^@FaPzZD(;5mS5pyvS#S%A6+)_4>7A;2^U4+Okt1@aAOEZ{C{&|ZOah&*Bg>JHF< zUms3fCob?9q#cA4IS^nPXd=@hfE@x&4>%Se6zI2ra{(|wYa+qkcmP0K)C>Y6)^Z&5r_=XM0R}&@(O4-!1AGBuQ{NJoCIJGbTVLA7`T6+ zl>m1FJO{c9@M1V9GoY6N$2=a ze*nDq66}cxG!}4O1n{c@x*o9nYmiQ$iF_*>q#fwnfGJ`?-2s{!@WES<7ND_!uf>6~ z09pue0zebciGaHkK^*}4D`3laAfJJ@1{?rD{u1Of;2r>apuYmHe-G{lXd>?cTm>2n 
z*dqzV540EHVE}ocM*w31ZWGS|tdk7tBhW-n2QUKKEd}hgm<8%5&{qM|egO3dXlual zJ_0)gS|}T&D;J~@XllR@0n&js0qh4*4>Xal=Yc!{S_W_*KpW6RwkiO11n5A(vW1{5 zfR+dR5r8@Z)H}fHC7=xfS`%<7z-^!_0K0qw_YAZf;Dl0OpFlSO7A*s|2sDv(0HT2= z@*+Se(2#NvA3!289I!7yF3^5}QJ=wc2k1W-=yaez0saQi26P`_%_>k9Koi*!fIJe^ zHNXe}JnfKCQ{0H6dk(I+nmKnLh(z>@&>Koi&g zOVk1z23iL&TOGJRpv~)nJpiPG`7RXTaDZH(iF=+A_adusBK?Djdu}m;a3T};dy)im zL_{X;kJJX{ScpvA8|f36uOKpUpQdV{iA>z1Ne5^m6Zg0=0-DIgy>aNl7>dZmy*j4B zyaADkdvTb6@gR|j`!zfVn#jbx7~H_PhRDQy71D`g9KghV5vGAAGI0+AAu#SBGI6hf zuRs%-=*un&`tw94`jIDtzA=%Be&Rx)-$-Pl&#V#9L?-&K8i9TVk%|7X<3JOc=>HlD zG?9sZtDk@-GSRP-96a}lJOqFN`VR(LG8nYOfT@~+{Qym5qFyi2C#Pv?6V8-3*i4h{$~z=z2iWTC@AXFfYSjp0tQn# zsB?h9v;zvz-wt{R3HSttpP<%(12k>K@1Fxq4px-a2^R(7&vVHk37{VTb?_{bL$0#^^$FMlo*a@(G_F4n3@rsD zmuL)s9*JiuAYDXb`SUQpKmh@+QU5%;!2UW%z(D^xx^yTYS8x6G3E0dV5Z~p0eNLKy=7JYc0ckkKW8T% zpMN+(DDa7ii1P8+5LYaNrS%GY`g(VSW%zhJy};Ua3ny0>8wEap8&5u%;wf5r3r|lQ zXGyM4gZBUPGYIjparXvSv9Z?i zaPR@kU+ru>|M|zi|5?Qke3iJuT+_zK#)-#?_`d?5h38!tA6G{k4?Z4mhnrSJHwpzl zTMH*o8$ReiqmhUH_lw9w|LvCKq5n+VA77UTIpPLfKP(-bz+M0E$^Ey}iP`w)O~_lg z{ll9AB;a57<>~F_=IQ~`uC2>+*II!uz(!nDOiWTrT3Ad<^1863gq4)Ag_Nz0u&Ahv zwT+F9gtdgNDB|xa`@h8?AtfduV<~GPEM+YVVz9Ne7M8IQw-&Y#my{G07nib?m9Y3f z#~^7TZYwThBOxp$VJRytDJdx?Y-u5DC2W0N)>_g+(&D;}gv|dr1~J>~($}q|uM5kF z+K35Dic81}%Su^V3d=}Xi`rhd7PYpJ{$FB{u@SYEwh$8+wzRdB0I9H%5e8|J5|)&) zwU!dK5Rm;Fs&;P3NSwDw+JZgNoQpY==RpY=-w+!a(;>n;=&^L41GzCZZqkmWTf5^!+RRkDLHG^{>=}#`T5HJ=IgRV6#&2D)6q_?FYnXIuX>wXMu@F zt}P%MH#K8V2;_7t5hR_ixz-Q}59BV`2;47uJ;nd6i6LSMtzTlMZ|GfjxuED&kAX&f z*eH8E$uCx(^LB1d<#|Z>V%Sj>qAS zH#Qf2HpXefEm-*v50NC{7K;o95c=}x78FgQ5$FD2%fuEZSGRW4i{H%Zsar);%6IaA zk2@9HF`z71#R?(kY|lun@vo!Mlq?hp78Ol|kNX-*Tl=Vmiwo+1Uxj@=s5|1CA67{} zVEt55=UWn(*cG?dMX~Yy>HfFyH=H2L2QGqKfb5bIFu{*kIi#u-{Fqs8lxXfXfav`; z;};ZIkdUr#KJGfH^lhR$RN9o5=dvA*NJPK#*$#=Jl3oW+`UUpt9t)V8C$=}WWz+n8 zEcLc$VdS&U)utVJi1har!WuZ4w8`6)nKn#~#xM1mM~(a{t_-A=0_8jw(-)E2;}w^!0yA9vjg|#LJMPtcWHua}LrfWGO@0M|CG$brHvM?#fvXvSqRXiGiJCTKZU; z*Ce~=_bC%^hFd`t?y&m(VmA+KqGtl9h&gP)7#I?7hWr_g5_j}Opbzfd%`htnLdj?6 z*SZ}g%WIOaylXU~PboVgM`{o6pez>IL4;>m{cQXGd0{)5i%s<6AV!JgGv<`%UlX$c zT+Nfbj_{SIMGPeyVj5)2_}A&8&oH8>^O7=@{}eg8-m5AEyWwSd;_E{N9 z5Pu@F9YU+%SJf!Kx1V>23~Lm}w^|rID>_7;vTZ7LzHtT5K_;VOK4VwbBnAmLZgZob zW^RYi^d1O;+%E=yn&E}SQWOJ=QlamJ+`I2b-DDkX1)+OBGwZh4D^G4-_Q`$~atD1~ zZZ!Za7zkoEc3r^brJiY!p508@e`lfT#@Qsq)117{O6`bU=-rm3A8-U&z=Pq-{2B;S zNAk?>K|-KM5PqASP|*9LWIP5ewNltmHJ0gt$D@ zS+_%ptnm8G2YI2ErMdD;^WCv|2lq{3@*G1(U%!3@Z?igYX7xipeTv)4EFwE+Epsv5 zZZCf@F)^_Ti@2lxr;U8|%EHOXX>!sbf2FA?rL7TxbDM4=;gE5IXe%CG^4XkJ zz?^Iz=~Fe)rZWiL(KfdnAQx%GT54*Bj>W_@M<5~%UMgMIyOkvE`kHgBAwU|rU!FG8 zh}&qL)0S7%cFldJfFilWiftDc8hLVP;57F66cq7WV`;GLAmkOhwB=mKt7^j0YH833 zGH^3ss@2qYA}p)(_4k7U616Ab_o&YCXp$oHX--!33${9>8_yV;`7ea9J>hg5Ds0x0 zo(N5HsipCq^yhEfjw_j-&Um_WbPv?4oiE^g_qkWnv-t(Z?>7zK>H4gV1asc-nP#8D zQZ^FyN_vx2iTS>y_l-J|K5DvlwYYU}7!I@RP0p9IuT~6%4A#;?Fd z5c6E|SAlMF*FLkn-h6s3x@31DWpe*>)y#Xx;$}O=o%7?zt#!C<`m<{!^fEb?cg$O5 z*)cIuf#l#$e|H~e=bzqh6n(|^T6;#V=*5z@p1bvEtMy>>Pt8-Y`cxA4%3vXz{-Xg$ z#biAqExw#W>;jn%ai4tda>dP@DZC(H{`<$krzdSKF9%dhk9IAD;6EDrA|oTGhaS1M z#|uBg{OU#e+&kg+q({Be{cP6A!hM46tZO9vUds8jX>=+|!I!jGK8Ps*p4g#zk=bSH zz}z^@9^O_)+wXg?;q`1Eqx#{M3pYhZJHA&%^zV;UkTo@L<;g?5Id8VpVlpM-&DU2X zKDqBX`2DICD}?|0^>eMDxR!P)#h1&aW+7fS;?yUqPf9W@;iQI*N9yBGR*N`IJ)N?H z(>DA`7!(4SqZFiHT#Ox5>r&BdVSrS=b6|cJ4&fPn)7R6&Ju%=72~}@8kk@<3nxDzv zPd|-Q6zXZ|4v$iJc4cecv(t?Y(o^(Rc&kZ2mj z!;Dw&zLO;d#%_{idYr%8Ot=hq86?S!DA*g9TFQj5jSN!}tFB2v_Yk_@ERZ*_)BV?5(P>qkhdL{WhV&?Yvc2H2+Dlj}MNIHd+Qtbg{;eB9 zp4^StEIX@~G}|JYI1HJy?^NC}NoWYerHLRWLf357cYQ_l`Ou-MvX*0J`Qe)mxsi7o 
z!nt7zFAngB=;`lNsrMROoSf*>j(+Am=uJ9xZ3fD0>xwoe*$uwQNtwtza5)LuEC@eT zpWB-Xz{-wqS3Yyceps_g;>{SQK~oEmLS0uk`EL`Hp-LF}+c$d`E;67jA>yg4O=UuU z#ZN;_Z)b5m90Zq~8;}Z|U4=DsPa$1b0!oa_DY@$ppXM!jN67DQhT#}Kks9fN%ehWq zA9XmQy-&>W3k>^BbhjKIbuCRxu3TktmU$&7^i=8^dGQ0R0GE;heEhvUtvhwP7mfs* z%1%ch(YY2JVj3Av7H`9w#&GgHaIyuah;hWvu=zH7e^X8dt$_LJ!lPG9Us=aavsjJR zS%q&au2UK9DaC|tdUKIi_h*;K{gQ2x*Z zCCi^^#_CVBVfNHY(LGQXx|*p9MI_#Gj9dN*5anR3qe)QQ_ULlgI~eJqR+2xne9I}qsb;nhK2 zmntFo;|lba*YRC!zt0{b>cJbZRT%TbgUl`TlAPGVl=p=g%s>FbW_Zr;uw&5g_mKpo z!qg+iw{a&0J5+_X!5^pGy5%~yKl=cp8Fc=J^H-ylfTdQ8$354WrzyVK!})WEO)6-i zrf7~;!_@G{6YUw@ay|-3>Kj2U+A(>dAT!Q9KM}LiXw} z9@*T^$m!+F#E_vj>pm6gjGQd*r_GI>{Jt=s{#}Up00YJ8zKQtV+*%k-)u%B-Wke$M zL}c7AV}E4hX{^lkvxKJ?g9m6%urKci&TFxm$0bU`A9NhWWsTIjQ%oX%$tb#aTKPRW zu_#(Nh@pGNN|Kpw`E`~oSB{e5CL>ZQZIvwyZO(S)>i3H?z89qQ)K##;GY1H^Skf(Z3(&EOWmDFpHVek7Q2?50r@X+p(^&PAkwSrA}o>QvDq}4rU4mwOt z=nR8f9)G!yKUfv%_yv8)g~;*5A$lVg_Q8LIw~|jo7pT&;aB<-+2a)4Q!HxZhG0Qv- zR`!@%Wlq>bzDDfR&OXWKy&n_IMh9dVSV;FS7(gPDx3=8ZOM?)N7uJ);36Dw|cm~Sj zp#pOZCBK+I;=0!PgW1vssXf-akP=ECbB&*9sqJ;g!=*P|e>!4JRDTm9f?+xL>Bv&! zPvx9pYmAkmnhNqO*{>S?{3=4^hn<1kN5e3fcp-#UHcMkS7M^;;>{Uomwpb9qt`k%~ z;Jk=}tMdo){MS*leL6;q8mv>^I$xy6HiAUB)5b3<^&`*3El|yd+&e_dZfeWCy72Pj z{$}PD3_>2vz^>SMGF=YYl|9r-x-Nj;3^&Ur+y>?6@%OjE+x$*LK zS`9)s;G7P}g_m!GjWqR~t1+gX$t?CY$1`<_p2td8cRfCEtJNN_RMXYS$FI;fET0x5zjxNyi8!bZr##_| z%Vc5S9GK^t-@ZGHmYQTpYqJ(3Ez)Mq1@+A>L+|RyM{uF zVT@7zCEi1Yz*IYy3l)3Wj3;A{m?QDU4^cUsh(4|4fvInq%QbDs01%;Bqo>xnb}b|sJZdF)#FL;Kp!<#`?OuU;;zue zqr1xj7}c&8ZL?5Qp&9E2)AWl2J(XubH}VH5)$Cd$E=_i5t04%#t7pB{-OlgY zq}Cx0_YcvQB=6!MK!kGaN?1^w2CW%rPUpKPa~2nwfYl|I+E{TuV(mYsLTxHkaO+-! znYLzhU)aD6Hg;_ra>5?ZsXSWnZ2yJ0RT?EQt6dC3wT-~-4qK;22(Fo)#~R8kk}HF4 zrO1qJ(I+H=kSh}TNTlu{rQ*RczqsjUwW8u-iYiH4C;t8$TEh?C3_PksOPbA9D-w2wx!W#kb3P9>EyN6X z@5Y2p=*^Q^pCzn#1@gsz*)FjES#b4KJD++wl$%-t zLg7Hkn^A_(95iG(siy8c&cuwhGp$()Tqj?yqUwW!!R4L#doa*TGcw>ECEa@6xQ*ns zFIpeoKBsoIeh=KivG3v}t|_YLmd6@`5bT04Ge&AJtSwW5y z$!~*ad7CvSX>MqPj3oKgf#oZCn;Eu8fiZl!a*f8=c83Pa9$=g#Z63#&IB5I655+VnqKpq zpXr(l+8R|aji$bG2bQtN4kEaBH^i2)=HyxxT)*ieZ6lMznW)lI}WYyGfBM+lqikRyqz_9&RrfH3rJY zi9ega~F;D%PLpG!# z;J|WovOy*g>!y0}g;Wql(G~7_-F)n`^GKNu-F27okpw#-c9EHI!{0Y3d%>f}9*V5R z>FgSxCfpA`&B%tlj|u+Fm+o9V=$rmPsEK7m-}?fj{`Pex>)N4TxAGjoNa*&{!HkFH&^woTYohST-GTh`c}2 zPNR^@j^w92T+0koFGW3Gy)o=yy$x9mDJ>OS8!08b;j`9s5gkbG8@Tk^cq*2q2?Yk< zA*;o8r(FZ**aJo_Y%}=i&})RA5bbCto*7r_%42eYGrAQ)I@QZ95%xES7mH2nz9PQU zeSQL?;WQ@>QHRQ?famjg<;WOk9(dggI|PA=pb0;Yu`KhI74pQW7a#; zhzJ-~2g!hxJsEsFzMb(XQp2Z5rZ|c!Rrcj^r!D5^4!@W&gPV0eCDcCip>&k4gU_U1 zWw3>cYHIS7(!e#|rDvpK>ujQy{)tYItT?9Z{8zb+L3e3#>$GfCEugMh(*`rY;zn5G zPpNrs6(G4-1fQ=PKB~%Es3*4c{SSN&NZ#Cp-71`q*o%MqjuyL{hgsfwzr)pN2`$*?X0cgAlzcY@pzXm$D=9}~Ex9%xT3~)W~ zU5%O&@(QziI5xkRspKbPKxep;l7D@=?sPtN!g}iMGmWcv4kGL+F~?65=MGkL{T2IU z?z#>(dP;VbG_wtD-Y|>EIYC)hrpdP(G6#yCR+{oJW~amVkD$-0U(Rb1$R#Dvo1Y^4 zb{a?Vq-dwXAidlts9G4!k!wQ19Dg3Vd4~p+0?*{(Yv0-IBo}ps%@Iwq6Kqq-)&vy; zSQ1@%;o5?D@*U1NkMn~Mg~cKmM5)n!_d1xli$;|-dNCmzrsv* znjblws~-b>C|01g38k%bI)2*pY$?1HWg@~&15-LAeD64+VL%m1(F7H97?N+>P5AOv z;#$$$J;D9H=caOWFN~3LMnrkqr z>)i?tHjPR@wj(HDp^2LbQ*a8Jpe(aj0%Y5uOE~PEuYI1Z>sC(d(_4;e%UmR{k)&?> z=e4JRZ4da}qBDT(e636>Zr?Q7zR}_7!=2p59rzn5zdvqWVZdGa^ICj3pLY68wV0rG zOR5T%v2S*S zH5|}pBJ+`Lmm|yH{DH|Hd-Q<3^MnKQ)luB~K4~FjF($&mER?|&_h~CRz?ZwDv+!)Rb4x?QqL%QW8C9b6<+&ub%#DbzMj>2gp{3v z!Xo$%?!$uwVtH)IF6=!ne9AD$@O;m47c7C|nj0~~4PUmwgFLWgy*QRCx@W|Hoyq83*XrvUlFT1^$)?aJ;Nw@C4cbeUJ;yQb}Bw;dblIhC9S zwE%pd^{F6_rL33$?GR&ugc%AgVs{|Fi)o9$EljpzsiNbs-w1vF5*f0>!}TSmXG4)! 
zdgsIM#?bmVP_Q|dEhCM>G0F1~owIVhtFFNK;hAv2HWe)^$6 zpIqTe1d~iq`(d@uKxKy~vBxK7?5g@L>-uQrL+0Iq7^ztXo-^x)>#=~X}+FUX<<4^janxGhxv2V7-W7&wanxNHI;n^T-qJo~JZyiC`MxW-usZV%gP(O^) z#tD`VYODKpr;b#HuZ|;Wu_Rl`=+8}R6?g$t1T~YN?5uV??0v`BX#N*3{RxD9TBu^Z zUG;k9-ms+&$LW{1ps%;aq{U6fx0}YFD$SMNn=C%=DlUGn4Cad8BY!|7UiOa=h>d1u z+!50hsD0zdBh9(;2|}=iLxCSqIzAfYkXPFCd?z+FRvb@)_UB#JafIa4E;5~rv-|1Z zjn}xMdmr6zX6P#>T&#lOi`;FL)96ARMXr&hSDi(^1$jLL=U8Hs985DibyubD$d!W} zLTbMKsOjZSWmNwl)jQ#_C(2XX@m1$aYSvQ~?)OD?JxEDl-HJ*P{D89$J}tYV zJv8OZ`L=LKY_J-Z5T%0eCez{d!LY>LoP1EV_hxl_;dhPXq@nO~M#xKK?ac(zuPi+) zyr?ae?AS4*59n;JO)|!wY~Jhl9oAN_v4R|r((G-UsCl7vcBJ0Fd>>Ct_D;$Ac=4ux z=}$PTO7suJlmX=eksHN;@QPwXksBx z_=X$1Fn%Ea_N9;B)-c@+72}T3+s?3jMSu3J^S5upu5HD|L3k4mudhgrwCgnMO<*3H zHZw#s6Dp>;Z?dMTBg>=nMyQ;_bl$FBF4IgK@`tErB=l9@qkD|n(K*^<0u4*FV{uJ~ zAHH$IB8mS#c7Xi4@UZrQu5f#iCr;~Q1B7p8xaRnpMRvF1Heaezzj-n@k!5DMaT(-lU`OtQszp1ersj`ljfJd+^YxQTla*m!k!o1EKyZOVzVYpE!A zn(8@pB&N~~ZnK5^wZN}CZ1!uNN<4C}T`7%{2{~HgLu?fiFN&b?9&=k%2b@$WqoSbq z7+mAC>=ZMn*@gAo5mm!k!42JF^n(ZZ*7yQE`Q>z40+>`tomM6l5t(A4IAs3VA*+9U zbIpINB@LN_yYodwfI9KP`Q^dUpk3MEsH-beFEC_HyR99aJ?_N9X#-Q)k)1*NKl_)t z1M}?dn2`D()AAeB_JcnRQvHnQRuZ4!r^JWe;_g3+h{p89!wnbd)Ows99tPT2e7=b; zOg0d>qJUDG$@8oHEV?9t7fI>izRFCANPv(s6wQNVU2uJPxzA^d+2Yo`aTUWk6?+7#G zq*|7Snt9&A)18=KoxW6`IeqpT#TibGGx++Jxt;_P>0!m3Bm2(@RDvj33(JAfE$rD| zEmECFPVWl!k&@+6?<|ql?e(pl4elgQm~Nco{CHTF zjk_I|DY)}Zyjk_$L0+`yafJsILHQ;Yp*A8wX3j$UP8f_2EZfjiA!yqvU#m3!p<+!+ zpW~+)=}Q`0MNH`9gwNt{zU)zr4yCzyDj}k_3@kZ750u00yX?}rDA*3ZTl06LP&sa1 zFYR>ykt09$!w1}Ly&9yc2J}hq$ZU#mSlku7C1W&g;CvI@z&e-}*3$_wKlxP>g_!i2 zeAk^+8`TM851V3KK1-pfzT?nwlDSuvWWm8>l}0pyCJnP~rnxP+g7mvPHs0)~k%9-atwkqRT@qw@S#ZRWP1$p{oI zcBWf^1)(&)z!nCBQ)((u*T+u}mz^RrBH4WhCN0jf4?q|8c6W61NMKXjak@Xi-VX}I zwEzdvJxdZ=n2bhbXmyFuu7eilMFu0%G>)714*qv}HA~Dz!X^Bb%Og&dFWw=nk5$@5 z4eR%9mM)#3hmfP6vFbe~d2@b>AU@Jt&s@tUsLF zYIwFQz)p+(SVN#t!Km3BwtJ=dgfP&+7<={x>{!Y$5pjqL=&gbs$n%b4_2BB0yFg5Nuj;(kc(<<*@sfEUS-TqZmY*G4>uvha`(nsOqu1EMA|4j26{PxxiHL3$@6>w`S=I-*aXl6awwy za274Cp_{2ou!O=#HRPLh4_B5L^@5KkUY1`@fZH72UACDSrZ~RcH5>5-6;9SnvQsHc z*~r^|UX%%qjYOH6XSG63$Mu*}az7nI-kynSR{UDu7@^X)pk1dV7 z&b|t~3VKp@BzEr{^8TvSgI@DmTkt-(V-BiGbJq94d;g)(ja!lgLyDtJ+}#ue^};CP z)k!e+;~A znDv#@16^=j@8`9uvy`rp$%ka?&s` zqldhISVG0sC*y!mf!L88slO+s4ih$>g9fqcBF!dGpDcZ56h>dgT3SU_TnLJrK6?op zys7GNL-$0n46jrd#wI|bE5Tqo4Gm14Jl%~sH3tvpo zn*m}4443E}KvPg>@OXvSV82~a@Jryp(P2{h?&Q!5iPHVZfltW22SVJ!S@L@7*S~W> zoW?pFls`{?>5;*qvVyL>J+e)%6{Nz2T+n$V9~{~Pbe_hStfQhX>z3x$uJK z=glHXBF_xJ3J+cV+I|F1av!>ol$(F;nA!2bTM(~oaCS^YhOT6y>XI zhBSGQyQ;BE#@p~RcDB)euO0NC*2WhYnBB9|N#;+zGJOR4B&y?0^&Q;Nk=TT7Ej>dOX*-EI_{*2rX|gTJw3kiFGT(=lzNtL`RbKVNDbJsE)@(XS@9y~fUCJg` z!$c@6Ta3BRzs-ZmuDH@3wcG8#vpeYa#5x2Vy}2K_J&`bzxh3&-_%h}-Nw7ux*y3Ft z--n+AKHfYKcMirP%W1HxkQ4u=V=0}WJBUr86#w?3B)U1@{FlJ8^qK}ZDCP6&r9IQN zISi;t(?(z9o&sg{px~Y9r;xa)Q1~u++jQFUM&|=$Nc!rJKuBo3nds+T(l-*gwy+ye zlN;et4XZ4R>F>%Ja;Mm7WBHjg=D)1Bi+kga`5yWy8%4Jkkc4@BvE*1Td=0PI|Hxap zVEF3LK|=(+<)3*bdZUC1xer-PkYq?VaSHfzB>>;5_LzGAt*pJx=+WJ`^v}e!?Bz_o*I@YWA>hXS?)`glE_mf7=KayHl0pTH zTH6`UAMM+L-Heq~5d5=Q0`=&8Ni(RFI0H$iI>Q`0F)w@_3?$_U3{5YfU_%MnYd7yH z>v7fMmKnoRKc`TmUA`+ax{mYK5%f9)>yBsVIe9iVa$fC_G_S0_*4Svgil3B>)`rH} z0-HL`t>`4*==DKg8YcNR7w@^^?RSW!y?JT(!#Sn~{6p}Xyb6Eb<9Pe$injDG$a`wns&o|!bT%)&*W@voX^>%kK@o)CKILc9}yEzO!7oEB3C-c5p@U? 
zoMv7w|B>0U?A1ErrP&{ImXq92FzNy0I}VtONV~~(lUuxQ7bBo-CpslC_w2My|CqWG z51hk8DC%b=?rJUc^&$J-g^;GsAVjB4Hwvyp@jB=+OA zfAc#2GV8Tj7Rx?avtN-F zjDOWZ4*#%%y?Xjo#tODyNGWR-Dl`@MIfS7P46we*!yEjSJvMGYI8Mf_z2K9EG@ofq zQKEu!3_PE)*}2qmMv+e$9dQfhX~oJViLDH4etp_FjuQRxi$PO5uE9DZDhX-zC|l01 zoE#&tNaEcw?tncituB*k2Qn`No-kPtDl%b3tJ(tr9~BAWmN3{6J3q zP@&uq^~KjeLD#NvXRW;VLR5*|#bl_`O{G#wR5(O$$kF52;ZQ`MJLpVJ8aHA%cR0FA zsQz%lF&6Ybph7R!Sz`_R@;orvgJ}&9!{%6+4%XIv38Z>J6q>u8@ z9$wjzzPwL{%6}^_{dSa**_~uWgs{ns~w#rV!4Y+`SSZdpsi5Vs{^_ zk^r~Hk{<80ciPBB{C>6=OYwN!$-GGl>%bGOCRkB#J!e7ua$(z`)}wwxMLhiu(d@{s2VPuy@^x9i>;h<}U1 znlfUm^ymAS0ycLzW0fx9V{o6fSirfBl z>;}SjzpuK=k3Y%oXz0y9S6hi6{nALUqL8ic=cRC{&*{}xdC;{Io0PV=qPiJtxu<`h z@QWHPMH}fN(f9+HfCsQV*_m$sR7ozF>o;-87L)nK?b(!DTTziNab~4GP>7RJ(X!c3 z9nGjgt;H9UTD)t?iuw?V}SuSsCt<{5_fi>QCz%}02g3>WHLg>1^s z1yoH5st8!$Wc720UD(vV*ifq->{8=A$j51!u3bS;$q?hOId(Whs2FdocFkWV`x?Sf z5e9QZy|i4gJO$gD`BpAe382Z&wBC@O^!?Pq-taVC`&}sY$qo|0X?HibT5jQgLq6?F~+Y@apiw z9|hBIrRu($TO)+6m&)Ov@GA{|mi;d0dGcybk;}OD*;i|pG%Syfav0Tygd^%;Ne_{+ z7{Tu8Z7>x8CVPH*<2_tp&SI~H4A4n08{0gS_u~r{#R6$`jItYxDPElruh7vOMDU#3 zGIV}x>q654y@S51;r!)l zlr<;1n$hmD3*2H)pA~LEYJBa;dOO^opiwf!tvivQRsOEne>vk>WG6JB^vQX#{s}fZ zgjbh7GSunVA%g|$r^KlLEC2wEiDvd|mE7veZZNEEz z!RPs-L!1x`qI8v61l@(P$>V&Za=>j)A!^5=;KN+!yLmm;bFkep4uNkoFcY6$Q~b5v zlcxxY>4Az2IJ0To1+Rb#XZ8tB;|QzsuHm~}(TYR8;xANB?}90I%Yx#H_;HkfA;YDe zL&+hw?g#yL7dCq$8}x%T$w0-h9xgToL#Z2fvK@v|mmlNWx6Mpn!8mg^Z_lt51ukb=0;9tn5Y)!_ zaWNUfv>J~x!?u5~x4i4xKcqb!QxpXHeG&*m=@+WZgmInm^@6m)hf-SNet>3Pj|t7-HHdwLYzJejAgX-)~2ls-&r7D|^L1 z9B95BBI$8eQ_xxvu`P1c_Ml`nNg0MW&@$< z+8&z5qXB&scz@bvmEoKvWMX%I(m)Rl1B~g~MT)4<*P7+kD?dQ>)fT+>_R{U-zQM1o!mV;RuzY9vMQN4IOZ;jftcliq61!rE ze*ffqm>sm0e+pr&NfV8sdZ44j{CG5>C(yfv!RtJk=>B}?n&8px47RA>M_U7H3jRz{ z@fNx@vgm^X=Yvw=wCyX!;R4g)GKNQ+Cd z`@IkQbJ(YRuL1jR!rDosJT4=ER^NZ22Yw`;oikv%F3fH?w~0KF7F9GbpoPuIonqEl z7{S8FeL{_>cn0*7L_E)XVl|ZoN=!PnyCU5C45SzNyJnM2kfp{{=`@yn0V-)mKL^$m zgRo5I$4j~z$nq9ib1u*;{{nuCx@9o~-sn$-#7VK;UOniRd?Z0_niJ!8d?VZvQuguQ zqZj5LF;?kR0@6x^SNc#aWW-)wF8;+fn2U=c~$ZY)C@h_(1e9svl1j% zi519q1AT%V_Z~KV*L$))CBr>+*mrt-H;K*Sn^9n-nNTH#>Q~C=U($ckzYV%`J7eAc za_UvEiKJ*DN332X<$ObqUVjeV(S0enH0#s_ ziueW^nS*`J-->(@7&6ajO6wmr$NOWYfrlPWll8I#hgoraE`fXGM5H2-v<$Ev`F#$V z;HB58ZF+;jY^ToN`A~FW>Ojoo-WF2`q;!8eZw@gd$5~4w|El&$WEzxAhSQ!EWul?h zeLk~DopNVDNi=hk6a^zU@@^7_e9oDq96%{dj=ru?_)z~TEv)I3ZfLDD@)BN98xpru zm%rJ24X?z&+*CJpyz!O6YFATBiv%?bZ+N#Ss9K6#u-}BMU9pEm*ABkV#Fra~~{a2dHGgs#+VztoZ9zzpw1yW78OS{ja)rS~fe zxpXqz6oaOuZB;5ib7z&VXwIN{To;N#1@hKtrv+nuk*7X=*>b+OF}g;oQ9=@aeI;$o z&URI-jP#t_;vl;yEo`@i;SvS>Hp_B(5_n-$K~fR=iWK#lPVay;?BdZ3Doz;b~ z>w5vazY3OANOm9RcF~##^AH%)4l+p6dslO48mpmnii{;I5pmlFkY24b$S*rRo#H+# zXL2@djYAx{mc1jNgQYIklW&8&XUU%U!k6}Yf)z`}a^}K$S1LB@v-3dTO(J4JTW}%e z+s_G+WnY~{aK7E=6D4uMXvvvj8bsmE9Isbb$C;NS-g_HBs4Rri*$WtW`s1XFv<#~v z+|Qk>E__W67U-oEwlG{3YMha~Fvcdh)x^!{i#yxfW4@=-oU{46D);eGc{P4wXp2GA zgsN$oX8gMl43S-Q?F&@8n;>o?bc$5<_eY}?*G zsyf>4p*Ne_`q!k7s@#1^?J}$~g^GHaqV7iqb|h}sv5&~2i3AYKhijsS4YQqnpFIk z*uwt8RdhYUY|=UC-s%upy23N~fygUARPWo-vjUM24LFrh&4sJ9MZ1V@{ zU1|vMLi?I%f^c8pJiJl!yz}PRp7zcSG6tnDM*-RK z7dFYlY>w$zPQ3zSirVP03L1U$DZs`R;yXs@F~{emSv`bVkbq+8j}3}B z3edmn+6nICu_BAH2kym`j;3Nk0{yFl%}Gy)Amom=dwm0#`n5mIL&9R~Tt6K#|4Jx0 zdT(%JI>*$~1o>KTL^t62@%uJ~eAG_zA%~-+LV@TsD2&~=Q2Wh@TP2VD%Gp>D9_dk; zxD?IVn9dCe7!5`3*=(QeG~Jr&@_^Gk#AVAJhbviQEj;P={rYnO`P&1aPo(J?l9AdX zSc|iK%iZ;PrPyRBU^ggfiq|LVn%lg_?j3R%yOh^X63POxTHGBD6WsZ77#KT9wJQQ9V|(Th z={rhreqG-*1k7!B^=NKV%zP?{^7Tn9fx~hkV4&9+G7o$}Wv4cq=ph|L<`QVgX@vaP zs~=akAa`e^wZ^_#9tMB6-HaO|(I}=Jo-Druv4e^3@VL!>cRDyfL7Th>i_8%sKj5D3 zR?=h*hmzcxB2UKVd|oiLc;6QLQKBS26RHvW{hot783!S7X_zEq6@8>UVR{1v43s3g 
z*-{uyV&ow(+T%=@_#?nW_g?J6X1ftO%_ysBR6FOpfYH?<(2;KPYI&Iy+`kTa?0h*o ze)3|l^sBw{iiK3ymp!PhUNFQ#(}PBWFLvSK;U>yv_!-Vv-Cy6iroFy#Bz`RPT7$IV zTWQi&g5m(J5?%KZ8OT_db9JX>R-+)R{}+4j0asP-EsTba^s1mBAQn`*g7hW`igb}G zpa_D1bVQ^p2uKl?CRG@!NN>_R(tD92z4xX_J73NL97kv7KXc!Ezu&#zyU|6?PS#pk zS;@}cSy?+FST|SJ(!TyJadUNgX2XzQyOFoq_DI?;{-<~{Hg@Qg%E76T_SWaE4zmO8 z9YTRIciJSqs?3X%VKw8q;HgWqWig4J0JWMy7v0cB`k1#dr)mmyOVdkmPCs;ylp(jv zz}}2ro$Q$jx`ckOFn;tzq#M&V?0wZDHM2x^HO`~C7-t-v+2xrYE>DYWOsX%*^L%nqBKuP7L_)t;@N#G+Bi{ zA|EF~5U)W1T#S%89{Dcd;q!aw6`?M5`Z{CdnM89eR>a<1>cHVMC}{o;&MWMZX5u$b%yo@S4RGv7gIKNkCB@L=Pv4 zD&am?LtR;x+n>igY|91F9*LfMQ6!JYJQco-K;zKubEMUOKMB9I23I9y#Z$y|Tmcij z5P{$f0e+1Q|HAOYei|@w?^A6UiHV5;QAtrCE-ntPT)zUYUcCxrWMsh2n>WF&TepC& zt}cLkCICZ2Ltt)h4uTEYL8vh&2!jyu;35b!F)9% z%TpQT`ly1E*SA3aYdugHpbtuYu7gjZBB1PrJg5p#0oCD#pfuD3l!ZM2l@X?(HUeHp zSb)k%b5Q-(9DIzn1a-ezfrdC6P#O0ObJ$Q2UGkS`c*LwH~~_HG}}ShY{fJ2m zAUiu7eE9GIl*TxNidYv=73TuVdKHePFz!6ijtjfSKNEFgH*OYR789>`*IM9BBiK@l6Dng0Q#+VH*LKmX^T!+7kHkz6`#7 zT?1Pi>tK8HD_Dm4>)X3vb8{13Z-AZcE%0pz0d{wHk$2sH+W?P?`KPj>&X)NX_`farG=NFAu8q#!R3M>H7Z18e^s14OmfO(hhA zeGcli|3&}=LjR`fO~^q3DZ?exCtK1YbX>h3bEy07TYpEiEl50ZIvb?eGuF-|H?}w~-7`93T&rDysK# zesAMH`?ZIIKxEy$jpU%IsdbR@d%RBvPZ0bi#3CB)FHliYSZ5jRF+geow!=*&MQF@{ zN>K9q-jRMG#=|p3RB#y3X&Z;EhdVxmVfu+Br3Z?LZg zWM4wHB$9!A=3Ynmwfty=9i&G}z*DK-|{0n+qL@_%% ziUCrG7oVlUCmS-kK(!*WA5c&r&(b3#B)6`MMBNWa zHSYJIU-gGTu(PwXB2{o=uN&;q6G8?g40VAhB|rvu|Dt@fScE7fN2&0S3 z2L2VjIl>Vs06QzP0#E{KWDUqr6(A%eL+rPMR@7hhA0vb}FRB9UNEJXO{((X9>wN}! ze?<=!C;%D2MnKU+22}eJzzPsTdV{-n?`j$Di4RW@Mc#glKZYkL0bmUdy3y|pNKo`h z2I>3b;GfVlzC(BkpxOY%V6PGObg)-Gl;F*N@%!cfaR|V&M3i|rNFsxTgqR2nJl}gk zaYSHna75+-`Jd9Gq2Zh7BS*T%va+(`vf|?6XS~4yX_Xe^&qBX@2|_*C@a)>g}?3 zEA7?bF~aiC=#go^22+G5?8rzRqIxoHgiDPGRDbg?p|9-o3h6d3JE`48z~M>LDAJ9y^SMVnynSMndzBk{4Ya8UpE zX_3({^glE{{P`cl&-6dG8YAO>X7Km)|FQUA<^Rtlf57;;{6A#=gNy&A{|DpW-{|rG z9{th&i*TIp4DD$?zSFd)PSGCd?_ZG9BI)_AU*|ji^y%^c3O%x{|EKgn%YPup{~u%I zz`TpLH#{!^VbqUOK#Yo=0nQ&{xDz+(`ai@&P>*yF(HX`#OYjd27(c~m``16mU_br8 z8vo4i;QHtC59A`=FYo`_7}a2Z^9N}sW{02y;k%vqhkcfRv=a-d2mwA5J|HS83NDLZ zM%jzuY!)ahE2HehckbNzZYQ>|umIN9*1*xx5d;~q{bDml+KYd-75`=IG&nAF&<0Lx~km?`;&mfZmNsdw|yD=x+%>I7XeP}Z#2F*`MK?_`4YjLLn?Y^hM zd*^H4ZO9qWhWw))ImcZJeE4ohz6Q?^*|Q;MdMJXd=c=ISg$l^Zi6Jrf(V=O>Lj2*NW!%uJ%pv^eR9`LpALR;}Jv=#3H z(av2U-n|Q?AzbO%1y>=+^zOnlsds^5|1Pu@?*fg!U7!U)?ei|sgm52%*3d4r7w-bS zkzHVdvK7O4d>2?k8?yQ2F0hB`7E`;>PP_}8ARukT&!0aBaAO7#6chx)!^469TYr?j z__yDFL)nW{Q&T~DdO9d5C;;^-UZ5k>6Le)i2Q3-ipexr8)TF!sA5&j|x^#cgkl_zH z^8!F;Net*JN7{r_z~_b>F!V7AEVL(sq55<%+?WMMn)AQ}vhBn>eq;{;#{>^TsZin3w>wGt*#YW*Dr^jX?YFB$!*82VYhez}K}!l>K*Xc^S;C ze*r@ff9%^XSlHSHODij2eSIB#`?3r+q3!qE#uu;z(^sJF_Y1WBZfzlLzPtbJZNBg} z|JMNyAAe5+jrYj#_ecl15HT&}wSQ1b^*^GhtA4Gipdbr_f_~b6E~2jbu8K0WbV7VZ z6@{e#NKhA~qN1VCmy1HAk)d#iutGLcbTMI>?tZ?hDtZd0& z;q9eE-S}#w2vn366=5Zyj{Z_4m<5Bct~Qb)sW?47tvG!zZI9U>&64;TN2eKSj~;dQH!#pM*e5ukP^?B0{DJ@dIx&b( zd-Uo%1Gp4vfD{T@iM^&!Nc{!htd5I|5fU)Qzk=)ybRmK69s#nogMPuc%7^%;Ps5kX z*p$#Wp)kC8?F*~DSH$aI@GZ(Oax)i|1MiKnV->mM^MTm|p;V3mF zB{?}cF){JU1VdO0SP4`={X>0;>cruO@fVT!NCFBJKCw{BUTdfSA`foeqe~DU)&Xjj zl6t=Y@&aVFwY8Dxf7B;NR17aKY~I!@rM?egJ4?m^fD{~Qov@QMb(*v7~&f|Kj?3N=sy@3_}01Q<>fh9a49o8 zTL48soL604T^fOu{%863x*k^DHLlZKv`DQ|B1OtG!@$7C`&;~rf>KM|pnG&z_#5Jb6W=kUaw_617V? 
[GIT binary patch: base85-encoded binary payload omitted]
literal 0
HcmV?d00001

From d47e3e664286314c6b9728bb38b1c7a576211572 Mon Sep 17 00:00:00 2001
From: bmaltais
Date: Sun, 17 Nov 2024 10:14:57 -0500
Subject: [PATCH 179/199] Update sd-scripts and allow python 3.10 to 3.12

---
 sd-scripts            | 2 +-
 setup/setup_common.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/sd-scripts b/sd-scripts
index 0047bb1fc..2a188f07e 160000
--- a/sd-scripts
+++ b/sd-scripts
@@ -1 +1 @@
-Subproject commit 0047bb1fc30a9987138a20f52f774ca536ff7b6a
+Subproject commit 2a188f07e682ed5dd958821a223d48c17a9aeb83
diff --git a/setup/setup_common.py b/setup/setup_common.py
index d02546310..69d1d6b6d 100644
--- a/setup/setup_common.py
+++ b/setup/setup_common.py
@@ -11,7 +11,7 @@

 # Constants
 MIN_PYTHON_VERSION = (3, 10, 9)
-MAX_PYTHON_VERSION = (3, 11, 0)
+MAX_PYTHON_VERSION = (3, 13, 0)
 LOG_DIR = "../logs/setup/"
 LOG_LEVEL = "INFO" #
Set to "INFO" or "WARNING" for less verbose logging @@ -31,7 +31,7 @@ def check_python_version(): log.error( f"The current version of python ({sys.version}) is not supported." ) - log.error("The Python version must be >= 3.10.9 and < 3.11.0.") + log.error("The Python version must be >= 3.10.9 and < 3.13.0.") return False return True except Exception as e: From a6f0ff72a8121dbffb28abf46d3bb023762bd09c Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 17 Nov 2024 18:37:27 -0500 Subject: [PATCH 180/199] Fix issue with max_train_steps --- kohya_gui/dreambooth_gui.py | 34 +++++++++++++++--------------- kohya_gui/finetune_gui.py | 20 +++++++++--------- kohya_gui/lora_gui.py | 12 ++++------- kohya_gui/textual_inversion_gui.py | 21 ++---------------- 4 files changed, 33 insertions(+), 54 deletions(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 55454b526..98d67530e 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -781,23 +781,23 @@ def train_model( log.info(f"Regularization factor: {reg_factor}") - if max_train_steps == 0: - # calculate max_train_steps - max_train_steps = int( - math.ceil( - float(total_steps) - / int(train_batch_size) - / int(gradient_accumulation_steps) - * int(epoch) - * int(reg_factor) - ) - ) - max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" - else: - if max_train_steps == 0: - max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." - else: - max_train_steps_info = f"Max train steps: {max_train_steps}" + # if max_train_steps == 0: + # # calculate max_train_steps + # max_train_steps = int( + # math.ceil( + # float(total_steps) + # / int(train_batch_size) + # / int(gradient_accumulation_steps) + # * int(epoch) + # * int(reg_factor) + # ) + # ) + # max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" + # else: + # if max_train_steps == 0: + # max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." 
+ # else: + # max_train_steps_info = f"Max train steps: {max_train_steps}" log.info(f"Total steps: {total_steps}") diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 77351bbf9..e01456ca1 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -846,16 +846,16 @@ def train_model( repeats = int(image_num) * int(dataset_repeats) log.info(f"repeats = {str(repeats)}") - if max_train_steps == 0: - # calculate max_train_steps - max_train_steps = int( - math.ceil( - float(repeats) - / int(train_batch_size) - / int(gradient_accumulation_steps) - * int(epoch) - ) - ) + # if max_train_steps == 0: + # # calculate max_train_steps + # max_train_steps = int( + # math.ceil( + # float(repeats) + # / int(train_batch_size) + # / int(gradient_accumulation_steps) + # * int(epoch) + # ) + # ) # Divide by two because flip augmentation create two copied of the source images if flip_aug and max_train_steps: diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index f1713edf5..75dbb4237 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1076,7 +1076,7 @@ def train_model( log.info(f"Regularization factor: {reg_factor}") - if max_train_steps == 0: + if (max_train_steps == 0) and (stop_text_encoder_training != 0): # calculate max_train_steps max_train_steps = int( math.ceil( @@ -1094,13 +1094,9 @@ def train_model( else: max_train_steps_info = f"Max train steps: {max_train_steps}" - # calculate stop encoder training - if stop_text_encoder_training == 0: - stop_text_encoder_training = 0 - else: - stop_text_encoder_training = math.ceil( - float(max_train_steps) / 100 * int(stop_text_encoder_training) - ) + stop_text_encoder_training = math.ceil( + float(max_train_steps) / 100 * int(stop_text_encoder_training) + ) if stop_text_encoder_training != 0 else 0 # Calculate lr_warmup_steps if lr_warmup_steps > 0: diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 42249aee4..9b8ea6b26 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -664,22 +664,9 @@ def train_model( log.info(f"Regularization factor: {reg_factor}") if max_train_steps == 0: - # calculate max_train_steps - max_train_steps = int( - math.ceil( - float(total_steps) - / int(train_batch_size) - / int(gradient_accumulation_steps) - * int(epoch) - * int(reg_factor) - ) - ) - max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" + max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." else: - if max_train_steps == 0: - max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." 
- else: - max_train_steps_info = f"Max train steps: {max_train_steps}" + max_train_steps_info = f"Max train steps: {max_train_steps}" # calculate stop encoder training if stop_text_encoder_training_pct == 0: @@ -1076,10 +1063,6 @@ def list_embedding_files(path): step=1, label="Vectors", ) - # max_train_steps = gr.Textbox( - # label='Max train steps', - # placeholder='(Optional) Maximum number of steps', - # ) template = gr.Dropdown( label="Template", choices=[ From 309a9bbc1b333d965592efc1a4c762b2bd489c1c Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 18 Nov 2024 07:07:33 -0500 Subject: [PATCH 181/199] Fix max_train_steps_info error --- kohya_gui/dreambooth_gui.py | 2 ++ kohya_gui/finetune_gui.py | 2 ++ kohya_gui/lora_gui.py | 2 ++ kohya_gui/textual_inversion_gui.py | 2 ++ 4 files changed, 8 insertions(+) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 98d67530e..81db18ce4 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -654,6 +654,8 @@ def train_model( gr.Button(visible=False or headless), gr.Textbox(value=train_state_value), ] + + max_train_steps_info = "Automatic by sd-scripts" if executor.is_running(): log.error("Training is already running. Can't start another training session.") diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index e01456ca1..d6f137106 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -685,6 +685,8 @@ def train_model( gr.Button(visible=False or headless), gr.Textbox(value=train_state_value), ] + + max_train_steps_info = "Automatic by sd-scripts" if executor.is_running(): log.error("Training is already running. Can't start another training session.") diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 75dbb4237..53d7bb580 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -883,6 +883,8 @@ def train_model( gr.Button(visible=False or headless), gr.Textbox(value=train_state_value), ] + + max_train_steps_info = "Automatic by sd-scripts" if executor.is_running(): log.error("Training is already running. Can't start another training session.") diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 9b8ea6b26..618a0d816 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -521,6 +521,8 @@ def train_model( gr.Textbox(value=train_state_value), ] + max_train_steps_info = "Automatic by sd-scripts" + if executor.is_running(): log.error("Training is already running. Can't start another training session.") return TRAIN_BUTTON_VISIBLE From d0cd9f547a184655ddc038349c94bc5db8bd8b08 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 18 Nov 2024 19:31:51 -0500 Subject: [PATCH 182/199] Reverting all changes for max_train_steps --- kohya_gui/dreambooth_gui.py | 38 ++++++++++++++---------------- kohya_gui/finetune_gui.py | 24 +++++++++---------- kohya_gui/lora_gui.py | 16 +++++++------ kohya_gui/textual_inversion_gui.py | 25 ++++++++++++++++---- 4 files changed, 58 insertions(+), 45 deletions(-) diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 81db18ce4..4a0e88b20 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -654,8 +654,6 @@ def train_model( gr.Button(visible=False or headless), gr.Textbox(value=train_state_value), ] - - max_train_steps_info = "Automatic by sd-scripts" if executor.is_running(): log.error("Training is already running. 
Can't start another training session.") @@ -783,23 +781,23 @@ def train_model( log.info(f"Regularization factor: {reg_factor}") - # if max_train_steps == 0: - # # calculate max_train_steps - # max_train_steps = int( - # math.ceil( - # float(total_steps) - # / int(train_batch_size) - # / int(gradient_accumulation_steps) - # * int(epoch) - # * int(reg_factor) - # ) - # ) - # max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" - # else: - # if max_train_steps == 0: - # max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." - # else: - # max_train_steps_info = f"Max train steps: {max_train_steps}" + if max_train_steps == 0: + # calculate max_train_steps + max_train_steps = int( + math.ceil( + float(total_steps) + / int(train_batch_size) + / int(gradient_accumulation_steps) + * int(epoch) + * int(reg_factor) + ) + ) + max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" + else: + if max_train_steps == 0: + max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." + else: + max_train_steps_info = f"Max train steps: {max_train_steps}" log.info(f"Total steps: {total_steps}") @@ -1470,4 +1468,4 @@ def dreambooth_tab( folders.reg_data_dir, folders.output_dir, folders.logging_dir, - ) + ) \ No newline at end of file diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index d6f137106..8a2cc01c0 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -685,8 +685,6 @@ def train_model( gr.Button(visible=False or headless), gr.Textbox(value=train_state_value), ] - - max_train_steps_info = "Automatic by sd-scripts" if executor.is_running(): log.error("Training is already running. Can't start another training session.") @@ -848,16 +846,16 @@ def train_model( repeats = int(image_num) * int(dataset_repeats) log.info(f"repeats = {str(repeats)}") - # if max_train_steps == 0: - # # calculate max_train_steps - # max_train_steps = int( - # math.ceil( - # float(repeats) - # / int(train_batch_size) - # / int(gradient_accumulation_steps) - # * int(epoch) - # ) - # ) + if max_train_steps == 0: + # calculate max_train_steps + max_train_steps = int( + math.ceil( + float(repeats) + / int(train_batch_size) + / int(gradient_accumulation_steps) + * int(epoch) + ) + ) # Divide by two because flip augmentation create two copied of the source images if flip_aug and max_train_steps: @@ -1634,4 +1632,4 @@ def list_presets(path): if os.path.exists(top_level_path): with open(os.path.join(top_level_path), "r", encoding="utf-8") as file: guides_top_level = file.read() + "\n" - gr.Markdown(guides_top_level) + gr.Markdown(guides_top_level) \ No newline at end of file diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 53d7bb580..a172017e5 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -883,8 +883,6 @@ def train_model( gr.Button(visible=False or headless), gr.Textbox(value=train_state_value), ] - - max_train_steps_info = "Automatic by sd-scripts" if executor.is_running(): log.error("Training is already running. 
Can't start another training session.") @@ -1078,7 +1076,7 @@ def train_model( log.info(f"Regularization factor: {reg_factor}") - if (max_train_steps == 0) and (stop_text_encoder_training != 0): + if max_train_steps == 0: # calculate max_train_steps max_train_steps = int( math.ceil( @@ -1096,9 +1094,13 @@ def train_model( else: max_train_steps_info = f"Max train steps: {max_train_steps}" - stop_text_encoder_training = math.ceil( - float(max_train_steps) / 100 * int(stop_text_encoder_training) - ) if stop_text_encoder_training != 0 else 0 + # calculate stop encoder training + if stop_text_encoder_training == 0: + stop_text_encoder_training = 0 + else: + stop_text_encoder_training = math.ceil( + float(max_train_steps) / 100 * int(stop_text_encoder_training) + ) # Calculate lr_warmup_steps if lr_warmup_steps > 0: @@ -2855,4 +2857,4 @@ def update_LoRA_settings( folders.reg_data_dir, folders.output_dir, folders.logging_dir, - ) + ) \ No newline at end of file diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 618a0d816..30f87fcf4 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -521,8 +521,6 @@ def train_model( gr.Textbox(value=train_state_value), ] - max_train_steps_info = "Automatic by sd-scripts" - if executor.is_running(): log.error("Training is already running. Can't start another training session.") return TRAIN_BUTTON_VISIBLE @@ -666,9 +664,22 @@ def train_model( log.info(f"Regularization factor: {reg_factor}") if max_train_steps == 0: - max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." + # calculate max_train_steps + max_train_steps = int( + math.ceil( + float(total_steps) + / int(train_batch_size) + / int(gradient_accumulation_steps) + * int(epoch) + * int(reg_factor) + ) + ) + max_train_steps_info = f"max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}" else: - max_train_steps_info = f"Max train steps: {max_train_steps}" + if max_train_steps == 0: + max_train_steps_info = f"Max train steps: 0. sd-scripts will therefore default to 1600. Please specify a different value if required." 
+ else: + max_train_steps_info = f"Max train steps: {max_train_steps}" # calculate stop encoder training if stop_text_encoder_training_pct == 0: @@ -1065,6 +1076,10 @@ def list_embedding_files(path): step=1, label="Vectors", ) + # max_train_steps = gr.Textbox( + # label='Max train steps', + # placeholder='(Optional) Maximum number of steps', + # ) template = gr.Dropdown( label="Template", choices=[ @@ -1294,4 +1309,4 @@ def list_embedding_files(path): folders.reg_data_dir, folders.output_dir, folders.logging_dir, - ) + ) \ No newline at end of file From 3c264e788685a1dc77e45228f71513185e0d8de9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Thu, 21 Nov 2024 19:44:06 -0500 Subject: [PATCH 183/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 2a188f07e..2a61fc078 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 2a188f07e682ed5dd958821a223d48c17a9aeb83 +Subproject commit 2a61fc07846dc919ea64b568f7e18c010e5c8e06 From 10e6f14110e95e4ee474fddeaa63430b8725ad4f Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 3 Dec 2024 07:14:09 -0500 Subject: [PATCH 184/199] Update sd-scripts --- sd-scripts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sd-scripts b/sd-scripts index 2a61fc078..8b36d907d 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 2a61fc07846dc919ea64b568f7e18c010e5c8e06 +Subproject commit 8b36d907d8635dca64224574b5cb15013e00809d From 79e5c36e7ff9554410d6d749235d591787a37edc Mon Sep 17 00:00:00 2001 From: bmaltais Date: Sun, 15 Dec 2024 09:58:22 -0500 Subject: [PATCH 185/199] Update to latest sd-scripts --- requirements.txt | 2 +- sd-scripts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 235cbfb6a..728c0e449 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,7 +23,7 @@ prodigyopt==1.0 pytorch-lightning==1.9.0 rich>=13.7.1 safetensors==0.4.4 -schedulefree==1.2.7 +schedulefree==1.4 scipy==1.11.4 # for T5XXL tokenizer (SD3/FLUX) sentencepiece==0.2.0 diff --git a/sd-scripts b/sd-scripts index 8b36d907d..e89653975 160000 --- a/sd-scripts +++ b/sd-scripts @@ -1 +1 @@ -Subproject commit 8b36d907d8635dca64224574b5cb15013e00809d +Subproject commit e89653975ddf429cdf0c0fd268da0a5a3e8dba1f From 45cfc1d8683f9b7491a0295df05e6266323fbeaf Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 30 Dec 2024 19:13:20 -0500 Subject: [PATCH 186/199] Add support for RAdamScheduleFree --- kohya_gui/class_basic_training.py | 1 + 1 file changed, 1 insertion(+) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index 0d03769cf..2bf6ff9a0 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -212,6 +212,7 @@ def init_lr_and_optimizer_controls(self) -> None: "PagedAdamW32bit", "PagedLion8bit", "Prodigy", + "RAdamScheduleFree", "SGDNesterov", "SGDNesterov8bit", "SGDScheduleFree", From fce89ad798d9d22870ebeb92c94e38fabc0f0faf Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 30 Dec 2024 19:21:03 -0500 Subject: [PATCH 187/199] Add support for huber_scale --- kohya_gui/class_advanced_training.py | 8 ++++++++ kohya_gui/dreambooth_gui.py | 5 +++++ kohya_gui/finetune_gui.py | 5 +++++ kohya_gui/lora_gui.py | 5 +++++ kohya_gui/textual_inversion_gui.py | 5 +++++ 5 files changed, 28 insertions(+) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index 0aa9e0429..43d01588c 100644 --- 
a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -168,6 +168,14 @@ def list_vae_files(path): step=0.01, info="The huber loss parameter. Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type", ) + self.huber_scale = gr.Number( + label="Huber scale", + value=self.config.get("advanced.huber_scale", 1.0), + minimum=0.0, + maximum=1.0, + step=0.01, + info="The Huber loss scale parameter. Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type.", + ) with gr.Row(): self.save_every_n_steps = gr.Number( diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 4a0e88b20..ae1cdffcf 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -159,6 +159,7 @@ def save_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, weighted_captions, @@ -367,6 +368,7 @@ def open_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, weighted_captions, @@ -570,6 +572,7 @@ def train_model( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, weighted_captions, @@ -907,6 +910,7 @@ def train_model( "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, "huber_c": huber_c, + "huber_scale": huber_scale, "huber_schedule": huber_schedule, "huggingface_path_in_repo": huggingface_path_in_repo, "huggingface_repo_id": huggingface_repo_id, @@ -1341,6 +1345,7 @@ def dreambooth_tab( advanced_training.loss_type, advanced_training.huber_schedule, advanced_training.huber_c, + advanced_training.huber_scale, advanced_training.vae_batch_size, advanced_training.min_snr_gamma, advanced_training.weighted_captions, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 8a2cc01c0..fbaa05bb9 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -167,6 +167,7 @@ def save_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, weighted_captions, @@ -381,6 +382,7 @@ def open_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, weighted_captions, @@ -601,6 +603,7 @@ def train_model( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, weighted_captions, @@ -973,6 +976,7 @@ def train_model( "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, "huber_c": huber_c, + "huber_scale": huber_scale, "huber_schedule": huber_schedule, "huggingface_repo_id": huggingface_repo_id, "huggingface_token": huggingface_token, @@ -1473,6 +1477,7 @@ def list_presets(path): advanced_training.loss_type, advanced_training.huber_schedule, advanced_training.huber_c, + advanced_training.huber_scale, advanced_training.vae_batch_size, advanced_training.min_snr_gamma, weighted_captions, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index a172017e5..f6cea68fb 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -180,6 +180,7 @@ def save_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, save_every_n_steps, @@ -449,6 +450,7 @@ def open_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, save_every_n_steps, @@ -752,6 +754,7 @@ def train_model( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, 
save_every_n_steps, @@ -1409,6 +1412,7 @@ def train_model( "gradient_checkpointing": gradient_checkpointing, "highvram": highvram, "huber_c": huber_c, + "huber_scale": huber_scale, "huber_schedule": huber_schedule, "huggingface_repo_id": huggingface_repo_id, "huggingface_token": huggingface_token, @@ -2659,6 +2663,7 @@ def update_LoRA_settings( advanced_training.loss_type, advanced_training.huber_schedule, advanced_training.huber_c, + advanced_training.huber_scale, advanced_training.vae_batch_size, advanced_training.min_snr_gamma, advanced_training.save_every_n_steps, diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 30f87fcf4..7ba4d3283 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -153,6 +153,7 @@ def save_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, save_every_n_steps, @@ -319,6 +320,7 @@ def open_configuration( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, save_every_n_steps, @@ -478,6 +480,7 @@ def train_model( loss_type, huber_schedule, huber_c, + huber_scale, vae_batch_size, min_snr_gamma, save_every_n_steps, @@ -771,6 +774,7 @@ def train_model( "gradient_accumulation_steps": int(gradient_accumulation_steps), "gradient_checkpointing": gradient_checkpointing, "huber_c": huber_c, + "huber_scale": huber_scale, "huber_schedule": huber_schedule, "huggingface_repo_id": huggingface_repo_id, "huggingface_token": huggingface_token, @@ -1225,6 +1229,7 @@ def list_embedding_files(path): advanced_training.loss_type, advanced_training.huber_schedule, advanced_training.huber_c, + advanced_training.huber_scale, advanced_training.vae_batch_size, advanced_training.min_snr_gamma, advanced_training.save_every_n_steps, From 7068c7dabe865c2723560b4f6267815400a52b8b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Mon, 30 Dec 2024 19:37:03 -0500 Subject: [PATCH 188/199] Add support for fused_backward_pass for sd3 finetuning --- kohya_gui/class_sd3.py | 7 +++++++ kohya_gui/finetune_gui.py | 7 +++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py index d5dae715f..9d0ac3f5c 100644 --- a/kohya_gui/class_sd3.py +++ b/kohya_gui/class_sd3.py @@ -195,6 +195,13 @@ def noise_offset_type_change( info="Cache text encoder outputs to disk to speed up inference", interactive=True, ) + with gr.Row(): + self.sd3_fused_backward_pass = gr.Checkbox( + label="Fused Backward Pass", + value=self.config.get("sd3.fused_backward_pass", False), + info="Enables the fusing of the optimizer step into the backward pass for each parameter. 
Only Adafactor optimizer is supported.", + interactive=True, + ) self.sd3_checkbox.change( lambda sd3_checkbox: gr.Accordion(visible=sd3_checkbox), diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index fbaa05bb9..47bcaffef 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -208,6 +208,7 @@ def save_configuration( # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, + sd3_fused_backward_pass, clip_g, clip_l, logit_mean, @@ -423,6 +424,7 @@ def open_configuration( # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, + sd3_fused_backward_pass, clip_g, clip_l, logit_mean, @@ -644,6 +646,7 @@ def train_model( # SD3 parameters sd3_cache_text_encoder_outputs, sd3_cache_text_encoder_outputs_to_disk, + sd3_fused_backward_pass, clip_g, clip_l, logit_mean, @@ -969,7 +972,7 @@ def train_model( "fp8_base": fp8_base, "full_bf16": full_bf16, "full_fp16": full_fp16, - "fused_backward_pass": fused_backward_pass if not flux1_checkbox else flux_fused_backward_pass, + "fused_backward_pass": sd3_fused_backward_pass if sd3_checkbox else flux_fused_backward_pass if flux1_checkbox else fused_backward_pass, "fused_optimizer_groups": ( int(fused_optimizer_groups) if fused_optimizer_groups > 0 else None ), @@ -1118,7 +1121,6 @@ def train_model( "blockwise_fused_optimizers": ( blockwise_fused_optimizers if flux1_checkbox else None ), - # "flux_fused_backward_pass": see previous assignment of fused_backward_pass in above code "cpu_offload_checkpointing": ( cpu_offload_checkpointing if flux1_checkbox else None ), @@ -1529,6 +1531,7 @@ def list_presets(path): sd3_training.t5xxl_device, sd3_training.t5xxl_dtype, sd3_training.sd3_text_encoder_batch_size, + sd3_training.sd3_fused_backward_pass, sd3_training.weighting_scheme, source_model.sd3_checkbox, # Flux1 parameters From 730ed13546a3eb4f80f3c8f96b3b9da72ff8c21b Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 07:12:43 -0500 Subject: [PATCH 189/199] Add support for prodigyplus.ProdigyPlusScheduleFree --- kohya_gui/class_basic_training.py | 1 + requirements.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py index 2bf6ff9a0..84a429021 100644 --- a/kohya_gui/class_basic_training.py +++ b/kohya_gui/class_basic_training.py @@ -212,6 +212,7 @@ def init_lr_and_optimizer_controls(self) -> None: "PagedAdamW32bit", "PagedLion8bit", "Prodigy", + "prodigyplus.ProdigyPlusScheduleFree", "RAdamScheduleFree", "SGDNesterov", "SGDNesterov8bit", diff --git a/requirements.txt b/requirements.txt index 728c0e449..a5234ec19 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,6 +20,7 @@ protobuf==3.20.3 open-clip-torch==2.20.0 opencv-python==4.10.0.84 prodigyopt==1.0 +prodigy-plus-schedule-free==1.8.0 pytorch-lightning==1.9.0 rich>=13.7.1 safetensors==0.4.4 From 3eec4c9b32a5310c177a041493b60c1c63c124c8 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 07:59:55 -0500 Subject: [PATCH 190/199] SD3 LoRA training MVP --- kohya_gui/class_advanced_training.py | 2 +- kohya_gui/lora_gui.py | 125 +++++++++++++++++++++++++-- kohya_gui/sd_modeltype.py | 2 + 3 files changed, 123 insertions(+), 6 deletions(-) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index 43d01588c..06d200b6c 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -172,7 +172,7 @@ def list_vae_files(path): label="Huber scale", 
value=self.config.get("advanced.huber_scale", 1.0), minimum=0.0, - maximum=1.0, + maximum=10.0, step=0.01, info="The Huber loss scale parameter. Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type.", ) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index f6cea68fb..4527b3fe3 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -31,6 +31,7 @@ from .class_source_model import SourceModel from .class_basic_training import BasicTraining from .class_advanced_training import AdvancedTraining +from .class_sd3 import sd3Training from .class_sdxl_parameters import SDXLParameters from .class_folders import Folders from .class_command_executor import CommandExecutor @@ -302,6 +303,24 @@ def save_configuration( in_dims, train_double_block_indices, train_single_block_indices, + + # SD3 parameters + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, + sd3_fused_backward_pass, + clip_g, + sd3_clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + sd3_t5xxl, + t5xxl_device, + t5xxl_dtype, + sd3_text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -573,6 +592,24 @@ def open_configuration( train_double_block_indices, train_single_block_indices, + # SD3 parameters + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, + sd3_fused_backward_pass, + clip_g, + sd3_clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + sd3_t5xxl, + t5xxl_device, + t5xxl_dtype, + sd3_text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, + ## training_preset, ): @@ -876,6 +913,24 @@ def train_model( in_dims, train_double_block_indices, train_single_block_indices, + + # SD3 parameters + sd3_cache_text_encoder_outputs, + sd3_cache_text_encoder_outputs_to_disk, + sd3_fused_backward_pass, + clip_g, + sd3_clip_l, + logit_mean, + logit_std, + mode_scale, + save_clip, + save_t5xxl, + sd3_t5xxl, + t5xxl_device, + t5xxl_dtype, + sd3_text_encoder_batch_size, + weighting_scheme, + sd3_checkbox, ): # Get list of function parameters and values parameters = list(locals().items()) @@ -1149,6 +1204,8 @@ def train_model( run_cmd.append(rf"{scriptdir}/sd-scripts/sdxl_train_network.py") elif flux1_checkbox: run_cmd.append(rf"{scriptdir}/sd-scripts/flux_train_network.py") + elif sd3_checkbox: + run_cmd.append(rf"{scriptdir}/sd-scripts/sd3_train_network.py") else: run_cmd.append(rf"{scriptdir}/sd-scripts/train_network.py") @@ -1374,6 +1431,18 @@ def train_model( if text_encoder_lr_float != 0 or unet_lr_float != 0: do_not_set_learning_rate = True + + clip_l_value = None + if sd3_checkbox: + clip_l_value = sd3_clip_l + elif flux1_checkbox: + clip_l_value = clip_l + + t5xxl_value = None + if flux1_checkbox: + t5xxl_value = t5xxl + elif sd3_checkbox: + t5xxl_value = sd3_t5xxl config_toml_data = { "adaptive_noise_scale": ( @@ -1390,6 +1459,13 @@ def train_model( True if (sdxl and sdxl_cache_text_encoder_outputs) or (flux1_checkbox and flux1_cache_text_encoder_outputs) + or (sd3_checkbox and sd3_cache_text_encoder_outputs) + else None + ), + "cache_text_encoder_outputs_to_disk": ( + True + if flux1_checkbox and flux1_cache_text_encoder_outputs_to_disk + or sd3_checkbox and sd3_cache_text_encoder_outputs_to_disk else None ), "caption_dropout_every_n_epochs": int(caption_dropout_every_n_epochs), @@ -1554,14 +1630,31 @@ def train_model( "wandb_run_name": wandb_run_name if wandb_run_name != "" else output_name, 
"weighted_captions": weighted_captions, "xformers": True if xformers == "xformers" else None, - # Flux.1 specific parameters + + # SD3 only Parameters # "cache_text_encoder_outputs": see previous assignment above for code - "cache_text_encoder_outputs_to_disk": ( - flux1_cache_text_encoder_outputs_to_disk if flux1_checkbox else None + # "cache_text_encoder_outputs_to_disk": see previous assignment above for code + "clip_g": clip_g if sd3_checkbox else None, + "clip_l": clip_l_value, + "logit_mean": logit_mean if sd3_checkbox else None, + "logit_std": logit_std if sd3_checkbox else None, + "mode_scale": mode_scale if sd3_checkbox else None, + "save_clip": save_clip if sd3_checkbox else None, + "save_t5xxl": save_t5xxl if sd3_checkbox else None, + # "t5xxl": see previous assignment above for code + "t5xxl_device": t5xxl_device if sd3_checkbox else None, + "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, + "text_encoder_batch_size": ( + sd3_text_encoder_batch_size if sd3_checkbox else None ), + "weighting_scheme": weighting_scheme if sd3_checkbox else None, + + # Flux.1 specific parameters + # "cache_text_encoder_outputs": see previous assignment above for code + # "cache_text_encoder_outputs_to_disk": see previous assignment above for code "ae": ae if flux1_checkbox else None, "clip_l": clip_l if flux1_checkbox else None, - "t5xxl": t5xxl if flux1_checkbox else None, + "t5xxl": t5xxl_value, "discrete_flow_shift": float(discrete_flow_shift) if flux1_checkbox else None, "model_prediction_type": model_prediction_type if flux1_checkbox else None, "timestep_sampling": timestep_sampling if flux1_checkbox else None, @@ -2454,7 +2547,11 @@ def update_LoRA_settings( config=config, flux1_checkbox=source_model.flux1_checkbox, ) - + + # Add SD3 Parameters + sd3_training = sd3Training( + headless=headless, config=config, sd3_checkbox=source_model.sd3_checkbox + ) with gr.Accordion("Advanced", open=False, elem_classes="advanced_background"): # with gr.Accordion('Advanced Configuration', open=False): @@ -2776,6 +2873,24 @@ def update_LoRA_settings( flux1_training.in_dims, flux1_training.train_double_block_indices, flux1_training.train_single_block_indices, + + # SD3 Parameters + sd3_training.sd3_cache_text_encoder_outputs, + sd3_training.sd3_cache_text_encoder_outputs_to_disk, + sd3_training.clip_g, + sd3_training.clip_l, + sd3_training.logit_mean, + sd3_training.logit_std, + sd3_training.mode_scale, + sd3_training.save_clip, + sd3_training.save_t5xxl, + sd3_training.t5xxl, + sd3_training.t5xxl_device, + sd3_training.t5xxl_dtype, + sd3_training.sd3_text_encoder_batch_size, + sd3_training.sd3_fused_backward_pass, + sd3_training.weighting_scheme, + source_model.sd3_checkbox, ] configuration.button_open_config.click( diff --git a/kohya_gui/sd_modeltype.py b/kohya_gui/sd_modeltype.py index bb70150a0..f31855130 100755 --- a/kohya_gui/sd_modeltype.py +++ b/kohya_gui/sd_modeltype.py @@ -46,6 +46,8 @@ def hasKeyPrefix(pfx): self.model_type = ModelType.SD1 except: pass + + # print(f"Model type: {self.model_type}") def Is_SD1(self): return self.model_type == ModelType.SD1 From 3c860c40356cca0be5a5fda2c00fb4832d810ac9 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 08:11:42 -0500 Subject: [PATCH 191/199] Make blocks_to_swap common --- kohya_gui/class_advanced_training.py | 9 +++++++++ kohya_gui/class_flux1.py | 18 +++++++++--------- kohya_gui/dreambooth_gui.py | 2 +- kohya_gui/finetune_gui.py | 2 +- kohya_gui/lora_gui.py | 2 +- 5 files changed, 21 insertions(+), 12 deletions(-) diff --git 
a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index 06d200b6c..7d6e35961 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -493,6 +493,15 @@ def full_options_update(full_fp16, full_bf16): value=self.config.get("advanced.vae_batch_size", 0), step=1, ) + self.blocks_to_swap = gr.Slider( + label="Blocks to swap", + value=self.config.get("advanced.blocks_to_swap", 0), + info="The number of blocks to swap. The default is None (no swap). These options must be combined with --fused_backward_pass or --blockwise_fused_optimizers. The recommended maximum value is 36.", + minimum=0, + maximum=57, + step=1, + interactive=True, + ) with gr.Group(), gr.Row(): self.save_state = gr.Checkbox( label="Save training state", diff --git a/kohya_gui/class_flux1.py b/kohya_gui/class_flux1.py index d165705fc..ba4218209 100644 --- a/kohya_gui/class_flux1.py +++ b/kohya_gui/class_flux1.py @@ -202,15 +202,15 @@ def noise_offset_type_change( ) with gr.Row(): - self.blocks_to_swap = gr.Slider( - label="Blocks to swap", - value=self.config.get("flux1.blocks_to_swap", 0), - info="The number of blocks to swap. The default is None (no swap). These options must be combined with --fused_backward_pass or --blockwise_fused_optimizers. The recommended maximum value is 36.", - minimum=0, - maximum=57, - step=1, - interactive=True, - ) + # self.blocks_to_swap = gr.Slider( + # label="Blocks to swap", + # value=self.config.get("flux1.blocks_to_swap", 0), + # info="The number of blocks to swap. The default is None (no swap). These options must be combined with --fused_backward_pass or --blockwise_fused_optimizers. The recommended maximum value is 36.", + # minimum=0, + # maximum=57, + # step=1, + # interactive=True, + # ) self.single_blocks_to_swap = gr.Slider( label="Single Blocks to swap (depercated)", value=self.config.get("flux1.single_blocks_to_swap", 0), diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index ae1cdffcf..2353bdf33 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -1415,7 +1415,7 @@ def dreambooth_tab( flux1_training.blockwise_fused_optimizers, flux1_training.flux_fused_backward_pass, flux1_training.cpu_offload_checkpointing, - flux1_training.blocks_to_swap, + advanced_training.blocks_to_swap, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, flux1_training.mem_eff_save, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index 47bcaffef..f5a501234 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -1550,7 +1550,7 @@ def list_presets(path): flux1_training.blockwise_fused_optimizers, flux1_training.flux_fused_backward_pass, flux1_training.cpu_offload_checkpointing, - flux1_training.blocks_to_swap, + advanced_training.blocks_to_swap, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, flux1_training.mem_eff_save, diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 4527b3fe3..236cfdfbb 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -2859,7 +2859,7 @@ def update_LoRA_settings( flux1_training.split_qkv, flux1_training.train_t5xxl, flux1_training.cpu_offload_checkpointing, - flux1_training.blocks_to_swap, + advanced_training.blocks_to_swap, flux1_training.single_blocks_to_swap, flux1_training.double_blocks_to_swap, flux1_training.img_attn_dim, From 4a741a8a4aa68731808ab596afb03f26abc4ab44 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 08:24:37 -0500 Subject: 
[PATCH 192/199] Add support for sd3 lora disable_mmap_load_safetensors --- kohya_gui/class_sd3.py | 5 +++++ kohya_gui/lora_gui.py | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py index 9d0ac3f5c..feeaf3c52 100644 --- a/kohya_gui/class_sd3.py +++ b/kohya_gui/class_sd3.py @@ -202,6 +202,11 @@ def noise_offset_type_change( info="Enables the fusing of the optimizer step into the backward pass for each parameter. Only Adafactor optimizer is supported.", interactive=True, ) + self.disable_mmap_load_safetensors = gr.Checkbox( + label="Disable mmap load safe tensors", + info="Disable memory mapping when loading the model's .safetensors in SDXL.", + value=self.config.get("sd3.disable_mmap_load_safetensors", False), + ) self.sd3_checkbox.change( lambda sd3_checkbox: gr.Accordion(visible=sd3_checkbox), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 236cfdfbb..07264db26 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -310,6 +310,7 @@ def save_configuration( sd3_fused_backward_pass, clip_g, sd3_clip_l, + sd3_disable_mmap_load_safetensors, logit_mean, logit_std, mode_scale, @@ -598,6 +599,7 @@ def open_configuration( sd3_fused_backward_pass, clip_g, sd3_clip_l, + sd3_disable_mmap_load_safetensors, logit_mean, logit_std, mode_scale, @@ -920,6 +922,7 @@ def train_model( sd3_fused_backward_pass, clip_g, sd3_clip_l, + sd3_disable_mmap_load_safetensors, logit_mean, logit_std, mode_scale, @@ -1443,6 +1446,10 @@ def train_model( t5xxl_value = t5xxl elif sd3_checkbox: t5xxl_value = sd3_t5xxl + + disable_mmap_load_safetensors_value = None + if sd3_checkbox: + disable_mmap_load_safetensors_value = sd3_disable_mmap_load_safetensors config_toml_data = { "adaptive_noise_scale": ( @@ -1477,6 +1484,7 @@ def train_model( "debiased_estimation_loss": debiased_estimation_loss, "dynamo_backend": dynamo_backend, "dim_from_weights": dim_from_weights, + "disable_mmap_load_safetensors": disable_mmap_load_safetensors_value, "enable_bucket": enable_bucket, "epoch": int(epoch), "flip_aug": flip_aug, @@ -2879,6 +2887,7 @@ def update_LoRA_settings( sd3_training.sd3_cache_text_encoder_outputs_to_disk, sd3_training.clip_g, sd3_training.clip_l, + sd3_training.disable_mmap_load_safetensors, sd3_training.logit_mean, sd3_training.logit_std, sd3_training.mode_scale, From 65da590eab07ccf00e617e2459bc7f3d61fb0185 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 08:48:48 -0500 Subject: [PATCH 193/199] Add a bunch of missing SD3 parameters --- kohya_gui/class_sd3.py | 34 ++++++++++++++++++++++++++++++++++ kohya_gui/lora_gui.py | 25 +++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py index feeaf3c52..5939c260e 100644 --- a/kohya_gui/class_sd3.py +++ b/kohya_gui/class_sd3.py @@ -195,6 +195,28 @@ def noise_offset_type_change( info="Cache text encoder outputs to disk to speed up inference", interactive=True, ) + with gr.Row(): + self.clip_l_dropout_rate = gr.Number( + label="CLIP-L Dropout Rate", + value=self.config.get("sd3.clip_l_dropout_rate", 0.0), + interactive=True, + minimum=0.0, + info="Dropout rate for CLIP-L encoder" + ) + self.clip_g_dropout_rate = gr.Number( + label="CLIP-G Dropout Rate", + value=self.config.get("sd3.clip_g_dropout_rate", 0.0), + interactive=True, + minimum=0.0, + info="Dropout rate for CLIP-G encoder" + ) + self.t5_dropout_rate = gr.Number( + label="T5 Dropout Rate", + value=self.config.get("sd3.t5_dropout_rate", 0.0), + interactive=True, + 
minimum=0.0, + info="Dropout rate for T5-XXL encoder" + ) with gr.Row(): self.sd3_fused_backward_pass = gr.Checkbox( label="Fused Backward Pass", @@ -207,6 +229,18 @@ def noise_offset_type_change( info="Disable memory mapping when loading the model's .safetensors in SDXL.", value=self.config.get("sd3.disable_mmap_load_safetensors", False), ) + self.enable_scaled_pos_embed = gr.Checkbox( + label="Enable Scaled Positional Embeddings", + info="Enable scaled positional embeddings in the model.", + value=self.config.get("sd3.enable_scaled_pos_embed", False), + ) + self.pos_emb_random_crop_rate = gr.Number( + label="Positional Embedding Random Crop Rate", + value=self.config.get("sd3.pos_emb_random_crop_rate", 0.0), + interactive=True, + minimum=0.0, + info="Random crop rate for positional embeddings" + ) self.sd3_checkbox.change( lambda sd3_checkbox: gr.Accordion(visible=sd3_checkbox), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 07264db26..3d4ebfaba 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -309,13 +309,18 @@ def save_configuration( sd3_cache_text_encoder_outputs_to_disk, sd3_fused_backward_pass, clip_g, + clip_g_dropout_rate, sd3_clip_l, + sd3_clip_l_dropout_rate, sd3_disable_mmap_load_safetensors, + sd3_enable_scaled_pos_embed, logit_mean, logit_std, mode_scale, + pos_emb_random_crop_rate, save_clip, save_t5xxl, + sd3_t5_dropout_rate, sd3_t5xxl, t5xxl_device, t5xxl_dtype, @@ -598,13 +603,18 @@ def open_configuration( sd3_cache_text_encoder_outputs_to_disk, sd3_fused_backward_pass, clip_g, + clip_g_dropout_rate, sd3_clip_l, + sd3_clip_l_dropout_rate, sd3_disable_mmap_load_safetensors, + sd3_enable_scaled_pos_embed, logit_mean, logit_std, mode_scale, + pos_emb_random_crop_rate, save_clip, save_t5xxl, + sd3_t5_dropout_rate, sd3_t5xxl, t5xxl_device, t5xxl_dtype, @@ -921,13 +931,18 @@ def train_model( sd3_cache_text_encoder_outputs_to_disk, sd3_fused_backward_pass, clip_g, + clip_g_dropout_rate, sd3_clip_l, + sd3_clip_l_dropout_rate, sd3_disable_mmap_load_safetensors, + sd3_enable_scaled_pos_embed, logit_mean, logit_std, mode_scale, + pos_emb_random_crop_rate, save_clip, save_t5xxl, + sd3_t5_dropout_rate, sd3_t5xxl, t5xxl_device, t5xxl_dtype, @@ -1643,12 +1658,17 @@ def train_model( # "cache_text_encoder_outputs": see previous assignment above for code # "cache_text_encoder_outputs_to_disk": see previous assignment above for code "clip_g": clip_g if sd3_checkbox else None, + "clip_g_dropout_rate": clip_g_dropout_rate if sd3_checkbox else None, "clip_l": clip_l_value, + "clip_l_dropout_rate": sd3_clip_l_dropout_rate if sd3_checkbox else None, + "enable_scaled_pos_embed": sd3_enable_scaled_pos_embed if sd3_checkbox else None, "logit_mean": logit_mean if sd3_checkbox else None, "logit_std": logit_std if sd3_checkbox else None, "mode_scale": mode_scale if sd3_checkbox else None, + "pos_emb_random_crop_rate": pos_emb_random_crop_rate if sd3_checkbox else None, "save_clip": save_clip if sd3_checkbox else None, "save_t5xxl": save_t5xxl if sd3_checkbox else None, + "t5_dropout_rate": sd3_t5_dropout_rate if sd3_checkbox else None, # "t5xxl": see previous assignment above for code "t5xxl_device": t5xxl_device if sd3_checkbox else None, "t5xxl_dtype": t5xxl_dtype if sd3_checkbox else None, @@ -2886,13 +2906,18 @@ def update_LoRA_settings( sd3_training.sd3_cache_text_encoder_outputs, sd3_training.sd3_cache_text_encoder_outputs_to_disk, sd3_training.clip_g, + sd3_training.clip_g_dropout_rate, sd3_training.clip_l, + sd3_training.clip_l_dropout_rate, 
sd3_training.disable_mmap_load_safetensors, + sd3_training.enable_scaled_pos_embed, sd3_training.logit_mean, sd3_training.logit_std, sd3_training.mode_scale, + sd3_training.pos_emb_random_crop_rate, sd3_training.save_clip, sd3_training.save_t5xxl, + sd3_training.t5_dropout_rate, sd3_training.t5xxl, sd3_training.t5xxl_device, sd3_training.t5xxl_dtype, From 4fd25ced8585c4c03353d97e526e2e806932fd39 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 09:35:36 -0500 Subject: [PATCH 194/199] Fix clip_l issue for missing path --- kohya_gui/lora_gui.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 3d4ebfaba..5dc6ec460 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1452,6 +1452,8 @@ def train_model( clip_l_value = None if sd3_checkbox: + # print("Setting clip_l_value to sd3_clip_l") + # print("sd3_clip_l: ", sd3_clip_l) clip_l_value = sd3_clip_l elif flux1_checkbox: clip_l_value = clip_l @@ -1493,6 +1495,7 @@ def train_model( "caption_dropout_every_n_epochs": int(caption_dropout_every_n_epochs), "caption_dropout_rate": caption_dropout_rate, "caption_extension": caption_extension, + "clip_l": clip_l_value, "clip_skip": clip_skip if clip_skip != 0 else None, "color_aug": color_aug, "dataset_config": dataset_config, @@ -1659,7 +1662,7 @@ def train_model( # "cache_text_encoder_outputs_to_disk": see previous assignment above for code "clip_g": clip_g if sd3_checkbox else None, "clip_g_dropout_rate": clip_g_dropout_rate if sd3_checkbox else None, - "clip_l": clip_l_value, + # "clip_l": see previous assignment above for code "clip_l_dropout_rate": sd3_clip_l_dropout_rate if sd3_checkbox else None, "enable_scaled_pos_embed": sd3_enable_scaled_pos_embed if sd3_checkbox else None, "logit_mean": logit_mean if sd3_checkbox else None, @@ -1681,7 +1684,7 @@ def train_model( # "cache_text_encoder_outputs": see previous assignment above for code # "cache_text_encoder_outputs_to_disk": see previous assignment above for code "ae": ae if flux1_checkbox else None, - "clip_l": clip_l if flux1_checkbox else None, + # "clip_l": see previous assignment above for code "t5xxl": t5xxl_value, "discrete_flow_shift": float(discrete_flow_shift) if flux1_checkbox else None, "model_prediction_type": model_prediction_type if flux1_checkbox else None, @@ -2905,6 +2908,7 @@ def update_LoRA_settings( # SD3 Parameters sd3_training.sd3_cache_text_encoder_outputs, sd3_training.sd3_cache_text_encoder_outputs_to_disk, + sd3_training.sd3_fused_backward_pass, sd3_training.clip_g, sd3_training.clip_g_dropout_rate, sd3_training.clip_l, @@ -2922,7 +2926,6 @@ def update_LoRA_settings( sd3_training.t5xxl_device, sd3_training.t5xxl_dtype, sd3_training.sd3_text_encoder_batch_size, - sd3_training.sd3_fused_backward_pass, sd3_training.weighting_scheme, source_model.sd3_checkbox, ] From 8afcaf8be7c1b8869a15dfefd22a88aa298a809c Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 09:43:34 -0500 Subject: [PATCH 195/199] Fix train_t5xxl issue --- kohya_gui/lora_gui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 5dc6ec460..054bc005d 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1295,7 +1295,7 @@ def train_model( if split_qkv: kohya_lora_vars["split_qkv"] = True - if train_t5xxl: + if train_t5xxl and flux1_checkbox: kohya_lora_vars["train_t5xxl"] = True for key, value in kohya_lora_vars.items(): @@ -1342,7 +1342,7 @@ def train_model( 
"rank_dropout", "module_dropout", ] - network_module = "networks.lora" + network_module = "networks.lora" if sd3_checkbox else "networks.lora" kohya_lora_vars = { key: value for key, value in vars().items() From a10986ad3009de40e71b9b51d62d3fbd9aa44a8a Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 09:46:42 -0500 Subject: [PATCH 196/199] Fix network_module issue --- kohya_gui/lora_gui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 054bc005d..ce87cc3e1 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -1342,7 +1342,7 @@ def train_model( "rank_dropout", "module_dropout", ] - network_module = "networks.lora" if sd3_checkbox else "networks.lora" + network_module = "networks.lora_sd3" if sd3_checkbox else "networks.lora" kohya_lora_vars = { key: value for key, value in vars().items() From 6299e829051844b0ab28def4c9aeb0c8fb146669 Mon Sep 17 00:00:00 2001 From: bmaltais Date: Tue, 31 Dec 2024 09:57:52 -0500 Subject: [PATCH 197/199] Add uniform to weighting_scheme --- kohya_gui/class_sd3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kohya_gui/class_sd3.py b/kohya_gui/class_sd3.py index 5939c260e..207c5ba16 100644 --- a/kohya_gui/class_sd3.py +++ b/kohya_gui/class_sd3.py @@ -73,7 +73,7 @@ def noise_offset_type_change( with gr.Row(): self.weighting_scheme = gr.Dropdown( label="Weighting Scheme", - choices=["logit_normal", "sigma_sqrt", "mode", "cosmap"], + choices=["logit_normal", "sigma_sqrt", "mode", "cosmap", "uniform"], value=self.config.get("sd3.weighting_scheme", "logit_normal"), interactive=True, ) From 1a8f5511ed99a8afec9de102c8e552392cf682c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 Dec 2024 13:12:55 -0500 Subject: [PATCH 198/199] Bump crate-ci/typos from 1.23.6 to 1.28.1 (#2996) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.23.6 to 1.28.1. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.23.6...v1.28.1) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: bmaltais From e03ffc15accf515775c32d4d377ad359c41fc92a Mon Sep 17 00:00:00 2001 From: ruucm Date: Fri, 3 Jan 2025 07:50:46 +0900 Subject: [PATCH 199/199] Update README.md (#3031) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3fa142929..f7848896b 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ If you choose to use the interactive mode, the default values for the accelerate To install the necessary components for Runpod and run kohya_ss, follow these steps: -1. Select the Runpod pytorch 2.0.1 template. This is important. Other templates may not work. +1. Select the Runpod pytorch 2.2.0 template. This is important. Other templates may not work. 2. SSH into the Runpod.