Commit

Fix LoRA config display issue
bmaltais committed Jan 6, 2024
1 parent 1992e19 commit f579563
Showing 3 changed files with 73 additions and 17 deletions.
2 changes: 1 addition & 1 deletion .release
@@ -1 +1 @@
v22.4.1
v22.5.0
4 changes: 4 additions & 0 deletions README.md
@@ -503,6 +503,10 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b


## Change History
* 2024/01/10 (v22.5.0)
- Merged sd-scripts updates
- Fix LoRA config display after load that would sometimes hide some of the fields

* 2024/01/02 (v22.4.1)
- Minor bug fixes and enhancements.

84 changes: 68 additions & 16 deletions lora_gui.py
@@ -125,15 +125,22 @@ def save_configuration(
caption_dropout_rate,
optimizer,
optimizer_args,
lr_scheduler_args,max_grad_norm,
lr_scheduler_args,
max_grad_norm,
noise_offset_type,
noise_offset,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
LoRA_type,
factor,
use_cp,use_tucker,use_scalar,rank_dropout_scale,constrain,rescaled,train_norm,
use_cp,
use_tucker,
use_scalar,
rank_dropout_scale,
constrain,
rescaled,
train_norm,
decompose_both,
train_on_input,
conv_dim,
@@ -280,15 +287,22 @@ def open_configuration(
caption_dropout_rate,
optimizer,
optimizer_args,
lr_scheduler_args,max_grad_norm,
lr_scheduler_args,
max_grad_norm,
noise_offset_type,
noise_offset,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
LoRA_type,
factor,
use_cp,use_tucker,use_scalar,rank_dropout_scale,constrain,rescaled,train_norm,
use_cp,
use_tucker,
use_scalar,
rank_dropout_scale,
constrain,
rescaled,
train_norm,
decompose_both,
train_on_input,
conv_dim,
@@ -378,7 +392,18 @@ def open_configuration(
values.append(json_value if json_value is not None else value)

# This next section makes the LoCon/LyCORIS parameter rows visible when LoRA_type is one of the types below
if my_data.get("LoRA_type", "Standard") == "LoCon":
if my_data.get("LoRA_type", "Standard") in {
"LoCon",
"Kohya DyLoRA",
"Kohya LoCon",
"LoRA-FA",
"LyCORIS/Diag-OFT",
"LyCORIS/DyLoRA",
"LyCORIS/LoHa",
"LyCORIS/LoKr",
"LyCORIS/LoCon",
"LyCORIS/GLoRA",
}:
values.append(gr.Row.update(visible=True))
else:
values.append(gr.Row.update(visible=False))
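Widening this membership check is part of the display fix: after loading a config, the extra parameter row stays visible for all of the LoCon/LyCORIS variants rather than only plain LoCon. As a minimal, hypothetical sketch of the same pattern (assuming Gradio 3.x, where `gr.Row.update(visible=...)` toggles a row container; the set and helper names below are illustrative, not the project's):

```python
import gradio as gr

# Illustrative: LoRA types whose extra (LoCon/LyCORIS) parameters should be shown.
LOCON_LIKE_TYPES = {
    "LoCon", "Kohya DyLoRA", "Kohya LoCon", "LoRA-FA",
    "LyCORIS/Diag-OFT", "LyCORIS/DyLoRA", "LyCORIS/LoHa",
    "LyCORIS/LoKr", "LyCORIS/LoCon", "LyCORIS/GLoRA",
}

def locon_row_update(my_data: dict):
    # Show the row only when the loaded config uses a LoCon-style type.
    visible = my_data.get("LoRA_type", "Standard") in LOCON_LIKE_TYPES
    return gr.Row.update(visible=visible)
```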
@@ -455,15 +480,22 @@ def train_model(
caption_dropout_rate,
optimizer,
optimizer_args,
lr_scheduler_args,max_grad_norm,
lr_scheduler_args,
max_grad_norm,
noise_offset_type,
noise_offset,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
LoRA_type,
factor,
use_cp,use_tucker,use_scalar,rank_dropout_scale,constrain,rescaled,train_norm,
use_cp,
use_tucker,
use_scalar,
rank_dropout_scale,
constrain,
rescaled,
train_norm,
decompose_both,
train_on_input,
conv_dim,
@@ -825,9 +857,7 @@ def train_model(
)
return
run_cmd += f" --network_module=lycoris.kohya"
run_cmd += (
f' --network_args "preset={LyCORIS_preset}" "rank_dropout={rank_dropout}" "module_dropout={module_dropout}" "use_tucker={use_tucker}" "use_scalar={use_scalar}" "rank_dropout_scale={rank_dropout_scale}" "algo=full" "train_norm={train_norm}"'
)
run_cmd += f' --network_args "preset={LyCORIS_preset}" "rank_dropout={rank_dropout}" "module_dropout={module_dropout}" "use_tucker={use_tucker}" "use_scalar={use_scalar}" "rank_dropout_scale={rank_dropout_scale}" "algo=full" "train_norm={train_norm}"'
# This is a hack to fix a train_network LoHA logic issue
if not network_dropout > 0.0:
run_cmd += f' --network_dropout="{network_dropout}"'
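For orientation, a small standalone sketch of roughly what this hunk assembles, using made-up values rather than the GUI's defaults; the final guard mirrors the LoHA workaround noted in the comment (the flag is appended only when network_dropout is not above 0.0, i.e. exactly 0.0):

```python
# Illustrative values only, not defaults taken from the GUI.
LyCORIS_preset, rank_dropout, module_dropout = "full", 0.0, 0.0
use_tucker = use_scalar = rank_dropout_scale = False
train_norm = True
network_dropout = 0.0

run_cmd = " --network_module=lycoris.kohya"
run_cmd += (
    f' --network_args "preset={LyCORIS_preset}" "rank_dropout={rank_dropout}"'
    f' "module_dropout={module_dropout}" "use_tucker={use_tucker}"'
    f' "use_scalar={use_scalar}" "rank_dropout_scale={rank_dropout_scale}"'
    f' "algo=full" "train_norm={train_norm}"'
)
# Emit --network_dropout only when the value is not greater than 0.0.
if not network_dropout > 0.0:
    run_cmd += f' --network_dropout="{network_dropout}"'
print(run_cmd)
```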
@@ -1504,18 +1534,28 @@ def update_LoRA_settings(
"gr_type": gr.Slider,
"update_params": {
"maximum": 100000
if LoRA_type in {"LyCORIS/LoHa", "LyCORIS/LoKr", "LyCORIS/Diag-OFT"}
if LoRA_type
in {
"LyCORIS/LoHa",
"LyCORIS/LoKr",
"LyCORIS/Diag-OFT",
}
else 512,
"value": 512, # if conv_dim > 512 else conv_dim,
"value": conv_dim, # if conv_dim > 512 else conv_dim,
},
},
"network_dim": {
"gr_type": gr.Slider,
"update_params": {
"maximum": 100000
if LoRA_type in {"LyCORIS/LoHa", "LyCORIS/LoKr", "LyCORIS/Diag-OFT"}
if LoRA_type
in {
"LyCORIS/LoHa",
"LyCORIS/LoKr",
"LyCORIS/Diag-OFT",
}
else 512,
"value": 512, # if network_dim > 512 else network_dim,
"value": network_dim, # if network_dim > 512 else network_dim,
},
},
"use_cp": {
@@ -1790,7 +1830,13 @@ def update_LoRA_settings(
factor,
conv_dim,
network_dim,
use_cp,use_tucker,use_scalar,rank_dropout_scale,constrain,rescaled,train_norm,
use_cp,
use_tucker,
use_scalar,
rank_dropout_scale,
constrain,
rescaled,
train_norm,
decompose_both,
train_on_input,
scale_weight_norms,
@@ -1913,7 +1959,13 @@ def update_LoRA_settings(
advanced_training.multires_noise_discount,
LoRA_type,
factor,
use_cp,use_tucker,use_scalar,rank_dropout_scale,constrain,rescaled,train_norm,
use_cp,
use_tucker,
use_scalar,
rank_dropout_scale,
constrain,
rescaled,
train_norm,
decompose_both,
train_on_input,
conv_dim,
