Merge pull request #1811 from rockerBOO/schedule-free-prodigy
Allow unknown schedule-free optimizers to continue to module loader
kohya-ss authored Dec 1, 2024
2 parents 34e7f50 + 6593cfb commit 14c9ba9
Showing 2 changed files with 36 additions and 6 deletions.
library/train_util.py: 15 changes (10 additions & 5 deletions)
@@ -4609,7 +4609,7 @@ def task():
 
 def get_optimizer(args, trainable_params):
     # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, PagedAdamW, PagedAdamW8bit, PagedAdamW32bit, Lion8bit, PagedLion8bit, AdEMAMix8bit, PagedAdEMAMix8bit, DAdaptation(DAdaptAdamPreprint), DAdaptAdaGrad, DAdaptAdam, DAdaptAdan, DAdaptAdanIP, DAdaptLion, DAdaptSGD, Adafactor"
 
     optimizer_type = args.optimizer_type
     if args.use_8bit_adam:
         assert (
@@ -4883,6 +4883,7 @@ def get_optimizer(args, trainable_params):
         optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
 
     elif optimizer_type.endswith("schedulefree".lower()):
+        should_train_optimizer = True
         try:
             import schedulefree as sf
         except ImportError:
@@ -4894,10 +4895,10 @@ def get_optimizer(args, trainable_params):
             optimizer_class = sf.SGDScheduleFree
             logger.info(f"use SGDScheduleFree optimizer | {optimizer_kwargs}")
         else:
-            raise ValueError(f"Unknown optimizer type: {optimizer_type}")
-        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
-        # make optimizer as train mode: we don't need to call train again, because eval will not be called in training loop
-        optimizer.train()
+            optimizer_class = None
+
+        if optimizer_class is not None:
+            optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
 
     if optimizer is None:
         # 任意のoptimizerを使う
@@ -4999,6 +5000,10 @@ def __instancecheck__(self, instance):
     optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
     optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()])
 
+    if hasattr(optimizer, 'train') and callable(optimizer.train):
+        # make optimizer as train mode: we don't need to call train again, because eval will not be called in training loop
+        optimizer.train()
+
     return optimizer_name, optimizer_args, optimizer


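Note on the train_util.py hunks above: the net effect is that an optimizer name ending in "schedulefree" that is not one of the known classes no longer raises immediately; optimizer_class stays None and control falls through to the generic module loader, and the train-mode switch is applied afterwards only when the optimizer supports it. Below is a minimal sketch of that control flow, assuming the schedulefree package; build_optimizer, the known-name table, and the prodigyplus.ProdigyPlusScheduleFree example are illustrative stand-ins, not code from the commit.

# Sketch only: unknown "*schedulefree" names no longer raise here; optimizer_class
# stays None so the generic "module.ClassName" loader below gets a chance.
import importlib

def build_optimizer(optimizer_type, trainable_params, lr, **optimizer_kwargs):
    optimizer_class = None
    optimizer = None

    if optimizer_type.lower().endswith("schedulefree"):
        import schedulefree as sf

        known = {
            "radamschedulefree": sf.RAdamScheduleFree,
            "adamwschedulefree": sf.AdamWScheduleFree,
            "sgdschedulefree": sf.SGDScheduleFree,
        }
        optimizer_class = known.get(optimizer_type.lower())  # None if unknown

    if optimizer_class is not None:
        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)

    if optimizer is None:
        # Simplified stand-in for the existing fallback loader, e.g.
        # --optimizer_type prodigyplus.ProdigyPlusScheduleFree (hypothetical example
        # of a third-party schedule-free optimizer resolved by dynamic import).
        module_name, class_name = optimizer_type.rsplit(".", 1)
        optimizer_class = getattr(importlib.import_module(module_name), class_name)
        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)

    # Schedule-free optimizers must be switched to train mode once; the guard keeps
    # this a no-op for optimizers that have no train()/eval() switching.
    if hasattr(optimizer, "train") and callable(optimizer.train):
        optimizer.train()

    return optimizer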
train_network.py: 27 changes (26 additions & 1 deletion)
@@ -61,6 +61,7 @@ def generate_step_logs(
         avr_loss,
         lr_scheduler,
         lr_descriptions,
+        optimizer=None,
         keys_scaled=None,
         mean_norm=None,
         maximum_norm=None,
@@ -93,6 +94,30 @@ def generate_step_logs(
                 logs[f"lr/d*lr/{lr_desc}"] = (
                     lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
                 )
+            if (
+                args.optimizer_type.lower().endswith("ProdigyPlusScheduleFree".lower()) and optimizer is not None
+            ):  # tracking d*lr value of unet.
+                logs["lr/d*lr"] = (
+                    optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
+                )
+            else:
+                idx = 0
+                if not args.network_train_unet_only:
+                    logs["lr/textencoder"] = float(lrs[0])
+                    idx = 1
+
+                for i in range(idx, len(lrs)):
+                    logs[f"lr/group{i}"] = float(lrs[i])
+                    if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():
+                        logs[f"lr/d*lr/group{i}"] = (
+                            lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
+                        )
+                    if (
+                        args.optimizer_type.lower().endswith("ProdigyPlusScheduleFree".lower()) and optimizer is not None
+                    ):
+                        logs[f"lr/d*lr/group{i}"] = (
+                            optimizer.param_groups[i]["d"] * optimizer.param_groups[i]["lr"]
+                        )
 
         return logs

@@ -1279,7 +1304,7 @@ def remove_model(old_ckpt_name):
 
                 if len(accelerator.trackers) > 0:
                     logs = self.generate_step_logs(
-                        args, current_loss, avr_loss, lr_scheduler, lr_descriptions, keys_scaled, mean_norm, maximum_norm
+                        args, current_loss, avr_loss, lr_scheduler, lr_descriptions, optimizer, keys_scaled, mean_norm, maximum_norm
                     )
                     accelerator.log(logs, step=global_step)
 
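For context on the train_network.py change: generate_step_logs now optionally receives the optimizer, so d*lr can be read straight from optimizer.param_groups when a Prodigy-style schedule-free optimizer such as ProdigyPlusScheduleFree is in use. Below is a rough, self-contained sketch of that logging path; collect_d_lr_logs and FakeProdigyLikeOptimizer are hypothetical stand-ins, not code from the repository.

# Sketch only: any optimizer whose param_groups carry a "d" estimate next to "lr"
# can be logged this way once the optimizer object is passed through.
def collect_d_lr_logs(optimizer_type, optimizer):
    logs = {}
    if optimizer_type.lower().endswith("prodigyplusschedulefree") and optimizer is not None:
        for i, group in enumerate(optimizer.param_groups):
            logs[f"lr/d*lr/group{i}"] = group["d"] * group["lr"]
    return logs

class FakeProdigyLikeOptimizer:
    # Only what the sketch touches: param_groups entries with "d" and "lr".
    def __init__(self):
        self.param_groups = [{"d": 0.5, "lr": 1.0}, {"d": 0.25, "lr": 1.0}]

print(collect_d_lr_logs("ProdigyPlusScheduleFree", FakeProdigyLikeOptimizer()))
# -> {'lr/d*lr/group0': 0.5, 'lr/d*lr/group1': 0.25}

Callers must pass the optimizer explicitly, which is what the updated self.generate_step_logs(...) call in the training loop does; omitting it keeps the previous behaviour because the parameter defaults to None.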
