Add gui support for noise_offset_random_strength, ip_noise_gamma, ip_noise_gamma_random_strength

pull/2167/head
bmaltais 2024-03-26 19:12:48 -04:00
parent 0a05c07149
commit 0f889a8eb6
11 changed files with 303 additions and 171 deletions

View File

@ -1 +1 @@
v23.0.15
v23.1.0

View File

@ -38,7 +38,8 @@ The GUI allows you to set the training parameters and generate and run the requi
- [No module called tkinter](#no-module-called-tkinter)
- [SDXL training](#sdxl-training)
- [Change History](#change-history)
- [2024/03/20 (v23.0.15)](#20240320-v23015)
- [2024/03/27 (v23.1.0)](#20240327-v2310)
- [2024/03/21 (v23.0.15)](#20240321-v23015)
- [2024/03/19 (v23.0.14)](#20240319-v23014)
- [2024/03/19 (v23.0.13)](#20240319-v23013)
- [2024/03/16 (v23.0.12)](#20240316-v23012)
@ -382,6 +383,11 @@ The documentation in this section will be moved to a separate document later.
## Change History
### 2024/03/27 (v23.1.0)
- Update sd-scripts to 0.8.6
- Add support for `noise_offset_random_strength`, `ip_noise_gamma`, and `ip_noise_gamma_random_strength` to the GUI.
### 2024/03/21 (v23.0.15)
- Add support for toml dataset configuration file to all trainers

View File

@ -281,6 +281,7 @@ class AdvancedTraining:
"Multires",
],
value="Original",
scale=1
)
with gr.Row(visible=True) as self.noise_offset_original:
self.noise_offset = gr.Slider(
@ -291,13 +292,18 @@ class AdvancedTraining:
step=0.01,
info='Recommended values are 0.05 - 0.15',
)
self.noise_offset_random_strength = gr.Checkbox(
label="Noise offset random strength",
value=False,
info='Use random strength between 0~noise_offset for noise offset',
)
self.adaptive_noise_scale = gr.Slider(
label="Adaptive noise scale",
value=0,
minimum=-1,
maximum=1,
step=0.001,
info="(Experimental, Optional) Since the latent is close to a normal distribution, it may be a good idea to specify a value around 1/10 the noise offset.",
info="Add `latent mean absolute value * this value` to noise_offset",
)
with gr.Row(visible=False) as self.noise_offset_multires:
self.multires_noise_iterations = gr.Slider(
@ -306,7 +312,7 @@ class AdvancedTraining:
minimum=0,
maximum=64,
step=1,
info='Enable multires noise (recommended values are 6-10)',
info='Enable multires noise (recommended values are 6-10)',
)
self.multires_noise_discount = gr.Slider(
label="Multires noise discount",
@ -316,6 +322,20 @@ class AdvancedTraining:
step=0.01,
info='Recommended values are 0.8. For LoRAs with small datasets, 0.1-0.3',
)
with gr.Row(visible=True):
self.ip_noise_gamma = gr.Slider(
label="IP noise gamma",
value=0,
minimum=0,
maximum=1,
step=0.01,
info='enable input perturbation noise. used for regularization. recommended value: around 0.1',
)
self.ip_noise_gamma_random_strength = gr.Checkbox(
label="IP noise gamma random strength",
value=False,
info='Use random strength between 0~ip_noise_gamma for input perturbation noise',
)
self.noise_offset_type.change(
noise_offset_type_change,
inputs=[self.noise_offset_type],

View File

@ -9,6 +9,7 @@ import gradio as gr
import shutil
import sys
import json
import math
# Set up logging
log = setup_logging()
@ -1150,6 +1151,14 @@ def run_cmd_advanced_training(**kwargs):
if kwargs.get("gradient_checkpointing"):
run_cmd += " --gradient_checkpointing"
if kwargs.get("ip_noise_gamma"):
if float(kwargs["ip_noise_gamma"]) > 0:
run_cmd += f' --ip_noise_gamma={kwargs["ip_noise_gamma"]}'
if kwargs.get("ip_noise_gamma_random_strength"):
if kwargs["ip_noise_gamma_random_strength"]:
run_cmd += f' --ip_noise_gamma_random_strength'
if "keep_tokens" in kwargs and int(kwargs["keep_tokens"]) > 0:
run_cmd += f' --keep_tokens="{int(kwargs["keep_tokens"])}"'
@ -1224,231 +1233,259 @@ def run_cmd_advanced_training(**kwargs):
else:
run_cmd += f' --lr_warmup_steps="{lr_warmup_steps}"'
gpu_ids = kwargs.get("gpu_ids")
if gpu_ids:
run_cmd += f' --gpu_ids="{gpu_ids}"'
if "gpu_ids" in kwargs:
gpu_ids = kwargs.get("gpu_ids")
if not gpu_ids == "":
run_cmd += f' --gpu_ids="{gpu_ids}"'
max_data_loader_n_workers = kwargs.get("max_data_loader_n_workers")
if max_data_loader_n_workers and not max_data_loader_n_workers == "":
run_cmd += f' --max_data_loader_n_workers="{max_data_loader_n_workers}"'
if "max_data_loader_n_workers" in kwargs:
max_data_loader_n_workers = kwargs.get("max_data_loader_n_workers")
if not max_data_loader_n_workers == "":
run_cmd += f' --max_data_loader_n_workers="{max_data_loader_n_workers}"'
if "max_grad_norm" in kwargs:
max_grad_norm = kwargs.get("max_grad_norm")
if max_grad_norm != "":
run_cmd += f' --max_grad_norm="{max_grad_norm}"'
max_resolution = kwargs.get("max_resolution")
if max_resolution:
run_cmd += f' --resolution="{max_resolution}"'
if "max_resolution" in kwargs:
run_cmd += fr' --resolution="{kwargs.get("max_resolution")}"'
max_timestep = kwargs.get("max_timestep")
if max_timestep and int(max_timestep) < 1000:
run_cmd += f" --max_timestep={int(max_timestep)}"
if "max_timestep" in kwargs:
max_timestep = kwargs.get("max_timestep")
if int(max_timestep) < 1000:
run_cmd += f" --max_timestep={int(max_timestep)}"
max_token_length = kwargs.get("max_token_length")
if max_token_length and int(max_token_length) > 75:
run_cmd += f" --max_token_length={int(max_token_length)}"
if "max_token_length" in kwargs:
max_token_length = kwargs.get("max_token_length")
if int(max_token_length) > 75:
run_cmd += f" --max_token_length={int(max_token_length)}"
max_train_epochs = kwargs.get("max_train_epochs")
if max_train_epochs and not max_train_epochs == "":
run_cmd += f" --max_train_epochs={max_train_epochs}"
if "max_train_epochs" in kwargs:
max_train_epochs = kwargs.get("max_train_epochs")
if not max_train_epochs == "":
run_cmd += f" --max_train_epochs={max_train_epochs}"
max_train_steps = kwargs.get("max_train_steps")
if max_train_steps:
run_cmd += f' --max_train_steps="{max_train_steps}"'
if "max_train_steps" in kwargs:
max_train_steps = kwargs.get("max_train_steps")
if not max_train_steps == "":
run_cmd += f' --max_train_steps="{max_train_steps}"'
mem_eff_attn = kwargs.get("mem_eff_attn")
if mem_eff_attn:
run_cmd += " --mem_eff_attn"
if "mem_eff_attn" in kwargs:
if kwargs.get("mem_eff_attn"): # Test if the value is true as it could be false
run_cmd += " --mem_eff_attn"
min_snr_gamma = kwargs.get("min_snr_gamma")
if min_snr_gamma and int(min_snr_gamma) >= 1:
run_cmd += f" --min_snr_gamma={int(min_snr_gamma)}"
if "min_snr_gamma" in kwargs:
min_snr_gamma = kwargs.get("min_snr_gamma")
if int(min_snr_gamma) >= 1:
run_cmd += f" --min_snr_gamma={int(min_snr_gamma)}"
min_timestep = kwargs.get("min_timestep")
if min_timestep and int(min_timestep) > 0:
run_cmd += f" --min_timestep={int(min_timestep)}"
if "min_timestep" in kwargs:
min_timestep = kwargs.get("min_timestep")
if int(min_timestep) > -1:
run_cmd += f" --min_timestep={int(min_timestep)}"
mixed_precision = kwargs.get("mixed_precision")
if mixed_precision:
run_cmd += f' --mixed_precision="{mixed_precision}"'
if "mixed_precision" in kwargs:
run_cmd += fr' --mixed_precision="{kwargs.get("mixed_precision")}"'
multi_gpu = kwargs.get("multi_gpu")
if multi_gpu:
run_cmd += " --multi_gpu"
if "multi_gpu" in kwargs:
if kwargs.get("multi_gpu"):
run_cmd += " --multi_gpu"
network_alpha = kwargs.get("network_alpha")
if network_alpha:
run_cmd += f' --network_alpha="{network_alpha}"'
if "network_alpha" in kwargs:
run_cmd += fr' --network_alpha="{kwargs.get("network_alpha")}"'
network_args = kwargs.get("network_args")
if network_args and len(network_args):
run_cmd += f" --network_args{network_args}"
if "network_args" in kwargs:
network_args = kwargs.get("network_args")
if network_args != "":
run_cmd += f' --network_args{network_args}'
network_dim = kwargs.get("network_dim")
if network_dim:
run_cmd += f" --network_dim={network_dim}"
if "network_dim" in kwargs:
run_cmd += fr' --network_dim={kwargs.get("network_dim")}'
network_dropout = kwargs.get("network_dropout")
if network_dropout and network_dropout > 0.0:
run_cmd += f" --network_dropout={network_dropout}"
if "network_dropout" in kwargs:
network_dropout = kwargs.get("network_dropout")
if network_dropout > 0.0:
run_cmd += f" --network_dropout={network_dropout}"
network_module = kwargs.get("network_module")
if network_module:
run_cmd += f" --network_module={network_module}"
if "network_module" in kwargs:
network_module = kwargs.get("network_module")
if network_module != "":
run_cmd += f' --network_module={network_module}'
network_train_text_encoder_only = kwargs.get("network_train_text_encoder_only")
if network_train_text_encoder_only:
run_cmd += " --network_train_text_encoder_only"
if "network_train_text_encoder_only" in kwargs:
if kwargs.get("network_train_text_encoder_only"):
run_cmd += " --network_train_text_encoder_only"
network_train_unet_only = kwargs.get("network_train_unet_only")
if network_train_unet_only:
run_cmd += " --network_train_unet_only"
if "network_train_unet_only" in kwargs:
if kwargs.get("network_train_unet_only"):
run_cmd += " --network_train_unet_only"
no_half_vae = kwargs.get("no_half_vae")
if no_half_vae:
run_cmd += " --no_half_vae"
if "no_half_vae" in kwargs:
if kwargs.get("no_half_vae"): # Test if the value is true as it could be false
run_cmd += " --no_half_vae"
no_token_padding = kwargs.get("no_token_padding")
if no_token_padding:
run_cmd += " --no_token_padding"
if "no_token_padding" in kwargs:
if kwargs.get("no_token_padding"): # Test if the value is true as it could be false
run_cmd += " --no_token_padding"
if "noise_offset_type" in kwargs:
noise_offset_type = kwargs["noise_offset_type"]
if kwargs["noise_offset_type"] == "Original":
noise_offset = float(kwargs.get("noise_offset", 0))
if noise_offset:
run_cmd += f" --noise_offset={noise_offset}"
adaptive_noise_scale = float(kwargs.get("adaptive_noise_scale", 0))
if adaptive_noise_scale != 0 and noise_offset > 0:
run_cmd += f" --adaptive_noise_scale={adaptive_noise_scale}"
if noise_offset_type == "Original":
if "noise_offset" in kwargs:
noise_offset = float(kwargs.get("noise_offset", 0))
if noise_offset:
run_cmd += f" --noise_offset={float(noise_offset)}"
if "adaptive_noise_scale" in kwargs:
adaptive_noise_scale = float(kwargs.get("adaptive_noise_scale", 0))
if adaptive_noise_scale != 0 and noise_offset > 0:
run_cmd += f" --adaptive_noise_scale={adaptive_noise_scale}"
if "noise_offset_random_strength" in kwargs:
if kwargs.get("noise_offset_random_strength"):
run_cmd += f" --noise_offset_random_strength"
elif noise_offset_type == "Multires":
multires_noise_iterations = int(kwargs.get("multires_noise_iterations", 0))
if multires_noise_iterations > 0:
run_cmd += f' --multires_noise_iterations="{multires_noise_iterations}"'
if "multires_noise_iterations" in kwargs:
multires_noise_iterations = int(kwargs.get("multires_noise_iterations", 0))
if multires_noise_iterations > 0:
run_cmd += f' --multires_noise_iterations="{multires_noise_iterations}"'
multires_noise_discount = float(kwargs.get("multires_noise_discount", 0))
if multires_noise_discount > 0:
run_cmd += f' --multires_noise_discount="{multires_noise_discount}"'
if "multires_noise_discount" in kwargs:
multires_noise_discount = float(kwargs.get("multires_noise_discount", 0))
if multires_noise_discount > 0:
run_cmd += f' --multires_noise_discount="{multires_noise_discount}"'
num_machines = kwargs.get("num_machines")
if num_machines and int(num_machines) > 1:
run_cmd += f" --num_machines={int(num_machines)}"
if "num_machines" in kwargs:
num_machines = kwargs.get("num_machines")
if int(num_machines) > 1:
run_cmd += f" --num_machines={int(num_machines)}"
num_processes = kwargs.get("num_processes")
if num_processes and int(num_processes) > 1:
run_cmd += f" --num_processes={int(num_processes)}"
if "num_processes" in kwargs:
num_processes = kwargs.get("num_processes")
if int(num_processes) > 1:
run_cmd += f" --num_processes={int(num_processes)}"
num_cpu_threads_per_process = kwargs.get("num_cpu_threads_per_process")
if num_cpu_threads_per_process and int(num_cpu_threads_per_process) > 1:
run_cmd += f" --num_cpu_threads_per_process={int(num_cpu_threads_per_process)}"
if "num_cpu_threads_per_process" in kwargs:
num_cpu_threads_per_process = kwargs.get("num_cpu_threads_per_process")
if int(num_cpu_threads_per_process) > 1:
run_cmd += f" --num_cpu_threads_per_process={int(num_cpu_threads_per_process)}"
optimizer_args = kwargs.get("optimizer_args")
if optimizer_args and optimizer_args != "":
run_cmd += f" --optimizer_args {optimizer_args}"
if "optimizer_args" in kwargs:
optimizer_args = kwargs.get("optimizer_args")
if optimizer_args != "":
run_cmd += f" --optimizer_args {optimizer_args}"
optimizer_type = kwargs.get("optimizer")
if optimizer_type:
run_cmd += f' --optimizer_type="{optimizer_type}"'
if "optimizer" in kwargs:
run_cmd += fr' --optimizer_type="{kwargs.get("optimizer")}"'
output_dir = kwargs.get("output_dir")
if output_dir:
if "output_dir" in kwargs:
output_dir = kwargs.get("output_dir")
if output_dir.startswith('"') and output_dir.endswith('"'):
output_dir = output_dir[1:-1]
if os.path.exists(output_dir):
run_cmd += rf' --output_dir="{output_dir}"'
output_name = kwargs.get("output_name")
if output_name and not output_name == "":
run_cmd += f' --output_name="{output_name}"'
if "output_name" in kwargs:
output_name = kwargs.get("output_name")
if not output_name == "":
run_cmd += f' --output_name="{output_name}"'
persistent_data_loader_workers = kwargs.get("persistent_data_loader_workers")
if persistent_data_loader_workers:
run_cmd += " --persistent_data_loader_workers"
if "persistent_data_loader_workers" in kwargs:
if kwargs.get("persistent_data_loader_workers"):
run_cmd += " --persistent_data_loader_workers"
pretrained_model_name_or_path = kwargs.get("pretrained_model_name_or_path")
if pretrained_model_name_or_path:
if "pretrained_model_name_or_path" in kwargs:
run_cmd += (
rf' --pretrained_model_name_or_path="{pretrained_model_name_or_path}"'
rf' --pretrained_model_name_or_path="{kwargs.get("pretrained_model_name_or_path")}"'
)
prior_loss_weight = kwargs.get("prior_loss_weight")
if prior_loss_weight and not float(prior_loss_weight) == 1.0:
run_cmd += f" --prior_loss_weight={prior_loss_weight}"
if "prior_loss_weight" in kwargs:
prior_loss_weight = kwargs.get("prior_loss_weight")
if not float(prior_loss_weight) == 1.0:
run_cmd += f" --prior_loss_weight={prior_loss_weight}"
random_crop = kwargs.get("random_crop")
if random_crop:
run_cmd += " --random_crop"
if "random_crop" in kwargs:
random_crop = kwargs.get("random_crop")
if random_crop:
run_cmd += " --random_crop"
reg_data_dir = kwargs.get("reg_data_dir")
if reg_data_dir and len(reg_data_dir):
if reg_data_dir.startswith('"') and reg_data_dir.endswith('"'):
reg_data_dir = reg_data_dir[1:-1]
if os.path.isdir(reg_data_dir):
run_cmd += rf' --reg_data_dir="{reg_data_dir}"'
if "reg_data_dir" in kwargs:
reg_data_dir = kwargs.get("reg_data_dir")
if len(reg_data_dir):
if reg_data_dir.startswith('"') and reg_data_dir.endswith('"'):
reg_data_dir = reg_data_dir[1:-1]
if os.path.isdir(reg_data_dir):
run_cmd += rf' --reg_data_dir="{reg_data_dir}"'
resume = kwargs.get("resume")
if resume:
run_cmd += f' --resume="{resume}"'
if "resume" in kwargs:
resume = kwargs.get("resume")
if len(resume):
run_cmd += f' --resume="{resume}"'
save_every_n_epochs = kwargs.get("save_every_n_epochs")
if save_every_n_epochs:
run_cmd += f' --save_every_n_epochs="{int(save_every_n_epochs)}"'
if "save_every_n_epochs" in kwargs:
save_every_n_epochs = kwargs.get("save_every_n_epochs")
if int(save_every_n_epochs) > 0:
run_cmd += f' --save_every_n_epochs="{int(save_every_n_epochs)}"'
save_every_n_steps = kwargs.get("save_every_n_steps")
if save_every_n_steps and int(save_every_n_steps) > 0:
run_cmd += f' --save_every_n_steps="{int(save_every_n_steps)}"'
if "save_every_n_steps" in kwargs:
save_every_n_steps = kwargs.get("save_every_n_steps")
if int(save_every_n_steps) > 0:
run_cmd += f' --save_every_n_steps="{int(save_every_n_steps)}"'
save_last_n_steps = kwargs.get("save_last_n_steps")
if save_last_n_steps and int(save_last_n_steps) > 0:
run_cmd += f' --save_last_n_steps="{int(save_last_n_steps)}"'
if "save_last_n_steps" in kwargs:
save_last_n_steps = kwargs.get("save_last_n_steps")
if int(save_last_n_steps) > 0:
run_cmd += f' --save_last_n_steps="{int(save_last_n_steps)}"'
save_last_n_steps_state = kwargs.get("save_last_n_steps_state")
if save_last_n_steps_state and int(save_last_n_steps_state) > 0:
run_cmd += f' --save_last_n_steps_state="{int(save_last_n_steps_state)}"'
if "save_last_n_steps_state" in kwargs:
save_last_n_steps_state = kwargs.get("save_last_n_steps_state")
if int(save_last_n_steps_state) > 0:
run_cmd += f' --save_last_n_steps_state="{int(save_last_n_steps_state)}"'
save_model_as = kwargs.get("save_model_as")
if save_model_as and not save_model_as == "same as source model":
run_cmd += f" --save_model_as={save_model_as}"
if "save_model_as" in kwargs:
save_model_as = kwargs.get("save_model_as")
if save_model_as != "same as source model":
run_cmd += f" --save_model_as={save_model_as}"
save_precision = kwargs.get("save_precision")
if save_precision:
run_cmd += f' --save_precision="{save_precision}"'
if "save_precision" in kwargs:
run_cmd += fr' --save_precision="{kwargs.get("save_precision")}"'
save_state = kwargs.get("save_state")
if save_state:
run_cmd += " --save_state"
if "save_state" in kwargs:
if kwargs.get("save_state"):
run_cmd += " --save_state"
scale_v_pred_loss_like_noise_pred = kwargs.get("scale_v_pred_loss_like_noise_pred")
if scale_v_pred_loss_like_noise_pred:
run_cmd += " --scale_v_pred_loss_like_noise_pred"
if "scale_v_pred_loss_like_noise_pred" in kwargs:
if kwargs.get("scale_v_pred_loss_like_noise_pred"):
run_cmd += " --scale_v_pred_loss_like_noise_pred"
scale_weight_norms = kwargs.get("scale_weight_norms")
if scale_weight_norms and scale_weight_norms > 0.0:
run_cmd += f' --scale_weight_norms="{scale_weight_norms}"'
if "scale_weight_norms" in kwargs:
scale_weight_norms = kwargs.get("scale_weight_norms")
if scale_weight_norms > 0.0:
run_cmd += f' --scale_weight_norms="{scale_weight_norms}"'
seed = kwargs.get("seed")
if seed and seed != "":
run_cmd += f' --seed="{seed}"'
if "seed" in kwargs:
seed = kwargs.get("seed")
if seed != "":
run_cmd += f' --seed="{seed}"'
shuffle_caption = kwargs.get("shuffle_caption")
if shuffle_caption:
run_cmd += " --shuffle_caption"
if "shuffle_caption" in kwargs:
if kwargs.get("shuffle_caption"):
run_cmd += " --shuffle_caption"
stop_text_encoder_training = kwargs.get("stop_text_encoder_training")
if stop_text_encoder_training and stop_text_encoder_training > 0:
run_cmd += f' --stop_text_encoder_training="{stop_text_encoder_training}"'
if "stop_text_encoder_training" in kwargs:
stop_text_encoder_training = kwargs.get("stop_text_encoder_training")
if stop_text_encoder_training > 0:
run_cmd += f' --stop_text_encoder_training="{stop_text_encoder_training}"'
text_encoder_lr = kwargs.get("text_encoder_lr")
if text_encoder_lr and (float(text_encoder_lr) > 0):
run_cmd += f" --text_encoder_lr={text_encoder_lr}"
if "text_encoder_lr" in kwargs:
text_encoder_lr = kwargs.get("text_encoder_lr")
if float(text_encoder_lr) > -1:
run_cmd += f" --text_encoder_lr={text_encoder_lr}"
train_batch_size = kwargs.get("train_batch_size")
if train_batch_size:
run_cmd += f' --train_batch_size="{train_batch_size}"'
if "train_batch_size" in kwargs:
run_cmd += fr' --train_batch_size="{kwargs.get("train_batch_size")}"'
training_comment = kwargs.get("training_comment")
if training_comment and len(training_comment):

View File

@ -122,9 +122,12 @@ def save_configuration(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -254,9 +257,12 @@ def open_configuration(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -381,9 +387,12 @@ def train_model(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -567,6 +576,8 @@ def train_model(
"full_fp16": full_fp16,
"gradient_accumulation_steps": gradient_accumulation_steps,
"gradient_checkpointing": gradient_checkpointing,
"ip_noise_gamma": ip_noise_gamma,
"ip_noise_gamma_random_strength": ip_noise_gamma_random_strength,
"keep_tokens": keep_tokens,
"learning_rate": learning_rate,
"logging_dir": logging_dir,
@ -593,6 +604,7 @@ def train_model(
"multires_noise_iterations": multires_noise_iterations,
"no_token_padding": no_token_padding,
"noise_offset": noise_offset,
"noise_offset_random_strength": noise_offset_random_strength,
"noise_offset_type": noise_offset_type,
"optimizer": optimizer,
"optimizer_args": optimizer_args,
@ -847,9 +859,12 @@ def dreambooth_tab(
basic_training.lr_scheduler_args,
advanced_training.noise_offset_type,
advanced_training.noise_offset,
advanced_training.noise_offset_random_strength,
advanced_training.adaptive_noise_scale,
advanced_training.multires_noise_iterations,
advanced_training.multires_noise_discount,
advanced_training.ip_noise_gamma,
advanced_training.ip_noise_gamma_random_strength,
sample.sample_every_n_steps,
sample.sample_every_n_epochs,
sample.sample_sampler,

View File

@ -130,9 +130,12 @@ def save_configuration(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -270,9 +273,12 @@ def open_configuration(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -417,9 +423,12 @@ def train_model(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -602,6 +611,8 @@ def train_model(
"gradient_accumulation_steps": gradient_accumulation_steps,
"gradient_checkpointing": gradient_checkpointing,
"in_json": in_json,
"ip_noise_gamma": ip_noise_gamma,
"ip_noise_gamma_random_strength": ip_noise_gamma_random_strength,
"keep_tokens": keep_tokens,
"learning_rate": learning_rate,
"logging_dir": logging_dir,
@ -625,6 +636,7 @@ def train_model(
"multires_noise_discount": multires_noise_discount,
"multires_noise_iterations": multires_noise_iterations,
"noise_offset": noise_offset,
"noise_offset_random_strength": noise_offset_random_strength,
"noise_offset_type": noise_offset_type,
"optimizer": optimizer,
"optimizer_args": optimizer_args,
@ -941,9 +953,12 @@ def finetune_tab(headless=False, config: dict = {}):
basic_training.lr_scheduler_args,
advanced_training.noise_offset_type,
advanced_training.noise_offset,
advanced_training.noise_offset_random_strength,
advanced_training.adaptive_noise_scale,
advanced_training.multires_noise_iterations,
advanced_training.multires_noise_discount,
advanced_training.ip_noise_gamma,
advanced_training.ip_noise_gamma_random_strength,
sample.sample_every_n_steps,
sample.sample_every_n_epochs,
sample.sample_sampler,

View File

@ -160,9 +160,12 @@ def save_configuration(
max_grad_norm,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
LoRA_type,
factor,
bypass_mode,
@ -341,9 +344,12 @@ def open_configuration(
max_grad_norm,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
LoRA_type,
factor,
bypass_mode,
@ -550,9 +556,12 @@ def train_model(
max_grad_norm,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
LoRA_type,
factor,
bypass_mode,
@ -923,6 +932,8 @@ def train_model(
"full_fp16": full_fp16,
"gradient_accumulation_steps": gradient_accumulation_steps,
"gradient_checkpointing": gradient_checkpointing,
"ip_noise_gamma": ip_noise_gamma,
"ip_noise_gamma_random_strength": ip_noise_gamma_random_strength,
"keep_tokens": keep_tokens,
"learning_rate": learning_rate,
"logging_dir": logging_dir,
@ -958,6 +969,7 @@ def train_model(
"network_train_text_encoder_only": network_train_text_encoder_only,
"no_half_vae": True if sdxl and sdxl_no_half_vae else None,
"noise_offset": noise_offset,
"noise_offset_random_strength": noise_offset_random_strength,
"noise_offset_type": noise_offset_type,
"optimizer": optimizer,
"optimizer_args": optimizer_args,
@ -1977,9 +1989,12 @@ def lora_tab(
basic_training.max_grad_norm,
advanced_training.noise_offset_type,
advanced_training.noise_offset,
advanced_training.noise_offset_random_strength,
advanced_training.adaptive_noise_scale,
advanced_training.multires_noise_iterations,
advanced_training.multires_noise_discount,
advanced_training.ip_noise_gamma,
advanced_training.ip_noise_gamma_random_strength,
LoRA_type,
factor,
bypass_mode,

View File

@ -123,9 +123,12 @@ def save_configuration(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -258,9 +261,12 @@ def open_configuration(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -386,9 +392,12 @@ def train_model(
lr_scheduler_args,
noise_offset_type,
noise_offset,
noise_offset_random_strength,
adaptive_noise_scale,
multires_noise_iterations,
multires_noise_discount,
ip_noise_gamma,
ip_noise_gamma_random_strength,
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
@ -555,6 +564,8 @@ def train_model(
full_fp16=full_fp16,
gradient_accumulation_steps=gradient_accumulation_steps,
gradient_checkpointing=gradient_checkpointing,
ip_noise_gamma=ip_noise_gamma,
ip_noise_gamma_random_strength=ip_noise_gamma_random_strength,
keep_tokens=keep_tokens,
learning_rate=learning_rate,
logging_dir=logging_dir,
@ -582,6 +593,7 @@ def train_model(
no_half_vae=True if sdxl and sdxl_no_half_vae else None,
no_token_padding=no_token_padding,
noise_offset=noise_offset,
noise_offset_random_strength=noise_offset_random_strength,
noise_offset_type=noise_offset_type,
optimizer=optimizer,
optimizer_args=optimizer_args,
@ -918,9 +930,12 @@ def ti_tab(headless=False, default_output_dir=None, config: dict = {}):
basic_training.lr_scheduler_args,
advanced_training.noise_offset_type,
advanced_training.noise_offset,
advanced_training.noise_offset_random_strength,
advanced_training.adaptive_noise_scale,
advanced_training.multires_noise_iterations,
advanced_training.multires_noise_discount,
advanced_training.ip_noise_gamma,
advanced_training.ip_noise_gamma_random_strength,
sample.sample_every_n_steps,
sample.sample_every_n_epochs,
sample.sample_sampler,

View File

@ -22,8 +22,8 @@ lycoris_lora==2.2.0.post3
# tensorflow==2.14.0
# for WD14 captioning (onnx)
omegaconf==2.3.0
onnx==1.14.1
onnxruntime-gpu==1.16.0
onnx==1.15.0
onnxruntime-gpu==1.17.1
# onnxruntime==1.16.0
# this is for onnx:
# tensorboard==2.14.1

View File

@ -1,5 +1,5 @@
{
"adaptive_noise_scale": 0,
"adaptive_noise_scale": 0.005,
"additional_parameters": "",
"bucket_no_upscale": true,
"bucket_reso_steps": 1,
@ -12,13 +12,15 @@
"color_aug": false,
"dataset_config": "",
"enable_bucket": true,
"epoch": 4,
"epoch": 8,
"flip_aug": false,
"full_fp16": false,
"gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": false,
"init_word": "*",
"ip_noise_gamma": 0.1,
"ip_noise_gamma_random_strength": true,
"keep_tokens": "0",
"learning_rate": 0.0001,
"log_tracker_config": "",
@ -47,7 +49,8 @@
"multires_noise_iterations": 8,
"no_token_padding": false,
"noise_offset": 0.05,
"noise_offset_type": "Multires",
"noise_offset_random_strength": true,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"num_machines": 1,
"num_processes": 1,

View File

@ -1,13 +1,14 @@
{
"LoRA_type": "Kohya LoCon",
"LyCORIS_preset": "full",
"adaptive_noise_scale": 0,
"adaptive_noise_scale": 0.005,
"additional_parameters": "",
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"bucket_no_upscale": true,
"bucket_reso_steps": 64,
"bypass_mode": false,
"cache_latents": true,
"cache_latents_to_disk": false,
"caption_dropout_every_n_epochs": 0.0,
@ -20,12 +21,14 @@
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 16,
"dataset_config": "",
"debiased_estimation_loss": false,
"decompose_both": false,
"dim_from_weights": false,
"dora_wd": false,
"down_lr_weight": "",
"enable_bucket": true,
"epoch": 1,
"epoch": 8,
"factor": -1,
"flip_aug": false,
"fp8_base": false,
@ -34,6 +37,8 @@
"gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": false,
"ip_noise_gamma": 0.1,
"ip_noise_gamma_random_strength": true,
"keep_tokens": "0",
"learning_rate": 0.0005,
"log_tracker_config": "",
@ -68,6 +73,7 @@
"network_dim": 16,
"network_dropout": 0.1,
"noise_offset": 0.05,
"noise_offset_random_strength": true,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"num_machines": 1,