mirror of https://github.com/bmaltais/kohya_ss

Add noise parameters support in GUI
parent fd4710423a
commit 1101cb018a

dreambooth_gui.py

@@ -109,7 +109,9 @@ def save_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -125,6 +127,7 @@ def save_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
@@ -227,7 +230,9 @@ def open_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -243,6 +248,7 @@ def open_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
@@ -328,7 +334,9 @@ def train_model(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -344,6 +352,7 @@ def train_model(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     headless_bool = True if headless.get('label') == 'True' else False

@@ -412,7 +421,9 @@ def train_model(

     # Check if subfolders are present. If not let the user know and return
     if not subfolders:
-        log.info(f'No {subfolders} were found in train_data_dir can\'t train...')
+        log.info(
+            f"No {subfolders} were found in train_data_dir can't train..."
+        )
         return

     total_steps = 0
@@ -423,7 +434,9 @@ def train_model(
         try:
             repeats = int(folder.split('_')[0])
         except ValueError:
-            log.info(f'Subfolder {folder} does not have a proper repeat value, please correct the name or remove it... can\'t train...')
+            log.info(
+                f"Subfolder {folder} does not have a proper repeat value, please correct the name or remove it... can't train..."
+            )
             continue

         # Count the number of images in the folder
@@ -451,7 +464,9 @@ def train_model(
         log.info(f'Folder {folder} : steps {steps}')

     if total_steps == 0:
-        log.info(f'No images were found in folder {train_data_dir}... please rectify!')
+        log.info(
+            f'No images were found in folder {train_data_dir}... please rectify!'
+        )
         return

     # Print the result
@@ -460,7 +475,8 @@ def train_model(
     if reg_data_dir == '':
         reg_factor = 1
     else:
-        log.info(f'Regularisation images are used... Will double the number of steps required...'
-        )
+        log.info(
+            f'Regularisation images are used... Will double the number of steps required...'
+        )
         reg_factor = 2

@@ -588,6 +604,7 @@ def train_model(
         save_last_n_steps_state=save_last_n_steps_state,
         use_wandb=use_wandb,
         wandb_api_key=wandb_api_key,
+        scale_v_pred_loss_like_noise_pred=scale_v_pred_loss_like_noise_pred,
     )

     run_cmd += run_cmd_sample(
@@ -800,7 +817,9 @@ def dreambooth_tab(
     bucket_reso_steps,
     caption_dropout_every_n_epochs,
     caption_dropout_rate,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     additional_parameters,
@@ -811,6 +830,7 @@ def dreambooth_tab(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ) = gradio_advanced_training(headless=headless)
 color_aug.change(
     color_aug_changed,
@@ -907,7 +927,9 @@ def dreambooth_tab(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -923,6 +945,7 @@ def dreambooth_tab(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ]

 button_open_config.click(
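
Nearly every hunk in this commit follows the same pattern: each training option is threaded through a flat positional parameter list that must stay in the same order across save_configuration, open_configuration, train_model, the tuple returned by gradio_advanced_training, and the settings_list wired to the buttons, which is why splitting the noise parameters and adding scale_v_pred_loss_like_noise_pred touches so many call sites (here and in the files below). A minimal sketch of that wiring, assuming nothing beyond stock Gradio; the component names and choices are illustrative, not the repo's code:

import gradio as gr

def train_model(noise_offset_type, noise_offset, adaptive_noise_scale):
    # Gradio passes `inputs` to the callback strictly by position, so this
    # signature must mirror the order of settings_list below.
    print(noise_offset_type, noise_offset, adaptive_noise_scale)

with gr.Blocks() as demo:
    noise_offset_type = gr.Dropdown(
        label='Noise offset type',
        choices=['Original', 'Multires'],
        value='Original',
    )
    noise_offset = gr.Slider(
        label='Noise offset', minimum=0.0, maximum=1.0, value=0.0
    )
    adaptive_noise_scale = gr.Slider(
        label='Adaptive noise scale', minimum=-1.0, maximum=1.0, value=0.0
    )
    # One shared list keeps every callback's positional view consistent.
    settings_list = [noise_offset_type, noise_offset, adaptive_noise_scale]
    gr.Button('Train model').click(train_model, inputs=settings_list)

demo.launch()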

finetune_gui.py

@@ -108,7 +108,9 @@ def save_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -124,6 +126,7 @@ def save_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
@@ -232,7 +235,9 @@ def open_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -248,6 +253,7 @@ def open_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
@@ -339,7 +345,9 @@ def train_model(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -355,6 +363,7 @@ def train_model(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     headless_bool = True if headless.get('label') == 'True' else False

@@ -555,6 +564,7 @@ def train_model(
         save_last_n_steps_state=save_last_n_steps_state,
         use_wandb=use_wandb,
         wandb_api_key=wandb_api_key,
+        scale_v_pred_loss_like_noise_pred=scale_v_pred_loss_like_noise_pred,
     )

     run_cmd += run_cmd_sample(
@@ -783,7 +793,9 @@ def finetune_tab(headless=False):
     bucket_reso_steps,
     caption_dropout_every_n_epochs,
     caption_dropout_rate,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     additional_parameters,
@@ -794,6 +806,7 @@ def finetune_tab(headless=False):
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ) = gradio_advanced_training(headless=headless)
 color_aug.change(
     color_aug_changed,
@@ -883,7 +896,9 @@ def finetune_tab(headless=False):
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -899,6 +914,7 @@ def finetune_tab(headless=False):
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ]

 button_run.click(train_model, inputs=[dummy_headless] + settings_list)

kohya_gui.py

@@ -19,6 +19,7 @@ from library.custom_logging import setup_logging
 # Set up logging
 log = setup_logging()

+
 def UI(**kwargs):
     css = ''


library/common_gui.py

@@ -1102,6 +1102,11 @@ def gradio_advanced_training(headless=False):
             value=False,
             info='If unchecked, tensorboard will be used as the default for logging.',
         )
+        scale_v_pred_loss_like_noise_pred = gr.Checkbox(
+            label='Scale v prediction loss',
+            value=False,
+            info='Only for SD v2 models. By scaling the loss according to the time step, the weights of global noise prediction and local noise prediction become the same, and the improvement of details may be expected.',
+        )
     return (
         # use_8bit_adam,
         xformers,
@@ -1137,6 +1142,7 @@ def gradio_advanced_training(headless=False):
         save_last_n_steps_state,
         use_wandb,
         wandb_api_key,
+        scale_v_pred_loss_like_noise_pred,
     )


@@ -1242,6 +1248,10 @@ def run_cmd_advanced_training(**kwargs):
     if random_crop:
         run_cmd += ' --random_crop'

+    scale_v_pred_loss_like_noise_pred = kwargs.get('scale_v_pred_loss_like_noise_pred')
+    if scale_v_pred_loss_like_noise_pred:
+        run_cmd += ' --scale_v_pred_loss_like_noise_pred'
+
     noise_offset_type = kwargs.get('noise_offset_type', 'Original')
     if noise_offset_type == 'Original':
         noise_offset = float(kwargs.get("noise_offset", 0))
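
The run_cmd_advanced_training hunk above cuts off just as the function starts dispatching on noise_offset_type. The remainder of that dispatch is not visible in this view; what follows is a plausible sketch, assuming a second 'Multires' mode to match the multires_* parameters and the sd-scripts flags that share the diff's parameter names (--noise_offset, --adaptive_noise_scale, --multires_noise_iterations, --multires_noise_discount). The guard conditions are assumptions:

def append_noise_args(run_cmd, **kwargs):
    # Hypothetical continuation, not taken from this commit.
    noise_offset_type = kwargs.get('noise_offset_type', 'Original')
    if noise_offset_type == 'Original':
        noise_offset = float(kwargs.get('noise_offset', 0))
        if noise_offset > 0:
            run_cmd += f' --noise_offset={noise_offset}'
            adaptive_noise_scale = float(kwargs.get('adaptive_noise_scale', 0))
            if adaptive_noise_scale != 0:
                # Adaptive scaling rides on top of a non-zero base offset.
                run_cmd += f' --adaptive_noise_scale={adaptive_noise_scale}'
    else:  # assumed 'Multires' branch
        multires_noise_iterations = int(kwargs.get('multires_noise_iterations', 0))
        if multires_noise_iterations > 0:
            run_cmd += f' --multires_noise_iterations={multires_noise_iterations}'
        multires_noise_discount = float(kwargs.get('multires_noise_discount', 0))
        if multires_noise_discount > 0:
            run_cmd += f' --multires_noise_discount={multires_noise_discount}'
    return run_cmd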

lora_gui.py

@@ -62,6 +62,7 @@ save_style_symbol = '\U0001f4be'   # 💾
 document_symbol = '\U0001F4C4'   # 📄
 path_of_this_folder = os.getcwd()

+
 def save_configuration(
     save_as,
     file_path,
@@ -101,7 +102,8 @@ def save_configuration(
     text_encoder_lr,
     unet_lr,
     network_dim,
-    lora_network_weights,dim_from_weights,
+    lora_network_weights,
+    dim_from_weights,
     color_aug,
     flip_aug,
     clip_skip,
@@ -125,7 +127,9 @@ def save_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     LoRA_type,
@@ -153,6 +157,7 @@ def save_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
     scale_weight_norms,
     network_dropout,
     rank_dropout,
@@ -242,7 +247,8 @@ def open_configuration(
     text_encoder_lr,
     unet_lr,
     network_dim,
-    lora_network_weights,dim_from_weights,
+    lora_network_weights,
+    dim_from_weights,
     color_aug,
     flip_aug,
     clip_skip,
@@ -266,7 +272,9 @@ def open_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     LoRA_type,
@@ -294,6 +302,7 @@ def open_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
     scale_weight_norms,
     network_dropout,
     rank_dropout,
@@ -375,7 +384,8 @@ def train_model(
     text_encoder_lr,
     unet_lr,
     network_dim,
-    lora_network_weights,dim_from_weights,
+    lora_network_weights,
+    dim_from_weights,
     color_aug,
     flip_aug,
     clip_skip,
@@ -399,7 +409,9 @@ def train_model(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     LoRA_type,
@@ -427,6 +439,7 @@ def train_model(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
     scale_weight_norms,
     network_dropout,
     rank_dropout,
@@ -593,7 +606,9 @@ def train_model(
                 * int(reg_factor)
             )
         )
-        log.info(f'max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}')
+        log.info(
+            f'max_train_steps ({total_steps} / {train_batch_size} / {gradient_accumulation_steps} * {epoch} * {reg_factor}) = {max_train_steps}'
+        )

         # calculate stop encoder training
         if stop_text_encoder_training_pct == None:
@@ -640,7 +655,7 @@ def train_model(
     run_cmd += f' --save_model_as={save_model_as}'
     if not float(prior_loss_weight) == 1.0:
         run_cmd += f' --prior_loss_weight={prior_loss_weight}'
-
+
     if LoRA_type == 'LoCon' or LoRA_type == 'LyCORIS/LoCon':
         try:
             import lycoris
@@ -651,7 +666,7 @@ def train_model(
             return
         run_cmd += f' --network_module=lycoris.kohya'
         run_cmd += f' --network_args "conv_dim={conv_dim}" "conv_alpha={conv_alpha}" "algo=lora"'
-
+
     if LoRA_type == 'LyCORIS/LoHa':
         try:
             import lycoris
@@ -753,7 +768,7 @@ def train_model(
         run_cmd += f' --network_weights="{lora_network_weights}"'
     if dim_from_weights:
         run_cmd += f' --dim_from_weights'
-
+
     if int(gradient_accumulation_steps) > 1:
         run_cmd += f' --gradient_accumulation_steps={int(gradient_accumulation_steps)}'
     if not output_name == '':
@@ -764,10 +779,10 @@ def train_model(
         run_cmd += f' --lr_scheduler_num_cycles="{epoch}"'
     if not lr_scheduler_power == '':
         run_cmd += f' --lr_scheduler_power="{lr_scheduler_power}"'
-
+
     if scale_weight_norms > 0.0:
         run_cmd += f' --scale_weight_norms="{scale_weight_norms}"'
-
+
     if network_dropout > 0.0:
         run_cmd += f' --network_dropout="{network_dropout}"'
-
+
@@ -823,6 +838,7 @@ def train_model(
         save_last_n_steps_state=save_last_n_steps_state,
         use_wandb=use_wandb,
         wandb_api_key=wandb_api_key,
+        scale_v_pred_loss_like_noise_pred=scale_v_pred_loss_like_noise_pred,
     )

     run_cmd += run_cmd_sample(
@@ -1011,7 +1027,7 @@ def lora_tab(
         lora_network_weights = gr.Textbox(
             label='LoRA network weights',
             placeholder='(Optional)',
-            info='Path to an existing LoRA network weights to resume training from'
+            info='Path to an existing LoRA network weights to resume training from',
         )
         lora_network_weights_file = gr.Button(
             document_symbol,
@@ -1126,7 +1142,7 @@ def lora_tab(
             'Kohya DyLoRA',
             'Kohya LoCon',
         }
-
+
         # Determine if LoRA network weights should be visible based on LoRA_type
         LoRA_network_weights_visible = LoRA_type in {
             'Standard',
@@ -1229,15 +1245,6 @@ def lora_tab(
             value=False,
             info='Enable weighted captions in the standard style (token:1.3). No commas inside parens, or shuffle/dropout may break the decoder.',
         )
-        scale_weight_norms = gr.Slider(
-            label="Scale weight norms",
-            value=0,
-            minimum=0.0,
-            maximum=1.0,
-            step=0.01,
-            info='Max Norm Regularization is a technique to stabilize network training by limiting the norm of network weights. It may be effective in suppressing overfitting of LoRA and improving stability when used with other LoRAs. See PR for details.',
-            interactive=True,
-        )
     with gr.Row():
         prior_loss_weight = gr.Number(
             label='Prior loss weight', value=1.0
@@ -1252,13 +1259,22 @@ def lora_tab(
             placeholder='(Optional) For Cosine with restart and polynomial only',
         )
     with gr.Row():
+        scale_weight_norms = gr.Slider(
+            label='Scale weight norms',
+            value=0,
+            minimum=0.0,
+            maximum=1.0,
+            step=0.01,
+            info='Max Norm Regularization is a technique to stabilize network training by limiting the norm of network weights. It may be effective in suppressing overfitting of LoRA and improving stability when used with other LoRAs. See PR for details.',
+            interactive=True,
+        )
         network_dropout = gr.Slider(
             label='Network dropout',
             value=0.0,
             minimum=0.0,
             maximum=1.0,
             step=0.01,
-            info='Is a normal probability dropout at the neuron level. In the case of LoRA, it is applied to the output of down. Recommended range 0.1 to 0.5'
+            info='Is a normal probability dropout at the neuron level. In the case of LoRA, it is applied to the output of down. Recommended range 0.1 to 0.5',
         )
         rank_dropout = gr.Slider(
             label='Rank dropout',
@@ -1266,7 +1282,7 @@ def lora_tab(
             minimum=0.0,
             maximum=1.0,
             step=0.01,
-            info='can specify `rank_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3'
+            info='can specify `rank_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3',
         )
         module_dropout = gr.Slider(
             label='Module dropout',
@@ -1274,7 +1290,7 @@ def lora_tab(
             minimum=0.0,
             maximum=1.0,
             step=0.01,
-            info='can specify `module_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3'
+            info='can specify `module_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3',
         )
         (
             # use_8bit_adam,
@@ -1298,7 +1314,9 @@ def lora_tab(
             bucket_reso_steps,
             caption_dropout_every_n_epochs,
             caption_dropout_rate,
-            noise_offset_type,noise_offset,adaptive_noise_scale,
+            noise_offset_type,
+            noise_offset,
+            adaptive_noise_scale,
             multires_noise_iterations,
             multires_noise_discount,
             additional_parameters,
@@ -1309,6 +1327,7 @@ def lora_tab(
             save_last_n_steps_state,
             use_wandb,
             wandb_api_key,
+            scale_v_pred_loss_like_noise_pred,
         ) = gradio_advanced_training(headless=headless)
         color_aug.change(
             color_aug_changed,
@@ -1326,7 +1345,14 @@ def lora_tab(
         LoRA_type.change(
             update_LoRA_settings,
             inputs=[LoRA_type],
-            outputs=[LoCon_row, kohya_advanced_lora, kohya_dylora, lora_network_weights, lora_network_weights_file, dim_from_weights],
+            outputs=[
+                LoCon_row,
+                kohya_advanced_lora,
+                kohya_dylora,
+                lora_network_weights,
+                lora_network_weights_file,
+                dim_from_weights,
+            ],
         )

     with gr.Tab('Tools'):
@@ -1401,7 +1427,8 @@ def lora_tab(
     text_encoder_lr,
     unet_lr,
     network_dim,
-    lora_network_weights,dim_from_weights,
+    lora_network_weights,
+    dim_from_weights,
     color_aug,
     flip_aug,
     clip_skip,
@@ -1425,7 +1452,9 @@ def lora_tab(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     LoRA_type,
@@ -1453,6 +1482,7 @@ def lora_tab(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
     scale_weight_norms,
     network_dropout,
     rank_dropout,
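
The max_train_steps hunk above only re-wraps the log line, but the formula it logs is easy to misread. A worked example with made-up numbers, following the float/ceil/int wrapping suggested by the `* int(reg_factor)` context lines:

import math

# Hypothetical inputs: 40 repeat-weighted images, batch size 2, no gradient
# accumulation, 10 epochs, regularisation images present (reg_factor = 2).
total_steps = 40
train_batch_size = 2
gradient_accumulation_steps = 1
epoch = 10
reg_factor = 2

max_train_steps = int(
    math.ceil(
        float(total_steps)
        / int(train_batch_size)
        / int(gradient_accumulation_steps)
        * int(epoch)
        * int(reg_factor)
    )
)
print(max_train_steps)  # (40 / 2 / 1) * 10 * 2 = 400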

setup.bat

@@ -21,9 +21,6 @@ call .\venv\Scripts\deactivate.bat

 :: Calling external python program to check for local modules
 python .\tools\check_local_modules.py
-if %errorlevel% equ 1 (
-    exit /b
-)

 call .\venv\Scripts\activate.bat


textual_inversion_gui.py

@@ -115,7 +115,9 @@ def save_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -130,6 +132,7 @@ def save_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
@@ -238,7 +241,9 @@ def open_configuration(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -253,6 +258,7 @@ def open_configuration(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     # Get list of function parameters and values
     parameters = list(locals().items())
@@ -344,7 +350,9 @@ def train_model(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -359,6 +367,7 @@ def train_model(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ):
     headless_bool = True if headless.get('label') == 'True' else False

@@ -598,6 +607,7 @@ def train_model(
         save_last_n_steps_state=save_last_n_steps_state,
         use_wandb=use_wandb,
         wandb_api_key=wandb_api_key,
+        scale_v_pred_loss_like_noise_pred=scale_v_pred_loss_like_noise_pred,
     )
     run_cmd += f' --token_string="{token_string}"'
     run_cmd += f' --init_word="{init_word}"'
@@ -868,7 +878,9 @@ def ti_tab(
     bucket_reso_steps,
     caption_dropout_every_n_epochs,
     caption_dropout_rate,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     additional_parameters,
@@ -879,6 +891,7 @@ def ti_tab(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ) = gradio_advanced_training(headless=headless)
 color_aug.change(
     color_aug_changed,
@@ -981,7 +994,9 @@ def ti_tab(
     caption_dropout_rate,
     optimizer,
     optimizer_args,
-    noise_offset_type,noise_offset,adaptive_noise_scale,
+    noise_offset_type,
+    noise_offset,
+    adaptive_noise_scale,
     multires_noise_iterations,
     multires_noise_discount,
     sample_every_n_steps,
@@ -996,6 +1011,7 @@ def ti_tab(
     save_last_n_steps_state,
     use_wandb,
     wandb_api_key,
+    scale_v_pred_loss_like_noise_pred,
 ]

 button_open_config.click(

tools/check_local_modules.py

@@ -1,6 +1,5 @@
 import argparse
 import subprocess
-import sys

 # Define color variables
 yellow_text = "\033[1;33m"
@@ -15,6 +14,12 @@ args = parser.parse_args()
 # Run pip freeze and capture the output
 output = subprocess.getoutput("pip freeze")

+# Remove lines containing "WARNING"
+output_lines = [line for line in output.splitlines() if "WARNING" not in line]
+
+# Reconstruct the output string without warning lines
+output = "\n".join(output_lines)
+
 # Check if modules are found in the output
 if output:
     print(f"{yellow_text}=============================================================")
@@ -25,25 +30,4 @@ if output:
     print("pip freeze > uninstall.txt")
     print("pip uninstall -y -r uninstall.txt")
     print(f"{yellow_text}============================================================={reset_text}")
-
-    if args.no_question:
-        sys.exit(1)  # Exit with code 1 for "no" without asking the user
-
-    # Ask the user if they want to continue
-    valid_input = False
-    while not valid_input:
-        print('Do you want to continue?')
-        print('')
-        print('[1] - Yes')
-        print('[2] - No')
-        user_input = input("Enter your choice (1 or 2): ")
-        if user_input.lower() == "2":
-            valid_input = True
-            sys.exit(1)  # Exit with code 1 for "no"
-        elif user_input.lower() == "1":
-            valid_input = True
-            sys.exit(0)  # Exit with code 0 for "yes"
-        else:
-            print("Invalid input. Please enter '1' or '2'.")
-else:
-    sys.exit(0)
+
+print('')
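
Taken together, these three hunks leave tools/check_local_modules.py as a warn-only script: it strips pip's own WARNING lines out of the `pip freeze` output, prints uninstall instructions if anything remains, and always exits 0, which is also why the errorlevel guard in setup.bat above could be dropped. A sketch of the resulting flow, reconstructed from the hunks; the warning banner text between the prints is not shown in this commit view and is marked as elided:

import argparse
import subprocess

yellow_text = "\033[1;33m"
reset_text = "\033[0m"

parser = argparse.ArgumentParser()
parser.add_argument('--no_question', action='store_true')  # apparently unused after this change
args = parser.parse_args()

# Run pip freeze and drop pip's WARNING lines so they are not mistaken
# for locally installed modules.
output = subprocess.getoutput("pip freeze")
output_lines = [line for line in output.splitlines() if "WARNING" not in line]
output = "\n".join(output_lines)

if output:
    # ...warning banner lines elided in this commit view...
    print("pip freeze > uninstall.txt")
    print("pip uninstall -y -r uninstall.txt")
    print(f"{yellow_text}============================================================={reset_text}")

print('')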