Improving UI a bit

pull/1133/head
bmaltais 2023-07-02 13:30:48 -04:00
parent 66363f57ac
commit be7c2cacaf
5 changed files with 872 additions and 759 deletions

View File

@ -52,7 +52,7 @@ def save_configuration(
pretrained_model_name_or_path,
v2,
v_parameterization,
sdxl,
sdxl_checkbox,
train_dir,
image_folder,
output_dir,
@ -128,6 +128,8 @@ def save_configuration(
use_wandb,
wandb_api_key,
scale_v_pred_loss_like_noise_pred,
sdxl_cache_text_encoder_outputs,
sdxl_no_half_vae,
):
# Get list of function parameters and values
parameters = list(locals().items())
@ -176,7 +178,7 @@ def open_configuration(
pretrained_model_name_or_path,
v2,
v_parameterization,
sdxl,
sdxl_checkbox,
train_dir,
image_folder,
output_dir,
@ -252,6 +254,8 @@ def open_configuration(
use_wandb,
wandb_api_key,
scale_v_pred_loss_like_noise_pred,
sdxl_cache_text_encoder_outputs,
sdxl_no_half_vae,
):
# Get list of function parameters and values
parameters = list(locals().items())
@ -288,7 +292,7 @@ def train_model(
pretrained_model_name_or_path,
v2,
v_parameterization,
sdxl,
sdxl_checkbox,
train_dir,
image_folder,
output_dir,
@ -364,6 +368,8 @@ def train_model(
use_wandb,
wandb_api_key,
scale_v_pred_loss_like_noise_pred,
sdxl_cache_text_encoder_outputs,
sdxl_no_half_vae,
):
print_only_bool = True if print_only.get('label') == 'True' else False
log.info(f'Start Finetuning...')
@ -477,7 +483,7 @@ def train_model(
log.info(f'lr_warmup_steps = {lr_warmup_steps}')
run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process}'
if sdxl:
if sdxl_checkbox:
run_cmd += f' "./sdxl_train.py"'
else:
run_cmd += f' "./fine_tune.py"'
@ -521,6 +527,12 @@ def train_model(
run_cmd += f' --output_name="{output_name}"'
if int(max_token_length) > 75:
run_cmd += f' --max_token_length={max_token_length}'
if sdxl_cache_text_encoder_outputs:
run_cmd += f' --cache_text_encoder_outputs'
if sdxl_no_half_vae:
run_cmd += f' --no_half_vae'
run_cmd += run_cmd_training(
learning_rate=learning_rate,
@ -634,7 +646,7 @@ def finetune_tab(headless=False):
pretrained_model_name_or_path,
v2,
v_parameterization,
sdxl,
sdxl_checkbox,
save_model_as,
model_list,
) = gradio_source_model(headless=headless)
@ -780,6 +792,22 @@ def finetune_tab(headless=False):
optimizer,
optimizer_args,
) = gradio_training(learning_rate_value='1e-5')
# SDXL parameters
with gr.Row(visible=False) as sdxl_row:
sdxl_cache_text_encoder_outputs = gr.Checkbox(
label='(SDXL) Cache text encoder outputs',
info='Cache the outputs of the text encoders. This option is useful to reduce the GPU memory usage. This option cannot be used with options for shuffling or dropping the captions.',
value=False
)
sdxl_no_half_vae = gr.Checkbox(
label='(SDXL) No half VAE',
info='Disable the half-precision (mixed-precision) VAE. VAE for SDXL seems to produce NaNs in some cases. This option is useful to avoid the NaNs.',
value=False
)
sdxl_checkbox.change(lambda sdxl_checkbox: gr.Row.update(visible=sdxl_checkbox), inputs=[sdxl_checkbox], outputs=[sdxl_row])
with gr.Row():
dataset_repeats = gr.Textbox(label='Dataset repeats', value=40)
train_text_encoder = gr.Checkbox(
@ -861,7 +889,7 @@ def finetune_tab(headless=False):
pretrained_model_name_or_path,
v2,
v_parameterization,
sdxl,
sdxl_checkbox,
train_dir,
image_folder,
output_dir,
@ -937,6 +965,8 @@ def finetune_tab(headless=False):
use_wandb,
wandb_api_key,
scale_v_pred_loss_like_noise_pred,
sdxl_cache_text_encoder_outputs,
sdxl_no_half_vae,
]
button_run.click(

View File

@ -51,11 +51,11 @@ def UI(**kwargs):
output_dir_input,
logging_dir_input,
) = dreambooth_tab(headless=headless)
with gr.Tab('Dreambooth LoRA'):
with gr.Tab('LoRA'):
lora_tab(headless=headless)
with gr.Tab('Dreambooth TI'):
with gr.Tab('Textual Inversion'):
ti_tab(headless=headless)
with gr.Tab('Finetune'):
with gr.Tab('Finetuning'):
finetune_tab(headless=headless)
with gr.Tab('Utilities'):
utilities_tab(

View File

@ -489,9 +489,9 @@ def set_pretrained_model_name_or_path_input(
):
# Check if the given model_list is in the list of SDXL models
if str(model_list) in SDXL_MODELS:
log.info('SDXL model selected. Setting --v2, --v_parameterization and sdxl parameters')
v2 = gr.Checkbox.update(value=True, visible=False)
v_parameterization = gr.Checkbox.update(value=True, visible=False)
log.info('SDXL model selected. Setting sdxl parameters')
v2 = gr.Checkbox.update(value=False, visible=False)
v_parameterization = gr.Checkbox.update(value=False, visible=False)
sdxl = gr.Checkbox.update(value=True, visible=False)
pretrained_model_name_or_path = gr.Textbox.update(value=str(model_list), visible=False)
pretrained_model_name_or_path_file = gr.Button.update(visible=False)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,82 @@
{
"adaptive_noise_scale": 0,
"additional_parameters": "",
"batch_size": "8",
"bucket_no_upscale": true,
"bucket_reso_steps": 1,
"cache_latents": true,
"cache_latents_to_disk": false,
"caption_dropout_every_n_epochs": 0.0,
"caption_dropout_rate": 0,
"caption_extension": ".txt",
"caption_metadata_filename": "meta-1_cap.json",
"clip_skip": 1,
"color_aug": false,
"create_buckets": false,
"create_caption": true,
"dataset_repeats": "50",
"epoch": 2,
"flip_aug": false,
"full_fp16": false,
"full_path": true,
"gradient_accumulation_steps": 1.0,
"gradient_checkpointing": false,
"image_folder": ".\\test\\img\\10_darius kawasaki person",
"keep_tokens": 0,
"latent_metadata_filename": "meta-1_lat.json",
"learning_rate": 1e-05,
"logging_dir": "./test/ft",
"lr_scheduler": "cosine_with_restarts",
"lr_warmup": 10,
"max_bucket_reso": "1024",
"max_data_loader_n_workers": "0",
"max_resolution": "512,512",
"max_token_length": "75",
"max_train_epochs": "",
"mem_eff_attn": false,
"min_bucket_reso": "256",
"min_snr_gamma": 0,
"mixed_precision": "bf16",
"model_list": "stabilityai/stable-diffusion-xl-base-0.9",
"multires_noise_discount": 0,
"multires_noise_iterations": 0,
"noise_offset": 0,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"optimizer": "AdamW",
"optimizer_args": "",
"output_dir": "./test/output",
"output_name": "test_ft",
"persistent_data_loader_workers": false,
"pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-0.9",
"random_crop": false,
"resume": "",
"sample_every_n_epochs": 0,
"sample_every_n_steps": 0,
"sample_prompts": "",
"sample_sampler": "euler_a",
"save_every_n_epochs": 1,
"save_every_n_steps": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_model_as": "safetensors",
"save_precision": "bf16",
"save_state": false,
"scale_v_pred_loss_like_noise_pred": false,
"sdxl_cache_text_encoder_outputs": false,
"sdxl_checkbox": true,
"sdxl_no_half_vae": false,
"seed": "1234",
"shuffle_caption": false,
"train_batch_size": 4,
"train_dir": "./test",
"train_text_encoder": true,
"use_latent_files": "No",
"use_wandb": false,
"v2": true,
"v_parameterization": true,
"vae_batch_size": 0,
"wandb_api_key": "",
"weighted_captions": false,
"xformers": true
}