diff --git a/lora_gui.py b/lora_gui.py index 6cb4029..971b0e3 100644 --- a/lora_gui.py +++ b/lora_gui.py @@ -211,6 +211,7 @@ def save_configuration( def open_configuration( ask_for_file, + apply_preset, file_path, pretrained_model_name_or_path, v2, @@ -312,11 +313,25 @@ def open_configuration( network_dropout, rank_dropout, module_dropout, + training_preset ): # Get list of function parameters and values parameters = list(locals().items()) ask_for_file = True if ask_for_file.get('label') == 'True' else False + apply_preset = True if apply_preset.get('label') == 'True' else False + + # Check if we are "applying" a preset or a config + if apply_preset: + log.info(f'Applying preset {training_preset}...') + file_path = f'./presets/lora/{training_preset}.json' + else: + # If not applying a preset, set the `training_preset` field to an empty string + # Find the index of the `training_preset` parameter using the `index()` method + training_preset_index = parameters.index(("training_preset", training_preset)) + + # Update the value of `training_preset` by directly assigning an empty string value + parameters[training_preset_index] = ("training_preset", "") original_file_path = file_path @@ -324,22 +339,28 @@ def open_configuration( file_path = get_file_path(file_path) if not file_path == '' and not file_path == None: - # load variables from JSON file + # Load variables from JSON file with open(file_path, 'r') as f: my_data = json.load(f) log.info('Loading config...') - # Update values to fix deprecated use_8bit_adam checkbox, set appropriate optimizer if it is set to True, etc. + # Update values to fix deprecated options, set appropriate optimizer if it is set to True, etc. 
my_data = update_my_data(my_data) else: - file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action + file_path = original_file_path # In case a file_path was provided and the user decides to cancel the open action my_data = {} values = [file_path] for key, value in parameters: # Set the value in the dictionary to the corresponding value in `my_data`, or the default value if not found - if not key in ['ask_for_file', 'file_path']: - values.append(my_data.get(key, value)) + if key not in ['ask_for_file', 'apply_preset', 'file_path']: + json_value = my_data.get(key) + if json_value == '': + # If the JSON value is an empty string, use the default value + values.append(value) + else: + # Otherwise, use the JSON value if not None, otherwise use the default value + values.append(json_value if json_value is not None else value) # This next section is about making the LoCon parameters visible if LoRA_type = 'Standard' if my_data.get('LoRA_type', 'Standard') == 'LoCon': @@ -1069,6 +1090,17 @@ def lora_tab( outputs=[logging_dir], ) with gr.Tab('Training parameters'): + def list_presets(path): + json_files = [] + for file in os.listdir(path): + if file.endswith(".json"): + json_files.append(os.path.splitext(file)[0]) + return json_files + with gr.Row(): + training_preset = gr.Dropdown( + label='Presets', + choices=list_presets('./presets/lora'), + ) with gr.Row(): LoRA_type = gr.Dropdown( label='LoRA type', @@ -1658,15 +1690,22 @@ def lora_tab( button_open_config.click( open_configuration, - inputs=[dummy_db_true, config_file_name] + settings_list, - outputs=[config_file_name] + settings_list + [LoCon_row], + inputs=[dummy_db_true, dummy_db_false, config_file_name] + settings_list + [training_preset], + outputs=[config_file_name] + settings_list + [training_preset, LoCon_row], show_progress=False, ) button_load_config.click( open_configuration, - inputs=[dummy_db_false, config_file_name] + 
settings_list, - outputs=[config_file_name] + settings_list + [LoCon_row], + inputs=[dummy_db_false, dummy_db_false, config_file_name] + settings_list + [training_preset], + outputs=[config_file_name] + settings_list + [training_preset, LoCon_row], + show_progress=False, + ) + + training_preset.input( + open_configuration, + inputs=[dummy_db_false, dummy_db_true, config_file_name] + settings_list + [training_preset], + outputs=[gr.Textbox()] + settings_list + [training_preset, LoCon_row], show_progress=False, ) diff --git a/presets/lora/dadaptation.json b/presets/lora/dadaptation.json index c9c9b2e..752e312 100644 --- a/presets/lora/dadaptation.json +++ b/presets/lora/dadaptation.json @@ -1,37 +1,101 @@ { - "v2": false, - "v_parameterization": false, - "max_resolution": "576,576", - "learning_rate": "1.0", - "lr_scheduler": "constant", - "lr_warmup": "0", - "save_every_n_epochs": 1, - "seed": "31337", - "cache_latents": true, - "gradient_checkpointing": true, - "no_token_padding": false, - "save_model_as": "safetensors", - "shuffle_caption": true, - "text_encoder_lr": "1.0", - "unet_lr": "1.0", - "network_dim": 8, - "color_aug": false, - "flip_aug": false, - "gradient_accumulation_steps": 1.0, - "model_list": "custom", - "max_token_length": "150", - "max_data_loader_n_workers": "1", - "network_alpha": 8, - "persistent_data_loader_workers": true, - "random_crop": false, - "optimizer": "DAdaptation", - "optimizer_args": "\"decouple=True\" \"weight_decay=0.01\" \"betas=0.9,0.99\"", - "noise_offset": "0", - "LoRA_type": "LyCORIS/LoHa", - "conv_dim": 8, - "conv_alpha": 8, - "sample_every_n_epochs": 1, - "sample_sampler": "euler_a", - "sample_prompts": "masterpiece, best quality, TRIGGERWORD, looking at viewer, simple background --n worst low quality --w 512 --h 712 --d 1 --l 7.5 --s 28", - "min_snr_gamma": 3 - } \ No newline at end of file + "LoRA_type": "LyCORIS/LoHa", + "adaptive_noise_scale": 0, + "additional_parameters": "", + "block_alphas": "", + "block_dims": "", 
+ "block_lr_zero_threshold": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": false, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0.1, + "caption_extension": "", + "clip_skip": "1", + "color_aug": false, + "conv_alpha": 8, + "conv_alphas": "", + "conv_dim": 8, + "conv_dims": "", + "decompose_both": false, + "dim_from_weights": false, + "down_lr_weight": "", + "enable_bucket": true, + "epoch": 1, + "factor": -1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 1.0, + "logging_dir": "", + "lora_network_weights": "", + "lr_scheduler": "cosine", + "lr_scheduler_num_cycles": "", + "lr_scheduler_power": "", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "mid_lr_weight": "", + "min_snr_gamma": 3, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "module_dropout": 0, + "multires_noise_discount": 0, + "multires_noise_iterations": 0, + "network_alpha": 128, + "network_dim": 128, + "network_dropout": 0, + "no_token_padding": false, + "noise_offset": 0, + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "optimizer": "DAdaptAdam", + "optimizer_args": "\"decouple=True\" \"weight_decay=0.01\" \"betas=0.9,0.99\"", + "output_dir": "", + "output_name": "", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": true, + "rank_dropout": 0, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 1, + "sample_every_n_steps": 0, + "sample_prompts": "masterpiece, best quality, TRIGGERWORD, looking at viewer, simple background --n worst low quality --w 512 --h 712 --d 1 --l 7.5 --s 28", + "sample_sampler": "euler_a", + 
"save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "scale_weight_norms": 0, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "text_encoder_lr": 1.0, + "train_batch_size": 8, + "train_data_dir": "", + "train_on_input": false, + "training_comment": "", + "unet_lr": 1.0, + "unit": 1, + "up_lr_weight": "", + "use_cp": false, + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/presets/lora/lion_optimizer.json b/presets/lora/lion_optimizer.json index 77ffa4d..7e20df8 100644 --- a/presets/lora/lion_optimizer.json +++ b/presets/lora/lion_optimizer.json @@ -1,59 +1,101 @@ { - "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", - "v2": false, - "v_parameterization": false, - "logging_dir": "D:\\dataset\\marty_mcfly\\1985\\lora/log", - "train_data_dir": "D:\\dataset\\marty_mcfly\\1985\\lora\\img_gan", - "reg_data_dir": "", - "output_dir": "D:/lora/sd1.5/marty_mcfly", - "max_resolution": "512,512", - "learning_rate": "0.00003333", - "lr_scheduler": "cosine", - "lr_warmup": "0", - "train_batch_size": 8, - "epoch": "1", - "save_every_n_epochs": "1", - "mixed_precision": "bf16", - "save_precision": "fp16", - "seed": "1234", - "num_cpu_threads_per_process": 2, + "LoRA_type": "LyCORIS/LoHa", + "adaptive_noise_scale": 0, + "additional_parameters": "", + "block_alphas": "", + "block_dims": "", + "block_lr_zero_threshold": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 64, "cache_latents": false, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0.1, "caption_extension": "", - "enable_bucket": true, - 
"gradient_checkpointing": false, - "full_fp16": false, - "no_token_padding": false, - "stop_text_encoder_training": 0, - "use_8bit_adam": false, - "xformers": true, - "save_model_as": "safetensors", - "shuffle_caption": false, - "save_state": false, - "resume": "", - "prior_loss_weight": 1.0, - "text_encoder_lr": "0.000016666", - "unet_lr": "0.00003333", - "network_dim": 128, - "lora_network_weights": "", - "color_aug": false, - "flip_aug": false, "clip_skip": "1", - "gradient_accumulation_steps": 1.0, - "mem_eff_attn": false, - "output_name": "mrtmcfl_v2.0", - "model_list": "runwayml/stable-diffusion-v1-5", - "max_token_length": "75", - "max_train_epochs": "", - "max_data_loader_n_workers": "0", - "network_alpha": 128, - "training_comment": "", + "color_aug": false, + "conv_alpha": 8, + "conv_alphas": "", + "conv_dim": 8, + "conv_dims": "", + "decompose_both": false, + "dim_from_weights": false, + "down_lr_weight": "", + "enable_bucket": true, + "epoch": 1, + "factor": -1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1, + "gradient_checkpointing": false, "keep_tokens": "0", + "learning_rate": 3.333e-05, + "logging_dir": "", + "lora_network_weights": "", + "lr_scheduler": "cosine", "lr_scheduler_num_cycles": "", "lr_scheduler_power": "", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "mid_lr_weight": "", + "min_snr_gamma": 3, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "module_dropout": 0, + "multires_noise_discount": 0, + "multires_noise_iterations": 0, + "network_alpha": 128, + "network_dim": 128, + "network_dropout": 0, + "no_token_padding": false, + "noise_offset": 0, + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "optimizer": "Lion", + "optimizer_args": "\"decouple=True\" \"weight_decay=0.01\" \"betas=0.9,0.99\"", + "output_dir": "", + "output_name": 
"", "persistent_data_loader_workers": false, - "bucket_no_upscale": true, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, "random_crop": true, - "bucket_reso_steps": 64.0, - "caption_dropout_every_n_epochs": 0.0, - "caption_dropout_rate": 0.1 + "rank_dropout": 0, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 1, + "sample_every_n_steps": 0, + "sample_prompts": "masterpiece, best quality, TRIGGERWORD, looking at viewer, simple background --n worst low quality --w 512 --h 712 --d 1 --l 7.5 --s 28", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "scale_weight_norms": 0, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "text_encoder_lr": 1.6666e-05, + "train_batch_size": 1, + "train_data_dir": "", + "train_on_input": false, + "training_comment": "", + "unet_lr": 3.333e-05, + "unit": 1, + "up_lr_weight": "", + "use_cp": false, + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true } \ No newline at end of file