Add support for metadata parameters (#2295)

pull/2297/head
bmaltais 2024-04-15 13:26:39 -04:00 committed by GitHub
parent a8320e3f2c
commit a22d4622cd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 246 additions and 176 deletions

View File

@ -1 +1 @@
v23.1.6 v24.0.0

View File

@ -42,7 +42,7 @@ The GUI allows you to set the training parameters and generate and run the requi
- [SDXL training](#sdxl-training) - [SDXL training](#sdxl-training)
- [Masked loss](#masked-loss) - [Masked loss](#masked-loss)
- [Change History](#change-history) - [Change History](#change-history)
- [2024/04/12 (v23.1.6)](#20240412-v2316) - [2024/04/12 (v24.0.0)](#20240412-v2400)
- [2024/04/10 (v23.1.5)](#20240410-v2315) - [2024/04/10 (v23.1.5)](#20240410-v2315)
- [Security Improvements](#security-improvements) - [Security Improvements](#security-improvements)
- [2024/04/08 (v23.1.4)](#20240408-v2314) - [2024/04/08 (v23.1.4)](#20240408-v2314)
@ -402,7 +402,7 @@ ControlNet dataset is used to specify the mask. The mask images should be the RG
## Change History ## Change History
### 2024/04/12 (v23.1.6) ### 2024/04/12 (v24.0.0)
- Rewrote significant portions of the code to address security vulnerabilities and remove the `shell=True` parameter from process calls. - Rewrote significant portions of the code to address security vulnerabilities and remove the `shell=True` parameter from process calls.
- Enhanced the training and tensorboard buttons to provide a more intuitive and user-friendly experience. - Enhanced the training and tensorboard buttons to provide a more intuitive and user-friendly experience.
@ -411,6 +411,7 @@ ControlNet dataset is used to specify the mask. The mask images should be the RG
- Converted the Graphical User Interface (GUI) to use the configuration TOML file format to pass arguments to sd-scripts. This change improves security by eliminating the need for sensitive information to be passed through the command-line interface (CLI). - Converted the Graphical User Interface (GUI) to use the configuration TOML file format to pass arguments to sd-scripts. This change improves security by eliminating the need for sensitive information to be passed through the command-line interface (CLI).
- Made various other minor improvements and bug fixes to enhance the overall functionality and user experience. - Made various other minor improvements and bug fixes to enhance the overall functionality and user experience.
- Disabled LR Warmup when using the Constant LR Scheduler to prevent traceback errors with sd-scripts. - Disabled LR Warmup when using the Constant LR Scheduler to prevent traceback errors with sd-scripts.
- Added support for metadata capture to the GUI
### 2024/04/10 (v23.1.5) ### 2024/04/10 (v23.1.5)

View File

@ -173,3 +173,10 @@ train_data_dir = "" # Image folder to caption (contain
undesired_tags = "" # comma-separated list of tags to remove, e.g. 1girl,1boy undesired_tags = "" # comma-separated list of tags to remove, e.g. 1girl,1boy
use_rating_tags = false # Use rating tags use_rating_tags = false # Use rating tags
use_rating_tags_as_last_tag = false # Use rating tags as last tagging tags use_rating_tags_as_last_tag = false # Use rating tags as last tagging tags
[metadata]
metadata_title = "" # Title for model metadata (default is output_name)
metadata_author = "" # Author name for model metadata
metadata_description = "" # Description for model metadata
metadata_license = "" # License for model metadata
metadata_tags = "" # Tags for model metadata

View File

@ -1,6 +1,4 @@
import gradio as gr import gradio as gr
import os
import shlex
from .class_gui_config import KohyaSSGUIConfig from .class_gui_config import KohyaSSGUIConfig
@ -12,161 +10,58 @@ class MetaData:
) -> None: ) -> None:
self.config = config self.config = config
with gr.Accordion("Resource Selection", open=True):
with gr.Row():
self.mixed_precision = gr.Dropdown(
label="Mixed precision",
choices=["no", "fp16", "bf16", "fp8"],
value=self.config.get("accelerate_launch.mixed_precision", "fp16"),
info="Whether or not to use mixed precision training.",
)
self.num_processes = gr.Number(
label="Number of processes",
value=self.config.get("accelerate_launch.num_processes", 1),
precision=0,
minimum=1,
info="The total number of processes to be launched in parallel.",
)
self.num_machines = gr.Number(
label="Number of machines",
value=self.config.get("accelerate_launch.num_machines", 1),
precision=0,
minimum=1,
info="The total number of machines used in this training.",
)
self.num_cpu_threads_per_process = gr.Slider(
minimum=1,
maximum=os.cpu_count(),
step=1,
label="Number of CPU threads per core",
value=self.config.get(
"accelerate_launch.num_cpu_threads_per_process", 2
),
info="The number of CPU threads per process.",
)
with gr.Row():
self.dynamo_backend = gr.Dropdown(
label="Dynamo backend",
choices=[
"no",
"eager",
"aot_eager",
"inductor",
"aot_ts_nvfuser",
"nvprims_nvfuser",
"cudagraphs",
"ofi",
"fx2trt",
"onnxrt",
"tensorrt",
"ipex",
"tvm",
],
value=self.config.get("accelerate_launch.dynamo_backend", "no"),
info="The backend to use for the dynamo JIT compiler.",
)
self.dynamo_mode = gr.Dropdown(
label="Dynamo mode",
choices=[
"default",
"reduce-overhead",
"max-autotune",
],
value=self.config.get("accelerate_launch.dynamo_mode", "default"),
info="Choose a mode to optimize your training with dynamo.",
)
self.dynamo_use_fullgraph = gr.Checkbox(
label="Dynamo use fullgraph",
value=self.config.get("accelerate_launch.dynamo_use_fullgraph", False),
info="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
)
self.dynamo_use_dynamic = gr.Checkbox(
label="Dynamo use dynamic",
value=self.config.get("accelerate_launch.dynamo_use_dynamic", False),
info="Whether to enable dynamic shape tracing.",
)
with gr.Accordion("Hardware Selection", open=True):
with gr.Row():
self.multi_gpu = gr.Checkbox(
label="Multi GPU",
value=self.config.get("accelerate_launch.multi_gpu", False),
info="Whether or not this should launch a distributed GPU training.",
)
with gr.Accordion("Distributed GPUs", open=True):
with gr.Row():
self.gpu_ids = gr.Textbox(
label="GPU IDs",
value=self.config.get("accelerate_launch.gpu_ids", ""),
placeholder="example: 0,1",
info=" What GPUs (by id) should be used for training on this machine as a comma-separated list",
)
self.main_process_port = gr.Number(
label="Main process port",
value=self.config.get("accelerate_launch.main_process_port", 0),
precision=1,
minimum=0,
maximum=65535,
info="The port to use to communicate with the machine of rank 0.",
)
with gr.Row(): with gr.Row():
self.extra_accelerate_launch_args = gr.Textbox( self.metadata_title = gr.Textbox(
label="Extra accelerate launch arguments", label="Metadata title",
value=self.config.get( placeholder="(optional) title for model metadata (default is output_name)",
"accelerate_launch.extra_accelerate_launch_args", "" interactive=True,
), value=self.config.get("metadata.title", ""),
placeholder="example: --same_network --machine_rank 4", )
info="List of extra parameters to pass to accelerate launch", self.metadata_author = gr.Textbox(
label="Metadata author",
placeholder="(optional) author name for model metadata",
interactive=True,
value=self.config.get("metadata.author", ""),
)
self.metadata_description = gr.Textbox(
label="Metadata description",
placeholder="(optional) description for model metadata",
interactive=True,
value=self.config.get("metadata.description", ""),
)
with gr.Row():
self.metadata_license = gr.Textbox(
label="Metadata license",
placeholder="(optional) license for model metadata",
interactive=True,
value=self.config.get("metadata.license", ""),
)
self.metadata_tags = gr.Textbox(
label="Metadata tags",
placeholder="(optional) tags for model metadata, separated by comma",
interactive=True,
value=self.config.get("metadata.tags", ""),
) )
def run_cmd(run_cmd: list, **kwargs):
    """Append sd-scripts metadata CLI flags to a command list.

    For each supported metadata field present in *kwargs* with a non-empty
    value, appends ``--<field>`` followed by the value to ``run_cmd``.

    Args:
        run_cmd: The command argument list being built; mutated in place.
        **kwargs: May contain ``metadata_title``, ``metadata_author``,
            ``metadata_description``, ``metadata_license`` and
            ``metadata_tags``. Empty-string values are skipped.

    Returns:
        The same ``run_cmd`` list, for call chaining.
    """
    # Single data-driven loop instead of five copy-pasted if-blocks.
    metadata_keys = (
        "metadata_title",
        "metadata_author",
        "metadata_description",
        "metadata_license",
        "metadata_tags",
    )
    for key in metadata_keys:
        # Mirrors the original check: key present AND value != "".
        # (A value of None would still be appended, as before.)
        if key in kwargs and kwargs.get(key) != "":
            run_cmd.append(f"--{key}")
            run_cmd.append(kwargs[key])
    return run_cmd

View File

@ -28,6 +28,7 @@ from .class_advanced_training import AdvancedTraining
from .class_folders import Folders from .class_folders import Folders
from .class_command_executor import CommandExecutor from .class_command_executor import CommandExecutor
from .class_huggingface import HuggingFace from .class_huggingface import HuggingFace
from .class_metadata import MetaData
from .dreambooth_folder_creation_gui import ( from .dreambooth_folder_creation_gui import (
gradio_dreambooth_folder_creation_tab, gradio_dreambooth_folder_creation_tab,
@ -172,6 +173,11 @@ def save_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -325,6 +331,11 @@ def open_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -473,6 +484,11 @@ def train_model(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -681,10 +697,10 @@ def train_model(
"ip_noise_gamma": ip_noise_gamma, "ip_noise_gamma": ip_noise_gamma,
"ip_noise_gamma_random_strength": ip_noise_gamma_random_strength, "ip_noise_gamma_random_strength": ip_noise_gamma_random_strength,
"keep_tokens": int(keep_tokens), "keep_tokens": int(keep_tokens),
"learning_rate": learning_rate, "learning_rate": learning_rate, # both for sd1.5 and sdxl
"learning_rate_te": learning_rate_te, "learning_rate_te": learning_rate_te if not sdxl else None, # only for sd1.5
"learning_rate_te1": learning_rate_te1, "learning_rate_te1": learning_rate_te1 if sdxl else None, # only for sdxl
"learning_rate_te2": learning_rate_te2, "learning_rate_te2": learning_rate_te2 if sdxl else None, # only for sdxl
"logging_dir": logging_dir, "logging_dir": logging_dir,
"log_tracker_name": log_tracker_name, "log_tracker_name": log_tracker_name,
"log_tracker_config": log_tracker_config, "log_tracker_config": log_tracker_config,
@ -703,6 +719,11 @@ def train_model(
"max_train_epochs": max_train_epochs, "max_train_epochs": max_train_epochs,
"max_train_steps": int(max_train_steps), "max_train_steps": int(max_train_steps),
"mem_eff_attn": mem_eff_attn, "mem_eff_attn": mem_eff_attn,
"metadata_author": metadata_author,
"metadata_description": metadata_description,
"metadata_license": metadata_license,
"metadata_tags": metadata_tags,
"metadata_title": metadata_title,
"min_bucket_reso": int(min_bucket_reso), "min_bucket_reso": int(min_bucket_reso),
"min_snr_gamma": min_snr_gamma, "min_snr_gamma": min_snr_gamma,
"min_timestep": int(min_timestep), "min_timestep": int(min_timestep),
@ -859,6 +880,9 @@ def dreambooth_tab(
with gr.Accordion("Folders", open=False), gr.Group(): with gr.Accordion("Folders", open=False), gr.Group():
folders = Folders(headless=headless, config=config) folders = Folders(headless=headless, config=config)
with gr.Accordion("Metadata", open=False), gr.Group():
metadata = MetaData(config=config)
with gr.Accordion("Dataset Preparation", open=False): with gr.Accordion("Dataset Preparation", open=False):
gr.Markdown( gr.Markdown(
"This section provide Dreambooth tools to help setup your dataset..." "This section provide Dreambooth tools to help setup your dataset..."
@ -1034,6 +1058,11 @@ def dreambooth_tab(
huggingface.save_state_to_huggingface, huggingface.save_state_to_huggingface,
huggingface.resume_from_huggingface, huggingface.resume_from_huggingface,
huggingface.async_upload, huggingface.async_upload,
metadata.metadata_author,
metadata.metadata_description,
metadata.metadata_license,
metadata.metadata_tags,
metadata.metadata_title,
] ]
configuration.button_open_config.click( configuration.button_open_config.click(

View File

@ -31,6 +31,7 @@ from .class_command_executor import CommandExecutor
from .class_tensorboard import TensorboardManager from .class_tensorboard import TensorboardManager
from .class_sample_images import SampleImages, create_prompt_file from .class_sample_images import SampleImages, create_prompt_file
from .class_huggingface import HuggingFace from .class_huggingface import HuggingFace
from .class_metadata import MetaData
from .custom_logging import setup_logging from .custom_logging import setup_logging
@ -180,6 +181,11 @@ def save_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -340,6 +346,11 @@ def open_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
training_preset, training_preset,
): ):
# Get list of function parameters and values # Get list of function parameters and values
@ -506,6 +517,11 @@ def train_model(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -742,10 +758,10 @@ def train_model(
"ip_noise_gamma": ip_noise_gamma, "ip_noise_gamma": ip_noise_gamma,
"ip_noise_gamma_random_strength": ip_noise_gamma_random_strength, "ip_noise_gamma_random_strength": ip_noise_gamma_random_strength,
"keep_tokens": int(keep_tokens), "keep_tokens": int(keep_tokens),
"learning_rate": learning_rate, "learning_rate": learning_rate, # both for sd1.5 and sdxl
"learning_rate_te": learning_rate_te, "learning_rate_te": learning_rate_te if not sdxl_checkbox else None, # only for sd1.5
"learning_rate_te1": learning_rate_te1, "learning_rate_te1": learning_rate_te1 if sdxl_checkbox else None, # only for sdxl
"learning_rate_te2": learning_rate_te2, "learning_rate_te2": learning_rate_te2 if sdxl_checkbox else None, # only for sdxl
"logging_dir": logging_dir, "logging_dir": logging_dir,
"log_tracker_name": log_tracker_name, "log_tracker_name": log_tracker_name,
"log_tracker_config": log_tracker_config, "log_tracker_config": log_tracker_config,
@ -760,6 +776,11 @@ def train_model(
"max_train_epochs": max_train_epochs, "max_train_epochs": max_train_epochs,
"max_train_steps": int(max_train_steps), "max_train_steps": int(max_train_steps),
"mem_eff_attn": mem_eff_attn, "mem_eff_attn": mem_eff_attn,
"metadata_author": metadata_author,
"metadata_description": metadata_description,
"metadata_license": metadata_license,
"metadata_tags": metadata_tags,
"metadata_title": metadata_title,
"min_bucket_reso": int(min_bucket_reso), "min_bucket_reso": int(min_bucket_reso),
"min_snr_gamma": min_snr_gamma, "min_snr_gamma": min_snr_gamma,
"min_timestep": int(min_timestep), "min_timestep": int(min_timestep),
@ -907,6 +928,9 @@ def finetune_tab(headless=False, config: dict = {}):
logging_dir = folders.logging_dir logging_dir = folders.logging_dir
train_dir = folders.reg_data_dir train_dir = folders.reg_data_dir
with gr.Accordion("Metadata", open=False), gr.Group():
metadata = MetaData(config=config)
with gr.Accordion("Dataset Preparation", open=False): with gr.Accordion("Dataset Preparation", open=False):
with gr.Row(): with gr.Row():
max_resolution = gr.Textbox( max_resolution = gr.Textbox(
@ -1162,6 +1186,11 @@ def finetune_tab(headless=False, config: dict = {}):
huggingface.save_state_to_huggingface, huggingface.save_state_to_huggingface,
huggingface.resume_from_huggingface, huggingface.resume_from_huggingface,
huggingface.async_upload, huggingface.async_upload,
metadata.metadata_author,
metadata.metadata_description,
metadata.metadata_license,
metadata.metadata_tags,
metadata.metadata_title,
] ]
configuration.button_open_config.click( configuration.button_open_config.click(

View File

@ -33,6 +33,7 @@ from .class_tensorboard import TensorboardManager
from .class_sample_images import SampleImages, create_prompt_file from .class_sample_images import SampleImages, create_prompt_file
from .class_lora_tab import LoRATools from .class_lora_tab import LoRATools
from .class_huggingface import HuggingFace from .class_huggingface import HuggingFace
from .class_metadata import MetaData
from .dreambooth_folder_creation_gui import ( from .dreambooth_folder_creation_gui import (
gradio_dreambooth_folder_creation_tab, gradio_dreambooth_folder_creation_tab,
@ -42,7 +43,7 @@ from .dataset_balancing_gui import gradio_dataset_balancing_tab
from .custom_logging import setup_logging from .custom_logging import setup_logging
# Set up logging # Set up logging
log = setup_logging(debug=True) log = setup_logging()
# Setup command executor # Setup command executor
executor = CommandExecutor() executor = CommandExecutor()
@ -245,6 +246,11 @@ def save_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -445,6 +451,11 @@ def open_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
training_preset, training_preset,
): ):
# Get list of function parameters and values # Get list of function parameters and values
@ -675,6 +686,11 @@ def train_model(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -1048,6 +1064,11 @@ def train_model(
"max_train_epochs": max_train_epochs, "max_train_epochs": max_train_epochs,
"max_train_steps": int(max_train_steps), "max_train_steps": int(max_train_steps),
"mem_eff_attn": mem_eff_attn, "mem_eff_attn": mem_eff_attn,
"metadata_author": metadata_author,
"metadata_description": metadata_description,
"metadata_license": metadata_license,
"metadata_tags": metadata_tags,
"metadata_title": metadata_title,
"min_bucket_reso": int(min_bucket_reso), "min_bucket_reso": int(min_bucket_reso),
"min_snr_gamma": min_snr_gamma, "min_snr_gamma": min_snr_gamma,
"min_timestep": int(min_timestep), "min_timestep": int(min_timestep),
@ -1215,6 +1236,9 @@ def lora_tab(
config=config, config=config,
) )
with gr.Accordion("Metadata", open=False), gr.Group():
metadata = MetaData(config=config)
with gr.Accordion("Folders", open=False), gr.Group(): with gr.Accordion("Folders", open=False), gr.Group():
folders = Folders(headless=headless, config=config) folders = Folders(headless=headless, config=config)
@ -2198,6 +2222,11 @@ def lora_tab(
huggingface.save_state_to_huggingface, huggingface.save_state_to_huggingface,
huggingface.resume_from_huggingface, huggingface.resume_from_huggingface,
huggingface.async_upload, huggingface.async_upload,
metadata.metadata_author,
metadata.metadata_description,
metadata.metadata_license,
metadata.metadata_tags,
metadata.metadata_title,
] ]
configuration.button_open_config.click( configuration.button_open_config.click(

View File

@ -30,6 +30,7 @@ from .class_folders import Folders
from .class_sdxl_parameters import SDXLParameters from .class_sdxl_parameters import SDXLParameters
from .class_command_executor import CommandExecutor from .class_command_executor import CommandExecutor
from .class_huggingface import HuggingFace from .class_huggingface import HuggingFace
from .class_metadata import MetaData
from .class_tensorboard import TensorboardManager from .class_tensorboard import TensorboardManager
from .dreambooth_folder_creation_gui import ( from .dreambooth_folder_creation_gui import (
gradio_dreambooth_folder_creation_tab, gradio_dreambooth_folder_creation_tab,
@ -170,6 +171,11 @@ def save_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -324,6 +330,11 @@ def open_configuration(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -471,6 +482,11 @@ def train_model(
save_state_to_huggingface, save_state_to_huggingface,
resume_from_huggingface, resume_from_huggingface,
async_upload, async_upload,
metadata_author,
metadata_description,
metadata_license,
metadata_tags,
metadata_title,
): ):
# Get list of function parameters and values # Get list of function parameters and values
parameters = list(locals().items()) parameters = list(locals().items())
@ -670,6 +686,11 @@ def train_model(
"max_train_epochs": max_train_epochs, "max_train_epochs": max_train_epochs,
"max_train_steps": int(max_train_steps), "max_train_steps": int(max_train_steps),
"mem_eff_attn": mem_eff_attn, "mem_eff_attn": mem_eff_attn,
"metadata_author": metadata_author,
"metadata_description": metadata_description,
"metadata_license": metadata_license,
"metadata_tags": metadata_tags,
"metadata_title": metadata_title,
"min_bucket_reso": int(min_bucket_reso), "min_bucket_reso": int(min_bucket_reso),
"min_snr_gamma": min_snr_gamma, "min_snr_gamma": min_snr_gamma,
"min_timestep": int(min_timestep), "min_timestep": int(min_timestep),
@ -833,6 +854,9 @@ def ti_tab(headless=False, default_output_dir=None, config: dict = {}):
with gr.Accordion("Folders", open=False), gr.Group(): with gr.Accordion("Folders", open=False), gr.Group():
folders = Folders(headless=headless, config=config) folders = Folders(headless=headless, config=config)
with gr.Accordion("Metadata", open=False), gr.Group():
metadata = MetaData(config=config)
with gr.Accordion("Dataset Preparation", open=False): with gr.Accordion("Dataset Preparation", open=False):
gr.Markdown( gr.Markdown(
"This section provide Dreambooth tools to help setup your dataset..." "This section provide Dreambooth tools to help setup your dataset..."
@ -1090,6 +1114,11 @@ def ti_tab(headless=False, default_output_dir=None, config: dict = {}):
huggingface.save_state_to_huggingface, huggingface.save_state_to_huggingface,
huggingface.resume_from_huggingface, huggingface.resume_from_huggingface,
huggingface.async_upload, huggingface.async_upload,
metadata.metadata_author,
metadata.metadata_description,
metadata.metadata_license,
metadata.metadata_tags,
metadata.metadata_title,
] ]
configuration.button_open_config.click( configuration.button_open_config.click(

View File

@ -1,63 +1,112 @@
{ {
"adaptive_noise_scale": 0, "adaptive_noise_scale": 0,
"additional_parameters": "", "additional_parameters": "",
"async_upload": false,
"bucket_no_upscale": true, "bucket_no_upscale": true,
"bucket_reso_steps": 1, "bucket_reso_steps": 1,
"cache_latents": true, "cache_latents": true,
"cache_latents_to_disk": false, "cache_latents_to_disk": false,
"caption_dropout_every_n_epochs": 0.0, "caption_dropout_every_n_epochs": 0,
"caption_dropout_rate": 0, "caption_dropout_rate": 0,
"caption_extension": "", "caption_extension": ".txt",
"clip_skip": 2, "clip_skip": 2,
"color_aug": false, "color_aug": false,
"dataset_config": "",
"debiased_estimation_loss": false,
"dynamo_backend": "no",
"dynamo_mode": "default",
"dynamo_use_dynamic": false,
"dynamo_use_fullgraph": false,
"enable_bucket": true, "enable_bucket": true,
"epoch": 1, "epoch": 2,
"extra_accelerate_launch_args": "",
"flip_aug": false, "flip_aug": false,
"full_bf16": false,
"full_fp16": false, "full_fp16": false,
"gradient_accumulation_steps": 1.0, "gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": false, "gradient_checkpointing": false,
"keep_tokens": "0", "huber_c": 0.1,
"learning_rate": 1.0, "huber_schedule": "snr",
"huggingface_path_in_repo": "",
"huggingface_repo_id": "",
"huggingface_repo_type": "",
"huggingface_repo_visibility": "",
"huggingface_token": "",
"ip_noise_gamma": 0,
"ip_noise_gamma_random_strength": false,
"keep_tokens": 0,
"learning_rate": 1,
"learning_rate_te": 1e-05,
"learning_rate_te1": 1e-05,
"learning_rate_te2": 1e-05,
"log_tracker_config": "",
"log_tracker_name": "",
"logging_dir": "./test/logs", "logging_dir": "./test/logs",
"loss_type": "l2",
"lr_scheduler": "cosine", "lr_scheduler": "cosine",
"lr_scheduler_args": "",
"lr_scheduler_num_cycles": 1,
"lr_scheduler_power": 1,
"lr_warmup": 0, "lr_warmup": 0,
"max_data_loader_n_workers": "0", "main_process_port": 0,
"masked_loss": false,
"max_bucket_reso": 2048,
"max_data_loader_n_workers": 0,
"max_resolution": "512,512", "max_resolution": "512,512",
"max_token_length": "75", "max_timestep": 1000,
"max_train_epochs": "", "max_token_length": 75,
"max_train_epochs": 0,
"max_train_steps": 0,
"mem_eff_attn": false, "mem_eff_attn": false,
"metadata_author": "",
"metadata_description": "",
"metadata_license": "",
"metadata_tags": "",
"metadata_title": "",
"min_bucket_reso": 256,
"min_snr_gamma": 0, "min_snr_gamma": 0,
"min_timestep": 0,
"mixed_precision": "bf16", "mixed_precision": "bf16",
"model_list": "runwayml/stable-diffusion-v1-5", "model_list": "runwayml/stable-diffusion-v1-5",
"multi_gpu": false,
"multires_noise_discount": 0.2, "multires_noise_discount": 0.2,
"multires_noise_iterations": 8, "multires_noise_iterations": 8,
"no_token_padding": false, "no_token_padding": false,
"noise_offset": "0.05", "noise_offset": 0.05,
"noise_offset_random_strength": false,
"noise_offset_type": "Multires", "noise_offset_type": "Multires",
"num_cpu_threads_per_process": 2, "num_cpu_threads_per_process": 2,
"num_machines": 1,
"num_processes": 1,
"optimizer": "DAdaptAdam", "optimizer": "DAdaptAdam",
"optimizer_args": "decouple=True weight_decay=0.6 betas=0.9,0.99 use_bias_correction=True", "optimizer_args": "decouple=True weight_decay=0.6 betas=0.9,0.99 use_bias_correction=True",
"output_dir": "./test/output", "output_dir": "./test/output",
"output_name": "db-DAdaptAdam", "output_name": "db-DAdaptAdam",
"persistent_data_loader_workers": false, "persistent_data_loader_workers": false,
"pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
"prior_loss_weight": 1.0, "prior_loss_weight": 1,
"random_crop": false, "random_crop": false,
"reg_data_dir": "", "reg_data_dir": "",
"resume": "", "resume": "",
"resume_from_huggingface": "",
"sample_every_n_epochs": 0, "sample_every_n_epochs": 0,
"sample_every_n_steps": 25, "sample_every_n_steps": 25,
"sample_prompts": "a painting of a gas mask , by darius kawasaki", "sample_prompts": "a painting of a gas mask , by darius kawasaki",
"sample_sampler": "euler_a", "sample_sampler": "euler_a",
"save_every_n_epochs": 1, "save_as_bool": false,
"save_every_n_epochs": 0,
"save_every_n_steps": 0, "save_every_n_steps": 0,
"save_last_n_steps": 0, "save_last_n_steps": 0,
"save_last_n_steps_state": 0, "save_last_n_steps_state": 0,
"save_model_as": "safetensors", "save_model_as": "safetensors",
"save_precision": "fp16", "save_precision": "fp16",
"save_state": false, "save_state": false,
"save_state_on_train_end": false,
"save_state_to_huggingface": false,
"scale_v_pred_loss_like_noise_pred": false, "scale_v_pred_loss_like_noise_pred": false,
"seed": "1234", "sdxl": false,
"seed": 1234,
"shuffle_caption": false, "shuffle_caption": false,
"stop_text_encoder_training": 0, "stop_text_encoder_training": 0,
"train_batch_size": 1, "train_batch_size": 1,
@ -65,9 +114,11 @@
"use_wandb": false, "use_wandb": false,
"v2": false, "v2": false,
"v_parameterization": false, "v_parameterization": false,
"v_pred_like_loss": 0,
"vae": "", "vae": "",
"vae_batch_size": 0, "vae_batch_size": 0,
"wandb_api_key": "", "wandb_api_key": "",
"wandb_run_name": "",
"weighted_captions": false, "weighted_captions": false,
"xformers": true "xformers": "xformers"
} }