Fix some incorrect usage related to the recent shell=false issue (#2417)

* Convert script back to no shell.

* Update rest of tools and update dreambooth to no shell

* Update rest of trainers to not use shell

* Allow the use of custom caption extension
pull/2420/head
bmaltais 2024-04-29 07:44:55 -04:00 committed by GitHub
parent fbd0d1a0cf
commit 7bbc99d91b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
25 changed files with 291 additions and 321 deletions

View File

@ -78,13 +78,11 @@ def UI(**kwargs):
reg_data_dir_input=reg_data_dir_input,
output_dir_input=output_dir_input,
logging_dir_input=logging_dir_input,
enable_copy_info_button=True,
headless=headless,
config=config,
use_shell_flag=use_shell_flag,
)
with gr.Tab("LoRA"):
_ = LoRATools(headless=headless, use_shell_flag=use_shell_flag)
_ = LoRATools(headless=headless)
with gr.Tab("About"):
gr.Markdown(f"kohya_ss GUI release {release}")
with gr.Tab("README"):

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
from .common_gui import (
get_folder_path,
@ -28,7 +27,6 @@ def caption_images(
postfix: str,
find_text: str,
replace_text: str,
use_shell: bool = False,
):
"""
Captions images in a given directory with a given caption text.
@ -48,14 +46,14 @@ def caption_images(
"""
# Check if images_dir is provided
if not images_dir:
msgbox(
log.info(
"Image folder is missing. Please provide the directory containing the images to caption."
)
return
# Check if caption_ext is provided
if not caption_ext:
msgbox("Please provide an extension for the caption files.")
log.info("Please provide an extension for the caption files.")
return
# Log the captioning process
@ -63,32 +61,35 @@ def caption_images(
log.info(f"Captioning files in {images_dir} with {caption_text}...")
# Build the command to run caption.py
run_cmd = rf'"{PYTHON}" "{scriptdir}/tools/caption.py"'
run_cmd += f' --caption_text="{caption_text}"'
run_cmd = [
rf"{PYTHON}",
rf"{scriptdir}/tools/caption.py",
"--caption_text",
caption_text,
]
# Add optional flags to the command
if overwrite:
run_cmd += f" --overwrite"
run_cmd.append("--overwrite")
if caption_ext:
run_cmd += f' --caption_file_ext="{caption_ext}"'
run_cmd.append("--caption_file_ext")
run_cmd.append(caption_ext)
run_cmd += f' "{images_dir}"'
run_cmd.append(rf"{images_dir}")
# Log the command
log.info(run_cmd)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run}")
# Set the environment variable for the Python path
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
log.info(f"Executing command: {run_cmd} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env, shell=use_shell)
subprocess.run(run_cmd, env=env, shell=False)
# Check if overwrite option is enabled
if overwrite:
@ -111,7 +112,7 @@ def caption_images(
else:
# Show a message if modification is not possible without overwrite option enabled
if prefix or postfix:
msgbox(
log.info(
'Could not modify caption files with requested change because the "Overwrite existing captions in folder" option is not selected.'
)
@ -120,7 +121,7 @@ def caption_images(
# Gradio UI
def gradio_basic_caption_gui_tab(headless=False, default_images_dir=None, use_shell: bool = False):
def gradio_basic_caption_gui_tab(headless=False, default_images_dir=None):
"""
Creates a Gradio tab for basic image captioning.
@ -200,6 +201,7 @@ def gradio_basic_caption_gui_tab(headless=False, default_images_dir=None, use_sh
choices=[".cap", ".caption", ".txt"],
value=".txt",
interactive=True,
allow_custom_value=True,
)
# Checkbox to overwrite existing captions
overwrite = gr.Checkbox(
@ -258,7 +260,6 @@ def gradio_basic_caption_gui_tab(headless=False, default_images_dir=None, use_sh
postfix,
find_text,
replace_text,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -23,7 +22,6 @@ def caption_images(
beam_search: bool,
prefix: str = "",
postfix: str = "",
use_shell: bool = False,
) -> None:
"""
Automatically generates captions for images in the specified directory using the BLIP model.
@ -47,59 +45,60 @@ def caption_images(
"""
# Check if the image folder is provided
if not train_data_dir:
msgbox("Image folder is missing...")
log.info("Image folder is missing...")
return
# Check if the caption file extension is provided
if not caption_file_ext:
msgbox("Please provide an extension for the caption files.")
log.info("Please provide an extension for the caption files.")
return
log.info(f"Captioning files in {train_data_dir}...")
# Construct the command to run make_captions.py
run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/finetune/make_captions.py"']
run_cmd = [rf"{PYTHON}", rf"{scriptdir}/sd-scripts/finetune/make_captions.py"]
# Add required arguments
run_cmd.append('--batch_size')
run_cmd.append("--batch_size")
run_cmd.append(str(batch_size))
run_cmd.append('--num_beams')
run_cmd.append("--num_beams")
run_cmd.append(str(num_beams))
run_cmd.append('--top_p')
run_cmd.append("--top_p")
run_cmd.append(str(top_p))
run_cmd.append('--max_length')
run_cmd.append("--max_length")
run_cmd.append(str(max_length))
run_cmd.append('--min_length')
run_cmd.append("--min_length")
run_cmd.append(str(min_length))
# Add optional flags to the command
if beam_search:
run_cmd.append("--beam_search")
if caption_file_ext:
run_cmd.append('--caption_extension')
run_cmd.append("--caption_extension")
run_cmd.append(caption_file_ext)
# Add the directory containing the training data
run_cmd.append(fr'"{train_data_dir}"')
run_cmd.append(rf"{train_data_dir}")
# Add URL for caption model weights
run_cmd.append('--caption_weights')
run_cmd.append("https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth")
run_cmd.append("--caption_weights")
run_cmd.append(
rf"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth"
)
# Set up the environment
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell, cwd=f"{scriptdir}/sd-scripts")
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env, shell=False, cwd=rf"{scriptdir}/sd-scripts")
# Add prefix and postfix
add_pre_postfix(
@ -117,7 +116,7 @@ def caption_images(
###
def gradio_blip_caption_gui_tab(headless=False, default_train_dir=None, use_shell: bool = False):
def gradio_blip_caption_gui_tab(headless=False, default_train_dir=None):
from .common_gui import create_refresh_button
default_train_dir = (
@ -167,6 +166,7 @@ def gradio_blip_caption_gui_tab(headless=False, default_train_dir=None, use_shel
choices=[".cap", ".caption", ".txt"],
value=".txt",
interactive=True,
allow_custom_value=True,
)
prefix = gr.Textbox(
@ -207,7 +207,6 @@ def gradio_blip_caption_gui_tab(headless=False, default_train_dir=None, use_shel
beam_search,
prefix,
postfix,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -28,7 +28,7 @@ class CommandExecutor:
"Stop training", visible=self.process is not None or headless, variant="stop"
)
def execute_command(self, run_cmd: str, use_shell: bool = False, **kwargs):
def execute_command(self, run_cmd: str, **kwargs):
"""
Execute a command if no other command is currently running.
@ -44,10 +44,10 @@ class CommandExecutor:
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
log.info(f"Executing command: {command_to_run}")
# Execute the command securely
self.process = subprocess.Popen(command_to_run, **kwargs, shell=use_shell)
self.process = subprocess.Popen(run_cmd, **kwargs)
log.info("Command executed.")
def kill_command(self):

View File

@ -14,15 +14,14 @@ class LoRATools:
def __init__(
self,
headless: bool = False,
use_shell_flag: bool = False,
):
gr.Markdown("This section provide various LoRA tools...")
gradio_extract_dylora_tab(headless=headless, use_shell=use_shell_flag)
gradio_convert_lcm_tab(headless=headless, use_shell=use_shell_flag)
gradio_extract_lora_tab(headless=headless, use_shell=use_shell_flag)
gradio_extract_lycoris_locon_tab(headless=headless, use_shell=use_shell_flag)
gradio_merge_lora_tab = GradioMergeLoRaTab(use_shell=use_shell_flag)
gradio_merge_lycoris_tab(headless=headless, use_shell=use_shell_flag)
gradio_svd_merge_lora_tab(headless=headless, use_shell=use_shell_flag)
gradio_extract_dylora_tab(headless=headless)
gradio_convert_lcm_tab(headless=headless)
gradio_extract_lora_tab(headless=headless)
gradio_extract_lycoris_locon_tab(headless=headless)
gradio_merge_lora_tab = GradioMergeLoRaTab()
gradio_merge_lycoris_tab(headless=headless)
gradio_svd_merge_lora_tab(headless=headless)
gradio_resize_lora_tab(headless=headless)
gradio_verify_lora_tab(headless=headless)

View File

@ -752,11 +752,6 @@ def add_pre_postfix(
postfix (str, optional): Postfix to add to the content of the caption files.
caption_file_ext (str, optional): Extension of the caption files.
"""
# Enforce that the provided extension is one of .caption, .cap, .txt
if caption_file_ext not in (".caption", ".cap", ".txt"):
log.error("Invalid caption file extension. Must be on of .caption, .cap, .txt")
return
# If neither prefix nor postfix is provided, return early
if prefix == "" and postfix == "":
return

View File

@ -27,10 +27,7 @@ def convert_lcm(
model_path,
lora_scale,
model_type,
use_shell: bool = False,
):
run_cmd = rf'"{PYTHON}" "{scriptdir}/tools/lcm_convert.py"'
# Check if source model exist
if not os.path.isfile(model_path):
log.error("The provided DyLoRA model is not a file")
@ -48,35 +45,41 @@ def convert_lcm(
save_to = f"{path}_lcm{ext}"
# Construct the command to run the script
run_cmd += f" --lora-scale {lora_scale}"
run_cmd += f' --model "{model_path}"'
run_cmd += f' --name "{name}"'
run_cmd = [
rf"{PYTHON}",
rf"{scriptdir}/tools/lcm_convert.py",
"--lora-scale",
str(lora_scale),
"--model",
rf"{model_path}",
"--name",
str(name),
]
if model_type == "SDXL":
run_cmd += f" --sdxl"
run_cmd.append("--sdxl")
if model_type == "SSD-1B":
run_cmd += f" --ssd-1b"
run_cmd.append("--ssd-1b")
# Set up the environment
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
log.info(f"Executing command: {run_cmd} with shell={use_shell}")
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(
run_cmd, env=env, shell=use_shell
)
subprocess.run(run_cmd, env=env, shell=False)
# Return a success message
log.info("Done extracting...")
def gradio_convert_lcm_tab(headless=False, use_shell: bool = False):
def gradio_convert_lcm_tab(headless=False):
current_model_dir = os.path.join(scriptdir, "outputs")
current_save_dir = os.path.join(scriptdir, "outputs")
@ -186,7 +189,6 @@ def gradio_convert_lcm_tab(headless=False, use_shell: bool = False):
model_path,
lora_scale,
model_type,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -26,11 +25,10 @@ def convert_model(
target_model_type,
target_save_precision_type,
unet_use_linear_projection,
use_shell: bool = False,
):
# Check for caption_text_input
if source_model_type == "":
msgbox("Invalid source model type")
log.info("Invalid source model type")
return
# Check if source model exist
@ -39,19 +37,19 @@ def convert_model(
elif os.path.isdir(source_model_input):
log.info("The provided model is a folder")
else:
msgbox("The provided source model is neither a file nor a folder")
log.info("The provided source model is neither a file nor a folder")
return
# Check if source model exist
if os.path.isdir(target_model_folder_input):
log.info("The provided model folder exist")
else:
msgbox("The provided target folder does not exist")
log.info("The provided target folder does not exist")
return
run_cmd = [
fr'"{PYTHON}"',
fr'"{scriptdir}/sd-scripts/tools/convert_diffusers20_original_sd.py"',
rf"{PYTHON}",
rf"{scriptdir}/sd-scripts/tools/convert_diffusers20_original_sd.py",
]
v1_models = [
@ -82,7 +80,7 @@ def convert_model(
run_cmd.append("--unet_use_linear_projection")
# Add the source model input path
run_cmd.append(fr'"{source_model_input}"')
run_cmd.append(rf"{source_model_input}")
# Determine the target model path
if target_model_type == "diffuser" or target_model_type == "diffuser_safetensors":
@ -96,23 +94,20 @@ def convert_model(
)
# Add the target model path
run_cmd.append(fr'"{target_model_path}"')
run_cmd.append(rf"{target_model_path}")
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(
command_to_run, env=env, shell=use_shell
)
# Run the command
subprocess.run(run_cmd, env=env, shell=False)
###
@ -120,7 +115,7 @@ def convert_model(
###
def gradio_convert_model_tab(headless=False, use_shell: bool = False):
def gradio_convert_model_tab(headless=False):
from .common_gui import create_refresh_button
default_source_model = os.path.join(scriptdir, "outputs")
@ -280,7 +275,6 @@ def gradio_convert_model_tab(headless=False, use_shell: bool = False):
target_model_type,
target_save_precision_type,
unet_use_linear_projection,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -642,7 +642,7 @@ def train_model(
log.info(max_train_steps_info)
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [rf'"{get_executable_path("accelerate")}"', "launch"]
run_cmd = [rf'{get_executable_path("accelerate")}', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -661,9 +661,9 @@ def train_model(
)
if sdxl:
run_cmd.append(rf'"{scriptdir}/sd-scripts/sdxl_train.py"')
run_cmd.append(rf'{scriptdir}/sd-scripts/sdxl_train.py')
else:
run_cmd.append(rf'"{scriptdir}/sd-scripts/train_db.py"')
run_cmd.append(rf"{scriptdir}/sd-scripts/train_db.py")
if max_data_loader_n_workers == "" or None:
max_data_loader_n_workers = 0
@ -835,7 +835,7 @@ def train_model(
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append(rf'"{tmpfilename}"')
run_cmd.append(rf'{tmpfilename}')
# Initialize a dictionary with always-included keyword arguments
kwargs_for_training = {
@ -866,13 +866,13 @@ def train_model(
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
executor.execute_command(run_cmd=run_cmd, env=env)
train_state_value = time.time()

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -27,16 +26,15 @@ def extract_dylora(
model,
save_to,
unit,
use_shell: bool = False,
):
# Check for caption_text_input
if model == "":
msgbox("Invalid DyLoRA model file")
log.info("Invalid DyLoRA model file")
return
# Check if source model exist
if not os.path.isfile(model):
msgbox("The provided DyLoRA model is not a file")
log.info("The provided DyLoRA model is not a file")
return
if os.path.dirname(save_to) == "":
@ -51,29 +49,29 @@ def extract_dylora(
save_to = f"{path}_tmp{ext}"
run_cmd = [
fr'"{PYTHON}"',
rf'"{scriptdir}/sd-scripts/networks/extract_lora_from_dylora.py"',
rf"{PYTHON}",
rf"{scriptdir}/sd-scripts/networks/extract_lora_from_dylora.py",
"--save_to",
rf'"{save_to}"',
rf"{save_to}",
"--model",
rf'"{model}"',
rf"{model}",
"--unit",
str(unit),
]
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Example environment variable adjustment for the Python environment
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
subprocess.run(run_cmd, env=env, shell=False)
log.info("Done extracting DyLoRA...")
@ -83,7 +81,7 @@ def extract_dylora(
###
def gradio_extract_dylora_tab(headless=False, use_shell: bool = False):
def gradio_extract_dylora_tab(headless=False):
current_model_dir = os.path.join(scriptdir, "outputs")
current_save_dir = os.path.join(scriptdir, "outputs")
@ -172,7 +170,6 @@ def gradio_extract_dylora_tab(headless=False, use_shell: bool = False):
model,
save_to,
unit,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -39,7 +39,6 @@ def extract_lora(
load_original_model_to,
load_tuned_model_to,
load_precision,
use_shell: bool = False,
):
# Check for caption_text_input
if model_tuned == "":
@ -74,18 +73,18 @@ def extract_lora(
return
run_cmd = [
fr'"{PYTHON}"',
fr'"{scriptdir}/sd-scripts/networks/extract_lora_from_models.py"',
rf"{PYTHON}",
rf"{scriptdir}/sd-scripts/networks/extract_lora_from_models.py",
"--load_precision",
load_precision,
"--save_precision",
save_precision,
"--save_to",
fr'"{save_to}"',
rf"{save_to}",
"--model_org",
fr'"{model_org}"',
rf"{model_org}",
"--model_tuned",
fr'"{model_tuned}"',
rf"{model_tuned}",
"--dim",
str(dim),
"--device",
@ -112,18 +111,17 @@ def extract_lora(
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Adding an example of another potentially relevant environment variable
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env)
###
@ -131,7 +129,9 @@ def extract_lora(
###
def gradio_extract_lora_tab(headless=False, use_shell: bool = False):
def gradio_extract_lora_tab(
headless=False,
):
current_model_dir = os.path.join(scriptdir, "outputs")
current_model_org_dir = os.path.join(scriptdir, "outputs")
current_save_dir = os.path.join(scriptdir, "outputs")
@ -361,7 +361,6 @@ def gradio_extract_lora_tab(headless=False, use_shell: bool = False):
load_original_model_to,
load_tuned_model_to,
load_precision,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -43,24 +42,23 @@ def extract_lycoris_locon(
use_sparse_bias,
sparsity,
disable_cp,
use_shell: bool = False,
):
# Check for caption_text_input
if db_model == "":
msgbox("Invalid finetuned model file")
log.info("Invalid finetuned model file")
return
if base_model == "":
msgbox("Invalid base model file")
log.info("Invalid base model file")
return
# Check if source model exist
if not os.path.isfile(db_model):
msgbox("The provided finetuned model is not a file")
log.info("The provided finetuned model is not a file")
return
if not os.path.isfile(base_model):
msgbox("The provided base model is not a file")
log.info("The provided base model is not a file")
return
if os.path.dirname(output_name) == "":
@ -74,7 +72,7 @@ def extract_lycoris_locon(
path, ext = os.path.splitext(output_name)
output_name = f"{path}_tmp{ext}"
run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/tools/lycoris_locon_extract.py"']
run_cmd = [fr'{PYTHON}', fr'{scriptdir}/tools/lycoris_locon_extract.py']
if is_sdxl:
run_cmd.append("--is_sdxl")
@ -121,23 +119,23 @@ def extract_lycoris_locon(
run_cmd.append("--disable_cp")
# Add paths
run_cmd.append(fr'"{base_model}"')
run_cmd.append(fr'"{db_model}"')
run_cmd.append(fr'"{output_name}"')
run_cmd.append(fr"{base_model}")
run_cmd.append(fr"{db_model}")
run_cmd.append(fr"{output_name}")
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
subprocess.run(run_cmd, env=env)
log.info("Done extracting...")
@ -174,7 +172,7 @@ def update_mode(mode):
return tuple(updates)
def gradio_extract_lycoris_locon_tab(headless=False, use_shell: bool = False):
def gradio_extract_lycoris_locon_tab(headless=False):
current_model_dir = os.path.join(scriptdir, "outputs")
current_base_model_dir = os.path.join(scriptdir, "outputs")
@ -452,7 +450,6 @@ def gradio_extract_lycoris_locon_tab(headless=False, use_shell: bool = False):
use_sparse_bias,
sparsity,
disable_cp,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -19,7 +19,7 @@ from .common_gui import (
scriptdir,
update_my_data,
validate_paths,
validate_args_setting
validate_args_setting,
)
from .class_accelerate_launch import AccelerateLaunch
from .class_configuration_file import ConfigurationFile
@ -530,13 +530,13 @@ def train_model(
# Get list of function parameters and values
parameters = list(locals().items())
global train_state_value
TRAIN_BUTTON_VISIBLE = [
gr.Button(visible=True),
gr.Button(visible=False or headless),
gr.Textbox(value=train_state_value),
]
if executor.is_running():
log.error("Training is already running. Can't start another training session.")
return TRAIN_BUTTON_VISIBLE
@ -548,7 +548,7 @@ def train_model(
log.info(f"Validating lr scheduler arguments...")
if not validate_args_setting(lr_scheduler_args):
return
log.info(f"Validating optimizer arguments...")
if not validate_args_setting(optimizer_args):
return
@ -712,7 +712,7 @@ def train_model(
lr_warmup_steps = 0
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [get_executable_path("accelerate"), "launch"]
run_cmd = [rf'{get_executable_path("accelerate")}', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -812,7 +812,9 @@ def train_model(
"max_bucket_reso": int(max_bucket_reso),
"max_timestep": max_timestep if max_timestep != 0 else None,
"max_token_length": int(max_token_length),
"max_train_epochs": int(max_train_epochs) if int(max_train_epochs) != 0 else None,
"max_train_epochs": (
int(max_train_epochs) if int(max_train_epochs) != 0 else None
),
"max_train_steps": int(max_train_steps) if int(max_train_steps) != 0 else None,
"mem_eff_attn": mem_eff_attn,
"metadata_author": metadata_author,
@ -888,9 +890,9 @@ def train_model(
for key, value in config_toml_data.items()
if value not in ["", False, None]
}
config_toml_data["max_data_loader_n_workers"] = int(max_data_loader_n_workers)
# Sort the dictionary by keys
config_toml_data = dict(sorted(config_toml_data.items()))
@ -904,7 +906,7 @@ def train_model(
if not os.path.exists(toml_file.name):
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append("--config_file")
run_cmd.append(rf"{tmpfilename}")
# Initialize a dictionary with always-included keyword arguments
@ -941,7 +943,7 @@ def train_model(
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
executor.execute_command(run_cmd=run_cmd, env=env)
train_state_value = time.time()
@ -1285,7 +1287,7 @@ def finetune_tab(
)
run_state = gr.Textbox(value=train_state_value, visible=False)
run_state.change(
fn=executor.wait_for_training_to_end,
outputs=[executor.button_run, executor.button_stop_training],
@ -1299,7 +1301,8 @@ def finetune_tab(
)
executor.button_stop_training.click(
executor.kill_command, outputs=[executor.button_run, executor.button_stop_training]
executor.kill_command,
outputs=[executor.button_run, executor.button_stop_training],
)
button_print.click(

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -22,25 +21,24 @@ def caption_images(
model_id,
prefix,
postfix,
use_shell: bool = False,
):
# Check for images_dir_input
if train_data_dir == "":
msgbox("Image folder is missing...")
log.info("Image folder is missing...")
return
if caption_ext == "":
msgbox("Please provide an extension for the caption files.")
log.info("Please provide an extension for the caption files.")
return
log.info(f"GIT captioning files in {train_data_dir}...")
run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/finetune/make_captions_by_git.py"']
run_cmd = [fr"{PYTHON}", fr"{scriptdir}/sd-scripts/finetune/make_captions_by_git.py"]
# Add --model_id if provided
if model_id != "":
run_cmd.append("--model_id")
run_cmd.append(model_id)
run_cmd.append(fr'{model_id}')
# Add other arguments with their values
run_cmd.append("--batch_size")
@ -58,21 +56,21 @@ def caption_images(
run_cmd.append(caption_ext)
# Add the directory containing the training data
run_cmd.append(fr'"{train_data_dir}"')
run_cmd.append(fr"{train_data_dir}")
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
subprocess.run(run_cmd, env=env)
# Add prefix and postfix
@ -92,7 +90,7 @@ def caption_images(
def gradio_git_caption_gui_tab(
headless=False, default_train_dir=None, use_shell: bool = False
headless=False, default_train_dir=None,
):
from .common_gui import create_refresh_button
@ -143,6 +141,7 @@ def gradio_git_caption_gui_tab(
choices=[".cap", ".caption", ".txt"],
value=".txt",
interactive=True,
allow_custom_value=True,
)
prefix = gr.Textbox(
@ -183,7 +182,6 @@ def gradio_git_caption_gui_tab(
model_id,
prefix,
postfix,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
from .common_gui import get_folder_path, scriptdir, list_dirs
import os
@ -21,23 +20,22 @@ def group_images(
do_not_copy_other_files,
generate_captions,
caption_ext,
use_shell: bool = False,
):
if input_folder == "":
msgbox("Input folder is missing...")
log.info("Input folder is missing...")
return
if output_folder == "":
msgbox("Please provide an output folder.")
log.info("Please provide an output folder.")
return
log.info(f"Grouping images in {input_folder}...")
run_cmd = [
fr'"{PYTHON}"',
f'"{scriptdir}/tools/group_images.py"',
fr'"{input_folder}"',
fr'"{output_folder}"',
fr"{PYTHON}",
f"{scriptdir}/tools/group_images.py",
fr"{input_folder}",
fr"{output_folder}",
str(group_size),
]
@ -62,16 +60,16 @@ def group_images(
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
subprocess.run(run_cmd, env=env)
log.info("...grouping done")
def gradio_group_images_gui_tab(headless=False, use_shell: bool = False):
def gradio_group_images_gui_tab(headless=False):
from .common_gui import create_refresh_button
current_input_folder = os.path.join(scriptdir, "data")
@ -189,6 +187,7 @@ def gradio_group_images_gui_tab(headless=False, use_shell: bool = False):
choices=[".cap", ".caption", ".txt"],
value=".txt",
interactive=True,
allow_custom_value=True,
)
group_images_button = gr.Button("Group images")
@ -203,7 +202,6 @@ def gradio_group_images_gui_tab(headless=False, use_shell: bool = False):
do_not_copy_other_files,
generate_captions,
caption_ext,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -869,7 +869,7 @@ def train_model(
log.info(f"stop_text_encoder_training = {stop_text_encoder_training}")
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [rf'"{get_executable_path("accelerate")}"', "launch"]
run_cmd = [rf'{get_executable_path("accelerate")}', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -888,9 +888,9 @@ def train_model(
)
if sdxl:
run_cmd.append(rf'"{scriptdir}/sd-scripts/sdxl_train_network.py"')
run_cmd.append(rf"{scriptdir}/sd-scripts/sdxl_train_network.py")
else:
run_cmd.append(rf'"{scriptdir}/sd-scripts/train_network.py"')
run_cmd.append(rf"{scriptdir}/sd-scripts/train_network.py")
network_args = ""
@ -1198,8 +1198,8 @@ def train_model(
if not os.path.exists(toml_file.name):
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append(rf'"{tmpfilename}"')
run_cmd.append("--config_file")
run_cmd.append(rf"{tmpfilename}")
# Define a dictionary of parameters
run_cmd_params = {
@ -1229,13 +1229,13 @@ def train_model(
# log.info(run_cmd)
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
executor.execute_command(run_cmd=run_cmd, env=env)
train_state_value = time.time()
@ -2336,7 +2336,7 @@ def lora_tab(
)
with gr.Tab("Tools"):
lora_tools = LoRATools(headless=headless, use_shell_flag=use_shell)
lora_tools = LoRATools(headless=headless)
with gr.Tab("Guides"):
gr.Markdown("This section provide Various LoRA guides and information...")

View File

@ -303,6 +303,7 @@ def gradio_manual_caption_gui_tab(headless=False, default_images_dir=None):
choices=[".cap", ".caption", ".txt"],
value=".txt",
interactive=True,
allow_custom_value=True,
)
auto_save = gr.Checkbox(
label="Autosave", info="Options", value=True, interactive=True

View File

@ -6,7 +6,6 @@ import json
# Third-party imports
import gradio as gr
from easygui import msgbox
# Local module imports
from .common_gui import (
@ -33,7 +32,7 @@ def check_model(model):
if not model:
return True
if not os.path.isfile(model):
msgbox(f"The provided {model} is not a file")
log.info(f"The provided {model} is not a file")
return False
return True
@ -48,9 +47,8 @@ def verify_conditions(sd_model, lora_models):
class GradioMergeLoRaTab:
def __init__(self, headless=False, use_shell: bool = False):
def __init__(self, headless=False):
self.headless = headless
self.use_shell = use_shell
self.build_tab()
def save_inputs_to_json(self, file_path, inputs):
@ -380,7 +378,6 @@ class GradioMergeLoRaTab:
save_to,
precision,
save_precision,
gr.Checkbox(value=self.use_shell, visible=False),
],
show_progress=False,
)
@ -400,7 +397,6 @@ class GradioMergeLoRaTab:
save_to,
precision,
save_precision,
use_shell: bool = False,
):
log.info("Merge model...")
@ -425,18 +421,23 @@ class GradioMergeLoRaTab:
return
if not sdxl_model:
run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/networks/merge_lora.py"']
run_cmd = [rf"{PYTHON}", rf"{scriptdir}/sd-scripts/networks/merge_lora.py"]
else:
run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/networks/sdxl_merge_lora.py"']
run_cmd = [
rf"{PYTHON}",
rf"{scriptdir}/sd-scripts/networks/sdxl_merge_lora.py",
]
if sd_model:
run_cmd.append("--sd_model")
run_cmd.append(fr'"{sd_model}"')
run_cmd.append(rf"{sd_model}")
run_cmd.extend(["--save_precision", save_precision])
run_cmd.extend(["--precision", precision])
run_cmd.append("--save_precision")
run_cmd.append(save_precision)
run_cmd.append("--precision")
run_cmd.append(precision)
run_cmd.append("--save_to")
run_cmd.append(fr'"{save_to}"')
run_cmd.append(rf"{save_to}")
# Prepare model and ratios command as lists, including only non-empty models
valid_models = [model for model in lora_models if model]
@ -452,17 +453,16 @@ class GradioMergeLoRaTab:
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Example of adding an environment variable for TensorFlow, if necessary
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env)
log.info("Done merging...")

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -33,23 +32,25 @@ def merge_lycoris(
device,
is_sdxl,
is_v2,
use_shell: bool = False,
):
log.info("Merge model...")
# Build the command to run merge_lycoris.py using list format
run_cmd = [
fr'"{PYTHON}"',
fr'"{scriptdir}/tools/merge_lycoris.py"',
fr'"{base_model}"',
fr'"{lycoris_model}"',
fr'"{output_name}"',
fr"{PYTHON}",
fr"{scriptdir}/tools/merge_lycoris.py",
fr"{base_model}",
fr"{lycoris_model}",
fr"{output_name}",
]
# Add additional required arguments with their values
run_cmd.extend(["--weight", str(weight)])
run_cmd.extend(["--device", device])
run_cmd.extend(["--dtype", dtype])
run_cmd.append("--weight")
run_cmd.append(str(weight))
run_cmd.append("--device")
run_cmd.append(device)
run_cmd.append("--dtype")
run_cmd.append(dtype)
# Add optional flags based on conditions
if is_sdxl:
@ -60,16 +61,16 @@ def merge_lycoris(
# Copy and update the environment variables
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
subprocess.run(run_cmd, env=env)
log.info("Done merging...")
@ -80,7 +81,7 @@ def merge_lycoris(
###
def gradio_merge_lycoris_tab(headless=False, use_shell: bool = False):
def gradio_merge_lycoris_tab(headless=False):
current_model_dir = os.path.join(scriptdir, "outputs")
current_lycoris_dir = current_model_dir
current_save_dir = current_model_dir
@ -253,7 +254,6 @@ def gradio_merge_lycoris_tab(headless=False, use_shell: bool = False):
device,
is_sdxl,
is_v2,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -36,22 +35,24 @@ def resize_lora(
):
# Check for caption_text_input
if model == "":
msgbox("Invalid model file")
log.info("Invalid model file")
return
# Check if source model exist
if not os.path.isfile(model):
msgbox("The provided model is not a file")
log.info("The provided model is not a file")
return
if dynamic_method == "sv_ratio":
if float(dynamic_param) < 2:
msgbox(f"Dynamic parameter for {dynamic_method} need to be 2 or greater...")
log.info(
f"Dynamic parameter for {dynamic_method} need to be 2 or greater..."
)
return
if dynamic_method == "sv_fro" or dynamic_method == "sv_cumulative":
if float(dynamic_param) < 0 or float(dynamic_param) > 1:
msgbox(
log.info(
f"Dynamic parameter for {dynamic_method} need to be between 0 and 1..."
)
return
@ -64,14 +65,14 @@ def resize_lora(
device = "cuda"
run_cmd = [
fr"{PYTHON}",
fr"{scriptdir}/sd-scripts/networks/resize_lora.py",
rf"{PYTHON}",
rf"{scriptdir}/sd-scripts/networks/resize_lora.py",
"--save_precision",
save_precision,
"--save_to",
fr"{save_to}",
rf"{save_to}",
"--model",
fr"{model}",
rf"{model}",
"--new_rank",
str(new_rank),
"--device",
@ -80,9 +81,10 @@ def resize_lora(
# Conditional checks for dynamic parameters
if dynamic_method != "None":
run_cmd.extend(
["--dynamic_method", dynamic_method, "--dynamic_param", str(dynamic_param)]
)
run_cmd.append("--dynamic_method")
run_cmd.append(dynamic_method)
run_cmd.append("--dynamic_param")
run_cmd.append(str(dynamic_param))
# Check for verbosity
if verbose:
@ -90,7 +92,7 @@ def resize_lora(
env = os.environ.copy()
env["PYTHONPATH"] = (
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Adding example environment variables if relevant
@ -99,10 +101,9 @@ def resize_lora(
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env, shell=False)
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env)
log.info("Done resizing...")
@ -112,7 +113,9 @@ def resize_lora(
###
def gradio_resize_lora_tab(headless=False,):
def gradio_resize_lora_tab(
headless=False,
):
current_model_dir = os.path.join(scriptdir, "outputs")
current_save_dir = os.path.join(scriptdir, "outputs")

View File

@ -1,9 +1,7 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
import shlex
from .common_gui import (
get_saveasfilename_path,
get_file_path,
@ -39,11 +37,10 @@ def svd_merge_lora(
new_rank,
new_conv_rank,
device,
use_shell: bool = False,
):
# Check if the output file already exists
if os.path.isfile(save_to):
print(f"Output file '{save_to}' already exists. Aborting.")
log.info(f"Output file '{save_to}' already exists. Aborting.")
return
# Check if the ratio total is equal to one. If not normalise to 1
@ -54,57 +51,59 @@ def svd_merge_lora(
ratio_c /= total_ratio
ratio_d /= total_ratio
run_cmd = rf'"{PYTHON}" "{scriptdir}/sd-scripts/networks/svd_merge_lora.py"'
run_cmd += f" --save_precision {save_precision}"
run_cmd += f" --precision {precision}"
run_cmd += rf' --save_to "{save_to}"'
run_cmd = [
rf"{PYTHON}",
rf"{scriptdir}/sd-scripts/networks/svd_merge_lora.py",
"--save_precision",
save_precision,
"--precision",
precision,
"--save_to",
save_to,
]
# Variables for model paths and their ratios
models = []
ratios = []
run_cmd_models = " --models"
run_cmd_ratios = " --ratios"
# Add non-empty models and their ratios to the command
if lora_a_model:
if not os.path.isfile(lora_a_model):
msgbox("The provided model A is not a file")
return
run_cmd_models += rf' "{lora_a_model}"'
run_cmd_ratios += f" {ratio_a}"
if lora_b_model:
if not os.path.isfile(lora_b_model):
msgbox("The provided model B is not a file")
return
run_cmd_models += rf' "{lora_b_model}"'
run_cmd_ratios += f" {ratio_b}"
if lora_c_model:
if not os.path.isfile(lora_c_model):
msgbox("The provided model C is not a file")
return
run_cmd_models += rf' "{lora_c_model}"'
run_cmd_ratios += f" {ratio_c}"
if lora_d_model:
if not os.path.isfile(lora_d_model):
msgbox("The provided model D is not a file")
return
run_cmd_models += rf' "{lora_d_model}"'
run_cmd_ratios += f" {ratio_d}"
def add_model(model_path, ratio):
if not os.path.isfile(model_path):
log.info(f"The provided model at {model_path} is not a file")
return False
models.append(model_path)
ratios.append(str(ratio))
return True
run_cmd += run_cmd_models
run_cmd += run_cmd_ratios
run_cmd += f" --device {device}"
run_cmd += f' --new_rank "{new_rank}"'
run_cmd += f' --new_conv_rank "{new_conv_rank}"'
if lora_a_model and add_model(lora_a_model, ratio_a):
pass
if lora_b_model and add_model(lora_b_model, ratio_b):
pass
if lora_c_model and add_model(lora_c_model, ratio_c):
pass
if lora_d_model and add_model(lora_d_model, ratio_d):
pass
if models and ratios: # Ensure we have valid models and ratios before appending
run_cmd.extend(["--models"] + models)
run_cmd.extend(["--ratios"] + ratios)
run_cmd.extend(
["--device", device, "--new_rank", new_rank, "--new_conv_rank", new_conv_rank]
)
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Example of setting additional environment variables if needed
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
log.info(f"Executing command: {run_cmd} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env, shell=use_shell)
# Run the command
subprocess.run(run_cmd, env=env)
###
@ -112,7 +111,7 @@ def svd_merge_lora(
###
def gradio_svd_merge_lora_tab(headless=False, use_shell: bool = False):
def gradio_svd_merge_lora_tab(headless=False):
current_save_dir = os.path.join(scriptdir, "outputs")
current_a_model_dir = current_save_dir
current_b_model_dir = current_save_dir
@ -407,7 +406,6 @@ def gradio_svd_merge_lora_tab(headless=False, use_shell: bool = False):
new_rank,
new_conv_rank,
device,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -668,7 +668,7 @@ def train_model(
log.info(f"stop_text_encoder_training = {stop_text_encoder_training}")
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [rf'"{get_executable_path("accelerate")}"', "launch"]
run_cmd = [rf'{get_executable_path("accelerate")}', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -687,9 +687,9 @@ def train_model(
)
if sdxl:
run_cmd.append(rf'"{scriptdir}/sd-scripts/sdxl_train_textual_inversion.py"')
run_cmd.append(rf"{scriptdir}/sd-scripts/sdxl_train_textual_inversion.py")
else:
run_cmd.append(rf'"{scriptdir}/sd-scripts/train_textual_inversion.py"')
run_cmd.append(rf"{scriptdir}/sd-scripts/train_textual_inversion.py")
if max_data_loader_n_workers == "" or None:
max_data_loader_n_workers = 0
@ -853,8 +853,8 @@ def train_model(
if not os.path.exists(toml_file.name):
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append(rf'"{tmpfilename}"')
run_cmd.append("--config_file")
run_cmd.append(rf"{tmpfilename}")
# Initialize a dictionary with always-included keyword arguments
kwargs_for_training = {
@ -891,7 +891,7 @@ def train_model(
# Run the command
executor.execute_command(run_cmd=run_cmd, use_shell=use_shell, env=env)
executor.execute_command(run_cmd=run_cmd, env=env)
train_state_value = time.time()

View File

@ -16,21 +16,18 @@ def utilities_tab(
reg_data_dir_input=gr.Dropdown(),
output_dir_input=gr.Dropdown(),
logging_dir_input=gr.Dropdown(),
enable_copy_info_button=bool(False),
enable_dreambooth_tab=True,
headless=False,
config: KohyaSSGUIConfig = {},
use_shell_flag: bool = False,
):
with gr.Tab("Captioning"):
gradio_basic_caption_gui_tab(headless=headless, use_shell=use_shell_flag)
gradio_blip_caption_gui_tab(headless=headless, use_shell=use_shell_flag)
gradio_basic_caption_gui_tab(headless=headless)
gradio_blip_caption_gui_tab(headless=headless)
gradio_blip2_caption_gui_tab(headless=headless)
gradio_git_caption_gui_tab(headless=headless, use_shell=use_shell_flag)
gradio_wd14_caption_gui_tab(headless=headless, config=config, use_shell=use_shell_flag)
gradio_git_caption_gui_tab(headless=headless)
gradio_wd14_caption_gui_tab(headless=headless, config=config)
gradio_manual_caption_gui_tab(headless=headless)
gradio_convert_model_tab(headless=headless, use_shell=use_shell_flag)
gradio_group_images_gui_tab(headless=headless, use_shell=use_shell_flag)
gradio_convert_model_tab(headless=headless)
gradio_group_images_gui_tab(headless=headless)
return (
train_data_dir_input,

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
import os
import sys
@ -27,12 +26,12 @@ def verify_lora(
):
# verify for caption_text_input
if lora_model == "":
msgbox("Invalid model A file")
log.info("Invalid model A file")
return
# verify if source model exist
if not os.path.isfile(lora_model):
msgbox("The provided model A is not a file")
log.info("The provided model A is not a file")
return
run_cmd = [
@ -61,7 +60,6 @@ def verify_lora(
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
shell=False,
)
output, error = process.communicate()

View File

@ -1,5 +1,4 @@
import gradio as gr
from easygui import msgbox
import subprocess
from .common_gui import (
get_folder_path,
@ -17,6 +16,7 @@ from .custom_logging import setup_logging
log = setup_logging()
old_onnx_value = True
def caption_images(
train_data_dir: str,
caption_extension: str,
@ -40,26 +40,25 @@ def caption_images(
use_rating_tags_as_last_tag: bool,
remove_underscore: bool,
thresh: float,
use_shell: bool = False,
) -> None:
# Check for images_dir_input
if train_data_dir == "":
msgbox("Image folder is missing...")
log.info("Image folder is missing...")
return
if caption_extension == "":
msgbox("Please provide an extension for the caption files.")
log.info("Please provide an extension for the caption files.")
return
repo_id_converted = repo_id.replace("/", "_")
if not os.path.exists(f"./wd14_tagger_model/{repo_id_converted}"):
force_download = True
log.info(f"Captioning files in {train_data_dir}...")
run_cmd = [
fr'{get_executable_path("accelerate")}',
rf'{get_executable_path("accelerate")}',
"launch",
fr"{scriptdir}/sd-scripts/finetune/tag_images_by_wd14_tagger.py",
rf"{scriptdir}/sd-scripts/finetune/tag_images_by_wd14_tagger.py",
]
# Uncomment and modify if needed
@ -116,11 +115,11 @@ def caption_images(
run_cmd.append("--use_rating_tags_as_last_tag")
# Add the directory containing the training data
run_cmd.append(fr'{train_data_dir}')
run_cmd.append(rf"{train_data_dir}")
env = os.environ.copy()
env["PYTHONPATH"] = (
fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
)
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
@ -128,11 +127,10 @@ def caption_images(
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env)
# Add prefix and postfix
add_pre_postfix(
folder=train_data_dir,
@ -153,7 +151,6 @@ def gradio_wd14_caption_gui_tab(
headless=False,
default_train_dir=None,
config: KohyaSSGUIConfig = {},
use_shell: bool = False,
):
from .common_gui import create_refresh_button
@ -234,6 +231,7 @@ def gradio_wd14_caption_gui_tab(
choices=[".cap", ".caption", ".txt"],
value=".txt",
interactive=True,
allow_custom_value=True,
)
caption_separator = gr.Textbox(
@ -364,21 +362,17 @@ def gradio_wd14_caption_gui_tab(
label="Max dataloader workers",
interactive=True,
)
def repo_id_changes(repo_id, onnx):
global old_onnx_value
if "-v3" in repo_id:
old_onnx_value = onnx
return gr.Checkbox(value=True, interactive=False)
else:
return gr.Checkbox(value=old_onnx_value, interactive=True)
repo_id.change(
repo_id_changes,
inputs=[repo_id, onnx],
outputs=[onnx]
)
repo_id.change(repo_id_changes, inputs=[repo_id, onnx], outputs=[onnx])
caption_button = gr.Button("Caption images")
@ -407,7 +401,6 @@ def gradio_wd14_caption_gui_tab(
use_rating_tags_as_last_tag,
remove_underscore,
thresh,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)