Improve process execution under Linux (and windows) (#2301)

* Update how process is executed

* Auto config use_shell=True on posix systems
pull/2306/head
bmaltais 2024-04-16 07:53:44 -04:00 committed by GitHub
parent 099f00086c
commit 9037fd0373
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
22 changed files with 185 additions and 146 deletions

View File

@ -410,24 +410,26 @@ ControlNet dataset is used to specify the mask. The mask images should be the RG
#### Enhancements
- **User Interface:** Converted the GUI to utilize a configuration TOML file for passing arguments to sd-scripts, enhancing security by avoiding the command-line interface (CLI) for sensitive information.
- **Training Tools:** Enhanced the functionality of the training and TensorBoard buttons to offer a more intuitive user experience.
- **HuggingFace Integration:** Added a HuggingFace section to all trainers' tabs, allowing users to authenticate and leverage HuggingFace's advanced AI models.
- **Gradio Upgrade:** Upgraded to gradio version 4.20.0, resolving a bug affecting the runpod platform.
- **Metadata Support:** Introduced support for metadata capture within the GUI.
- **User Interface:** Transitioned the GUI to use a TOML file for argument passing to sd-scripts, significantly enhancing security by eliminating the need for command-line interface (CLI) use for sensitive data.
- **Training Tools:** Improved the training and TensorBoard buttons to provide a more intuitive user experience.
- **HuggingFace Integration:** Integrated a HuggingFace section in all trainer tabs, enabling authentication and use of HuggingFace's advanced AI models.
- **Gradio Upgrade:** Upgraded Gradio to version 4.20.0 to fix a previously identified bug impacting the runpod platform.
- **Metadata Support:** Added functionality for metadata capture within the GUI.
#### Security and Stability
- **Code Refactoring:** Rewrote significant portions of the code to address security vulnerabilities, including the removal of the `shell=True` parameter from process calls.
- **Scheduler Update:** Disabled LR Warmup when using the Constant LR Scheduler to avoid traceback errors with sd-scripts.
- **Code Refactoring:** Extensively rewrote the code to address various security vulnerabilities, including removing the `shell=True` parameter from process calls.
- **Scheduler Update:** Disabled LR Warmup when using the Constant LR Scheduler to prevent traceback errors associated with sd-scripts.
#### Shell Execution
- **Conditional Shell Usage:** Implemented support for optional shell usage when running external sd-scripts commands, to accommodate platform requirements introduced by the recent security enhancements.
- **How to Enable Shell Execution:**
1. Start the GUI with the `--use_shell` option.
2. Set the `use_shell` value in the config.toml file to `true`, allowing the GUI to apply user-specified settings at startup.
- **Note:** The `--use_shell` option will take precedence over settings in the config.toml file.
- **Conditional Shell Usage:** Added support for optional shell usage when executing external sd-scripts commands, tailored to meet specific platform needs and recent security updates.
The `gui.bat` and `gui.sh` scripts now accept the `--do_not_use_shell` argument to prevent shell execution (`shell=True`) when handling external processes. On Unix-like systems, `use_shell` is automatically set to True internally, as required for proper execution of external commands. To force shell execution off, pass the `--do_not_use_shell` argument.
- **How to Enable Shell Execution via Config File:**
1. In the `config.toml` file, set `use_shell` to `true` to enable shell usage as per GUI startup settings.
**Note:** The `--do_not_use_shell` option overrides the `config.toml` settings, forcing `use_shell` to False even if the config file sets it to True.
#### Miscellaneous

View File

@ -42,9 +42,16 @@ def UI(**kwargs):
if config.is_config_loaded():
log.info(f"Loaded default GUI values from '{kwargs.get('config')}'...")
use_shell_flag = kwargs.get("use_shell", False)
if use_shell_flag == False:
use_shell_flag = config.get("settings.use_shell", False)
use_shell_flag = False
if os.name == "posix":
use_shell_flag = True
if config.get("settings.use_shell", False):
use_shell_flag = True
if kwargs.get("do_not_use_shell", False):
use_shell_flag = False
if use_shell_flag:
log.info("Using shell=True when running external commands...")
@ -159,7 +166,7 @@ if __name__ == "__main__":
parser.add_argument("--use-rocm", action="store_true", help="Use ROCm environment")
parser.add_argument(
"--use_shell", action="store_true", help="Use shell environment"
"--do_not_use_shell", action="store_true", help="Enforce not to use shell=True when running external commands"
)
parser.add_argument(

View File

@ -28,6 +28,7 @@ def caption_images(
postfix: str,
find_text: str,
replace_text: str,
use_shell: bool = False,
):
"""
Captions images in a given directory with a given caption text.
@ -62,7 +63,7 @@ def caption_images(
log.info(f"Captioning files in {images_dir} with {caption_text}...")
# Build the command to run caption.py
run_cmd = [PYTHON, f"{scriptdir}/tools/caption.py"]
run_cmd = [PYTHON, fr'"{scriptdir}/tools/caption.py"']
# Add required arguments
run_cmd.append('--caption_text')
@ -76,10 +77,7 @@ def caption_images(
run_cmd.append(caption_ext)
# Add the directory containing the images
run_cmd.append(images_dir)
# Log the command
log.info(' '.join(run_cmd))
run_cmd.append(fr'"{images_dir}"')
# Set the environment variable for the Python path
env = os.environ.copy()
@ -88,8 +86,13 @@ def caption_images(
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command based on the operating system
subprocess.run(run_cmd, env=env)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
# Check if overwrite option is enabled
if overwrite:
@ -121,7 +124,7 @@ def caption_images(
# Gradio UI
def gradio_basic_caption_gui_tab(headless=False, default_images_dir=None):
def gradio_basic_caption_gui_tab(headless=False, default_images_dir=None, use_shell: bool = False):
"""
Creates a Gradio tab for basic image captioning.
@ -259,6 +262,7 @@ def gradio_basic_caption_gui_tab(headless=False, default_images_dir=None):
postfix,
find_text,
replace_text,
gr.Checkbox(value=use_shell, visible=False),
],
show_progress=False,
)

View File

@ -58,7 +58,7 @@ def caption_images(
log.info(f"Captioning files in {train_data_dir}...")
# Construct the command to run make_captions.py
run_cmd = [PYTHON, f"{scriptdir}/sd-scripts/finetune/make_captions.py"]
run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/finetune/make_captions.py"']
# Add required arguments
run_cmd.append('--batch_size')
@ -80,15 +80,12 @@ def caption_images(
run_cmd.append(caption_file_ext)
# Add the directory containing the training data
run_cmd.append(train_data_dir)
run_cmd.append(fr'"{train_data_dir}"')
# Add URL for caption model weights
run_cmd.append('--caption_weights')
run_cmd.append("https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth")
# Log the command
log.info(' '.join(run_cmd))
# Set up the environment
env = os.environ.copy()
env["PYTHONPATH"] = (
@ -96,8 +93,12 @@ def caption_images(
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(run_cmd, env=env, shell=use_shell, cwd=f"{scriptdir}/sd-scripts")
subprocess.run(command_to_run, env=env, shell=use_shell, cwd=f"{scriptdir}/sd-scripts")
# Add prefix and postfix

View File

@ -32,15 +32,15 @@ class CommandExecutor:
if self.process and self.process.poll() is None:
log.info("The command is already running. Please wait for it to finish.")
else:
# for i, item in enumerate(run_cmd):
# log.info(f"{i}: {item}")
for i, item in enumerate(run_cmd):
log.info(f"{i}: {item}")
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Execute the command securely
self.process = subprocess.Popen(run_cmd, **kwargs, shell=use_shell)
self.process = subprocess.Popen(command_to_run, **kwargs, shell=use_shell)
log.info("Command executed.")
def kill_command(self):

View File

@ -57,7 +57,7 @@ def convert_lcm(
run_cmd.append("--lora-scale")
run_cmd.append(str(lora_scale))
run_cmd.append("--model")
run_cmd.append(model_path)
run_cmd.append(rf'"{model_path}"')
run_cmd.append("--name")
run_cmd.append(name)
@ -67,9 +67,6 @@ def convert_lcm(
if model_type == "SSD-1B":
run_cmd.append("--ssd-1b")
# Log the command
log.info(" ".join(run_cmd))
# Set up the environment
env = os.environ.copy()
env["PYTHONPATH"] = (
@ -77,8 +74,14 @@ def convert_lcm(
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(
command_to_run, env=env, shell=use_shell
)
# Return a success message
log.info("Done extracting...")

View File

@ -50,7 +50,8 @@ def convert_model(
return
run_cmd = [
PYTHON, f"{scriptdir}/sd-scripts/tools/convert_diffusers20_original_sd.py"
PYTHON,
fr'"{scriptdir}/sd-scripts/tools/convert_diffusers20_original_sd.py"',
]
v1_models = [
@ -70,7 +71,7 @@ def convert_model(
run_cmd.append(f"--{target_save_precision_type}")
if target_model_type == "diffuser" or target_model_type == "diffuser_safetensors":
run_cmd.append('--reference_model')
run_cmd.append("--reference_model")
run_cmd.append(source_model_type)
if target_model_type == "diffuser_safetensors":
@ -81,7 +82,7 @@ def convert_model(
run_cmd.append("--unet_use_linear_projection")
# Add the source model input path
run_cmd.append(source_model_input)
run_cmd.append(fr'"{source_model_input}"')
# Determine the target model path
if target_model_type == "diffuser" or target_model_type == "diffuser_safetensors":
@ -95,10 +96,7 @@ def convert_model(
)
# Add the target model path
run_cmd.append(target_model_path)
# Log the command
log.info(' '.join(run_cmd))
run_cmd.append(fr'"{target_model_path}"')
env = os.environ.copy()
env["PYTHONPATH"] = (
@ -107,9 +105,14 @@ def convert_model(
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(
command_to_run, env=env, shell=use_shell
)
###

View File

@ -628,7 +628,7 @@ def train_model(
lr_warmup_steps = 0
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [get_executable_path("accelerate"), "launch"]
run_cmd = [fr'"{get_executable_path("accelerate")}"', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -647,9 +647,9 @@ def train_model(
)
if sdxl:
run_cmd.append(f"{scriptdir}/sd-scripts/sdxl_train.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/sdxl_train.py"')
else:
run_cmd.append(f"{scriptdir}/sd-scripts/train_db.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/train_db.py"')
if max_data_loader_n_workers == "" or None:
max_data_loader_n_workers = 0
@ -798,7 +798,7 @@ def train_model(
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append(tmpfilename)
run_cmd.append(fr'"{tmpfilename}"')
# Initialize a dictionary with always-included keyword arguments
kwargs_for_training = {

View File

@ -52,18 +52,15 @@ def extract_dylora(
run_cmd = [
PYTHON,
f"{scriptdir}/sd-scripts/networks/extract_lora_from_dylora.py",
rf'"{scriptdir}/sd-scripts/networks/extract_lora_from_dylora.py"',
"--save_to",
save_to,
rf'"{save_to}"',
"--model",
model,
rf'"{model}"',
"--unit",
str(unit),
]
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
@ -71,8 +68,12 @@ def extract_dylora(
# Example environment variable adjustment for the Python environment
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info("Done extracting DyLoRA...")

View File

@ -75,17 +75,17 @@ def extract_lora(
run_cmd = [
PYTHON,
f"{scriptdir}/sd-scripts/networks/extract_lora_from_models.py",
fr'"{scriptdir}/sd-scripts/networks/extract_lora_from_models.py"',
"--load_precision",
load_precision,
"--save_precision",
save_precision,
"--save_to",
save_to,
fr'"{save_to}"',
"--model_org",
model_org,
fr'"{model_org}"',
"--model_tuned",
model_tuned,
fr'"{model_tuned}"',
"--dim",
str(dim),
"--device",
@ -110,9 +110,6 @@ def extract_lora(
run_cmd.append("--load_tuned_model_to")
run_cmd.append(load_tuned_model_to)
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
@ -120,8 +117,13 @@ def extract_lora(
# Adding an example of another potentially relevant environment variable
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
###

View File

@ -74,7 +74,7 @@ def extract_lycoris_locon(
path, ext = os.path.splitext(output_name)
output_name = f"{path}_tmp{ext}"
run_cmd = [PYTHON, f"{scriptdir}/tools/lycoris_locon_extract.py"]
run_cmd = [PYTHON, fr'"{scriptdir}/tools/lycoris_locon_extract.py"']
if is_sdxl:
run_cmd.append("--is_sdxl")
@ -121,12 +121,9 @@ def extract_lycoris_locon(
run_cmd.append("--disable_cp")
# Add paths
run_cmd.append(base_model)
run_cmd.append(db_model)
run_cmd.append(output_name)
# Log the command
log.info(" ".join(run_cmd))
run_cmd.append(fr'"{base_model}"')
run_cmd.append(fr'"{db_model}"')
run_cmd.append(fr'"{output_name}"')
env = os.environ.copy()
env["PYTHONPATH"] = (
@ -135,8 +132,13 @@ def extract_lycoris_locon(
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info("Done extracting...")

View File

@ -679,7 +679,7 @@ def train_model(
lr_warmup_steps = 0
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [get_executable_path("accelerate"), "launch"]
run_cmd = [fr'"{get_executable_path("accelerate")}"', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -698,9 +698,9 @@ def train_model(
)
if sdxl_checkbox:
run_cmd.append(f"{scriptdir}/sd-scripts/sdxl_train.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/sdxl_train.py"')
else:
run_cmd.append(f"{scriptdir}/sd-scripts/fine_tune.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/fine_tune.py"')
in_json = (
f"{train_dir}/{latent_metadata_filename}"
@ -853,7 +853,7 @@ def train_model(
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append(tmpfilename)
run_cmd.append(fr'"{tmpfilename}"')
# Initialize a dictionary with always-included keyword arguments
kwargs_for_training = {

View File

@ -35,7 +35,7 @@ def caption_images(
log.info(f"GIT captioning files in {train_data_dir}...")
run_cmd = [PYTHON, f"{scriptdir}/sd-scripts/finetune/make_captions_by_git.py"]
run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/finetune/make_captions_by_git.py"']
# Add --model_id if provided
if model_id != "":
@ -58,10 +58,7 @@ def caption_images(
run_cmd.append(caption_ext)
# Add the directory containing the training data
run_cmd.append(train_data_dir)
# Log the command
log.info(" ".join(run_cmd))
run_cmd.append(fr'"{train_data_dir}"')
env = os.environ.copy()
env["PYTHONPATH"] = (
@ -70,8 +67,13 @@ def caption_images(
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
# Add prefix and postfix
add_pre_postfix(

View File

@ -36,8 +36,8 @@ def group_images(
run_cmd = [
PYTHON,
f"{scriptdir}/tools/group_images.py",
input_folder,
output_folder,
fr'"{input_folder}"',
fr'"{output_folder}"',
str(group_size),
]
@ -53,9 +53,6 @@ def group_images(
run_cmd.append("--caption_ext")
run_cmd.append(caption_ext)
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
rf"{scriptdir}{os.pathsep}{scriptdir}/tools{os.pathsep}{env.get('PYTHONPATH', '')}"
@ -63,8 +60,13 @@ def group_images(
# Adding a common environmental setting as an example if it's missing in the original context
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info("...grouping done")

View File

@ -852,7 +852,7 @@ def train_model(
lr_warmup_steps = 0
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [get_executable_path("accelerate"), "launch"]
run_cmd = [fr'"{get_executable_path("accelerate")}"', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -871,9 +871,9 @@ def train_model(
)
if sdxl:
run_cmd.append(f"{scriptdir}/sd-scripts/sdxl_train_network.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/sdxl_train_network.py"')
else:
run_cmd.append(f"{scriptdir}/sd-scripts/train_network.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/train_network.py"')
network_args = ""
@ -1151,7 +1151,7 @@ def train_model(
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append(tmpfilename)
run_cmd.append(fr'"{tmpfilename}"')
# Define a dictionary of parameters
run_cmd_params = {

View File

@ -425,18 +425,18 @@ class GradioMergeLoRaTab:
return
if not sdxl_model:
run_cmd = [PYTHON, f"{scriptdir}/sd-scripts/networks/merge_lora.py"]
run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/networks/merge_lora.py"']
else:
run_cmd = [PYTHON, f"{scriptdir}/sd-scripts/networks/sdxl_merge_lora.py"]
run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/networks/sdxl_merge_lora.py"']
if sd_model:
run_cmd.append("--sd_model")
run_cmd.append(sd_model)
run_cmd.append(fr'"{sd_model}"')
run_cmd.extend(["--save_precision", save_precision])
run_cmd.extend(["--precision", precision])
run_cmd.append("--save_to")
run_cmd.append(save_to)
run_cmd.append(fr'"{save_to}"')
# Prepare model and ratios command as lists, including only non-empty models
valid_models = [model for model in lora_models if model]
@ -450,9 +450,6 @@ class GradioMergeLoRaTab:
map(str, valid_ratios)
) # Convert ratios to strings and include them as separate arguments
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
@ -460,7 +457,12 @@ class GradioMergeLoRaTab:
# Example of adding an environment variable for TensorFlow, if necessary
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info("Done merging...")

View File

@ -40,10 +40,10 @@ def merge_lycoris(
# Build the command to run merge_lycoris.py using list format
run_cmd = [
PYTHON,
f"{scriptdir}/tools/merge_lycoris.py",
base_model,
lycoris_model,
output_name,
fr'"{scriptdir}/tools/merge_lycoris.py"',
fr'"{base_model}"',
fr'"{lycoris_model}"',
fr'"{output_name}"',
]
# Add additional required arguments with their values
@ -57,9 +57,6 @@ def merge_lycoris(
if is_v2:
run_cmd.append("--is_v2")
# Log the command
log.info(" ".join(run_cmd))
# Copy and update the environment variables
env = os.environ.copy()
env["PYTHONPATH"] = (
@ -67,8 +64,13 @@ def merge_lycoris(
)
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Execute the command with the modified environment
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info("Done merging...")

View File

@ -66,13 +66,13 @@ def resize_lora(
run_cmd = [
PYTHON,
f"{scriptdir}/sd-scripts/networks/resize_lora.py",
fr'"{scriptdir}/sd-scripts/networks/resize_lora.py"',
"--save_precision",
save_precision,
"--save_to",
save_to,
fr'"{save_to}"',
"--model",
model,
fr'"{model}"',
"--new_rank",
str(new_rank),
"--device",
@ -89,9 +89,6 @@ def resize_lora(
if verbose:
run_cmd.append("--verbose")
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
@ -100,8 +97,13 @@ def resize_lora(
# Adding example environment variables if relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
log.info("Done resizing...")

View File

@ -55,13 +55,13 @@ def svd_merge_lora(
run_cmd = [
PYTHON,
f"{scriptdir}/sd-scripts/networks/svd_merge_lora.py",
fr'"{scriptdir}/sd-scripts/networks/svd_merge_lora.py"',
"--save_precision",
save_precision,
"--precision",
precision,
"--save_to",
save_to,
fr'"{save_to}"',
]
# Variables for model paths and their ratios
@ -73,7 +73,7 @@ def svd_merge_lora(
if not os.path.isfile(model_path):
msgbox(f"The provided model at {model_path} is not a file")
return False
models.append(model_path)
models.append(fr'"{model_path}"')
ratios.append(str(ratio))
return True
@ -94,9 +94,6 @@ def svd_merge_lora(
["--device", device, "--new_rank", new_rank, "--new_conv_rank", new_conv_rank]
)
# Log the command
log.info(" ".join(run_cmd))
env = os.environ.copy()
env["PYTHONPATH"] = (
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
@ -104,8 +101,13 @@ def svd_merge_lora(
# Example of setting additional environment variables if needed
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
###

View File

@ -604,7 +604,7 @@ def train_model(
lr_warmup_steps = 0
log.info(f"lr_warmup_steps = {lr_warmup_steps}")
run_cmd = [get_executable_path("accelerate"), "launch"]
run_cmd = [fr'"{get_executable_path("accelerate")}"', "launch"]
run_cmd = AccelerateLaunch.run_cmd(
run_cmd=run_cmd,
@ -623,9 +623,9 @@ def train_model(
)
if sdxl:
run_cmd.append(f"{scriptdir}/sd-scripts/sdxl_train_textual_inversion.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/sdxl_train_textual_inversion.py"')
else:
run_cmd.append(f"{scriptdir}/sd-scripts/train_textual_inversion.py")
run_cmd.append(fr'"{scriptdir}/sd-scripts/train_textual_inversion.py"')
if max_data_loader_n_workers == "" or None:
max_data_loader_n_workers = 0
@ -769,7 +769,7 @@ def train_model(
log.error(f"Failed to write TOML file: {toml_file.name}")
run_cmd.append(f"--config_file")
run_cmd.append(tmpfilename)
run_cmd.append(fr'"{tmpfilename}"')
# Initialize a dictionary with always-included keyword arguments
kwargs_for_training = {

View File

@ -23,7 +23,7 @@ def utilities_tab(
use_shell_flag: bool = False,
):
with gr.Tab("Captioning"):
gradio_basic_caption_gui_tab(headless=headless)
gradio_basic_caption_gui_tab(headless=headless, use_shell=use_shell_flag)
gradio_blip_caption_gui_tab(headless=headless, use_shell=use_shell_flag)
gradio_blip2_caption_gui_tab(headless=headless)
gradio_git_caption_gui_tab(headless=headless, use_shell=use_shell_flag)

View File

@ -55,7 +55,7 @@ def caption_images(
run_cmd = [
get_executable_path("accelerate"),
"launch",
f"{scriptdir}/sd-scripts/finetune/tag_images_by_wd14_tagger.py",
fr'"{scriptdir}/sd-scripts/finetune/tag_images_by_wd14_tagger.py"',
]
# Uncomment and modify if needed
@ -112,10 +112,7 @@ def caption_images(
run_cmd.append("--use_rating_tags_as_last_tag")
# Add the directory containing the training data
run_cmd.append(train_data_dir)
# Log the command
log.info(" ".join(run_cmd))
run_cmd.append(fr'"{train_data_dir}"')
env = os.environ.copy()
env["PYTHONPATH"] = (
@ -124,8 +121,13 @@ def caption_images(
# Adding an example of an environment variable that might be relevant
env["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Run the command
subprocess.run(run_cmd, env=env, shell=use_shell)
# Reconstruct the safe command string for display
command_to_run = " ".join(run_cmd)
log.info(f"Executing command: {command_to_run} with shell={use_shell}")
# Run the command in the sd-scripts folder context
subprocess.run(command_to_run, env=env, shell=use_shell)
# Add prefix and postfix
add_pre_postfix(