switch cmdflags to settings

pull/112/head
Vladimir Mandic 2023-04-12 10:40:11 -04:00
parent 831f562394
commit 81b8294e93
47 changed files with 461 additions and 632 deletions

View File

@ -5,6 +5,12 @@
Fork is kept as up-to-date with origin as time allows
All code changes are merged upstream whenever possible
The idea behind the fork is to enable latest technologies and advances in text-to-image generation
*Sometimes this is not the same as "as simple as possible to use"*
If you are looking for an amazing, simple-to-use Stable Diffusion tool, I'd suggest [InvokeAI](https://invoke-ai.github.io/InvokeAI/) specifically due to its automated installer and ease of use
<br>
![screenshot](ui-screenshot.jpg)
<br>
@ -13,7 +19,9 @@ All code changes are merged upstream whenever possible
### Fork does differ in few things
- New logger
- New error and exception handlers
- Built-in performance profiler
- Updated **Python** libraries to latest known compatible versions
e.g. `accelerate`, `transformers`, `numpy`, etc.
- Includes opinionated **System** and **Options** configuration
@ -22,7 +30,6 @@ All code changes are merged upstream whenever possible
- Optimized startup
Gradio web server will be initialized much earlier while model load is done in the background
Faster model loading plus ability to fallback on corrupt models
- Includes **SD2** configuration files
- Uses simplified folder structure
e.g. `/train`, `/outputs/*`, `/models/*`, etc.
- Enhanced training templates
@ -37,13 +44,10 @@ All code changes are merged upstream whenever possible
- Optimized for `Torch` 2.0
- Runs with `SDP` memory attention enabled by default if supported by system
Fallback to `XFormers` if SDP is not supported
If either `SDP` or `XFormers` are not supported, falls back to usual cmd line arguments
### Removed
- Drops compatibility with `python` **3.7** and requires **3.9**
Recommended is **Python 3.10**
- Drops compatibility with older versions of `python` and requires **3.9**
- Drops localizations
- Drops automated tests
@ -69,41 +73,36 @@ Fork adds extra functionality:
- [Steps Animation](https://github.com/vladmandic/sd-extension-steps-animation)
- [Seed Travel](https://github.com/yownas/seed_travel)
*Note*: Extensions are automatically updated to latest version on `install`
<br>
<br>
## Install
### Install
1. Install `Python`, `Git`
1. Install first:
`Python`, `Git`
2. Clone repository
`git clone https://github.com/vladmandic/automatic`
> git clone https://github.com/vladmandic/automatic
### Run
## Run
Run desired startup script to install dependencies and extensions and start server:
- `launch.py`:
Main startup script
Run `python launch.py --help` for available options
- `launch-venv.bat` and `launch.venv.sh`:
- `webui.bat` and `webui.sh`:
Platform-specific wrapper scripts that start `launch.py` in a Python virtual environment
*Note*: Server can run without a virtual environment, but it is recommended to use one
**If you're unsure which launcher to use, this is the one you want**
- `launch.py`:
Main startup script
Can be used directly to start server in manually activated `venv` or to run it without `venv`
Run `python launch.py --help` for available options
- `setup.py`:
Main installer, used by `launch.py`
Can also be used directly to update repository or extensions
If running manually, make sure to activate `venv` first (if used)
Run `python setup.py --help` for available options
Setup details are logged to `setup.log`
- `webui.py`:
Main server script
Run `python webui.py --help` for available options
<br>

15
TODO.md
View File

@ -5,16 +5,9 @@
Stuff to be fixed...
- Reconnect WebUI
- Skip Torch
- Update requirements
- Update README
- Fix CSS
- Fix Firefox
- Implement installer in Python
- piexif, rich, kornia
- Move cross-optimization to settings
- Fix mediapipe
- Redo Extensions tab
- Fix Firefox CSS issues
- Integrate CSS into single file
## Integration
@ -44,4 +37,4 @@ Tech that can be integrated as part of the core workflow...
## Random
- Bunch of stuff:<https://pharmapsychotic.com/tools.html>
- Bunch of stuff: <https://pharmapsychotic.com/tools.html>

View File

@ -1 +1,2 @@
mediapipe
colormap

View File

@ -266,7 +266,6 @@ async def check(params):
log.info({ 'checking server options' })
options['training_xattention_optimizations'] = False
options['training_image_repeats_per_epoch'] = 1
if params.skipmodel:

View File

@ -105,7 +105,6 @@ def prepare_server():
server_options.options.save_optimizer_state = False
server_options.options.training_image_repeats_per_epoch = args.repeats
server_options.options.training_write_csv_every = 0
server_options.options.training_xattention_optimizations = False
sdapi.postsync('/sdapi/v1/options', server_options.options)
console.log(f'updated server options')

View File

@ -38,7 +38,7 @@
"dimensions_and_batch_together": true,
"directories_filename_pattern": "",
"directories_max_prompt_words": 8,
"disable_weights_auto_swap": false,
"disable_weights_auto_swap": true,
"disabled_extensions": [
"ScuNET"
],
@ -133,7 +133,6 @@
"memmon_poll_rate": 1,
"multiple_tqdm": false,
"n_rows": -1,
"no_dpmpp_sde_batch_determinism": false,
"outdir_extras_samples": "outputs/extras",
"outdir_grids": "",
"outdir_img2img_grids": "outputs/grids",
@ -160,7 +159,6 @@
"samplers_in_dropdown": true,
"samples_filename_pattern": "",
"samples_format": "jpg",
"samples_log_stdout": false,
"samples_save": true,
"save_images_add_number": true,
"save_images_before_color_correction": true,
@ -172,7 +170,7 @@
"save_training_settings_to_txt": false,
"save_txt": false,
"sd_checkpoint_cache": 0,
"sd_checkpoint_hash": "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
"sd_checkpoint_hash": "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516",
"sd_hypernetwork_strength": 1.0,
"sd_hypernetwork": "None",
"sd_lora": "",
@ -197,16 +195,13 @@
"training_tensorboard_flush_every": 120,
"training_tensorboard_save_images": false,
"training_write_csv_every": 0.0,
"training_xattention_optimizations": false,
"ui_extra_networks_tab_reorder": "",
"ui_reorder": "sampler, dimensions, cfg, seed, checkboxes, hires_fix, batch, scripts",
"unload_models_when_training": false,
"upcast_attn": false,
"upscaler_for_img2img": "SwinIR_4x",
"upscaling_max_images_in_cache": 5,
"use_old_emphasis_implementation": false,
"use_old_hires_fix_width_height": false,
"use_old_karras_scheduler_sigmas": false,
"use_original_name_batch": true,
"use_save_to_dirs_for_ui": false,
"use_upscaler_name_as_suffix": true,
@ -231,5 +226,12 @@
"image_browser_use_thumbnail": false,
"image_browser_thumbnail_size": 200.0,
"disable_all_extensions": "none",
"openpose3d_use_online_version": false
"openpose3d_use_online_version": false,
"cross_attention": "xFormers for cross-attention layers",
"cross_attention_optimization": "Scaled-Dot-Product",
"cross_attention_options": [],
"disable_nan_check": false,
"opt_channelslast": false,
"cudnn_benchmark": false,
"image_browser_debug_level": "0 - none"
}

View File

@ -40,7 +40,7 @@ class LDSR:
model = model.to(shared.device)
if half_attention:
model = model.half()
if shared.cmd_opts.opt_channelslast:
if shared.opts.opt_channelslast:
model = model.to(memory_format=torch.channels_last)
sd_hijack.model_hijack.hijack(model) # apply optimization

@ -1 +1 @@
Subproject commit 8f18058bf6d061f64047c1babab57e2293d3d1e3
Subproject commit 5d3465c4b2d5dacfdd6caa38b71b23b4f88401c0

@ -1 +1 @@
Subproject commit 687bfc2be7291a63d78c72c08c9f263b3cc4d47d
Subproject commit 8a7df4a911469637cab61cd7722fbaa6d6a2a77d

@ -1 +1 @@
Subproject commit 241c05f8c9d3c5abe637187e3c4bb46f17447029
Subproject commit e1885108055726638f0204aadf668064faf7eebc

@ -1 +1 @@
Subproject commit 59973b8dd532df46e62a014589b2574a5709deea
Subproject commit 972cc439225ae6993b8c43bf4b677c9f16e3ead1

@ -1 +1 @@
Subproject commit 4b9755bb946ddcce4b89021271504f032f59f9ec
Subproject commit 57040c311fc540bca9ca9d5ada2a9dd521f03f95

View File

@ -59,11 +59,7 @@ def check_run(command):
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
return setup.installed(package)
def repo_dir(name):
@ -84,27 +80,17 @@ def check_run_python(code):
def git_clone(url, dir, name, commithash=None):
if os.path.exists(dir):
if commithash is None:
return
current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
if current_hash == commithash:
return
run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
setup.clone(url, dir, commithash)
def run_extension_installer(dir):
setup.run_extension_installer(dir)
if __name__ == "__main__":
setup.run_setup(False)
setup.set_environment()
# setup.check_torch()
print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
setup.log.info(f"Server Arguments: {sys.argv[1:]}")
import webui
if '--nowebui' in sys.argv:
webui.api_only()
else:
webui.webui()
webui.webui()

View File

@ -609,7 +609,7 @@ class Api:
def train_embedding(self, args: dict):
try:
shared.state.begin()
apply_optimizations = shared.opts.training_xattention_optimizations
apply_optimizations = False
error = None
filename = ''
if not apply_optimizations:
@ -631,7 +631,7 @@ class Api:
try:
shared.state.begin()
shared.loaded_hypernetworks = []
apply_optimizations = shared.opts.training_xattention_optimizations
apply_optimizations = False
error = None
filename = ''
if not apply_optimizations:

View File

@ -2,90 +2,59 @@ import argparse
import os
from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file
parser = argparse.ArgumentParser()
parser = argparse.ArgumentParser(description="Stable Diffusion", formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=55,indent_increment=2,width=200))
parser.add_argument("-f", action='store_true', help=argparse.SUPPRESS) # allows running as root; implemented outside of webui
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=32, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'models/embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'train/templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers", default=True)
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-sdp-attention", action='store_true', help="disable scaled dot product cross-attention layer optimization", default=False)
parser.add_argument("--opt-sdp-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization", default=True)
parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization without memory efficient attention, makes image generation deterministic; requires PyTorch 2.*")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI", default=True)
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json'))
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=True)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=True)
parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)", default=True)
parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin(s) in the form of a comma-separated list (no spaces)", default=None)
parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None)
parser.add_argument("--ui-settings-file", type=str, help=argparse.SUPPRESS, default=os.path.join(data_path, 'config.json'))
parser.add_argument("--ui-config-file", type=str, help=argparse.SUPPRESS, default=os.path.join(data_path, 'ui-config.json'))
parser.add_argument("--config", type=str, default=sd_default_config, help=argparse.SUPPRESS)
parser.add_argument("--theme", type=str, help=argparse.SUPPRESS, default='dark')
parser.add_argument("--no-half", action='store_true', help="Do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="Do not switch the VAE model to 16-bit floats")
parser.add_argument("--precision", type=str, help="Evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--medvram", action='store_true', help="Enable model optimizations for sacrificing a little speed for low memory usage")
parser.add_argument("--lowvram", action='store_true', help="Enable model optimizations for sacrificing a lot of speed for lowest memory usage")
parser.add_argument("--lowram", action='store_true', help="Load checkpoint weights to VRAM instead of RAM")
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="Path to checkpoint of stable diffusion model to load immediately",)
parser.add_argument('--vae', type=str, help='Path to checkpoint of stable diffusion VAE model to load immediately', default=None)
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="Base path where all user data is stored")
# parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
# parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
# parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
# parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'models/embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
# parser.add_argument("--embeddings-templates-dir", type=str, default=os.path.join(script_path, 'train/templates'), help="directory with textual inversion templates")
# parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
# parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
# parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
# parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
# parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
# parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
# parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--allow-code", action='store_true', help="Allow custom script execution")
parser.add_argument("--share", action='store_true', help="Enable to make the UI accessible through Gradio site")
parser.add_argument("--enable-insecure", action='store_true', help="Enable extensions tab regardless of other options")
parser.add_argument("--use-cpu", nargs='+', help="Force use CPU for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="Launch web server using public IP address")
parser.add_argument("--port", type=int, help="Launch web server with given server port", default=None)
parser.add_argument("--hide-ui-dir-config", action='store_true', help="Hide directory configuration from UI", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="Disable editing settings", default=False)
parser.add_argument("--gradio-auth", type=str, help='Set Gradio authentication like "username:password,username:password""', default=None)
parser.add_argument("--gradio-auth-path", type=str, help='Set Gradio authentication using file', default=None)
parser.add_argument("--autolaunch", action='store_true', help="Open the UI URL in the system's default browser upon launch", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="Do not output progressbars to console", default=True)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="Disable checking models for malicious code", default=True)
parser.add_argument("--api-auth", type=str, help='Set API authentication', default=None)
parser.add_argument("--api-log", action='store_true', help="Enable logging of all API requests")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use", default=None)
parser.add_argument("--cors-origins", type=str, help="Allowed CORS origin(s) in the form of a comma-separated list", default=None)
parser.add_argument("--cors-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None)
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
parser.add_argument("--profile", action='store_true', help="run profiler")
# used by setup.py
parser.add_argument('--quick', default = False, action='store_true', help = "Skip installing if setup.log is newer than repo timestamp, default: %(default)s")
parser.add_argument('--upgrade', default = False, action='store_true', help = "Upgrade main repository to latest version, default: %(default)s")
parser.add_argument('--update', default = False, action='store_true', help = "Update all extensions and submodules, default: %(default)s")
parser.add_argument('--skip-extensions', default = False, action='store_true', help = "Skips running individual extension installers, default: %(default)s")
parser.add_argument("--no-hashing", action='store_true', help="Disable sha256 hashing of checkpoints", default=False)
parser.add_argument("--no-download-sd-model", action='store_true', help="Disable download of default model even if no model is found", default=False)
parser.add_argument("--profile", action='store_true', help="Run profiler, default: %(default)s")

View File

@ -62,16 +62,19 @@ def torch_gc():
def enable_tf32():
if torch.cuda.is_available():
# enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
# see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]):
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def enable_cudnn_benchmark():
from modules import shared
if shared.opts.cudnn_benchmark:
torch.backends.cudnn.benchmark = True
else:
torch.backends.cudnn.benchmark = False
errors.run(enable_tf32, "Enabling TF32")
@ -106,13 +109,10 @@ def randn_without_seed(shape):
def autocast(disable=False):
from modules import shared
if disable:
return contextlib.nullcontext()
if dtype == torch.float32 or shared.cmd_opts.precision == "full":
return contextlib.nullcontext()
return torch.autocast("cuda")
@ -126,27 +126,19 @@ class NansException(Exception):
def test_for_nans(x, where):
from modules import shared
if shared.cmd_opts.disable_nan_check:
if shared.opts.disable_nan_check:
return
if not torch.all(torch.isnan(x)).item():
return
if where == "unet":
message = "A tensor with all NaNs was produced in Unet."
if not shared.cmd_opts.no_half:
message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."
elif where == "vae":
message = "A tensor with all NaNs was produced in VAE."
if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae:
message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this."
else:
message = "A tensor with all NaNs was produced."
message += " Use --disable-nan-check commandline argument to disable this check."
raise NansException(message)

View File

@ -223,7 +223,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
if re.search(regex, key):
theta_0.pop(key, None)
ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path
ckpt_dir = shared.opts.ckpt_dir or sd_models.model_path
filename = filename_generator() if custom_name == '' else custom_name
filename += ".inpainting" if result_is_inpainting_model else ""

View File

@ -468,7 +468,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
assert name, "Name cannot be empty!"
fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt")
fn = os.path.join(shared.opts.hypernetwork_dir, f"{name}.pt")
if not overwrite_old:
assert not os.path.exists(fn), f"file {fn} already exists"
@ -515,7 +515,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
shared.state.job_count = steps
hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
filename = os.path.join(shared.opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
unload = shared.opts.unload_models_when_training
@ -780,7 +780,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
filename = os.path.join(shared.opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
hypernetwork.optimizer_name = optimizer_name
if shared.opts.save_optimizer_state:
hypernetwork.optimizer_state_dict = optimizer.state_dict()

View File

@ -587,20 +587,6 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
image.already_saved_as = fullfn
oversize = image.width > opts.target_side_length or image.height > opts.target_side_length
if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024):
ratio = image.width / image.height
if oversize and ratio > 1:
image = image.resize((round(opts.target_side_length), round(image.height * opts.target_side_length / image.width)), LANCZOS)
elif oversize:
image = image.resize((round(image.width * opts.target_side_length / image.height), round(opts.target_side_length)), LANCZOS)
try:
_atomically_save_image(image, fullfn_without_extension, ".jpg")
except Exception as e:
errors.display(e, "saving image as downscaled JPG")
if opts.save_txt and info is not None:
txt_fullfn = f"{fullfn_without_extension}.txt"
with open(txt_fullfn, "w", encoding="utf8") as file:

View File

@ -153,9 +153,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
p.scripts = modules.scripts.scripts_txt2img
p.script_args = args
if shared.cmd_opts.enable_console_prompts:
print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
if mask:
p.extra_generation_params["Mask blur"] = mask_blur
@ -175,8 +172,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
shared.total_tqdm.clear()
generation_info_js = processed.js()
if opts.samples_log_stdout:
print(generation_info_js)
if opts.do_not_show_images:
processed.images = []

View File

@ -1,5 +1,6 @@
import sys
from modules import shared
# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it
if "--xformers" not in "".join(sys.argv):
if shared.opts.cross_attention_optimization != "xFormers":
sys.modules["xformers"] = None

View File

@ -108,9 +108,9 @@ class InterrogateModels:
import clip
if self.running_on_cpu:
model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.cmd_opts.clip_models_path)
model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.opts.clip_models_path)
else:
model, preprocess = clip.load(clip_model_name, download_root=shared.cmd_opts.clip_models_path)
model, preprocess = clip.load(clip_model_name, download_root=shared.opts.clip_models_path)
model.eval()
model = model.to(devices.device_interrogate)

@ -1 +1 @@
Subproject commit b5c60d7d62d6bb4a174ac09327dc517fc4446523
Subproject commit 5050971ac687dca70ba0486a583d283e8ae324e2

@ -1 +1 @@
Subproject commit ec1b758ab416e9d2f3590d5dd961b9ec27a0dc60
Subproject commit 66a7fafad3cbb14378fe548fca0153709cc59968

View File

@ -1,26 +0,0 @@
from pyngrok import ngrok, conf, exception


def connect(token, port, region):
    """Open an ngrok tunnel to localhost:<port> and print the public URL.

    token may be plain ``authtoken`` or ``authtoken:username:password``;
    in the latter form the username/password pair is passed to ngrok as
    HTTP basic-auth for the tunnel.
    """
    account = None
    if token is None:
        token = 'None'  # placeholder accepted by pyngrok when no authtoken is configured
    elif ':' in token:
        # token = authtoken:username:password
        pieces = token.split(':')
        account = pieces[1] + ':' + pieces[-1]
        token = pieces[0]
    config = conf.PyngrokConfig(auth_token=token, region=region)
    tunnel_kwargs = {'pyngrok_config': config, 'bind_tls': True}
    if account is not None:
        tunnel_kwargs['auth'] = account
    try:
        public_url = ngrok.connect(port, **tunnel_kwargs).public_url
    except exception.PyngrokNgrokError:
        print(f'Invalid ngrok authtoken, ngrok connection aborted.\n'
              f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
    else:
        print(f'ngrok connected to localhost:{port}! URL: {public_url}\n'
              'You can use this link after the launch is complete.')

View File

@ -565,7 +565,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
def infotext(iteration=0, position_in_batch=0):
return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
if os.path.exists(opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings()
if p.scripts is not None:

View File

@ -46,7 +46,7 @@ class UpscalerRealESRGAN(Upscaler):
scale=info.scale,
model_path=info.local_data_path,
model=info.model(),
half=not cmd_opts.no_half and not cmd_opts.upcast_sampling,
half=not cmd_opts.no_half and not opts.upcast_sampling,
tile=opts.ESRGAN_tile,
tile_pad=opts.ESRGAN_tile_overlap,
)

View File

@ -5,7 +5,8 @@ from types import MethodType
import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules.shared import cmd_opts, opts
from modules.shared_items import list_crossattention
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
from rich import print
@ -38,37 +39,40 @@ def apply_optimizations():
optimization_method = None
can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) and not cmd_opts.disable_sdp_attention # not everyone has torch 2.x to use sdp
can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention"))
if cmd_opts.opt_sdp_no_mem_attention and can_use_sdp:
print("Applying scaled dot product cross attention optimization (without memory efficient attention).")
if opts.cross_attention_optimization == "Disable cross-attention layer optimization":
print("Cross-attention optimization disabled")
optimization_method = 'none'
if can_use_sdp and opts.cross_attention_optimization == "Scaled-Dot-Product" and 'SDP disable memory attention' in opts.cross_attention_options:
print("Applying scaled dot product cross attention optimization (without memory efficient attention)")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_no_mem_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sdp_no_mem_attnblock_forward
optimization_method = 'sdp-no-mem'
elif cmd_opts.opt_sdp_attention and can_use_sdp:
print("Applying scaled dot product cross attention optimization.")
elif can_use_sdp and opts.cross_attention_optimization == "Scaled-Dot-Product":
print("Applying scaled dot product cross attention optimization")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sdp_attnblock_forward
optimization_method = 'sdp'
elif cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
print("Applying xformers cross attention optimization.")
if shared.xformers_available and opts.cross_attention_optimization == "xFormers":
print("Applying xformers cross attention optimization")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
optimization_method = 'xformers'
elif cmd_opts.opt_sub_quad_attention:
print("Applying sub-quadratic cross attention optimization.")
if opts.cross_attention_optimization == "Sub-quadratic":
print("Applying sub-quadratic cross attention optimization")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.sub_quad_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sub_quad_attnblock_forward
optimization_method = 'sub-quadratic'
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
if opts.cross_attention_optimization == "Split attention":
print("Applying split attention optimization")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
optimization_method = 'v1'
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not cmd_opts.opt_split_attention and not torch.cuda.is_available()):
print("Applying cross attention optimization (InvokeAI).")
if opts.cross_attention_optimization == "InvokeAI's":
print("Applying InvokeAI's cross attention optimization")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
optimization_method = 'invokeai'
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
if opts.cross_attention_optimization == "Doggettx's":
print("Applying cross attention optimization (Doggettx).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
@ -149,7 +153,7 @@ class StableDiffusionModelHijack:
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
def __init__(self):
self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
self.embedding_db.add_embedding_dir(opts.embeddings_dir)
def hijack(self, m):
if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:

View File

@ -205,10 +205,6 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
"""
if opts.use_old_emphasis_implementation:
import modules.sd_hijack_clip_old
return modules.sd_hijack_clip_old.forward_old(self, texts)
batch_chunks, token_count = self.process_texts(texts)
used_embeddings = {}

View File

@ -17,8 +17,6 @@ class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWit
self.id_pad = 0
def tokenize(self, texts):
assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'
tokenized = [tokenizer.encode(text) for text in texts]
return tokenized

View File

@ -14,7 +14,7 @@ from modules.hypernetworks import hypernetwork
from .sub_quadratic_attention import efficient_dot_product_attention
if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
if shared.opts.cross_attention_optimization == "xFormers":
try:
import xformers.ops
shared.xformers_available = True
@ -258,7 +258,7 @@ def sub_quad_attention_forward(self, x, context=None, mask=None):
if shared.opts.upcast_attn:
q, k = q.float(), k.float()
x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
x = sub_quad_attention(q, k, v, q_chunk_size=shared.opts.sub_quad_q_chunk_size, kv_chunk_size=shared.opts.sub_quad_kv_chunk_size, chunk_threshold=shared.opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
x = x.to(dtype)
@ -307,7 +307,7 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
def get_xformers_flash_attention_op(q, k, v):
if not shared.cmd_opts.xformers_flash_attention:
if 'xFormers enable flash Attention' not in shared.opts.cross_attention_options:
return None
try:
@ -506,7 +506,7 @@ def sub_quad_attnblock_forward(self, x):
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()
out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
out = sub_quad_attention(q, k, v, q_chunk_size=shared.opts.sub_quad_q_chunk_size, kv_chunk_size=shared.opts.sub_quad_kv_chunk_size, chunk_threshold=shared.opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
out = rearrange(out, 'b (h w) c -> b c h w', h=h)
out = self.proj_out(out)
return x + out

View File

@ -33,8 +33,8 @@ class CheckpointInfo:
self.filename = filename
abspath = os.path.abspath(filename)
if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
if shared.opts.ckpt_dir is not None and abspath.startswith(shared.opts.ckpt_dir):
name = abspath.replace(shared.opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
@ -115,7 +115,7 @@ def list_models():
else:
model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
if os.path.exists(cmd_ckpt):
checkpoint_info = CheckpointInfo(cmd_ckpt)
@ -171,8 +171,8 @@ def select_checkpoint():
if shared.cmd_opts.ckpt is not None:
print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
if shared.opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.opts.ckpt_dir)}", file=sys.stderr)
print("Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
@ -293,7 +293,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
# cache newly loaded model
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
if shared.cmd_opts.opt_channelslast:
if shared.opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
timer.record("channels")
@ -305,7 +305,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
# with --upcast-sampling, don't convert the depth model weights to float16
if shared.cmd_opts.upcast_sampling and depth_model:
if shared.opts.upcast_sampling and depth_model:
model.depth_model = None
model.half()
@ -316,7 +316,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
devices.dtype_unet = model.model.diffusion_model.dtype
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
devices.unet_needs_upcast = shared.opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
model.first_stage_model.to(devices.dtype_vae)
@ -388,7 +388,7 @@ def repair_config(sd_config):
if shared.cmd_opts.no_half:
sd_config.model.params.unet_config.params.use_fp16 = False
elif shared.cmd_opts.upcast_sampling:
elif shared.opts.upcast_sampling:
sd_config.model.params.unet_config.params.use_fp16 = True
if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
@ -416,8 +416,10 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
current_checkpoint_info = shared.sd_model.sd_checkpoint_info
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
devices.enable_cudnn_benchmark()
gc.collect()
devices.torch_gc()
if already_loaded_state_dict is not None:
state_dict = already_loaded_state_dict

View File

@ -271,7 +271,7 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigma_min, sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
else:
@ -283,10 +283,6 @@ class KDiffusionSampler:
return sigmas
def create_noise_sampler(self, x, sigmas, p):
"""For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
if shared.opts.no_dpmpp_sde_batch_determinism:
return None
from k_diffusion.sampling import BrownianTreeNoiseSampler
sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]

View File

@ -8,7 +8,7 @@ import glob
from copy import deepcopy
from rich import print
vae_path = os.path.abspath(os.path.join(paths.models_path, "VAE"))
vae_path = shared.opts.vae_dir
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
vae_dict = {}
@ -64,18 +64,18 @@ def refresh_vae_list():
os.path.join(vae_path, '**/*.safetensors'),
]
if shared.cmd_opts.ckpt_dir is not None and os.path.isdir(shared.cmd_opts.ckpt_dir):
if shared.opts.ckpt_dir is not None and os.path.isdir(shared.opts.ckpt_dir):
paths += [
os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.ckpt'),
os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.pt'),
os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.safetensors'),
os.path.join(shared.opts.ckpt_dir, '**/*.vae.ckpt'),
os.path.join(shared.opts.ckpt_dir, '**/*.vae.pt'),
os.path.join(shared.opts.ckpt_dir, '**/*.vae.safetensors'),
]
if shared.cmd_opts.vae_dir is not None and os.path.isdir(shared.cmd_opts.vae_dir):
if shared.opts.vae_dir is not None and os.path.isdir(shared.opts.vae_dir):
paths += [
os.path.join(shared.cmd_opts.vae_dir, '**/*.ckpt'),
os.path.join(shared.cmd_opts.vae_dir, '**/*.pt'),
os.path.join(shared.cmd_opts.vae_dir, '**/*.safetensors'),
os.path.join(shared.opts.vae_dir, '**/*.ckpt'),
os.path.join(shared.opts.vae_dir, '**/*.pt'),
os.path.join(shared.opts.vae_dir, '**/*.safetensors'),
]
candidates = []
@ -97,8 +97,8 @@ def find_vae_near_checkpoint(checkpoint_file):
def resolve_vae(checkpoint_file):
if shared.cmd_opts.vae_path is not None:
return shared.cmd_opts.vae_path, 'from commandline argument'
if shared.cmd_opts.vae is not None:
return shared.cmd_opts.vae, 'from commandline argument'
is_automatic = shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config

View File

@ -13,7 +13,7 @@ import modules.interrogate
import modules.memmon
import modules.styles
import modules.devices as devices
from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
from modules import script_loading, errors, ui_components, shared_items, cmd_args
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir
demo = None
@ -63,34 +63,22 @@ ui_reorder_categories = [
"scripts",
]
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
device = devices.device
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
hypernetworks = {}
loaded_hypernetworks = []
sd_upscalers = []
sd_model = None
clip_model = None
def reload_hypernetworks():
from modules.hypernetworks import hypernetwork
global hypernetworks
hypernetworks = hypernetwork.list_hypernetworks(opts.hypernetwork_dir)
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
sd_upscalers = []
sd_model = None
clip_model = None
class State:
skipped = False
@ -190,8 +178,7 @@ class State:
state = State()
state.server_start = time.time()
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
prompt_styles = modules.styles.StyleDatabase('styles.csv')
interrogator = modules.interrogate.InterrogateModels("interrogate")
@ -235,98 +222,6 @@ tab_names = []
options_templates = {}
options_templates.update(options_section(('saving-images', "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
"save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
"save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
"export_for_4chan": OptionInfo(True, "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG"),
"img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
"target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
"img_max_size_mp": OptionInfo(200, "Maximum image size, in megapixels", gr.Number),
"use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
"use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
"do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
"temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
"clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"show_warnings": OptionInfo(False, "Show warnings in console."),
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
"print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
"save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
"training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
"training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
"training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
@ -344,25 +239,121 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
"CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
"cross_attention_optimization": OptionInfo("Scaled-Dot-Product", "Cross-attention optimization method", gr.Radio, lambda: {"choices": shared_items.list_crossattention() }),
"cross_attention_options": OptionInfo([], "Cross-attention advanced options", gr.CheckboxGroup, lambda: {"choices": ['xFormers enable flash Attention', 'SDP disable memory attention']}),
"disable_nan_check": OptionInfo(False, "Do not check if produced images/latent spaces have NaN values"),
"opt_channelslast": OptionInfo(False, "Use channels last as torch memory format "),
"cudnn_benchmark": OptionInfo(False, "Enable CUDA cuDNN benchmark feature"),
"sub_quad_q_chunk_size": OptionInfo(512, "Sub-quadratic cross-attention query chunk size for the layer optimization to use", gr.Slider, {"minimum": 16, "maximum": 8192, "step": 8}),
"sub_quad_kv_chunk_size": OptionInfo(512, "Sub-quadratic cross-attentionkv chunk size for the sub-quadratic cross-attention layer optimization to use", gr.Slider, {"minimum": 0, "maximum": 8192, "step": 8}),
"sub_quad_chunk_threshold": OptionInfo(80, "Sub-quadratic cross-attention percentage of VRAM chunking threshold", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}),
"always_batch_cond_uncond": OptionInfo(False, "Disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram"),
"upcast_sampling": OptionInfo(False, "Enable upcast sampling. Usually produces similar results to --no-half with better performance while using less memory"),
}))
options_templates.update(options_section(('compatibility', "Compatibility"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
"no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
options_templates.update(options_section(('system-paths', "System Paths"), {
"ckpt_dir": OptionInfo(os.path.join(models_path, 'Stable-diffusion'), "Path to directory with stable diffusion checkpoints"),
"vae_dir": OptionInfo(os.path.join(models_path, 'VAE'), "Path to directory with VAE files"),
"embeddings_dir": OptionInfo(os.path.join(models_path, 'embeddings'), "Embeddings directory for textual inversion"),
"embeddings_templates_dir": OptionInfo(os.path.join(script_path, 'train/templates'), "Embeddings train templates directory"),
"hypernetwork_dir": OptionInfo(os.path.join(models_path, 'hypernetworks'), "Hypernetwork directory"),
"codeformer_models_path": OptionInfo(os.path.join(models_path, 'Codeformer'), "Path to directory with codeformer model file(s)."),
"gfpgan_models_path": OptionInfo(os.path.join(models_path, 'GFPGAN'), "Path to directory with GFPGAN model file(s)"),
"esrgan_models_path": OptionInfo(os.path.join(models_path, 'ESRGAN'), "Path to directory with ESRGAN model file(s)"),
"bsrgan_models_path": OptionInfo(os.path.join(models_path, 'BSRGAN'), "Path to directory with BSRGAN model file(s)"),
"realesrgan_models_path": OptionInfo(os.path.join(models_path, 'RealESRGAN'), "Path to directory with RealESRGAN model file(s)"),
"clip_models_path": OptionInfo(os.path.join(models_path, 'CLIP'), "Path to directory with CLIP model file(s)"),
# "gfpgan_model": OptionInfo("", "GFPGAN model file name"),
}))
options_templates.update(options_section(('saving-images', "Image options"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('jpg', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
"save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
"grid_save": OptionInfo(True, "Always save all generated image grids"),
"grid_format": OptionInfo('jpg', 'File format for grids'),
"grid_extended_filename": OptionInfo(True, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(True, "Prevent empty spots in grid (when set to autodetect)"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(True, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(True, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(True, "Save a copy of image before applying color correction to img2img results"),
"save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
"save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
"jpeg_quality": OptionInfo(85, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
"img_max_size_mp": OptionInfo(200, "Maximum image size, in megapixels", gr.Number),
"use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
"use_upscaler_name_as_suffix": OptionInfo(True, "Use upscaler name as filename suffix in the extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
"temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
"clean_temp_dir_at_start": OptionInfo(True, "Cleanup non-default temporary directory when starting webui"),
}))
options_templates.update(options_section(('saving-paths', "Image Paths"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/text", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/image", 'Output directory for img2img images', component_args=hide_dirs),
"outdir_extras_samples": OptionInfo("outputs/extras", 'Output directory for images from extras tab', component_args=hide_dirs),
"outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
"outdir_txt2img_grids": OptionInfo("outputs/grids", 'Output directory for txt2img grids', component_args=hide_dirs),
"outdir_img2img_grids": OptionInfo("outputs/grids", 'Output directory for img2img grids', component_args=hide_dirs),
"outdir_save": OptionInfo("outputs/save", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo("SwinIR_4x", "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
"use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.2, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(2, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"multiple_tqdm": OptionInfo(False, "Add a second progress bar to the console that shows progress for an entire job."),
"print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(True, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
"save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(0, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
"training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
"training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_return_ranks": OptionInfo(True, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
"interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types),
"interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
"interrogate_clip_min_length": OptionInfo(32, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(192, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(2048, "CLIP: maximum number of lines in text file (0 = No limit)"),
"interrogate_clip_skip_categories": OptionInfo(["artists", "movements", "flavors"], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types),
"interrogate_deepbooru_score_threshold": OptionInfo(0.65, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(False, "Interrogate: deepbooru sort alphabetically"),
"deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
"deepbooru_escape": OptionInfo(True, "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)"),
"deepbooru_filter_tags": OptionInfo("", "filter out those tags from deepbooru output (separated by comma)"),
@ -390,30 +381,27 @@ options_templates.update(options_section(('ui', "User interface"), {
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
"samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"),
"dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"),
"show_progress_in_title": OptionInfo(False, "Show generation progress in window title."),
"keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"),
"hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}),
"ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
"localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
}))
options_templates.update(options_section(('ui', "Live previews"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
"live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
"show_progress_every_n_steps": OptionInfo(-1, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
"show_progress_type": OptionInfo("Full", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
"live_preview_content": OptionInfo("Combined", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
"live_preview_refresh_period": OptionInfo(1000, "Progressbar/preview update period, in milliseconds")
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}),
"hide_samplers": OptionInfo(["Euler", "LMS", "Heun", "DPM2", "DPM2 a", "DPM++ 2M", "DPM fast", "DPM adaptive", "DPM++ 2S a Karras", "DPM++ 2S a", "DPM++ SDE Karras", "DPM2 a Karras", "LMS Karras"], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}),
"eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
@ -589,6 +577,16 @@ class Options:
opts = Options()
batch_cond_uncond = opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
os.makedirs(opts.hypernetwork_dir, exist_ok=True)
hypernetworks = {}
loaded_hypernetworks = []
if os.path.exists(config_filename):
opts.load(config_filename)

View File

@ -21,3 +21,17 @@ def refresh_vae_list():
import modules.sd_vae
modules.sd_vae.refresh_vae_list()
def list_crossattention():
return [
"Disable cross-attention layer optimization",
"xFormers",
"Scaled-Dot-Product",
"Doggettx's",
"InvokeAI's",
"Sub-quadratic",
"Split attention"
]
# parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
# parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
# parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)

View File

@ -29,7 +29,7 @@ textual_inversion_templates = {}
def list_textual_inversion_templates():
textual_inversion_templates.clear()
for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
for root, dirs, fns in os.walk(shared.opts.embeddings_templates_dir):
for fn in fns:
path = os.path.join(root, fn)
@ -270,7 +270,7 @@ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
fn = os.path.join(shared.opts.embeddings_dir, f"{name}.pt")
if not overwrite_old:
assert not os.path.exists(fn), f"file {fn} already exists"
@ -365,7 +365,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
filename = os.path.join(shared.opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, embedding_name)
unload = shared.opts.unload_models_when_training
@ -623,7 +623,7 @@ Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
filename = os.path.join(shared.opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
except Exception:
shared.exception()

View File

@ -25,7 +25,7 @@ def train_embedding(*args):
assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
apply_optimizations = shared.opts.training_xattention_optimizations
apply_optimizations = False
try:
if not apply_optimizations:
sd_hijack.undo_optimizations()

View File

@ -46,9 +46,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
p.scripts = modules.scripts.scripts_txt2img
p.script_args = args
if cmd_opts.enable_console_prompts:
print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
processed = modules.scripts.scripts_txt2img.run(p, *args)
if processed is None:
@ -59,8 +56,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
shared.total_tqdm.clear()
generation_info_js = processed.js()
if opts.samples_log_stdout:
print(generation_info_js)
if opts.do_not_show_images:
processed.images = []

View File

@ -42,7 +42,7 @@ import modules.hypernetworks.ui
from modules.generation_parameters_copypaste import image_from_url_text
import modules.extras
warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@ -53,15 +53,6 @@ if not cmd_opts.share and not cmd_opts.listen:
gradio.utils.version_check = lambda: None
gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
if cmd_opts.ngrok is not None:
import modules.ngrok as ngrok
print('ngrok authtoken detected, trying to connect...')
ngrok.connect(
cmd_opts.ngrok,
cmd_opts.port if cmd_opts.port is not None else 7860,
cmd_opts.ngrok_region
)
def gr_show(visible=True):
return {"visible": visible, "__type__": "update"}
@ -108,7 +99,7 @@ def add_style(name: str, prompt: str, negative_prompt: str):
shared.prompt_styles.styles[style.name] = style
# Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
shared.prompt_styles.save_styles('styles.csv')
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)]
@ -383,14 +374,9 @@ def create_output_panel(tabname, outdir):
def create_sampler_and_steps_selection(choices, tabname):
if opts.samplers_in_dropdown:
with FormRow(elem_id=f"sampler_selection_{tabname}"):
sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
else:
with FormGroup(elem_id=f"sampler_selection_{tabname}"):
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
with FormRow(elem_id=f"sampler_selection_{tabname}"):
sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
return steps, sampler_index
@ -461,11 +447,10 @@ def create_ui():
with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn")
if opts.dimensions_and_batch_together:
with gr.Column(elem_id="txt2img_column_batch"):
with FormRow(elem_id="txt2img_row_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
with gr.Column(elem_id="txt2img_column_batch"):
with FormRow(elem_id="txt2img_row_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
elif category == "cfg":
with FormRow():
@ -495,12 +480,6 @@ def create_ui():
hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
elif category == "batch":
if not opts.dimensions_and_batch_together:
with FormRow(elem_id="txt2img_column_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
elif category == "override_settings":
with FormRow(elem_id="txt2img_override_settings_row") as row:
override_settings = create_override_settings_dropdown('txt2img', row)
@ -766,12 +745,6 @@ def create_ui():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
elif category == "batch":
if not opts.dimensions_and_batch_together:
with FormRow(elem_id="img2img_column_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
elif category == "override_settings":
with FormRow(elem_id="img2img_override_settings_row") as row:
override_settings = create_override_settings_dropdown('img2img', row)
@ -1426,8 +1399,6 @@ def create_ui():
with gr.Row():
with gr.Column(scale=6):
settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
with gr.Column():
restart_gradio = gr.Button(value='Reload UI', variant='primary', elem_id="settings_restart_gradio")
result = gr.HTML(elem_id="settings_result")
@ -1474,7 +1445,6 @@ def create_ui():
with gr.TabItem("Actions"):
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
with gr.Row():
unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model")
@ -1511,13 +1481,6 @@ def create_ui():
_js='function(){}'
)
download_localization.click(
fn=lambda: None,
inputs=[],
outputs=[],
_js='download_localization'
)
def reload_scripts():
modules.scripts.reload_script_body_only()
reload_javascript() # need to refresh the html page
@ -1528,16 +1491,6 @@ def create_ui():
outputs=[]
)
def request_restart():
shared.state.interrupt()
shared.state.need_restart = True
restart_gradio.click(
fn=request_restart,
_js='restart_reload',
inputs=[],
outputs=[],
)
interfaces = [
(txt2img_interface, "From Text", "txt2img"),

View File

@ -27,5 +27,5 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
}
def allowed_directories_for_previews(self):
return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
return [v for v in [shared.opts.ckpt_dir, sd_models.model_path] if v is not None]

View File

@ -26,5 +26,5 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
}
def allowed_directories_for_previews(self):
return [shared.cmd_opts.hypernetwork_dir]
return [shared.opts.hypernetwork_dir]

View File

@ -8,7 +8,6 @@ basicsr
bitsandbytes
blendmodes
clean-fid
colormap
easydev
extcolors
facexlib
@ -31,7 +30,6 @@ opencv-python
piexif
Pillow
psutil
pyngrok
pyyaml
requests
resize-right

183
setup.py
View File

@ -4,37 +4,73 @@ import json
import time
import subprocess
import argparse
from modules.cmd_args import parser
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s | %(message)s', filename='setup.log', filemode='w')
log = logging.getLogger("sd-setup")
try:
from rich.logging import RichHandler
from rich.console import Console
from rich.pretty import install as pretty_install
from rich.traceback import install as traceback_install
rh = RichHandler(show_time=True, omit_repeated_times=False, show_level=True, show_path=True, markup=True, rich_tracebacks=True, log_time_format='%H:%M:%S-%f')
rh.setLevel(logging.INFO)
log.addHandler(rh)
# logging.basicConfig(level=logging.INFO, format='%(message)s', handlers=[RichHandler(show_time=True, omit_repeated_times=False, show_level=True, show_path=True, markup=True, rich_tracebacks=True, log_time_format='%H:%M:%S-%f')])
console = Console(log_time=True, log_time_format='%H:%M:%S-%f')
pretty_install(console=console)
traceback_install(console=console, extra_lines=1, width=console.width, word_wrap=False, indent_guides=False, suppress=[])
except:
pass
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
log.addHandler(sh)
# command line args
parser = argparse.ArgumentParser(description = 'Setup for SD WebUI')
parser.add_argument('--debug', default = False, action='store_true', help = "Run installer with debug logging, default: %(default)s")
parser.add_argument('--quick', default = False, action='store_true', help = "Skip installing if setup.log is newer than repo timestamp, default: %(default)s")
parser.add_argument('--upgrade', default = False, action='store_true', help = "Upgrade main repository to latest version, default: %(default)s")
parser.add_argument('--update', default = False, action='store_true', help = "Update all extensions and submodules, default: %(default)s")
parser.add_argument('--noupdate', default = False, action='store_true', help = "Skip update extensions and submodules, default: %(default)s")
parser.add_argument('--skip-extensions', default = False, action='store_true', help = "Skips running individual extension installers, default: %(default)s")
args = parser.parse_args()
# setup console and file logging: file handler always at DEBUG, console at INFO unless --debug
if os.path.isfile('setup.log'):
    os.remove('setup.log')
    time.sleep(0.1) # prevent race condition
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s | %(pathname)s | %(message)s', filename='setup.log', filemode='a', encoding='utf-8', force=True)
log = logging.getLogger("sd")
print=print # capture the builtin print before rich may rebind the name below
try: # we may not have rich on the first run
    from rich import print
    from rich.logging import RichHandler
    from rich.console import Console
    from rich.pretty import install as pretty_install
    from rich.traceback import install as traceback_install
    console = Console(log_time=True, log_time_format='%H:%M:%S-%f')
    pretty_install(console=console)
    traceback_install(console=console, extra_lines=1, width=console.width, word_wrap=False, indent_guides=False, suppress=[])
    rh = RichHandler(show_time=True, omit_repeated_times=False, show_level=True, show_path=False, markup=False, rich_tracebacks=True, log_time_format='%H:%M:%S-%f', level=logging.DEBUG if args.debug else logging.INFO, console=console)
    log.addHandler(rh)
except Exception: # narrowed from bare except so Ctrl+C / SystemExit are not silently swallowed
    pass
# NOTE(review): this plain StreamHandler is added even when the RichHandler above was
# installed, which looks like it would duplicate console output — confirm intended
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG if args.debug else logging.INFO)
log.addHandler(sh)
def installed(package):
    """Return True if every requirement in the space-separated *package* spec is installed.

    Pip flags (tokens starting with '-'), VCS/URL requirements and stray '='
    tokens are ignored. A 'name==version' token must also match the installed
    version exactly; a bare 'name' only needs to be present.
    """
    import pkg_resources
    ok = True
    pkgs = [] # defined up-front so the except handler can reference it safely
    try:
        pkgs = [p for p in package.split() if not p.startswith('-') and not p.startswith('git+') and not p.startswith('http') and not p.startswith('=')]
        for pkg in pkgs:
            p = pkg.split('==')
            spec = pkg_resources.working_set.by_key.get(p[0], None) # more reliable than importlib
            if spec is None:
                spec = pkg_resources.working_set.by_key.get(p[0].lower(), None) # check name variations
            if spec is None:
                spec = pkg_resources.working_set.by_key.get(p[0].replace('_', '-'), None) # check name variations
            if spec is None:
                # package not installed at all; the previous code called
                # pkg_resources.get_distribution() here, which raised an uncaught
                # DistributionNotFound instead of returning False
                ok = False
                continue
            # read the version from the spec we actually found, so name variations
            # (case, underscore vs dash) resolve correctly
            version = spec.version
            if ok and len(p) > 1:
                ok = ok and version == p[1]
                if not ok:
                    log.warning(f"Package wrong version found: {p[0]} {version} required {p[1]}")
        return ok
    except ModuleNotFoundError:
        log.debug(f"Package not installed: {pkgs}")
        return False
# install package using pip if not already installed
def install(package):
def pip(args: str):
log.debug(f"Running pip: {args}")
@ -47,37 +83,11 @@ def install(package):
log.debug(f'Pip output: {txt}')
return txt
def installed():
import pkg_resources
ok = True
try:
pkgs = [p for p in package.split() if not p.startswith('-') and not p.startswith('git+') and not p.startswith('http') and not p.startswith('=')]
for pkg in pkgs:
p = pkg.split('==')
spec = pkg_resources.working_set.by_key.get(p[0], None)
if spec is None:
spec = pkg_resources.working_set.by_key.get(p[0].lower(), None)
if spec is None:
spec = pkg_resources.working_set.by_key.get(p[0].replace('_', '-'), None)
ok = ok and spec is not None
version = pkg_resources.get_distribution(p[0]).version
if ok and len(p) > 1:
ok = ok and version == p[1]
if not ok:
log.warning(f"Package wrong version found: {p[0]} {version} required {p[1]}")
# if ok:
# log.debug(f"Package already installed: {p[0]} {version}")
# else:
# log.debug(f"Package not installed: {p[0]} {version}")
return ok
except ModuleNotFoundError:
log.debug(f"Package not installed: {pkgs}")
return False
if not installed():
if not installed(package):
pip(f"install --upgrade {package}")
# execute git command
def git(args: str):
# log.debug(f"Running git: {args}")
git_cmd = os.environ.get('GIT', "git")
@ -91,6 +101,7 @@ def git(args: str):
return txt
# switch to the main branch (head can become detached), then update the repository
def update(dir):
branch = git(f'-C "{dir}" branch')
if 'main' in branch:
@ -102,10 +113,11 @@ def update(dir):
else:
log.warning(f'Unknown branch for: {dir}')
git(f'-C "{dir}" pull --rebase --autostash')
branch = git(f'-C "{dir}" branch')
# clone git repository
def clone(url, dir, commithash=None):
if os.path.exists(dir):
if commithash is None:
return
@ -120,12 +132,7 @@ def clone(url, dir, commithash=None):
git(f'-C "{dir}" checkout {commithash}')
def parse_env():
    """Append any arguments found in the COMMANDLINE_ARGS environment variable to sys.argv."""
    import shlex
    extra = shlex.split(os.environ.get('COMMANDLINE_ARGS', ""))
    sys.argv.extend(extra)
# check python version
def check_python():
import platform
supported_minors = [10] if platform.system() != "Windows" else [9, 10, 11]
@ -134,6 +141,7 @@ def check_python():
raise RuntimeError(f"Incompatible Python version: {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro} required 3.9-3.11")
# check torch version
def check_torch():
install(f'torch torchaudio torchvision --extra-index-url https://download.pytorch.org/whl/cu118')
try:
@ -153,6 +161,7 @@ def check_torch():
pass
# install required packages
def install_packages():
log.info('Installing packages')
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
@ -165,6 +174,7 @@ def install_packages():
install(f'--no-deps {xformers_package}')
# clone required repositories
def install_repositories():
def dir(name):
return os.path.join(os.path.dirname(__file__), 'repositories', name)
@ -188,6 +198,26 @@ def install_repositories():
clone(blip_repo, dir('BLIP'), blip_commit)
# run extension installer
def run_extension_installer(extension_dir):
    """Run an extension's bundled install.py script, if one exists.

    The script runs in a subprocess with PYTHONPATH pointing at the current
    working directory; failures are logged rather than raised.
    """
    script = os.path.join(extension_dir, "install.py")
    if not os.path.isfile(script):
        return
    try:
        log.debug(f"Running extension installer: {script}")
        child_env = os.environ.copy()
        child_env['PYTHONPATH'] = os.path.abspath(".")
        proc = subprocess.run(f'"{sys.executable}" "{script}"', shell=True, env=child_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if proc.returncode != 0:
            output = proc.stdout.decode(encoding="utf8", errors="ignore")
            if len(proc.stderr) > 0:
                output = output + '\n' + proc.stderr.decode(encoding="utf8", errors="ignore")
            log.error(f'Error running extension installer: {output}')
    except Exception as e:
        log.error(f'Exception running extension installer: {e}')
# run installer for each installed and enabled extension and optionally update them
def install_extensions():
def list_extensions(dir):
settings = {}
@ -203,29 +233,11 @@ def install_extensions():
log.debug(f'Disabled extensions: {disabled_extensions}')
return [x for x in os.listdir(dir) if x not in disabled_extensions and not x.startswith('.')]
def run_extension_installer(extension_dir):
path_installer = os.path.join(extension_dir, "install.py")
if not os.path.isfile(path_installer):
return
try:
log.debug(f"Running extension installer: {path_installer}")
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
result = subprocess.run(f'"{sys.executable}" "{path_installer}"', shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
txt = result.stdout.decode(encoding="utf8", errors="ignore")
if len(result.stderr) > 0:
txt = txt + '\n' + result.stderr.decode(encoding="utf8", errors="ignore")
log.error(f'Error running extension installer: {txt}')
except Exception as e:
log.error(f'Exception running extension installer: {e}')
extensions_builtin_dir = os.path.join(os.path.dirname(__file__), 'extensions-builtin')
extensions = list_extensions(extensions_builtin_dir)
log.info(f'Built-in extensions: {extensions}')
for ext in extensions:
if args.update:
if not args.noupdate:
update(os.path.join(extensions_builtin_dir, ext))
if not args.skip_extensions:
run_extension_installer(os.path.join(extensions_builtin_dir, ext))
@ -234,16 +246,17 @@ def install_extensions():
extensions = list_extensions(extensions_dir)
log.info(f'Enabled extensions: {extensions}')
for ext in extensions:
if args.update:
if not args.noupdate:
update(os.path.join(extensions_dir, ext))
if not args.skip_extensions:
run_extension_installer(os.path.join(extensions_dir, ext))
# initialize and optionally update submodules
def install_submodules():
log.info('Installing submodules')
git(f'submodule --quiet update --init --recursive')
if args.update:
if not args.noupdate:
log.info('Updating submodules')
submodules = git('submodule').splitlines()
for submodule in submodules:
@ -251,6 +264,7 @@ def install_submodules():
update(name)
# install requirements
def install_requirements():
log.info('Installing requirements')
f = open('requirements.txt', 'r')
@ -259,6 +273,7 @@ def install_requirements():
install(line)
# set environment variables controlling the behavior of various libraries
def set_environment():
log.info('Setting environment tuning')
os.environ.setdefault('TF_CPP_MIN_LOG_LEVEL', '2')
@ -273,8 +288,10 @@ def set_environment():
os.environ.setdefault('CUDA_DEVICE_DEFAULT_PERSISTING_L2_CACHE_PERCENTAGE_LIMIT', '0')
os.environ.setdefault('GRADIO_ANALYTICS_ENABLED', 'False')
os.environ.setdefault('SAFETENSORS_FAST_GPU', '1')
os.environ.setdefault('NUMEXPR_MAX_THREADS', '16')
# check version of the main repo and optionally upgrade it
def check_version():
ver = git('log -1 --pretty=format:"%h %ad"')
log.info(f'Version: {ver}')
@ -289,12 +306,13 @@ def check_version():
log.info(f'Updated to version: {ver}')
else:
log.warning(f'Latest available version: {commits["commit"]["commit"]["author"]["date"]}')
if args.update:
if not args.noupdate:
log.info('Updating Wiki')
update(os.path.join(os.path.dirname(__file__), "wiki"))
update(os.path.join(os.path.dirname(__file__), "wiki", "origin-wiki"))
# check if we can run setup in quick mode
def check_timestamp():
if not os.path.isfile('setup.log'):
return False
@ -304,13 +322,15 @@ def check_timestamp():
log.debug(f'Repository update time: {time.ctime(int(version_time))}')
return setup_time >= version_time
# entry method when used as module
def run_setup(quick = False):
parse_env()
check_python()
if (quick or args.quick) and check_timestamp():
log.info('Attempting quick setup')
return
log.info("Running setup")
log.debug(f"Args: {vars(args)}")
install_requirements()
check_version()
install_packages()
@ -319,6 +339,7 @@ def run_setup(quick = False):
install_extensions()
install_requirements()
if __name__ == "__main__":
run_setup()
set_environment()

View File

@ -1,15 +1,11 @@
import os
import sys
import time
import importlib
import signal
import re
import warnings
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from packaging import version
from rich import print
from setup import log
import logging
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
@ -42,7 +38,6 @@ import modules.codeformer_model as codeformer
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
import modules.lowvram
import modules.scripts
import modules.sd_hijack
@ -52,15 +47,13 @@ import modules.txt2img
import modules.script_callbacks
import modules.textual_inversion.textual_inversion
import modules.progress
import modules.ui
from modules import modelloader
from modules.shared import cmd_opts
from modules.shared import cmd_opts, opts
import modules.hypernetworks.hypernetwork
startup_timer.record("libraries")
if cmd_opts.server_name:
server_name = cmd_opts.server_name
else:
@ -72,28 +65,23 @@ def initialize():
if torch.version.cuda: cuda_version = f'CUDA {torch.version.cuda} cuDNN {torch.backends.cudnn.version()}'
elif torch.version.hip: cuda_version = f'HIP {torch.version.hip}'
else: cuda_version = ''
print(f'Torch {getattr(torch, "__long_version__", torch.__version__)} {cuda_version}')
log.info(f'Torch {getattr(torch, "__long_version__", torch.__version__)} {cuda_version}')
for device in [torch.cuda.device(i) for i in range(torch.cuda.device_count())]:
print(f'GPU {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}')
log.info(f'GPU {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}')
else:
print(f'Torch {getattr(torch, "__long_version__", torch.__version__)} running on CPU')
log.info(f'Torch {getattr(torch, "__long_version__", torch.__version__)} running on CPU')
extensions.list_extensions()
startup_timer.record("extensions")
if cmd_opts.ui_debug_mode:
shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
modules.scripts.load_scripts()
return
modelloader.cleanup_models()
modules.sd_models.setup_model()
startup_timer.record("models")
codeformer.setup_model(cmd_opts.codeformer_models_path)
codeformer.setup_model(opts.codeformer_models_path)
startup_timer.record("codeformer")
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
gfpgan.setup_model(opts.gfpgan_models_path)
startup_timer.record("gfpgan")
modelloader.list_builtin_upscalers()
@ -123,28 +111,26 @@ def initialize():
ui_extra_networks.register_page(ui_extra_networks_textual_inversion.ExtraNetworksPageTextualInversion())
ui_extra_networks.register_page(ui_extra_networks_hypernets.ExtraNetworksPageHypernetworks())
ui_extra_networks.register_page(ui_extra_networks_checkpoints.ExtraNetworksPageCheckpoints())
extra_networks.initialize()
extra_networks.register_extra_network(extra_networks_hypernet.ExtraNetworkHypernet())
startup_timer.record("extra networks")
if cmd_opts.tls_keyfile is not None and cmd_opts.tls_keyfile is not None:
try:
if not os.path.exists(cmd_opts.tls_keyfile):
print("Invalid path to TLS keyfile given")
log.warning("Invalid path to TLS keyfile given")
if not os.path.exists(cmd_opts.tls_certfile):
print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
log.warning(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
except TypeError:
cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
print("TLS setup invalid, running webui without TLS")
log.warning("TLS setup invalid, running webui without TLS")
else:
print("Running with TLS")
log.info("Running with TLS")
startup_timer.record("TLS")
# make the program just exit at ctrl+c without waiting for anything
def sigint_handler(_sig, _frame):
print('Exiting')
log.info('Exiting')
os._exit(0)
signal.signal(signal.SIGINT, sigint_handler)
@ -157,10 +143,10 @@ def load_model():
modules.sd_models.load_model()
except Exception as e:
errors.display(e, "loading stable diffusion model")
print("Stable diffusion model failed to load, exiting", file=sys.stderr)
log.error(f"Stable diffusion model failed to load")
exit(1)
if shared.sd_model is None:
print("No stable diffusion model loaded, exiting", file=sys.stderr)
log.error("No stable diffusion model loaded")
exit(1)
shared.opts.data["sd_model_checkpoint"] = shared.sd_model.sd_checkpoint_info.title
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
@ -171,12 +157,12 @@ def load_model():
def setup_middleware(app):
app.middleware_stack = None # reset current middleware to allow modifying user provided list
app.add_middleware(GZipMiddleware, minimum_size=1024)
if cmd_opts.cors_allow_origins and cmd_opts.cors_allow_origins_regex:
app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_allow_origins:
app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_allow_origins_regex:
app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
if cmd_opts.cors_origins and cmd_opts.cors_regex:
app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_origins.split(','), allow_origin_regex=cmd_opts.cors_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_origins:
app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_origins.split(','), allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_regex:
app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
app.build_middleware_stack() # rebuild middleware stack on-the-fly
@ -186,36 +172,16 @@ def create_api(app):
return api
def api_only():
    # serve only the REST API, without the gradio UI: initialize the backend,
    # load the model, then launch a bare FastAPI app
    initialize()
    load_model()
    app = FastAPI()
    setup_middleware(app)  # gzip + CORS middleware as configured via cmd_opts
    api = create_api(app)
    modules.script_callbacks.app_started_callback(None, app)  # notify extensions the app is up
    print(f"Startup time: {startup_timer.summary()}")
    # bind all interfaces only when --listen was given; port defaults to 7861
    api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861)
def webui():
launch_api = cmd_opts.api
initialize()
if shared.opts.clean_temp_dir_at_start:
ui_tempdir.cleanup_tmpdr()
startup_timer.record("cleanup")
modules.script_callbacks.before_ui_callback()
startup_timer.record("scripts before_ui_callback")
shared.demo = modules.ui.create_ui()
startup_timer.record("ui")
if not cmd_opts.no_gradio_queue:
shared.demo.queue(16)
shared.demo.queue(16)
gradio_auth_creds = []
if cmd_opts.gradio_auth:
@ -231,7 +197,7 @@ def webui():
server_port=cmd_opts.port,
ssl_keyfile=cmd_opts.tls_keyfile,
ssl_certfile=cmd_opts.tls_certfile,
debug=cmd_opts.gradio_debug,
debug=False,
auth=[tuple(cred.split(':')) for cred in gradio_auth_creds] if gradio_auth_creds else None,
inbrowser=cmd_opts.autolaunch,
prevent_thread_lock=True,
@ -239,24 +205,19 @@ def webui():
)
# for dep in shared.demo.dependencies:
# dep['show_progress'] = False # disable gradio css animation on component update
# app is instance of FastAPI server
# shared.demo.server is instance of gradio class which inherits from uvicorn.Server
# shared.demo.config is instance of uvicorn.Config
# shared.demo.app is instance of ASGIApp
cmd_opts.autolaunch = False
startup_timer.record("gradio")
app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
setup_middleware(app)
modules.progress.setup_progress_api(app)
if launch_api:
create_api(app)
create_api(app)
ui_extra_networks.add_pages_to_demo(app)
@ -265,14 +226,11 @@ def webui():
load_model()
print(f"Startup time: {startup_timer.summary()}")
log.info(f"Startup time: {startup_timer.summary()}")
while True:
time.sleep(0.1)
if __name__ == "__main__":
if cmd_opts.nowebui:
api_only()
else:
webui()
webui()