jump patch

pull/316/head
Vladimir Mandic 2023-04-20 11:20:27 -04:00
parent e6701f5cb6
commit 0e7144186d
22 changed files with 142 additions and 116 deletions

5
.gitignore vendored
View File

@ -39,10 +39,5 @@ venv
# unexcluded so folders get created
!/repositories/.placeholder
!/extensions/.placeholder
!/outputs/.placeholder
!/models/.placeholder
!/models/embeddings/.placeholder
!/models/hypernetworks/.placeholder
!/models/VAE-approx
!/models/VAE-approx/model.pt

View File

@ -10,6 +10,8 @@ Stuff to be fixed...
Stuff to be added...
- Add README headers
- Add Gradio base themes: <https://gradio.app/theming-guide/#using-the-theme-builder>
- Create new GitHub hooks/actions for CI/CD
- Move Restart Server from WebUI to Launch and reload modules
- Redo Extensions tab: see <https://vladmandic.github.io/sd-extension-manager/pages/extensions.html>
@ -54,3 +56,6 @@ Tech that can be integrated as part of the core workflow...
### Pending Code Updates
- full CUDA tuning section in UI Settings
- improve compatibility with some 3rd party extensions
- improve exif/pnginfo metadata parsing

@ -1 +1 @@
Subproject commit bb7d428ac7703d6703ebabbc1fcc9233ac958801
Subproject commit 94e3375e746ed8caab6d09799f380bed1f29ccd6

@ -1 +1 @@
Subproject commit 15c1938bd006638363f762c518a0f48247eff724
Subproject commit cc86ce8887f041e88e67d336044190fd0296fd74

@ -1 +1 @@
Subproject commit 937e8f07414d5de9f4a8570844731d643aeff576
Subproject commit 4d95eb9ea77023d00b43bffcb59baf2c56cc639c

View File

@ -102,6 +102,7 @@ svg.feather.feather-image, .feather .feather-image { display: none }
#txt2img_subseed_strength { margin-top: 0; }
#txt2img_tools, #img2img_tools { margin-top: 54px; scale: 120%; margin-left: 26px; }
#txtimg_hr_finalres { max-width: 200px; }
#pnginfo_html2_info { margin-top: -18px; background-color: var(--input-background-fill); padding: var(--input-padding) }
/* custom elements overrides */
#steps-animation, #controlnet { border-width: 0; }
@ -133,7 +134,7 @@ svg.feather.feather-image, .feather .feather-image { display: none }
--block_title_background_fill: None;
--block_title_border_color: None;
--block_title_border_width: None;
--block-title-text-color: var(--neutral-200);
--block-title-text-color: white;
--panel-background-fill: var(--background-fill-secondary);
--panel-border-color: var(--border-color-primary);
--panel_border_width: None;

View File

@ -3,23 +3,28 @@ import os
import sys
import shlex
import setup
import modules.paths_internal
from modules import cmd_args
from modules.paths_internal import script_path
try:
from rich import print # pylint: disable=redefined-builtin
except ImportError:
pass
### majority of this file is superfluous, but used by some extensions as helpers during extension installation
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
sys.argv += shlex.split(commandline_args)
setup.parse_args()
args, _ = cmd_args.parser.parse_known_args()
script_path = modules.paths_internal.script_path
extensions_dir = modules.paths_internal.extensions_dir
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
stored_commit_hash = None
dir_repos = "repositories"
python = sys.executable # used by some extensions to run python
skip_install = False # parsed by some extensions
def commit_hash():

View File

@ -10,10 +10,6 @@ parser.add_argument("--ui-config-file", type=str, help=argparse.SUPPRESS, defaul
parser.add_argument("--config", type=str, default=sd_default_config, help=argparse.SUPPRESS)
parser.add_argument("--theme", type=str, help=argparse.SUPPRESS, default=None)
parser.add_argument("--no-half", action='store_true', help="Do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="Do not switch the VAE model to 16-bit floats")
parser.add_argument("--precision", type=str, help="Evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--medvram", action='store_true', help="Enable model optimizations for sacrificing a little speed for low memory usage")
parser.add_argument("--lowvram", action='store_true', help="Enable model optimizations for sacrificing a lot of speed for lowest memory usage")
parser.add_argument("--lowram", action='store_true', help="Load checkpoint weights to VRAM instead of RAM")
@ -61,10 +57,15 @@ def compatibility_args(opts, args):
parser.add_argument("--bsrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.bsrgan_models_path)
parser.add_argument("--realesrgan-models-path", type=str, help=argparse.SUPPRESS, default=opts.realesrgan_models_path)
parser.add_argument("--clip-models-path", type=str, help=argparse.SUPPRESS, default=opts.clip_models_path)
parser.add_argument("--disable-nan-check", default = True, action='store_true', help=argparse.SUPPRESS)
parser.add_argument("--disable-extension-access", default = False, action='store_true', help=argparse.SUPPRESS)
parser.add_argument("--opt-channelslast", help=argparse.SUPPRESS, default=opts.opt_channelslast)
parser.add_argument("--xformers", default = (opts.cross_attention_optimization == "xFormers"), action='store_true', help=argparse.SUPPRESS)
parser.add_argument("--disable-nan-check", help=argparse.SUPPRESS, default=opts.disable_nan_check)
parser.add_argument("--no-half", help=argparse.SUPPRESS, default=opts.no_half)
parser.add_argument("--no-half-vae", help=argparse.SUPPRESS, default=opts.no_half_vae)
parser.add_argument("--precision", help=argparse.SUPPRESS, default=opts.precision)
parser.add_argument("--api", help=argparse.SUPPRESS, default=True)
args = parser.parse_args()
if vars(parser)['_option_string_actions'].get('--lora-dir', None) is not None:
args.lora_dir = opts.lora_dir

View File

@ -1,7 +1,6 @@
import sys
import contextlib
import torch
from modules import errors
if sys.platform == "darwin":
from modules import mac_specific
@ -60,23 +59,26 @@ def torch_gc():
torch.cuda.ipc_collect()
def enable_tf32():
    """Enable TF32 math for CUDA matmul and cuDNN ops (no-op without CUDA)."""
    if not torch.cuda.is_available():
        return
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
def enable_cudnn_benchmark():
def set_cuda_params():
    """Apply user-configured CUDA/cuDNN backend flags and select global dtypes.

    Reads settings from modules.shared.opts and updates module-level
    dtype/dtype_vae/dtype_unet/unet_needs_upcast. No-op without CUDA.
    """
    if not torch.cuda.is_available():
        return
    from modules import shared  # local import to avoid circular dependency at module load
    if torch.backends.cudnn.is_available():
        torch.backends.cudnn.benchmark = shared.opts.cudnn_benchmark
        torch.backends.cudnn.benchmark_limit = 0
        torch.backends.cudnn.allow_tf32 = shared.opts.cuda_allow_tf32
    torch.backends.cuda.matmul.allow_tf32 = shared.opts.cuda_allow_tf32
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = shared.opts.cuda_allow_tf16_reduced
    torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = shared.opts.cuda_allow_tf16_reduced
    global dtype, dtype_vae, dtype_unet, unet_needs_upcast # pylint: disable=global-statement
    if shared.opts.cuda_dtype == 'FP16':
        dtype = dtype_vae = dtype_unet = torch.float16
    if shared.opts.cuda_dtype == 'BF16': # fix: settings choice is 'BF16' ('BP16' could never match, so bfloat16 was unselectable)
        dtype = dtype_vae = dtype_unet = torch.bfloat16
    if shared.opts.cuda_dtype == 'FP32':
        dtype = dtype_vae = dtype_unet = torch.float32
    unet_needs_upcast = shared.opts.upcast_sampling
cpu = torch.device("cpu")
device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None
@ -111,7 +113,7 @@ def autocast(disable=False):
from modules import shared
if disable:
return contextlib.nullcontext()
if dtype == torch.float32 or shared.cmd_opts.precision == "full":
if dtype == torch.float32 or shared.cmd_opts.precision == "Full":
return contextlib.nullcontext()
return torch.autocast("cuda")

View File

@ -1,22 +1,20 @@
import datetime
import pytz
import io
import math
import os
from collections import namedtuple
import re
import os
import math
import json
import string
import hashlib
from collections import namedtuple
import pytz
import numpy as np
import piexif
import piexif.helper
from PIL import Image, ImageFont, ImageDraw, PngImagePlugin, ExifTags
import string
import json
import hashlib
from modules import sd_samplers, shared, script_callbacks, errors
from modules.shared import opts, cmd_opts
from modules.shared import opts, cmd_opts # pylint: disable=unused-import
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
@ -146,7 +144,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
return ImageFont.truetype('javascript/roboto.ttf', fontsize)
def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
for i, line in enumerate(lines):
for _i, line in enumerate(lines):
fnt = initial_fnt
fontsize = initial_fontsize
while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:
def safe_decode_string(s: bytes):
    """Best-effort decode of raw exif/pnginfo bytes into a clean string.

    Strips the legacy 'UNICODE'/'ASCII' markers and leading NUL added by old
    pnginfo/exif encoders, tries several encodings in order, removes residual
    control characters, and caps the result at 1024 characters. Returns None
    when nothing decodable remains or the decoded value is empty.
    """
    remove_prefix = lambda text, prefix: text[len(prefix):] if text.startswith(prefix) else text
    for encoding in ['utf-8', 'utf-16', 'ascii', 'latin_1', 'cp1252', 'cp437']: # try different encodings
        try:
            s = remove_prefix(s, b'UNICODE') # remove silly prefix added by old pnginfo/exif encoding
            s = remove_prefix(s, b'ASCII')
            s = remove_prefix(s, b'\x00')
            val = s.decode(encoding, errors="strict")
            val = re.sub(r'[\x00-\x09]', '', val).strip() # remove remaining special characters
            if len(val) > 1024: # limit string length
                val = val[:1024]
            if len(val) == 0: # remove empty strings
                val = None
            return val
        except Exception: # fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt
            pass
    return None
@ -651,7 +646,7 @@ Negative prompt: {json_info["uc"]}
Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
except Exception as e:
errors.display(e, 'novelai image parser')
print('GENINFO', geninfo)
return geninfo, items

View File

@ -27,11 +27,11 @@ def download_default_clip_interrogate_categories(content_dir):
print("Downloading CLIP categories...")
tmpdir = content_dir + "_tmp"
category_types = ["artists", "flavors", "mediums", "movements"]
cat_types = ["artists", "flavors", "mediums", "movements"]
try:
os.makedirs(tmpdir)
for category_type in category_types:
for category_type in cat_types:
torch.hub.download_url_to_file(f"https://raw.githubusercontent.com/pharmapsychotic/clip-interrogator/main/clip_interrogator/data/{category_type}.txt", os.path.join(tmpdir, f"{category_type}.txt"))
os.rename(tmpdir, content_dir)
@ -60,22 +60,21 @@ class InterrogateModels:
download_default_clip_interrogate_categories(self.content_dir)
if self.loaded_categories is not None and self.skip_categories == shared.opts.interrogate_clip_skip_categories:
return self.loaded_categories
return self.loaded_categories
self.loaded_categories = []
if os.path.exists(self.content_dir):
self.skip_categories = shared.opts.interrogate_clip_skip_categories
category_types = []
cat_types = []
for filename in Path(self.content_dir).glob('*.txt'):
category_types.append(filename.stem)
cat_types.append(filename.stem)
if filename.stem in self.skip_categories:
continue
m = re_topn.search(filename.stem)
topn = 1 if m is None else int(m.group(1))
with open(filename, "r", encoding="utf8") as file:
lines = [x.strip() for x in file.readlines()]
self.loaded_categories.append(Category(name=filename.stem, topn=topn, items=lines))
return self.loaded_categories
@ -206,7 +205,7 @@ class InterrogateModels:
image_features /= image_features.norm(dim=-1, keepdim=True)
for name, topn, items in self.categories():
for _name, topn, items in self.categories():
matches = self.rank(image_features, items, top_count=topn)
for match, score in matches:
if shared.opts.interrogate_return_ranks:

View File

@ -49,6 +49,29 @@ for d, must_exist, what, options in path_dirs:
paths[what] = d
def create_paths(opts):
    """Create every configured data/output folder so later writes cannot fail.

    Silently skips unset (None or empty) paths; prints a notice for each
    folder it actually creates.
    """
    def create_path(folder):
        # unset paths are legal and simply skipped
        if folder is None or folder == '':
            return
        if not os.path.exists(folder):
            print('Creating folder:', folder)
            os.makedirs(folder, exist_ok=True) # fix: was os.makedirs(opts.temp_dir, ...) — created temp_dir instead of the folder being reported
    create_path(opts.temp_dir)
    create_path(extensions_dir)
    create_path(extensions_builtin_dir)
    create_path(opts.ckpt_dir)
    create_path(opts.vae_dir)
    create_path(opts.embeddings_dir)
    create_path(opts.outdir_samples)
    create_path(opts.outdir_txt2img_samples)
    create_path(opts.outdir_img2img_samples)
    create_path(opts.outdir_extras_samples)
    create_path(opts.outdir_grids)
    create_path(opts.outdir_txt2img_grids)
    create_path(opts.outdir_img2img_grids)
    create_path(opts.outdir_save)
class Prioritize:
def __init__(self, name):
self.name = name

View File

@ -2,19 +2,25 @@ import json
import math
import os
import sys
import random
import logging
from typing import Any, Dict, List
import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List
from ldm.data.util import AddMiDaS
from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion
from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts # pylint: disable=unused-import
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, cmd_opts, state # pylint: disable=unused-import
import modules.shared as shared
import modules.paths as paths
import modules.face_restoration
@ -22,12 +28,6 @@ import modules.images as images
import modules.styles
import modules.sd_models as sd_models
import modules.sd_vae as sd_vae
import logging
from ldm.data.util import AddMiDaS
from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion
from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
@ -198,7 +198,7 @@ class StableDiffusionProcessing:
def unclip_image_conditioning(self, source_image):
c_adm = self.sd_model.embedder(source_image)
if self.sd_model.noise_augmentor is not None:
noise_level = 0 # TODO: Allow other noise levels?
noise_level = 0
c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
c_adm = torch.cat((c_adm, noise_level_emb), 1)
return c_adm
@ -267,7 +267,7 @@ class StableDiffusionProcessing:
raise NotImplementedError()
def close(self):
self.sampler = None
self.sampler = None # pylint: disable=attribute-defined-outside-init
class Processed:
@ -453,7 +453,7 @@ def fix_seed(p):
p.subseed = get_fixed_seed(p.subseed)
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0):
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): # pylint: disable=unused-argument
index = position_in_batch + iteration * p.batch_size
clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
@ -527,7 +527,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
if type(p.prompt) == list:
assert(len(p.prompt) > 0)
assert len(p.prompt) > 0
else:
assert p.prompt is not None
@ -725,7 +725,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if opts.return_mask:
output_images.append(image_mask)
if opts.return_mask_composite:
output_images.append(image_mask_composite)

View File

@ -1,4 +1,3 @@
import sys
from types import MethodType
from rich import print # pylint: disable=redefined-builtin
import torch
@ -89,7 +88,6 @@ def undo_optimizations():
def fix_checkpoint():
    """Intentional no-op kept for API compatibility.

    Checkpoints are now added and removed in the embedding/hypernet code,
    since torch warns when checkpoints are added while not training.
    """
@ -173,16 +171,15 @@ class StableDiffusionModelHijack:
if m.cond_stage_key == "edit":
sd_hijack_unet.hijack_ddpm_edit()
"""
try:
import torch._dynamo as dynamo
torch._dynamo.config.verbose = True
torch.backends.cudnn.benchmark = True
m.model = torch.compile(m.model, mode="default", backend="inductor", fullgraph=False, dynamic=False)
print("Model compiled set")
except Exception as err:
print(f"Model compile not supported: {err}")
"""
if opts.cuda_compile and opts.cuda_compile_mode != 'none':
try:
import torch._dynamo as dynamo # pylint: disable=unused-import
torch._dynamo.config.verbose = True # pylint: disable=protected-access
torch.backends.cudnn.benchmark = True
m.model = torch.compile(m.model, mode="default", backend=opts.cuda_compile_mode, fullgraph=False, dynamic=False)
print("Model compile enabled:", opts.cuda_compile_mode)
except Exception as err:
print(f"Model compile not supported: {err}")
self.optimization_method = apply_optimizations()

View File

@ -299,11 +299,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
if depth_model:
model.depth_model = depth_model
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
devices.dtype_unet = model.model.diffusion_model.dtype
devices.unet_needs_upcast = shared.opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
model.first_stage_model.to(devices.dtype_vae)
# clean up cache if limit is reached
@ -403,7 +399,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
devices.enable_cudnn_benchmark()
devices.set_cuda_params()
gc.collect()
devices.torch_gc()

View File

@ -251,9 +251,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
"cross_attention_optimization": OptionInfo("Scaled-Dot-Product", "Cross-attention optimization method", gr.Radio, lambda: {"choices": shared_items.list_crossattention() }),
"cross_attention_options": OptionInfo([], "Cross-attention advanced options", gr.CheckboxGroup, lambda: {"choices": ['xFormers enable flash Attention', 'SDP disable memory attention']}),
"disable_nan_check": OptionInfo(False, "Do not check if produced images/latent spaces have NaN values"),
"opt_channelslast": OptionInfo(False, "Use channels last as torch memory format "),
"cudnn_benchmark": OptionInfo(False, "Enable CUDA cuDNN benchmark feature"),
"sub_quad_q_chunk_size": OptionInfo(512, "Sub-quadratic cross-attention query chunk size for the layer optimization to use", gr.Slider, {"minimum": 16, "maximum": 8192, "step": 8}),
"sub_quad_kv_chunk_size": OptionInfo(512, "Sub-quadratic cross-attentionkv chunk size for the sub-quadratic cross-attention layer optimization to use", gr.Slider, {"minimum": 0, "maximum": 8192, "step": 8}),
"sub_quad_chunk_threshold": OptionInfo(80, "Sub-quadratic cross-attention percentage of VRAM chunking threshold", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}),
@ -323,6 +320,20 @@ options_templates.update(options_section(('saving-paths', "Image Paths"), {
"outdir_save": OptionInfo("outputs/save", "Directory for saving images using the Save button", component_args=hide_dirs),
}))
options_templates.update(options_section(('cuda', "CUDA Settings"), {
"precision": OptionInfo("Autocast", "Precision type", gr.Radio, lambda: {"choices": ["Autocast", "Full"]}),
"cuda_dtype": OptionInfo("FP16", "Device precision type", gr.Radio, lambda: {"choices": ["FP32", "FP16", "BF16"]}),
"no_half": OptionInfo(False, "Use full precision for model (--no-half)"),
"no_half_vae": OptionInfo(False, "Use full precision for VAE (--no-half-vae)"),
"disable_nan_check": OptionInfo(True, "Do not check if produced images/latent spaces have NaN values"),
"opt_channelslast": OptionInfo(False, "Use channels last as torch memory format "),
"cudnn_benchmark": OptionInfo(False, "Enable cuDNN benchmark feature"),
"cuda_allow_tf32": OptionInfo(True, "Allow TF32 math ops"),
"cuda_allow_tf16_reduced": OptionInfo(True, "Allow TF16 reduced precision math ops"),
"cuda_compile": OptionInfo(False, "Enable model compile (experimental)"),
"cuda_compile_mode": OptionInfo("none", "Model compile mode (experimental)", gr.Radio, lambda: {"choices": ['none', 'inductor', 'cudagraphs', 'aot_ts_nvfuser']}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),

View File

@ -13,7 +13,7 @@ from PIL import Image
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing
from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML
from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML # pylint: disable=unused-import
from modules.paths import script_path, data_path
from modules.shared import opts, cmd_opts
import modules.codeformer_model
@ -133,7 +133,7 @@ def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_di
img = Image.open(image)
filename = os.path.basename(image)
left, _ = os.path.splitext(filename)
print(interrogation_function(img), file=open(os.path.join(ii_output_dir, left + ".txt"), 'a'))
print(interrogation_function(img), file=open(os.path.join(ii_output_dir, left + ".txt"), 'a', encoding='utf-8'))
return [gr.update(), None]
@ -157,19 +157,19 @@ def create_seed_inputs(target_interface):
random_seed = ToolButton(random_symbol, elem_id=target_interface + '_random_seed')
reuse_seed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_seed')
with FormRow(visible=True, elem_id=target_interface + '_subseed_row') as seed_extra_row_1:
with FormRow(visible=True, elem_id=target_interface + '_subseed_row'):
subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
subseed.style(container=False)
# random_subseed = ToolButton(random_symbol, elem_id=target_interface + '_random_subseed')
random_subseed = ToolButton(random_symbol, elem_id=target_interface + '_random_subseed')
reuse_subseed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
subseed_strength = gr.Slider(label='Strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength')
with FormRow(visible=False) as seed_extra_row_2:
with FormRow(visible=False):
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w')
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h')
random_seed.click(fn=lambda: [-1, -1], show_progress=False, inputs=[], outputs=[seed, subseed])
# random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
@ -670,9 +670,9 @@ def create_ui():
with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(
f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
"<br>Add inpaint batch mask directory to enable inpaint batch processing."
f"{hidden}</p>"
)
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
@ -1162,8 +1162,8 @@ def create_ui():
with gr.Column(elem_id='ti_gallery_container'):
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
_ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
_ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
create_embedding.click(

View File

@ -30,10 +30,11 @@ def create_ui():
script_inputs = scripts.scripts_postproc.setup_ui()
with gr.Column():
result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples)
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info")
html2_info = gr.HTML()
result_images, html_info_x, html_info, _html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples)
html_info = gr.HTML(elem_id="pnginfo_html_info")
generation_info = gr.Textbox(elem_id="pnginfo_generation_info", label="Parameters")
gr.HTML('Full metadata')
html2_info = gr.HTML(elem_id="pnginfo_html2_info")
for tabname, button in buttons.items():
parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=extras_image))

View File

@ -51,9 +51,6 @@ gr.processing_utils.save_pil_to_file = save_pil_to_file
def on_tmpdir_changed():
if shared.opts.temp_dir == "":
return
if not os.path.isdir(shared.opts.temp_dir):
print('Creating temporary folder:', shared.opts.temp_dir)
os.makedirs(shared.opts.temp_dir, exist_ok=True)
register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x"))

View File

View File

@ -233,9 +233,6 @@ button.custom-button{
margin-right: 0;
}
.performance .vram {
}
#txt2img_generate, #img2img_generate {
min-height: 4.5em;
}

View File

@ -27,6 +27,7 @@ import ldm.modules.encoders.modules # pylint: disable=W0611,C0411
from modules import extra_networks, ui_extra_networks_checkpoints # pylint: disable=C0411,C0412
from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion
from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call # pylint: disable=W0611,C0411
from modules.paths import create_paths
# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
if ".dev" in torch.__version__ or "+git" in torch.__version__:
@ -152,8 +153,8 @@ def create_api(app):
def start_ui():
logging.disable(logging.INFO)
create_paths(opts)
initialize()
ui_tempdir.on_tmpdir_changed()
if shared.opts.clean_temp_dir_at_start:
ui_tempdir.cleanup_tmpdr()
startup_timer.record("cleanup")