add hidiffusion

pull/3086/head
Vladimir Mandic 2024-04-24 10:48:47 -04:00
parent a7408bca6e
commit 5914f1e00d
19 changed files with 4615 additions and 25 deletions

View File

@ -16,6 +16,7 @@ ignore-paths=/usr/lib/.*$,
^modules/dml/.*$,
^modules/tcd/.*$,
^modules/xadapters/.*$,
^modules/hidiffusion/.*$,
ignore-patterns=
ignored-modules=
jobs=0

View File

@ -8,13 +8,18 @@
- IP adapter masking
- InstantStyle
## Update for 2024-04-23
## Update for 2024-04-24
- **Features**:
- **Gallery**: list, preview, search through all your images and videos!
implemented as infinite-scroll with client-side-caching and lazy-loading while being fully async and non-blocking
search or sort by path, name, size, width, height, mtime or any image metadata item, also with extended syntax like *width > 1000*
*settings*: optional additional user-defined folders, thumbnails in fixed or variable aspect-ratio
- **HiDiffusion**:
generate high-resolution images using your standard models without duplicates/distortions
simply enable checkbox in advanced menu and set desired resolution
additional settings are available in settings -> inference settings -> hidiffusion
for example 1024x1024 using sd15 or 2024x2024 using sdxl
- **IP Adapter Masking**:
powerful method of using masking with ip-adapters
when combined with multiple ip-adapters, it allows different input guidance for each segment of the input image

View File

@ -58,7 +58,7 @@ def control_run(units: List[unit.Unit] = [], inputs: List[Image.Image] = [], ini
steps: int = 20, sampler_index: int = None,
seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1,
cfg_scale: float = 6.0, clip_skip: float = 1.0, image_cfg_scale: float = 6.0, diffusers_guidance_rescale: float = 0.7, sag_scale: float = 0.0, cfg_end: float = 1.0,
full_quality: bool = True, restore_faces: bool = False, tiling: bool = False,
full_quality: bool = True, restore_faces: bool = False, tiling: bool = False, hidiffusion: bool = False,
hdr_mode: int = 0, hdr_brightness: float = 0, hdr_color: float = 0, hdr_sharpen: float = 0, hdr_clamp: bool = False, hdr_boundary: float = 4.0, hdr_threshold: float = 0.95,
hdr_maximize: bool = False, hdr_max_center: float = 0.6, hdr_max_boundry: float = 1.0, hdr_color_picker: str = None, hdr_tint_ratio: float = 0,
resize_mode_before: int = 0, resize_name_before: str = 'None', width_before: int = 512, height_before: int = 512, scale_by_before: float = 1.0, selected_scale_tab_before: int = 0,
@ -107,6 +107,7 @@ def control_run(units: List[unit.Unit] = [], inputs: List[Image.Image] = [], ini
full_quality = full_quality,
restore_faces = restore_faces,
tiling = tiling,
hidiffusion = hidiffusion,
# resize
resize_mode = resize_mode_before if resize_name_before != 'None' else 0,
resize_name = resize_name_before,
@ -237,9 +238,9 @@ def control_run(units: List[unit.Unit] = [], inputs: List[Image.Image] = [], ini
p.extra_generation_params['Control start'] = control_guidance_start if isinstance(control_guidance_start, list) else [control_guidance_start]
p.extra_generation_params['Control end'] = control_guidance_end if isinstance(control_guidance_end, list) else [control_guidance_end]
p.extra_generation_params["Control model"] = ';'.join([(m.model_id or '') for m in active_model if m.model is not None])
p.extra_generation_params["Control conditioning"] = ';'.join([str(c) for c in control_conditioning])
p.extra_generation_params['Control start'] = ';'.join([str(c) for c in control_guidance_start])
p.extra_generation_params['Control end'] = ';'.join([str(c) for c in control_guidance_end])
p.extra_generation_params["Control conditioning"] = ';'.join([str(c) for c in p.extra_generation_params["Control conditioning"]])
p.extra_generation_params['Control start'] = ';'.join([str(c) for c in p.extra_generation_params['Control start']])
p.extra_generation_params['Control end'] = ';'.join([str(c) for c in p.extra_generation_params['Control end']])
if unit_type == 't2i adapter' and has_models:
p.extra_generation_params["Control mode"] = 'T2I-Adapter'
p.task_args['adapter_conditioning_scale'] = control_conditioning

View File

@ -0,0 +1,3 @@
# Package entry point: re-export the public HiDiffusion API so callers can do
# `from modules.hidiffusion import apply_hidiffusion, remove_hidiffusion`.
from .hidiffusion import apply_hidiffusion, remove_hidiffusion
__all__ = ["apply_hidiffusion", "remove_hidiffusion"]

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,685 @@
conv_in
time_proj
time_embedding
time_embedding.linear_1
time_embedding.act
time_embedding.linear_2
down_blocks
down_blocks.0
down_blocks.0.attentions
down_blocks.0.attentions.0
down_blocks.0.attentions.0.norm
down_blocks.0.attentions.0.proj_in
down_blocks.0.attentions.0.transformer_blocks
down_blocks.0.attentions.0.transformer_blocks.0
down_blocks.0.attentions.0.transformer_blocks.0.norm1
down_blocks.0.attentions.0.transformer_blocks.0.attn1
down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q
down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k
down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v
down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out
down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0
down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.1
down_blocks.0.attentions.0.transformer_blocks.0.norm2
down_blocks.0.attentions.0.transformer_blocks.0.attn2
down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q
down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k
down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v
down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out
down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0
down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.1
down_blocks.0.attentions.0.transformer_blocks.0.norm3
down_blocks.0.attentions.0.transformer_blocks.0.ff
down_blocks.0.attentions.0.transformer_blocks.0.ff.net
down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0
down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj
down_blocks.0.attentions.0.transformer_blocks.0.ff.net.1
down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2
down_blocks.0.attentions.0.proj_out
down_blocks.0.attentions.1
down_blocks.0.attentions.1.norm
down_blocks.0.attentions.1.proj_in
down_blocks.0.attentions.1.transformer_blocks
down_blocks.0.attentions.1.transformer_blocks.0
down_blocks.0.attentions.1.transformer_blocks.0.norm1
down_blocks.0.attentions.1.transformer_blocks.0.attn1
down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q
down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k
down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v
down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out
down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0
down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.1
down_blocks.0.attentions.1.transformer_blocks.0.norm2
down_blocks.0.attentions.1.transformer_blocks.0.attn2
down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q
down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k
down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v
down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out
down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0
down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.1
down_blocks.0.attentions.1.transformer_blocks.0.norm3
down_blocks.0.attentions.1.transformer_blocks.0.ff
down_blocks.0.attentions.1.transformer_blocks.0.ff.net
down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0
down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj
down_blocks.0.attentions.1.transformer_blocks.0.ff.net.1
down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2
down_blocks.0.attentions.1.proj_out
down_blocks.0.resnets
down_blocks.0.resnets.0
down_blocks.0.resnets.0.norm1
down_blocks.0.resnets.0.conv1
down_blocks.0.resnets.0.time_emb_proj
down_blocks.0.resnets.0.norm2
down_blocks.0.resnets.0.dropout
down_blocks.0.resnets.0.conv2
down_blocks.0.resnets.1
down_blocks.0.resnets.1.norm1
down_blocks.0.resnets.1.conv1
down_blocks.0.resnets.1.time_emb_proj
down_blocks.0.resnets.1.norm2
down_blocks.0.resnets.1.dropout
down_blocks.0.resnets.1.conv2
down_blocks.0.downsamplers
down_blocks.0.downsamplers.0
down_blocks.0.downsamplers.0.conv
down_blocks.1
down_blocks.1.attentions
down_blocks.1.attentions.0
down_blocks.1.attentions.0.norm
down_blocks.1.attentions.0.proj_in
down_blocks.1.attentions.0.transformer_blocks
down_blocks.1.attentions.0.transformer_blocks.0
down_blocks.1.attentions.0.transformer_blocks.0.norm1
down_blocks.1.attentions.0.transformer_blocks.0.attn1
down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q
down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k
down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v
down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out
down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0
down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.1
down_blocks.1.attentions.0.transformer_blocks.0.norm2
down_blocks.1.attentions.0.transformer_blocks.0.attn2
down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q
down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k
down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v
down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out
down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0
down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.1
down_blocks.1.attentions.0.transformer_blocks.0.norm3
down_blocks.1.attentions.0.transformer_blocks.0.ff
down_blocks.1.attentions.0.transformer_blocks.0.ff.net
down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0
down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj
down_blocks.1.attentions.0.transformer_blocks.0.ff.net.1
down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2
down_blocks.1.attentions.0.proj_out
down_blocks.1.attentions.1
down_blocks.1.attentions.1.norm
down_blocks.1.attentions.1.proj_in
down_blocks.1.attentions.1.transformer_blocks
down_blocks.1.attentions.1.transformer_blocks.0
down_blocks.1.attentions.1.transformer_blocks.0.norm1
down_blocks.1.attentions.1.transformer_blocks.0.attn1
down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q
down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k
down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v
down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out
down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0
down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.1
down_blocks.1.attentions.1.transformer_blocks.0.norm2
down_blocks.1.attentions.1.transformer_blocks.0.attn2
down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q
down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k
down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v
down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out
down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0
down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.1
down_blocks.1.attentions.1.transformer_blocks.0.norm3
down_blocks.1.attentions.1.transformer_blocks.0.ff
down_blocks.1.attentions.1.transformer_blocks.0.ff.net
down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0
down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj
down_blocks.1.attentions.1.transformer_blocks.0.ff.net.1
down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2
down_blocks.1.attentions.1.proj_out
down_blocks.1.resnets
down_blocks.1.resnets.0
down_blocks.1.resnets.0.norm1
down_blocks.1.resnets.0.conv1
down_blocks.1.resnets.0.time_emb_proj
down_blocks.1.resnets.0.norm2
down_blocks.1.resnets.0.dropout
down_blocks.1.resnets.0.conv2
down_blocks.1.resnets.0.conv_shortcut
down_blocks.1.resnets.1
down_blocks.1.resnets.1.norm1
down_blocks.1.resnets.1.conv1
down_blocks.1.resnets.1.time_emb_proj
down_blocks.1.resnets.1.norm2
down_blocks.1.resnets.1.dropout
down_blocks.1.resnets.1.conv2
down_blocks.1.downsamplers
down_blocks.1.downsamplers.0
down_blocks.1.downsamplers.0.conv
down_blocks.2
down_blocks.2.attentions
down_blocks.2.attentions.0
down_blocks.2.attentions.0.norm
down_blocks.2.attentions.0.proj_in
down_blocks.2.attentions.0.transformer_blocks
down_blocks.2.attentions.0.transformer_blocks.0
down_blocks.2.attentions.0.transformer_blocks.0.norm1
down_blocks.2.attentions.0.transformer_blocks.0.attn1
down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q
down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k
down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v
down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out
down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0
down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.1
down_blocks.2.attentions.0.transformer_blocks.0.norm2
down_blocks.2.attentions.0.transformer_blocks.0.attn2
down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q
down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k
down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v
down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out
down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0
down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.1
down_blocks.2.attentions.0.transformer_blocks.0.norm3
down_blocks.2.attentions.0.transformer_blocks.0.ff
down_blocks.2.attentions.0.transformer_blocks.0.ff.net
down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0
down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj
down_blocks.2.attentions.0.transformer_blocks.0.ff.net.1
down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2
down_blocks.2.attentions.0.proj_out
down_blocks.2.attentions.1
down_blocks.2.attentions.1.norm
down_blocks.2.attentions.1.proj_in
down_blocks.2.attentions.1.transformer_blocks
down_blocks.2.attentions.1.transformer_blocks.0
down_blocks.2.attentions.1.transformer_blocks.0.norm1
down_blocks.2.attentions.1.transformer_blocks.0.attn1
down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q
down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k
down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v
down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out
down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0
down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.1
down_blocks.2.attentions.1.transformer_blocks.0.norm2
down_blocks.2.attentions.1.transformer_blocks.0.attn2
down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q
down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k
down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v
down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out
down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0
down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.1
down_blocks.2.attentions.1.transformer_blocks.0.norm3
down_blocks.2.attentions.1.transformer_blocks.0.ff
down_blocks.2.attentions.1.transformer_blocks.0.ff.net
down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0
down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj
down_blocks.2.attentions.1.transformer_blocks.0.ff.net.1
down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2
down_blocks.2.attentions.1.proj_out
down_blocks.2.resnets
down_blocks.2.resnets.0
down_blocks.2.resnets.0.norm1
down_blocks.2.resnets.0.conv1
down_blocks.2.resnets.0.time_emb_proj
down_blocks.2.resnets.0.norm2
down_blocks.2.resnets.0.dropout
down_blocks.2.resnets.0.conv2
down_blocks.2.resnets.0.conv_shortcut
down_blocks.2.resnets.1
down_blocks.2.resnets.1.norm1
down_blocks.2.resnets.1.conv1
down_blocks.2.resnets.1.time_emb_proj
down_blocks.2.resnets.1.norm2
down_blocks.2.resnets.1.dropout
down_blocks.2.resnets.1.conv2
down_blocks.2.downsamplers
down_blocks.2.downsamplers.0
down_blocks.2.downsamplers.0.conv
down_blocks.3
down_blocks.3.resnets
down_blocks.3.resnets.0
down_blocks.3.resnets.0.norm1
down_blocks.3.resnets.0.conv1
down_blocks.3.resnets.0.time_emb_proj
down_blocks.3.resnets.0.norm2
down_blocks.3.resnets.0.dropout
down_blocks.3.resnets.0.conv2
down_blocks.3.resnets.1
down_blocks.3.resnets.1.norm1
down_blocks.3.resnets.1.conv1
down_blocks.3.resnets.1.time_emb_proj
down_blocks.3.resnets.1.norm2
down_blocks.3.resnets.1.dropout
down_blocks.3.resnets.1.conv2
up_blocks
up_blocks.0
up_blocks.0.resnets
up_blocks.0.resnets.0
up_blocks.0.resnets.0.norm1
up_blocks.0.resnets.0.conv1
up_blocks.0.resnets.0.time_emb_proj
up_blocks.0.resnets.0.norm2
up_blocks.0.resnets.0.dropout
up_blocks.0.resnets.0.conv2
up_blocks.0.resnets.0.conv_shortcut
up_blocks.0.resnets.1
up_blocks.0.resnets.1.norm1
up_blocks.0.resnets.1.conv1
up_blocks.0.resnets.1.time_emb_proj
up_blocks.0.resnets.1.norm2
up_blocks.0.resnets.1.dropout
up_blocks.0.resnets.1.conv2
up_blocks.0.resnets.1.conv_shortcut
up_blocks.0.resnets.2
up_blocks.0.resnets.2.norm1
up_blocks.0.resnets.2.conv1
up_blocks.0.resnets.2.time_emb_proj
up_blocks.0.resnets.2.norm2
up_blocks.0.resnets.2.dropout
up_blocks.0.resnets.2.conv2
up_blocks.0.resnets.2.conv_shortcut
up_blocks.0.upsamplers
up_blocks.0.upsamplers.0
up_blocks.0.upsamplers.0.conv
up_blocks.1
up_blocks.1.attentions
up_blocks.1.attentions.0
up_blocks.1.attentions.0.norm
up_blocks.1.attentions.0.proj_in
up_blocks.1.attentions.0.transformer_blocks
up_blocks.1.attentions.0.transformer_blocks.0
up_blocks.1.attentions.0.transformer_blocks.0.norm1
up_blocks.1.attentions.0.transformer_blocks.0.attn1
up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q
up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k
up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v
up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out
up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0
up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.1
up_blocks.1.attentions.0.transformer_blocks.0.norm2
up_blocks.1.attentions.0.transformer_blocks.0.attn2
up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q
up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k
up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v
up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out
up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0
up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.1
up_blocks.1.attentions.0.transformer_blocks.0.norm3
up_blocks.1.attentions.0.transformer_blocks.0.ff
up_blocks.1.attentions.0.transformer_blocks.0.ff.net
up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0
up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj
up_blocks.1.attentions.0.transformer_blocks.0.ff.net.1
up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2
up_blocks.1.attentions.0.proj_out
up_blocks.1.attentions.1
up_blocks.1.attentions.1.norm
up_blocks.1.attentions.1.proj_in
up_blocks.1.attentions.1.transformer_blocks
up_blocks.1.attentions.1.transformer_blocks.0
up_blocks.1.attentions.1.transformer_blocks.0.norm1
up_blocks.1.attentions.1.transformer_blocks.0.attn1
up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q
up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k
up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v
up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out
up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0
up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.1
up_blocks.1.attentions.1.transformer_blocks.0.norm2
up_blocks.1.attentions.1.transformer_blocks.0.attn2
up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q
up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k
up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v
up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out
up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0
up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.1
up_blocks.1.attentions.1.transformer_blocks.0.norm3
up_blocks.1.attentions.1.transformer_blocks.0.ff
up_blocks.1.attentions.1.transformer_blocks.0.ff.net
up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0
up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj
up_blocks.1.attentions.1.transformer_blocks.0.ff.net.1
up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2
up_blocks.1.attentions.1.proj_out
up_blocks.1.attentions.2
up_blocks.1.attentions.2.norm
up_blocks.1.attentions.2.proj_in
up_blocks.1.attentions.2.transformer_blocks
up_blocks.1.attentions.2.transformer_blocks.0
up_blocks.1.attentions.2.transformer_blocks.0.norm1
up_blocks.1.attentions.2.transformer_blocks.0.attn1
up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q
up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k
up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v
up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out
up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0
up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.1
up_blocks.1.attentions.2.transformer_blocks.0.norm2
up_blocks.1.attentions.2.transformer_blocks.0.attn2
up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q
up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k
up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v
up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out
up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0
up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.1
up_blocks.1.attentions.2.transformer_blocks.0.norm3
up_blocks.1.attentions.2.transformer_blocks.0.ff
up_blocks.1.attentions.2.transformer_blocks.0.ff.net
up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0
up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj
up_blocks.1.attentions.2.transformer_blocks.0.ff.net.1
up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2
up_blocks.1.attentions.2.proj_out
up_blocks.1.resnets
up_blocks.1.resnets.0
up_blocks.1.resnets.0.norm1
up_blocks.1.resnets.0.conv1
up_blocks.1.resnets.0.time_emb_proj
up_blocks.1.resnets.0.norm2
up_blocks.1.resnets.0.dropout
up_blocks.1.resnets.0.conv2
up_blocks.1.resnets.0.conv_shortcut
up_blocks.1.resnets.1
up_blocks.1.resnets.1.norm1
up_blocks.1.resnets.1.conv1
up_blocks.1.resnets.1.time_emb_proj
up_blocks.1.resnets.1.norm2
up_blocks.1.resnets.1.dropout
up_blocks.1.resnets.1.conv2
up_blocks.1.resnets.1.conv_shortcut
up_blocks.1.resnets.2
up_blocks.1.resnets.2.norm1
up_blocks.1.resnets.2.conv1
up_blocks.1.resnets.2.time_emb_proj
up_blocks.1.resnets.2.norm2
up_blocks.1.resnets.2.dropout
up_blocks.1.resnets.2.conv2
up_blocks.1.resnets.2.conv_shortcut
up_blocks.1.upsamplers
up_blocks.1.upsamplers.0
up_blocks.1.upsamplers.0.conv
up_blocks.2
up_blocks.2.attentions
up_blocks.2.attentions.0
up_blocks.2.attentions.0.norm
up_blocks.2.attentions.0.proj_in
up_blocks.2.attentions.0.transformer_blocks
up_blocks.2.attentions.0.transformer_blocks.0
up_blocks.2.attentions.0.transformer_blocks.0.norm1
up_blocks.2.attentions.0.transformer_blocks.0.attn1
up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q
up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k
up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v
up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out
up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0
up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.1
up_blocks.2.attentions.0.transformer_blocks.0.norm2
up_blocks.2.attentions.0.transformer_blocks.0.attn2
up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q
up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k
up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v
up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out
up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0
up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.1
up_blocks.2.attentions.0.transformer_blocks.0.norm3
up_blocks.2.attentions.0.transformer_blocks.0.ff
up_blocks.2.attentions.0.transformer_blocks.0.ff.net
up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0
up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj
up_blocks.2.attentions.0.transformer_blocks.0.ff.net.1
up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2
up_blocks.2.attentions.0.proj_out
up_blocks.2.attentions.1
up_blocks.2.attentions.1.norm
up_blocks.2.attentions.1.proj_in
up_blocks.2.attentions.1.transformer_blocks
up_blocks.2.attentions.1.transformer_blocks.0
up_blocks.2.attentions.1.transformer_blocks.0.norm1
up_blocks.2.attentions.1.transformer_blocks.0.attn1
up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q
up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k
up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v
up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out
up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0
up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.1
up_blocks.2.attentions.1.transformer_blocks.0.norm2
up_blocks.2.attentions.1.transformer_blocks.0.attn2
up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q
up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k
up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v
up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out
up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0
up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.1
up_blocks.2.attentions.1.transformer_blocks.0.norm3
up_blocks.2.attentions.1.transformer_blocks.0.ff
up_blocks.2.attentions.1.transformer_blocks.0.ff.net
up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0
up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj
up_blocks.2.attentions.1.transformer_blocks.0.ff.net.1
up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2
up_blocks.2.attentions.1.proj_out
up_blocks.2.attentions.2
up_blocks.2.attentions.2.norm
up_blocks.2.attentions.2.proj_in
up_blocks.2.attentions.2.transformer_blocks
up_blocks.2.attentions.2.transformer_blocks.0
up_blocks.2.attentions.2.transformer_blocks.0.norm1
up_blocks.2.attentions.2.transformer_blocks.0.attn1
up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q
up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k
up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v
up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out
up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0
up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.1
up_blocks.2.attentions.2.transformer_blocks.0.norm2
up_blocks.2.attentions.2.transformer_blocks.0.attn2
up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q
up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k
up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v
up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out
up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0
up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.1
up_blocks.2.attentions.2.transformer_blocks.0.norm3
up_blocks.2.attentions.2.transformer_blocks.0.ff
up_blocks.2.attentions.2.transformer_blocks.0.ff.net
up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0
up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj
up_blocks.2.attentions.2.transformer_blocks.0.ff.net.1
up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2
up_blocks.2.attentions.2.proj_out
up_blocks.2.resnets
up_blocks.2.resnets.0
up_blocks.2.resnets.0.norm1
up_blocks.2.resnets.0.conv1
up_blocks.2.resnets.0.time_emb_proj
up_blocks.2.resnets.0.norm2
up_blocks.2.resnets.0.dropout
up_blocks.2.resnets.0.conv2
up_blocks.2.resnets.0.conv_shortcut
up_blocks.2.resnets.1
up_blocks.2.resnets.1.norm1
up_blocks.2.resnets.1.conv1
up_blocks.2.resnets.1.time_emb_proj
up_blocks.2.resnets.1.norm2
up_blocks.2.resnets.1.dropout
up_blocks.2.resnets.1.conv2
up_blocks.2.resnets.1.conv_shortcut
up_blocks.2.resnets.2
up_blocks.2.resnets.2.norm1
up_blocks.2.resnets.2.conv1
up_blocks.2.resnets.2.time_emb_proj
up_blocks.2.resnets.2.norm2
up_blocks.2.resnets.2.dropout
up_blocks.2.resnets.2.conv2
up_blocks.2.resnets.2.conv_shortcut
up_blocks.2.upsamplers
up_blocks.2.upsamplers.0
up_blocks.2.upsamplers.0.conv
up_blocks.3
up_blocks.3.attentions
up_blocks.3.attentions.0
up_blocks.3.attentions.0.norm
up_blocks.3.attentions.0.proj_in
up_blocks.3.attentions.0.transformer_blocks
up_blocks.3.attentions.0.transformer_blocks.0
up_blocks.3.attentions.0.transformer_blocks.0.norm1
up_blocks.3.attentions.0.transformer_blocks.0.attn1
up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q
up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k
up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v
up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out
up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0
up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.1
up_blocks.3.attentions.0.transformer_blocks.0.norm2
up_blocks.3.attentions.0.transformer_blocks.0.attn2
up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q
up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k
up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v
up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out
up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0
up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.1
up_blocks.3.attentions.0.transformer_blocks.0.norm3
up_blocks.3.attentions.0.transformer_blocks.0.ff
up_blocks.3.attentions.0.transformer_blocks.0.ff.net
up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0
up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj
up_blocks.3.attentions.0.transformer_blocks.0.ff.net.1
up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2
up_blocks.3.attentions.0.proj_out
up_blocks.3.attentions.1
up_blocks.3.attentions.1.norm
up_blocks.3.attentions.1.proj_in
up_blocks.3.attentions.1.transformer_blocks
up_blocks.3.attentions.1.transformer_blocks.0
up_blocks.3.attentions.1.transformer_blocks.0.norm1
up_blocks.3.attentions.1.transformer_blocks.0.attn1
up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q
up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k
up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v
up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out
up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0
up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.1
up_blocks.3.attentions.1.transformer_blocks.0.norm2
up_blocks.3.attentions.1.transformer_blocks.0.attn2
up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q
up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k
up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v
up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out
up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0
up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.1
up_blocks.3.attentions.1.transformer_blocks.0.norm3
up_blocks.3.attentions.1.transformer_blocks.0.ff
up_blocks.3.attentions.1.transformer_blocks.0.ff.net
up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0
up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj
up_blocks.3.attentions.1.transformer_blocks.0.ff.net.1
up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2
up_blocks.3.attentions.1.proj_out
up_blocks.3.attentions.2
up_blocks.3.attentions.2.norm
up_blocks.3.attentions.2.proj_in
up_blocks.3.attentions.2.transformer_blocks
up_blocks.3.attentions.2.transformer_blocks.0
up_blocks.3.attentions.2.transformer_blocks.0.norm1
up_blocks.3.attentions.2.transformer_blocks.0.attn1
up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q
up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k
up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v
up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out
up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0
up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.1
up_blocks.3.attentions.2.transformer_blocks.0.norm2
up_blocks.3.attentions.2.transformer_blocks.0.attn2
up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q
up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k
up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v
up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out
up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0
up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.1
up_blocks.3.attentions.2.transformer_blocks.0.norm3
up_blocks.3.attentions.2.transformer_blocks.0.ff
up_blocks.3.attentions.2.transformer_blocks.0.ff.net
up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0
up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj
up_blocks.3.attentions.2.transformer_blocks.0.ff.net.1
up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2
up_blocks.3.attentions.2.proj_out
up_blocks.3.resnets
up_blocks.3.resnets.0
up_blocks.3.resnets.0.norm1
up_blocks.3.resnets.0.conv1
up_blocks.3.resnets.0.time_emb_proj
up_blocks.3.resnets.0.norm2
up_blocks.3.resnets.0.dropout
up_blocks.3.resnets.0.conv2
up_blocks.3.resnets.0.conv_shortcut
up_blocks.3.resnets.1
up_blocks.3.resnets.1.norm1
up_blocks.3.resnets.1.conv1
up_blocks.3.resnets.1.time_emb_proj
up_blocks.3.resnets.1.norm2
up_blocks.3.resnets.1.dropout
up_blocks.3.resnets.1.conv2
up_blocks.3.resnets.1.conv_shortcut
up_blocks.3.resnets.2
up_blocks.3.resnets.2.norm1
up_blocks.3.resnets.2.conv1
up_blocks.3.resnets.2.time_emb_proj
up_blocks.3.resnets.2.norm2
up_blocks.3.resnets.2.dropout
up_blocks.3.resnets.2.conv2
up_blocks.3.resnets.2.conv_shortcut
mid_block
mid_block.attentions
mid_block.attentions.0
mid_block.attentions.0.norm
mid_block.attentions.0.proj_in
mid_block.attentions.0.transformer_blocks
mid_block.attentions.0.transformer_blocks.0
mid_block.attentions.0.transformer_blocks.0.norm1
mid_block.attentions.0.transformer_blocks.0.attn1
mid_block.attentions.0.transformer_blocks.0.attn1.to_q
mid_block.attentions.0.transformer_blocks.0.attn1.to_k
mid_block.attentions.0.transformer_blocks.0.attn1.to_v
mid_block.attentions.0.transformer_blocks.0.attn1.to_out
mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0
mid_block.attentions.0.transformer_blocks.0.attn1.to_out.1
mid_block.attentions.0.transformer_blocks.0.norm2
mid_block.attentions.0.transformer_blocks.0.attn2
mid_block.attentions.0.transformer_blocks.0.attn2.to_q
mid_block.attentions.0.transformer_blocks.0.attn2.to_k
mid_block.attentions.0.transformer_blocks.0.attn2.to_v
mid_block.attentions.0.transformer_blocks.0.attn2.to_out
mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0
mid_block.attentions.0.transformer_blocks.0.attn2.to_out.1
mid_block.attentions.0.transformer_blocks.0.norm3
mid_block.attentions.0.transformer_blocks.0.ff
mid_block.attentions.0.transformer_blocks.0.ff.net
mid_block.attentions.0.transformer_blocks.0.ff.net.0
mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj
mid_block.attentions.0.transformer_blocks.0.ff.net.1
mid_block.attentions.0.transformer_blocks.0.ff.net.2
mid_block.attentions.0.proj_out
mid_block.resnets
mid_block.resnets.0
mid_block.resnets.0.norm1
mid_block.resnets.0.conv1
mid_block.resnets.0.time_emb_proj
mid_block.resnets.0.norm2
mid_block.resnets.0.dropout
mid_block.resnets.0.conv2
mid_block.resnets.1
mid_block.resnets.1.norm1
mid_block.resnets.1.conv1
mid_block.resnets.1.time_emb_proj
mid_block.resnets.1.norm2
mid_block.resnets.1.dropout
mid_block.resnets.1.conv2
conv_norm_out
conv_out

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,32 @@
import torch
def isinstance_str(x: object, cls_name: str):
    """
    Return True if any class in x's MRO is *named* cls_name.

    Works purely by class name, so it does not need access to the class's
    implementation — handy when patching objects from third-party modules.
    """
    return any(ancestor.__name__ == cls_name for ancestor in type(x).__mro__)
def init_generator(device: torch.device, fallback: torch.Generator = None):
    """
    Fork the current default random generator for the given device.

    Returns a new torch.Generator on `device` seeded with that device's
    current RNG state, so subsequent draws are reproducible without
    disturbing the global generator.

    Args:
        device: target device for the generator (cpu / cuda supported).
        fallback: generator returned for unsupported device types; when
            None, a forked cpu generator is returned instead.
    """
    if device.type == "cpu":
        return torch.Generator(device="cpu").set_state(torch.get_rng_state())
    elif device.type == "cuda":
        # read the RNG state of the *specified* cuda device, not the current
        # default one — device may be e.g. cuda:1 while cuda:0 is current
        return torch.Generator(device=device).set_state(torch.cuda.get_rng_state(device))
    else:
        if fallback is None:
            return init_generator(torch.device("cpu"))
        else:
            return fallback

View File

@ -119,7 +119,7 @@ def img2img(id_task: str, mode: int,
sampler_index,
mask_blur, mask_alpha,
inpainting_fill,
full_quality, restore_faces, tiling,
full_quality, restore_faces, tiling, hidiffusion,
n_iter, batch_size,
cfg_scale, image_cfg_scale,
diffusers_guidance_rescale,
@ -143,7 +143,7 @@ def img2img(id_task: str, mode: int,
shared.log.warning('Model not loaded')
return [], '', '', 'Error: model not loaded'
debug(f'img2img: id_task={id_task}|mode={mode}|prompt={prompt}|negative_prompt={negative_prompt}|prompt_styles={prompt_styles}|init_img={init_img}|sketch={sketch}|init_img_with_mask={init_img_with_mask}|inpaint_color_sketch={inpaint_color_sketch}|inpaint_color_sketch_orig={inpaint_color_sketch_orig}|init_img_inpaint={init_img_inpaint}|init_mask_inpaint={init_mask_inpaint}|steps={steps}|sampler_index={sampler_index}||mask_blur={mask_blur}|mask_alpha={mask_alpha}|inpainting_fill={inpainting_fill}|full_quality={full_quality}|restore_faces={restore_faces}|tiling={tiling}|n_iter={n_iter}|batch_size={batch_size}|cfg_scale={cfg_scale}|image_cfg_scale={image_cfg_scale}|clip_skip={clip_skip}|denoising_strength={denoising_strength}|seed={seed}|subseed{subseed}|subseed_strength={subseed_strength}|seed_resize_from_h={seed_resize_from_h}|seed_resize_from_w={seed_resize_from_w}|selected_scale_tab={selected_scale_tab}|height={height}|width={width}|scale_by={scale_by}|resize_mode={resize_mode}|resize_name={resize_name}|inpaint_full_res={inpaint_full_res}|inpaint_full_res_padding={inpaint_full_res_padding}|inpainting_mask_invert={inpainting_mask_invert}|img2img_batch_files={img2img_batch_files}|img2img_batch_input_dir={img2img_batch_input_dir}|img2img_batch_output_dir={img2img_batch_output_dir}|img2img_batch_inpaint_mask_dir={img2img_batch_inpaint_mask_dir}|override_settings_texts={override_settings_texts}')
debug(f'img2img: id_task={id_task}|mode={mode}|prompt={prompt}|negative_prompt={negative_prompt}|prompt_styles={prompt_styles}|init_img={init_img}|sketch={sketch}|init_img_with_mask={init_img_with_mask}|inpaint_color_sketch={inpaint_color_sketch}|inpaint_color_sketch_orig={inpaint_color_sketch_orig}|init_img_inpaint={init_img_inpaint}|init_mask_inpaint={init_mask_inpaint}|steps={steps}|sampler_index={sampler_index}||mask_blur={mask_blur}|mask_alpha={mask_alpha}|inpainting_fill={inpainting_fill}|full_quality={full_quality}|restore_faces={restore_faces}|tiling={tiling}|hidiffusion={hidiffusion}|n_iter={n_iter}|batch_size={batch_size}|cfg_scale={cfg_scale}|image_cfg_scale={image_cfg_scale}|clip_skip={clip_skip}|denoising_strength={denoising_strength}|seed={seed}|subseed{subseed}|subseed_strength={subseed_strength}|seed_resize_from_h={seed_resize_from_h}|seed_resize_from_w={seed_resize_from_w}|selected_scale_tab={selected_scale_tab}|height={height}|width={width}|scale_by={scale_by}|resize_mode={resize_mode}|resize_name={resize_name}|inpaint_full_res={inpaint_full_res}|inpaint_full_res_padding={inpaint_full_res_padding}|inpainting_mask_invert={inpainting_mask_invert}|img2img_batch_files={img2img_batch_files}|img2img_batch_input_dir={img2img_batch_input_dir}|img2img_batch_output_dir={img2img_batch_output_dir}|img2img_batch_inpaint_mask_dir={img2img_batch_inpaint_mask_dir}|override_settings_texts={override_settings_texts}')
if mode == 5:
if img2img_batch_files is None or len(img2img_batch_files) == 0:
@ -225,6 +225,7 @@ def img2img(id_task: str, mode: int,
full_quality=full_quality,
restore_faces=restore_faces,
tiling=tiling,
hidiffusion=hidiffusion,
init_images=[image],
mask=mask,
mask_blur=mask_blur,

View File

@ -20,7 +20,7 @@ class StableDiffusionProcessing:
"""
The first set of parameters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
"""
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, hr_sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, image_cfg_scale: float = None, clip_skip: int = 1, width: int = 512, height: int = 512, full_quality: bool = True, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, diffusers_guidance_rescale: float = 0.7, sag_scale: float = 0.0, cfg_end: float = 1, resize_mode: int = 0, resize_name: str = 'None', scale_by: float = 0, selected_scale_tab: int = 0, hdr_mode: int = 0, hdr_brightness: float = 0, hdr_color: float = 0, hdr_sharpen: float = 0, hdr_clamp: bool = False, hdr_boundary: float = 4.0, hdr_threshold: float = 0.95, hdr_maximize: bool = False, hdr_max_center: float = 0.6, hdr_max_boundry: float = 1.0, hdr_color_picker: str = None, hdr_tint_ratio: float = 0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): # pylint: disable=unused-argument
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, hr_sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, image_cfg_scale: float = None, clip_skip: int = 1, width: int = 512, height: int = 512, full_quality: bool = True, restore_faces: bool = False, tiling: bool = False, hidiffusion: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, diffusers_guidance_rescale: float = 0.7, sag_scale: float = 0.0, cfg_end: float = 1, resize_mode: int = 0, resize_name: str = 'None', scale_by: float = 0, selected_scale_tab: int = 0, hdr_mode: int = 0, hdr_brightness: float = 0, hdr_color: float = 0, hdr_sharpen: float = 0, hdr_clamp: bool = False, hdr_boundary: float = 4.0, hdr_threshold: float = 0.95, hdr_maximize: bool = False, hdr_max_center: float = 0.6, hdr_max_boundry: float = 1.0, hdr_color_picker: str = None, hdr_tint_ratio: float = 0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): # pylint: disable=unused-argument
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
self.prompt: str = prompt
@ -49,6 +49,7 @@ class StableDiffusionProcessing:
self.full_quality: bool = full_quality
self.restore_faces: bool = restore_faces
self.tiling: bool = tiling
self.hidiffusion: bool = hidiffusion
self.do_not_save_samples: bool = do_not_save_samples
self.do_not_save_grid: bool = do_not_save_grid
self.extra_generation_params: dict = extra_generation_params or {}

View File

@ -8,7 +8,7 @@ import numpy as np
import torch
import torchvision.transforms.functional as TF
import diffusers
from modules import shared, devices, processing, sd_samplers, sd_models, images, errors, prompt_parser_diffusers, sd_hijack_hypertile, processing_correction, processing_vae, sd_models_compile, extra_networks
from modules import shared, devices, processing, sd_samplers, sd_models, images, errors, prompt_parser_diffusers, sd_hijack_hypertile, processing_correction, processing_vae, sd_models_compile, extra_networks, hidiffusion
from modules.processing_helpers import resize_init_images, resize_hires, fix_prompts, calculate_base_steps, calculate_hires_steps, calculate_refiner_steps
from modules.onnx_impl import preprocess_pipeline as preprocess_onnx_pipeline, check_parameters_changed as olive_check_parameters_changed
@ -437,9 +437,16 @@ def process_diffusers(p: processing.StableDiffusionProcessing):
t0 = time.time()
sd_models_compile.check_deepcache(enable=True)
sd_models.move_model(shared.sd_model, devices.device)
if p.hidiffusion:
hidiffusion.hidiffusion.is_aggressive_raunet = shared.opts.hidiffusion_aggressive
hidiffusion.apply_hidiffusion(shared.sd_model, apply_raunet=shared.opts.hidiffusion_raunet, apply_window_attn=shared.opts.hidiffusion_attn)
shared.log.debug(f'Applying HiDiffusion: raunet={shared.opts.hidiffusion_raunet} aggressive={shared.opts.hidiffusion_aggressive} attn={shared.opts.hidiffusion_attn}')
p.extra_generation_params['HiDiffusion'] = f'{shared.opts.hidiffusion_raunet}/{shared.opts.hidiffusion_aggressive}/{shared.opts.hidiffusion_attn}'
output = shared.sd_model(**base_args) # pylint: disable=not-callable
if isinstance(output, dict):
output = SimpleNamespace(**output)
if p.hidiffusion:
hidiffusion.remove_hidiffusion(shared.sd_model)
sd_models_compile.openvino_post_compile(op="base") # only executes on compiled vino models
sd_models_compile.check_deepcache(enable=False)
if shared.cmd_opts.profile:

View File

@ -313,17 +313,17 @@ def temp_disable_extensions():
if theme_name == 'huggingface':
theme_name = 'huggingface/blaaa'
if theme_name.startswith('standard'):
if theme_name.lower().startswith('standard') or theme_name.lower().startswith('default'):
modules.shared.opts.data['theme_type'] = 'Standard'
modules.shared.opts.data['gradio_theme'] = theme_name[9:]
elif theme_name.startswith('modern'):
elif theme_name.lower().startswith('modern'):
modules.shared.opts.data['theme_type'] = 'Modern'
modules.shared.opts.data['gradio_theme'] = theme_name[7:]
disable_themes.remove('sdnext-ui-ux')
elif theme_name.startswith('gradio'):
elif theme_name.lower().startswith('gradio'):
modules.shared.opts.data['theme_type'] = 'None'
modules.shared.opts.data['gradio_theme'] = theme_name
elif theme_name.startswith('huggingface'):
elif theme_name.lower().startswith('huggingface'):
modules.shared.opts.data['theme_type'] = 'None'
modules.shared.opts.data['gradio_theme'] = theme_name
else:
@ -439,6 +439,7 @@ options_templates.update(options_section(('cuda', "Compute Settings"), {
"diffusers_quantization": OptionInfo(False, "Dynamic quantization with TorchAO"),
"deep_cache_interval": OptionInfo(3, "DeepCache cache interval", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
"nncf_sep": OptionInfo("<h2>Model Compress</h2>", "", gr.HTML),
"nncf_compress_weights": OptionInfo([], "Compress Model weights with NNCF", gr.CheckboxGroup, {"choices": ["Model", "VAE", "Text Encoder"], "visible": backend == Backend.DIFFUSERS}),
"ipex_sep": OptionInfo("<h2>IPEX</h2>", "", gr.HTML, {"visible": devices.backend == "ipex"}),
@ -490,6 +491,11 @@ options_templates.update(options_section(('advanced', "Inference Settings"), {
"hypertile_vae_tile": OptionInfo(128, "HyperTile VAE tile size", gr.Slider, {"minimum": 0, "maximum": 1024, "step": 8}),
"hypertile_vae_swap_size": OptionInfo(1, "HyperTile VAE swap size", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
"hidiffusion_sep": OptionInfo("<h2>HiDiffusion</h2>", "", gr.HTML),
"hidiffusion_raunet": OptionInfo(True, "Apply RAU-Net"),
"hidiffusion_aggressive": OptionInfo(True, "Aggressive RAU-Net"),
"hidiffusion_attn": OptionInfo(True, "Apply MSW-MSA"),
"inference_batch_sep": OptionInfo("<h2>Batch</h2>", "", gr.HTML),
"sequential_seed": OptionInfo(True, "Batch mode uses sequential seeds"),
"batch_frame_mode": OptionInfo(False, "Parallel process images in batch"),

View File

@ -103,15 +103,15 @@ def reload_gradio_theme():
modules.shared.opts.data['gradio_theme'] = theme_name
if theme_name.lower() in ['lobe', 'cozy-nest']:
modules.shared.log.info(f'UI theme extension: name="{theme_name}" style={modules.shared.opts.theme_style}')
modules.shared.log.info(f'UI theme extension: name="{theme_name}"')
return None
elif modules.shared.opts.theme_type == 'Standard':
gradio_theme = gr.themes.Base(**default_font_params)
modules.shared.log.info(f'UI theme: type={modules.shared.opts.theme_type} name="{theme_name}" style={modules.shared.opts.theme_style}')
modules.shared.log.info(f'UI theme: type={modules.shared.opts.theme_type} name="{theme_name}"')
return 'sdnext.css'
elif modules.shared.opts.theme_type == 'Modern':
gradio_theme = gr.themes.Base(**default_font_params)
modules.shared.log.info(f'UI theme: type={modules.shared.opts.theme_type} name="{theme_name}" style={modules.shared.opts.theme_style}')
modules.shared.log.info(f'UI theme: type={modules.shared.opts.theme_type} name="{theme_name}"')
return 'base.css'
elif modules.shared.opts.theme_type == 'None':
if theme_name.startswith('gradio/'):

View File

@ -11,7 +11,7 @@ debug('Trace: PROCESS')
def txt2img(id_task,
prompt, negative_prompt, prompt_styles,
steps, sampler_index, hr_sampler_index,
full_quality, restore_faces, tiling,
full_quality, restore_faces, tiling, hidiffusion,
n_iter, batch_size,
cfg_scale, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end,
clip_skip,
@ -24,7 +24,7 @@ def txt2img(id_task,
override_settings_texts,
*args):
debug(f'txt2img: id_task={id_task}|prompt={prompt}|negative={negative_prompt}|styles={prompt_styles}|steps={steps}|sampler_index={sampler_index}|hr_sampler_index={hr_sampler_index}|full_quality={full_quality}|restore_faces={restore_faces}|tiling={tiling}|batch_count={n_iter}|batch_size={batch_size}|cfg_scale={cfg_scale}|clip_skip={clip_skip}|seed={seed}|subseed={subseed}|subseed_strength={subseed_strength}|seed_resize_from_h={seed_resize_from_h}|seed_resize_from_w={seed_resize_from_w}|height={height}|width={width}|enable_hr={enable_hr}|denoising_strength={denoising_strength}|hr_scale={hr_scale}|hr_upscaler={hr_upscaler}|hr_force={hr_force}|hr_second_pass_steps={hr_second_pass_steps}|hr_resize_x={hr_resize_x}|hr_resize_y={hr_resize_y}|image_cfg_scale={image_cfg_scale}|diffusers_guidance_rescale={diffusers_guidance_rescale}|refiner_steps={refiner_steps}|refiner_start={refiner_start}|refiner_prompt={refiner_prompt}|refiner_negative={refiner_negative}|override_settings={override_settings_texts}')
debug(f'txt2img: id_task={id_task}|prompt={prompt}|negative={negative_prompt}|styles={prompt_styles}|steps={steps}|sampler_index={sampler_index}|hr_sampler_index={hr_sampler_index}|full_quality={full_quality}|restore_faces={restore_faces}|tiling={tiling}|hidiffusion={hidiffusion}|batch_count={n_iter}|batch_size={batch_size}|cfg_scale={cfg_scale}|clip_skip={clip_skip}|seed={seed}|subseed={subseed}|subseed_strength={subseed_strength}|seed_resize_from_h={seed_resize_from_h}|seed_resize_from_w={seed_resize_from_w}|height={height}|width={width}|enable_hr={enable_hr}|denoising_strength={denoising_strength}|hr_scale={hr_scale}|hr_upscaler={hr_upscaler}|hr_force={hr_force}|hr_second_pass_steps={hr_second_pass_steps}|hr_resize_x={hr_resize_x}|hr_resize_y={hr_resize_y}|image_cfg_scale={image_cfg_scale}|diffusers_guidance_rescale={diffusers_guidance_rescale}|refiner_steps={refiner_steps}|refiner_start={refiner_start}|refiner_prompt={refiner_prompt}|refiner_negative={refiner_negative}|override_settings={override_settings_texts}')
if shared.sd_model is None:
shared.log.warning('Model not loaded')
@ -65,6 +65,7 @@ def txt2img(id_task,
full_quality=full_quality,
restore_faces=restore_faces,
tiling=tiling,
hidiffusion=hidiffusion,
enable_hr=enable_hr,
denoising_strength=denoising_strength,
hr_scale=hr_scale,

View File

@ -111,7 +111,7 @@ def create_ui(_blocks: gr.Blocks=None):
mask_controls = masking.create_segment_ui()
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling= ui_sections.create_advanced_inputs('control')
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling, hidiffusion = ui_sections.create_advanced_inputs('control')
hdr_mode, hdr_brightness, hdr_color, hdr_sharpen, hdr_clamp, hdr_boundary, hdr_threshold, hdr_maximize, hdr_max_center, hdr_max_boundry, hdr_color_picker, hdr_tint_ratio, = ui_sections.create_correction_inputs('control')
with gr.Accordion(open=False, label="Video", elem_id="control_video", elem_classes=["small-accordion"]):
@ -498,7 +498,7 @@ def create_ui(_blocks: gr.Blocks=None):
prompt, negative, styles,
steps, sampler_index,
seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w,
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling,
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling, hidiffusion,
hdr_mode, hdr_brightness, hdr_color, hdr_sharpen, hdr_clamp, hdr_boundary, hdr_threshold, hdr_maximize, hdr_max_center, hdr_max_boundry, hdr_color_picker, hdr_tint_ratio,
resize_mode_before, resize_name_before, width_before, height_before, scale_by_before, selected_scale_tab_before,
resize_mode_after, resize_name_after, width_after, height_after, scale_by_after, selected_scale_tab_after,
@ -561,6 +561,7 @@ def create_ui(_blocks: gr.Blocks=None):
(full_quality, "Full quality"),
(restore_faces, "Face restoration"),
(tiling, "Tiling"),
(hidiffusion, "HiDiffusion"),
# second pass
(enable_hr, "Second pass"),
(hr_sampler_index, "Hires sampler"),

View File

@ -130,7 +130,7 @@ def create_ui():
denoising_strength = gr.Slider(minimum=0.0, maximum=0.99, step=0.01, label='Denoising strength', value=0.50, elem_id="img2img_denoising_strength")
refiner_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Denoise start', value=0.0, elem_id="img2img_refiner_start")
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling = ui_sections.create_advanced_inputs('img2img')
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling, hidiffusion = ui_sections.create_advanced_inputs('img2img')
hdr_mode, hdr_brightness, hdr_color, hdr_sharpen, hdr_clamp, hdr_boundary, hdr_threshold, hdr_maximize, hdr_max_center, hdr_max_boundry, hdr_color_picker, hdr_tint_ratio, = ui_sections.create_correction_inputs('img2img')
# with gr.Group(elem_id="inpaint_controls", visible=False) as inpaint_controls:
@ -177,7 +177,7 @@ def create_ui():
sampler_index,
mask_blur, mask_alpha,
inpainting_fill,
full_quality, restore_faces, tiling,
full_quality, restore_faces, tiling, hidiffusion,
batch_count, batch_size,
cfg_scale, image_cfg_scale,
diffusers_guidance_rescale, sag_scale, cfg_end,
@ -263,6 +263,7 @@ def create_ui():
(full_quality, "Full quality"),
(restore_faces, "Face restoration"),
(tiling, "Tiling"),
(hidiffusion, "HiDiffusion"),
# inpaint
(mask_blur, "Mask blur"),
(mask_alpha, "Mask alpha"),

View File

@ -157,7 +157,8 @@ def create_advanced_inputs(tab):
full_quality = gr.Checkbox(label='Full quality', value=True, elem_id=f"{tab}_full_quality")
restore_faces = gr.Checkbox(label='Face restore', value=False, elem_id=f"{tab}_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id=f"{tab}_tiling", visible=True)
return cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, diffusers_sag_scale, cfg_end, full_quality, restore_faces, tiling
hidiffusion = gr.Checkbox(label='HiDiffusion', value=False, elem_id=f"{tab}_hidiffusion", visible=True)
return cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, diffusers_sag_scale, cfg_end, full_quality, restore_faces, tiling, hidiffusion
def create_correction_inputs(tab):
with gr.Accordion(open=False, label="Corrections", elem_id=f"{tab}_corrections", elem_classes=["small-accordion"], visible=shared.backend == shared.Backend.DIFFUSERS):

View File

@ -42,7 +42,7 @@ def create_ui():
steps, sampler_index = ui_sections.create_sampler_inputs('txt2img')
batch_count, batch_size = ui_sections.create_batch_inputs('txt2img')
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w = ui_sections.create_seed_inputs('txt2img')
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling = ui_sections.create_advanced_inputs('txt2img')
cfg_scale, clip_skip, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end, full_quality, restore_faces, tiling, hidiffusion = ui_sections.create_advanced_inputs('txt2img')
hdr_mode, hdr_brightness, hdr_color, hdr_sharpen, hdr_clamp, hdr_boundary, hdr_threshold, hdr_maximize, hdr_max_center, hdr_max_boundry, hdr_color_picker, hdr_tint_ratio, = ui_sections.create_correction_inputs('txt2img')
enable_hr, hr_sampler_index, denoising_strength, hr_upscaler, hr_force, hr_second_pass_steps, hr_scale, hr_resize_x, hr_resize_y, refiner_steps, refiner_start, refiner_prompt, refiner_negative = ui_sections.create_hires_inputs('txt2img')
override_settings = ui_common.create_override_inputs('txt2img')
@ -59,7 +59,7 @@ def create_ui():
dummy_component,
txt2img_prompt, txt2img_negative_prompt, txt2img_prompt_styles,
steps, sampler_index, hr_sampler_index,
full_quality, restore_faces, tiling,
full_quality, restore_faces, tiling, hidiffusion,
batch_count, batch_size,
cfg_scale, image_cfg_scale, diffusers_guidance_rescale, sag_scale, cfg_end,
clip_skip,
@ -111,6 +111,7 @@ def create_ui():
(full_quality, "Full quality"),
(restore_faces, "Face restoration"),
(tiling, "Tiling"),
(hidiffusion, "HiDiffusion"),
# second pass
(enable_hr, "Second pass"),
(hr_sampler_index, "Hires sampler"),

View File

@ -15,6 +15,7 @@ exclude = [
"modules/control/units/*_model.py",
"modules/control/units/*_pipe.py",
"modules/xadapter/*.py",
"modules/hidiffusion/*.py",
"modules/tcd/*.py",
]
[tool.ruff.lint]