mirror of https://github.com/vladmandic/automatic
commit
c9e21a51db
|
|
@ -1,6 +1,6 @@
|
|||
# Change Log for SD.Next
|
||||
|
||||
## Update for 2026-02-06
|
||||
## Update for 2026-02-07
|
||||
|
||||
- **Upscalers**
|
||||
- add support for [spandrel](https://github.com/chaiNNer-org/spandrel)
|
||||
|
|
@ -9,6 +9,8 @@
|
|||
- add two new interpolation methods: *HQX* and *ICB*
|
||||
- **Features**
|
||||
- pipelines: add **ZImageInpaint**, thanks @CalamitousFelicitousness
|
||||
- add `--remote` command line flag that reduces client/server chatter and improves link stability
|
||||
for long-running generates, useful when running on remote servers
|
||||
- **UI**
|
||||
- ui: **themes** add *CTD-NT64Light* and *CTD-NT64Dark*, thanks @resonantsky
|
||||
- ui: **gallery** add option to auto-refresh gallery, thanks @awsr
|
||||
|
|
@ -19,6 +21,8 @@
|
|||
- fix: improve wildcard weights parsing, thanks @Tillerz
|
||||
- fix: ui gallery cache recursive cleanup, thanks @awsr
|
||||
- fix: `anima` model detection
|
||||
- fix: lora unwanted unload
|
||||
- fix: improve preview error handler
|
||||
|
||||
## Update for 2026-02-04
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,112 @@
|
|||
#!/usr/bin/env python
"""Standalone sanity test for `select_from_weighted_list` in modules/styles.py.

Extracts the function's source text directly from the module file (so the
module's heavy imports are never triggered), executes it in a minimal
namespace, then samples it repeatedly and checks that the observed selection
frequencies match the expected percentages within a tolerance.

Exits nonzero if any test case fails so this can run in CI.
"""

import os
import random
import sys
from collections import Counter

# run relative to the repository root (parent of this script's directory)
script_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(script_dir)

# --- test definition -------------------------------
# library under test
fn = r'./modules/styles.py'
# tested function
funcname = 'select_from_weighted_list'
# minimal globals: the function needs `random` and the `Dict` annotation name
ns = {'Dict': dict, 'random': random}
# number of samples per test case
tries = 2000
# allowed deviation in percentage points
tolerance_pct = 5
# test cases: [input string, expected selection percentages]
tests = [
    # - empty
    ["", {'': 100}],
    # - no weights
    ["red|blonde|black", {'black': 33, 'red': 33, 'blonde': 33}],
    # - full weights <= 1
    ["red:0.1|blonde:0.9", {'blonde': 90, 'red': 10}],
    # - weights > 1 to test normalization
    ["red:1|blonde:2|black:5", {'blonde': 25, 'red': 12.5, 'black': 62.5}],
    # - disabling 0 weights to force one result
    ["red:0|blonde|black:0", {'blonde': 100}],
    # - weights <= 1 with distribution of the leftover
    ["red:0.5|blonde|black:0.3|brown", {'red': 50, 'black': 30, 'brown': 10, 'blonde': 10}],
    # - weights > 1, unweighted entries should get default of 1
    ["red:2|blonde|black", {'red': 50, 'blonde': 25, 'black': 25}],
    # - ignore content of ()
    ["red:0.5|(blonde:1.3)", {'red': 50, '(blonde:1.3)': 50}],
    # - ignore content of []
    ["red:0.5|[stuff:1.3]", {'[stuff:1.3]': 50, 'red': 50}],
    # - ignore content of <>
    ["red:0.5|<lora:1.0>", {'<lora:1.0>': 50, 'red': 50}],
]

# -------------------------------------------------

with open(fn, 'r', encoding='utf-8') as f:
    src = f.read()
start = src.find('def ' + funcname)
if start == -1:
    print('Function not found')
    sys.exit(1)
# find next top-level def or class after start so only the target function is extracted
next_def = src.find('\ndef ', start + 1)
next_class = src.find('\nclass ', start + 1)
end_candidates = [i for i in (next_def, next_class) if i != -1]
end = min(end_candidates) if end_candidates else len(src)
func_src = src[start:end]

exec(func_src, ns)  # trusted local repository source only; never use on untrusted input
func = ns.get(funcname)
if func is None:
    print('Failed to extract function')
    sys.exit(1)

print(f'Running {tries} isolated quick tests for {funcname}:\n')

failed = 0
for t in tests:
    print('INPUT:', t)
    samples = [func(t[0]) for _ in range(tries)]
    c = Counter(samples)
    print("SAMPLES: ", dict(c))

    # validation: the set of returned values must match the expected keys exactly
    expected_pct = t[1]
    expected_keys = set(expected_pct.keys())
    actual_keys = set(c.keys())
    missing = expected_keys - actual_keys
    unexpected = actual_keys - expected_keys

    if missing or unexpected:
        if missing:
            print("MISSING: ", sorted(missing))
        if unexpected:
            print("UNEXPECTED: ", sorted(unexpected))
        print("RESULT: FAILED (keys)")
        print('')
        failed += 1
        continue

    # each observed frequency must be within tolerance of the expected percentage
    failures = []
    for k, pct in expected_pct.items():
        actual_count = c.get(k, 0)
        actual_pct = (actual_count / tries) * 100.0
        if abs(actual_pct - pct) > tolerance_pct:
            failures.append(
                f"{k}: expected {pct:.1f}%, got {actual_pct:.1f}% "
                f"({actual_count}/{tries})"
            )

    if failures:
        print("OUT OF RANGE: ")
        for line in failures:
            print(" - " + line)
        print("RESULT: FAILED (distribution)")
        failed += 1
    else:
        print("RESULT: PASSED")
    print('')

# propagate failure so CI and callers can detect a broken distribution
sys.exit(1 if failed else 0)
|
||||
|
|
@ -1 +1 @@
|
|||
Subproject commit ead16e14410ff177e2e4e105bcbec3eaa737de7d
|
||||
Subproject commit 357985697de0a457e401bccbc41132322648d0ba
|
||||
|
|
@ -1787,6 +1787,7 @@ def add_args(parser):
|
|||
group_http.add_argument("--share", default=os.environ.get("SD_SHARE", False), action='store_true', help="Enable UI accessible through Gradio site, default: %(default)s")
|
||||
group_http.add_argument("--insecure", default=os.environ.get("SD_INSECURE", False), action='store_true', help="Enable extensions tab regardless of other options, default: %(default)s")
|
||||
group_http.add_argument("--listen", default=os.environ.get("SD_LISTEN", False), action='store_true', help="Launch web server using public IP address, default: %(default)s")
|
||||
group_http.add_argument("--remote", default=os.environ.get("SD_REMOTE", False), action='store_true', help="Reduce client-server communication, default: %(default)s")
|
||||
group_http.add_argument("--port", type=int, default=os.environ.get("SD_PORT", 7860), help="Launch web server with given server port, default: %(default)s")
|
||||
|
||||
group_diag = parser.add_argument_group('Diagnostics')
|
||||
|
|
|
|||
|
|
@ -12,10 +12,24 @@ import piexif
|
|||
import piexif.helper
|
||||
from PIL import Image, PngImagePlugin, ExifTags, ImageDraw
|
||||
from modules import sd_samplers, shared, script_callbacks, errors, paths
|
||||
from modules.images_grid import image_grid, get_grid_size, split_grid, combine_grid, check_grid_size, get_font, draw_grid_annotations, draw_prompt_matrix, GridAnnotation, Grid # pylint: disable=unused-import
|
||||
from modules.images_resize import resize_image # pylint: disable=unused-import
|
||||
from modules.images_namegen import FilenameGenerator, get_next_sequence_number # pylint: disable=unused-import
|
||||
from modules.video import save_video # pylint: disable=unused-import
|
||||
from modules.images_grid import (
|
||||
image_grid as image_grid,
|
||||
get_grid_size as get_grid_size,
|
||||
split_grid as split_grid,
|
||||
combine_grid as combine_grid,
|
||||
check_grid_size as check_grid_size,
|
||||
get_font as get_font,
|
||||
draw_grid_annotations as draw_grid_annotations,
|
||||
draw_prompt_matrix as draw_prompt_matrix,
|
||||
GridAnnotation as GridAnnotation,
|
||||
Grid as Grid,
|
||||
)
|
||||
from modules.images_resize import resize_image as resize_image
|
||||
from modules.images_namegen import (
|
||||
FilenameGenerator as FilenameGenerator,
|
||||
get_next_sequence_number as get_next_sequence_number,
|
||||
)
|
||||
from modules.video import save_video as save_video
|
||||
|
||||
|
||||
debug = errors.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None
|
||||
|
|
|
|||
|
|
@ -175,6 +175,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
|
|||
|
||||
def changed(self, requested: List[str], include: List[str] = None, exclude: List[str] = None) -> bool:
|
||||
if shared.opts.lora_force_reload:
|
||||
debug_log(f'Network check: type=LoRA requested={requested} status=forced')
|
||||
return True
|
||||
sd_model = shared.sd_model.pipe if hasattr(shared.sd_model, 'pipe') else shared.sd_model
|
||||
if not hasattr(sd_model, 'loaded_loras'):
|
||||
|
|
@ -185,14 +186,16 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
|
|||
exclude = ['none']
|
||||
key = f'include={",".join(include)}:exclude={",".join(exclude)}'
|
||||
loaded = sd_model.loaded_loras.get(key, [])
|
||||
debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded}')
|
||||
if len(requested) != len(loaded):
|
||||
sd_model.loaded_loras[key] = requested
|
||||
debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded} status=changed')
|
||||
return True
|
||||
for req, load in zip(requested, loaded):
|
||||
if req != load:
|
||||
sd_model.loaded_loras[key] = requested
|
||||
debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded} status=changed')
|
||||
return True
|
||||
debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded} status=same')
|
||||
return False
|
||||
|
||||
def activate(self, p, params_list, step=0, include=[], exclude=[]):
|
||||
|
|
@ -245,9 +248,8 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
|
|||
shared.log.info(f'Network load: type=LoRA networks={[n.name for n in l.loaded_networks]} method={load_method} mode={"fuse" if shared.opts.lora_fuse_native else "backup"} te={te_multipliers} unet={unet_multipliers} time={l.timer.summary}')
|
||||
|
||||
def deactivate(self, p, force=False):
|
||||
if len(lora_diffusers.diffuser_loaded) > 0:
|
||||
if not (shared.compiled_model_state is not None and shared.compiled_model_state.is_compiled is True):
|
||||
unload_diffusers()
|
||||
if len(lora_diffusers.diffuser_loaded) > 0 and (shared.opts.lora_force_reload or force):
|
||||
unload_diffusers()
|
||||
if force:
|
||||
networks.network_deactivate()
|
||||
if self.active and l.debug:
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ import time
|
|||
import threading
|
||||
from collections import namedtuple
|
||||
import torch
|
||||
import torchvision.transforms as T
|
||||
import torchvision.transforms.functional as TF
|
||||
from PIL import Image
|
||||
from modules import shared, devices, processing, images, sd_samplers, timer
|
||||
from modules.vae import sd_vae_approx, sd_vae_taesd, sd_vae_stablecascade
|
||||
|
|
@ -80,12 +80,11 @@ def single_sample_to_image(sample, approximation=None):
|
|||
else:
|
||||
if x_sample.shape[0] > 4 or x_sample.shape[0] == 4:
|
||||
return Image.new(mode="RGB", size=(512, 512))
|
||||
if x_sample.dtype == torch.bfloat16:
|
||||
x_sample = x_sample.to(torch.float16)
|
||||
x_sample = torch.nan_to_num(x_sample, nan=0.0, posinf=1, neginf=0)
|
||||
x_sample = (255.0 * x_sample).to(torch.uint8)
|
||||
if len(x_sample.shape) == 4:
|
||||
x_sample = x_sample[0]
|
||||
transform = T.ToPILImage()
|
||||
image = transform(x_sample)
|
||||
image = TF.to_pil_image(x_sample)
|
||||
except Exception as e:
|
||||
warn_once(f'Preview: {e}')
|
||||
image = Image.new(mode="RGB", size=(512, 512))
|
||||
|
|
|
|||
|
|
@ -4,23 +4,53 @@ import os
|
|||
import sys
|
||||
import time
|
||||
import contextlib
|
||||
|
||||
from enum import Enum
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import gradio as gr
|
||||
from installer import log, print_dict, console, get_version # pylint: disable=unused-import
|
||||
log.debug('Initializing: shared module')
|
||||
|
||||
from installer import (
|
||||
log as log,
|
||||
print_dict,
|
||||
console as console,
|
||||
get_version as get_version,
|
||||
)
|
||||
|
||||
log.debug("Initializing: shared module")
|
||||
|
||||
import modules.memmon
|
||||
import modules.paths as paths
|
||||
from modules.json_helpers import readfile, writefile # pylint: disable=W0611
|
||||
from modules.shared_helpers import listdir, walk_files, html_path, html, req, total_tqdm # pylint: disable=W0611
|
||||
from modules.json_helpers import (
|
||||
readfile as readfile,
|
||||
writefile as writefile,
|
||||
)
|
||||
from modules.shared_helpers import (
|
||||
listdir as listdir,
|
||||
walk_files as walk_files,
|
||||
html_path as html_path,
|
||||
html as html,
|
||||
req as req,
|
||||
total_tqdm as total_tqdm,
|
||||
)
|
||||
from modules import errors, devices, shared_state, cmd_args, theme, history, files_cache
|
||||
from modules.shared_defaults import get_default_modes
|
||||
from modules.paths import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # pylint: disable=W0611
|
||||
from modules.memstats import memory_stats, ram_stats # pylint: disable=unused-import
|
||||
from modules.paths import (
|
||||
models_path as models_path, # For compatibility, do not modify from here...
|
||||
script_path as script_path,
|
||||
data_path as data_path,
|
||||
sd_configs_path as sd_configs_path,
|
||||
sd_default_config as sd_default_config,
|
||||
sd_model_file as sd_model_file,
|
||||
default_sd_model_file as default_sd_model_file,
|
||||
extensions_dir as extensions_dir,
|
||||
extensions_builtin_dir as extensions_builtin_dir, # ... to here.
|
||||
)
|
||||
from modules.memstats import (
|
||||
memory_stats,
|
||||
ram_stats as ram_stats,
|
||||
)
|
||||
|
||||
log.debug('Initializing: pipelines')
|
||||
log.debug("Initializing: pipelines")
|
||||
from modules import shared_items
|
||||
from modules.interrogate.openclip import caption_models, caption_types, get_clip_models, refresh_clip_models
|
||||
from modules.interrogate.vqa import vlm_models, vlm_prompts, vlm_system, vlm_default
|
||||
|
|
|
|||
|
|
@ -115,6 +115,34 @@ def generate_click(job_id: str, state: str, active_tab: str, *args):
|
|||
shared.state.end(jobid)
|
||||
|
||||
|
||||
def generate_click_alt(job_id: str, state: str, active_tab: str, *args):
    """Remote-friendly variant of generate_click used when `--remote` is set.

    Busy-waits for any in-flight job, then runs the control pipeline under the
    call-queue lock and returns the final controls once generation completes.
    """
    # block until the shared helpers are free; log while waiting
    while helpers.busy:
        debug(f'Control: tab="{active_tab}" job={job_id} busy')
        time.sleep(0.1)
    from modules.control.run import control_run  # deferred import to avoid a load-time cycle
    debug(f'Control: tab="{active_tab}" job={job_id} args={args}')
    progress.add_task_to_queue(job_id)
    with call_queue.get_lock():
        latest = None  # most recent yield from the control pipeline
        shared.mem_mon.reset()
        session = shared.state.begin('Control')
        progress.start_task(job_id)
        try:
            started = time.perf_counter()
            for latest in control_run(state, units, helpers.input_source, helpers.input_init, helpers.input_mask, active_tab, True, *args):
                progress.record_results(job_id, latest)
        except GeneratorExit:
            shared.log.error("Control: generator exit")
        except Exception as e:
            shared.log.error(f"Control exception: {e}")
            errors.display(e, 'Control')
            return [None, None, None, None, f'Control: Exception: {e}', '']
        finally:
            # always release task/state bookkeeping, success or failure
            progress.finish_task(job_id)
            shared.state.end(session)
        return return_controls(latest, started)
|
||||
|
||||
|
||||
def create_ui(_blocks: gr.Blocks=None):
|
||||
helpers.initialize()
|
||||
|
||||
|
|
@ -314,8 +342,9 @@ def create_ui(_blocks: gr.Blocks=None):
|
|||
result_txt,
|
||||
output_html_log,
|
||||
]
|
||||
generate_fn = generate_click_alt if shared.cmd_opts.remote else generate_click
|
||||
control_dict = dict(
|
||||
fn=generate_click,
|
||||
fn=generate_fn,
|
||||
_js="submit_control",
|
||||
inputs=[tabs_state, state, tabs_state] + input_fields + input_script_args,
|
||||
outputs=output_fields,
|
||||
|
|
|
|||
|
|
@ -158,13 +158,13 @@ def decode(latents):
|
|||
try:
|
||||
with devices.inference_context():
|
||||
t0 = time.time()
|
||||
dtype = devices.dtype_vae if devices.dtype_vae != torch.bfloat16 else torch.float16 # taesd does not support bf16
|
||||
dtype = devices.dtype_vae if (devices.dtype_vae != torch.bfloat16) else torch.float16 # taesd does not support bf16
|
||||
tensor = latents.unsqueeze(0) if len(latents.shape) == 3 else latents
|
||||
tensor = tensor.detach().clone().to(devices.device, dtype=dtype)
|
||||
if debug:
|
||||
shared.log.debug(f'Decode: type="taesd" variant="{variant}" input={latents.shape} tensor={tensor.shape}')
|
||||
# Fallback: reshape packed 128-channel latents to 32 channels if not already unpacked
|
||||
if variant == 'TAE FLUX.2' and len(tensor.shape) == 4 and tensor.shape[1] == 128:
|
||||
if (variant == 'TAE FLUX.2') and (len(tensor.shape) == 4) and (tensor.shape[1] == 128):
|
||||
b, _c, h, w = tensor.shape
|
||||
tensor = tensor.reshape(b, 32, h * 2, w * 2)
|
||||
if variant.startswith('TAESD') or variant in {'TAE FLUX.1', 'TAE FLUX.2', 'TAE SD3'}:
|
||||
|
|
|
|||
Loading…
Reference in New Issue