From e8ff09a2d2b1357598d97e9fee56354af2d11bdf Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 7 Feb 2026 09:07:34 +0000 Subject: [PATCH] fix lora unload and improve preview error handler Signed-off-by: Vladimir Mandic --- CHANGELOG.md | 3 ++- extensions-builtin/sdnext-modernui | 2 +- modules/lora/extra_networks_lora.py | 10 ++++++---- modules/sd_samplers_common.py | 9 ++++----- modules/vae/sd_vae_taesd.py | 4 ++-- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7dc0133ae..2d6eb7ee9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Change Log for SD.Next -## Update for 2026-02-06 +## Update for 2026-02-07 - **Upscalers** - add support for [spandrel](https://github.com/chaiNNer-org/spandrel) @@ -19,6 +19,7 @@ - fix: improve wildcard weights parsing, thanks @Tillerz - fix: ui gallery cace recursive cleanup, thanks @awsr - fix: `anima` model detection + - fix: lora unwanted unload ## Update for 2026-02-04 diff --git a/extensions-builtin/sdnext-modernui b/extensions-builtin/sdnext-modernui index ead16e144..2b9f7c293 160000 --- a/extensions-builtin/sdnext-modernui +++ b/extensions-builtin/sdnext-modernui @@ -1 +1 @@ -Subproject commit ead16e14410ff177e2e4e105bcbec3eaa737de7d +Subproject commit 2b9f7c293f3a2146b991a943397e7567059e73c8 diff --git a/modules/lora/extra_networks_lora.py b/modules/lora/extra_networks_lora.py index 3db727d1a..5b1726709 100644 --- a/modules/lora/extra_networks_lora.py +++ b/modules/lora/extra_networks_lora.py @@ -175,6 +175,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork): def changed(self, requested: List[str], include: List[str] = None, exclude: List[str] = None) -> bool: if shared.opts.lora_force_reload: + debug_log(f'Network check: type=LoRA requested={requested} status=forced') return True sd_model = shared.sd_model.pipe if hasattr(shared.sd_model, 'pipe') else shared.sd_model if not hasattr(sd_model, 'loaded_loras'): @@ -185,14 
+186,16 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork): exclude = ['none'] key = f'include={",".join(include)}:exclude={",".join(exclude)}' loaded = sd_model.loaded_loras.get(key, []) - debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded}') if len(requested) != len(loaded): sd_model.loaded_loras[key] = requested + debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded} status=changed') return True for req, load in zip(requested, loaded): if req != load: sd_model.loaded_loras[key] = requested + debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded} status=changed') return True + debug_log(f'Network check: type=LoRA key="{key}" requested={requested} loaded={loaded} status=same') return False def activate(self, p, params_list, step=0, include=[], exclude=[]): @@ -245,9 +248,8 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork): shared.log.info(f'Network load: type=LoRA networks={[n.name for n in l.loaded_networks]} method={load_method} mode={"fuse" if shared.opts.lora_fuse_native else "backup"} te={te_multipliers} unet={unet_multipliers} time={l.timer.summary}') def deactivate(self, p, force=False): - if len(lora_diffusers.diffuser_loaded) > 0: - if not (shared.compiled_model_state is not None and shared.compiled_model_state.is_compiled is True): - unload_diffusers() + if len(lora_diffusers.diffuser_loaded) > 0 and (shared.opts.lora_force_reload or force): + unload_diffusers() if force: networks.network_deactivate() if self.active and l.debug: diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 14ad69eb8..c49dcf11d 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ import time import threading from collections import namedtuple import torch -import torchvision.transforms as T +import torchvision.transforms.functional as TF from PIL import Image from modules import shared, devices, 
processing, images, sd_samplers, timer from modules.vae import sd_vae_approx, sd_vae_taesd, sd_vae_stablecascade @@ -80,12 +80,11 @@ def single_sample_to_image(sample, approximation=None): else: if x_sample.shape[0] > 4 or x_sample.shape[0] == 4: return Image.new(mode="RGB", size=(512, 512)) - if x_sample.dtype == torch.bfloat16: - x_sample = x_sample.to(torch.float16) + x_sample = torch.nan_to_num(x_sample, nan=0.0, posinf=1, neginf=0) + x_sample = (255.0 * x_sample).to(torch.uint8) if len(x_sample.shape) == 4: x_sample = x_sample[0] - transform = T.ToPILImage() - image = transform(x_sample) + image = TF.to_pil_image(x_sample) except Exception as e: warn_once(f'Preview: {e}') image = Image.new(mode="RGB", size=(512, 512)) diff --git a/modules/vae/sd_vae_taesd.py b/modules/vae/sd_vae_taesd.py index 1c5dd0dd0..c8fc3ef47 100644 --- a/modules/vae/sd_vae_taesd.py +++ b/modules/vae/sd_vae_taesd.py @@ -158,13 +158,13 @@ def decode(latents): try: with devices.inference_context(): t0 = time.time() - dtype = devices.dtype_vae if devices.dtype_vae != torch.bfloat16 else torch.float16 # taesd does not support bf16 + dtype = devices.dtype_vae if (devices.dtype_vae != torch.bfloat16) else torch.float16 # taesd does not support bf16 tensor = latents.unsqueeze(0) if len(latents.shape) == 3 else latents tensor = tensor.detach().clone().to(devices.device, dtype=dtype) if debug: shared.log.debug(f'Decode: type="taesd" variant="{variant}" input={latents.shape} tensor={tensor.shape}') # Fallback: reshape packed 128-channel latents to 32 channels if not already unpacked - if variant == 'TAE FLUX.2' and len(tensor.shape) == 4 and tensor.shape[1] == 128: + if (variant == 'TAE FLUX.2') and (len(tensor.shape) == 4) and (tensor.shape[1] == 128): b, _c, h, w = tensor.shape tensor = tensor.reshape(b, 32, h * 2, w * 2) if variant.startswith('TAESD') or variant in {'TAE FLUX.1', 'TAE FLUX.2', 'TAE SD3'}: