add hidream

Signed-off-by: Vladimir Mandic <mandic00@live.com>
pull/3866/head
Vladimir Mandic 2025-04-10 16:28:15 -04:00
parent f2a4ef1eb0
commit 92af0036c6
19 changed files with 192 additions and 58 deletions


@@ -1,13 +1,30 @@
# Change Log for SD.Next
## Update for 2025-04-08
## TODO for 2025-04-10
- HiDream requires a side-branch:
  > `pip install git+https://github.com/hlky/diffusers@hidream`
  > `./webui.sh --debug --experimental`
- FlashAttn wiki
## Update for 2025-04-10
- **Models**
  - [HiDream-I1](https://huggingface.co/HiDream-ai/HiDream-I1-Full) in fast, dev and full variants!
    a new, absolutely massive image generation foundation model with **17B** parameters
    and 4 (!?) text-encoders: *clip-l, clip-g, t5-1.1-xxl, llama-3.1-8b-instruct* for a total of **8.3B** parameters
    simply select from *networks -> models -> reference*
    due to size (over 25B params in 58GB), offloading and on-the-fly quantization are pretty much a necessity
    the difference between variants is the recommended number of steps: *fast=16, dev=28, full=50*
    HiDream is compatible with flow-matching samplers and with TAESD live-preview
    *note*: HiDream-I1 requires `flash-attn` to be installed
    > REQUIRES SIDE-BRANCH; a minimal load sketch follows below
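For orientation, a minimal load sketch using the side-branch directly; the component names (`HiDreamImagePipeline`, `tokenizer_4`, `text_encoder_4`) match the loader added in `modules/model_hidream.py` below, while the prompt, dtype and offload call are illustrative assumptions:

```py
# minimal sketch, assuming the side-branch is installed:
#   pip install git+https://github.com/hlky/diffusers@hidream
import torch
import transformers
import diffusers

llama_repo = "meta-llama/Meta-Llama-3.1-8B-Instruct"  # the 4th text-encoder ships in a separate repo
tokenizer_4 = transformers.PreTrainedTokenizerFast.from_pretrained(llama_repo)
text_encoder_4 = transformers.LlamaForCausalLM.from_pretrained(
    llama_repo,
    output_hidden_states=True,  # hidden states feed the prompt embedding, not the LM head
    torch_dtype=torch.bfloat16,
)
pipe = diffusers.HiDreamImagePipeline.from_pretrained(
    "HiDream-ai/HiDream-I1-Fast",
    tokenizer_4=tokenizer_4,
    text_encoder_4=text_encoder_4,
    torch_dtype=torch.bfloat16,
)
pipe.enable_model_cpu_offload()  # offloading is pretty much a necessity at this size
image = pipe("a photo of a cat", num_inference_steps=16).images[0]  # fast variant: 16 steps
```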
- **Features**
  - Custom model loader
    can be used to load any known diffusion model with default or custom model components
    in the *models -> custom* tab
    see docs for details: <https://vladmandic.github.io/sdnext-docs/Loader/>
  - Pipe: [SoftFill](https://github.com/zacheryvaughn/softfill-pipelines)
- **Caching**
  - add `TeaCache` support to *Flux, CogVideoX, Mochi, LTX*
  - add `FasterCache` support to *WanAI, LTX* (other video models already supported); a rough upstream sketch is shown below
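For context, a hedged sketch of what enabling `FasterCache` looks like through the upstream diffusers cache API; this assumes a diffusers build that exports `FasterCacheConfig`, and the skip ranges are illustrative values, not SD.Next defaults:

```py
# hedged sketch of the upstream diffusers FasterCache API
# (assumes FasterCacheConfig is available in the installed diffusers build)
import torch
from diffusers import CogVideoXPipeline, FasterCacheConfig

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16).to("cuda")
config = FasterCacheConfig(
    spatial_attention_block_skip_range=2,             # reuse attention states every other block
    spatial_attention_timestep_skip_range=(-1, 681),  # only skip inside this timestep window
    current_timestep_callback=lambda: pipe.current_timestep,
)
pipe.transformer.enable_cache(config)  # cached states are then reused during sampling
```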
@@ -23,9 +40,12 @@
- ZLUDA: add more GPUs to recognized list
select in scripts, available for sdxl in inpaint mode
- LoRA: add option to force-reload LoRA on every generate
- settings: add **Model options** section as a placeholder for per-model settings
- video: update *LTXVideo-0.9.5* pipeline
- te loader: allow free-form input, in which case sdnext will attempt to load it as a huggingface repo
- diag: add get-server-status to UI generate context menu
- diag: memory monitor now detects gpu swapping
- vide: update LTXVideo-0.9.5 pipeline
- use [hf-xet](https://huggingface.co/blog/xet-on-the-hub) for huggingface downloads where possible
- **Changes**
- params: reset default guidance-rescale from 0.7 to 0.0
- progress: add additional fields to progress API


@@ -333,7 +333,29 @@
    "preview": "Alpha-VLLM--Lumina-Image-2.0.jpg",
    "skip": true,
    "extras": "sampler: Default"
  },
  },
  "HiDream-I1 Fast": {
    "path": "HiDream-ai/HiDream-I1-Fast",
    "desc": "HiDream-I1 is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.",
    "preview": "HiDream-ai--HiDream-I1-Fast.jpg",
    "skip": true,
    "extras": "sampler: Default"
  },
  "HiDream-I1 Dev": {
    "path": "HiDream-ai/HiDream-I1-Dev",
    "desc": "HiDream-I1 is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.",
    "preview": "HiDream-ai--HiDream-I1-Fast.jpg",
    "skip": true,
    "extras": "sampler: Default"
  },
  "HiDream-I1 Full": {
    "path": "HiDream-ai/HiDream-I1-Full",
    "desc": "HiDream-I1 is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.",
    "preview": "HiDream-ai--HiDream-I1-Fast.jpg",
    "skip": true,
    "extras": "sampler: Default"
  },
  "Kwai Kolors": {
    "path": "Kwai-Kolors/Kolors-diffusers",

Binary file not shown.



@@ -29,16 +29,16 @@ def load_cogview3(checkpoint_info, diffusers_load_config={}):
    repo_id = sd_models.path_to_repo(checkpoint_info.name)
    shared.log.debug(f'Load model: type=CogView3 model="{checkpoint_info.name}" repo="{repo_id}" offload={shared.opts.diffusers_offload_mode} dtype={devices.dtype}')
    diffusers_load_config, quant_args = load_common(diffusers_load_config, module='Model')
    load_args, quant_args = load_common(diffusers_load_config, module='Transformer')
    transformer = diffusers.CogView3PlusTransformer2DModel.from_pretrained(
        repo_id,
        subfolder="transformer",
        cache_dir=shared.opts.diffusers_dir,
        **diffusers_load_config,
        **load_args,
        **quant_args,
    )
    diffusers_load_config, quant_args = load_common(diffusers_load_config, module='TE')
    load_args, quant_args = load_common(diffusers_load_config, module='TE')
    text_encoder = transformers.T5EncoderModel.from_pretrained(
        repo_id,
        subfolder="text_encoder",
@@ -47,12 +47,13 @@ def load_cogview3(checkpoint_info, diffusers_load_config={}):
        **quant_args,
    )
    load_args, quant_args = load_common(diffusers_load_config, module='Transformer')
    pipe = diffusers.CogView3PlusPipeline.from_pretrained(
        repo_id,
        text_encoder=text_encoder,
        transformer=transformer,
        cache_dir=shared.opts.diffusers_dir,
        **diffusers_load_config,
        **load_args,
    )
    devices.torch_gc()
    return pipe
@@ -62,7 +63,7 @@ def load_cogview4(checkpoint_info, diffusers_load_config={}):
    repo_id = sd_models.path_to_repo(checkpoint_info.name)
    shared.log.debug(f'Load model: type=CogView4 model="{checkpoint_info.name}" repo="{repo_id}" offload={shared.opts.diffusers_offload_mode} dtype={devices.dtype}')
    diffusers_load_config, quant_args = load_common(diffusers_load_config, module='Model')
    load_args, quant_args = load_common(diffusers_load_config, module='Transformer')
    transformer = diffusers.CogView4Transformer2DModel.from_pretrained(
        repo_id,
        subfolder="transformer",
@@ -71,21 +72,22 @@ def load_cogview4(checkpoint_info, diffusers_load_config={}):
        **quant_args,
    )
    diffusers_load_config, quant_args = load_common(diffusers_load_config, module='TE')
    load_args, quant_args = load_common(diffusers_load_config, module='TE')
    text_encoder = transformers.AutoModelForCausalLM.from_pretrained(
        repo_id,
        subfolder="text_encoder",
        cache_dir=shared.opts.diffusers_dir,
        **diffusers_load_config,
        **load_args,
        **quant_args,
    )
    load_args, quant_args = load_common(diffusers_load_config, module='Model')
    pipe = diffusers.CogView4Pipeline.from_pretrained(
        repo_id,
        text_encoder=text_encoder,
        transformer=transformer,
        cache_dir=shared.opts.diffusers_dir,
        **diffusers_load_config,
        **load_args,
    )
    if shared.opts.diffusers_eval:
        pipe.text_encoder.eval()

modules/model_hidream.py Normal file

@@ -0,0 +1,92 @@
import time
import transformers
import diffusers
from modules import shared, devices, sd_models, timer


llama_repo = "meta-llama/Meta-Llama-3.1-8B-Instruct"


def hijack_encode_prompt(*args, **kwargs):
    # wrap encode_prompt so text-encoder time is recorded and balanced offload is re-applied after each prompt encode
    t0 = time.time()
    res = shared.sd_model.orig_encode_prompt(*args, **kwargs)
    t1 = time.time()
    timer.process.add('te', t1-t0)
    # shared.log.debug(f'Hijack: te={shared.sd_model.text_encoder.__class__.__name__} time={t1-t0:.2f}')
    shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model)
    return res


def get_args(diffusers_load_config={}, module=None):
    # strip loader kwargs that HiDream components do not accept and attach per-module quantization config
    from modules import model_quant, modelloader
    modelloader.hf_login()
    if 'torch_dtype' not in diffusers_load_config:
        diffusers_load_config['torch_dtype'] = devices.dtype
    if 'low_cpu_mem_usage' in diffusers_load_config:
        del diffusers_load_config['low_cpu_mem_usage']
    if 'load_connected_pipeline' in diffusers_load_config:
        del diffusers_load_config['load_connected_pipeline']
    if 'safety_checker' in diffusers_load_config:
        del diffusers_load_config['safety_checker']
    if 'requires_safety_checker' in diffusers_load_config:
        del diffusers_load_config['requires_safety_checker']
    quant_args = model_quant.create_config(module=module)
    quant_type = model_quant.get_quant_type(quant_args)
    if quant_type:
        shared.log.debug(f'Load model: type=HiDream quantization module="{module}" {quant_type}')
    return diffusers_load_config, quant_args


def load_hidream(checkpoint_info, diffusers_load_config={}):
    repo_id = sd_models.path_to_repo(checkpoint_info.name)
    shared.log.debug(f'Load model: type=HiDream model="{checkpoint_info.name}" repo="{repo_id}" offload={shared.opts.diffusers_offload_mode} dtype={devices.dtype}')
    load_args, quant_args = get_args(diffusers_load_config, module='Transformer')
    transformer = diffusers.HiDreamImageTransformer2DModel.from_pretrained(
        repo_id,
        subfolder="transformer",
        cache_dir=shared.opts.hfcache_dir,
        **load_args,
        **quant_args,
    )
    load_args, quant_args = get_args(diffusers_load_config, module='TE')
    text_encoder_3 = transformers.T5EncoderModel.from_pretrained(
        repo_id,
        subfolder="text_encoder_3",
        cache_dir=shared.opts.hfcache_dir,
        **load_args,
        **quant_args,
    )
    load_args, quant_args = get_args(diffusers_load_config, module='LLM')
    tokenizer_4 = transformers.PreTrainedTokenizerFast.from_pretrained(
        llama_repo,
        cache_dir=shared.opts.hfcache_dir,
        **load_args,
    )
    text_encoder_4 = transformers.LlamaForCausalLM.from_pretrained(
        llama_repo,
        output_hidden_states=True,
        output_attentions=True,
        cache_dir=shared.opts.hfcache_dir,
        **load_args,
        **quant_args,
    )
    load_args, quant_args = get_args(diffusers_load_config, module='Model')
    pipe = diffusers.HiDreamImagePipeline.from_pretrained(
        repo_id,
        text_encoder_3=text_encoder_3,
        text_encoder_4=text_encoder_4,
        tokenizer_4=tokenizer_4,
        transformer=transformer,
        cache_dir=shared.opts.diffusers_dir,
        **load_args,
    )
    pipe.orig_encode_prompt = pipe.encode_prompt  # keep the original so the hijack can delegate to it
    pipe.encode_prompt = hijack_encode_prompt
    devices.torch_gc()
    return pipe
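For reference, a hypothetical call path for the loader above; the repo id is illustrative, and `sd_checkpoint.CheckpointInfo` is the same helper the video scripts below use to resolve checkpoints:

```py
# hedged usage sketch; the repo id is illustrative
from modules import sd_checkpoint
from modules.model_hidream import load_hidream

info = sd_checkpoint.CheckpointInfo('HiDream-ai/HiDream-I1-Fast')
pipe = load_hidream(info)
# pipe.encode_prompt is now the timing/offload wrapper; the original
# implementation remains reachable as pipe.orig_encode_prompt
```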


@@ -1,7 +1,7 @@
import os
import diffusers
import transformers
from modules import shared, devices, sd_models, sd_unet, model_quant, model_tools
from modules import shared, devices, errors, sd_models, sd_unet, model_quant, model_tools


def load_overrides(kwargs, cache_dir):
@@ -20,6 +20,7 @@ def load_overrides(kwargs, cache_dir):
            shared.log.debug(f'Load model: type=SD3 unet="{shared.opts.sd_unet}" fmt=gguf')
        except Exception as e:
            shared.log.error(f"Load model: type=SD3 failed to load UNet: {e}")
            errors.display(e, 'UNet')
            shared.opts.sd_unet = 'Default'
            sd_unet.failed_unet.append(shared.opts.sd_unet)
    if shared.opts.sd_text_encoder != 'Default':
@@ -36,6 +37,7 @@ def load_overrides(kwargs, cache_dir):
            shared.log.debug(f'Load model: type=SD3 variant="t5" te="{shared.opts.sd_text_encoder}"')
        except Exception as e:
            shared.log.error(f"Load model: type=SD3 failed to load T5: {e}")
            errors.display(e, 'TE')
            shared.opts.sd_text_encoder = 'Default'
    if shared.opts.sd_vae != 'Default' and shared.opts.sd_vae != 'Automatic':
        try:
@@ -47,6 +49,7 @@ def load_overrides(kwargs, cache_dir):
            shared.log.debug(f'Load model: type=SD3 vae="{shared.opts.sd_vae}"')
        except Exception as e:
            shared.log.error(f"Load model: type=SD3 failed to load VAE: {e}")
            errors.display(e, 'VAE')
            shared.opts.sd_vae = 'Default'
    return kwargs
@@ -86,33 +89,6 @@ def load_missing(kwargs, fn, cache_dir):
    return kwargs


"""
def load_gguf(kwargs, fn):
    ggml.install_gguf()
    from accelerate import init_empty_weights
    from diffusers.loaders.single_file_utils import convert_sd3_transformer_checkpoint_to_diffusers
    from modules import ggml, sd_hijack_accelerate
    with init_empty_weights():
        config = diffusers.SD3Transformer2DModel.load_config(os.path.join('configs', 'flux'), subfolder="transformer")
        transformer = diffusers.SD3Transformer2DModel.from_config(config).to(devices.dtype)
    expected_state_dict_keys = list(transformer.state_dict().keys())
    state_dict, stats = ggml.load_gguf_state_dict(fn, devices.dtype)
    state_dict = convert_sd3_transformer_checkpoint_to_diffusers(state_dict)
    applied, skipped = 0, 0
    for param_name, param in state_dict.items():
        if param_name not in expected_state_dict_keys:
            skipped += 1
            continue
        applied += 1
        sd_hijack_accelerate.hijack_set_module_tensor_simple(transformer, tensor_name=param_name, value=param, device=0)
        transformer.gguf = 'gguf'
        state_dict[param_name] = None
    shared.log.debug(f'Load model: type=Unet/Transformer applied={applied} skipped={skipped} stats={stats} compute={devices.dtype}')
    kwargs['transformer'] = transformer
    return kwargs
"""


def load_sd3(checkpoint_info, cache_dir=None, config=None):
    repo_id = sd_models.path_to_repo(checkpoint_info.name)
    fn = checkpoint_info.path
@@ -149,6 +125,10 @@ def load_sd3(checkpoint_info, cache_dir=None, config=None):
    shared.log.debug(f'Load model: type=SD3 kwargs={list(kwargs)} repo="{repo_id}"')
    kwargs = model_quant.create_config(kwargs)
    if shared.opts.model_sd3_disable_te5:
        shared.log.debug('Load model: type=SD3 option="disable-te5"')
        kwargs['text_encoder_3'] = None
    pipe = loader(
        repo_id,
        torch_dtype=devices.dtype,


@@ -78,6 +78,10 @@ def load_t5(name=None, cache_dir=None):
            dtype=torch.float32 if devices.dtype != torch.bfloat16 else torch.bfloat16
        )
        t5 = nncf_compress_model(t5)
    elif '/' in name:
        shared.log.debug(f'Load model: type=T5 repo={name}')
        quant_config = model_quant.create_config(module='TE')
        t5 = transformers.T5EncoderModel.from_pretrained(name, cache_dir=cache_dir, torch_dtype=devices.dtype, **quant_config)
    else:
        t5 = None
    if t5 is not None:
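A minimal standalone sketch of what the new free-form branch does; the repo id is an illustrative assumption, any hub repo containing a T5 encoder would do:

```py
# minimal standalone sketch of the free-form branch above;
# "google/t5-v1_1-xxl" is illustrative - any value containing '/' is treated as a hf repo
import torch
import transformers

t5 = transformers.T5EncoderModel.from_pretrained(
    "google/t5-v1_1-xxl",
    torch_dtype=torch.bfloat16,
)
```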


@@ -41,6 +41,8 @@ def get_model_type(pipe):
        model_type = 'cogview4'
    elif "Sana" in name:
        model_type = 'sana'
    elif "HiDream" in name:
        model_type = 'h1'
    # video models
    elif "CogVideo" in name:
        model_type = 'cogvideo'


@@ -90,6 +90,8 @@ def detect_pipeline(f: str, op: str = 'model', warning=True, quiet=False):
        pipeline = 'custom'
    if 'sd3' in f.lower():
        guess = 'Stable Diffusion 3'
    if 'hidream' in f.lower():
        guess = 'HiDream'
    if 'flux' in f.lower() or 'flex.1' in f.lower():
        guess = 'FLUX'
        if size > 11000 and size < 16000:


@@ -326,6 +326,9 @@ def load_diffuser_force(model_type, checkpoint_info, diffusers_load_config, op='
        elif model_type in ['OmniGen']: # forced pipeline
            from modules.model_omnigen import load_omnigen
            sd_model = load_omnigen(checkpoint_info, diffusers_load_config)
        elif model_type in ['HiDream']:
            from modules.model_hidream import load_hidream
            sd_model = load_hidream(checkpoint_info, diffusers_load_config)
    except Exception as e:
        shared.log.error(f'Load {op}: path="{checkpoint_info.path}" {e}')
        if debug_load:


@@ -52,8 +52,10 @@ def get_model(model_type = 'decoder', variant = None):
    global prev_cls, prev_type, prev_model # pylint: disable=global-statement
    from modules import shared
    cls = shared.sd_model_type
    if cls == 'ldm':
    if cls == 'ldm': # original backend
        cls = 'sd'
    if cls == 'h1': # hidream uses flux vae
        cls = 'f1'
    variant = variant or shared.opts.taesd_variant
    folder = os.path.join(paths.models_path, "TAESD")
    os.makedirs(folder, exist_ok=True)


@@ -403,16 +403,20 @@ options_templates.update(options_section(('sd', "Models & Loading"), {
    "diffusers_offload_max_cpu_memory": OptionInfo(0.90, "Balanced offload CPU high watermark", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01, "visible": False }),
    "advanced_sep": OptionInfo("<h2>Advanced Options</h2>", "", gr.HTML),
    "sd_checkpoint_autoload": OptionInfo(True, "Model autoload on start"),
    "sd_checkpoint_autoload": OptionInfo(True, "Model auto-load on start"),
    "sd_checkpoint_autodownload": OptionInfo(True, "Model auto-download on demand"),
    "stream_load": OptionInfo(False, "Model load using streams", gr.Checkbox),
    "diffusers_eval": OptionInfo(True, "Force model eval", gr.Checkbox, {"visible": False }),
    "diffusers_to_gpu": OptionInfo(False, "Model Load model direct to GPU"),
    "diffusers_to_gpu": OptionInfo(False, "Model load direct to GPU"),
    "disable_accelerate": OptionInfo(False, "Disable accelerate", gr.Checkbox, {"visible": False }),
    "sd_model_dict": OptionInfo('None', "Use separate base dict", gr.Dropdown, lambda: {"choices": ['None'] + list_checkpoint_titles(), "visible": False}, refresh=refresh_checkpoints),
    "sd_checkpoint_cache": OptionInfo(0, "Cached models", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1, "visible": not native }),
}))

options_templates.update(options_section(('model_options', "Models Options"), {
    "model_sd3_disable_te5": OptionInfo(False, "SD3 disable T5 encoder"),
}))

options_templates.update(options_section(('vae_encoder', "Variable Auto Encoder"), {
    "sd_vae": OptionInfo("Automatic", "VAE model", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list),
    "diffusers_vae_upcast": OptionInfo("default", "VAE upcasting", gr.Radio, {"choices": ['default', 'true', 'false']}),
@@ -429,7 +433,7 @@ options_templates.update(options_section(('vae_encoder', "Variable Auto Encoder"
}))

options_templates.update(options_section(('text_encoder', "Text Encoder"), {
    "sd_text_encoder": OptionInfo('Default', "Text encoder model", gr.Dropdown, lambda: {"choices": shared_items.sd_te_items()}, refresh=shared_items.refresh_te_list),
    "sd_text_encoder": OptionInfo('Default', "Text encoder model", DropdownEditable, lambda: {"choices": shared_items.sd_te_items()}, refresh=shared_items.refresh_te_list),
    "prompt_attention": OptionInfo("native", "Prompt attention parser", gr.Radio, {"choices": ["native", "compel", "xhinker", "a1111", "fixed"] }),
    "prompt_mean_norm": OptionInfo(False, "Prompt attention normalization", gr.Checkbox),
    "sd_textencoder_cache": OptionInfo(True, "Cache text encoder results", gr.Checkbox, {"visible": False}),


@@ -32,6 +32,7 @@ pipelines = {
    'CogView 4': getattr(diffusers, 'CogView4Pipeline', None),
    'UniDiffuser': getattr(diffusers, 'UniDiffuserPipeline', None),
    'Amused': getattr(diffusers, 'AmusedPipeline', None),
    'HiDream': getattr(diffusers, 'HiDreamImagePipeline', None),
    # dynamically imported and redefined later
    'Meissonic': getattr(diffusers, 'DiffusionPipeline', None), # dynamically redefined and loaded in sd_models.load_diffuser
@@ -78,7 +79,7 @@ def refresh_unet_list():
def sd_te_items():
    import modules.model_te
    predefined = ['None', 'T5 FP4', 'T5 FP8', 'T5 INT8', 'T5 QINT8', 'T5 FP16']
    predefined = ['None']
    return predefined + list(modules.model_te.te_dict)


@@ -21,7 +21,7 @@ def hijack_decode(*args, **kwargs):
def hijack_encode_prompt(*args, **kwargs):
    t0 = time.time()
    res = shared.sd_model.vae.orig_encode_prompt(*args, **kwargs)
    res = shared.sd_model.orig_encode_prompt(*args, **kwargs)
    t1 = time.time()
    timer.process.add('te', t1-t0)
    shared.log.debug(f'Video: te={shared.sd_model.text_encoder.__class__.__name__} time={t1-t0:.2f}')
@@ -92,7 +92,7 @@ class Script(scripts.Script):
        shared.sd_model.sd_checkpoint_info = sd_checkpoint.CheckpointInfo(repo_id)
        shared.sd_model.sd_model_hash = None
        shared.sd_model.vae.orig_decode = shared.sd_model.vae.decode
        shared.sd_model.vae.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.vae.decode = hijack_decode
        shared.sd_model.encode_prompt = hijack_encode_prompt
        shared.sd_model.vae.enable_tiling()


@@ -50,7 +50,7 @@ def hijack_decode(*args, **kwargs):
def hijack_encode_prompt(*args, **kwargs):
    t0 = time.time()
    res = shared.sd_model.vae.orig_encode_prompt(*args, **kwargs)
    res = shared.sd_model.orig_encode_prompt(*args, **kwargs)
    t1 = time.time()
    timer.process.add('te', t1-t0)
    shared.log.debug(f'Video: te={shared.sd_model.text_encoder.__class__.__name__} time={t1-t0:.2f}')
@@ -133,7 +133,7 @@ class Script(scripts.Script):
        shared.sd_model.sd_checkpoint_info = sd_checkpoint.CheckpointInfo(models.get(model)['repo'])
        shared.sd_model.sd_model_hash = None
        shared.sd_model.vae.orig_decode = shared.sd_model.vae.decode
        shared.sd_model.vae.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.vae.decode = hijack_decode
        shared.sd_model.encode_prompt = hijack_encode_prompt
        shared.sd_model.vae.enable_slicing()


@@ -21,7 +21,7 @@ def hijack_decode(*args, **kwargs):
def hijack_encode_prompt(*args, **kwargs):
    t0 = time.time()
    res = shared.sd_model.vae.orig_encode_prompt(*args, **kwargs)
    res = shared.sd_model.orig_encode_prompt(*args, **kwargs)
    t1 = time.time()
    timer.process.add('te', t1-t0)
    shared.log.debug(f'Video: te={shared.sd_model.text_encoder.__class__.__name__} time={t1-t0:.2f}')
@@ -92,7 +92,7 @@ class Script(scripts.Script):
        shared.sd_model.sd_checkpoint_info = sd_checkpoint.CheckpointInfo(repo_id)
        shared.sd_model.sd_model_hash = None
        shared.sd_model.vae.orig_decode = shared.sd_model.vae.decode
        shared.sd_model.vae.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.vae.decode = hijack_decode
        shared.sd_model.encode_prompt = hijack_encode_prompt
        shared.sd_model.vae.enable_tiling()


@@ -41,7 +41,7 @@ def hijack_decode(*args, **kwargs):
def hijack_encode_prompt(*args, **kwargs):
    t0 = time.time()
    res = shared.sd_model.vae.orig_encode_prompt(*args, **kwargs)
    res = shared.sd_model.orig_encode_prompt(*args, **kwargs)
    t1 = time.time()
    timer.process.add('te', t1-t0)
    shared.log.debug(f'Video: te={shared.sd_model.text_encoder.__class__.__name__} time={t1-t0:.2f}')
@@ -129,7 +129,7 @@
        )
        sd_models.set_diffuser_options(shared.sd_model)
        shared.sd_model.vae.orig_decode = shared.sd_model.vae.decode
        shared.sd_model.vae.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.orig_encode_prompt = shared.sd_model.encode_prompt
        shared.sd_model.vae.decode = hijack_decode
        shared.sd_model.encode_prompt = hijack_encode_prompt
        shared.sd_model.sd_checkpoint_info = sd_checkpoint.CheckpointInfo(repo_id)


@@ -156,7 +156,7 @@ def initialize():
def load_model():
    if not shared.opts.sd_checkpoint_autoload and shared.cmd_opts.ckpt is None:
        log.info('Model: auto load disabled')
        log.info('Model: autoload=False')
    else:
        shared.state.begin('Load')
        thread_model = Thread(target=lambda: shared.sd_model)

wiki

@@ -1 +1 @@
Subproject commit f4b7d2e573185f08ef6e06a6a97763aa83d59701
Subproject commit 9f5ee99a8e6c437801a8727f73acfffe2f0b926e