update chroma to v48 and add detailed and flash variants

Signed-off-by: Vladimir Mandic <mandic00@live.com>
pull/4089/head
Vladimir Mandic 2025-07-31 14:03:47 -04:00
parent 1d77cefa02
commit 79de6dc396
5 changed files with 23 additions and 5 deletions

View File

@ -6,6 +6,9 @@
- [FLUX.1-Krea-Dev](https://www.krea.ai/blog/flux-krea-open-source-release)
new 12B base model compatible with FLUX.1-Dev from Black Forest Labs, designed with opinionated aesthetic preferences in mind
simply select in *networks -> models -> reference*
- [Chroma](https://huggingface.co/lodestones/Chroma)
update with latest **v48**, **v48 Detail Calibrated** and **v46 Flash** variants
simply select in *networks -> models -> reference*
- **Feature**
- **Wan** select which stage to run: *first/second/both* with configurable *boundary ratio* when running both stages
in settings -> model options
@ -29,6 +32,7 @@
- fix progress bar with refine/detailer
- fix api progress reporting endpoint
- fix openvino backend failing to compile
- fix nunchaku fallback on unsupported model
- avoid forced gc and rely on thresholds
- add missing interrogate in output panel

View File

@ -173,13 +173,27 @@
"extras": "sampler: Default, cfg_scale: 4.5"
},
"lodestones Chroma": {
"path": "lodestones/Chroma",
"lodestones Chroma Unlocked v48": {
"path": "vladmandic/chroma-unlocked-v48",
"preview": "lodestones--Chroma.jpg",
"desc": "Chroma is an 8.9B parameter model based on FLUX.1-schnell. It's fully Apache 2.0 licensed, ensuring that anyone can use, modify, and build on top of it—no corporate gatekeeping. The model is still training right now, and I'd love to hear your thoughts! Your input and feedback are really appreciated.",
"skip": true,
"extras": "sampler: Default, cfg_scale: 3.5"
},
"lodestones Chroma Unlocked v48 Detail Calibrated": {
"path": "vladmandic/chroma-unlocked-v48-detail-calibrated",
"preview": "lodestones--Chroma.jpg",
"desc": "Chroma is an 8.9B parameter model based on FLUX.1-schnell. It's fully Apache 2.0 licensed, ensuring that anyone can use, modify, and build on top of it—no corporate gatekeeping. The model is still training right now, and I'd love to hear your thoughts! Your input and feedback are really appreciated.",
"skip": true,
"extras": "sampler: Default, cfg_scale: 3.5"
},
"lodestones Chroma Unlocked v46 Flash": {
"path": "vladmandic/chroma-unlocked-v46-flash",
"preview": "lodestones--Chroma.jpg",
"desc": "Chroma is an 8.9B parameter model based on FLUX.1-schnell. It's fully Apache 2.0 licensed, ensuring that anyone can use, modify, and build on top of it—no corporate gatekeeping. The model is still training right now, and I'd love to hear your thoughts! Your input and feedback are really appreciated.",
"skip": true,
"extras": "sampler: Default, cfg_scale: 1.0"
},
"Ostris Flex.2 Preview": {
"path": "ostris/Flex.2-preview",

View File

@ -1216,7 +1216,7 @@ def ensure_base_requirements():
update_setuptools()
# used by installer itself so must be installed before requirements
install('rich==14.0.0', 'rich', quiet=True)
install('rich==14.1.0', 'rich', quiet=True)
install('psutil', 'psutil', quiet=True)
install('requests==2.32.3', 'requests', quiet=True)
ts('base', t_start)

View File

@ -117,7 +117,7 @@ def load_quants(kwargs, repo_id, cache_dir, allow_quant): # pylint: disable=unus
"cache_dir": cache_dir,
}
if 'transformer' not in kwargs and model_quant.check_nunchaku('Model'):
raise NotImplementedError('Nunchaku does not support Chroma Model yet. See https://github.com/mit-han-lab/nunchaku/issues/167')
shared.log.error(f'Load module: quant=Nunchaku module=transformer repo="{repo_id}" unsupported')
if 'transformer' not in kwargs and model_quant.check_quant('Model'):
load_args, quant_args = model_quant.get_dit_args(diffusers_load_config, module='Model', device_map=True, modules_to_not_convert=["distilled_guidance_layer"])
kwargs['transformer'] = diffusers.ChromaTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", **load_args, **quant_args)

View File

@ -31,7 +31,7 @@ invisible-watermark
pi-heif
# versioned
rich==14.0.0
rich==14.1.0
safetensors==0.5.3
tensordict==0.8.3
peft==0.16.0