mirror of https://github.com/vladmandic/automatic
parent
a78a99f0ff
commit
2a33d9f1a8
|
|
@ -1,10 +1,13 @@
|
|||
# Change Log for SD.Next
|
||||
|
||||
## Update for 2025-12-21
|
||||
## Update for 2025-12-22
|
||||
|
||||
- **Models**
|
||||
- [LongCat Image](https://github.com/meituan-longcat/LongCat-Image) in *Image* and *Image Edit* variants
|
||||
LongCat is a new 8B diffusion base model using Qwen-2.5 as the text encoder
|
||||
- [Qwen Image Layered](https://huggingface.co/Qwen/Qwen-Image-Layered)
|
||||
Qwen-Image-Layered, a model capable of decomposing an image into multiple RGBA layers
|
||||
*note*: set number of desired output layers in *settings -> model options*
|
||||
- **Features**
|
||||
- Google **Gemini** and **Veo** models support for both *Dev* and *Vertex* access methods
|
||||
see [docs](https://vladmandic.github.io/sdnext-docs/Google-GenAI/) for details
|
||||
|
|
|
|||
|
|
@ -200,6 +200,15 @@
|
|||
"size": 56.1,
|
||||
"date": "2025 September"
|
||||
},
|
||||
"Qwen-Image-Layered": {
|
||||
"path": "Qwen/Qwen-Image-Layered",
|
||||
"preview": "Qwen--Qwen-Image-Edit-2509.jpg",
|
||||
"desc": "Qwen-Image-Layered, a model capable of decomposing an image into multiple RGBA layers",
|
||||
"skip": true,
|
||||
"extras": "",
|
||||
"size": 53.7,
|
||||
"date": "2025 December"
|
||||
},
|
||||
"Qwen-Image-Lightning": {
|
||||
"path": "vladmandic/Qwen-Lightning",
|
||||
"preview": "vladmandic--Qwen-Lightning.jpg",
|
||||
|
|
|
|||
|
|
@ -185,6 +185,8 @@ options_templates.update(options_section(('model_options', "Model Options"), {
|
|||
"model_wan_boundary": OptionInfo(0.85, "Stage boundary ratio", gr.Slider, {"minimum": 0, "maximum": 1.0, "step": 0.05 }),
|
||||
"model_chrono_sep": OptionInfo("<h2>ChronoEdit</h2>", "", gr.HTML),
|
||||
"model_chrono_temporal_steps": OptionInfo(0, "Temporal steps", gr.Slider, {"minimum": 0, "maximum": 50, "step": 1 }),
|
||||
"model_qwen_layer_sep": OptionInfo("<h2>WanAI</h2>", "", gr.HTML),
|
||||
"model_qwen_layers": OptionInfo(2, "Qwen layered number of layers", gr.Slider, {"minimum": 2, "maximum": 9, "step": 1 }),
|
||||
}))
|
||||
|
||||
options_templates.update(options_section(('offload', "Model Offloading"), {
|
||||
|
|
|
|||
|
|
@ -25,6 +25,8 @@ def load_qwen(checkpoint_info, diffusers_load_config=None):
|
|||
diffusers.pipelines.auto_pipeline.AUTO_TEXT2IMAGE_PIPELINES_MAPPING["qwen-image"] = diffusers.QwenImageEditPipeline
|
||||
diffusers.pipelines.auto_pipeline.AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["qwen-image"] = diffusers.QwenImageEditPipeline
|
||||
diffusers.pipelines.auto_pipeline.AUTO_INPAINT_PIPELINES_MAPPING["qwen-image"] = diffusers.QwenImageEditPipeline
|
||||
elif 'Layered' in repo_id:
|
||||
cls_name = diffusers.QwenImageLayeredPipeline
|
||||
else:
|
||||
cls_name = diffusers.QwenImagePipeline
|
||||
diffusers.pipelines.auto_pipeline.AUTO_TEXT2IMAGE_PIPELINES_MAPPING["qwen-image"] = diffusers.QwenImagePipeline
|
||||
|
|
@ -69,6 +71,10 @@ def load_qwen(checkpoint_info, diffusers_load_config=None):
|
|||
pipe.task_args = {
|
||||
'output_type': 'np',
|
||||
}
|
||||
if 'Layered' in repo_id:
|
||||
pipe.task_args['use_en_prompt'] = True
|
||||
pipe.task_args['cfg_normalize'] = True
|
||||
pipe.task_args['layers'] = shared.opts.model_qwen_layers
|
||||
|
||||
del text_encoder
|
||||
del transformer
|
||||
|
|
|
|||
Loading…
Reference in New Issue