hidream-e1.1

Signed-off-by: Vladimir Mandic <mandic00@live.com>
pull/4268/head
Vladimir Mandic 2025-10-11 11:15:15 -04:00
parent c0600ae960
commit 0ae4decadc
5 changed files with 12 additions and 14 deletions

View File

@ -15,10 +15,10 @@
impact of nunchaku engine on unet-based models such as sdxl is much less than on dit-based models, but it's still significantly faster than baseline
note that nunchaku optimized and prequantized unet is a replacement for the base unet, so it's only applicable to base models, not to any finetunes
*how to use*: enable nunchaku in settings -> quantization and then load either sdxl-base or sdxl-base-turbo reference models
- [HiDream E1.1](https://huggingface.co/HiDream-ai/HiDream-E1-1)
updated version of E1 image editing model
- [X-Omni SFT](https://x-omni-team.github.io/)
*experimental*: X-Omni is a transformer-only, discrete autoregressive image generation model trained with reinforcement learning
- [HiDream E1.1](https://huggingface.co/HiDream-ai/HiDream-E1-1)
*experimental*: updated version of E1 image editing model
- **Features**
- **Model save**: ability to save currently loaded model as a new standalone model
why? SD.Next always prefers to start with a full model and quantize on-demand during load

View File

@ -22,7 +22,6 @@ Main ToDo list can be found at [GitHub projects](https://github.com/users/vladma
- [STG](https://github.com/huggingface/diffusers/blob/main/examples/community/README.md#spatiotemporal-skip-guidance)
- [SmoothCache](https://github.com/huggingface/diffusers/issues/11135)
- [MagCache](https://github.com/lllyasviel/FramePack/pull/673/files)
- [Nunchaku PulID](https://github.com/mit-han-lab/nunchaku/pull/274)
- [Dream0 guidance](https://huggingface.co/ByteDance/DreamO)
- [SUPIR upscaler](https://github.com/Fanghua-Yu/SUPIR)
- [ByteDance OneReward](https://github.com/bytedance/OneReward)

View File

@ -700,8 +700,7 @@
"desc": "HiDream-E1 is an image editing model built on HiDream-I1.",
"preview": "HiDream-ai--HiDream-E1-Full.jpg",
"skip": true,
"extras": "sampler: Default",
"experimental": true
"extras": "sampler: Default"
},
"Kwai Kolors": {

View File

@ -37,12 +37,10 @@ pipe_switch_task_exclude = [
'FluxFillPipeline',
'InstantIRPipeline',
'LTXConditionPipeline',
'OmniGenPipeline',
'OmniGen2Pipeline',
'OmniGenPipeline', 'OmniGen2Pipeline',
'PhotoMakerStableDiffusionXLPipeline',
'PixelSmithXLPipeline',
'StableDiffusion3ControlNetPipeline',
'StableDiffusionAdapterPipeline',
'StableDiffusionAdapterPipeline', 'StableDiffusionXLAdapterPipeline',
'StableDiffusionControlNetXSPipeline', 'StableDiffusionXLControlNetXSPipeline',
'StableDiffusionReferencePipeline',
@ -50,10 +48,8 @@ pipe_switch_task_exclude = [
'XOmniPipeline',
]
i2i_pipes = [
'LEditsPPPipelineStableDiffusion',
'LEditsPPPipelineStableDiffusionXL',
'OmniGenPipeline',
'OmniGen2Pipeline',
'LEditsPPPipelineStableDiffusion', 'LEditsPPPipelineStableDiffusionXL',
'OmniGenPipeline', 'OmniGen2Pipeline',
'StableDiffusionAdapterPipeline', 'StableDiffusionXLAdapterPipeline',
'StableDiffusionControlNetXSPipeline', 'StableDiffusionXLControlNetXSPipeline',
]
@ -548,12 +544,12 @@ def load_sdnq_model(checkpoint_info, pipeline, diffusers_load_config, op):
quantization_config_path = os.path.join(checkpoint_info.path, module_name, 'quantization_config.json')
if not os.path.exists(quantization_config_path):
continue
model_path = os.path.join(checkpoint_info.path, module_name)
model_name = os.path.join(checkpoint_info.path, module_name)
quantization_config = shared.readfile(quantization_config_path, silent=True)
shared.log.debug(f'Load {op}: model="{checkpoint_info.name}" module="{module_name}" direct={shared.opts.diffusers_to_gpu} prequant=sdnq')
try:
modules[module_name] = sdnq.load_sdnq_model(
model_path=model_path,
model_path=model_name,
quantization_config=quantization_config,
device=devices.device if shared.opts.diffusers_to_gpu else devices.cpu,
dtype=devices.dtype,

View File

@ -52,6 +52,10 @@ def load_hidream(checkpoint_info, diffusers_load_config={}):
diffusers.pipelines.auto_pipeline.AUTO_TEXT2IMAGE_PIPELINES_MAPPING["hidream-e1"] = diffusers.HiDreamImagePipeline
diffusers.pipelines.auto_pipeline.AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["hidream-e1"] = HiDreamImageEditingPipeline
diffusers.pipelines.auto_pipeline.AUTO_INPAINT_PIPELINES_MAPPING["hidream-e1"] = HiDreamImageEditingPipeline
if transformer and 'E1-1' in repo_id:
transformer.max_seq = 8192
elif transformer and 'E1' in repo_id:
transformer.max_seq = 4608
else:
shared.log.error(f'Load model: type=HiDream model="{checkpoint_info.name}" repo="{repo_id}" not recognized')
return False