mirror of https://github.com/vladmandic/automatic
ernie enable/disable built-in pe
Signed-off-by: vladmandic <mandic00@live.com>
pull/4778/head
parent
eba2c3a8a3
commit
d1fb1b47a8
|
|
@ -22,6 +22,7 @@ In addition, to jump on a bandwagon, we're now fully **AI agent** ready with det
|
|||
*note*: requires a large number of steps to achieve sane results
|
||||
- [Baidu ERNIE-Image](https://huggingface.co/baidu/ERNIE-Image) in *base* and *turbo* (distilled) variants
|
||||
ERNIE is a single-stream 8B DiT model with a built-in prompt enhancer that uses the Mistral-3 text encoder
|
||||
*note*: ERNIE has a built-in prompt enhancer which is disabled by default; it can be enabled in *settings -> model options*
|
||||
- [Nucleus-Image](https://huggingface.co/NucleusAI/Nucleus-Image)
|
||||
Nucleus is a MoE text-to-image model with 2B active and 17B total parameters
|
||||
- [Bria FIBO](https://huggingface.co/briaai/FIBO) in *Normal*, *Edit*, and *Lite* (distilled) variants
|
||||
|
|
|
|||
|
|
@ -110,6 +110,8 @@ def create_settings(cmd_opts):
|
|||
"model_chrono_temporal_steps": OptionInfo(0, "Temporal steps", gr.Slider, {"minimum": 0, "maximum": 50, "step": 1 }),
|
||||
"model_qwen_layer_sep": OptionInfo("<h2>Qwen layered</h2>", "", gr.HTML),
|
||||
"model_qwen_layers": OptionInfo(2, "Qwen layered number of layers", gr.Slider, {"minimum": 2, "maximum": 9, "step": 1 }),
|
||||
"model_ernie_sep": OptionInfo("<h2>ERNIE-Image</h2>", "", gr.HTML),
|
||||
"model_ernie_enable_pe": OptionInfo(False, "Enable prompt-enhance"),
|
||||
}))
|
||||
|
||||
# --- Model Offloading ---
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ def load_ernie_image(checkpoint_info, diffusers_load_config=None):
|
|||
sd_models.hf_auth_check(checkpoint_info)
|
||||
|
||||
load_args, _quant_args = model_quant.get_dit_args(diffusers_load_config, allow_quant=False)
|
||||
log.debug(f'Load model: type=ERNIE-Image repo="{repo_id}" offload={shared.opts.diffusers_offload_mode} dtype={devices.dtype} args={load_args}')
|
||||
log.debug(f'Load model: type=ERNIE-Image repo="{repo_id}" offload={shared.opts.diffusers_offload_mode} dtype={devices.dtype} args={load_args} pe={shared.opts.model_ernie_enable_pe}')
|
||||
|
||||
transformer = generic.load_transformer(
|
||||
repo_id,
|
||||
|
|
@ -25,6 +25,9 @@ def load_ernie_image(checkpoint_info, diffusers_load_config=None):
|
|||
load_config=diffusers_load_config,
|
||||
)
|
||||
|
||||
if not shared.opts.model_ernie_enable_pe:
|
||||
load_args['pe'] = False
|
||||
|
||||
pipe = diffusers.ErnieImagePipeline.from_pretrained(
|
||||
repo_id,
|
||||
cache_dir=shared.opts.diffusers_dir,
|
||||
|
|
@ -34,6 +37,7 @@ def load_ernie_image(checkpoint_info, diffusers_load_config=None):
|
|||
)
|
||||
pipe.task_args = {
|
||||
'output_type': 'np',
|
||||
'use_pe': shared.opts.model_ernie_enable_pe,
|
||||
}
|
||||
|
||||
del transformer
|
||||
|
|
|
|||
Loading…
Reference in New Issue