Don't force VAE compile with OpenVINO and set min detected memory for dyn atten to 4

pull/3750/head
Disty0 2025-02-09 02:39:30 +03:00
parent c79d12502f
commit a77bd98997
2 changed files with 2 additions and 5 deletions

View File

@@ -536,8 +536,8 @@ options_templates.update(options_section(('cuda', "Compute Settings"), {
 "cross_attention_optimization": OptionInfo(startup_cross_attention, "Attention optimization method", gr.Radio, lambda: {"choices": shared_items.list_crossattention(native)}),
 "sdp_options": OptionInfo(startup_sdp_options, "SDP options", gr.CheckboxGroup, {"choices": ['Flash attention', 'Memory attention', 'Math attention', 'Dynamic attention', 'Sage attention'], "visible": native}),
 "xformers_options": OptionInfo(['Flash attention'], "xFormers options", gr.CheckboxGroup, {"choices": ['Flash attention'] }),
-"dynamic_attention_slice_rate": OptionInfo(0.5, "Dynamic Attention slicing rate in GB", gr.Slider, {"minimum": 0.01, "maximum": gpu_memory, "step": 0.01, "visible": native}),
-"dynamic_attention_trigger_rate": OptionInfo(1, "Dynamic Attention trigger rate in GB", gr.Slider, {"minimum": 0.01, "maximum": gpu_memory*2, "step": 0.01, "visible": native}),
+"dynamic_attention_slice_rate": OptionInfo(0.5, "Dynamic Attention slicing rate in GB", gr.Slider, {"minimum": 0.01, "maximum": max(gpu_memory,4), "step": 0.01, "visible": native}),
+"dynamic_attention_trigger_rate": OptionInfo(1, "Dynamic Attention trigger rate in GB", gr.Slider, {"minimum": 0.01, "maximum": max(gpu_memory,4)*2, "step": 0.01, "visible": native}),
 "sub_quad_sep": OptionInfo("<h3>Sub-quadratic options</h3>", "", gr.HTML, {"visible": not native}),
 "sub_quad_q_chunk_size": OptionInfo(512, "Attention query chunk size", gr.Slider, {"minimum": 16, "maximum": 8192, "step": 8, "visible": not native}),
 "sub_quad_kv_chunk_size": OptionInfo(512, "Attention kv chunk size", gr.Slider, {"minimum": 0, "maximum": 8192, "step": 8, "visible": not native}),

View File

@@ -250,9 +250,6 @@ def create_ui(startup_timer = None):
 if "Model" not in shared.opts.cuda_compile:
     shared.log.warning("OpenVINO: Enabling Torch Compile Model")
     shared.opts.cuda_compile.append("Model")
-if "VAE" not in shared.opts.cuda_compile:
-    shared.log.warning("OpenVINO: Enabling Torch Compile VAE")
-    shared.opts.cuda_compile.append("VAE")
 if shared.opts.cuda_compile_backend != "openvino_fx":
     shared.log.warning("OpenVINO: Setting Torch Compiler backend to OpenVINO FX")
     shared.opts.cuda_compile_backend = "openvino_fx"