diff --git a/modules/shared.py b/modules/shared.py index 7c9fb5646..891ed1306 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -536,8 +536,8 @@ options_templates.update(options_section(('cuda', "Compute Settings"), { "cross_attention_optimization": OptionInfo(startup_cross_attention, "Attention optimization method", gr.Radio, lambda: {"choices": shared_items.list_crossattention(native)}), "sdp_options": OptionInfo(startup_sdp_options, "SDP options", gr.CheckboxGroup, {"choices": ['Flash attention', 'Memory attention', 'Math attention', 'Dynamic attention', 'Sage attention'], "visible": native}), "xformers_options": OptionInfo(['Flash attention'], "xFormers options", gr.CheckboxGroup, {"choices": ['Flash attention'] }), - "dynamic_attention_slice_rate": OptionInfo(0.5, "Dynamic Attention slicing rate in GB", gr.Slider, {"minimum": 0.01, "maximum": gpu_memory, "step": 0.01, "visible": native}), - "dynamic_attention_trigger_rate": OptionInfo(1, "Dynamic Attention trigger rate in GB", gr.Slider, {"minimum": 0.01, "maximum": gpu_memory*2, "step": 0.01, "visible": native}), + "dynamic_attention_slice_rate": OptionInfo(0.5, "Dynamic Attention slicing rate in GB", gr.Slider, {"minimum": 0.01, "maximum": max(gpu_memory,4), "step": 0.01, "visible": native}), + "dynamic_attention_trigger_rate": OptionInfo(1, "Dynamic Attention trigger rate in GB", gr.Slider, {"minimum": 0.01, "maximum": max(gpu_memory,4)*2, "step": 0.01, "visible": native}), "sub_quad_sep": OptionInfo("<h2>Sub-quadratic options</h2>", "", gr.HTML, {"visible": not native}), "sub_quad_q_chunk_size": OptionInfo(512, "Attention query chunk size", gr.Slider, {"minimum": 16, "maximum": 8192, "step": 8, "visible": not native}), "sub_quad_kv_chunk_size": OptionInfo(512, "Attention kv chunk size", gr.Slider, {"minimum": 0, "maximum": 8192, "step": 8, "visible": not native}), diff --git a/modules/ui.py b/modules/ui.py index 8e9a9db85..04e813cd8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -250,9 +250,6 @@ def create_ui(startup_timer = None): if "Model" not in shared.opts.cuda_compile: shared.log.warning("OpenVINO: Enabling Torch Compile Model") shared.opts.cuda_compile.append("Model") - if "VAE" not in shared.opts.cuda_compile: - shared.log.warning("OpenVINO: Enabling Torch Compile VAE") - shared.opts.cuda_compile.append("VAE") if shared.opts.cuda_compile_backend != "openvino_fx": shared.log.warning("OpenVINO: Setting Torch Compiler backend to OpenVINO FX") shared.opts.cuda_compile_backend = "openvino_fx"