Remove torch.compile from Anima GUI — incompatible with Windows triton

torch.compile with inductor backend requires triton which is not supported
on Windows. Raises ImportError: cannot import name 'triton_key'.
Keep disable_mmap_load_safetensors which works fine.

https://claude.ai/code/session_01FQWfefStwK4SL6Cf4rKK5m
pull/3485/head
Claude 2026-02-12 11:37:33 +00:00
parent 6d900e5a4b
commit 9bf8d66407
No known key found for this signature in database
2 changed files with 1 addition and 12 deletions

View File

@@ -180,12 +180,6 @@ class animaTraining:
info="Offload activations to CPU RAM using async non-blocking transfers. Faster than cpu_offload_checkpointing. Cannot combine with blocks_to_swap.",
interactive=True,
)
self.anima_torch_compile = gr.Checkbox(
label="torch.compile",
value=self.config.get("anima.anima_torch_compile", False),
info="JIT-compile DiT with torch.compile (inductor backend). Can speed up training ~10-30%. Incompatible with Unsloth Offload Checkpointing.",
interactive=True,
)
self.anima_disable_mmap_load_safetensors = gr.Checkbox(
label="Disable mmap Load",
value=self.config.get("anima.anima_disable_mmap_load_safetensors", False),

View File

@@ -336,7 +336,6 @@ def save_configuration(
anima_cache_text_encoder_outputs_to_disk,
anima_blocks_to_swap,
anima_unsloth_offload_checkpointing,
anima_torch_compile,
anima_disable_mmap_load_safetensors,
anima_vae_chunk_size,
anima_vae_disable_cache,
@@ -643,7 +642,6 @@ def open_configuration(
anima_cache_text_encoder_outputs_to_disk,
anima_blocks_to_swap,
anima_unsloth_offload_checkpointing,
anima_torch_compile,
anima_disable_mmap_load_safetensors,
anima_vae_chunk_size,
anima_vae_disable_cache,
@@ -1041,7 +1039,6 @@ def train_model(
anima_cache_text_encoder_outputs_to_disk,
anima_blocks_to_swap,
anima_unsloth_offload_checkpointing,
anima_torch_compile,
anima_disable_mmap_load_safetensors,
anima_vae_chunk_size,
anima_vae_disable_cache,
@@ -1651,7 +1648,7 @@ def train_model(
"color_aug": color_aug,
"dataset_config": dataset_config,
"debiased_estimation_loss": debiased_estimation_loss,
"dynamo_backend": "inductor" if (anima_checkbox and anima_torch_compile and dynamo_backend == "no") else dynamo_backend,
"dynamo_backend": dynamo_backend,
"dim_from_weights": dim_from_weights,
"disable_mmap_load_safetensors": disable_mmap_load_safetensors_value,
"enable_bucket": enable_bucket,
@@ -1873,7 +1870,6 @@ def train_model(
"t5_max_token_length": int(anima_t5_max_token_length) if anima_checkbox else None,
"split_attn": anima_split_attn if anima_checkbox else None,
"unsloth_offload_checkpointing": anima_unsloth_offload_checkpointing if anima_checkbox else None,
"torch_compile": True if (anima_checkbox and anima_torch_compile) else None,
"vae_chunk_size": int(anima_vae_chunk_size) if anima_checkbox and anima_vae_chunk_size else None,
"vae_disable_cache": anima_vae_disable_cache if anima_checkbox else None,
}
@@ -3170,7 +3166,6 @@ def lora_tab(
anima_training.anima_cache_text_encoder_outputs_to_disk,
anima_training.anima_blocks_to_swap,
anima_training.anima_unsloth_offload_checkpointing,
anima_training.anima_torch_compile,
anima_training.anima_disable_mmap_load_safetensors,
anima_training.vae_chunk_size,
anima_training.vae_disable_cache,