mirror of https://github.com/bmaltais/kohya_ss
Fix split_attn description: not required for xformers
xformers handles variable-length sequences via BlockDiagonalMask natively; split_attn is an optional alternative, not a requirement. (pull/3485/head)
parent
25f9df5fd5
commit
9d2eb48dc6
|
|
@ -147,7 +147,7 @@ class animaTraining:
|
|||
self.anima_split_attn = gr.Checkbox(
|
||||
label="Split Attention",
|
||||
value=self.config.get("anima.anima_split_attn", False),
|
||||
info="Split attention computation to reduce memory. Required when using xformers attn_mode.",
|
||||
info="Split attention per-sequence to save memory. Optional with xformers (uses BlockDiagonalMask otherwise). Useful when xformers lacks mask support or for max VRAM savings.",
|
||||
interactive=True,
|
||||
)
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue