update transformers and fix quant params

Signed-off-by: Vladimir Mandic <mandic00@live.com>
pull/4268/head
Vladimir Mandic 2025-10-16 09:21:20 -04:00
parent ffe2a9d148
commit 070edb20b0
6 changed files with 19 additions and 6 deletions

View File

@@ -637,7 +637,7 @@ def check_transformers():
target_transformers = '4.52.4'
target_tokenizers = '0.21.4'
else:
target_transformers = '4.56.2'
target_transformers = '4.57.1'
target_tokenizers = '0.22.1'
if (pkg_transformers is None) or ((pkg_transformers.version != target_transformers) or (pkg_tokenizers is None) or ((pkg_tokenizers.version != target_tokenizers) and (not args.experimental))):
if pkg_transformers is None:

View File

@@ -66,7 +66,10 @@ def run_ltx(task_id,
progress.finish_task(task_id)
yield None, f'LTX Error: {str(e)}'
from diffusers import LTXConditionPipeline # pylint: disable=unused-import
if model is None or len(model) == 0:
yield from abort('Video: no model selected', ok=True)
return
# from diffusers import LTXConditionPipeline # pylint: disable=unused-import
check_av()
progress.add_task_to_queue(task_id)
with queue_lock:
@@ -76,6 +79,10 @@
yield None, 'LTX: Loading...'
engine = 'LTX Video'
load_model(engine, model)
debug(f'Video: cls={shared.sd_model.__class__.__name__} op=init model="{model}"')
if not shared.sd_model.__class__.__name__.startswith("LTX"):
yield from abort(f'Video: cls={shared.sd_model.__class__.__name__} selected model is not LTX model', ok=True)
return
videojob = shared.state.begin('Video', task_id=task_id)
shared.state.job_count = 1

View File

@@ -8,6 +8,8 @@ loaded_model: str = None
def get_bucket(size: int):
if not hasattr(shared.sd_model, 'vae_temporal_compression_ratio'):
return int(size) - (int(size) % 16)
return int(size) - (int(size) % shared.sd_model.vae_temporal_compression_ratio)
@@ -19,6 +21,10 @@ def load_model(engine: str, model: str):
global loaded_model # pylint: disable=global-statement
if loaded_model == model:
return
if model is None or model == '' or model=='None':
loaded_model = None
shared.sd_model = None
return
t0 = time.time()
from modules.video_models import models_def, video_load
selected: models_def.Model = [m for m in models_def.models[engine] if m.name == model][0]

View File

@@ -497,7 +497,7 @@ class SDNQQuantizer(DiffusersQuantizer, HfQuantizer):
param_value: torch.FloatTensor,
param_name: str,
target_device: torch.device,
state_dict: Dict[str, Any],
# state_dict: Dict[str, Any], # removed in transformers==4.57
*args, **kwargs, # pylint: disable=unused-argument
):
if self.pre_quantized:

View File

@@ -8,7 +8,7 @@ loaded_model = None
def load_model(selected: models_def.Model):
if selected is None:
if selected is None or selected.te_cls is None or selected.dit_cls is None:
return ''
global loaded_model # pylint: disable=global-statement
if loaded_model == selected.name:

View File

@@ -35,8 +35,8 @@ rich==14.1.0
safetensors==0.6.2
tensordict==0.8.3
peft==0.17.1
httpx==0.24.1
compel==2.1.1
httpx==0.28.1
compel==2.2.1
torchsde==0.2.6
antlr4-python3-runtime==4.9.3
requests==2.32.4