refactor: update nunchaku repo URLs and version handling

- Rename the Hugging Face org from nunchaku-tech to nunchaku-ai across all
  nunchaku model repos (flux, sdxl, sana, z-image, qwen, t5)
- Replace the single global nunchaku version pin with a per-torch-version
  mapping, with robust torch version parsing that strips local build tags
  and handles two-digit minor versions such as 2.10 (sketched below)
pull/4634/head
CalamitousFelicitousness 2026-02-05 21:35:51 +00:00
parent 8ff7074da5
commit a2ee885e28
7 changed files with 42 additions and 22 deletions
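
For context, a minimal sketch of the new version-selection logic; the
parse_torch_ver helper is hypothetical, extracted from the diff below for
illustration, and the mapping values are copied from the commit:

# Strip the local build tag ('+cu128') and keep only major.minor, so
# two-digit minors like '2.10' survive; the old torch.__version__[:3]
# would have produced '2.1' for torch 2.10.
nunchaku_versions = {
    '2.5': '1.0.1', '2.6': '1.0.1', '2.7': '1.1.0', '2.8': '1.1.0',
    '2.9': '1.1.0', '2.10': '1.0.2', '2.11': '1.1.0',
}

def parse_torch_ver(version: str) -> str:  # hypothetical helper name
    return '.'.join(version.split('+')[0].split('.')[:2])

assert parse_torch_ver('2.7.1+cu128') == '2.7'
assert parse_torch_ver('2.10.0') == '2.10'
assert nunchaku_versions.get(parse_torch_ver('2.4.0')) is None  # unsupported torch aborts the install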

View File

@@ -4,10 +4,27 @@ from installer import log, pip
 from modules import devices
 
-nunchaku_ver = '1.1.0'
+nunchaku_versions = {
+    '2.5': '1.0.1',
+    '2.6': '1.0.1',
+    '2.7': '1.1.0',
+    '2.8': '1.1.0',
+    '2.9': '1.1.0',
+    '2.10': '1.0.2',
+    '2.11': '1.1.0',
+}
 
 ok = False
 
 
+def _expected_ver():
+    try:
+        import torch
+        torch_ver = '.'.join(torch.__version__.split('+')[0].split('.')[:2])
+        return nunchaku_versions.get(torch_ver)
+    except Exception:
+        return None
+
+
 def check():
     global ok  # pylint: disable=global-statement
     if ok:
@@ -16,8 +33,9 @@ def check():
     import nunchaku
     import nunchaku.utils
     from nunchaku import __version__
+    expected = _expected_ver()
     log.info(f'Nunchaku: path={nunchaku.__path__} version={__version__.__version__} precision={nunchaku.utils.get_precision()}')
-    if __version__.__version__ != nunchaku_ver:
+    if expected is not None and __version__.__version__ != expected:
         ok = False
         return False
     ok = True
@@ -49,14 +67,16 @@ def install_nunchaku():
     if devices.backend not in ['cuda']:
         log.error(f'Nunchaku: backend={devices.backend} unsupported')
         return False
-    torch_ver = torch.__version__[:3]
-    if torch_ver not in ['2.5', '2.6', '2.7', '2.8', '2.9', '2.10']:
+    torch_ver = '.'.join(torch.__version__.split('+')[0].split('.')[:2])
+    nunchaku_ver = nunchaku_versions.get(torch_ver)
+    if nunchaku_ver is None:
         log.error(f'Nunchaku: torch={torch.__version__} unsupported')
         return False
     suffix = 'x86_64' if arch == 'linux' else 'win_amd64'
     url = os.environ.get('NUNCHAKU_COMMAND', None)
     if url is None:
         arch = f'{arch}_' if arch == 'linux' else ''
-        url = f'https://huggingface.co/nunchaku-tech/nunchaku/resolve/main/nunchaku-{nunchaku_ver}'
+        url = f'https://huggingface.co/nunchaku-ai/nunchaku/resolve/main/nunchaku-{nunchaku_ver}'
         url += f'+torch{torch_ver}-cp{python_ver}-cp{python_ver}-{arch}{suffix}.whl'
     cmd = f'install --upgrade {url}'
     log.debug(f'Nunchaku: install="{url}"')
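
As an illustration of the combined effect (not part of the diff): for, say,
torch 2.8 and CPython 3.11 on Linux x86_64, the installer would assemble a
wheel URL of roughly this shape, assuming such a wheel is published under the
new org:

# Hypothetical values: nunchaku_versions['2.8'] == '1.1.0', python_ver == '311',
# arch == 'linux_' and suffix == 'x86_64' per the branch above.
url = ('https://huggingface.co/nunchaku-ai/nunchaku/resolve/main/'
       'nunchaku-1.1.0+torch2.8-cp311-cp311-linux_x86_64.whl')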

View File

@@ -18,9 +18,9 @@ def load_unet_sdxl_nunchaku(repo_id):
         shared.log.error(f'Load module: quant=Nunchaku module=unet repo="{repo_id}" low nunchaku version')
         return None
     if 'turbo' in repo_id.lower():
-        nunchaku_repo = 'nunchaku-tech/nunchaku-sdxl-turbo/svdq-int4_r32-sdxl-turbo.safetensors'
+        nunchaku_repo = 'nunchaku-ai/nunchaku-sdxl-turbo/svdq-int4_r32-sdxl-turbo.safetensors'
     else:
-        nunchaku_repo = 'nunchaku-tech/nunchaku-sdxl/svdq-int4_r32-sdxl.safetensors'
+        nunchaku_repo = 'nunchaku-ai/nunchaku-sdxl/svdq-int4_r32-sdxl.safetensors'
     shared.log.debug(f'Load module: quant=Nunchaku module=unet repo="{nunchaku_repo}" offload={shared.opts.nunchaku_offload}')
     unet = NunchakuSDXLUNet2DConditionModel.from_pretrained(

View File

@@ -9,19 +9,19 @@ def load_flux_nunchaku(repo_id):
     if 'srpo' in repo_id.lower():
         pass
     elif 'flux.1-dev' in repo_id.lower():
-        nunchaku_repo = f"nunchaku-tech/nunchaku-flux.1-dev/svdq-{nunchaku_precision}_r32-flux.1-dev.safetensors"
+        nunchaku_repo = f"nunchaku-ai/nunchaku-flux.1-dev/svdq-{nunchaku_precision}_r32-flux.1-dev.safetensors"
     elif 'flux.1-schnell' in repo_id.lower():
-        nunchaku_repo = f"nunchaku-tech/nunchaku-flux.1-schnell/svdq-{nunchaku_precision}_r32-flux.1-schnell.safetensors"
+        nunchaku_repo = f"nunchaku-ai/nunchaku-flux.1-schnell/svdq-{nunchaku_precision}_r32-flux.1-schnell.safetensors"
     elif 'flux.1-kontext' in repo_id.lower():
-        nunchaku_repo = f"nunchaku-tech/nunchaku-flux.1-kontext-dev/svdq-{nunchaku_precision}_r32-flux.1-kontext-dev.safetensors"
+        nunchaku_repo = f"nunchaku-ai/nunchaku-flux.1-kontext-dev/svdq-{nunchaku_precision}_r32-flux.1-kontext-dev.safetensors"
     elif 'flux.1-krea' in repo_id.lower():
-        nunchaku_repo = f"nunchaku-tech/nunchaku-flux.1-krea-dev/svdq-{nunchaku_precision}_r32-flux.1-krea-dev.safetensors"
+        nunchaku_repo = f"nunchaku-ai/nunchaku-flux.1-krea-dev/svdq-{nunchaku_precision}_r32-flux.1-krea-dev.safetensors"
     elif 'flux.1-fill' in repo_id.lower():
-        nunchaku_repo = f"nunchaku-tech/nunchaku-flux.1-fill-dev/svdq-{nunchaku_precision}-flux.1-fill-dev.safetensors"
+        nunchaku_repo = f"nunchaku-ai/nunchaku-flux.1-fill-dev/svdq-{nunchaku_precision}-flux.1-fill-dev.safetensors"
     elif 'flux.1-depth' in repo_id.lower():
-        nunchaku_repo = f"nunchaku-tech/nunchaku-flux.1-depth-dev/svdq-{nunchaku_precision}-flux.1-depth-dev.safetensors"
+        nunchaku_repo = f"nunchaku-ai/nunchaku-flux.1-depth-dev/svdq-{nunchaku_precision}-flux.1-depth-dev.safetensors"
     elif 'shuttle' in repo_id.lower():
-        nunchaku_repo = f"nunchaku-tech/nunchaku-shuttle-jaguar/svdq-{nunchaku_precision}-shuttle-jaguar.safetensors"
+        nunchaku_repo = f"nunchaku-ai/nunchaku-shuttle-jaguar/svdq-{nunchaku_precision}-shuttle-jaguar.safetensors"
     else:
         shared.log.error(f'Load module: quant=Nunchaku module=transformer repo="{repo_id}" unsupported')
     if nunchaku_repo is not None:
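
A usage note rather than a change: nunchaku.utils.get_precision() selects the
quantization flavor per GPU (to my understanding, 'int4' on pre-Blackwell
cards and 'fp4' on Blackwell), so the flux.1-dev branch above resolves to e.g.:

nunchaku_precision = 'int4'  # assumed get_precision() result on a pre-Blackwell GPU
nunchaku_repo = f"nunchaku-ai/nunchaku-flux.1-dev/svdq-{nunchaku_precision}_r32-flux.1-dev.safetensors"
assert nunchaku_repo == "nunchaku-ai/nunchaku-flux.1-dev/svdq-int4_r32-flux.1-dev.safetensors"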

View File

@@ -152,7 +152,7 @@ def load_text_encoder(repo_id, cls_name, load_config=None, subfolder="text_encod
     elif cls_name == transformers.T5EncoderModel and allow_shared and shared.opts.te_shared_t5:
         if model_quant.check_nunchaku('TE'):
             import nunchaku
-            repo_id = 'nunchaku-tech/nunchaku-t5/awq-int4-flux.1-t5xxl.safetensors'
+            repo_id = 'nunchaku-ai/nunchaku-t5/awq-int4-flux.1-t5xxl.safetensors'
             cls_name = nunchaku.NunchakuT5EncoderModel
             shared.log.debug(f'Load model: text_encoder="{repo_id}" cls={cls_name.__name__} quant="SVDQuant" loader={_loader("transformers")}')
             text_encoder = nunchaku.NunchakuT5EncoderModel.from_pretrained(

View File

@@ -9,7 +9,7 @@ def load_quants(kwargs, repo_id, cache_dir):
     if 'Sana_1600M_1024px' in repo_id and model_quant.check_nunchaku('Model'): # only available model
         import nunchaku
         nunchaku_precision = nunchaku.utils.get_precision()
-        nunchaku_repo = "nunchaku-tech/nunchaku-sana/svdq-int4_r32-sana1.6b.safetensors"
+        nunchaku_repo = "nunchaku-ai/nunchaku-sana/svdq-int4_r32-sana1.6b.safetensors"
         shared.log.debug(f'Load module: quant=Nunchaku module=transformer repo="{nunchaku_repo}" precision={nunchaku_precision} attention={shared.opts.nunchaku_attention}')
         kwargs['transformer'] = nunchaku.NunchakuSanaTransformer2DModel.from_pretrained(nunchaku_repo, torch_dtype=devices.dtype, cache_dir=cache_dir)
     elif model_quant.check_quant('Model'):

View File

@@ -8,7 +8,7 @@ def load_nunchaku():
     import nunchaku
     nunchaku_precision = nunchaku.utils.get_precision()
     nunchaku_rank = 128
-    nunchaku_repo = f"nunchaku-tech/nunchaku-z-image-turbo/svdq-{nunchaku_precision}_r{nunchaku_rank}-z-image-turbo.safetensors"
+    nunchaku_repo = f"nunchaku-ai/nunchaku-z-image-turbo/svdq-{nunchaku_precision}_r{nunchaku_rank}-z-image-turbo.safetensors"
     shared.log.debug(f'Load module: quant=Nunchaku module=transformer repo="{nunchaku_repo}" attention={shared.opts.nunchaku_attention}')
     transformer = nunchaku.NunchakuZImageTransformer2DModel.from_pretrained(
         nunchaku_repo,

View File

@@ -14,15 +14,15 @@ def load_qwen_nunchaku(repo_id):
     if 'pruning' in repo_id.lower() or 'distill' in repo_id.lower():
         return None
     elif repo_id.lower().endswith('qwen-image'):
-        nunchaku_repo = f"nunchaku-tech/nunchaku-qwen-image/svdq-{nunchaku_precision}_r128-qwen-image.safetensors" # r32 vs r128
+        nunchaku_repo = f"nunchaku-ai/nunchaku-qwen-image/svdq-{nunchaku_precision}_r128-qwen-image.safetensors" # r32 vs r128
     elif repo_id.lower().endswith('qwen-lightning'):
-        nunchaku_repo = f"nunchaku-tech/nunchaku-qwen-image/svdq-{nunchaku_precision}_r128-qwen-image-lightningv1.1-8steps.safetensors" # 8-step variant
+        nunchaku_repo = f"nunchaku-ai/nunchaku-qwen-image/svdq-{nunchaku_precision}_r128-qwen-image-lightningv1.1-8steps.safetensors" # 8-step variant
     elif repo_id.lower().endswith('qwen-image-edit-2509'):
-        nunchaku_repo = f"nunchaku-tech/nunchaku-qwen-image-edit-2509/svdq-{nunchaku_precision}_r128-qwen-image-edit-2509.safetensors" # 8-step variant
+        nunchaku_repo = f"nunchaku-ai/nunchaku-qwen-image-edit-2509/svdq-{nunchaku_precision}_r128-qwen-image-edit-2509.safetensors" # 8-step variant
     elif repo_id.lower().endswith('qwen-image-edit'):
-        nunchaku_repo = f"nunchaku-tech/nunchaku-qwen-image-edit/svdq-{nunchaku_precision}_r128-qwen-image-edit.safetensors" # 8-step variant
+        nunchaku_repo = f"nunchaku-ai/nunchaku-qwen-image-edit/svdq-{nunchaku_precision}_r128-qwen-image-edit.safetensors" # 8-step variant
     elif repo_id.lower().endswith('qwen-lightning-edit'):
-        nunchaku_repo = f"nunchaku-tech/nunchaku-qwen-image-edit/svdq-{nunchaku_precision}_r128-qwen-image-edit-lightningv1.0-8steps.safetensors" # 8-step variant
+        nunchaku_repo = f"nunchaku-ai/nunchaku-qwen-image-edit/svdq-{nunchaku_precision}_r128-qwen-image-edit-lightningv1.0-8steps.safetensors" # 8-step variant
     else:
         shared.log.error(f'Load module: quant=Nunchaku module=transformer repo="{repo_id}" unsupported')
     if nunchaku_repo is not None:
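
Finally, a hypothetical regression check (not part of this commit) that a
reviewer could run to confirm no stale org references survive the rename; the
'modules' search root is an assumption based on the imports above:

# Scan the tree for the old org name; fails loudly if any file was missed.
import pathlib

stale = [str(p) for p in pathlib.Path('modules').rglob('*.py')
         if 'nunchaku-tech/' in p.read_text(encoding='utf-8', errors='ignore')]
assert not stale, f'stale nunchaku-tech references: {stale}'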