feat: add Nunchaku group to reference

Replace manual Model/TE checkboxes in Quantization Settings with a
dedicated "Nunchaku" tab in the Extra Networks menu where users can
directly select nunchaku-quantized model variants. Detection now uses
a +nunchaku path marker for disambiguation.
pull/4634/head
CalamitousFelicitousness 2026-02-05 15:08:46 +00:00
parent bf1e763156
commit 935a4fcb03
6 changed files with 214 additions and 8 deletions

View File

@ -0,0 +1,183 @@
{
"FLUX.1-Dev Nunchaku SVDQuant": {
"path": "black-forest-labs/FLUX.1-dev",
"subfolder": "nunchaku",
"preview": "black-forest-labs--FLUX.1-dev.jpg",
"desc": "Nunchaku SVDQuant quantization of FLUX.1-dev transformer with INT4 and SVD rank 32",
"skip": true,
"nunchaku": ["Model", "TE"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"FLUX.1-Schnell Nunchaku SVDQuant": {
"path": "black-forest-labs/FLUX.1-schnell",
"subfolder": "nunchaku",
"preview": "black-forest-labs--FLUX.1-schnell.jpg",
"desc": "Nunchaku SVDQuant quantization of FLUX.1-schnell transformer with INT4 and SVD rank 32",
"skip": true,
"nunchaku": ["Model", "TE"],
"tags": "nunchaku",
"extras": "sampler: Default, cfg_scale: 1.0, steps: 4",
"size": 0,
"date": "2025 June"
},
"FLUX.1-Kontext Nunchaku SVDQuant": {
"path": "black-forest-labs/FLUX.1-Kontext-dev",
"subfolder": "nunchaku",
"preview": "black-forest-labs--FLUX.1-Kontext-dev.jpg",
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Kontext-dev transformer with INT4 and SVD rank 32",
"skip": true,
"nunchaku": ["Model", "TE"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"FLUX.1-Krea Nunchaku SVDQuant": {
"path": "black-forest-labs/FLUX.1-Krea-dev",
"subfolder": "nunchaku",
"preview": "black-forest-labs--FLUX.1-Krea-dev.jpg",
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Krea-dev transformer with INT4 and SVD rank 32",
"skip": true,
"nunchaku": ["Model", "TE"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"FLUX.1-Fill Nunchaku SVDQuant": {
"path": "black-forest-labs/FLUX.1-Fill-dev",
"subfolder": "nunchaku",
"preview": "black-forest-labs--FLUX.1-Fill-dev.jpg",
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Fill-dev transformer for inpainting",
"skip": true,
"nunchaku": ["Model", "TE"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"FLUX.1-Depth Nunchaku SVDQuant": {
"path": "black-forest-labs/FLUX.1-Depth-dev",
"subfolder": "nunchaku",
"preview": "black-forest-labs--FLUX.1-Depth-dev.jpg",
"desc": "Nunchaku SVDQuant quantization of FLUX.1-Depth-dev transformer for depth-conditioned generation",
"skip": true,
"nunchaku": ["Model", "TE"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"Shuttle Jaguar Nunchaku SVDQuant": {
"path": "shuttleai/shuttle-jaguar",
"subfolder": "nunchaku",
"preview": "shuttleai--shuttle-jaguar.jpg",
"desc": "Nunchaku SVDQuant quantization of Shuttle Jaguar transformer",
"skip": true,
"nunchaku": ["Model", "TE"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"Qwen-Image Nunchaku SVDQuant": {
"path": "Qwen/Qwen-Image",
"subfolder": "nunchaku",
"preview": "Qwen--Qwen-Image.jpg",
"desc": "Nunchaku SVDQuant quantization of Qwen-Image transformer with INT4 and SVD rank 128",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"Qwen-Lightning Nunchaku SVDQuant": {
"path": "vladmandic/Qwen-Lightning",
"subfolder": "nunchaku",
"preview": "vladmandic--Qwen-Lightning.jpg",
"desc": "Nunchaku SVDQuant quantization of Qwen-Lightning (8-step distilled) transformer with INT4 and SVD rank 128",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"extras": "steps: 8",
"size": 0,
"date": "2025 June"
},
"Qwen-Image-Edit Nunchaku SVDQuant": {
"path": "Qwen/Qwen-Image-Edit",
"subfolder": "nunchaku",
"preview": "Qwen--Qwen-Image-Edit.jpg",
"desc": "Nunchaku SVDQuant quantization of Qwen-Image-Edit transformer with INT4 and SVD rank 128",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"Qwen-Lightning-Edit Nunchaku SVDQuant": {
"path": "vladmandic/Qwen-Lightning-Edit",
"subfolder": "nunchaku",
"preview": "vladmandic--Qwen-Lightning-Edit.jpg",
"desc": "Nunchaku SVDQuant quantization of Qwen-Lightning-Edit (8-step distilled editing) transformer with INT4 and SVD rank 128",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"extras": "steps: 8",
"size": 0,
"date": "2025 June"
},
"Qwen-Image-Edit-2509 Nunchaku SVDQuant": {
"path": "Qwen/Qwen-Image-Edit-2509",
"subfolder": "nunchaku",
"preview": "Qwen--Qwen-Image-Edit-2509.jpg",
"desc": "Nunchaku SVDQuant quantization of Qwen-Image-Edit-2509 transformer with INT4 and SVD rank 128",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"size": 0,
"date": "2025 September"
},
"Sana 1.6B 1k Nunchaku SVDQuant": {
"path": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
"subfolder": "nunchaku",
"preview": "Efficient-Large-Model--Sana_1600M_1024px_BF16_diffusers.jpg",
"desc": "Nunchaku SVDQuant quantization of Sana 1.6B 1024px transformer with INT4 and SVD rank 32",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"Z-Image-Turbo Nunchaku SVDQuant": {
"path": "Tongyi-MAI/Z-Image-Turbo",
"subfolder": "nunchaku",
"preview": "Tongyi-MAI--Z-Image-Turbo.jpg",
"desc": "Nunchaku SVDQuant quantization of Z-Image-Turbo transformer with INT4 and SVD rank 128",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"extras": "sampler: Default, cfg_scale: 1.0, steps: 9",
"size": 0,
"date": "2025 June"
},
"SDXL Base Nunchaku SVDQuant": {
"path": "stabilityai/stable-diffusion-xl-base-1.0",
"subfolder": "nunchaku",
"preview": "stabilityai--stable-diffusion-xl-base-1.0.jpg",
"desc": "Nunchaku SVDQuant quantization of SDXL Base 1.0 UNet with INT4 and SVD rank 32",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"size": 0,
"date": "2025 June"
},
"SDXL Turbo Nunchaku SVDQuant": {
"path": "stabilityai/sdxl-turbo",
"subfolder": "nunchaku",
"preview": "stabilityai--sdxl-turbo.jpg",
"desc": "Nunchaku SVDQuant quantization of SDXL Turbo UNet with INT4 and SVD rank 32",
"skip": true,
"nunchaku": ["Model"],
"tags": "nunchaku",
"extras": "sampler: Default, cfg_scale: 1.0, steps: 4",
"size": 0,
"date": "2025 June"
}
}

View File

@ -171,6 +171,12 @@ async function filterExtraNetworksForTab(searchTerm) {
.toLowerCase() .toLowerCase()
.includes('quantized') ? '' : 'none'; .includes('quantized') ? '' : 'none';
}); });
} else if (searchTerm === 'nunchaku/') {
cards.forEach((elem) => {
elem.style.display = elem.dataset.tags
.toLowerCase()
.includes('nunchaku') ? '' : 'none';
});
} else if (searchTerm === 'local/') { } else if (searchTerm === 'local/') {
cards.forEach((elem) => { cards.forEach((elem) => {
elem.style.display = elem.dataset.name elem.style.display = elem.dataset.name

View File

@ -255,13 +255,25 @@ def check_quant(module: str = ''):
def check_nunchaku(module: str = ''): def check_nunchaku(module: str = ''):
from modules import shared from modules import shared
if module not in shared.opts.nunchaku_quantization: model_name = getattr(shared.opts, 'sd_model_checkpoint', '')
if '+nunchaku' not in model_name:
return False return False
from modules import mit_nunchaku base_path = model_name.split('+')[0]
mit_nunchaku.install_nunchaku() for v in shared.reference_models.values():
if not mit_nunchaku.ok: if v.get('path', '') != base_path:
return False continue
return True nunchaku_modules = v.get('nunchaku', None)
if nunchaku_modules is None:
continue
if isinstance(nunchaku_modules, bool) and nunchaku_modules:
nunchaku_modules = ['Model', 'TE']
if not isinstance(nunchaku_modules, list):
continue
if module in nunchaku_modules:
from modules import mit_nunchaku
mit_nunchaku.install_nunchaku()
return mit_nunchaku.ok
return False
def create_config(kwargs = None, allow: bool = True, module: str = 'Model', modules_to_not_convert: list = None, modules_dtype_dict: dict = None): def create_config(kwargs = None, allow: bool = True, module: str = 'Model', modules_to_not_convert: list = None, modules_dtype_dict: dict = None):

View File

@ -281,7 +281,6 @@ options_templates.update(options_section(("quantization", "Model Quantization"),
"sdnq_quantize_shuffle_weights": OptionInfo(False, "Shuffle weights in post mode", gr.Checkbox), "sdnq_quantize_shuffle_weights": OptionInfo(False, "Shuffle weights in post mode", gr.Checkbox),
"nunchaku_sep": OptionInfo("<h2>Nunchaku Engine</h2>", "", gr.HTML), "nunchaku_sep": OptionInfo("<h2>Nunchaku Engine</h2>", "", gr.HTML),
"nunchaku_quantization": OptionInfo([], "SVDQuant enabled", gr.CheckboxGroup, {"choices": ["Model", "TE"]}),
"nunchaku_attention": OptionInfo(False, "Nunchaku attention", gr.Checkbox), "nunchaku_attention": OptionInfo(False, "Nunchaku attention", gr.Checkbox),
"nunchaku_offload": OptionInfo(False, "Nunchaku offloading", gr.Checkbox), "nunchaku_offload": OptionInfo(False, "Nunchaku offloading", gr.Checkbox),
@ -881,6 +880,7 @@ profiler = None
import modules.styles import modules.styles
prompt_styles = modules.styles.StyleDatabase(opts) prompt_styles = modules.styles.StyleDatabase(opts)
reference_models = readfile(os.path.join('data', 'reference.json'), as_type="dict") if opts.extra_network_reference_enable else {} reference_models = readfile(os.path.join('data', 'reference.json'), as_type="dict") if opts.extra_network_reference_enable else {}
reference_models.update(readfile(os.path.join('data', 'reference-nunchaku.json'), as_type="dict") if opts.extra_network_reference_enable else {})
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or (cmd_opts.server_name or False)) and not cmd_opts.insecure cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or (cmd_opts.server_name or False)) and not cmd_opts.insecure
log.debug('Initializing: devices') log.debug('Initializing: devices')

View File

@ -305,6 +305,7 @@ class ExtraNetworksPage:
subdirs['Reference'] = 1 subdirs['Reference'] = 1
subdirs['Distilled'] = 1 subdirs['Distilled'] = 1
subdirs['Quantized'] = 1 subdirs['Quantized'] = 1
subdirs['Nunchaku'] = 1
subdirs['Community'] = 1 subdirs['Community'] = 1
subdirs['Cloud'] = 1 subdirs['Cloud'] = 1
subdirs[diffusers_base] = 1 subdirs[diffusers_base] = 1
@ -324,6 +325,8 @@ class ExtraNetworksPage:
subdirs.move_to_end('Distilled', last=True) subdirs.move_to_end('Distilled', last=True)
if 'Quantized' in subdirs: if 'Quantized' in subdirs:
subdirs.move_to_end('Quantized', last=True) subdirs.move_to_end('Quantized', last=True)
if 'Nunchaku' in subdirs:
subdirs.move_to_end('Nunchaku', last=True)
if 'Community' in subdirs: if 'Community' in subdirs:
subdirs.move_to_end('Community', last=True) subdirs.move_to_end('Community', last=True)
if 'Cloud' in subdirs: if 'Cloud' in subdirs:
@ -332,7 +335,7 @@ class ExtraNetworksPage:
for subdir in subdirs: for subdir in subdirs:
if len(subdir) == 0: if len(subdir) == 0:
continue continue
if subdir in ['All', 'Local', 'Diffusers', 'Reference', 'Distilled', 'Quantized', 'Community', 'Cloud']: if subdir in ['All', 'Local', 'Diffusers', 'Reference', 'Distilled', 'Quantized', 'Nunchaku', 'Community', 'Cloud']:
style = 'network-reference' style = 'network-reference'
else: else:
style = 'network-folder' style = 'network-folder'

View File

@ -48,12 +48,14 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
reference_distilled = readfile(os.path.join('data', 'reference-distilled.json'), as_type="dict") reference_distilled = readfile(os.path.join('data', 'reference-distilled.json'), as_type="dict")
reference_community = readfile(os.path.join('data', 'reference-community.json'), as_type="dict") reference_community = readfile(os.path.join('data', 'reference-community.json'), as_type="dict")
reference_cloud = readfile(os.path.join('data', 'reference-cloud.json'), as_type="dict") reference_cloud = readfile(os.path.join('data', 'reference-cloud.json'), as_type="dict")
reference_nunchaku = readfile(os.path.join('data', 'reference-nunchaku.json'), as_type="dict")
shared.reference_models = {} shared.reference_models = {}
shared.reference_models.update(reference_base) shared.reference_models.update(reference_base)
shared.reference_models.update(reference_quant) shared.reference_models.update(reference_quant)
shared.reference_models.update(reference_community) shared.reference_models.update(reference_community)
shared.reference_models.update(reference_distilled) shared.reference_models.update(reference_distilled)
shared.reference_models.update(reference_cloud) shared.reference_models.update(reference_cloud)
shared.reference_models.update(reference_nunchaku)
for k, v in shared.reference_models.items(): for k, v in shared.reference_models.items():
count['total'] += 1 count['total'] += 1