{ "icons": [ {"id":"","label":"🎲️","localized":"","reload":"","hint":"Use random seed"}, {"id":"","label":"🔄","localized":"","reload":"","hint":"Reset values"}, {"id":"","label":"⬆️","localized":"","reload":"","hint":"Upload image"}, {"id":"","label":"⬅️","localized":"","reload":"","hint":"Reuse image"}, {"id":"","label":"⇅","localized":"","reload":"","hint":"Swap values"}, {"id":"","label":"⇨","localized":"","reload":"","hint":"Apply preset to Manual Block Merge tab"}, {"id":"","label":"🕮","localized":"","reload":"","hint":"Save parameters from last generated image as style template"}, {"id":"","label":"⇕","localized":"","reload":"","hint":"Sort by: Name asc/desc, Size largest/smallest, Time newest/oldest"}, {"id":"","label":"⟲","localized":"","reload":"","hint":"Refresh"}, {"id":"","label":"✕","localized":"","reload":"","hint":"Close"}, {"id":"","label":"⊜","localized":"","reload":"","hint":"Fill"}, {"id":"","label":"※","localized":"","reload":"","hint":"Load model as refiner model when selected, otherwise load as base model"}, {"id":"","label":"🔎︎","localized":"","reload":"","hint":"Scan CivitAI for missing metadata and previews"}, {"id":"","label":"☲","localized":"","reload":"","hint":"Change view type"}, {"id":"","label":"⊗","localized":"","reload":"","hint":"Reset values"}, {"id":"","label":"📐","localized":"","reload":"","hint":"Measure"}, {"id":"","label":"🔍","localized":"","reload":"","hint":"Search"}, {"id":"","label":"🖌️","localized":"","reload":"","hint":"LaMa remove selected object from image"}, {"id":"","label":"🖼️","localized":"","reload":"","hint":"Show preview"}, {"id":"","label":"","localized":"","reload":"","hint":"Caption image"}, {"id":"","label":"⁜","localized":"","reload":"","hint":"Cycle image fit method"}, {"id":"","label":"↶","localized":"","reload":"","hint":"Apply selected style to prompt"}, {"id":"","label":"↷","localized":"","reload":"","hint":"Save current prompt to style"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort 
by name, ascending"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort by name, descending"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort by size, ascending"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort by size, descending"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort by resolution, ascending"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort by resolution, descending"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort by time, ascending"}, {"id":"","label":"","localized":"","reload":"","hint":"Sort by time, descending"} ], "main": [ {"id":"","label":"Prompt","localized":"","reload":"","hint":"Describe image you want to generate"}, {"id":"","label":"Start","localized":"","reload":"","hint":"Start"}, {"id":"","label":"End","localized":"","reload":"","hint":"End"}, {"id":"","label":"Core","localized":"","reload":"","hint":"Core settings"}, {"id":"","label":"Negative prompt","localized":"","reload":"","hint":"Describe what you don't want to see in generated image"}, {"id":"","label":"Text","localized":"","reload":"","hint":"Create image from text"}, {"id":"","label":"Image","localized":"","reload":"","hint":"Create image from image"}, {"id":"","label":"Control","localized":"","reload":"","hint":"Create image with full guidance"}, {"id":"","label":"Images","localized":"","reload":"","hint":"Create images
Unified interface
Supports T2I and I2I
With optional control guidance"}, {"id":"","label":"T2I","localized":"","reload":"","hint":"Create image from text
Legacy interface that mimics original text-to-image interface and behavior"}, {"id":"","label":"I2I","localized":"","reload":"","hint":"Create image from image
Legacy interface that mimics original image-to-image interface and behavior"}, {"id":"","label":"Process","localized":"","reload":"","hint":"Process existing image
Can be used to upscale images, remove backgrounds, obfuscate NSFW content, apply various filters and effects"}, {"id":"","label":"Models","localized":"","reload":"","hint":"Download, convert or merge your models and manage models metadata"}, {"id":"","label":"Sampler","localized":"","reload":"","hint":"Settings related to sampler and seed selection and configuration. Samplers guide the process of turning noise into an image over multiple steps."}, {"id":"","label":"Agent Scheduler","localized":"","reload":"","hint":"Enqueue your generate requests and run them in the background"}, {"id":"","label":"AgentScheduler","localized":"","reload":"","hint":"Enqueue your generate requests and run them in the background"}, {"id":"","label":"System","localized":"","reload":"","hint":"System settings and information"}, {"id":"","label":"System Info","localized":"","reload":"","hint":"System information"}, {"id":"","label":"Settings","localized":"","reload":"","hint":"Application settings"}, {"id":"","label":"Script","localized":"","reload":"","hint":"Additional scripts to be used"}, {"id":"","label":"Generate","localized":"","reload":"","hint":"Start processing"}, {"id":"","label":"Generate forever","localized":"","reload":"","hint":"Start processing and continue until cancelled"}, {"id":"","label":"Enqueue","localized":"","reload":"","hint":"Add task to background queue in Agent Scheduler"}, {"id":"","label":"Reprocess","localized":"","reload":"","hint":"Reprocess previous generations using different parameters"}, {"id":"","label":"Stop","localized":"","reload":"","hint":"Stop processing"}, {"id":"","label":"Skip","localized":"","reload":"","hint":"Stop processing current job and continue processing"}, {"id":"","label":"Pause","localized":"","reload":"","hint":"Pause processing"}, {"id":"","label":"Restore","localized":"","reload":"","hint":"Restore parameters from current prompt or last known generated image"}, {"id":"","label":"Clear","localized":"","reload":"","hint":"Clear 
prompts"}, {"id":"","label":"Networks","localized":"","reload":"","hint":"Networks user interface"}, {"id":"","label":"Default strength","localized":"","reload":"","hint":"When adding extra network such as Lora to prompt, use this multiplier for it"}, {"id":"","label":"Upscale","localized":"","reload":"","hint":"Upscale image"}, {"id":"","label":"Model","localized":"","reload":"","hint":"Base model"}, {"id":"","label":"Prompts","localized":"","reload":"","hint":"Image prompt and negative prompt"}, {"id":"","label":"Base","localized":"","reload":"","hint":"Base settings used to run image generation"}, {"id":"","label":"Style","localized":"","reload":"","hint":"Additional styles to be applied on selected generation parameters"}, {"id":"","label":"Styles","localized":"","reload":"","hint":"Additional styles to be applied on selected generation parameters"}, {"id":"","label":"Lora","localized":"","reload":"","hint":"LoRA: Low-Rank Adaptation. Fine-tuned model that is applied on top of a loaded model"}, {"id":"","label":"Embedding","localized":"","reload":"","hint":"Textual inversion embedding is a trained embedded information about the subject"}, {"id":"","label":"Hypernetwork","localized":"","reload":"","hint":"Small trained neural network that modifies behavior of the loaded model"}, {"id":"","label":"VAE","localized":"","reload":"","hint":"Variational Auto Encoder: model used to run image decode at the end of generate"}, {"id":"","label":"History","localized":"","reload":"","hint":"List of previous generations that can be further reprocessed"}, {"id":"","label":"UI disable variable aspect ratio","localized":"","reload":"","hint":"When disabled, all thumbnails appear as squared images"}, {"id":"","label":"Build info on first access","localized":"","reload":"","hint":"Prevents server from building EN page on server startup and instead build it when requested"}, {"id":"","label":"Show reference styles","localized":"","reload":"","hint":"Show or hide build-it styles"}, 
{"id":"","label":"LoRA load using Diffusers method","localized":"","reload":"","hint":"Alternative method uses diffusers built-in LoRA capabilities instead of native SD.Next implementation (may reduce LoRA compatibility)"}, {"id":"","label":"LoRA native fuse with model","localized":"","reload":"","hint":"Merge LoRA into the model for lower memory usage.

Warning: After removing or switching a LoRA, you may still see its style in generated images. To get a clean model, reload it from the model selector."}, {"id":"","label":"LoRA memory cache","localized":"","reload":"","hint":"How many LoRAs to keep in network for future use before requiring reloading from storage"}, {"id":"","label":"LoRA force reload always","localized":"","reload":"","hint":"Forces LoRA networks to reload from storage on every generation, even if already cached.
Useful for debugging or when LoRA files are being modified externally.
Disable for normal use to benefit from caching."}, {"id":"","label":"LoRA diffusers fuse with model","localized":"","reload":"","hint":"Merge LoRA into the model for lower memory usage and torch.compile compatibility.

Warning: After removing or switching a LoRA, you may still see its style in generated images. To get a clean model, reload it from the model selector."}, {"id":"","label":"LoRA precision when quantized","localized":"","reload":"","hint":"When using a BnB 4-bit model, LoRA is applied by decompressing the weights, adding the LoRA, then recompressing. This controls the format used for recompression.

Only affects BnB 4-bit models. SDNQ models keep their original format."}, {"id":"","label":"Local","localized":"","reload":"","hint":"Models that are downloaded and ready to use"}, {"id":"","label":"Gallery","localized":"","reload":"","hint":"Image gallery"}, {"id":"","label":"Reference","localized":"","reload":"","hint":"List of reference models that can be automatically downloaded on first use"}, {"id":"","label":"Samplers","localized":"","reload":"","hint":"Samplers/schedulers advanced settings"}, {"id":"","label":"Seed","localized":"","reload":"","hint":"Initial seed and variation"}, {"id":"","label":"Advanced","localized":"","reload":"","hint":"Advanced settings used to run image generation"}, {"id":"","label":"Scripts","localized":"","reload":"","hint":"Enable additional features by using selected scripts during generate process"}, {"id":"","label":"Corrections","localized":"","reload":"","hint":"Control image color/sharpen/brightness corrections during generate process"}, {"id":"","label":"Parameters","localized":"","reload":"","hint":"Base parameters used during image generation"}, {"id":"","label":"Refine","localized":"","reload":"","hint":"Refine runs additional processing after initial processing has completed and can be used to upscale image and optionally process it again to increase quality and details"}, {"id":"","label":"Detailer","localized":"","reload":"","hint":"Detailer runs additional generate at higher resolution for detected objects"}, {"id":"","label":"Resize","localized":"","reload":"","hint":"Image resizing, can be using fixed resolution or based on scale"}, {"id":"","label":"Batch","localized":"","reload":"","hint":"Batch processing settings"}, {"id":"","label":"Denoise","localized":"","reload":"","hint":"Denoising settings. 
Higher denoise means that more of existing image content is allowed to change during generate"}, {"id":"","label":"Mask","localized":"","reload":"","hint":"Image masking and mask options"}, {"id":"","label":"Input","localized":"","reload":"","hint":"Selection of input media"}, {"id":"","label":"Video","localized":"","reload":"","hint":"Create videos using different methods
Supports text-to-image, image-to-image first-last-frame, etc."}, {"id":"","label":"Control elements","localized":"","reload":"","hint":"Control elements are advanced models that can guide generation towards desired outcome"}, {"id":"","label":"IP adapter","localized":"","reload":"","hint":"Guide generation towards desired outcome using IP adapters plugin models"}, {"id":"","label":"IP adapters","localized":"","reload":"","hint":"IP adapters are plugin models that can guide generation towards desired outcome"}, {"id":"","label":"Extensions","localized":"","reload":"","hint":"Application extensions"}, {"id":"","label":"XYZ Grid","localized":"","reload":"","hint":"XYZ grid is a powerful module that creates an image grid based on varying multiple generation parameters"}, {"id":"","label":"Cover","localized":"","reload":"","hint":"cover full area"}, {"id":"","label":"Inline","localized":"","reload":"","hint":"inline with all additional elements (scrollable)"}, {"id":"","label":"Sidebar","localized":"","reload":"","hint":"sidebar on the right side of the screen"}, {"id":"","label":"SD15","localized":"","reload":"","hint":"StableDiffusion 1.5"}, {"id":"","label":"SD21","localized":"","reload":"","hint":"StableDiffusion 2.1"}, {"id":"","label":"SD35","localized":"","reload":"","hint":"StableDiffusion 3.5"}, {"id":"","label":"SDXL","localized":"","reload":"","hint":"StableDiffusion XL"}, {"id":"","label":"SC","localized":"","reload":"","hint":"StableCascade"}, {"id":"","label":"Flux","localized":"","reload":"","hint":"FLUX.1"}, {"id":"","label":"Show","localized":"","reload":"","hint":"Show image location"}, {"id":"","label":"Save","localized":"","reload":"","hint":"Save image"}, {"id":"","label":"Delete","localized":"","reload":"","hint":"Delete image"}, {"id":"","label":"Replace","localized":"","reload":"","hint":"Replace image"}, {"id":"","label":"List","localized":"","reload":"","hint":"List all available models"}, 
{"id":"","label":"Metadata","localized":"","reload":"","hint":"Update metadata for all available models"}, {"id":"","label":"Loader","localized":"","reload":"","hint":"Allows to manually assemble a diffusion model from individual modules"}, {"id":"","label":"➠ Text","localized":"","reload":"","hint":"Transfer image to text interface"}, {"id":"","label":"➠ Image","localized":"","reload":"","hint":"Transfer image to image interface"}, {"id":"","label":"➠ Inpaint","localized":"","reload":"","hint":"Transfer image to inpaint interface"}, {"id":"","label":"➠ Sketch","localized":"","reload":"","hint":"Transfer image to sketch interface"}, {"id":"","label":"➠ Composite","localized":"","reload":"","hint":"Transfer image to inpaint sketch interface"}, {"id":"","label":"➠ Process","localized":"","reload":"","hint":"Transfer image to process interface"}, {"id":"","label":"➠ Control","localized":"","reload":"","hint":"Transfer image to control interface"}, {"id":"","label":"➠ Caption","localized":"","reload":"","hint":"Transfer image to caption interface"} ], "llm": [ {"id":"","label":"System prompt","localized":"","reload":"","hint":"System prompt controls behavior of the LLM. Processed first and persists throughout conversation. Has highest priority weighting and is always appended at the beginning of the sequence.

Use for: Response formatting rules, role definition, style."}, {"id":"","label":"Prefill text","localized":"","reload":"","hint":"Pre-fills the start of the model's response to guide its output format or content by forcing it to continue the prefill text.
Prefill is filtered out and does not appear in the final response.

Leave empty to let the model generate its own response from scratch."}, {"id":"","label":"VLM Max tokens","localized":"","reload":"","hint":"Maximum number of tokens the model can generate in its response.
The model is not aware of this limit during generation and it won't make the model try to generate more detailed or more concise responses, it simply sets the hard limit for the length, and will forcefully cut off the response when the limit is reached."}, {"id":"","label":"VLM Temperature","localized":"","reload":"","hint":"Controls randomness in token selection. Lower values (e.g., 0.1) make outputs more focused and deterministic, always choosing high-probability tokens.
Higher values (e.g., 0.9) increase creativity and diversity by allowing less probable tokens.

Set to 0 for fully deterministic output (always picks the most likely token)."}, {"id":"","label":"VLM Num Beams","localized":"","reload":"","hint":"Maintains multiple candidate paths simultaneously and selects the overall best sequence.
Like exploring several drafts at once to find the best one. More thorough but much slower and less creative than random sampling.
Generally not recommended, most modern VLMs perform better with sampling methods.
Set to 1 to disable."}, {"id":"","label":"Top-K","localized":"","reload":"","hint":"Limits token selection to the K most likely candidates at each step.
Lower values (e.g., 40) make outputs more focused and predictable, while higher values allow more diverse choices.
Set to 0 to disable."}, {"id":"","label":"Top-P","localized":"","reload":"","hint":"Selects tokens from the smallest set whose cumulative probability exceeds P (e.g., 0.9).
Dynamically adapts the number of candidates based on model confidence; fewer options when certain, more when uncertain.
Set to 1 to disable."}, {"id":"","label":"Use Samplers","localized":"","reload":"","hint":"Enable to use sampling (randomly selecting tokens based on sampling methods like Top-k or Top-p) or disable to use greedy decoding (selecting the most probable token at each step).
Enabling makes outputs more diverse and creative but less deterministic."}, {"id":"","label":"Thinking Mode","localized":"","reload":"","hint":"Enables thinking/reasoning, allowing the model to take more time to generate responses.
This can lead to more thoughtful and detailed answers, but will increase response time.
This setting affects both hybrid and thinking-only models, and in some may result in lower overall quality than expected. For thinking-only models like Qwen3-VL this setting might have to be combined with prefill to guarantee preventing thinking.

Models supporting this feature are marked with an  icon."}, {"id":"","label":"Keep Thinking Trace","localized":"","reload":"","hint":"Include the model's reasoning process in the final output.
Useful for understanding how the model arrived at its answer.
Only works with models that support thinking mode."}, {"id":"","label":"Keep Prefill","localized":"","reload":"","hint":"Include the prefill text at the beginning of the final output.
If disabled, the prefill text used to guide the model is removed from the result."} ], "caption": [ {"id":"","label":"Caption","localized":"","reload":"","hint":"Analyze existing images and create text descriptions"}, {"id":"","label":"VLM Caption","localized":"","reload":"","hint":"Analyze image using vision language model"}, {"id":"","label":"OpenCLiP","localized":"","reload":"","hint":"Analyze image using CLiP model via OpenCLiP"}, {"id":"","label":"Tagger","localized":"","reload":"","hint":"Tag images using anime-focused classification models like WaifuDiffusion or DeepBooru."}, {"id":"","label":"Caption","localized":"","reload":"","hint":"Run caption to get description of your image"}, {"id":"","label":"Caption: Advanced Options","localized":"Advanced Options","reload":"","hint":"Advanced configuration options for captioning models including sampling parameters, output formatting, and model-specific settings."}, {"id":"","label":"Caption: Batch","localized":"Batch","reload":"","hint":"Process multiple images in a batch.
Select files directly or specify a folder path to process all images within."}, {"id":"","label":"Default Caption Type","localized":"","reload":"","hint":"Default captioning method to use when clicking the main Interrogate button.
VLM: Vision-Language Model for detailed natural language descriptions.
OpenCLiP: CLIP-based analysis with style and flavor terms.
Tagger: Anime-style tags using WaifuDiffusion or DeepBooru models."}, {"id":"","label":"VLM: Prompt","localized":"Prompt","reload":"","hint":"Enter your prompt/question here."}, {"id":"","label":"vlm model","localized":"","reload":"","hint":"Select which model to use for Visual Language tasks.

Models which support thinking mode are marked with an  icon."}, {"id":"","label":"Task","localized":"","reload":"","hint":"Changes which task the model will perform. Regular text prompts can be used when the task is set to Use Prompt.
When other options are selected, see the hint text inside an empty Prompt field for guidance."}, {"id":"","label":"CLiP Model","localized":"","reload":"","hint":"CLIP model used for image-text similarity matching.
Larger models (ViT-L, ViT-H) are more accurate but slower and use more VRAM."}, {"id":"","label":"Caption Model","localized":"","reload":"","hint":"BLIP model used to generate the initial image caption.
The caption model describes the image content which CLiP then enriches with style and flavor terms."}, {"id":"","label":"Mode","localized":"","reload":"","hint":"Interrogation mode.
Fast: Quick caption with minimal flavor terms.
Classic: Standard interrogation with balanced quality and speed.
Best: Most thorough analysis, slowest but highest quality.
Negative: Generate terms to use as negative prompt."}, {"id":"","label":"clip: min length","localized":"Min Length","reload":"","hint":"Minimum number of tokens in the generated caption."}, {"id":"","label":"clip: max length","localized":"Max Length","reload":"","hint":"Maximum number of tokens in the generated caption."}, {"id":"","label":"clip: chunk size","localized":"Chunk Size","reload":"","hint":"Batch size for processing description candidates (flavors). Higher values speed up interrogation but increase VRAM usage."}, {"id":"","label":"clip: min flavors","localized":"Min Flavors","reload":"","hint":"Minimum number of descriptive tags (flavors) to keep in the final prompt."}, {"id":"","label":"clip: max flavors","localized":"Max Flavors","reload":"","hint":"Maximum number of descriptive tags (flavors) to keep in the final prompt."}, {"id":"","label":"clip: intermediates","localized":"Intermediates","reload":"","hint":"Size of the intermediate candidate pool when matching image features to descriptive tags (flavours). From this pool, the final tags are selected based on Min/Max Flavors. Higher values may improve quality but are slower."}, {"id":"","label":"clip: num beams","localized":"CLiP Num Beams","reload":"","hint":"Number of beams for beam search during caption generation. Higher values search more possibilities but are slower."}, {"id":"","label":"Tagger Model","localized":"","reload":"","hint":"Model to use for image tagging.
WaifuDiffusion models (wd-*): Modern taggers with separate general and character thresholds.
DeepBooru: Legacy tagger, uses only general threshold."}, {"id":"","label":"General threshold","localized":"","reload":"","hint":"Confidence threshold for general tags (e.g., objects, actions, settings).
Only tags with confidence above this threshold are included in the output.
Higher values are more selective (fewer tags), lower values include more tags."}, {"id":"","label":"Character threshold","localized":"","reload":"","hint":"Confidence threshold for character-specific tags (e.g., character names, specific traits).
Only tags with confidence above this threshold are included.
Higher values are more selective, lower values include more potential matches.
Not supported by DeepBooru models."}, {"id":"","label":"Max tags","localized":"","reload":"","hint":"Maximum number of tags to include in the output.
Limits the result length when an image has many detected features.
Tags are sorted by confidence, so the most relevant ones are kept."}, {"id":"","label":"Include rating","localized":"","reload":"","hint":"Include content rating tags in the output (e.g., safe, questionable, explicit).
Useful for filtering or categorizing images by their content rating."}, {"id":"","label":"Sort alphabetically","localized":"","reload":"","hint":"Sort tags alphabetically instead of by confidence score.
When disabled, tags are sorted by confidence (highest first).
Alphabetical sorting makes it easier to find specific tags."}, {"id":"","label":"Use spaces","localized":"","reload":"","hint":"Replace underscores with spaces in tag output.
Some prompt systems prefer spaces between words (e.g., 'long hair') while others use underscores (e.g., 'long_hair')."}, {"id":"","label":"Escape brackets","localized":"","reload":"","hint":"Escape parentheses and brackets in tags with backslashes.
Required when tags contain characters that have special meaning in prompt syntax, such as ( ) [ ].
Enable this when using the output directly in prompts."}, {"id":"","label":"Exclude tags","localized":"","reload":"","hint":"Comma-separated list of tags to exclude from the output.
Useful for filtering out unwanted or redundant tags that appear frequently."}, {"id":"","label":"Show confidence scores","localized":"","reload":"","hint":"Display confidence scores alongside each tag.
Shows how certain the model is about each tag (0.0 to 1.0).
Useful for understanding which tags are most reliable."}, {"id":"","label":"Save Caption Files","localized":"","reload":"","hint":"Save generated captions to .txt files alongside the images.
Each image gets a matching caption file with the same base name."}, {"id":"","label":"Append Caption Files","localized":"","reload":"","hint":"Append to existing caption files instead of overwriting them.
Useful for adding additional descriptions or tags to images that already have captions."}, {"id":"","label":"Recursive","localized":"","reload":"","hint":"Process images in subfolders recursively.
When enabled, searches all nested subdirectories for images to process."} ], "generate": [ {"id":"","label":"Sampling method","localized":"","reload":"","hint":"Which algorithm to use to produce the image"}, {"id":"","label":"Steps","localized":"","reload":"","hint":"How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results"}, {"id":"","label":"Tiling","localized":"","reload":"","hint":"Produce an image that can be tiled"}, {"id":"","label":"Full quality","localized":"","reload":"","hint":"Use full quality VAE to decode latent samples"}, {"id":"","label":"HiDiffusion","localized":"","reload":"","hint":"HiDiffusion allows creation of high-resolution images using your standard models without duplicates/distortions and improved performance"}, {"id":"","label":"HDR Clamp","localized":"","reload":"","hint":"Adjusts the level of nonsensical details by pruning values that deviate significantly from the distribution mean. It is particularly useful for enhancing generation at higher guidance scales, identifying outliers early in the process and applying mathematical adjustments based on the Range (Boundary) and Threshold settings. Think of it as setting the range within which you want your image values to be, and adjusting the threshold determines which values should be brought back into that range"}, {"id":"","label":"HDR Maximize","localized":"","reload":"","hint":"Calculates a 'normalization factor' by dividing the maximum tensor value by the specified range multiplied by 4. This factor is then used to shift the channels within the given boundary, ensuring maximum dynamic range for subsequent processing. 
The objective is to optimize dynamic range for external applications like Photoshop, particularly for adjusting levels, contrast, and brightness"}, {"id":"","label":"Enable refine pass","localized":"","reload":"","hint":"Use a similar process as image to image to upscale and/or add detail to the final image. Optionally uses refiner model to enhance image details."}, {"id":"","label":"Enable detailer pass","localized":"","reload":"","hint":"Detect target objects such as face and reprocess it at higher resolution"}, {"id":"","label":"Include detections","localized":"","reload":"","hint":"Include original image with detected areas marked"}, {"id":"","label":"Sort detections","localized":"","reload":"","hint":"Sort detected areas by from left to right instead of detection score"}, {"id":"","label":"Denoising strength","localized":"","reload":"","hint":"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies"}, {"id":"","label":"Denoise start","localized":"","reload":"","hint":"Override denoise strength by stating how early base model should finish and when refiner should start. Only applicable to refiner usage. If set to 0 or 1, denoising strength will be used"}, {"id":"","label":"Hires steps","localized":"","reload":"","hint":"Number of sampling steps for upscaled picture. If 0, uses same as for original"}, {"id":"","label":"Strength","localized":"","reload":"","hint":"Denoising strength of during image operation controls how much of original image is allowed to change during generate"}, {"id":"","label":"Upscaler","localized":"","reload":"","hint":"Which pre-trained model to use for the upscaling process."}, {"id":"","label":"Force Hires","localized":"","reload":"","hint":"Hires runs automatically when Latent upscale is selected, but its skipped when using non-latent upscalers. 
Enable force hires to run hires with non-latent upscalers"}, {"id":"","label":"Resize width","localized":"","reload":"","hint":"Resizes image to this width. If 0, width is inferred from either of two nearby sliders"}, {"id":"","label":"Resize height","localized":"","reload":"","hint":"Resizes image to this height. If 0, height is inferred from either of two nearby sliders"}, {"id":"","label":"Refine sampler","localized":"","reload":"","hint":"Use specific sampler as fallback sampler if primary is not supported for specific operation"}, {"id":"","label":"Refiner start","localized":"","reload":"","hint":"Refiner pass will start when base model is this much complete (set to larger than 0 and smaller than 1 to run after full base model run)"}, {"id":"","label":"Refiner steps","localized":"","reload":"","hint":"Number of steps to use for refiner pass"}, {"id":"","label":"Refine guidance","localized":"","reload":"","hint":"CFG scale used for refiner pass"}, {"id":"","label":"Input media","localized":"","reload":"","hint":"Add input image to be used for image-to-image, inpaint or control processing"}, {"id":"","label":"Control media","localized":"","reload":"","hint":"Add input image as separate initialization image for control processing"}, {"id":"","label":"Processed preview","localized":"","reload":"","hint":"Display results from pre-processing of input images before actual generate"}, {"id":"","label":"Attention guidance","localized":"","reload":"","hint":"CFG scale used for with PAG: Perturbed-Attention Guidance"}, {"id":"","label":"Adaptive scaling","localized":"","reload":"","hint":"Adaptive modifier for attention guidance scale"}, {"id":"","label":"Rescale guidance","localized":"","reload":"","hint":"Rescale CFG generated noise to avoid overexposed images"}, {"id":"","label":"Refine Prompt","localized":"","reload":"","hint":"Prompt used for both second encoder in base model (if it exists) and for refiner pass (if enabled)"}, {"id":"","label":"Refine negative 
prompt","localized":"","reload":"","hint":"Negative prompt used for both second encoder in base model (if it exists) and for refiner pass (if enabled)"}, {"id":"","label":"Initial","localized":"","reload":"","hint":"Set image resolution before processing"}, {"id":"","label":"Post","localized":"","reload":"","hint":"Resize image after processing"}, {"id":"","label":"Width","localized":"","reload":"","hint":"Image width"}, {"id":"","label":"Height","localized":"","reload":"","hint":"Image height"}, {"id":"","label":"Batch count","localized":"","reload":"","hint":"How many batches of images to create (has no impact on generation performance or VRAM usage)"}, {"id":"","label":"Batch size","localized":"","reload":"","hint":"How many image to create in a single batch (increases generation performance at cost of higher VRAM usage)"}, {"id":"","label":"Guidance scale","localized":"","reload":"","hint":"Classifier Free Guidance scale: how strongly the image should conform to prompt. Lower values produce more creative results, higher values make it follow the prompt more strictly; recommended values between 5-10"}, {"id":"","label":"Guidance rescale","localized":"","reload":"","hint":"Rescale guidance to avoid overexposed images at higher guidance values"}, {"id":"","label":"Guidance End","localized":"","reload":"","hint":"Ends the effect of CFG and PAG early: A value of 1 acts as normal, 0.5 stops guidance at 50% of steps"}, {"id":"","label":"Initial seed","localized":"","reload":"","hint":"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result"}, {"id":"","label":"Variation","localized":"","reload":"","hint":"Second seed to be mixed with primary seed"}, {"id":"","label":"Variation strength","localized":"","reload":"","hint":"How strong of a variation to produce. At 0, there will be no effect. 
At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something)"}, {"id":"","label":"Resize method","localized":"","reload":"","hint":"Method used to resize the image: can be simple resize, upscaling model, latent resize or asymmetric decode"}, {"id":"","label":"Resize seed from width","localized":"","reload":"","hint":"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution"}, {"id":"","label":"Resize seed from height","localized":"","reload":"","hint":"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution"}, {"id":"","label":"Fixed","localized":"","reload":"","hint":"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio"}, {"id":"","label":"scale","localized":"","reload":"","hint":"Resize image to target scale. If resize fixed width/height are set this option is ignored"}, {"id":"","label":"Crop","localized":"","reload":"","hint":"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out"}, {"id":"","label":"Fill","localized":"","reload":"","hint":"Resize the image so that entirety of image is inside target resolution. 
Fill empty space with image's colors"}, {"id":"","label":"Mask blur","localized":"","reload":"","hint":"How much to blur the mask before processing, in pixels"}, {"id":"","label":"Latent noise","localized":"","reload":"","hint":"fill it with latent space noise"}, {"id":"","label":"Latent nothing","localized":"","reload":"","hint":"fill it with latent space zeroes"}, {"id":"","label":"Adapters","localized":"","reload":"","hint":"Settings related to IP Adapters"}, {"id":"","label":"Inputs","localized":"","reload":"","hint":"Settings related to Input images"}, {"id":"","label":"Control input type","localized":"","reload":"","hint":"Choose which input image is used for control process"}, {"id":"","label":"Video format","localized":"","reload":"","hint":"Format and codec of output video"}, {"id":"","label":"Size & Batch","localized":"","reload":"","hint":"Image size and batch"}, {"id":"","label":"Sigma adjust","localized":"","reload":"","hint":"Adjust sampler sigma value"}, {"id":"","label":"Adjust start","localized":"","reload":"","hint":"Starting step when sigma adjust occurs"}, {"id":"","label":"Adjust end","localized":"","reload":"","hint":"Ending step when sigma adjust occurs"}, {"id":"","label":"Options","localized":"","reload":"","hint":"Options"}, {"id":"","label":"ControlNet","localized":"","reload":"","hint":"ControlNet is an advanced guidance model"}, {"id":"","label":"Processor","localized":"","reload":"","hint":"Processor type to use to preprocess image used for ControlNet"}, {"id":"","label":"Renoise","localized":"","reload":"","hint":"Apply additional noise during detailing"}, {"id":"","label":"Renoise end","localized":"","reload":"","hint":"Final step when renoise is applied"}, {"id":"","label":"Merge detailers","localized":"","reload":"","hint":"Merge results from multiple detailers into single mask before running detailing process"}, {"id":"","label":"Inpaint mode","localized":"","reload":"","hint":"Inpaint mode"}, {"id":"","label":"Inpaint 
area","localized":"","reload":"","hint":"Inpaint area"}, {"id":"","label":"Texture tiling","localized":"","reload":"","hint":"Apply seamless tiling to generated image so it can be used as a texture"}, {"id":"","label":"Override","localized":"","reload":"","hint":"Override settings that can change server behavior and are typically applied from imported image metadata"}, {"id":"","label":"VAE type","localized":"","reload":"","hint":"Choose if you want to run full VAE, reduced quality VAE or attempt to use remote VAE service"}, {"id":"","label":"Guess Mode","localized":"","reload":"","hint":"Removes the requirement to supply a prompt to a ControlNet. It forces Controlnet encoder to do it's 'best guess' based on the contents of the input control map."}, {"id":"","label":"Control Only","localized":"","reload":"","hint":"This uses only the Control input below as the source for any ControlNet or IP Adapter type tasks based on any of our various options."}, {"id":"","label":"Init Image Same As Control","localized":"","reload":"","hint":"Will additionally treat any image placed into the Control input window as a source for img2img type tasks, an image to modify for example."}, {"id":"","label":"Separate Init Image","localized":"","reload":"","hint":"Creates an additional window next to Control input labeled Init input, so you can have a separate image for both Control operations and an init source."}, {"id":"","label":"Override settings","localized":"","reload":"","hint":"If generation parameters deviate from your system settings override settings populated with those settings to override your system configuration for this workflow"}, {"id":"","label":"sigma method","localized":"","reload":"","hint":"Controls how noise levels (sigmas) are distributed across diffusion steps. Options:
- default: the model default
- karras: smoother noise schedule, higher quality with fewer steps
- beta: based on beta schedule values
- exponential: exponential decay of noise
- lambdas: experimental, balances signal-to-noise
- flowmatch: tuned for flow-matching models"}, {"id":"","label":"timestep spacing","localized":"","reload":"","hint":"Determines how timesteps are spaced across the diffusion process. Options:
- default: the model default
- leading: creates evenly spaced steps
- linspace: includes the first and last steps and evenly selects the remaining intermediate steps
- trailing: only includes the last step and evenly selects the remaining intermediate steps starting from the end"}, {"id":"","label":"beta schedule","localized":"","reload":"","hint":"Defines how beta (noise strength per step) grows. Options:
- default: the model default
- linear: evenly decays noise per step
- scaled: squared version of linear, used only by Stable Diffusion
- cosine: smoother decay, often better results with fewer steps
- sigmoid: sharp transition, experimental"}, {"id":"","label":"prediction method","localized":"","reload":"","hint":"Defines what the model predicts at each step. Options:
- default: the model default
- epsilon: noise (most common for Stable Diffusion)
- sample: direct denoised image prediction, also called x0 prediction
- v_prediction: velocity prediction, used by CosXL and NoobAI VPred models
- flow_prediction: used with newer flow-matching models like SD3 and Flux"}, {"id":"","label":"sampler order","localized":"","reload":"","hint":"Order of solver updates in the sampler. Higher order improves stability/accuracy but increases compute cost."}, {"id":"","label":"flow shift","localized":"","reload":"","hint":"Shift value for flowmatching models. Controls the distribution of denoising steps.

Values:
- >1.0: allocate more steps to early denoising (better structure)
- <1.0: allocate more steps to late denoising (better fine details)
- 1.0: balanced schedule

Most flowmatching models use the value of 3 as default. Effectively inactive if dynamic shift is enabled."}, {"id":"","label":"dynamic","localized":"","reload":"","hint":"Dynamic shifting automatically adjusts the denoising schedule based on your image resolution.

The scheduler interpolates between base_shift and max_shift based on actual image resolution.

Enabling disables static Flow shift."}, {"id":"","label":"base shift","localized":"","reload":"","hint":"Minimum shift value for low resolutions when using dynamic shifting."}, {"id":"","label":"max shift","localized":"","reload":"","hint":"Maximum shift value for high resolutions when using dynamic shifting."}, {"id":"","label":"resize mode","localized":"","reload":"","hint":"Defines how the input is resized or adapted in second-pass refinement:
- none: no resizing, keep original resolution
- fixed: force resize to target resolution (may distort)
- crop: center-crop to fit target while keeping aspect ratio
- fill: resize to fit and pad empty space with borders
- outpaint: extend canvas beyond image borders
- context aware: smart resize that blends or adapts surrounding areas"} ], "other": [ {"id":"","label":"Install","localized":"","reload":"","hint":"Install"}, {"id":"","label":"Search","localized":"","reload":"","hint":"Search"}, {"id":"","label":"Sort by","localized":"","reload":"","hint":"Sort by"}, {"id":"","label":"Nudenet","localized":"","reload":"","hint":"Flexible extension that can detect and obfustate nudity in images"}, {"id":"","label":"Prompt enhance","localized":"","reload":"","hint":"Extension that can use different LLMs to rewrite prompt for improved results"}, {"id":"","label":"Enhance now","localized":"","reload":"","hint":"Run prompt enhancement using the selected LLM model"}, {"id":"","label":"Apply to prompt","localized":"","reload":"","hint":"Automatically copy enhanced result to the prompt input box"}, {"id":"","label":"Auto enhance","localized":"","reload":"","hint":"Automatically enhance prompt before every image generation"}, {"id":"","label":"Use vision","localized":"","reload":"","hint":"Include input image when enhancing prompt.

Only available for vision-capable models, marked with  icon."}, {"id":"","label":"LLM model","localized":"","reload":"","hint":"Select the language model to use for prompt enhancement.

Models supporting vision are marked with  icon.
Models supporting thinking mode are marked with  icon."}, {"id":"","label":"Model repo","localized":"","reload":"","hint":"HuggingFace repository ID for the model"}, {"id":"","label":"Model gguf","localized":"","reload":"","hint":"Optional GGUF quantized model repository on HuggingFace"}, {"id":"","label":"Model type","localized":"","reload":"","hint":"Optional GGUF model quantization type"}, {"id":"","label":"Model file","localized":"","reload":"","hint":"Optional specific GGUF model file inside the repository"}, {"id":"","label":"Load custom model","localized":"","reload":"","hint":"Load a custom model with the specified configuration"}, {"id":"","label":"NSFW allowed","localized":"","reload":"","hint":"Allow the model to generate adult content in enhanced prompts"}, {"id":"","label":"Prompt prefix","localized":"","reload":"","hint":"Text prepended at the beginning of the enhanced prompt result.

Useful for adding prompt elements which need to be copied to the image prompt unchanged, like quality tags 'masterpiece, best quality' or artist names, which would otherwise be rewritten by the LLM."}, {"id":"","label":"Prompt suffix","localized":"","reload":"","hint":"Text appended to the end of the enhanced prompt result.

Useful for adding prompt elements which need to be copied to the image prompt unchanged, which would otherwise be rewritten by the LLM."}, {"id":"","label":"Enhanced prompt","localized":"","reload":"","hint":"The enhanced prompt output from the LLM"}, {"id":"","label":"Set prompt","localized":"","reload":"","hint":"Copy the enhanced prompt to the main prompt input"}, {"id":"","label":"Manage extensions","localized":"","reload":"","hint":"Manage extensions"}, {"id":"","label":"Manual install","localized":"","reload":"","hint":"Manually install extension"}, {"id":"","label":"Extension GIT repository URL","localized":"","reload":"","hint":"Specify extension repository URL on GitHub"}, {"id":"","label":"Specific branch name","localized":"","reload":"","hint":"Specify extension branch name, leave blank for default"}, {"id":"","label":"Local directory name","localized":"","reload":"","hint":"Directory where to install extension, leave blank for default"}, {"id":"","label":"Refresh extension list","localized":"","reload":"","hint":"Refresh list of available extensions"}, {"id":"","label":"Update all installed","localized":"","reload":"","hint":"Update installed extensions to their latest available version"}, {"id":"","label":"Apply changes","localized":"","reload":"","hint":"Apply all changes and restart server"}, {"id":"","label":"Uninstall","localized":"","reload":"","hint":"uninstall this extension"}, {"id":"","label":"User interface","localized":"","reload":"","hint":"Review and set user interface preferences"}, {"id":"","label":"Set UI defaults","localized":"","reload":"","hint":"Set current values as default values for the user interface"}, {"id":"","label":"Benchmark","localized":"","reload":"","hint":"Run benchmarks"}, {"id":"","label":"Models & Networks","localized":"","reload":"","hint":"View lists of all available models and networks"}, {"id":"","label":"Restore UI defaults","localized":"","reload":"","hint":"Restore default user interface values"}, 
{"id":"","label":"Detailer classes","localized":"","reload":"","hint":"Specify specific classes to use if selected detailer model is a multi-class model"}, {"id":"","label":"Detailer models","localized":"","reload":"","hint":"Select detection models to use for detailing"}, {"id":"","label":"Detailer negative prompt","localized":"","reload":"","hint":"Use separate negative prompt for detailer. If not present, it will use primary negative prompt"}, {"id":"","label":"Detailer prompt","localized":"","reload":"","hint":"Use separate prompt for detailer. If not present, it will use primary prompt"}, {"id":"","label":"Detailer steps","localized":"","reload":"","hint":"Number of steps to run for detailer process"}, {"id":"","label":"Detailer strength","localized":"","reload":"","hint":"Denoising strength of detailer process"}, {"id":"","label":"Detailer use model augment","localized":"","reload":"","hint":"Run detailer detection models at extra precision"}, {"id":"","label":"Max detected","localized":"","reload":"","hint":"Maximum number of detected objects to run detailer on"}, {"id":"","label":"Edge blur","localized":"","reload":"","hint":"Blur edge of masked area by this percentage"}, {"id":"","label":"Edge padding","localized":"","reload":"","hint":"Expand edge of masked area by this percentage"}, {"id":"","label":"Min confidence","localized":"","reload":"","hint":"Minimum confidence in detected item"}, {"id":"","label":"Max overlap","localized":"","reload":"","hint":"Maximum overlap between two detected items before one is discarded"}, {"id":"","label":"Min size","localized":"","reload":"","hint":"Minimum size of detected object as percentage of overal image"}, {"id":"","label":"Max size","localized":"","reload":"","hint":"Maximum size of detected object as percentage of overal image"}, {"id":"","label":"Process Image","localized":"","reload":"","hint":"Process single image"}, {"id":"","label":"Process Batch","localized":"","reload":"","hint":"Process batch of 
images"}, {"id":"","label":"Process Folder","localized":"","reload":"","hint":"Process all images in a folder"}, {"id":"","label":"Current","localized":"","reload":"","hint":"Analyze modules inside currently loaded model"}, {"id":"","label":"Merge","localized":"","reload":"","hint":"Merge two or more models into a new model"}, {"id":"","label":"Modules","localized":"","reload":"","hint":"Merge and/or replace modules into an existing model"}, {"id":"","label":"Validate","localized":"","reload":"","hint":"Validate all local models"}, {"id":"","label":"CivitAI","localized":"","reload":"","hint":"Search and download models from CitivAI"}, {"id":"","label":"Scale by","localized":"","reload":"","hint":"Use this tab to resize the source image(s) by a chosen factor"}, {"id":"","label":"Scale to","localized":"","reload":"","hint":"Use this tab to resize the source image(s) to a chosen target size"}, {"id":"","label":"Input directory","localized":"","reload":"","hint":"Folder where the images are that you want to process"}, {"id":"","label":"Output directory","localized":"","reload":"","hint":"Folder where the processed images should be saved to"}, {"id":"","label":"Show result images","localized":"","reload":"","hint":"Enable to show the processed images in the image pane"}, {"id":"","label":"Crop to fit","localized":"","reload":"","hint":"If the dimensions of your source image (e.g. 512x510) deviate from your target dimensions (e.g. 1024x768) this function will fit your upscaled image into your target size image. 
Excess will be cropped"}, {"id":"","label":"Refine Upscaler","localized":"","reload":"","hint":"Select secondary upscaler to run after initial upscaler"}, {"id":"","label":"Upscaler 2 visibility","localized":"","reload":"","hint":"Strength of the secondary upscaler"}, {"id":"","label":"Calculate hash for all models","localized":"","reload":"","hint":"Calculates hash for all available models which may take a very long time"}, {"id":"","label":"Weights Clip","localized":"","reload":"","hint":"Forced merged weights to be no heavier than the original model, preventing burn in and overly saturated models"}, {"id":"","label":"ReBasin","localized":"","reload":"","hint":"Performs multiple merges with permutations in order to keep more features from both models"}, {"id":"","label":"Number of ReBasin Iterations","localized":"","reload":"","hint":"Number of times to merge and permute the model before saving"}, {"id":"","label":"CPU","localized":"","reload":"","hint":"Uses cpu and RAM only: slowest but least likely to OOM"}, {"id":"","label":"Shuffle","localized":"","reload":"","hint":"Loads full model in RAM and calculates on VRAM: Less speedup, suggested for SDXL merges"}, {"id":"","label":"In Blocks","localized":"","reload":"","hint":"Downsampling Blocks of the UNet (12 values for SD1.5, 9 values for SDXL)"}, {"id":"","label":"Mid Block","localized":"","reload":"","hint":"Central Block of the UNet (1 value)"}, {"id":"","label":"Out Block","localized":"","reload":"","hint":"Upsampling Blocks of the UNet (12 values for SD1.5, 9 values for SDXL)"}, {"id":"","label":"Preset Interpolation Ratio","localized":"","reload":"","hint":"If two presets are selected, interpolate between them"}, {"id":"","label":"Adapter","localized":"","reload":"","hint":"IP adapter model"}, {"id":"","label":"Active ip adapters","localized":"","reload":"","hint":"Number of active IP adapter"}, {"id":"","label":"Unload adapter","localized":"","reload":"","hint":"Unload IP adapter immediately after 
generate. Otherwise IP adapter will remain loaded for faster use in next generate process"}, {"id":"","label":"Crop to portrait","localized":"","reload":"","hint":"Crop input image to portrait-only before using it as IP adapter input"}, {"id":"","label":"Layer options","localized":"","reload":"","hint":"Manually specify IP adapter advanced layer options"}, {"id":"","label":"X values","localized":"","reload":"","hint":"Separate values for X axis using commas"}, {"id":"","label":"Y values","localized":"","reload":"","hint":"Separate values for Y axis using commas"}, {"id":"","label":"Z values","localized":"","reload":"","hint":"Separate values for Z axis using commas"}, {"id":"","label":"Loops","localized":"","reload":"","hint":"How many times to process an image. Each output is used as the input of the next loop. If set to 1, behavior will be as if this script were not used"}, {"id":"","label":"Final denoising strength","localized":"","reload":"","hint":"The denoising strength for the final loop of each image in the batch"}, {"id":"","label":"Denoising strength curve","localized":"","reload":"","hint":"The denoising curve controls the rate of denoising strength change each loop. Aggressive: Most of the change will happen towards the start of the loops. Linear: Change will be constant through all loops. Lazy: Most of the change will happen towards the end of the loops"}, {"id":"","label":"Tile overlap","localized":"","reload":"","hint":"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam"}, {"id":"","label":"ACI: Color to Mask","localized":"","reload":"","hint":"Pick the color you want to mask and inpaint. Click on the color in the image to automatically select it.
Advised to use images like green screens to get precise results."}, {"id":"","label":"ACI: Color Tolerance","localized":"","reload":"","hint":"Adjust the tolerance to include similar colors in the mask. Lower values = mask only very similar colors. Higher values = mask a wider range of similar colors."}, {"id":"","label":"ACI: Mask Erode","localized":"","reload":"","hint":"Adjust padding to apply an inside offset to the mask. (Recommended value = 2 to remove leftovers at edges)"}, {"id":"","label":"ACI: Mask Blur","localized":"","reload":"","hint":"Adjust blur to apply a smooth transition between image and inpainted area. (Recommended value = 0 for sharpness)"}, {"id":"","label":"ACI: Denoising Strength","localized":"","reload":"","hint":"Change Denoising Strength to achieve desired inpaint amount."} ], "settings": [ {"id":"","label":"Apply settings","localized":"","reload":"","hint":"Save current settings, server restart is recommended"}, {"id":"","label":"Model Loading","localized":"","reload":"","hint":"Settings related to how model is loaded"}, {"id":"","label":"Model Options","localized":"","reload":"","hint":"Settings related to behavior of specific models"}, {"id":"","label":"Model Offloading","localized":"","reload":"","hint":"Settings related to model offloading and memory management"}, {"id":"","label":"Model Quantization","localized":"","reload":"","hint":"Settings related to model quantization which is used to reduce memory usage"}, {"id":"","label":"Nunchaku attention","localized":"","reload":"","hint":"Replaces default attention with Nunchaku's custom FP16 attention kernel for faster inference on consumer NVIDIA GPUs.
Might provide performance improvement on GPUs which have higher FP16 tensor cores throughput than BF16.

Currently only affects Flux-based models (Dev, Schnell, Kontext, Fill, Depth, etc.). Has no effect on Qwen, SDXL, Sana, or other architectures.

Disabled by default."}, {"id":"","label":"Nunchaku offloading","localized":"","reload":"","hint":"Enables Nunchaku's own per-block CPU offloading with asynchronous CUDA streams to reduce VRAM usage.
Uses a ping-pong buffer strategy: while one transformer block computes on GPU, the next block preloads from CPU in the background, hiding most of the transfer latency.

Can reduce VRAM usage at the cost of slower inference.
This replaces SD.Next's pipeline offloading for the transformer component.

Only useful on low-VRAM GPUs. If your GPU has enough memory to hold the quantized model (16+ GB), keep this disabled for maximum speed.
Supports Flux and Qwen models. Not supported for SDXL where this setting is ignored.
Disabled by default."}, {"id":"","label":"Image Metadata","localized":"","reload":"","hint":"Settings related to handling of metadata that is created with generated images"}, {"id":"","label":"Legacy Options","localized":"","reload":"","hint":"Settings related to legacy options - should not be used"}, {"id":"","label":"Restart server","localized":"","reload":"","hint":"Restart server"}, {"id":"","label":"Shutdown server","localized":"","reload":"","hint":"Shutdown server"}, {"id":"","label":"Preview theme","localized":"","reload":"","hint":"Show theme preview"}, {"id":"","label":"Restore defaults","localized":"","reload":"","hint":"Restore default server settings"}, {"id":"","label":"Unload model","localized":"","reload":"","hint":"Unload currently loaded model"}, {"id":"","label":"Reload model","localized":"","reload":"","hint":"Reload currently selected model"}, {"id":"","label":"Models & Loading","localized":"","reload":"","hint":"Settings related to base models, primary backend and model load behavior"}, {"id":"","label":"Variational Auto Encoder","localized":"","reload":"","hint":"Settings related to Variational Auto Encoder and image decoding process during generate"}, {"id":"","label":"Text encoder","localized":"","reload":"","hint":"Settings related to text encoder and prompt encoding processing during generate"}, {"id":"","label":"Compute Settings","localized":"","reload":"","hint":"Settings related to compute precision, cross attention, and optimizations for computing platforms"}, {"id":"","label":"Backend Settings","localized":"","reload":"","hint":"Settings related to compute backends: torch, onnx and olive"}, {"id":"","label":"Pipeline modifiers","localized":"","reload":"","hint":"Additional functionality that can be enabled during generate"}, {"id":"","label":"Model compile","localized":"","reload":"","hint":"Settings related to different model compilation methods"}, {"id":"","label":"System Paths","localized":"","reload":"","hint":"Settings related 
to location of various model directories"}, {"id":"","label":"Image Options","localized":"","reload":"","hint":"Settings related to image format, metadata, and image grids"}, {"id":"","label":"Image Paths","localized":"","reload":"","hint":"Settings related to image filenames, and output directories"}, {"id":"","label":"Live Previews","localized":"","reload":"","hint":"Settings related to live previews, audio notification"}, {"id":"","label":"Sampler Settings","localized":"","reload":"","hint":"Settings related to sampler selection and configuration, and diffuser specific sampler configuration"}, {"id":"","label":"Postprocessing","localized":"","reload":"","hint":"Settings related to post image generation processing, face restoration, and upscaling"}, {"id":"","label":"Control Options","localized":"","reload":"","hint":"Settings related the Control tab"}, {"id":"","label":"Huggingface","localized":"","reload":"","hint":"Settings related huggingface access"}, {"id":"","label":"Show all pages","localized":"","reload":"","hint":"Show all settings pages"}, {"id":"","label":"Base model","localized":"","reload":"","hint":"Main model used for all operations"}, {"id":"","label":"Refiner model","localized":"","reload":"","hint":"Refiner model used for second-pass operations"}, {"id":"","label":"Cached models","localized":"","reload":"","hint":"The number of models to store in RAM for quick access"}, {"id":"","label":"VAE model","localized":"","reload":"","hint":"VAE helps with fine details in the final image and may also alter colors"}, {"id":"","label":"Model load using streams","localized":"","reload":"","hint":"When loading models attempt stream loading optimized for slow or network storage"}, {"id":"","label":"xFormers","localized":"","reload":"","hint":"Memory optimization. Non-Deterministic (different results each time)"}, {"id":"","label":"Scaled-Dot-Product","localized":"","reload":"","hint":"Memory optimization. 
Non-Deterministic unless SDP memory attention is disabled."}, {"id":"","label":"Prompt padding","localized":"","reload":"","hint":"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens"}, {"id":"","label":"Original","localized":"","reload":"","hint":"Original LDM backend"}, {"id":"","label":"Autocast","localized":"","reload":"","hint":"Automatically determine precision during runtime"}, {"id":"","label":"Full","localized":"","reload":"","hint":"Always use full precision"}, {"id":"","label":"FP32","localized":"","reload":"","hint":"Use 32-bit floating point precision for calculations"}, {"id":"","label":"FP16","localized":"","reload":"","hint":"Use 16-bit floating point precision for calculations"}, {"id":"","label":"BF16","localized":"","reload":"","hint":"Use modified 16-bit floating point precision for calculations"}, {"id":"","label":"Full precision (--no-half-vae)","localized":"","reload":"","hint":"Uses FP32 for the VAE. May produce better results while using more VRAM and slower generation"}, {"id":"","label":"Force full precision (--no-half)","localized":"","reload":"","hint":"Uses FP32 for the model. May produce better results while using more VRAM and slower generation"}, {"id":"","label":"Upcast sampling","localized":"","reload":"","hint":"Usually produces similar results to --no-half with better performance while using less memory"}, {"id":"","label":"Attempt VAE roll back for NaN values","localized":"","reload":"","hint":"Requires Torch 2.1 and NaN check enabled"}, {"id":"","label":"Olive use FP16 on optimization","localized":"","reload":"","hint":"Use 16-bit floating point precision for the output model of Olive optimization process. Use 32-bit floating point precision if disabled"}, {"id":"","label":"Olive force FP32 for VAE Encoder","localized":"","reload":"","hint":"Use 32-bit floating point precision for VAE Encoder of the output model. This overrides 'use FP16 on optimization' option. 
If you are getting NaN or black blank images from Img2Img, enable this option and remove cache"}, {"id":"","label":"Olive use static dimensions","localized":"","reload":"","hint":"Make the inference with Olive optimized models much faster. (OrtTransformersOptimization)"}, {"id":"","label":"Olive cache optimized models","localized":"","reload":"","hint":"Save Olive processed models as a cache. You can manage them in ONNX tab"}, {"id":"","label":"File format","localized":"","reload":"","hint":"Select file format for images"}, {"id":"","label":"Include metadata","localized":"","reload":"","hint":"Save image create parameters as metadata tags inside image file"}, {"id":"","label":"Images filename pattern","localized":"","reload":"","hint":"Use following tags to define how filenames for images are chosen:
seq, uuid
date, datetime, job_timestamp
generation_number, batch_number
model, model_shortname
model_hash, model_name
sampler, seed, steps, cfg
clip_skip, denoising
hasprompt, prompt, styles
prompt_hash, prompt_no_styles
prompt_spaces, prompt_words
height, width, image_hash
"}, {"id":"","label":"Row count","localized":"","reload":"","hint":"Use -1 for autodetect and 0 for it to be same as batch size"}, {"id":"","label":"Directory name pattern","localized":"","reload":"","hint":"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg],[prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime], [datetime